Dataset schema (column name, dtype, observed min/max):

    max_stars_repo_path    string   (length 4 to 277)
    max_stars_repo_name    string   (length 4 to 130)
    max_stars_count        int64    (0 to 191k)
    id                     string   (length 1 to 8)
    content                string   (length 1 to 996k)
    score                  float64  (-1.25 to 4.06)
    int_score              int64    (0 to 4)

Each record below lists its fields in that order: repository path, repository name, star count, id, file content, score, integer score.
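A minimal, hypothetical sketch of how records with this schema could be loaded and filtered, assuming the dump comes from a Hugging Face `datasets`-style export; the dataset path "user/scored-code-files" is a placeholder, not the real dataset name:

# Hypothetical sketch: load a dataset with the columns shown above and keep
# only the higher-scored files. The dataset path is a placeholder.
from datasets import load_dataset

ds = load_dataset("user/scored-code-files", split="train")  # placeholder name

# Keep only rows whose integer quality score is at least 2.
high_quality = ds.filter(lambda row: row["int_score"] >= 2)

# Inspect a few of the surviving records.
for row in high_quality.select(range(3)):
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["score"])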
27. Remove Element/solution2.py
sunshot/LeetCode
0
3600
from typing import List


class Solution:
    def removeElement(self, nums: List[int], val: int) -> int:
        if not nums:
            return 0
        curr = 0
        n = len(nums)
        while curr < n:
            if nums[curr] == val:
                nums[curr] = nums[n-1]
                n -= 1
            else:
                curr += 1
        return n


if __name__ == '__main__':
    solution = Solution()
    nums = [3, 2, 2, 3]
    val = 3
    ans = solution.removeElement(nums, val)
    # print(ans)
    print(nums[:ans])
2.328125
2
src/zope/publisher/tests/test_requestdataproperty.py
Shoobx/zope.publisher
3
3608
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Request Data-Property Tests
"""
from unittest import TestCase, makeSuite

from zope.interface.common.tests.basemapping \
    import testIEnumerableMapping, testIReadMapping

from zope.publisher.base \
    import RequestDataProperty, RequestDataGetter, RequestDataMapper


class TestDataGettr(RequestDataGetter):
    _gettrname = 'getSomething'


class TestDataMapper(RequestDataMapper):
    _mapname = '_data'


_marker = object()


class Data(object):

    def getSomething(self, name, default=_marker):
        if name.startswith('Z'):
            return "something %s" % name
        if default is not _marker:
            return default
        raise KeyError(name)

    something = RequestDataProperty(TestDataGettr)
    somedata = RequestDataProperty(TestDataMapper)


class Test(TestCase):

    def testRequestDataGettr(self):
        testIReadMapping(self, Data().something,
                         {"Zope": "something Zope"}, ["spam"])

    def testRequestDataMapper(self):
        data = Data()
        sample = {'foo': 'Foo', 'bar': 'Bar'}
        data._data = sample
        inst = data.somedata
        testIReadMapping(self, inst, sample, ["spam"])
        testIEnumerableMapping(self, inst, sample)

    def testNoAssign(self):
        data = Data()
        try:
            data.something = {}
        except AttributeError:
            pass
        else:
            raise AssertionError("Shouldn't be able to assign")
        try:
            data.somedata = {}
        except AttributeError:
            pass
        else:
            raise AssertionError("Shouldn't be able to assign")


def test_suite():
    return makeSuite(Test)
1.460938
1
tests/core_ptl/check_for_ranks.py
PatrykNeubauer/NeMo
2
3624
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import torch from omegaconf import OmegaConf from pytorch_lightning import Trainer from pytorch_lightning.utilities.distributed import rank_zero_only from nemo.core import ModelPT from nemo.utils import logging from nemo.utils.exp_manager import ExpManagerConfig, exp_manager class OnesDataset(torch.utils.data.Dataset): def __init__(self, dataset_len): super().__init__() self.__dataset_len = dataset_len def __getitem__(self, *args): return torch.ones(2) def __len__(self): return self.__dataset_len class ExampleModel(ModelPT): def __init__(self, *args, **kwargs): cfg = OmegaConf.structured({}) super().__init__(cfg, trainer=kwargs.get('trainer', None)) # dummy parameter in order to allow DDP to execute self.l1 = torch.nn.modules.Linear(in_features=2, out_features=1) def train_dataloader(self): return None def val_dataloader(self): return None def predict_dataloader(self): dataset = OnesDataset(2) return torch.utils.data.DataLoader(dataset, batch_size=2) def forward(self, batch): return batch.mean() def validation_step(self, batch, batch_idx): return self(batch) def training_step(self, batch, batch_idx): return self(batch) def list_available_models(self): pass def setup_training_data(self): pass def setup_validation_data(self): pass def validation_epoch_end(self, loss): self.log("val_loss", torch.stack(loss).mean()) def instantiate_multinode_ddp_if_possible(): num_gpus = torch.cuda.device_count() trainer = Trainer(gpus=num_gpus, accelerator='ddp', logger=None, checkpoint_callback=None) exp_manager_cfg = ExpManagerConfig(exp_dir='./ddp_check/', use_datetime_version=False, version="") exp_manager(trainer, cfg=OmegaConf.structured(exp_manager_cfg)) return trainer def setup_model(trainer: Trainer): model = ExampleModel(trainer=trainer) logging.info(f"M.Global Rank:{model.global_rank}") logging.info(f"M.Local Rank:{model.local_rank}") logging.info(f"M.World Size:{model.trainer.world_size}") trainer.predict(model) return model def get_rank_info(texts: list, rank_key: str) -> int: for line in texts: if rank_key in line: rank_value = line.split(":")[-1] rank_value = int(rank_value) return rank_value print("Could not find the correct rank key !") exit(1) @rank_zero_only def check_model_ranks(model: ExampleModel): basedir = os.path.join('./ddp_check/', 'default', 'version_0') file_template = "nemo_log_globalrank-{rank}_localrank-{rank}.txt" world_size = torch.cuda.device_count() for rank in range(world_size): filename = file_template.format(rank=rank) filepath = os.path.join(basedir, filename) with open(filepath, 'r') as f: texts = f.readlines() texts = [t.replace("\n", "") for t in texts] log_global_rank = get_rank_info(texts, rank_key='M.Global Rank') log_world_size = get_rank_info(texts, rank_key='M.World Size') if log_global_rank != rank: print("Logged global rank is not equal to trainer.global_rank !") exit(1) if log_world_size != world_size: print("Logged world size if not equal 
to trainer.world_size !") exit(1) @rank_zero_only def cleanup(): if os.path.exists('./ddp_check'): shutil.rmtree('./ddp_check', ignore_errors=True) def run_checks(): cleanup() trainer = instantiate_multinode_ddp_if_possible() model = setup_model(trainer) check_model_ranks(model) print("DDP checks passed !") cleanup() if __name__ == '__main__': run_checks()
1.921875
2
pantam_cli/utils/messages.py
flmnt/pantam
2
3632
from sys import stderr, stdout
from enum import Enum
from colored import fg, attr

PANTAM: str = fg("yellow") + attr("bold") + "PANTAM" + attr("reset")

colour_msg = lambda msg, colour: fg(colour) + attr("bold") + msg + attr("reset")
info_msg = lambda msg: colour_msg(msg, "blue")
success_msg = lambda msg: colour_msg(msg, "green")
error_msg = lambda msg: colour_msg(msg, "red")


class NewLine(Enum):
    before = 1
    after = 2
    both = 3


def write_msg(msg: str, spacing: NewLine = None) -> None:
    """Write message to stdout"""
    prefix: str = "\n" if spacing in (NewLine.before, NewLine.both) else ""
    suffix: str = "\n" if spacing in (NewLine.after, NewLine.both) else ""
    stdout.write("%s%s%s" % (prefix, msg, suffix))


def write_error(msg: str) -> None:
    """Write message to stderr"""
    stderr.write("\n%s\n" % msg)


welcome_msg = (
    lambda: PANTAM
    + """
The microframework for microservices.

Let's build your app...
"""
)

name_index_file_msg = lambda: "What is the name of your main script?"

name_actions_folder_msg = lambda: "What is the name of your actions folder?"


def create_actions_file_msg(second_run: bool):
    """Actions File Message"""
    article = "another" if second_run else "an"
    return "Do you want to create %s action file?" % article


name_actions_file_msg = lambda: "What is the name of your actions file?"

confirm_structure_msg = (
    lambda structure: """Your application will look like this:

%s

Happy to proceed?"""
    % structure
)
1.9375
2
django_events/users/management/commands/create_default_su.py
chrisBrookes93/django-events-management
0
3640
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model


class Command(BaseCommand):
    help = "Creates a default super user if one doesn't already exist. " \
           "This is designed to be used in the docker-compose.yml to create an initial super user on deployment."

    def handle(self, *args, **kwargs):
        """
        Checks whether any super users exist and creates a default one if not
        :param args: Unused
        :param kwargs: Unused
        """
        super_users = get_user_model().objects.filter(is_superuser=True)
        if super_users.exists():
            self.stdout.write('A superuser already exists, not creating one')
        else:
            get_user_model().objects.create_superuser(email="<EMAIL>", password="<PASSWORD>")
            self.stdout.write('Created default superuser "<EMAIL>"')
            self.stdout.write('Make sure you change the password immediately!')
1.648438
2
BookingScraper-joao_v2/BookingScraper/airbnb.py
joaocamargo/estudos-python
1
3648
#! /usr/bin/env python3.6 import argparse import argcomplete from argcomplete.completers import ChoicesCompleter from argcomplete.completers import EnvironCompleter import requests from bthread import BookingThread from bs4 import BeautifulSoup from file_writer import FileWriter hotels = [] def get_countries(): with open("europa2020.txt", "r") as f: countries = f.read().splitlines() return countries def get_booking_page(session, offset, rooms, country, dest_id, DayIni, DayFim): print('get_booking_page(session, offset, rooms, country, dest_id, DayIni, DayFim):') print(session, offset, rooms, country, dest_id, DayIni, DayFim) diaInicial = str(int(DayIni[0:2])) mesInicial = str(int(DayIni[3:5])) anoInicial = str(int(DayIni[6:10])) diaFinal = str(int(DayFim[0:2])) mesFinal = str(int(DayFim[3:5])) anoFinal = str(int(DayFim[6:10])) ''' Make request to airbnb page and parse html :param offset: :return: html page ''' url = 'https://www.airbnb.com.br/s/Londres/'\ 'homes?refinement_paths%5B%5D=%2Fhomes&current_tab_id=home_tab&selected_tab_id=home_tab&source=mc_search_bar&search_type=unknown'\ '&click_referer=t%3ASEE_ALL%7Csid%3A874f16ee-6196-4289-9717-17dec73e1e5c%7Cst%3AMAGAZINE_HOMES&screen_size=large&hide_dates_and_guests_filters=false'\ '&ne_lat=51.80546533345978&ne_lng=0.4969575708007312&sw_lat=51.17528882051496&sw_lng=-0.8200285131836154&zoom=10&search_by_map=false&checkin={anoInicial}-{mesInicial}-{diaInicial}'\ '&checkout={anoFinal}-{mesFinal}-{diaFinal}&adults={rooms}&property_type_id%5B%5D=1&property_type_id%5B%5D=43&property_type_id%5B%5D=47'\ '&place_id=ChIJdd4hrwug2EcRmSrV3Vo6llI&room_types%5B%5D=Entire%20home%2Fapt'\ '&section_offset=6&items_offset=18'.format(rooms=rooms, country=country.replace(' ', '+'),anoFinal=anoFinal,mesFinal=mesFinal,diaInicial=diaInicial,mesInicial=mesInicial,anoInicial=anoInicial,diaFinal=diaFinal,dest_id=dest_id) + str(offset) r = requests.get(url, headers= {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0)' ' Gecko/20100101 Firefox/48.0'}) html = r.content print(url) parsed_html = BeautifulSoup(html, 'lxml') return parsed_html def process_hotels(session, offset, rooms, country, dest_id, DayIni, DayFim): parsed_html = get_booking_page(session, offset, rooms, country, dest_id,DayIni, DayFim) hotel = parsed_html.find_all('div', {'class': 'sr_item'}) for ho in hotel: #print("ho.find('a', {'class': 'jq_tooltip'})") #print(ho.find('a', {'class': 'jq_tooltip'})) #name = ho.find('a', {'class': 'jq_tooltip'})['data-title'] print("ho.find('span', {'class': 'sr-hotel__name'})") #print(ho.find('span', {'class': 'sr-hotel__name'})) if ho.find('span', {'class': 'sr-hotel__name'}) is not None: name = str(ho.find('span', {'class': 'sr-hotel__name'}).text.encode('utf-8')).replace('\\n','').replace("b","").replace("'","").replace('\\','') else: name = '-1' if ho.find('div', {'class': 'bui-price-display__value prco-inline-block-maker-helper'}) is not None: price = ho.find('div', {'class': 'bui-price-display__value prco-inline-block-maker-helper'}).text.replace('\n','').replace("b","").replace("'","") else: price = '-1' if ho.find('span', {'class': '_ky9opu0'}) is not None: nota = str(ho.find('span', {'class': '_ky9opu0'}).text.replace('\n','').replace("b","").replace("'","")) else : nota = '-1' if ho.find('span', {'title': 'This is the straight-line distance on the map. Actual travel distance may vary.'}) is not None: distance = str(ho.find('span', {'title': 'This is the straight-line distance on the map. 
Actual travel distance may vary.'}).text.encode('utf-8')).replace('\\n','').replace("b","").replace("'","").replace('\\','') else : distance = '-1' # if ho.find('a', {'class': 'bui-link'}) is not None : # result = [str(item) for item in ho.find_all('span', attrs={'data-bui-component' : 'Tooltip'})] # print('TAMANHO TOOLTIP', str(len(result))) # for i in result: # print(i) # for i in result: # if i in 'km': # distance = str(i) # else: # distance = '----' # else: # distance = '----' # if len(result) ==1: # if result[0] in 'km': # distance = result # else: # distance = 'aaaaa' + str(len(result)) # else: # distance = '---' hotels.append(DayIni+';'+DayFim+';'+name + ';' + price + ';' + nota + ';' + distance) #hotels.append(str(len(hotels) + 1) + ' : ' + name + ' : ' + price) def prep_data(rooms=1, country='Macedonia', dest_id='-1', DayIni='01/01/2019', DayFim='02/01/2019', out_format=None): ''' Prepare data for saving :return: hotels: set() ''' offset = 1 session = requests.Session() parsed_html = get_booking_page(session, offset, rooms, country, dest_id, DayIni,DayFim) all_offset = parsed_html.find_all('li', {'class': 'sr_pagination_item'})[-1].get_text().splitlines()[-1] threads = [] for i in range(int(all_offset)): offset += 1 t = BookingThread(session, offset, rooms, country,dest_id,DayIni, DayFim, process_hotels) threads.append(t) for t in threads: t.start() for t in threads: t.join() hotels2 = hotels return hotels2 def get_data(rooms=1, country='Macedonia', dest_id='-1',DayIni='01/01/2019',DayFim='02/01/2019', out_format=None): ''' Get all accomodations in Macedonia and save them in file :return: hotels-in-macedonia.{txt/csv/xlsx} file ''' print('Procurando por',country) hotels_list = prep_data(rooms, country,dest_id, DayIni, DayFim, out_format) save_data(hotels_list , out_format=out_format, country=country) def save_data(data, out_format, country): ''' Saves hotels list in file :param data: hotels list :param out_format: json, csv or excel :return: ''' writer = FileWriter(data, out_format, country) file = writer.output_file() print('All accommodations are saved.') print('You can find them in', file, 'file') if __name__ == "__main__": parser = argparse.ArgumentParser() countries = get_countries() parser.add_argument("--rooms", help='Add the number of rooms to the booking request.', default=1, type=int, nargs='?') parser.add_argument("--country", help='Add the country to the booking request.', default='Macedonia', nargs='?').completer = ChoicesCompleter(countries) parser.add_argument("--dest_id", help='Add the country to the booking request.', default='0', nargs='?') parser.add_argument("--DayIni", help='Data inicial', default='01/01/2019', nargs='?') parser.add_argument("--DayFim", help='Data inicial', default='02/01/2019', nargs='?') parser.add_argument("--out_format", help='Add the format for the output file. 
Add excel, json or csv.', default='json', choices=['json', 'excel', 'csv'], nargs='?').completer = EnvironCompleter argcomplete.autocomplete(parser) args = parser.parse_args() localidades = [{ 'Pais': 'London', 'dest_id': '-2601889' }, { 'Pais': 'Utrecht', 'dest_id': '-2154382' }, { 'Pais': 'Buzios', 'dest_id': '-626254' }, { 'Pais': '', 'dest_id': '' }] countryAux = [d['Pais'] for d in localidades if args.dest_id in d['dest_id']] if len(countryAux)>0: country = countryAux[0] print('Parametros') print(args.rooms, country,args.dest_id,args.DayIni,args.DayFim, args.out_format) get_data(args.rooms, country,args.dest_id,args.DayIni,args.DayFim, args.out_format) else: country = 'Nao Identificado' locais = [d['Pais'] + ':' + d['dest_id'] for d in localidades if d['Pais'] != ''] print('----------') print('Utilize uma das seguintes localizações') for i in locais: print(i) print('----------')
1.710938
2
account/views.py
KimSoungRyoul/drf_unitteset_study_project
0
3672
# Create your views here.
from django.db.models import QuerySet
from django.utils.decorators import method_decorator
from drf_yasg.utils import swagger_auto_schema
from rest_framework import viewsets, status
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from rest_framework.viewsets import mixins

from account.documents import DjangoFilterDescriptionInspector
from account.models import Customer
from account.serializers import CustomerInfoSerializer, SignUpFormSerializer


@method_decorator(name='retrieve', decorator=swagger_auto_schema(
    operation_description="회원 개인정보 조회 API",
    filter_inspectors=[DjangoFilterDescriptionInspector],
))
@method_decorator(name='create', decorator=swagger_auto_schema(
    operation_description="회원 가입 API",
))
@method_decorator(name='update', decorator=swagger_auto_schema(
    operation_description="회원 정보 수정 API",
))
@method_decorator(name='destroy', decorator=swagger_auto_schema(
    operation_description="회원 탈퇴 API",
))
class CustomerAPIViewSet(mixins.CreateModelMixin,
                         mixins.DestroyModelMixin,
                         mixins.RetrieveModelMixin,
                         mixins.UpdateModelMixin,
                         viewsets.GenericViewSet):
    queryset: QuerySet = Customer.objects
    permission_classes = (IsAuthenticated,)
    http_method_names = ['get', 'post', 'put', 'delete']

    def get_serializer_class(self):
        if self.request.method == 'POST':
            return SignUpFormSerializer
        elif self.request.method == 'GET':
            return CustomerInfoSerializer
        elif self.request.method == 'PUT':
            return SignUpFormSerializer
        elif self.request.method == 'DELETE':
            return SignUpFormSerializer

    def get_permissions(self):
        if self.request.method == 'POST':
            permission_classes = [AllowAny]
            return [permission() for permission in permission_classes]

    def create(self, request, *args, **kwargs):
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        headers = self.get_success_headers(serializer.data)
        return Response({'id': serializer.data['id']}, status=status.HTTP_201_CREATED, headers=headers)
1.234375
1
tests/test_db.py
davebryson/py-tendermint
24
3688
import os

from tendermint.db import VanillaDB
from tendermint.utils import home_dir


def test_database():
    dbfile = home_dir('temp', 'test.db')
    db = VanillaDB(dbfile)

    db.set(b'dave', b'one')
    result = db.get(b'dave')
    assert(b'one' == result)

    db.set(b'dave', b'two')
    result = db.get(b'dave')
    assert(b'two' == result)

    assert(None == db.get(b'doesntexist'))

    assert(db.exists(b'dave'))
    db.delete(b'dave')
    assert(db.exists(b'dave') == False)

    if os.path.exists(dbfile):
        os.remove(dbfile)
1.210938
1
source/tests/test_resources.py
aws-solutions/maintaining-personalized-experiences-with-machine-learning
6
3704
# ######################################################################################################################
#  Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.                                                  #
#                                                                                                                      #
#  Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance     #
#  with the License. You may obtain a copy of the License at                                                          #
#                                                                                                                      #
#   http://www.apache.org/licenses/LICENSE-2.0                                                                         #
#                                                                                                                      #
#  Unless required by applicable law or agreed to in writing, software distributed under the License is distributed   #
#  on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for  #
#  the specific language governing permissions and limitations under the License.                                     #
# ######################################################################################################################
import pytest

from shared.resource import (
    DatasetGroup,
    Schema,
    Dataset,
    DatasetImportJob,
    Solution,
    SolutionVersion,
    Campaign,
    EventTracker,
    BatchSegmentJob,
    BatchInferenceJob,
)


@pytest.mark.parametrize(
    "klass,camel,dash,snake",
    [
        (DatasetGroup, "datasetGroup", "dataset-group", "dataset_group"),
        (Schema, "schema", "schema", "schema"),
        (Dataset, "dataset", "dataset", "dataset"),
        (
            DatasetImportJob,
            "datasetImportJob",
            "dataset-import-job",
            "dataset_import_job",
        ),
        (Solution, "solution", "solution", "solution"),
        (SolutionVersion, "solutionVersion", "solution-version", "solution_version"),
        (Campaign, "campaign", "campaign", "campaign"),
        (EventTracker, "eventTracker", "event-tracker", "event_tracker"),
        (
            BatchInferenceJob,
            "batchInferenceJob",
            "batch-inference-job",
            "batch_inference_job",
        ),
        (BatchSegmentJob, "batchSegmentJob", "batch-segment-job", "batch_segment_job"),
    ],
    ids=[
        "DatasetGroup",
        "Schema",
        "Dataset",
        "DatasetImportJob",
        "Solution",
        "SolutionVersion",
        "Campaign",
        "EventTracker",
        "BatchInferenceJob",
        "BatchSegmentJob,",
    ],
)
def test_resource_naming(klass, camel, dash, snake):
    assert klass().name.camel == camel
    assert klass().name.dash == dash
    assert klass().name.snake == snake
1.015625
1
nipype/interfaces/spm/__init__.py
felixsc1/nipype
8
3712
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Top-level namespace for spm."""

from .base import (Info, SPMCommand, logger, no_spm, scans_for_fname,
                   scans_for_fnames)
from .preprocess import (FieldMap, SliceTiming, Realign, RealignUnwarp,
                         Coregister, Normalize, Normalize12, Segment,
                         Smooth, NewSegment, DARTEL, DARTELNorm2MNI,
                         CreateWarped, VBMSegment)
from .model import (Level1Design, EstimateModel, EstimateContrast, Threshold,
                    OneSampleTTestDesign, TwoSampleTTestDesign,
                    PairedTTestDesign, MultipleRegressionDesign)
from .utils import (Analyze2nii, CalcCoregAffine, ApplyTransform, Reslice,
                    ApplyInverseDeformation, ResliceToReference, DicomImport)
1.023438
1
cifar/evalit.py
Sharkbyteprojects/IRIS-ML_and_Deep-Learning
0
3720
import keras
from keras.models import load_model
from PIL import Image
import matplotlib.pylab as plt
import numpy as np
import zipfile

print("Extract")
zip_ref = zipfile.ZipFile("./asset.zip", 'r')
zip_ref.extractall(".")
zip_ref.close()

print("Load Model")
model = load_model("cifar-model.h5")

CIFAR_10_CLASSES = ["Plane", "Car", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]


def calc(imname):
    test_image = Image.open("asset/" + imname)
    test_image = test_image.resize((32, 32), Image.ANTIALIAS)
    test_image = np.array(test_image, dtype="float32")
    test_image /= 255
    test_image = test_image.reshape(-1, 32, 32, 3)
    predictions = model.predict(test_image)
    index_max_pred = np.argmax(predictions)
    plt.title("Complete: {}".format(CIFAR_10_CLASSES[index_max_pred]))
    plt.imshow(test_image[0].reshape(32, 32, 3))
    print(predictions)
    plt.show()


print("START TEST")
calc("lkw-image.jpg")
calc("cat.jpg")
calc("frog.jpg")
calc("fog.jpg")
calc("lfog.jpg")
calc("d.jpg")
calc("b.jpg")
calc("bs.jpg")
calc("plapper.jpg")
calc("ds.jpg")
print("Complete")
print("End")
quit(0)
1.6875
2
firelight/interfaces/light.py
roshie548/firelight
16
3728
from abc import ABC, abstractmethod

from .color import Color


class LightSystem(ABC):
    @classmethod
    def __subclasshook__(cls, subclass):
        return (hasattr(subclass, 'set_transition_time')
                and callable(subclass.set_transition_time)
                and hasattr(subclass, 'discover_lights')
                and callable(subclass.discover_lights)
                and hasattr(subclass, 'set_color_all_lights')
                and callable(subclass.set_color_all_lights))

    @abstractmethod
    def discover_lights(self):
        """Discover the lights and groups in this LightSystem."""
        raise NotImplementedError

    @abstractmethod
    def set_transition_time(self, transition_time: int):
        """Set how long it takes in milliseconds for colors to transition."""
        raise NotImplementedError

    @abstractmethod
    def set_color(self, color: Color):
        """Set the color of all the lights in the LightSystem."""
        raise NotImplementedError


class LightGroup(ABC):
    @classmethod
    def __subclasshook__(cls, subclass):
        return (hasattr(subclass, 'turn_on') and callable(subclass.turn_on)
                and hasattr(subclass, 'turn_off') and callable(subclass.turn_off)
                and hasattr(subclass, 'set_transition_time')
                and callable(subclass.set_transition_time)
                and hasattr(subclass, 'set_color') and callable(subclass.set_color))

    @abstractmethod
    def turn_on(self):
        """Turn on the lights in this group."""
        raise NotImplementedError

    @abstractmethod
    def turn_off(self):
        """Turn off the lights in this group."""
        raise NotImplementedError

    @abstractmethod
    def set_transition_time(self, transition_time: int):
        """Set how long it takes in milliseconds for colors to transition."""
        raise NotImplementedError

    @abstractmethod
    def set_color(self, color: Color):
        """Set the color of this light."""
        raise NotImplementedError


class LightDevice(ABC):
    @classmethod
    def __subclasshook__(cls, subclass):
        return (hasattr(subclass, 'turn_on') and callable(subclass.turn_on)
                and hasattr(subclass, 'turn_off') and callable(subclass.turn_off)
                and hasattr(subclass, 'set_transition_time')
                and callable(subclass.set_transition_time)
                and hasattr(subclass, 'set_color') and callable(subclass.set_color))

    @abstractmethod
    def turn_on(self):
        """Turn on this light."""
        raise NotImplementedError

    @abstractmethod
    def turn_off(self):
        """Turn off the light."""
        raise NotImplementedError

    @abstractmethod
    def set_transition_time(self, transition_time: int):
        """Set how long it takes in milliseconds for colors to transition."""
        raise NotImplementedError

    @abstractmethod
    def set_color(self, color: Color):
        """Set the color of this light."""
        raise NotImplementedError
2.359375
2
wexapi/models/ticker.py
madmis/wexapi
3
3736
from decimal import Decimal class Ticker(object): def __init__( self, high: float, low: float, avg: float, vol: float, vol_cur: int, last: float, buy: float, sell: float, updated: int, ): self.high = high self.low = low self.avg = avg self.vol = vol self.vol_cur = vol_cur self.last = last self.buy = buy self.sell = sell self.updated = updated @property def high(self) -> Decimal: return self._high @high.setter def high(self, value: float): self._high = Decimal(value) @property def low(self) -> Decimal: return self._low @low.setter def low(self, value: float): self._low = Decimal(value) @property def avg(self) -> Decimal: return self._avg @avg.setter def avg(self, value: float): self._avg = Decimal(value) @property def vol(self) -> Decimal: return self._vol @vol.setter def vol(self, value: float): self._vol = Decimal(value) @property def vol_cur(self) -> Decimal: return self._vol_cur @vol_cur.setter def vol_cur(self, value: float): self._vol_cur = Decimal(value) @property def last(self) -> Decimal: return self._last @last.setter def last(self, value: float): self._last = Decimal(value) @property def buy(self) -> Decimal: return self._buy @buy.setter def buy(self, value: float): self._buy = Decimal(value) @property def sell(self) -> Decimal: return self._sell @sell.setter def sell(self, value: float): self._sell = Decimal(value) @property def updated(self) -> int: return self._updated @updated.setter def updated(self, value: int): self._updated = int(value)
2.5
2
src/preprocessing/annual_hc_by_crime_loc.py
VijayKalmath/USCrimeAnalysis
0
3744
#! usr/env/bin python
import glob

import numpy as np
import pandas as pd
from tqdm import tqdm


def main():
    # Fetch File Paths
    file_paths = glob.glob(r'./data/raw/ucr/hc_count_by_place/*.xls')
    # Sort them according to year
    file_paths.sort(key=lambda x: int(x[-8:-4]))
    # Create a result dataframe to store the data
    df_res = get_place_crime_count(file_paths[0])
    # Iterate over the rest of the files
    for p in tqdm(file_paths[1:]):
        df_temp = get_place_crime_count(p)
        df_res = pd.merge(df_res, df_temp, on="Place", how="left")
    # Save the result to disk
    df_res.to_csv('./data/processed/ucr/annual_hc_count_by_place.csv', index=False)


def get_place_crime_count(path: str) -> pd.DataFrame:
    """ Function to return """
    # Extracting the table name from and year from the given file path
    t_name = " ".join(path[path.index("Table"):path.index("_Incidents")].split("_"))
    t_year = path[path.index(".xls")-4:path.index(".xls")]
    try:
        # Read the Excel spreadsheet
        df = pd.read_excel(path, sheet_name=t_name)
        # Get the start and end indices of the interested datapoints
        start = df.index[df[t_name] == "Total"][0] + 1
        end = df.index[df[t_name] == "Multiple locations"][0]
        # Slice the dataset
        df = df.iloc[start:end, 0:2]
        # Reset the index for the reduced dataframe
        df.reset_index(drop=True, inplace=True)
        # Rename the columns
        df.rename(columns={t_name: "Place", "Unnamed: 1": t_year}, inplace=True)
        # Return the value
        return df
    except:
        # If there is no such data return an empty dataframe
        i_list = list(range(0, 47))
        return pd.DataFrame(np.nan, index=i_list, columns=['Place', t_year])


if __name__ == '__main__':
    main()
2.6875
3
src/models/text_node.py
moevm/nosql1h19-text-graph
0
3768
from neomodel import StructuredNode, StringProperty, JSONProperty, \
    Relationship, IntegerProperty
import numpy as np
import re

from models.text_relation import TextRelation


__all__ = ['TextNode']


class TextNode(StructuredNode):
    order_id = IntegerProperty(required=True, unique_index=True)
    label = StringProperty(required=True)
    text = StringProperty(required=True)
    alg_results = JSONProperty()
    link = Relationship('TextNode', 'ALG', model=TextRelation)

    def short(self):
        res = ''.join([word.strip() + ' '
                       for word in re.split(r'[\n ]', self.text, 5)[:5]])
        return res

    def describe(self):
        return f"""
            <h1>Фрагмент: {self.order_id} </h1>
            <table border="1" width=100%>
                <caption>
                    Информация о вершине
                </caption>
                <tr>
                    <th>Количество символов</th>
                    <td>{self.character_num()}</td>
                </tr>
                <tr>
                    <th>Количество слов</th>
                    <td>{self.words_num()}</td>
                </tr>
                <tr>
                    <th>Количество предложений</th>
                    <td>{self.sentences_num()}</td>
                </tr>
                <tr>
                    <th>Количество связей</th>
                    <td>{len(self.link)}</td>
                </tr>
            </table>
        """

    def preview(self, frag_num=0):
        leading = 3
        if frag_num > 0:
            leading = int(np.floor(np.log10(frag_num))) + 1
        if str(self.order_id) != str(self.label):
            return f"{str(self.order_id).zfill(leading)}: " \
                   + f"[{self.label}] {self.short()}..."
        else:
            return f"{str(self.order_id).zfill(leading)}: " \
                   + f"[{self.label}] {self.short()}..."
        return f"[{self.label}] {self.short()}..."

    def words_num(self):
        return len(self.text.split())

    def character_num(self):
        return len(self.text)

    def sentences_num(self):
        return len([s for s in self.text.split('.') if len(s) > 2])
1.742188
2
networks/networks.py
ayyuriss/TRHPO
0
3784
from torch import nn import numpy as np import base.basenetwork as BaseN from networks.cholesky import CholeskyBlock class FCNet(BaseN.BaseNetwork): name ="FCNet" def __init__(self,input_shape,output_shape,owner_name=""): super(FCNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.Softplus(), nn.Linear(1024,512),nn.Tanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class FCSpectralNet(BaseN.BaseNetwork): name ="FCSpectralNet" def __init__(self,input_shape,output_shape,owner_name=""): super(FCSpectralNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),BaseN.AdaptiveTanh(), nn.Linear(1024,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,512),BaseN.AdaptiveTanh(), BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class FCSpectralMNet(BaseN.BaseNetwork): name ="FCSpectralMNet" def __init__(self,input_shape,output_shape,owner_name=""): super(FCSpectralMNet,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.ReLU(), nn.Linear(1024,1024),nn.ReLU(), nn.Linear(1024,512),nn.ReLU(), nn.Linear(512,self.output_shape[0]-1),nn.Tanh(), BaseN.EigenLayer()) self.compile() class FCNetQ(BaseN.BaseNetwork): name ="FCNetQ" def __init__(self,input_shape,output_shape,owner_name=""): super(FCNetQ,self).__init__(input_shape,output_shape,owner_name) x = int(np.prod(input_shape)) self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(x,x),nn.Tanh(), nn.Linear(x,self.output_shape[0])) self.compile() class ConvNet(BaseN.BaseNetwork): name="ConvNet" def __init__(self,input_shape,output_shape,owner_name=""): super(ConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.ReLU(), BaseN.conv3_2(8, 16),nn.ReLU(), BaseN.conv3_2(8, 8))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class ConvNetBias(BaseN.BaseNetwork): name="ConvNetBias" def __init__(self,input_shape,output_shape,owner_name=""): super(ConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 12),BaseN.AdaptiveTanh(), BaseN.conv3_2(12, 16), BaseN.conv3_2(16, 20))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class FCConvNet(BaseN.BaseNetwork): name="FCConvNet" def __init__(self,input_shape,output_shape,owner_name=""): super(FCConvNet,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4, 8),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,512),BaseN.AdaptiveTanh(), nn.Linear(512,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class FCConvNetBias(BaseN.BaseNetwork): name="FCConvNetBias" def __init__(self,input_shape,output_shape,owner_name=""): 
super(FCConvNetBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.ReLU(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,1024),BaseN.AdaptiveTanh(), nn.Linear(1024,256), BaseN.EigenLayer(256,self.output_shape[0],bias=False)) self.compile() class ConvNet2(BaseN.BaseNetwork): name="ConvNet2" def __init__(self,input_shape,output_shape,owner_name=""): super(ConvNet2,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 3),nn.Softplus(), BaseN.conv3_2(3, 6),BaseN.conv3_2(6, 12))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,1024),nn.Tanh(), nn.Linear(1024,512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,256), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class ConvNetBig(BaseN.BaseNetwork): name="ConvNetBig" def __init__(self,input_shape,output_shape,owner_name=""): super(ConvNetBig,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16),nn.Softplus(), BaseN.conv3_2(16, 32))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0])) self.compile() class ConvNetBigBias(BaseN.BaseNetwork): name="ConvNetBigBias" def __init__(self,input_shape,output_shape,owner_name=""): super(ConvNetBigBias,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus(), BaseN.conv3_2(4, 4),BaseN.AdaptiveTanh())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), BaseN.EigenLayer(512,self.output_shape[0],bias=False)) self.compile() class ConvNetBigAtari(BaseN.BaseNetwork): name="ConvNetBigAtari" def __init__(self,input_shape,output_shape,owner_name=""): super(ConvNetBigAtari,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16), BaseN.conv3_2(16, 32))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,512),nn.Tanh(), nn.Linear(512,1024), BaseN.EigenLayer(1024,self.output_shape[0])) self.compile() class ConvNetBigS(BaseN.BaseNetwork): name="ConvNetBigS" def __init__(self,input_shape,output_shape,owner_name=""): super(ConvNetBigS,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 8),nn.Softplus(), BaseN.conv3_2(8, 16), BaseN.conv3_2(16, 32))] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,512), nn.Linear(512,self.output_shape[0])) self.compile() class ConvNetMNIST(BaseN.BaseNetwork): name = "ConvNetMNIST" def __init__(self,input_shape,output_shape,**kwargs): super(ConvNetMNIST,self).__init__(**kwargs) self.n = output_shape self.conv = [BaseN.ResNetBlock(1,32), 
BaseN.conv3_2(32,64)] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], nn.Softplus(), BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), BaseN.EigenLayer(256,self.output_shape[0])) self.compile() class ConvNetSimple(BaseN.BaseNetwork): name="ConvNetSimple" def __init__(self,input_shape,output_shape,owner_name=""): super(ConvNetSimple,self).__init__(input_shape,output_shape,owner_name) self.conv = [nn.Sequential(BaseN.conv3_2(input_shape[0], 4),nn.Softplus())] x = BaseN.output_shape(self.conv[0],input_shape) self.model = nn.Sequential(self.conv[0], BaseN.Flatten(), nn.Linear(np.prod(x), 512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,self.output_shape[0])) self.compile() class FCNetSimple(BaseN.BaseNetwork): name ="FCNetSimple" def __init__(self,input_shape,output_shape,owner_name=""): super(FCNetSimple,self).__init__(input_shape,output_shape,owner_name) x = input_shape self.model = nn.Sequential(BaseN.Flatten(), nn.Linear(np.prod(x), 1024),nn.Softplus(), nn.Linear(1024,512), nn.Linear(512,256),nn.Tanh(), nn.Linear(256,self.output_shape[0])) self.compile()
1.5625
2
python/verifair/benchmarks/fairsquare/M_BN_F_SVM_A_Q.py
obastani/verifair
5
3840
from .helper import *


def sample(flag):
    sex = step([(0, 1, 0.3307), (1, 2, 0.6693)])
    if sex < 1:
        capital_gain = gaussian(568.4105, 24248365.5428)
        if capital_gain < 7298.0000:
            age = gaussian(38.4208, 184.9151)
            capital_loss = gaussian(86.5949, 157731.9553)
        else:
            age = gaussian(38.8125, 193.4918)
            capital_loss = gaussian(117.8083, 252612.0300)
    else:
        capital_gain = gaussian(1329.3700, 69327473.1006)
        if capital_gain < 5178.0000:
            age = gaussian(38.6361, 187.2435)
            capital_loss = gaussian(87.0152, 161032.4157)
        else:
            age = gaussian(38.2668, 187.2747)
            capital_loss = gaussian(101.7672, 189798.1926)

    sensitiveAttribute(sex < 1, flag)
    qualified(age > 18)

    N_age = (age - 17.0) / 62.0
    N_capital_gain = (capital_gain - 0.0) / 22040.0
    N_capital_loss = (capital_loss - 0.0) / 1258.0
    t = 0.0006 * N_age + -5.7363 * N_capital_gain + -0.0002 * N_capital_loss + 1.0003
    if sex > 1:
        t = t + -0.0003
    if sex < 1:
        t = t - 0.5

    return int(t < 0)
    fairnessTarget(t < 0)
1.71875
2
microservices/users/config.py
Levakin/sanic-test-app
0
3848
# -*- coding: utf-8 -*-
import os
from distutils.util import strtobool


class Config:
    DEBUG = bool(strtobool(os.getenv('DEBUG', "False")))
    DATABASE_URI = os.getenv('DATABASE_URI', '127.0.0.1:27017')
    WORKERS = int(os.getenv('WORKERS', 2))
    LOGO = os.getenv('LOGO', None)
    HOST = os.getenv('HOST', '127.0.0.1')
    PORT = int(os.getenv('PORT', 8000))
    SECRET = os.getenv('SECRET', 'secret')
    LOGIN_MIN_LENGTH = int(os.getenv('LOGIN_MIN_LENGTH', 1))
    LOGIN_MAX_LENGTH = int(os.getenv('LOGIN_MAX_LENGTH', 32))
1.046875
1
src/simplify.py
denghz/Probabilistic-Programming
0
3864
from wolframclient.language.expression import WLSymbol

from nnDiff import *


def parseGlobalSymbol(s):
    if isinstance(s, numbers.Number):
        return s
    if isinstance(s, WLSymbol):
        if s.name == 'E':
            return 'E'
        else:
            return s.name[7:]


def parse(exp):
    symbol = parseGlobalSymbol(exp)
    if symbol:
        return [symbol]
    else:
        f = str(exp.head)
        args = list(map(parse, exp.args))
        res = []
        if (f == "Power"):
            res1 = []
            p = args[1][0]
            e = args[0]
            if e == ['E']:
                return ['Exp'] + args[1]
            if p < 0:
                res = ["Inv"]
                p = -p
            if p >= 2:
                p = p - 2
                res1 = ["Times"] + e + e
                while p > 0:
                    p = p - 1
                    res1 = ["Times"] + res1 + e
                return res + res1
            else:
                return res + e
        else:
            if len(args) == 1:
                return [f] + args[0]
            elif len(args) >= 2:
                res = [f] + args[0] + args[1]
                args = args[2:]
                for arg in args:
                    res = [f] + res + arg
                return res


def simplify(exp):
    with WolframLanguageSession() as session:
        session.evaluate("Inv[zzz_] := 1/zzz")
        f = wlexpr(str(Func(exp)))
        getfreeVars = wlexpr("Reduce`FreeVariables")
        freeVariables = session.evaluate(getfreeVars(f))
        ass = wl.Element(wl.Alternatives(freeVariables), wl.Reals)
        wmres = session.evaluate(wl.FullSimplify(f, ass))
        print(wmres)
        res = parse(wmres)
        return res


if __name__ == "__main__":
    exp = sys.argv[1:]
    if exp == []:
        exp = ["Sin", "x"]
    res = map(str, simplify(exp))
    print(' '.join(res), file=sys.stderr)
1.9375
2
pytype/tests/py2/test_stdlib.py
souravbadami/pytype
1
3896
"""Tests of selected stdlib functions.""" from pytype.tests import test_base class StdlibTests(test_base.TargetPython27FeatureTest): """Tests for files in typeshed/stdlib.""" def testPosix(self): ty = self.Infer(""" import posix x = posix.urandom(10) """) self.assertTypesMatchPytd(ty, """ posix = ... # type: module x = ... # type: str """) def testXRange(self): self.Check(""" import random random.sample(xrange(10), 5) """) def testStringTypes(self): ty = self.Infer(""" import types if isinstance("", types.StringTypes): x = 42 if isinstance(False, types.StringTypes): y = 42 if isinstance(u"", types.StringTypes): z = 42 """, deep=False) self.assertTypesMatchPytd(ty, """ types = ... # type: module x = ... # type: int z = ... # type: int """) def testDefaultDict(self): self.Check(""" import collections import itertools ids = collections.defaultdict(itertools.count(17).next) """) def testSysVersionInfoLt(self): ty = self.Infer(""" import sys if sys.version_info[0] < 3: v = 42 else: v = "hello world" """) self.assertTypesMatchPytd(ty, """ sys = ... # type: module v = ... # type: int """) def testSysVersionInfoLe(self): ty = self.Infer(""" import sys if sys.version_info[0] <= 2: v = 42 else: v = "hello world" """) self.assertTypesMatchPytd(ty, """ sys = ... # type: module v = ... # type: int """) def testSysVersionInfoEq(self): ty = self.Infer(""" import sys if sys.version_info[0] == 2: v = 42 elif sys.version_info[0] == 3: v = "hello world" else: v = None """) self.assertTypesMatchPytd(ty, """ sys = ... # type: module v = ... # type: int """) def testSysVersionInfoGe(self): ty = self.Infer(""" import sys if sys.version_info[0] >= 3: v = 42 else: v = "hello world" """) self.assertTypesMatchPytd(ty, """ sys = ... # type: module v = ... # type: str """) def testSysVersionInfoGt(self): ty = self.Infer(""" import sys if sys.version_info[0] > 2: v = 42 else: v = "hello world" """) self.assertTypesMatchPytd(ty, """ sys = ... # type: module v = ... # type: str """) def testSysVersionInfoNamedAttribute(self): ty = self.Infer(""" import sys if sys.version_info.major == 2: v = 42 else: v = "hello world" """) self.assertTypesMatchPytd(ty, """ sys: module v: int """) test_base.main(globals(), __name__ == "__main__")
1.695313
2
imgaug/augmenters/flip.py
pAoenix/image-Augmented
1
3912
""" Augmenters that apply mirroring/flipping operations to images. Do not import directly from this file, as the categorization is not final. Use instead :: from imgaug import augmenters as iaa and then e.g. :: seq = iaa.Sequential([ iaa.Fliplr((0.0, 1.0)), iaa.Flipud((0.0, 1.0)) ]) List of augmenters: * Fliplr * Flipud """ from __future__ import print_function, division, absolute_import from .. import parameters as iap import numpy as np import six.moves as sm from .meta import Augmenter class Fliplr(Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long """ Flip/mirror input images horizontally. Parameters ---------- p : number or StochasticParameter, optional(default=0) Probability of each image to get flipped. name : string, optional(default=None) See `Augmenter.__init__()` deterministic : bool, optional(default=False) See `Augmenter.__init__()` random_state : int or np.random.RandomState or None, optional(default=None) See `Augmenter.__init__()` Examples -------- >>> aug = iaa.Fliplr(0.5) would horizontally flip/mirror 50 percent of all input images. >>> aug = iaa.Fliplr(1.0) would horizontally flip/mirror all input images. """ def __init__(self, p=0, name=None, deterministic=False, random_state=None): super(Fliplr, self).__init__(name=name, deterministic=deterministic, random_state=random_state) self.p = iap.handle_probability_param(p, "p") def _augment_images(self, images, random_state, parents, hooks): nb_images = len(images) samples = self.p.draw_samples((nb_images,), random_state=random_state) for i in sm.xrange(nb_images): if samples[i] == 1: images[i] = np.fliplr(images[i]) return images def _augment_heatmaps(self, heatmaps, random_state, parents, hooks): arrs_flipped = self._augment_images( [heatmaps_i.arr_0to1 for heatmaps_i in heatmaps], random_state=random_state, parents=parents, hooks=hooks ) for heatmaps_i, arr_flipped in zip(heatmaps, arrs_flipped): heatmaps_i.arr_0to1 = arr_flipped return heatmaps def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks): nb_images = len(keypoints_on_images) samples = self.p.draw_samples((nb_images,), random_state=random_state) for i, keypoints_on_image in enumerate(keypoints_on_images): if samples[i] == 1: width = keypoints_on_image.shape[1] for keypoint in keypoints_on_image.keypoints: keypoint.x = (width - 1) - keypoint.x return keypoints_on_images def get_parameters(self): return [self.p] class Flipud(Augmenter): # pylint: disable=locally-disabled, unused-variable, line-too-long """ Flip/mirror input images vertically. Parameters ---------- p : number or StochasticParameter, optional(default=0) Probability of each image to get flipped. name : string, optional(default=None) See `Augmenter.__init__()` deterministic : bool, optional(default=False) See `Augmenter.__init__()` random_state : int or np.random.RandomState or None, optional(default=None) See `Augmenter.__init__()` Examples -------- >>> aug = iaa.Flipud(0.5) would vertically flip/mirror 50 percent of all input images. >>> aug = iaa.Flipud(1.0) would vertically flip/mirror all input images. 
""" def __init__(self, p=0, name=None, deterministic=False, random_state=None): super(Flipud, self).__init__(name=name, deterministic=deterministic, random_state=random_state) self.p = iap.handle_probability_param(p, "p") def _augment_images(self, images, random_state, parents, hooks): nb_images = len(images) samples = self.p.draw_samples((nb_images,), random_state=random_state) for i in sm.xrange(nb_images): if samples[i] == 1: images[i] = np.flipud(images[i]) return images def _augment_heatmaps(self, heatmaps, random_state, parents, hooks): arrs_flipped = self._augment_images( [heatmaps_i.arr_0to1 for heatmaps_i in heatmaps], random_state=random_state, parents=parents, hooks=hooks ) for heatmaps_i, arr_flipped in zip(heatmaps, arrs_flipped): heatmaps_i.arr_0to1 = arr_flipped return heatmaps def _augment_keypoints(self, keypoints_on_images, random_state, parents, hooks): nb_images = len(keypoints_on_images) samples = self.p.draw_samples((nb_images,), random_state=random_state) for i, keypoints_on_image in enumerate(keypoints_on_images): if samples[i] == 1: height = keypoints_on_image.shape[0] for keypoint in keypoints_on_image.keypoints: keypoint.y = (height - 1) - keypoint.y return keypoints_on_images def get_parameters(self): return [self.p]
2.34375
2
tests/atfork/test_atfork.py
luciferliu/xTools
0
3928
#!/usr/bin/python # # Copyright 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Licensed to the PSF under a Contributor Agreement. # # Author: <NAME> <<EMAIL>> """Tests for atfork.""" import os import sys import importlib from xTool.compat import StringIO import traceback import unittest from xTool import atfork class AtforkTest(unittest.TestCase): def setUp(self): atfork.monkeypatch_os_fork_functions() self.calls = [] self.orig_stderr = sys.stderr self.assertFalse( atfork._fork_lock.locked(), "atfork._fork_lock not released by an earlier test!", ) # Unregister calls registered by earlier tests. atfork._prepare_call_list = [] atfork._parent_call_list = [] atfork._child_call_list = [] def tearDown(self): # Un-monkeypatch the os module. ook. global os importlib.reload(os) sys.stderr = self.orig_stderr def _pre(self): self.calls.append(self._pre) def _parent(self): self.calls.append(self._parent) def _child(self): self.calls.append(self._child) def _other(self): self.calls.append(self._other) def _raise_pre(self): self._pre() raise RuntimeError("This as the first parent error expected.") def _raise_parent(self): self._parent() raise RuntimeError("This as the second parent error expected.") def _raise_child(self): self._child() raise RuntimeError("This child error is expected.") def _assert_expected_parent_stderr(self, error_msg): self.assertTrue(("first parent error" in error_msg), error_msg) self.assertTrue(("second parent error" in error_msg), error_msg) self.assertTrue( (error_msg.index("first parent") < error_msg.index("second parent")), "first and second errors out of order in:\n%r" % error_msg, ) self.assertEqual(2, error_msg.count("RuntimeError:")) def _assert_expected_child_stderr(self, error_msg): self.assertTrue("child error is expected" in error_msg) self.assertEqual(1, error_msg.count("RuntimeError:"), error_msg) def test_monkeypatching(self): if not hasattr(os, "fork"): return # Nothing to test on this platform. self.assertTrue(callable(atfork._orig_os_fork)) self.assertTrue(callable(atfork._orig_os_forkpty)) # The os module was patched, these should not be equal. self.assertNotEqual(atfork._orig_os_fork, os.fork) self.assertNotEqual(atfork._orig_os_forkpty, os.forkpty) # These are the wrapped versions we patched in. self.assertEqual(atfork.os_fork_wrapper, os.fork) self.assertEqual(atfork.os_forkpty_wrapper, os.forkpty) def test_register_atfork_calls(self): # Test with both positional and keyword arguments as well as None. 
atfork.atfork(self._pre, self._parent, self._child) atfork.atfork(prepare=self._pre) atfork.atfork(parent=self._parent) atfork.atfork(child=self._child) self.assertEqual([self._pre] * 2, atfork._prepare_call_list) self.assertEqual([self._parent] * 2, atfork._parent_call_list) self.assertEqual([self._child] * 2, atfork._child_call_list) if __debug__: self.assertRaises(AssertionError, atfork.atfork, 1, 2, 3) def test_call_atfork_list(self): self.assertEqual([], atfork._call_atfork_list([])) self.assertEqual([], atfork._call_atfork_list([self._pre])) def raise_something(): raise RuntimeError() errors = atfork._call_atfork_list([raise_something] * 2) self.assertEqual(2, len(errors)) for exc_info in errors: self.assertEqual(RuntimeError, exc_info[0]) def _test_a_fork_wrapper(self, fork_func): sys.stderr = StringIO() # restored in tearDown atfork.atfork(self._raise_pre, self._raise_parent, self._raise_child) atfork.atfork(self._other, self._other, self._other) pid = fork_func() if pid == 0: try: try: self.assertEqual( [self._pre, self._other, self._child, self._other], self.calls ) self.assertFalse(atfork._fork_lock.locked()) self._assert_expected_child_stderr(sys.stderr.getvalue()) except BaseException: try: traceback.print_exc() self.orig_stderr.write(sys.stderr.getvalue()) finally: os._exit(1) finally: os._exit(0) else: self.assertEqual( [self._pre, self._other, self._parent, self._other], self.calls ) self.assertFalse(atfork._fork_lock.locked()) self.assertEqual(0, os.waitpid(pid, 0)[1], "error in child") self._assert_expected_parent_stderr(sys.stderr.getvalue()) def test_os_fork_wrapper(self): self._test_a_fork_wrapper(os.fork) def test_os_forkpty_wrapper(self): self._test_a_fork_wrapper(lambda: os.forkpty()[0]) def _test_fork_failure(self, orig_fork_attrname, fork_wrapper): def failing_fork(): raise OSError(0, "testing a fork failure") atfork.atfork(self._pre, self._parent, self._child) orig_orig_fork = getattr(atfork, orig_fork_attrname) try: setattr(atfork, orig_fork_attrname, failing_fork) try: pid = fork_wrapper() if pid == 0: # This should never happen but do this just in case. os._exit(0) except OSError: self.assertEqual([self._pre, self._parent], self.calls) else: self.fail("Fork failed to fail!") finally: setattr(atfork, orig_fork_attrname, orig_orig_fork) def test_fork_wrapper_failure(self): self._test_fork_failure("_orig_os_fork", atfork.os_fork_wrapper) def test_forkpty_wrapper_failure(self): self._test_fork_failure("_orig_os_forkpty", atfork.os_forkpty_wrapper) def test_multiple_monkeypatch_safe(self): self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper) self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper) atfork.monkeypatch_os_fork_functions() self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper) self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper) atfork.monkeypatch_os_fork_functions() self.assertNotEqual(atfork._orig_os_fork, atfork.os_fork_wrapper) self.assertNotEqual(atfork._orig_os_forkpty, atfork.os_forkpty_wrapper) if __name__ == "__main__": unittest.main()
1.585938
2
tests/test_basics.py
sirosen/git-fortune
0
3936
import subprocess

from git_fortune._compat import fix_line_endings
from git_fortune.version import __version__


def test_help(capfd):
    subprocess.check_call(["git-fortune", "-h"])
    captured = capfd.readouterr()
    assert (
        fix_line_endings(
            """
A fortune-like command for showing git tips

Invoke it as 'git-fortune' or 'git fortune'
"""
        )
        in captured.out
    )


def test_version(capfd):
    subprocess.check_call(["git-fortune", "--version"])
    captured = capfd.readouterr()
    assert "git-fortune {}".format(__version__) in captured.out


def test_tip_boxformat(capfd):
    subprocess.check_call(["git-fortune", "--id", "3"])
    tip3boxbody = fix_line_endings(
        """\
+-------------------------------------------------------------------------------+
| GIT TIP #3                                                                     |
|                                                                                |
| `git log --graph` can show you a tree-like representation of the git history. |
|                                                                                |
| Try adding in `--oneline --decorate --all`.                                    |
|                                                                                |
+-------------------------------------------------------------------------------+
"""
    )
    captured = capfd.readouterr()
    assert captured.out == tip3boxbody


def test_tip_plainformat(capfd):
    subprocess.check_call(["git-fortune", "--format", "plain", "--id", "1"])
    tip1plainbody = fix_line_endings(
        "Modify your last commit before pushing with `git commit --amend`.\n"
    )
    captured = capfd.readouterr()
    assert captured.out == tip1plainbody


def test_noargs(capfd):
    """just make sure it doesn't crashfail"""
    subprocess.check_call(["git-fortune"])
    captured = capfd.readouterr()
    assert "GIT TIP #" in captured.out  # from the box format


def test_category(capfd):
    """just make sure it doesn't crashfail"""
    subprocess.check_call(["git-fortune", "--category", "diff"])
    captured = capfd.readouterr()
    assert "GIT TIP #" in captured.out  # from the box format


def test_category_and_id_mutex(capfd):
    ret = subprocess.call(["git-fortune", "--category", "diff", "--id", "3"])
    assert ret == 2
    captured = capfd.readouterr()
    assert "" == captured.out
    assert "argument --id: not allowed with argument --category" in captured.err
1.71875
2
t2vretrieval/models/mlmatch.py
Roc-Ng/HANet
34
3944
import numpy as np import torch import framework.ops import t2vretrieval.encoders.mlsent import t2vretrieval.encoders.mlvideo import t2vretrieval.models.globalmatch from t2vretrieval.models.criterion import cosine_sim from t2vretrieval.models.globalmatch import VISENC, TXTENC class RoleGraphMatchModelConfig(t2vretrieval.models.globalmatch.GlobalMatchModelConfig): def __init__(self): super().__init__() self.num_verbs = 4 self.num_nouns = 6 self.attn_fusion = 'embed' # sim, embed self.simattn_sigma = 4 self.hard_topk = 1 self.max_violation = True self.loss_weights = None ## this config will be covered by model.json due to the functions of load and load_from_dict self.subcfgs[VISENC] = t2vretrieval.encoders.mlvideo.MultilevelEncoderConfig() self.subcfgs[TXTENC] = t2vretrieval.encoders.mlsent.RoleGraphEncoderConfig() class RoleGraphMatchModel(t2vretrieval.models.globalmatch.GlobalMatchModel): def build_submods(self): return { VISENC: t2vretrieval.encoders.mlvideo.MultilevelEncoder(self.config.subcfgs[VISENC]), TXTENC: t2vretrieval.encoders.mlsent.RoleGraphEncoder(self.config.subcfgs[TXTENC]) } def forward_video_embed(self, batch_data): vid_fts = torch.FloatTensor(batch_data['attn_fts']).to(self.device) vid_lens = torch.LongTensor(batch_data['attn_lens']).to(self.device) # (batch, max_vis_len, dim_embed) vid_sent_embeds, vid_verb_embeds, vid_noun_embeds, local_sent_embeds, logits, max_len = self.submods[VISENC](vid_fts, vid_lens) return { 'vid_sent_embeds': vid_sent_embeds, 'vid_verb_embeds': vid_verb_embeds, 'vid_noun_embeds': vid_noun_embeds, 'local_vid_embeds': local_sent_embeds, 'vid_lens': vid_lens, 'max_len': max_len, 'logits': logits, } def forward_text_embed(self, batch_data): sent_ids = torch.LongTensor(batch_data['sent_ids']).to(self.device) ## sentence sent_lens = torch.LongTensor(batch_data['sent_lens']).to(self.device) ## length verb_masks = torch.BoolTensor(batch_data['verb_masks']).to(self.device) ## batch*nv*max_sen_len noun_masks = torch.BoolTensor(batch_data['noun_masks']).to(self.device) node_roles = torch.LongTensor(batch_data['node_roles']).to(self.device) ## batch*(n_v+n_n) rel_edges = torch.FloatTensor(batch_data['rel_edges']).to(self.device) ## batch*(1+n_v+n_n)*(1+n_v+n_n) verb_lens = torch.sum(verb_masks, 2) noun_lens = torch.sum(noun_masks, 2) # sent_embeds: (batch, dim_embed) # verb_embeds, noun_embeds: (batch, num_xxx, dim_embed) sent_embeds, verb_embeds, noun_embeds, local_sent_embeds, sent_logits = self.submods[TXTENC]( sent_ids, sent_lens, verb_masks, noun_masks, node_roles, rel_edges) return { 'sent_embeds': sent_embeds, 'sent_lens': sent_lens, 'verb_embeds': verb_embeds, 'verb_lens': verb_lens, 'noun_embeds': noun_embeds, 'noun_lens': noun_lens, 'sent_logits': sent_logits, 'local_sent_embeds': local_sent_embeds, } def generate_phrase_scores(self, vid_embeds, vid_masks, phrase_embeds, phrase_masks, mask_flag=False): '''Args: - vid_embeds: (batch, num_frames, embed_size) - vid_masks: (batch, num_frames) - phrase_embeds: (batch, num_phrases, embed_size) - phrase_masks: (batch, num_phrases) ''' batch_vids, num_frames, _ = vid_embeds.size() vid_pad_masks = (vid_masks == 0).unsqueeze(1).unsqueeze(3) batch_phrases, num_phrases, dim_embed = phrase_embeds.size() # compute component-wise similarity vid_2d_embeds = vid_embeds.view(-1, dim_embed) phrase_2d_embeds = phrase_embeds.view(-1, dim_embed) # size = (batch_vids, batch_phrases, num_frames, num_phrases) ground_sims = cosine_sim(vid_2d_embeds, phrase_2d_embeds).view( batch_vids, num_frames, batch_phrases, 
num_phrases).transpose(1, 2) ### if mask_flag: vid_attn_per_word = ground_sims.masked_fill(vid_pad_masks, 0) ############## else: vid_attn_per_word = ground_sims vid_attn_per_word[vid_attn_per_word < 0] = 0 vid_attn_per_word = framework.ops.l2norm(vid_attn_per_word, dim=2) if mask_flag: vid_attn_per_word = vid_attn_per_word.masked_fill(vid_pad_masks, -1e18) ################# vid_attn_per_word = torch.softmax(self.config.simattn_sigma * vid_attn_per_word, dim=2) if self.config.attn_fusion == 'embed': vid_attned_embeds = torch.einsum('abcd,ace->abde', vid_attn_per_word, vid_embeds) word_attn_sims = torch.einsum('abde,bde->abd', framework.ops.l2norm(vid_attned_embeds), framework.ops.l2norm(phrase_embeds)) elif self.config.attn_fusion == 'sim': # (batch_vids, batch_phrases, num_phrases) word_attn_sims = torch.sum(ground_sims * vid_attn_per_word, dim=2) # sum: (batch_vid, batch_phrases) phrase_scores = torch.sum(word_attn_sims * phrase_masks.float().unsqueeze(0), 2) \ / torch.sum(phrase_masks, 1).float().unsqueeze(0).clamp(min=1) return phrase_scores def generate_scores(self, **kwargs): ##### shared ##### vid_lens = kwargs['vid_lens'] # (batch, ) num_frames = int(kwargs['max_len'])###########################kwargs['vid_verb_embeds'].size(1) vid_masks = framework.ops.sequence_mask(vid_lens, num_frames, inverse=False) # batch*max_len ##### sentence-level scores ##### sent_scores = cosine_sim(kwargs['vid_sent_embeds'], kwargs['sent_embeds']) ####################################################### # concept scores use jaccard similarity concept_verb_scores = self.jaccard_sim(kwargs['logits'][0], kwargs['sent_logits'][0]) concept_noun_scores = self.jaccard_sim(kwargs['logits'][1], kwargs['sent_logits'][1]) ####################################################### ##### verb-level scores ##### vid_verb_embeds = kwargs['vid_verb_embeds'] # (batch, num_frames, dim_embed) verb_embeds = kwargs['verb_embeds'] # (batch, num_verbs, dim_embed) verb_lens = kwargs['verb_lens'] # (batch, num_verbs) local_vid_embeds =kwargs['local_vid_embeds'] local_sent_embeds = kwargs['local_sent_embeds'] verb_masks = framework.ops.sequence_mask(torch.sum(verb_lens > 0, 1).long(), self.config.num_verbs, inverse=False) # sum: (batch_vids, batch_sents) verb_scores = self.generate_phrase_scores(vid_verb_embeds, vid_masks, verb_embeds, verb_masks) ind_verb_scores = self.generate_phrase_scores(local_vid_embeds[0], vid_masks, local_sent_embeds[0], verb_masks, True) ##### noun-level scores ##### vid_noun_embeds = kwargs['vid_noun_embeds'] # (batch, num_frames, dim_embed) noun_embeds = kwargs['noun_embeds'] # (batch, num_nouns, dim_embed) noun_lens = kwargs['noun_lens'] # (batch, num_nouns) noun_masks = framework.ops.sequence_mask(torch.sum(noun_lens > 0, 1).long(), self.config.num_nouns, inverse=False) # sum: (batch_vids, batch_sents) noun_scores = self.generate_phrase_scores(vid_noun_embeds, vid_masks, noun_embeds, noun_masks) ind_noun_scores = self.generate_phrase_scores(local_vid_embeds[1], vid_masks, local_sent_embeds[1], noun_masks, True) return sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, ind_verb_scores, ind_noun_scores def jaccard_sim(self, im, s): im_bs = im.size(0) s_bs = s.size(0) im = im.unsqueeze(1).expand(-1, s_bs, -1) s = s.unsqueeze(0).expand(im_bs, -1, -1) intersection = torch.min(im, s).sum(-1) union = torch.max(im, s).sum(-1) score = intersection / union return score def forward_loss(self, batch_data, step=None): enc_outs = self.forward_video_embed(batch_data) cap_enc_outs = 
self.forward_text_embed(batch_data) enc_outs.update(cap_enc_outs) sent_scores, verb_scores, noun_scores, concept_verb_scores, concept_noun_scores, local_verb_scores, local_noun_scores = self.generate_scores(**enc_outs) scores = (sent_scores + verb_scores + noun_scores + local_verb_scores + local_noun_scores) / 5 scores2 = (concept_verb_scores + concept_noun_scores) / 2 sent_loss = self.criterion(sent_scores) verb_loss = self.criterion(verb_scores) noun_loss = self.criterion(noun_scores) eta = 0.1 mu = 0.01 concept_verb_loss = 0.5*self.criterion(concept_verb_scores) concept_noun_loss = 0.5*self.criterion(concept_noun_scores) concept_loss = eta*self.criterion(scores2) verb_concept_label = torch.FloatTensor(batch_data['verb_concept_label']).to(self.device) noun_concept_label = torch.FloatTensor(batch_data['noun_concept_label']).to(self.device) verb_concept_mask = torch.FloatTensor(batch_data['verb_concept_mask']).to(self.device) noun_concept_mask = torch.FloatTensor(batch_data['noun_concept_mask']).to(self.device) v_mask_sum = torch.sum(verb_concept_mask, dim=1) n_mask_sum = torch.sum(noun_concept_mask, dim=1) vbce_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['logits'][0], verb_concept_label), dim=1) vbce_loss = mu*torch.mean(vbce_loss/v_mask_sum) nbce_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['logits'][1], noun_concept_label), dim=1) nbce_loss = mu*torch.mean(nbce_loss/n_mask_sum) vbce_sent_loss = torch.sum(verb_concept_mask*self.criterion_bce(enc_outs['sent_logits'][0], verb_concept_label), dim=1) vbce_sent_loss = mu*torch.mean(vbce_sent_loss/v_mask_sum) nbce_sent_loss = torch.sum(noun_concept_mask*self.criterion_bce(enc_outs['sent_logits'][1], noun_concept_label), dim=1) nbce_sent_loss = mu*torch.mean(nbce_sent_loss/n_mask_sum) fusion_loss = self.criterion(scores) if self.config.loss_weights is None: loss = fusion_loss + 1*(vbce_loss+nbce_loss) + 1*(vbce_sent_loss+nbce_sent_loss) + concept_loss else: loss = self.config.loss_weights[0] * fusion_loss + \ self.config.loss_weights[1] * sent_loss + \ self.config.loss_weights[2] * verb_loss + \ self.config.loss_weights[3] * noun_loss + \ vbce_loss + nbce_loss if step is not None and self.config.monitor_iter > 0 and step % self.config.monitor_iter == 0: neg_scores = scores.masked_fill(torch.eye(len(scores), dtype=torch.bool).to(self.device), -1e10) self.print_fn('\tstep %d: pos mean scores %.2f, hard neg mean scores i2t %.2f, t2i %.2f'%( step, torch.mean(torch.diag(scores)), torch.mean(torch.max(neg_scores, 1)[0]), torch.mean(torch.max(neg_scores, 0)[0]))) self.print_fn('\tstep %d: sent_loss %.4f, verb_loss %.4f, noun_loss %.4f, fusion_loss %.4f'%( step, sent_loss.data.item(), verb_loss.data.item(), noun_loss.data.item(), fusion_loss.data.item())) self.print_fn('\tstep %d: vbce_loss %.4f, nbce_loss %.4f'%(step, vbce_loss.item(), nbce_loss.item())) self.print_fn('\tstep %d: vbce_sent_loss %.4f, nbce_sent_loss %.4f'%(step, vbce_sent_loss.item(), nbce_sent_loss.item())) self.print_fn('\tstep %d: sim_loss %.4f, vsim_loss %.4f, nsim_loss %.4f'%(step, concept_loss.item(), concept_verb_loss.item(), concept_noun_loss.item())) return loss def evaluate_scores(self, tst_reader): K = self.config.subcfgs[VISENC].num_levels K = K + 4 assert K == 7, 'Note that this error indicates losing other scores!' 
vid_names, all_scores = [], [[] for _ in range(K)] cap_names = tst_reader.dataset.captions for vid_data in tst_reader: vid_names.extend(vid_data['names']) vid_enc_outs = self.forward_video_embed(vid_data) for k in range(K): all_scores[k].append([]) ijj = 0 for cap_data in tst_reader.dataset.iterate_over_captions(self.config.tst_batch_size): cap_enc_outs = self.forward_text_embed(cap_data) cap_enc_outs.update(vid_enc_outs) indv_scores = self.generate_scores(**cap_enc_outs) for k in range(K): all_scores[k][-1].append(indv_scores[k].data.cpu().numpy()) ijj += 0 for k in range(K): all_scores[k][-1] = np.concatenate(all_scores[k][-1], axis=1) for k in range(K): all_scores[k] = np.concatenate(all_scores[k], axis=0) # (n_img, n_cap) all_scores = np.array(all_scores) # (k, n_img, n_cap) return vid_names, cap_names, all_scores def evaluate(self, tst_reader, return_outs=False): vid_names, cap_names, scores = self.evaluate_scores(tst_reader) i2t_gts = [] for vid_name in vid_names: i2t_gts.append([]) for i, cap_name in enumerate(cap_names): if cap_name in tst_reader.dataset.ref_captions[vid_name]: i2t_gts[-1].append(i) t2i_gts = {} for i, t_gts in enumerate(i2t_gts): for t_gt in t_gts: t2i_gts.setdefault(t_gt, []) t2i_gts[t_gt].append(i) idx = [0, 1, 2, 5, 6] fused_scores = (np.mean(scores[idx], 0) + np.mean(scores[3:5], 0))/2 metrics = self.calculate_metrics(fused_scores, i2t_gts, t2i_gts) if return_outs: outs = { 'vid_names': vid_names, 'cap_names': cap_names, 'scores': scores, } return metrics, outs else: return metrics
1.703125
2
exposing/_version.py
w4k2/exposing
0
3960
""" ``exposing`` """ __version__ = '0.2.2'
-0.114258
0
data_structures/linked_lists/ll-kth-from-end/ll_kth.py
jeremyCtown/data-structures-and-algorithms
0
3968
from node import Node class LinkedList: """ initializes LL """ def __init__(self, iter=[]): self.head = None self._size = 0 for item in reversed(iter): self.insert(item) def __repr__(self): """ assumes head will have a val and we will need this """ return '<head> => {}'.format(self.head.val) def __str__(self): """ this is where we can see the list """ vals = [] current = self.head while current is not None: vals.append(str(current.val)) current = current._next return ' -> '.join(vals) def __len__(self): """ returns size of LL """ return self._size def insert(self, val): """ basic insertion method for adding to front of LL """ self.head = Node(val, self.head) self._size += 1 def append(self, val): """ appends node to the end of the LL """ new_node = Node(val, None) if self.head is None: self.head = new_node else: current = self.head while current._next is not None: current = current._next current._next = new_node self._size += 1 return new_node def insert_before(self, val, new_val): """ inserts node before node at val """ if self.head is None: raise ValueError("Data not in list") if self.head.val == val: self.insert(new_val) return new_node = Node(new_val) current = self.head while current._next is not None: if current._next.val == val: new_node._next = current._next current._next = new_node self._size += 1 return current = current._next raise ValueError("Data not in list") def insert_after(self, val, new_val): """ inserts node after node at val """ new_node = Node(new_val) current = self.head while current is not None: if current.val == val: new_node._next = current._next current._next = new_node self._size += 1 return current = current._next raise ValueError("Data not in list") def kth_from_end(self, k): """ returns node at kth from end """ if self._size - k < 0: raise AttributeError current = self.head for i in range(self._size - k - 1): current = current._next return current
3.375
3
dataloader/viperlist_train.py
urasakikeisuke/rigidmask
138
3976
import torch.utils.data as data from PIL import Image import os import os.path import numpy as np import pdb import glob IMG_EXTENSIONS = [ '.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', ] def is_image_file(filename): return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) def dataloader(filepath): left_fold = 'image_2/' train = glob.glob(filepath+left_fold+'/0*.jpg') train = sorted(train) l0_train = [] l1_train = [] flow_train = [] for img in train: img1 = ('%s_%s.jpg'%(img.rsplit('_',1)[0],'%05d'%(1+int(img.split('.')[0].split('_')[-1])) )) flowp = img.replace('.jpg', '.png').replace('image_2','flow_occ') if (img1 in train and len(glob.glob(flowp))>0 and ('01000' not in img)): l0_train.append(img) l1_train.append(img1) flow_train.append(flowp) return l0_train, l1_train, flow_train
2.421875
2
modules/star_se_SP.py
tbersez/Allmine
5
3984
# STAR aligner single end mode, second pass # # This module runs the second pass of the STAR aligner 2-pass # strategy. The goal is to align reads taking into account splice # junctions found in the first pass. # # Inputs: # - sample_trim.fastq.gz # - splicing junction files (.tab) # # Output: # - aligned reads # - logs for follow-up and debugging if needed # # Parameters: # No fancy parameters needed, only the threads number is specified. rule star_se_SP: input: # fake input flag = ancient(config["REF"] + "REindexing_done.txt"), R1 = config["TRIMMED"] + "{samples}_trim.fastq.gz", genomeDir = ancient(config["REF"]) output: bam = config["MAP"] + "{samples}_sorted.bam.gz" params: prefix = config["MAP"] + "{samples}.", tmp = config["MAP"] + "SP/" + "{samples}_sp_STAR_TMP", bind = config["BIND"], cont = config["CONT"] benchmark: "benchmarks/star_SP/{samples}.tsv" message : "Running STAR second pass with {input.R1}. \n" shell: """ singularity exec -B {params.bind} {params.cont} \ STAR \ --runThreadN 10 \ --genomeDir {input.genomeDir} \ --readFilesIn {input.R1} \ --outSAMtype BAM SortedByCoordinate \ --outFileNamePrefix {params.prefix} \ --outStd BAM_SortedByCoordinate \ --outTmpDir {params.tmp} \ --scoreGap 0 \ --scoreGapNoncan -8 \ --scoreGapGCAG -4 \ --scoreGapATAC -8 \ --scoreGenomicLengthLog2scale -0.25 \ --scoreDelOpen -2 \ --scoreDelBase -2 \ --scoreInsOpen -2 \ --scoreInsBase -2 \ --scoreStitchSJshift 1 \ --readFilesCommand zcat | gzip --stdout > {output.bam} """
1.492188
1
gym_combat/gym_combat/envs/main.py
refaev/combat_gym
0
3992
from matplotlib import style from tqdm import tqdm style.use("ggplot") from gym_combat.envs.Arena.CState import State from gym_combat.envs.Arena.Entity import Entity from gym_combat.envs.Arena.Environment import Environment, Episode from gym_combat.envs.Common.constants import * from gym_combat.envs.Qtable import Qtable_DecisionMaker from gym_combat.envs.DQN import DQNAgent_keras from gym_combat.envs.Greedy import Greedy_player import matplotlib.pyplot as plt def print_start_of_game_info(blue_decision_maker, red_decision_maker): print("Starting tournament!") print("Blue player type: ", Agent_type_str[blue_decision_maker.type()]) if blue_decision_maker.path_model_to_load==None: print("Blue player starting with no model") else: print("Blue player starting tournament with trained model: " , blue_decision_maker.path_model_to_load) print("Red player type: ", Agent_type_str[red_decision_maker.type()]) if red_decision_maker.path_model_to_load==None: print("Red player starting with no model") else: print("Red player starting tournament with trained model: " , red_decision_maker.path_model_to_load) print("Number of rounds: ", NUM_OF_EPISODES) print("~~~ GO! ~~~\n\n") def evaluate(episode_number): #if episode_number % EVALUATE_PLAYERS_EVERY == 0: a = episode_number % EVALUATE_PLAYERS_EVERY if a>=0 and a<EVALUATE_BATCH_SIZE: EVALUATE = True else: EVALUATE = False return EVALUATE def print_states(observation_for_blue_s0, observation_for_blue_s1): import matplotlib.pyplot as plt plt.matshow(observation_for_blue_s0.img) plt.show() plt.matshow(observation_for_blue_s1.img) plt.show() if __name__ == '__main__': env = Environment(IS_TRAINING) print("Starting Blue player") blue_decision_maker = DQNAgent_keras.DQNAgent_keras() #blue_decision_maker = DQNAgent_keras.DQNAgent_keras(UPDATE_CONTEXT=True, path_model_to_load='conv1(6_6_1_256)_conv2(4_4_256_128)_conv3(3_3_128_128)_flatten_fc__blue_202001_ 0.95max_ -0.04avg_ -3.10min__1620558885.model') print("Starting red player") ### Red Decision Maker red_decision_maker = Greedy_player.Greedy_player() env.blue_player = Entity(blue_decision_maker) env.red_player = Entity(red_decision_maker) print_start_of_game_info(blue_decision_maker, red_decision_maker) NUM_OF_EPISODES = env.NUMBER_OF_EPISODES for episode in tqdm(range(1, NUM_OF_EPISODES + 1), ascii=True, unit='episodes'): EVALUATE = evaluate(episode) current_episode = Episode(episode, EVALUATE, show_always=False if IS_TRAINING else True) # set new start position for the players env.reset_game(episode) # get observation observation_for_blue_s0: State = env.get_observation_for_blue() action_blue = -1 # initialize the decision_makers for the players blue_decision_maker.set_initial_state(observation_for_blue_s0, episode) #red_decision_maker.set_initial_state(observation_for_red_s0, episode) # for non-greedy players blue_won_the_game = False red_won_the_game = False for steps_current_game in range(1, MAX_STEPS_PER_EPISODE + 1): ##### Blue's turn! ##### observation_for_blue_s0: State = env.get_observation_for_blue() current_episode.print_episode(env, steps_current_game) action_blue: AgentAction = blue_decision_maker.get_action(observation_for_blue_s0, EVALUATE) env.take_action(Color.Blue, action_blue) # take the action! current_episode.print_episode(env, steps_current_game) current_episode.is_terminal = (env.compute_terminal(whos_turn=Color.Blue) is not WinEnum.NoWin) if current_episode.is_terminal:# Blue won the game! blue_won_the_game=True else: ##### Red's turn! 
##### observation_for_red_s0: State = env.get_observation_for_red() action_red: AgentAction = red_decision_maker.get_action(observation_for_red_s0, EVALUATE) env.take_action(Color.Red, action_red) # take the action! current_episode.is_terminal = (env.compute_terminal(whos_turn=Color.Red) is not WinEnum.NoWin) if current_episode.is_terminal: # Blue won the game! red_won_the_game = True current_episode.print_episode(env, steps_current_game) reward_step_blue, reward_step_red = env.handle_reward(steps_current_game) current_episode.episode_reward_red += reward_step_red current_episode.episode_reward_blue += reward_step_blue observation_for_blue_s1: State = env.get_observation_for_blue() blue_decision_maker.update_context(observation_for_blue_s0, action_blue, reward_step_blue, observation_for_blue_s1, current_episode.is_terminal, EVALUATE) if steps_current_game == MAX_STEPS_PER_EPISODE: # if we exited the loop because we reached MAX_STEPS_PER_EPISODE current_episode.is_terminal = True if blue_won_the_game or red_won_the_game: break # for statistics env.update_win_counters(steps_current_game) env.data_for_statistics(current_episode.episode_reward_blue, current_episode.episode_reward_red, steps_current_game, blue_decision_maker.get_epsolon()) env.evaluate_info(EVALUATE, episode, steps_current_game, blue_decision_maker.get_epsolon()) if current_episode.episode_number % SAVE_STATS_EVERY == 0: if False:#blue_decision_maker.type()== AgentType.DQN_keras or blue_decision_maker.type() == AgentType.DQN_basic: blue_decision_maker._decision_maker.print_model(observation_for_blue_s0, episode, "conv")#env.save_folder_path) # print info of episode: current_episode.print_info_of_episode(env, steps_current_game, blue_decision_maker.get_epsolon(), episode) env.end_run() if blue_decision_maker.type() == AgentType.DQN_keras or blue_decision_maker.type() == AgentType.DQN_basic: blue_decision_maker._decision_maker.print_model(observation_for_blue_s0, episode, env.save_folder_path)
2.1875
2
phi/math/backend/_backend.py
marc-gav/PhiFlow
0
4000
from collections import namedtuple from contextlib import contextmanager from threading import Barrier from typing import List, Callable import numpy from ._dtype import DType, combine_types SolveResult = namedtuple('SolveResult', [ 'method', 'x', 'residual', 'iterations', 'function_evaluations', 'converged', 'diverged', 'message', ]) class ComputeDevice: """ A physical device that can be selected to perform backend computations. """ def __init__(self, backend: 'Backend', name: str, device_type: str, memory: int, processor_count: int, description: str, ref=None): self.name: str = name """ Name of the compute device. CPUs are typically called `'CPU'`. """ self.device_type: str = device_type """ Type of device such as `'CPU'`, `'GPU'` or `'TPU'`. """ self.memory: int = memory """ Maximum memory of the device that can be allocated (in bytes). -1 for n/a. """ self.processor_count: int = processor_count """ Number of CPU cores or GPU multiprocessors. -1 for n/a. """ self.description: str = description """ Further information about the device such as driver version. """ self.ref = ref """ (Optional) Reference to the internal device representation. """ self.backend: 'Backend' = backend """ Backend that this device belongs to. Different backends represent the same device with different objects. """ def __repr__(self): mem = f"{(self.memory / 1024 ** 2)} MB" if self.memory > 0 else "memory: n/a" pro = f"{self.processor_count} processors" if self.processor_count > 0 else "processors: n/a" descr = self.description.replace('\n', ' ') if len(descr) > 30: descr = descr[:28] + "..." return f"'{self.name}' ({self.device_type}) | {mem} | {pro} | {descr}" class Backend: def __init__(self, name: str, default_device: ComputeDevice): """ Backends delegate low-level operations to a compute library or emulate them. The methods of `Backend` form a comprehensive list of available operations. To support a compute library, subclass `Backend` and register it by adding it to `BACKENDS`. Args: name: Human-readable string default_device: `ComputeDevice` being used by default """ self._name = name self._default_device = default_device def __enter__(self): _DEFAULT.append(self) def __exit__(self, exc_type, exc_val, exc_tb): _DEFAULT.pop(-1) @property def name(self) -> str: return self._name def supports(self, feature: str or Callable) -> bool: """ Tests if this backend supports the given feature. Features correspond to a method of this backend that must be implemented if the feature is supported. Possible features: * `sparse_tensor` * `gradients Args: feature: `str` or unbound Backend method, e.g. `Backend.sparse_tensor` Returns: Whether the feature is supported. 
""" feature = feature if isinstance(feature, str) else feature.__name__ if not hasattr(Backend, feature): raise ValueError(f"Not a valid feature: '{feature}'") backend_fun = getattr(Backend, feature) impl_fun = getattr(self.__class__, feature) return impl_fun is not backend_fun def prefers_channels_last(self) -> bool: raise NotImplementedError() @property def precision(self) -> int: """ Short for math.backend.get_precision() """ return get_precision() @property def float_type(self) -> DType: return DType(float, self.precision) @property def as_registered(self) -> 'Backend': from phi.math.backend import BACKENDS for backend in BACKENDS: if self.name in backend.name: return backend raise RuntimeError(f"Backend '{self}' is not visible.") @property def complex_type(self) -> DType: return DType(complex, max(64, self.precision)) def combine_types(self, *dtypes: DType) -> DType: return combine_types(*dtypes, fp_precision=self.precision) def auto_cast(self, *tensors) -> list: """ Determins the appropriate values type resulting from operations involving the tensors as input. This method is called by the default implementations of basic operators. Backends can override this method to prevent unnecessary casting. Args: *tensors: tensors to cast and to consider when determining the common data type Returns: tensors cast to a common data type """ dtypes = [self.dtype(t) for t in tensors] result_type = self.combine_types(*dtypes) if result_type.kind in (int, float, complex, bool): tensors = [self.cast(t, result_type) for t in tensors] return tensors def __str__(self): return self.name def __repr__(self): return self.name def list_devices(self, device_type: str or None = None) -> List[ComputeDevice]: """ Fetches information about all available compute devices this backend can use. Implementations: * NumPy: [`os.cpu_count`](https://docs.python.org/3/library/os.html#os.cpu_count) * PyTorch: [`torch.cuda.get_device_properties`](https://pytorch.org/docs/stable/cuda.html#torch.cuda.get_device_properties) * TensorFlow: `tensorflow.python.client.device_lib.list_local_devices` * Jax: [`jax.devices`](https://jax.readthedocs.io/en/latest/jax.html#jax.devices) Args: device_type: (optional) Return only devices of this type, e.g. `'GPU'` or `'CPU'`. See `ComputeDevice.device_type`. Returns: `list` of all currently available devices. """ raise NotImplementedError() def get_default_device(self) -> ComputeDevice: return self._default_device def set_default_device(self, device: ComputeDevice or str): if isinstance(device, str): devices = self.list_devices(device) assert len(devices) >= 1, f"{self.name}: Cannot select '{device} because no device of this type is available." device = devices[0] self._default_device = device def seed(self, seed: int): raise NotImplementedError() def is_tensor(self, x, only_native=False): """ An object is considered a native tensor by a backend if no internal conversion is required by backend methods. An object is considered a tensor (nativer or otherwise) by a backend if it is not a struct (e.g. tuple, list) and all methods of the backend accept it as a tensor argument. Args: x: object to check only_native: If True, only accepts true native tensor representations, not Python numbers or others that are also supported as tensors (Default value = False) Returns: bool: whether `x` is considered a tensor by this backend """ raise NotImplementedError() def as_tensor(self, x, convert_external=True): """ Converts a tensor-like object to the native tensor representation of this backend. 
If x is a native tensor of this backend, it is returned without modification. If x is a Python number (numbers.Number instance), `convert_numbers` decides whether to convert it unless the backend cannot handle Python numbers. *Note:* There may be objects that are considered tensors by this backend but are not native and thus, will be converted by this method. Args: x: tensor-like, e.g. list, tuple, Python number, tensor convert_external: if False and `x` is a Python number that is understood by this backend, this method returns the number as-is. This can help prevent type clashes like int32 vs int64. (Default value = True) Returns: tensor representation of `x` """ raise NotImplementedError() def is_available(self, tensor) -> bool: """ Tests if the value of the tensor is known and can be read at this point. If true, `numpy(tensor)` must return a valid NumPy representation of the value. Tensors are typically available when the backend operates in eager mode. Args: tensor: backend-compatible tensor Returns: bool """ raise NotImplementedError() def numpy(self, tensor) -> numpy.ndarray: """ Returns a NumPy representation of the given tensor. If `tensor` is already a NumPy array, it is returned without modification. This method raises an error if the value of the tensor is not known at this point, e.g. because it represents a node in a graph. Use `is_available(tensor)` to check if the value can be represented as a NumPy array. Args: tensor: backend-compatible tensor Returns: NumPy representation of the values stored in the tensor """ raise NotImplementedError() def to_dlpack(self, tensor): raise NotImplementedError() def from_dlpack(self, capsule): raise NotImplementedError() def copy(self, tensor, only_mutable=False): raise NotImplementedError() def call(self, f: Callable, *args, name=None): """ Calls `f(*args)` and returns the result. This method may be used to register internal calls with the profiler. Usage: choose_backend(key).call(custom_function, *args) """ return f(*args) def block_until_ready(self, values): pass def jit_compile(self, f: Callable) -> Callable: return NotImplemented def functional_gradient(self, f, wrt: tuple or list, get_output: bool): raise NotImplementedError(self) def custom_gradient(self, f: Callable, gradient: Callable) -> Callable: """ Creates a function based on `f` that uses a custom gradient for backprop. Args: f: Forward function. gradient: Function for backprop. Will be called as `gradient(*d_out)` to compute the gradient of `f`. Returns: Function with similar signature and return values as `f`. However, the returned function does not support keyword arguments. """ return NotImplemented def jit_compile_grad(self, f, wrt: tuple or list, get_output: bool): raise NotImplementedError() def transpose(self, tensor, axes): raise NotImplementedError() def random_uniform(self, shape): """ Float tensor of selected precision containing random values in the range [0, 1) """ raise NotImplementedError(self) def random_normal(self, shape): """ Float tensor of selected precision containing random values sampled from a normal distribution with mean 0 and std 1. """ raise NotImplementedError(self) def stack(self, values, axis=0): raise NotImplementedError(self) def concat(self, values, axis): raise NotImplementedError(self) def pad(self, value, pad_width, mode: str = 'constant', constant_values=0): """ Pad a tensor with values as specified by `mode` and `constant_values`. If the mode is not supported, returns NotImplemented. 
Args: value: tensor pad_width: 2D tensor specifying the number of values padded to the edges of each axis in the form [[axis 0 lower, axis 0 upper], ...] including batch and component axes. mode: constant', 'boundary', 'periodic', 'symmetric', 'reflect' constant_values: used for out-of-bounds points if mode='constant' (Default value = 0) mode: str: (Default value = 'constant') Returns: padded tensor or NotImplemented """ raise NotImplementedError(self) def reshape(self, value, shape): raise NotImplementedError(self) def flip(self, value, axes: tuple or list): slices = tuple(slice(None, None, -1 if i in axes else None) for i in range(self.ndims(value))) return value[slices] def sum(self, value, axis=None, keepdims=False): raise NotImplementedError(self) def prod(self, value, axis=None): raise NotImplementedError(self) def divide_no_nan(self, x, y): """ Computes x/y but returns 0 if y=0. Args: x: y: Returns: """ raise NotImplementedError(self) def where(self, condition, x=None, y=None): raise NotImplementedError(self) def nonzero(self, values): """ Args: values: Tensor with only spatial dimensions Returns: non-zero multi-indices as tensor of shape (nnz, vector) """ raise NotImplementedError(self) def mean(self, value, axis=None, keepdims=False): raise NotImplementedError(self) def range(self, start, limit=None, delta=1, dtype: DType = DType(int, 32)): raise NotImplementedError(self) def zeros(self, shape, dtype: DType = None): raise NotImplementedError(self) def zeros_like(self, tensor): raise NotImplementedError(self) def ones(self, shape, dtype: DType = None): raise NotImplementedError(self) def ones_like(self, tensor): raise NotImplementedError(self) def meshgrid(self, *coordinates): raise NotImplementedError(self) def linspace(self, start, stop, number): raise NotImplementedError(self) def tensordot(self, a, a_axes: tuple or list, b, b_axes: tuple or list): """ Multiply-sum-reduce a_axes of a with b_axes of b. """ raise NotImplementedError(self) def matmul(self, A, b): raise NotImplementedError(self) def einsum(self, equation, *tensors): raise NotImplementedError(self) def while_loop(self, loop: Callable, values: tuple): """ ```python while any(values[0]): values = loop(*values) return values ``` This operation does not support backpropagation. Args: loop: Loop function, must return a `tuple` with entries equal to `values` in shape and data type. values: Initial values of loop variables. Returns: Loop variables upon loop completion. """ raise NotImplementedError(self) def abs(self, x): raise NotImplementedError(self) def sign(self, x): raise NotImplementedError(self) def round(self, x): raise NotImplementedError(self) def ceil(self, x): raise NotImplementedError(self) def floor(self, x): raise NotImplementedError(self) def max(self, x, axis=None, keepdims=False): raise NotImplementedError(self) def min(self, x, axis=None, keepdims=False): raise NotImplementedError(self) def maximum(self, a, b): raise NotImplementedError(self) def minimum(self, a, b): raise NotImplementedError(self) def clip(self, x, minimum, maximum): raise NotImplementedError(self) def sqrt(self, x): raise NotImplementedError(self) def exp(self, x): raise NotImplementedError(self) def conv(self, value, kernel, zero_padding=True): """ Convolve value with kernel. Depending on the tensor rank, the convolution is either 1D (rank=3), 2D (rank=4) or 3D (rank=5). Higher dimensions may not be supported. Args: value: tensor of shape (batch_size, in_channel, spatial...) 
kernel: tensor of shape (batch_size or 1, out_channel, in_channel, spatial...) zero_padding: If True, pads the edges of `value` with zeros so that the result has the same shape as `value`. Returns: Convolution result as tensor of shape (batch_size, out_channel, spatial...) """ raise NotImplementedError(self) def expand_dims(self, a, axis=0, number=1): raise NotImplementedError(self) def shape(self, tensor): raise NotImplementedError(self) def staticshape(self, tensor): raise NotImplementedError(self) def cast(self, x, dtype: DType): raise NotImplementedError(self) def to_float(self, x): """ Converts a tensor to floating point values with precision equal to the currently set default precision. See Also: `Backend.precision()`. If `x` is mutable and of the correct floating type, returns a copy of `x`. To convert float tensors to the backend precision but leave non-float tensors untouched, use `Backend.as_tensor()`. Args: x: tensor of bool, int or float Returns: Values of `x` as float tensor """ return self.cast(x, self.float_type) def to_int32(self, x): return self.cast(x, DType(int, 32)) def to_int64(self, x): return self.cast(x, DType(int, 64)) def to_complex(self, x): return self.cast(x, DType(complex, max(64, min(self.precision * 2, 128)))) def batched_gather_nd(self, values, indices): """ Gathers values from the tensor `values` at locations `indices`. The first dimension of `values` and `indices` is the batch dimension which must be either equal for both or one for either. Args: values: tensor of shape (batch, spatial..., channel) indices: int tensor of shape (batch, any..., multi_index) where the size of multi_index is values.rank - 2. Returns: Gathered values as tensor of shape (batch, any..., channel) """ raise NotImplementedError(self) def flatten(self, x): return self.reshape(x, (-1,)) def std(self, x, axis=None, keepdims=False): raise NotImplementedError(self) def boolean_mask(self, x, mask, axis=0): """ Args: x: tensor with any number of dimensions mask: 1D mask tensor axis: Axis index >= 0 """ raise NotImplementedError(self) def isfinite(self, x): raise NotImplementedError(self) def scatter(self, base_grid, indices, values, mode: str): """ Depending on `mode`, performs scatter_update or scatter_add. Args: base_grid: Tensor into which scatter values are inserted at indices. Tensor of shape (batch_size, spatial..., channels) indices: Tensor of shape (batch_size or 1, update_count, index_vector) values: Values to scatter at indices. Tensor of shape (batch_size or 1, update_count or 1, channels or 1) mode: One of ('update', 'add') Returns: Copy of base_grid with values at `indices` updated by `values`. """ raise NotImplementedError(self) def any(self, boolean_tensor, axis=None, keepdims=False): raise NotImplementedError(self) def all(self, boolean_tensor, axis=None, keepdims=False): raise NotImplementedError(self) def fft(self, x): """ Computes the n-dimensional FFT along all but the first and last dimensions. Args: x: tensor of dimension 3 or higher Returns: """ raise NotImplementedError(self) def ifft(self, k): """ Computes the n-dimensional inverse FFT along all but the first and last dimensions. 
Args: k: tensor of dimension 3 or higher Returns: """ raise NotImplementedError(self) def imag(self, x): raise NotImplementedError(self) def real(self, x): raise NotImplementedError(self) def sin(self, x): raise NotImplementedError(self) def cos(self, x): raise NotImplementedError(self) def tan(self, x): raise NotImplementedError(self) def log(self, x): """ Natural logarithm """ raise NotImplementedError(self) def log2(self, x): raise NotImplementedError(self) def log10(self, x): raise NotImplementedError(self) def dtype(self, array) -> DType: raise NotImplementedError(self) def tile(self, value, multiples): """ Repeats the tensor along each axis the number of times given by multiples. If `multiples` has more dimensions than `value`, these dimensions are added to `value` as outer dimensions. Args: value: tensor multiples: tuple or list of integers Returns: tile tensor """ raise NotImplementedError(self) def sparse_tensor(self, indices, values, shape): """ Optional features. Args: indices: tuple/list matching the dimensions (pair for matrix) values: param shape: shape: Returns: """ raise NotImplementedError(self) def coordinates(self, tensor): """ Returns the coordinates and values of a tensor. Args: tensor: Sparse tensor Returns: coordinates: `tuple` of tensor holding the coordinate vectors, i.e. (row, col) for matrices. indices: Tensor holding the corresponding values """ raise NotImplementedError(self) def minimize(self, method: str, f, x0, atol, max_iter, trj: bool): from scipy.optimize import OptimizeResult, minimize from threading import Thread assert self.supports(Backend.functional_gradient) assert len(self.staticshape(x0)) == 2 # (batch, parameters) batch_size = self.staticshape(x0)[0] fg = self.functional_gradient(f, [0], get_output=True) method_description = f"SciPy {method} with {self.name}" iterations = [0] * batch_size function_evaluations = [0] * batch_size xs = [None] * batch_size final_losses = [None] * batch_size converged = [False] * batch_size diverged = [False] * batch_size messages = [""] * batch_size f_inputs = [None] * batch_size f_b_losses = None f_b_losses_np = None f_grad_np = None f_input_available = Barrier(batch_size + 1) f_output_available = Barrier(batch_size + 1) finished = [False] * batch_size all_finished = False trajectories = [[] for _ in range(batch_size)] if trj else None threads = [] for b in range(batch_size): def b_thread(b=b): recent_b_losses = [] def b_fun(x: numpy.ndarray): function_evaluations[b] += 1 f_inputs[b] = self.as_tensor(x, convert_external=True) f_input_available.wait() f_output_available.wait() recent_b_losses.append(f_b_losses[b]) if final_losses[b] is None: # first evaluation final_losses[b] = f_b_losses[b] if trajectories is not None: trajectories[b].append(SolveResult(method_description, x0[b], f_b_losses[b], 0, 1, False, False, "")) return f_b_losses_np[b], f_grad_np[b] def callback(x, *args): # L-BFGS-B only passes x but the documentation says (x, state) iterations[b] += 1 loss = min(recent_b_losses) recent_b_losses.clear() final_losses[b] = loss if trajectories is not None: trajectories[b].append(SolveResult(method_description, x, loss, iterations[b], function_evaluations[b], False, False, "")) res = minimize(fun=b_fun, x0=x0[b], jac=True, method=method, tol=atol[b], options={'maxiter': max_iter[b]}, callback=callback) assert isinstance(res, OptimizeResult) # res.nit, res.nfev xs[b] = res.x converged[b] = res.success diverged[b] = res.status not in (0, 1) # 0=success messages[b] = res.message finished[b] = True while not 
all_finished: f_input_available.wait() f_output_available.wait() b_thread = Thread(target=b_thread) threads.append(b_thread) b_thread.start() while True: f_input_available.wait() if all(finished): all_finished = True f_output_available.wait() break _, f_b_losses, f_grad = fg(self.stack(f_inputs)) f_b_losses_np = self.numpy(f_b_losses).astype(numpy.float64) f_grad_np = self.numpy(f_grad).astype(numpy.float64) f_output_available.wait() for b_thread in threads: b_thread.join() # make sure threads exit correctly if trj: max_trajectory_length = max([len(t) for t in trajectories]) last_points = [SolveResult(method_description, xs[b], final_losses[b], iterations[b], function_evaluations[b], converged[b], diverged[b], "") for b in range(batch_size)] trajectories = [t[:-1] + [last_point] * (max_trajectory_length - len(t) + 1) for t, last_point in zip(trajectories, last_points)] trajectory = [] for states in zip(*trajectories): x = self.stack([self.to_float(state.x) for state in states]) residual = self.stack([state.residual for state in states]) iterations = [state.iterations for state in states] function_evaluations = [state.function_evaluations for state in states] converged = [state.converged for state in states] diverged = [state.diverged for state in states] trajectory.append(SolveResult(method_description, x, residual, iterations, function_evaluations, converged, diverged, messages)) return trajectory else: x = self.stack(xs) residual = self.stack(final_losses) return SolveResult(method_description, x, residual, iterations, function_evaluations, converged, diverged, messages) def linear_solve(self, method: str, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]: """ Solve the system of linear equations A · x = y. This method need not provide a gradient for the operation. Args: method: Which algorithm to use. One of `('auto', 'CG', 'CG-adaptive')`. lin: Linear operation. One of * sparse/dense matrix valid for all instances * tuple/list of sparse/dense matrices for varying matrices along batch, must have the same nonzero locations. * linear function A(x), must be called on all instances in parallel y: target result of A * x. 2nd order tensor (batch, vector) or list of vectors. x0: Initial guess of size (batch, parameters) rtol: Relative tolerance of size (batch,) atol: Absolute tolerance of size (batch,) max_iter: Maximum number of iterations of size (batch,) trj: Whether to record and return the optimization trajectory as a `List[SolveResult]`. Returns: result: `SolveResult` or `List[SolveResult]`, depending on `trj`. """ if method == 'auto': return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter, trj) elif method == 'CG': return self.conjugate_gradient(lin, y, x0, rtol, atol, max_iter, trj) elif method == 'CG-adaptive': return self.conjugate_gradient_adaptive(lin, y, x0, rtol, atol, max_iter, trj) else: raise NotImplementedError(f"Method '{method}' not supported for linear solve.") def conjugate_gradient(self, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]: """ Standard conjugate gradient algorithm. Signature matches to `Backend.linear_solve()`. 
""" # Based on "An Introduction to the Conjugate Gradient Method Without the Agonizing Pain" by <NAME> # symbols: dx=d, dy=q, step_size=alpha, residual_squared=delta, residual=r, y=b method = f"Φ-Flow CG ({self.name})" y = self.to_float(y) x0 = self.copy(self.to_float(x0), only_mutable=True) batch_size = self.staticshape(y)[0] tolerance_sq = self.maximum(rtol ** 2 * self.sum(y ** 2, -1), atol ** 2) x = x0 dx = residual = y - self.linear(lin, x) it_counter = 0 iterations = self.zeros([batch_size], DType(int, 32)) function_evaluations = self.ones([batch_size], DType(int, 32)) residual_squared = rsq0 = self.sum(residual ** 2, -1, keepdims=True) diverged = self.any(~self.isfinite(x), axis=(1,)) converged = self.all(residual_squared <= tolerance_sq, axis=(1,)) trajectory = [SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")] if trj else None finished = converged | diverged | (iterations >= max_iter); not_finished_1 = self.to_int32(~finished) # ; active = self.to_float(self.expand_dims(not_finished_1, -1)) while ~self.all(finished): it_counter += 1; iterations += not_finished_1 dy = self.linear(lin, dx); function_evaluations += not_finished_1 dx_dy = self.sum(dx * dy, axis=-1, keepdims=True) step_size = self.divide_no_nan(residual_squared, dx_dy) step_size *= self.expand_dims(self.to_float(not_finished_1), -1) # this is not really necessary but ensures batch-independence x += step_size * dx if it_counter % 50 == 0: residual = y - self.linear(lin, x); function_evaluations += 1 else: residual = residual - step_size * dy # in-place subtraction affects convergence residual_squared_old = residual_squared residual_squared = self.sum(residual ** 2, -1, keepdims=True) dx = residual + self.divide_no_nan(residual_squared, residual_squared_old) * dx diverged = self.any(residual_squared / rsq0 > 100, axis=(1,)) & (iterations >= 8) converged = self.all(residual_squared <= tolerance_sq, axis=(1,)) if trajectory is not None: trajectory.append(SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")) x = self.copy(x) iterations = self.copy(iterations) finished = converged | diverged | (iterations >= max_iter); not_finished_1 = self.to_int32(~finished) # ; active = self.to_float(self.expand_dims(not_finished_1, -1)) return trajectory if trj else SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "") def conjugate_gradient_adaptive(self, lin, y, x0, rtol, atol, max_iter, trj: bool) -> SolveResult or List[SolveResult]: """ Conjugate gradient algorithm with adaptive step size. Signature matches to `Backend.linear_solve()`. 
""" # Based on the variant described in "Methods of Conjugate Gradients for Solving Linear Systems" by <NAME> and <NAME> # https://nvlpubs.nist.gov/nistpubs/jres/049/jresv49n6p409_A1b.pdf method = f"Φ-Flow CG-adaptive ({self.name})" y = self.to_float(y) x0 = self.copy(self.to_float(x0), only_mutable=True) batch_size = self.staticshape(y)[0] tolerance_sq = self.maximum(rtol ** 2 * self.sum(y ** 2, -1), atol ** 2) x = x0 dx = residual = y - self.linear(lin, x) dy = self.linear(lin, dx) iterations = self.zeros([batch_size], DType(int, 32)) function_evaluations = self.ones([batch_size], DType(int, 32)) residual_squared = rsq0 = self.sum(residual ** 2, -1, keepdims=True) diverged = self.any(~self.isfinite(x), axis=(1,)) converged = self.all(residual_squared <= tolerance_sq, axis=(1,)) trajectory = [SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")] if trj else None continue_ = ~converged & ~diverged & (iterations < max_iter) def loop(continue_, it_counter, x, dx, dy, residual, iterations, function_evaluations, _converged, _diverged): continue_1 = self.to_int32(continue_) it_counter += 1 iterations += continue_1 dx_dy = self.sum(dx * dy, axis=-1, keepdims=True) step_size = self.divide_no_nan(self.sum(dx * residual, axis=-1, keepdims=True), dx_dy) step_size *= self.expand_dims(self.to_float(continue_1), -1) # this is not really necessary but ensures batch-independence x += step_size * dx # if it_counter % 50 == 0: # Not traceable since Python bool # residual = y - self.linear(lin, x); function_evaluations += 1 # else: residual = residual - step_size * dy # in-place subtraction affects convergence residual_squared = self.sum(residual ** 2, -1, keepdims=True) dx = residual - self.divide_no_nan(self.sum(residual * dy, axis=-1, keepdims=True) * dx, dx_dy) dy = self.linear(lin, dx); function_evaluations += continue_1 diverged = self.any(residual_squared / rsq0 > 100, axis=(1,)) & (iterations >= 8) converged = self.all(residual_squared <= tolerance_sq, axis=(1,)) if trajectory is not None: trajectory.append(SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "")) x = self.copy(x) iterations = self.copy(iterations) continue_ = ~converged & ~diverged & (iterations < max_iter) return continue_, it_counter, x, dx, dy, residual, iterations, function_evaluations, converged, diverged _, _, x, _, _, residual, iterations, function_evaluations, converged, diverged =\ self.while_loop(loop, (continue_, 0, x, dx, dy, residual, iterations, function_evaluations, converged, diverged)) return trajectory if trj else SolveResult(method, x, residual, iterations, function_evaluations, converged, diverged, "") def linear(self, lin, vector): if callable(lin): return lin(vector) elif isinstance(lin, (tuple, list)): for lin_i in lin: lin_shape = self.staticshape(lin_i) assert len(lin_shape) == 2 return self.stack([self.matmul(m, v) for m, v in zip(lin, self.unstack(vector))]) else: lin_shape = self.staticshape(lin) assert len(lin_shape) == 2, f"A must be a matrix but got shape {lin_shape}" return self.matmul(lin, vector) def gradients(self, y, xs: tuple or list, grad_y) -> tuple: raise NotImplementedError(self) def record_gradients(self, xs: tuple or list, persistent=False): raise NotImplementedError(self) def stop_gradient(self, value): raise NotImplementedError(self) def grid_sample(self, grid, spatial_dims: tuple, coordinates, extrapolation='constant'): """ Interpolates a regular grid at the specified coordinates. 
Args: grid: Tensor spatial_dims: Dimension indices that correspond to coordinate vectors coordinates: Tensor of floating grid indices. The last dimension must match `spatial_dims`. The first grid point of dimension i lies at position 0, the last at values.shape[i]-1. extrapolation: Values to use for coordinates outside the grid. One of `('undefined', 'zeros', 'boundary', 'periodic', 'symmetric', 'reflect')`. Returns: sampled values with linear interpolation """ return NotImplemented def variable(self, value): return NotImplemented def ndims(self, tensor): return len(self.staticshape(tensor)) def size(self, array): return self.prod(self.shape(array)) def batch_gather(self, tensor, batches): if isinstance(batches, int): batches = [batches] return tensor[batches, ...] def unstack(self, tensor, axis=0, keepdims=False) -> tuple: if axis < 0: axis += len(tensor.shape) if axis >= len(tensor.shape) or axis < 0: raise ValueError("Illegal axis value") result = [] for slice_idx in range(tensor.shape[axis]): if keepdims: component = tensor[tuple([slice(slice_idx, slice_idx + 1) if d == axis else slice(None) for d in range(len(tensor.shape))])] else: component = tensor[tuple([slice_idx if d == axis else slice(None) for d in range(len(tensor.shape))])] result.append(component) return tuple(result) def equal(self, x, y): """ Element-wise equality check """ raise NotImplementedError(self) def not_equal(self, x, y): return ~self.equal(x, y) def greater_than(self, x, y): x, y = self.auto_cast(x, y) return x > y def greater_or_equal(self, x, y): x, y = self.auto_cast(x, y) return x >= y def add(self, a, b): a, b = self.auto_cast(a, b) return a + b def sub(self, a, b): a, b = self.auto_cast(a, b) return a - b def mul(self, a, b): a, b = self.auto_cast(a, b) return a * b def div(self, numerator, denominator): numerator, denominator = self.auto_cast(numerator, denominator) return numerator / denominator def pow(self, base, exp): base, exp = self.auto_cast(base, exp) return base ** exp def mod(self, dividend, divisor): dividend, divisor = self.auto_cast(dividend, divisor) return dividend % divisor def and_(self, a, b): a, b = self.auto_cast(a, b) return a & b def or_(self, a, b): a, b = self.auto_cast(a, b) return a | b def xor(self, a, b): a, b = self.auto_cast(a, b) return a ^ b def floordiv(self, a, b): a, b = self.auto_cast(a, b) return a // b BACKENDS = [] """ Global list of all registered backends. Register a `Backend` by adding it to the list. """ _DEFAULT = [] # [0] = global default, [1:] from 'with' blocks _PRECISION = [32] # [0] = global precision in bits, [1:] from 'with' blocks def choose_backend(*values, prefer_default=False) -> Backend: """ Selects a suitable backend to handle the given values. This function is used by most math functions operating on `Tensor` objects to delegate the actual computations. Args: *values: prefer_default: if True, selects the default backend assuming it can handle handle the values, see `default_backend()`. raise_error: Determines the behavior of this function if no backend can handle the given values. If True, raises a `NoBackendFound` error, else returns `None`. 
Returns: the selected `Backend` """ # --- Default Backend has priority --- if _is_applicable(_DEFAULT[-1], values) and (prefer_default or _is_specific(_DEFAULT[-1], values)): return _DEFAULT[-1] # --- Filter out non-applicable --- backends = [backend for backend in BACKENDS if _is_applicable(backend, values)] if len(backends) == 0: raise NoBackendFound(f"No backend found for types {[type(v).__name__ for v in values]}; registered backends are {BACKENDS}") # --- Native tensors? --- for backend in backends: if _is_specific(backend, values): return backend return backends[0] class NoBackendFound(Exception): """ Thrown by `choose_backend` if no backend can handle the given values. """ def __init__(self, msg): Exception.__init__(self, msg) def default_backend() -> Backend: """ The default backend is preferred by `choose_backend()`. The default backend can be set globally using `set_global_default_backend()` and locally using `with backend:`. Returns: current default `Backend` """ return _DEFAULT[-1] def context_backend() -> Backend or None: """ Returns the backend set by the inner-most surrounding `with backend:` block. If called outside a backend context, returns `None`. Returns: `Backend` or `None` """ return _DEFAULT[-1] if len(_DEFAULT) > 1 else None def set_global_default_backend(backend: Backend): """ Sets the given backend as default. This setting can be overridden using `with backend:`. See `default_backend()`, `choose_backend()`. Args: backend: `Backend` to set as default """ assert isinstance(backend, Backend) _DEFAULT[0] = backend def set_global_precision(floating_point_bits: int): """ Sets the floating point precision of DYNAMIC_BACKEND which affects all registered backends. If `floating_point_bits` is an integer, all floating point tensors created henceforth will be of the corresponding data type, float16, float32 or float64. Operations may also convert floating point values to this precision, even if the input had a different precision. If `floating_point_bits` is None, new tensors will default to float32 unless specified otherwise. The output of math operations has the same precision as its inputs. Args: floating_point_bits: one of (16, 32, 64, None) """ _PRECISION[0] = floating_point_bits def get_precision() -> int: """ Gets the current target floating point precision in bits. The precision can be set globally using `set_global_precision()` or locally using `with precision(p):`. Any Backend method may convert floating point values to this precision, even if the input had a different precision. Returns: 16 for half, 32 for single, 64 for double """ return _PRECISION[-1] @contextmanager def precision(floating_point_bits: int): """ Sets the floating point precision for the local context. Usage: `with precision(p):` This overrides the global setting, see `set_global_precision()`. Args: floating_point_bits: 16 for half, 32 for single, 64 for double """ _PRECISION.append(floating_point_bits) try: yield None finally: _PRECISION.pop(-1) def convert(tensor, backend: Backend = None, use_dlpack=True): """ Convert a Tensor to the native format of `backend`. If the target backend can operate natively on `tensor`, returns `tensor`. If both backends support *DLPack* and `use_dlpack=True`, uses zero-copy conversion using the DLPack library. Else, intermediately converts `tensor` to a NumPy array. *Warning*: This operation breaks the automatic differentiation chain. Args: tensor: Native tensor belonging to any registered backend. backend: Target backend. 
If `None`, uses the current default backend, see `default_backend()`. Returns: Tensor belonging to `backend`. """ backend = backend or default_backend() current_backend = choose_backend(tensor, prefer_default=False) if backend.is_tensor(tensor, True) or backend is current_backend: return tensor if use_dlpack and current_backend.supports(Backend.to_dlpack) and backend.supports(Backend.from_dlpack): capsule = current_backend.to_dlpack(tensor) return backend.from_dlpack(capsule) else: nparray = current_backend.numpy(tensor) return backend.as_tensor(nparray) # Backend choice utility functions def _is_applicable(backend, values): for value in values: if not backend.is_tensor(value, only_native=False): return False return True def _is_specific(backend, values): for value in values: if backend.is_tensor(value, only_native=True): return True return False # Other low-level helper functions def combined_dim(dim1, dim2, type_str: str = 'batch'): if dim1 is None and dim2 is None: return None if dim1 is None or dim1 == 1: return dim2 if dim2 is None or dim2 == 1: return dim1 assert dim1 == dim2, f"Incompatible {type_str} dimensions: x0 {dim1}, y {dim2}" return dim1
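A note on the settings pattern used in the module above: `_DEFAULT` and `_PRECISION` are plain lists used as stacks, where index 0 holds the global value and `with` blocks push temporary overrides that `default_backend()` / `get_precision()` read from the end. The standalone sketch below illustrates only that pattern; the names `_SETTING`, `get_setting` and `setting` are illustrative and are not part of the library API.

from contextlib import contextmanager

_SETTING = [32]  # index 0 is the global value; later entries come from 'with' blocks

def get_setting():
    # The innermost override wins; with no overrides this returns the global value.
    return _SETTING[-1]

@contextmanager
def setting(value):
    # Push an override for the duration of the 'with' block, then restore the previous value.
    _SETTING.append(value)
    try:
        yield
    finally:
        _SETTING.pop(-1)

print(get_setting())          # 32
with setting(64):
    print(get_setting())      # 64
    with setting(16):
        print(get_setting())  # 16
print(get_setting())          # 32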
2.515625
3
mellon/factories/filesystem/file.py
LaudateCorpus1/mellon
5
4008
import collections import os.path from zope import component from zope import interface from zope.component.factory import Factory from sparc.configuration import container import mellon @interface.implementer(mellon.IByteMellonFile) class MellonByteFileFromFilePathAndConfig(object): def __init__(self, file_path, config): self.file_path = file_path self.config = config def __str__(self): return "byte file at location {}".format(self.file_path) def __iter__(self): with open(self.file_path, 'rb') as stream: file_ = component.createObject(u'mellon.byte_file_from_stream', stream, self.config) for snippet in file_: yield snippet mellonByteFileFromFilePathAndConfigFactory = Factory(MellonByteFileFromFilePathAndConfig) @interface.implementer(mellon.IUnicodeMellonFile) class MellonUnicodeFileFromFilePathAndConfig(object): def __init__(self, file_path, config): self.file_path = file_path self.config = config def __str__(self): return "Unicode file at location {}".format(self.file_path) def __iter__(self): _end = 0 _buffer = collections.deque() _eof_buffer = collections.deque() with open(str(self.file_path), 'rU') as stream: file_ = component.createObject(u'mellon.unicode_file_from_stream', stream, self.config) for snippet in file_: yield snippet mellonUnicodeFileFromFilePathAndConfigFactory = Factory(MellonUnicodeFileFromFilePathAndConfig) @interface.implementer(mellon.IMellonFileProvider) class MellonFileProviderForRecursiveDirectoryConfig(object): def __init__(self, config): """Init Args: config: sparc.configuration.container.ISparcAppPyContainerConfiguration provider with mellon.factories.filesystem[configure.yaml:FileSystemDir] and mellon[configure.yaml:MellonSnippet] entries. """ self.config = config def __iter__(self): base_path = container.IPyContainerConfigValue(self.config).\ get('FileSystemDir')['directory'] for d, dirs, files in os.walk(base_path): for f in files: path = os.path.join(d, f) if not os.path.isfile(path): continue #get interface-assigned string (IPath) path = component.createObject(u'mellon.filesystem_path', path) if mellon.IBinaryChecker(path).check(): yield component.createObject(\ u'mellon.factories.filesystem.byte_file', path, self.config) else: yield component.createObject(\ u'mellon.factories.filesystem.unicode_file', path, self.config) mellonFileProviderForRecursiveDirectoryConfigFactory = Factory(MellonFileProviderForRecursiveDirectoryConfig) interface.alsoProvides(mellonFileProviderForRecursiveDirectoryConfigFactory, mellon.IMellonFileProviderFactory)
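The recursive-directory provider above walks a tree with `os.walk` and routes each file to either the byte or the unicode factory based on `mellon.IBinaryChecker`. The sketch below reproduces that walk-and-classify flow without the zope component registry; `looks_binary` is a hypothetical stand-in heuristic, not the checker mellon actually uses.

import os

def looks_binary(path, blocksize=1024):
    # Stand-in heuristic: treat the file as binary if its first block contains a NUL byte.
    with open(path, 'rb') as f:
        return b'\x00' in f.read(blocksize)

def iter_classified(base_path):
    # Walk the tree and yield ('byte' | 'unicode', path) pairs, mirroring the provider's dispatch.
    for d, _dirs, files in os.walk(base_path):
        for name in files:
            path = os.path.join(d, name)
            if not os.path.isfile(path):
                continue
            yield ('byte' if looks_binary(path) else 'unicode'), path

for kind, path in iter_classified('.'):
    print(kind, path)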
1.507813
2
neural-networks.py
PacktPublishing/Python-Deep-Learning-for-Beginners-
7
4016
import numpy as np

# Perceptron

def predict_perceptron(inputs, weights):
    # Hard-threshold perceptron: fire (1) if the weighted sum is positive, else 0.
    if np.dot(inputs, weights) > 0:
        return 1
    else:
        return 0


def predict_perceptron_proper(inputs, weights):
    # Same perceptron, written as an explicit linear model followed by a step activation.
    def step_function(input):
        return 1 if input > 0 else 0

    def linear_model(inputs, weights):
        return np.dot(inputs, weights)

    return step_function(linear_model(inputs, weights))


def neuron(inputs, weights):
    # Sigmoid neuron: the same linear model, but with a smooth sigmoid activation.
    def sigmoid_function(input):
        return 1 / (1 + np.exp(-1 * input))

    def linear_model(inputs, weights):
        return np.dot(inputs, weights)

    return sigmoid_function(linear_model(inputs, weights))


# NOTE: inputs, weights1 and weights2 are never defined in this file, so this line
# raises a NameError as written; see the usage sketch after this file for one way
# to supply them.
neural_network = neuron(neuron(inputs, weights1), weights2)
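The last line of the file above composes two sigmoid neurons but never defines `inputs`, `weights1` or `weights2`. A hedged usage sketch with made-up example values; `neuron` is restated compactly so the snippet runs on its own.

import numpy as np

def neuron(inputs, weights):
    # Compact restatement of neuron() from the file above: sigmoid of the weighted sum.
    return 1 / (1 + np.exp(-np.dot(inputs, weights)))

# Example values only: two inputs feed one hidden sigmoid neuron,
# whose scalar output feeds a second sigmoid neuron with a single weight.
inputs = np.array([0.5, -1.0])
weights1 = np.array([0.8, 0.2])
weights2 = 1.5

hidden = neuron(inputs, weights1)           # scalar in (0, 1)
neural_network = neuron(hidden, weights2)   # scalar in (0, 1)
print(hidden, neural_network)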
2.421875
2
clang/tools/scan-build-py/libscanbuild/analyze.py
Kvarnefalk/llvm-project
1
4024
# -*- coding: utf-8 -*- # Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. # See https://llvm.org/LICENSE.txt for license information. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception """ This module implements the 'scan-build' command API. To run the static analyzer against a build is done in multiple steps: -- Intercept: capture the compilation command during the build, -- Analyze: run the analyzer against the captured commands, -- Report: create a cover report from the analyzer outputs. """ import re import os import os.path import json import logging import multiprocessing import tempfile import functools import subprocess import contextlib import datetime import shutil import glob from collections import defaultdict from libscanbuild import command_entry_point, compiler_wrapper, \ wrapper_environment, run_build, run_command, CtuConfig from libscanbuild.arguments import parse_args_for_scan_build, \ parse_args_for_analyze_build from libscanbuild.intercept import capture from libscanbuild.report import document from libscanbuild.compilation import split_command, classify_source, \ compiler_language from libscanbuild.clang import get_version, get_arguments, get_triple_arch, \ ClangErrorException from libscanbuild.shell import decode __all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper'] COMPILER_WRAPPER_CC = 'analyze-cc' COMPILER_WRAPPER_CXX = 'analyze-c++' CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt' CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps' @command_entry_point def scan_build(): """ Entry point for scan-build command. """ args = parse_args_for_scan_build() # will re-assign the report directory as new output with report_directory( args.output, args.keep_empty, args.output_format) as args.output: # Run against a build command. there are cases, when analyzer run # is not required. But we need to set up everything for the # wrappers, because 'configure' needs to capture the CC/CXX values # for the Makefile. if args.intercept_first: # Run build command with intercept module. exit_code = capture(args) # Run the analyzer against the captured commands. if need_analyzer(args.build): govern_analyzer_runs(args) else: # Run build command and analyzer with compiler wrappers. environment = setup_environment(args) exit_code = run_build(args.build, env=environment) # Cover report generation and bug counting. number_of_bugs = document(args) # Set exit status as it was requested. return number_of_bugs if args.status_bugs else exit_code @command_entry_point def analyze_build(): """ Entry point for analyze-build command. """ args = parse_args_for_analyze_build() # will re-assign the report directory as new output with report_directory(args.output, args.keep_empty, args.output_format) as args.output: # Run the analyzer against a compilation db. govern_analyzer_runs(args) # Cover report generation and bug counting. number_of_bugs = document(args) # Set exit status as it was requested. return number_of_bugs if args.status_bugs else 0 def need_analyzer(args): """ Check the intent of the build command. When static analyzer run against project configure step, it should be silent and no need to run the analyzer or generate report. To run `scan-build` against the configure step might be necessary, when compiler wrappers are used. That's the moment when build setup check the compiler and capture the location for the build process. 
""" return len(args) and not re.search(r'configure|autogen', args[0]) def prefix_with(constant, pieces): """ From a sequence create another sequence where every second element is from the original sequence and the odd elements are the prefix. eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3] """ return [elem for piece in pieces for elem in [constant, piece]] def get_ctu_config_from_args(args): """ CTU configuration is created from the chosen phases and dir. """ return ( CtuConfig(collect=args.ctu_phases.collect, analyze=args.ctu_phases.analyze, dir=args.ctu_dir, extdef_map_cmd=args.extdef_map_cmd) if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir') else CtuConfig(collect=False, analyze=False, dir='', extdef_map_cmd='')) def get_ctu_config_from_json(ctu_conf_json): """ CTU configuration is created from the chosen phases and dir. """ ctu_config = json.loads(ctu_conf_json) # Recover namedtuple from json when coming from analyze-cc or analyze-c++ return CtuConfig(collect=ctu_config[0], analyze=ctu_config[1], dir=ctu_config[2], extdef_map_cmd=ctu_config[3]) def create_global_ctu_extdef_map(extdef_map_lines): """ Takes iterator of individual external definition maps and creates a global map keeping only unique names. We leave conflicting names out of CTU. :param extdef_map_lines: Contains the id of a definition (mangled name) and the originating source (the corresponding AST file) name. :type extdef_map_lines: Iterator of str. :returns: Mangled name - AST file pairs. :rtype: List of (str, str) tuples. """ mangled_to_asts = defaultdict(set) for line in extdef_map_lines: mangled_name, ast_file = line.strip().split(' ', 1) mangled_to_asts[mangled_name].add(ast_file) mangled_ast_pairs = [] for mangled_name, ast_files in mangled_to_asts.items(): if len(ast_files) == 1: mangled_ast_pairs.append((mangled_name, next(iter(ast_files)))) return mangled_ast_pairs def merge_ctu_extdef_maps(ctudir): """ Merge individual external definition maps into a global one. As the collect phase runs parallel on multiple threads, all compilation units are separately mapped into a temporary file in CTU_TEMP_DEFMAP_FOLDER. These definition maps contain the mangled names and the source (AST generated from the source) which had their definition. These files should be merged at the end into a global map file: CTU_EXTDEF_MAP_FILENAME.""" def generate_extdef_map_lines(extdefmap_dir): """ Iterate over all lines of input files in a determined order. """ files = glob.glob(os.path.join(extdefmap_dir, '*')) files.sort() for filename in files: with open(filename, 'r') as in_file: for line in in_file: yield line def write_global_map(arch, mangled_ast_pairs): """ Write (mangled name, ast file) pairs into final file. 
""" extern_defs_map_file = os.path.join(ctudir, arch, CTU_EXTDEF_MAP_FILENAME) with open(extern_defs_map_file, 'w') as out_file: for mangled_name, ast_file in mangled_ast_pairs: out_file.write('%s %s\n' % (mangled_name, ast_file)) triple_arches = glob.glob(os.path.join(ctudir, '*')) for triple_path in triple_arches: if os.path.isdir(triple_path): triple_arch = os.path.basename(triple_path) extdefmap_dir = os.path.join(ctudir, triple_arch, CTU_TEMP_DEFMAP_FOLDER) extdef_map_lines = generate_extdef_map_lines(extdefmap_dir) mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines) write_global_map(triple_arch, mangled_ast_pairs) # Remove all temporary files shutil.rmtree(extdefmap_dir, ignore_errors=True) def run_analyzer_parallel(args): """ Runs the analyzer against the given compilation database. """ def exclude(filename, directory): """ Return true when any excluded directory prefix the filename. """ if not os.path.isabs(filename): # filename is either absolute or relative to directory. Need to turn # it to absolute since 'args.excludes' are absolute paths. filename = os.path.normpath(os.path.join(directory, filename)) return any(re.match(r'^' + exclude_directory, filename) for exclude_directory in args.excludes) consts = { 'clang': args.clang, 'output_dir': args.output, 'output_format': args.output_format, 'output_failures': args.output_failures, 'direct_args': analyzer_params(args), 'force_debug': args.force_debug, 'ctu': get_ctu_config_from_args(args) } logging.debug('run analyzer against compilation database') with open(args.cdb, 'r') as handle: generator = (dict(cmd, **consts) for cmd in json.load(handle) if not exclude( cmd['file'], cmd['directory'])) # when verbose output requested execute sequentially pool = multiprocessing.Pool(1 if args.verbose > 2 else None) for current in pool.imap_unordered(run, generator): if current is not None: # display error message from the static analyzer for line in current['error_output']: logging.info(line.rstrip()) pool.close() pool.join() def govern_analyzer_runs(args): """ Governs multiple runs in CTU mode or runs once in normal mode. """ ctu_config = get_ctu_config_from_args(args) # If we do a CTU collect (1st phase) we remove all previous collection # data first. if ctu_config.collect: shutil.rmtree(ctu_config.dir, ignore_errors=True) # If the user asked for a collect (1st) and analyze (2nd) phase, we do an # all-in-one run where we deliberately remove collection data before and # also after the run. If the user asks only for a single phase data is # left so multiple analyze runs can use the same data gathered by a single # collection run. if ctu_config.collect and ctu_config.analyze: # CTU strings are coming from args.ctu_dir and extdef_map_cmd, # so we can leave it empty args.ctu_phases = CtuConfig(collect=True, analyze=False, dir='', extdef_map_cmd='') run_analyzer_parallel(args) merge_ctu_extdef_maps(ctu_config.dir) args.ctu_phases = CtuConfig(collect=False, analyze=True, dir='', extdef_map_cmd='') run_analyzer_parallel(args) shutil.rmtree(ctu_config.dir, ignore_errors=True) else: # Single runs (collect or analyze) are launched from here. run_analyzer_parallel(args) if ctu_config.collect: merge_ctu_extdef_maps(ctu_config.dir) def setup_environment(args): """ Set up environment for build command to interpose compiler wrapper. 
""" environment = dict(os.environ) environment.update(wrapper_environment(args)) environment.update({ 'CC': COMPILER_WRAPPER_CC, 'CXX': COMPILER_WRAPPER_CXX, 'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '', 'ANALYZE_BUILD_REPORT_DIR': args.output, 'ANALYZE_BUILD_REPORT_FORMAT': args.output_format, 'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '', 'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)), 'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else '', 'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args)) }) return environment @command_entry_point def analyze_compiler_wrapper(): """ Entry point for `analyze-cc` and `analyze-c++` compiler wrappers. """ return compiler_wrapper(analyze_compiler_wrapper_impl) def analyze_compiler_wrapper_impl(result, execution): """ Implements analyzer compiler wrapper functionality. """ # don't run analyzer when compilation fails. or when it's not requested. if result or not os.getenv('ANALYZE_BUILD_CLANG'): return # check is it a compilation? compilation = split_command(execution.cmd) if compilation is None: return # collect the needed parameters from environment, crash when missing parameters = { 'clang': os.getenv('ANALYZE_BUILD_CLANG'), 'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'), 'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'), 'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'), 'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS', '').split(' '), 'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'), 'directory': execution.cwd, 'command': [execution.cmd[0], '-c'] + compilation.flags, 'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU')) } # call static analyzer against the compilation for source in compilation.files: parameters.update({'file': source}) logging.debug('analyzer parameters %s', parameters) current = run(parameters) # display error message from the static analyzer if current is not None: for line in current['error_output']: logging.info(line.rstrip()) @contextlib.contextmanager def report_directory(hint, keep, output_format): """ Responsible for the report directory. hint -- could specify the parent directory of the output directory. keep -- a boolean value to keep or delete the empty report directory. """ stamp_format = 'scan-build-%Y-%m-%d-%H-%M-%S-%f-' stamp = datetime.datetime.now().strftime(stamp_format) parent_dir = os.path.abspath(hint) if not os.path.exists(parent_dir): os.makedirs(parent_dir) name = tempfile.mkdtemp(prefix=stamp, dir=parent_dir) logging.info('Report directory created: %s', name) try: yield name finally: if os.listdir(name): if output_format != 'sarif': # 'scan-view' currently does not support sarif format. msg = "Run 'scan-view %s' to examine bug reports." else: msg = "View result at %s/results-merged.sarif." keep = True else: if keep: msg = "Report directory '%s' contains no report, but kept." else: msg = "Removing directory '%s' because it contains no report." logging.warning(msg, name) if not keep: os.rmdir(name) def analyzer_params(args): """ A group of command line arguments can mapped to command line arguments of the analyzer. This method generates those. 
""" result = [] if args.store_model: result.append('-analyzer-store={0}'.format(args.store_model)) if args.constraints_model: result.append('-analyzer-constraints={0}'.format( args.constraints_model)) if args.internal_stats: result.append('-analyzer-stats') if args.analyze_headers: result.append('-analyzer-opt-analyze-headers') if args.stats: result.append('-analyzer-checker=debug.Stats') if args.maxloop: result.extend(['-analyzer-max-loop', str(args.maxloop)]) if args.output_format: result.append('-analyzer-output={0}'.format(args.output_format)) if args.analyzer_config: result.extend(['-analyzer-config', args.analyzer_config]) if args.verbose >= 4: result.append('-analyzer-display-progress') if args.plugins: result.extend(prefix_with('-load', args.plugins)) if args.enable_checker: checkers = ','.join(args.enable_checker) result.extend(['-analyzer-checker', checkers]) if args.disable_checker: checkers = ','.join(args.disable_checker) result.extend(['-analyzer-disable-checker', checkers]) return prefix_with('-Xclang', result) def require(required): """ Decorator for checking the required values in state. It checks the required attributes in the passed state and stop when any of those is missing. """ def decorator(function): @functools.wraps(function) def wrapper(*args, **kwargs): for key in required: if key not in args[0]: raise KeyError('{0} not passed to {1}'.format( key, function.__name__)) return function(*args, **kwargs) return wrapper return decorator @require(['command', # entry from compilation database 'directory', # entry from compilation database 'file', # entry from compilation database 'clang', # clang executable name (and path) 'direct_args', # arguments from command line 'force_debug', # kill non debug macros 'output_dir', # where generated report files shall go 'output_format', # it's 'plist', 'html', 'plist-html', 'plist-multi-file', or 'sarif' 'output_failures', # generate crash reports or not 'ctu']) # ctu control options def run(opts): """ Entry point to run (or not) static analyzer against a single entry of the compilation database. This complex task is decomposed into smaller methods which are calling each other in chain. If the analysis is not possible the given method just return and break the chain. The passed parameter is a python dictionary. Each method first check that the needed parameters received. (This is done by the 'require' decorator. It's like an 'assert' to check the contract between the caller and the called method.) """ try: command = opts.pop('command') command = command if isinstance(command, list) else decode(command) logging.debug("Run analyzer against '%s'", command) opts.update(classify_parameters(command)) return arch_check(opts) except Exception: logging.error("Problem occurred during analysis.", exc_info=1) return None @require(['clang', 'directory', 'flags', 'file', 'output_dir', 'language', 'error_output', 'exit_code']) def report_failure(opts): """ Create report when analyzer failed. The major report is the preprocessor output. The output filename generated randomly. The compiler output also captured into '.stderr.txt' file. And some more execution context also saved into '.info.txt' file. """ def extension(): """ Generate preprocessor file extension. """ mapping = {'objective-c++': '.mii', 'objective-c': '.mi', 'c++': '.ii'} return mapping.get(opts['language'], '.i') def destination(): """ Creates failures directory if not exits yet. 
""" failures_dir = os.path.join(opts['output_dir'], 'failures') if not os.path.isdir(failures_dir): os.makedirs(failures_dir) return failures_dir # Classify error type: when Clang terminated by a signal it's a 'Crash'. # (python subprocess Popen.returncode is negative when child terminated # by signal.) Everything else is 'Other Error'. error = 'crash' if opts['exit_code'] < 0 else 'other_error' # Create preprocessor output file name. (This is blindly following the # Perl implementation.) (handle, name) = tempfile.mkstemp(suffix=extension(), prefix='clang_' + error + '_', dir=destination()) os.close(handle) # Execute Clang again, but run the syntax check only. cwd = opts['directory'] cmd = [opts['clang'], '-fsyntax-only', '-E'] + opts['flags'] + \ [opts['file'], '-o', name] try: cmd = get_arguments(cmd, cwd) run_command(cmd, cwd=cwd) except subprocess.CalledProcessError: pass except ClangErrorException: pass # write general information about the crash with open(name + '.info.txt', 'w') as handle: handle.write(opts['file'] + os.linesep) handle.write(error.title().replace('_', ' ') + os.linesep) handle.write(' '.join(cmd) + os.linesep) handle.write(' '.join(os.uname()) + os.linesep) handle.write(get_version(opts['clang'])) handle.close() # write the captured output too with open(name + '.stderr.txt', 'w') as handle: handle.writelines(opts['error_output']) handle.close() @require(['clang', 'directory', 'flags', 'direct_args', 'file', 'output_dir', 'output_format']) def run_analyzer(opts, continuation=report_failure): """ It assembles the analysis command line and executes it. Capture the output of the analysis and returns with it. If failure reports are requested, it calls the continuation to generate it. """ def target(): """ Creates output file name for reports. """ if opts['output_format'] in { 'plist', 'plist-html', 'plist-multi-file'}: (handle, name) = tempfile.mkstemp(prefix='report-', suffix='.plist', dir=opts['output_dir']) os.close(handle) return name elif opts['output_format'] == 'sarif': (handle, name) = tempfile.mkstemp(prefix='result-', suffix='.sarif', dir=opts['output_dir']) os.close(handle) return name return opts['output_dir'] try: cwd = opts['directory'] cmd = get_arguments([opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] + [opts['file'], '-o', target()], cwd) output = run_command(cmd, cwd=cwd) return {'error_output': output, 'exit_code': 0} except subprocess.CalledProcessError as ex: result = {'error_output': ex.output, 'exit_code': ex.returncode} if opts.get('output_failures', False): opts.update(result) continuation(opts) return result except ClangErrorException as ex: result = {'error_output': ex.error, 'exit_code': 0} if opts.get('output_failures', False): opts.update(result) continuation(opts) return result def extdef_map_list_src_to_ast(extdef_src_list): """ Turns textual external definition map list with source files into an external definition map list with ast files. """ extdef_ast_list = [] for extdef_src_txt in extdef_src_list: mangled_name, path = extdef_src_txt.split(" ", 1) # Normalize path on windows as well path = os.path.splitdrive(path)[1] # Make relative path out of absolute path = path[1:] if path[0] == os.sep else path ast_path = os.path.join("ast", path + ".ast") extdef_ast_list.append(mangled_name + " " + ast_path) return extdef_ast_list @require(['clang', 'directory', 'flags', 'direct_args', 'file', 'ctu']) def ctu_collect_phase(opts): """ Preprocess source by generating all data needed by CTU analysis. 
""" def generate_ast(triple_arch): """ Generates ASTs for the current compilation command. """ args = opts['direct_args'] + opts['flags'] ast_joined_path = os.path.join(opts['ctu'].dir, triple_arch, 'ast', os.path.realpath(opts['file'])[1:] + '.ast') ast_path = os.path.abspath(ast_joined_path) ast_dir = os.path.dirname(ast_path) if not os.path.isdir(ast_dir): try: os.makedirs(ast_dir) except OSError: # In case an other process already created it. pass ast_command = [opts['clang'], '-emit-ast'] ast_command.extend(args) ast_command.append('-w') ast_command.append(opts['file']) ast_command.append('-o') ast_command.append(ast_path) logging.debug("Generating AST using '%s'", ast_command) run_command(ast_command, cwd=opts['directory']) def map_extdefs(triple_arch): """ Generate external definition map file for the current source. """ args = opts['direct_args'] + opts['flags'] extdefmap_command = [opts['ctu'].extdef_map_cmd] extdefmap_command.append(opts['file']) extdefmap_command.append('--') extdefmap_command.extend(args) logging.debug("Generating external definition map using '%s'", extdefmap_command) extdef_src_list = run_command(extdefmap_command, cwd=opts['directory']) extdef_ast_list = extdef_map_list_src_to_ast(extdef_src_list) extern_defs_map_folder = os.path.join(opts['ctu'].dir, triple_arch, CTU_TEMP_DEFMAP_FOLDER) if not os.path.isdir(extern_defs_map_folder): try: os.makedirs(extern_defs_map_folder) except OSError: # In case an other process already created it. pass if extdef_ast_list: with tempfile.NamedTemporaryFile(mode='w', dir=extern_defs_map_folder, delete=False) as out_file: out_file.write("\n".join(extdef_ast_list) + "\n") cwd = opts['directory'] cmd = [opts['clang'], '--analyze'] + opts['direct_args'] + opts['flags'] \ + [opts['file']] triple_arch = get_triple_arch(cmd, cwd) generate_ast(triple_arch) map_extdefs(triple_arch) @require(['ctu']) def dispatch_ctu(opts, continuation=run_analyzer): """ Execute only one phase of 2 phases of CTU if needed. """ ctu_config = opts['ctu'] if ctu_config.collect or ctu_config.analyze: assert ctu_config.collect != ctu_config.analyze if ctu_config.collect: return ctu_collect_phase(opts) if ctu_config.analyze: cwd = opts['directory'] cmd = [opts['clang'], '--analyze'] + opts['direct_args'] \ + opts['flags'] + [opts['file']] triarch = get_triple_arch(cmd, cwd) ctu_options = ['ctu-dir=' + os.path.join(ctu_config.dir, triarch), 'experimental-enable-naive-ctu-analysis=true'] analyzer_options = prefix_with('-analyzer-config', ctu_options) direct_options = prefix_with('-Xanalyzer', analyzer_options) opts['direct_args'].extend(direct_options) return continuation(opts) @require(['flags', 'force_debug']) def filter_debug_flags(opts, continuation=dispatch_ctu): """ Filter out nondebug macros when requested. """ if opts.pop('force_debug'): # lazy implementation just append an undefine macro at the end opts.update({'flags': opts['flags'] + ['-UNDEBUG']}) return continuation(opts) @require(['language', 'compiler', 'file', 'flags']) def language_check(opts, continuation=filter_debug_flags): """ Find out the language from command line parameters or file name extension. The decision also influenced by the compiler invocation. """ accepted = frozenset({ 'c', 'c++', 'objective-c', 'objective-c++', 'c-cpp-output', 'c++-cpp-output', 'objective-c-cpp-output' }) # language can be given as a parameter... language = opts.pop('language') compiler = opts.pop('compiler') # ... 
or find out from source file extension if language is None and compiler is not None: language = classify_source(opts['file'], compiler == 'c') if language is None: logging.debug('skip analysis, language not known') return None elif language not in accepted: logging.debug('skip analysis, language not supported') return None else: logging.debug('analysis, language: %s', language) opts.update({'language': language, 'flags': ['-x', language] + opts['flags']}) return continuation(opts) @require(['arch_list', 'flags']) def arch_check(opts, continuation=language_check): """ Do run analyzer through one of the given architectures. """ disabled = frozenset({'ppc', 'ppc64'}) received_list = opts.pop('arch_list') if received_list: # filter out disabled architectures and -arch switches filtered_list = [a for a in received_list if a not in disabled] if filtered_list: # There should be only one arch given (or the same multiple # times). If there are multiple arch are given and are not # the same, those should not change the pre-processing step. # But that's the only pass we have before run the analyzer. current = filtered_list.pop() logging.debug('analysis, on arch: %s', current) opts.update({'flags': ['-arch', current] + opts['flags']}) return continuation(opts) else: logging.debug('skip analysis, found not supported arch') return None else: logging.debug('analysis, on default arch') return continuation(opts) # To have good results from static analyzer certain compiler options shall be # omitted. The compiler flag filtering only affects the static analyzer run. # # Keys are the option name, value number of options to skip IGNORED_FLAGS = { '-c': 0, # compile option will be overwritten '-fsyntax-only': 0, # static analyzer option will be overwritten '-o': 1, # will set up own output file # flags below are inherited from the perl implementation. '-g': 0, '-save-temps': 0, '-install_name': 1, '-exported_symbols_list': 1, '-current_version': 1, '-compatibility_version': 1, '-init': 1, '-e': 1, '-seg1addr': 1, '-bundle_loader': 1, '-multiply_defined': 1, '-sectorder': 3, '--param': 1, '--serialize-diagnostics': 1 } def classify_parameters(command): """ Prepare compiler flags (filters some and add others) and take out language (-x) and architecture (-arch) flags for future processing. """ result = { 'flags': [], # the filtered compiler flags 'arch_list': [], # list of architecture flags 'language': None, # compilation language, None, if not specified 'compiler': compiler_language(command) # 'c' or 'c++' } # iterate on the compile options args = iter(command[1:]) for arg in args: # take arch flags into a separate basket if arg == '-arch': result['arch_list'].append(next(args)) # take language elif arg == '-x': result['language'] = next(args) # parameters which looks source file are not flags elif re.match(r'^[^-].+', arg) and classify_source(arg): pass # ignore some flags elif arg in IGNORED_FLAGS: count = IGNORED_FLAGS[arg] for _ in range(count): next(args) # we don't care about extra warnings, but we should suppress ones # that we don't want to see. elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg): pass # and consider everything else as compilation flag. else: result['flags'].append(arg) return result
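`analyzer_params()` above assembles plain analyzer flags and then returns `prefix_with('-Xclang', result)` so each flag is forwarded through the clang driver. A small standalone illustration of that interleaving; the helper is copied from the module so the snippet runs by itself, and the example flags are just two of the options the function can emit.

def prefix_with(constant, pieces):
    # Interleave: [c, p1, c, p2, ...] - same helper as defined in the module above.
    return [elem for piece in pieces for elem in [constant, piece]]

analyzer_flags = ['-analyzer-stats', '-analyzer-output=html']
print(prefix_with('-Xclang', analyzer_flags))
# ['-Xclang', '-analyzer-stats', '-Xclang', '-analyzer-output=html']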
1.710938
2
Phase-1/Python Basic 1/Day-3.py
CodedLadiesInnovateTech/python-challenges
11
4032
"""
1. Write a Python program to print the documents (syntax, description etc.) of Python built-in function(s).
Sample function : abs()
Expected Result :
abs(number) -> number
Return the absolute value of the argument.
Tools: help function

2. Write a Python program to print the calendar of a given month and year.
Tools: Use 'calendar' module.

3. Write a Python program to print the following here document.
Sample string : a string that you "don't" have to escape
This
is a ....... multi-line
heredoc string --------> example
Tools: string formatting

4. Write a Python program to calculate number of days between two dates.
Sample dates : (2014, 7, 2), (2014, 7, 11)
Expected output : 9 days
Tools: Datetime module, timedelta module

5. Write a Python program to get the volume of a sphere with radius 6.
Tools: input function, math

6. Write a Python program to get the difference between a given number and 17, if the number is greater than 17 return double the absolute difference.
Tools: abs function, input function, math

7. Write a Python program to test whether a number is within 100 of 1000 or 2000.
Tools: maths, input function

8. Write a Python program to calculate the sum of three given numbers, if the values are equal then return three times of their sum.
Tools: math, input function

9. Write a Python program to get a new string from a given string where "Is" has been added to the front. If the given string already begins with "Is" then return the string unchanged.
Tools: input function, string formatting

10. Write a Python program to get a string which is n (non-negative integer) copies of a given string.
Tools: input function, slicing
"""
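For exercise 4 above, one possible solution sketch using only the standard library; the dates and the expected output come straight from the prompt.

from datetime import date

d1 = date(2014, 7, 2)
d2 = date(2014, 7, 11)
print((d2 - d1).days)  # 9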
3.015625
3
bindings/pydrake/systems/perception.py
RobotLocomotion/drake-python3.7
2
4040
import numpy as np from pydrake.common.value import AbstractValue from pydrake.math import RigidTransform from pydrake.perception import BaseField, Fields, PointCloud from pydrake.systems.framework import LeafSystem def _TransformPoints(points_Ci, X_CiSi): # Make homogeneous copy of points. points_h_Ci = np.vstack((points_Ci, np.ones((1, points_Ci.shape[1])))) return X_CiSi.dot(points_h_Ci)[:3, :] def _TileColors(color, dim): # Need manual broadcasting. return np.tile(np.array([color]).T, (1, dim)) def _ConcatenatePointClouds(points_dict, colors_dict): scene_points = None scene_colors = None for id in points_dict: if scene_points is None: scene_points = points_dict[id] else: scene_points = np.hstack((points_dict[id], scene_points)) if scene_colors is None: scene_colors = colors_dict[id] else: scene_colors = np.hstack((colors_dict[id], scene_colors)) valid_indices = np.logical_not(np.isnan(scene_points)) scene_points = scene_points[:, valid_indices[0, :]] scene_colors = scene_colors[:, valid_indices[0, :]] return scene_points, scene_colors class PointCloudConcatenation(LeafSystem): """ .. pydrake_system:: name: PointCloudConcatenation input_ports: - point_cloud_CiSi_id0 - X_FCi_id0 - ... - point_cloud_CiSi_idN - X_FCi_idN output_ports: - point_cloud_FS """ def __init__(self, id_list, default_rgb=[255., 255., 255.]): """ A system that takes in N point clouds of points Si in frame Ci, and N RigidTransforms from frame Ci to F, to put each point cloud in a common frame F. The system returns one point cloud combining all of the transformed point clouds. Each point cloud must have XYZs. RGBs are optional. If absent, those points will be the provided default color. @param id_list A list containing the string IDs of all of the point clouds. This is often the serial number of the camera they came from, such as "1" for a simulated camera or "805212060373" for a real camera. @param default_rgb A list of length 3 containing the RGB values to use in the absence of PointCloud.rgbs. Values should be between 0 and 255. The default is white. """ LeafSystem.__init__(self) self._point_cloud_ports = {} self._transform_ports = {} self._id_list = id_list self._default_rgb = np.array(default_rgb) output_fields = Fields(BaseField.kXYZs | BaseField.kRGBs) for id in self._id_list: self._point_cloud_ports[id] = self.DeclareAbstractInputPort( "point_cloud_CiSi_{}".format(id), AbstractValue.Make(PointCloud(fields=output_fields))) self._transform_ports[id] = self.DeclareAbstractInputPort( "X_FCi_{}".format(id), AbstractValue.Make(RigidTransform.Identity())) self.DeclareAbstractOutputPort("point_cloud_FS", lambda: AbstractValue.Make( PointCloud(fields=output_fields)), self.DoCalcOutput) def _AlignPointClouds(self, context): points = {} colors = {} for id in self._id_list: point_cloud = self.EvalAbstractInput( context, self._point_cloud_ports[id].get_index()).get_value() X_CiSi = self.EvalAbstractInput( context, self._transform_ports[id].get_index()).get_value() points[id] = _TransformPoints( point_cloud.xyzs(), X_CiSi.GetAsMatrix4()) if point_cloud.has_rgbs(): colors[id] = point_cloud.rgbs() else: colors[id] = _TileColors( self._default_rgb, point_cloud.xyzs().shape[1]) return _ConcatenatePointClouds(points, colors) def DoCalcOutput(self, context, output): scene_points, scene_colors = self._AlignPointClouds(context) output.get_mutable_value().resize(scene_points.shape[1]) output.get_mutable_value().mutable_xyzs()[:] = scene_points output.get_mutable_value().mutable_rgbs()[:] = scene_colors
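`_TransformPoints` above appends a row of ones to make the 3xN point array homogeneous, applies the 4x4 transform, and drops the extra row. A plain-NumPy check of that math, independent of Drake; the transform and points are arbitrary example values.

import numpy as np

# Example 4x4 homogeneous transform: rotate 90 degrees about z, then translate by +1 in x.
X = np.array([[0., -1., 0., 1.],
              [1.,  0., 0., 0.],
              [0.,  0., 1., 0.],
              [0.,  0., 0., 1.]])

points = np.array([[1., 0.],
                   [0., 2.],
                   [0., 0.]])  # 3xN, one point per column

points_h = np.vstack((points, np.ones((1, points.shape[1]))))
print(X.dot(points_h)[:3, :])  # columns map to (1, 1, 0) and (-1, 0, 0)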
1.929688
2
FOR/Analisador-completo/main.py
lucasf5/Python
1
4056
# Python Exercise 56: Write a program that reads the name, age and sex of 4 people.
# At the end, show: the group's average age, the name of the oldest man, and how many
# women are under 20 years old.
mediaidade = ''
nomelista = []
idadelista = []
sexolista = []
homens = []
mulherescommenosde20 = 0
nomedelas = []
# -------------------------------------------------------------------
for i in range(1, 5):
    print(f'PERSON {i}')
    nome = input('Your name: ')
    idade = int(input('Your age: '))
    sexo = int(input('Sex? [0]Male [1]Female: '))

    if sexo == 1 and idade < 20:
        nomedelas.append(nome)
        mulherescommenosde20 += 1
    elif sexo == 0:
        homens.append(nome)

    # Added every age to a list
    idadelista.append(idade)
    # Took the average of those ages // first part
    mediaidade = sum(idadelista) / 4
    # Added every name to a list
    nomelista.append(nome)
# -------------------------------------------------------------------
# Stored in maximo the largest value found in the list
maximo = max(idadelista)
# Stored in indexidade the INDEX of that largest value
indexidade = idadelista.index(maximo)
# Stored in indexnome the name of the person with the highest age
# (note: this is the oldest person overall, not specifically the oldest man asked for)
indexnome = nomelista[indexidade]
# -------------------------------------------------------------------
print(f'The average age is: {mediaidade}')
print(f'The oldest person, at {maximo}, is: {indexnome}')
print(f'Women under 20 years old: {mulherescommenosde20}, namely: {nomedelas}')
2.71875
3
chess_commentary_model/transformers_model/dataset_preprocessing.py
Rseiji/TCC-2020
0
4072
"""Métodos de preprocessamento de testes individuais """ import pandas as pd import numpy as np import math def test_1(df, seed=0): """training: balanced; test: balanced training: 80k (40k 0, 40k 1) test: 20k (10k 0, 10k 1) """ df_ones = df[df['label'] == 1] df_zeros = df[df['label'] == 0] df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:40000] df_zeros_training = df_zeros.loc[:40000] df_ones_test = df_ones.loc[40000:50000] df_zeros_test = df_zeros.loc[40000:50000] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test = pd.concat([df_ones_test, df_zeros_test]) df_test = df_test.sample(frac=1).reset_index(drop=True) sentences_train = df_training['comment'].tolist() sentences_test = df_test['comment'].tolist() labels_train = df_training['label'].tolist() labels_test = df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test def test_2(df, seed=0): """training: balanced; test: unbalanced training: 80k (40k 0, 40k 1) test: 20k (4k 0, 16k 1) """ df_ones = df[df['label'] == 1] df_zeros = df[df['label'] == 0] df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:40000] df_zeros_training = df_zeros.loc[:40000] df_ones_test = df_ones.loc[40000:44000] df_zeros_test = df_zeros.loc[40000:56000] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test = pd.concat([df_ones_test, df_zeros_test]) df_test = df_test.sample(frac=1).reset_index(drop=True) sentences_train = df_training['comment'].tolist() sentences_test = df_test['comment'].tolist() labels_train = df_training['label'].tolist() labels_test = df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test def test_3(df, seed=0): """training: unbalanced; test: unbalanced training: 80k (16k 1, 64k 0) test: 20k (4k 1, 16k 0) """ df_ones = df[df['label'] == 1] df_zeros = df[df['label'] == 0] df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:16000] df_zeros_training = df_zeros.loc[:64000] df_ones_test = df_ones.loc[16000:20000] df_zeros_test = df_zeros.loc[64000:80000] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test = pd.concat([df_ones_test, df_zeros_test]) df_test = df_test.sample(frac=1).reset_index(drop=True) sentences_train = df_training['comment'].tolist() sentences_test = df_test['comment'].tolist() labels_train = df_training['label'].tolist() labels_test = df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test ################################## ## Tests on old dataset ################################## def test_4(df, seed=0): """ training: balanced; test: balanced training: 58k (29k 0, 29k 1) test: 14.5k (7.25k 0, 7.25k 1) """ df_ones = df[df['label'] == 1] df_zeros = df[df['label'] == 0] df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:29000] df_zeros_training = 
df_zeros.loc[:29000] df_ones_test = df_ones.loc[29000:36250] df_zeros_test = df_zeros.loc[29000:36250] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test = pd.concat([df_ones_test, df_zeros_test]) df_test = df_test.sample(frac=1).reset_index(drop=True) sentences_train = df_training['comment'].tolist() sentences_test = df_test['comment'].tolist() labels_train = df_training['label'].tolist() labels_test = df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test def test_5(df, seed=0): """training: balanced; test: unbalanced training: 58k (29000 0, 29000 1) test: 14.5k (12905 0, 1595 1) """ df_ones = df[df['label'] == 1] df_zeros = df[df['label'] == 0] df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:29000] df_zeros_training = df_zeros.loc[:29000] df_ones_test = df_ones.loc[29000:30595] df_zeros_test = df_zeros.loc[29000:41905] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test = pd.concat([df_ones_test, df_zeros_test]) df_test = df_test.sample(frac=1).reset_index(drop=True) sentences_train = df_training['comment'].tolist() sentences_test = df_test['comment'].tolist() labels_train = df_training['label'].tolist() labels_test = df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test def test_6(df, seed=0): """training: unbalanced; test: unbalanced training: 58k (6380 1, 51620 0) test: 14.5k (1595 1, 12905 0) """ df_ones = df[df['label'] == 1] df_zeros = df[df['label'] == 0] df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True) df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True) df_ones_training = df_ones.loc[:6380] df_zeros_training = df_zeros.loc[:51620] df_ones_test = df_ones.loc[6380:7975] df_zeros_test = df_zeros.loc[51620:64525] df_training = pd.concat([df_ones_training, df_zeros_training]) df_training = df_training.sample(frac=1).reset_index(drop=True) df_test = pd.concat([df_ones_test, df_zeros_test]) df_test = df_test.sample(frac=1).reset_index(drop=True) sentences_train = df_training['comment'].tolist() sentences_test = df_test['comment'].tolist() labels_train = df_training['label'].tolist() labels_test = df_test['label'].tolist() return sentences_train, sentences_test, labels_train, labels_test
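All of the helpers above follow the same recipe: split the frame by label, shuffle each half with a fixed seed, slice train/test blocks with `.loc`, then concatenate and reshuffle. Below is a toy run of that recipe on a small frame with the same 'comment'/'label' schema. One caveat worth noting about the originals: pandas `.loc` slicing is inclusive on both endpoints, so adjacent slices such as `.loc[:40000]` and `.loc[40000:50000]` share the row at index 40000.

import pandas as pd

df = pd.DataFrame({
    "comment": [f"move {i}" for i in range(10)],
    "label":   [0, 1] * 5,
})

ones = df[df["label"] == 1].sample(frac=1, random_state=0).reset_index(drop=True)
zeros = df[df["label"] == 0].sample(frac=1, random_state=0).reset_index(drop=True)

# First 4 rows of each class for training, the last row of each for testing (non-overlapping slices).
train = pd.concat([ones.loc[:3], zeros.loc[:3]]).sample(frac=1, random_state=0).reset_index(drop=True)
test = pd.concat([ones.loc[4:], zeros.loc[4:]]).sample(frac=1, random_state=0).reset_index(drop=True)
print(len(train), len(test))  # 8 2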
2.46875
2
get_data/speech_commands.py
patrick-kidger/generalised_shapelets
32
4080
import os import pathlib import sklearn.model_selection import tarfile import torch import torchaudio import urllib.request here = pathlib.Path(__file__).resolve().parent def _split_data(tensor, stratify): # 0.7/0.15/0.15 train/val/test split (train_tensor, testval_tensor, train_stratify, testval_stratify) = sklearn.model_selection.train_test_split(tensor, stratify, train_size=0.7, random_state=0, shuffle=True, stratify=stratify) val_tensor, test_tensor = sklearn.model_selection.train_test_split(testval_tensor, train_size=0.5, random_state=1, shuffle=True, stratify=testval_stratify) return train_tensor, val_tensor, test_tensor def _save_data(dir, **tensors): for tensor_name, tensor_value in tensors.items(): torch.save(tensor_value, str(dir / tensor_name) + '.pt') def download(): base_base_loc = str(here / '../experiments/data') if not os.path.exists(base_base_loc): raise RuntimeError("data directory does not exist. Please create a directory called 'data' in the 'experiments'" " directory. (We're going to put a lot of data there, so we don't make it automatically - " "thus giving you the opportunity to make it a symlink rather than a normal directory, so " "that the data can be stored elsewhere if you wish.)") base_loc = base_base_loc + '/SpeechCommands' loc = base_loc + '/speech_commands.tar.gz' if os.path.exists(loc): return if not os.path.exists(base_loc): os.mkdir(base_loc) urllib.request.urlretrieve('http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz', loc) with tarfile.open(loc, 'r') as f: f.extractall(base_loc) def _process_data(): base_loc = here / '..' / 'experiments' / 'data' / 'SpeechCommands' X = torch.empty(34975, 16000, 1) y = torch.empty(34975, dtype=torch.long) batch_index = 0 y_index = 0 for foldername in ('yes', 'no', 'up', 'down', 'left', 'right', 'on', 'off', 'stop', 'go'): loc = base_loc / foldername for filename in os.listdir(loc): audio, _ = torchaudio.load_wav(loc / filename, channels_first=False, normalization=False) # for forward compatbility if they fix it audio = audio / 2 ** 15 # Normalization argument doesn't seem to work so we do it manually. # A few samples are shorter than the full length; for simplicity we discard them. if len(audio) != 16000: continue X[batch_index] = audio y[batch_index] = y_index batch_index += 1 y_index += 1 assert batch_index == 34975, "batch_index is {}".format(batch_index) audio_X = X # X is of shape (batch=34975, length=16000, channels=1) X = torchaudio.transforms.MFCC(log_mels=True)(X.squeeze(-1)).transpose(1, 2).detach() # X is of shape (batch=34975, length=81, channels=40). For some crazy reason it requires a gradient, so detach. train_X, _, _ = _split_data(X, y) out = [] means = [] stds = [] for Xi, train_Xi in zip(X.unbind(dim=-1), train_X.unbind(dim=-1)): mean = train_Xi.mean() std = train_Xi.std() means.append(mean) stds.append(std) out.append((Xi - mean) / (std + 1e-5)) X = torch.stack(out, dim=-1) train_audio_X, val_audio_X, test_audio_X = _split_data(audio_X, y) train_X, val_X, test_X = _split_data(X, y) train_y, val_y, test_y = _split_data(y, y) return train_X, val_X, test_X, train_y, val_y, test_y, torch.stack(means), torch.stack(stds), train_audio_X, \ val_audio_X, test_audio_X def main(): download() (train_X, val_X, test_X, train_y, val_y, test_y, means, stds, train_audio_X, val_audio_X, test_audio_X) = _process_data() loc = here / '..' 
/ 'experiments' / 'data' / 'speech_commands_data' if not os.path.exists(loc): os.mkdir(loc) _save_data(loc, train_X=train_X, val_X=val_X, test_X=test_X, train_y=train_y, val_y=val_y, test_y=test_y, means=means, stds=stds, train_audio_X=train_audio_X, val_audio_X=val_audio_X, test_audio_X=test_audio_X) if __name__ == '__main__': main()
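`_split_data` above performs the 0.7/0.15/0.15 split with two stratified calls to `sklearn.model_selection.train_test_split`. The same two-step pattern on toy arrays; the data and labels here are made up for illustration.

import numpy as np
import sklearn.model_selection

X = np.arange(20).reshape(20, 1)
y = np.array([0, 1] * 10)

# Step 1: 70% train, 30% held out, stratified on the labels.
X_train, X_rest, y_train, y_rest = sklearn.model_selection.train_test_split(
    X, y, train_size=0.7, random_state=0, shuffle=True, stratify=y)
# Step 2: split the held-out 30% evenly into validation and test.
X_val, X_test = sklearn.model_selection.train_test_split(
    X_rest, train_size=0.5, random_state=1, shuffle=True, stratify=y_rest)
print(len(X_train), len(X_val), len(X_test))  # 14 3 3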
1.765625
2
python/snewpy/snowglobes.py
svalder/snewpy
0
4088
# -*- coding: utf-8 -*- """The ``snewpy.snowglobes`` module contains functions for interacting with SNOwGLoBES. `SNOwGLoBES <https://github.com/SNOwGLoBES/snowglobes>`_ can estimate detected event rates from a given input supernova neutrino flux. It supports many different neutrino detectors, detector materials and interaction channels. There are three basic steps to using SNOwGLoBES from SNEWPY: * **Generating input files for SNOwGLoBES:** There are two ways to do this, either generate a time series or a fluence file. This is done taking as input the supernova simulation model. The first will evaluate the neutrino flux at each time step, the latter will compute the integrated neutrino flux (fluence) in the time bin. The result is a compressed .tar file containing all individual input files. * **Running SNOwGLoBES:** This step convolves the fluence generated in the previous step with the cross-sections for the interaction channels happening in various detectors supported by SNOwGLoBES. It takes into account the effective mass of the detector as well as a smearing matrix describing the energy-dependent detection efficiency. The output gives the number of events detected as a function of energy for each interaction channel, integrated in a given time window (or time bin), or in a snapshot in time. * **Collating SNOwGLoBES outputs:** This step puts together all the interaction channels and time bins evaluated by SNOwGLoBES in a single file (for each detector and for each time bin). The output tables allow to build the detected neutrino energy spectrum and neutrino time distribution, for each reaction channel or the sum of them. """ import io import logging import os import re import tarfile from pathlib import Path from tempfile import TemporaryDirectory import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np from astropy import units as u from tqdm.auto import tqdm import snewpy.models from snewpy.flavor_transformation import * from snewpy.neutrino import Flavor, MassHierarchy from snewpy.snowglobes_interface import SNOwGLoBES logger = logging.getLogger(__name__) def generate_time_series(model_path, model_type, transformation_type, d, output_filename=None, ntbins=30, deltat=None): """Generate time series files in SNOwGLoBES format. This version will subsample the times in a supernova model, produce energy tables expected by SNOwGLoBES, and compress the output into a tarfile. Parameters ---------- model_path : str Input file containing neutrino flux information from supernova model. model_type : str Format of input file. Matches the name of the corresponding class in :py:mod:`snewpy.models`. transformation_type : str Name of flavor transformation. See snewpy.flavor_transformation documentation for possible values. d : int or float Distance to supernova in kpc. output_filename : str or None Name of output file. If ``None``, will be based on input file name. ntbins : int Number of time slices. Will be ignored if ``deltat`` is also given. deltat : astropy.Quantity or None Length of time slices. Returns ------- str Path of compressed .tar file with neutrino flux data. """ model_class = getattr(snewpy.models.ccsn, model_type) # Choose flavor transformation. Use dict to associate the transformation name with its class. 
flavor_transformation_dict = {'NoTransformation': NoTransformation(), 'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL), 'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED), 'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL), 'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED), 'TwoFlavorDecoherence': TwoFlavorDecoherence(), 'ThreeFlavorDecoherence': ThreeFlavorDecoherence(), 'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL), 'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED)} flavor_transformation = flavor_transformation_dict[transformation_type] model_dir, model_file = os.path.split(os.path.abspath(model_path)) snmodel = model_class(model_path) # Subsample the model time. Default to 30 time slices. tmin = snmodel.get_time()[0] tmax = snmodel.get_time()[-1] if deltat is not None: dt = deltat ntbins = int((tmax-tmin)/dt) else: dt = (tmax - tmin) / (ntbins+1) tedges = np.arange(tmin/u.s, tmax/u.s, dt/u.s)*u.s times = 0.5*(tedges[1:] + tedges[:-1]) # Generate output. if output_filename is not None: tfname = output_filename + 'kpc.tar.bz2' else: model_file_root, _ = os.path.splitext(model_file) # strip extension (if present) tfname = model_file_root + '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(tmin, tmax, ntbins, d) + 'kpc.tar.bz2' with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as tf: #creates file in tar archive that gives information on parameters output = '\n'.join(map(str, transformation_type)).encode('ascii') tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output)) MeV = 1.60218e-6 * u.erg energy = np.linspace(0, 100, 501) * MeV # 1MeV # Loop over sampled times. for i, t in enumerate(times): osc_spectra = snmodel.get_transformed_spectra(t, energy, flavor_transformation) osc_fluence = {} table = [] table.append('# TBinMid={:g}sec TBinWidth={:g}s EBinWidth=0.2MeV Fluence at Earth for this timebin in neutrinos per cm^2'.format(t, dt)) table.append('# E(GeV) NuE NuMu NuTau aNuE aNuMu aNuTau') # Generate energy + number flux table. for j, E in enumerate(energy): for flavor in Flavor: osc_fluence[flavor] = osc_spectra[flavor][j] * dt * 0.2 * MeV / (4.*np.pi*(d*1000*3.086e+18)**2) s = '{:17.8E}'.format(E/(1e3 * MeV)) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) table.append(s) logging.debug(s) # Encode energy/flux table and output to file in tar archive. output = '\n'.join(table).encode('ascii') extension = ".dat" model_file_root, _ = os.path.splitext(model_file) filename = model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type + \ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(tmin/u.s, tmax/u.s, ntbins, d, extension) info = tarfile.TarInfo(name=filename) info.size = len(output) tf.addfile(info, io.BytesIO(output)) return os.path.join(model_dir, tfname) def generate_fluence(model_path, model_type, transformation_type, d, output_filename=None, tstart=None, tend=None): """Generate fluence files in SNOwGLoBES format. This version will subsample the times in a supernova model, produce energy tables expected by SNOwGLoBES, and compress the output into a tarfile. Parameters ---------- model_path : str Input file containing neutrino flux information from supernova model. 
model_type : str Format of input file. Matches the name of the corresponding class in :py:mod:`snewpy.models`. transformation_type : str Name of flavor transformation. See snewpy.flavor_transformation documentation for possible values. d : int or float Distance to supernova in kpc. output_filename : str or None Name of output file. If ``None``, will be based on input file name. tstart : astropy.Quantity or None Start of time interval to integrate over, or list of start times of the time series bins. tend : astropy.Quantity or None End of time interval to integrate over, or list of end times of the time series bins. Returns ------- str Path of compressed .tar file with neutrino flux data. """ model_class = getattr(snewpy.models.ccsn, model_type) # Choose flavor transformation. Use dict to associate the transformation name with its class. flavor_transformation_dict = {'NoTransformation': NoTransformation(), 'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL), 'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED), 'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL), 'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED), 'TwoFlavorDecoherence': TwoFlavorDecoherence(), 'ThreeFlavorDecoherence': ThreeFlavorDecoherence(), 'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL), 'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED)} flavor_transformation = flavor_transformation_dict[transformation_type] model_dir, model_file = os.path.split(os.path.abspath(model_path)) snmodel = model_class(model_path) #set the timings up #default if inputs are None: full time window of the model if tstart is None: tstart = snmodel.get_time()[0] tend = snmodel.get_time()[-1] try: if len(tstart/u.s) > 0: t0 = tstart[0] t1 = tend[-1] nbin = len(tstart/u.s) except: t0 = tstart t1 = tend nbin = 1 times = 0.5*(tstart + tend) model_times = snmodel.get_time() model_tstart = model_times*1.0 model_tend = model_times*1.0 model_tstart[0] = model_times[0] for i in range(1, len(model_times), 1): model_tstart[i] = 0.5*(model_times[i]+model_times[i-1]) model_tend[i-1] = model_tstart[i] model_tend[len(model_times)-1] = model_times[-1] if nbin > 1: starting_index = np.zeros(len(times), dtype=np.int64) ending_index = np.zeros(len(times), dtype=np.int64) for i in range(len(tstart)): starting_index[i] = next(j for j, t in enumerate(model_tend) if t > tstart[i]) ending_index[i] = next(j for j, t in enumerate(model_tend) if t >= tend[i]) else: starting_index = [next(j for j, t in enumerate(model_tend) if t > tstart)] ending_index = [next(j for j, t in enumerate(model_tend) if t >= tend)] # Generate output. if output_filename is not None: tfname = output_filename+'.tar.bz2' else: model_file_root, _ = os.path.splitext(model_file) # strip extension (if present) tfname = model_file_root + '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(t0, t1, nbin, d) + 'kpc.tar.bz2' with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as tf: #creates file in tar archive that gives information on parameters output = '\n'.join(map(str, transformation_type)).encode('ascii') tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output)) MeV = 1.60218e-6 * u.erg energy = np.linspace(0, 100, 501) * MeV # Loop over sampled times. 
for i in range(nbin): if nbin > 1: ta = tstart[i] tb = tend[i] t = times[i] dt = tb-ta else: ta = tstart tb = tend t = times dt = tb-ta #first time bin of model in requested interval osc_spectra = snmodel.get_transformed_spectra(model_times[starting_index[i]], energy, flavor_transformation) if dt < model_tend[starting_index[i]]-ta: dt = dt else: for flavor in Flavor: osc_spectra[flavor] *= (model_tend[starting_index[i]]-ta) #intermediate time bins of model in requested interval for j in range(starting_index[i]+1, ending_index[i], 1): temp_spectra = snmodel.get_transformed_spectra(model_times[j], energy, flavor_transformation) for flavor in Flavor: osc_spectra[flavor] += temp_spectra[flavor]*(model_tend[j]-model_tstart[j]) #last time bin of model in requested interval temp_spectra = snmodel.get_transformed_spectra( model_times[ending_index[i]], energy, flavor_transformation) for flavor in Flavor: osc_spectra[flavor] += temp_spectra[flavor]*(tb-model_tstart[ending_index[i]]) for flavor in Flavor: osc_spectra[flavor] /= (tb-ta) osc_fluence = {} table = [] table.append('# TBinMid={:g}sec TBinWidth={:g}s EBinWidth=0.2MeV Fluence at Earth for this timebin in neutrinos per cm^2'.format(t, dt)) table.append('# E(GeV) NuE NuMu NuTau aNuE aNuMu aNuTau') # Generate energy + number flux table. for j, E in enumerate(energy): for flavor in Flavor: osc_fluence[flavor] = osc_spectra[flavor][j] * dt * 0.2 * MeV / (4.*np.pi*(d*1000*3.086e+18)**2) s = '{:17.8E}'.format(E/(1e3 * MeV)) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR]) table.append(s) logging.debug(s) # Encode energy/flux table and output to file in tar archive. output = '\n'.join(table).encode('ascii') extension = ".dat" if output_filename is not None: if nbin > 1: filename = output_filename+"_"+str(i)+extension else: filename = output_filename+extension else: model_file_root, _ = os.path.splitext(model_file) # strip extension (if present) filename = model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type + \ '.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(t0, t1, nbin, d, extension) info = tarfile.TarInfo(name=filename) info.size = len(output) tf.addfile(info, io.BytesIO(output)) return os.path.join(model_dir, tfname) def simulate(SNOwGLoBESdir, tarball_path, detector_input="all", verbose=False): """Takes as input the neutrino flux files and configures and runs the supernova script inside SNOwGLoBES, which outputs calculated event rates expected for a given (set of) detector(s). These event rates are given as a function of the neutrino energy and time, for each interaction channel. Parameters ---------- SNOwGLoBESdir : str Path to directory where SNOwGLoBES is installed. tarball_path : str Path of compressed .tar file produced e.g. by ``generate_time_series()`` or ``generate_fluence()``. detector_input : str Name of detector. If ``"all"``, will use all detectors supported by SNOwGLoBES. verbose : bool Whether to generate verbose output, e.g. for debugging. 
""" sng = SNOwGLoBES(SNOwGLoBESdir) if detector_input == 'all': detector_input = list(sng.detectors) detector_input.remove('d2O') elif isinstance(detector_input,str): detector_input = [detector_input] result = {} #Extracts data from tarfile and sets up lists of paths and fluxfilenames for later use with TemporaryDirectory(prefix='snowglobes') as tempdir: with tarfile.open(tarball_path) as tar: tar.extractall(tempdir) flux_files = list(Path(tempdir).glob('*.dat')) if len(detector_input)>0: detector_input = tqdm(detector_input, desc='Detectors', leave=False) for det in detector_input: res=sng.run(flux_files, det) result[det]=dict(zip((f.stem for f in flux_files),res)) # save result to file for re-use in collate() cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy' logging.info(f'Saving simulation results to {cache_file}') np.save(cache_file, result) return result re_chan_label = re.compile(r'nu(e|mu|tau)(bar|)_([A-Z][a-z]*)(\d*)_?(.*)') def get_channel_label(c): mapp = {'nc':'NeutralCurrent', 'ibd':'Inverse Beta Decay', 'e':r'${\nu}_x+e^-$'} def gen_label(m): flv,bar,Nuc,num,res = m.groups() if flv!='e': flv='\\'+flv if bar: bar='\\'+bar s = f'${bar}{{\\nu}}_{flv}$ '+f'${{}}^{{{num}}}{Nuc}$ '+res return s if c in mapp: return mapp[c] else: return re_chan_label.sub(gen_label, c) def collate(SNOwGLoBESdir, tarball_path, detector_input="all", skip_plots=False, verbose=False, remove_generated_files=True): """Collates SNOwGLoBES output files and generates plots or returns a data table. Parameters ---------- SNOwGLoBESdir : str Path to directory where SNOwGLoBES is installed. tarball_path : str Path of compressed .tar file produced e.g. by ``generate_time_series()`` or ``generate_fluence()``. detector_input : str Name of detector. If ``"all"``, will use all detectors supported by SNOwGLoBES. skip_plots: bool If False, it gives as output the plot of the energy distribution for each time bin and for each interaction channel. verbose : bool Whether to generate verbose output, e.g. for debugging. remove_generated_files: bool Remove the output files from SNOwGLoBES, collated files, and .png's made for this snewpy run. Returns ------- dict Dictionary of data tables: One table per time bin; each table contains in the first column the energy bins, in the remaining columns the number of events for each interaction channel in the detector. 
""" def aggregate_channels(table, **patterns): #rearrange the table to have only channel column levels = list(table.columns.names) levels.remove('channel') t = table.stack(levels) for name,pattern in patterns.items(): #get channels which contain `like` t_sel = t.filter(like=pattern) #sum over them and save to a separate column t_agg = t_sel.sum(axis='columns') #drop processed channels t.drop(t_sel.columns, axis='columns',inplace=True) t[name]=t_agg #fill the column #return table with the original levels order t = t.unstack(levels) t = t.reorder_levels(table.columns.names, axis=1) return t def do_plot(table, params): #plotting the events from given table flux,det,weighted,smeared = params for c in table.columns: if table[c].max() > 0.1: plt.plot(table[c],drawstyle='steps',label=get_channel_label(c), lw=1) plt.xlim(right=0.10) plt.ylim(bottom=0.10) plt.yscale('log') plt.legend(bbox_to_anchor=(0.5, 0.5, 0.5, 0.5), loc='best', borderaxespad=0) # formats complete graph smear_title = 'Interaction' if smeared=='unsmeared' else 'Detected' plt.title(f'{flux} {det.capitalize()} {weighted.capitalize()} {smear_title} Events') if smeared=='smeared': plt.xlabel('Detected Energy (GeV)') plt.ylabel('Events') else: plt.xlabel('Neutrino Energy (GeV)') plt.ylabel('Interaction Events') #read the results from storage cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy' logging.info(f'Reading tables from {cache_file}') tables = np.load(cache_file, allow_pickle=True).tolist() #This output is similar to what produced by: #tables = simulate(SNOwGLoBESdir, tarball_path,detector_input) #dict for old-style results, for backward compatibiity results = {} #save collated files: with TemporaryDirectory(prefix='snowglobes') as tempdir: tempdir = Path(tempdir) for det in tables: results[det] = {} for flux,t in tables[det].items(): t = aggregate_channels(t,nc='nc_',e='_e') for w in ['weighted','unweighted']: for s in ['smeared','unsmeared']: table = t[w][s] filename_base = f'{flux}_{det}_events_{s}_{w}' filename = tempdir/f'Collated_{filename_base}.dat' #save results to text files with open(filename,'w') as f: f.write(table.to_string(float_format='%23.15g')) #format the results for the output header = 'Energy '+' '.join(list(table.columns)) data = table.to_numpy().T index = table.index.to_numpy() data = np.concatenate([[index],data]) results[filename.name] = {'header':header,'data':data} #optionally plot the results if skip_plots is False: plt.figure(dpi=300) do_plot(table,(flux,det,w,s)) filename = tempdir/f'{filename_base}_log_plot.png' plt.savefig(filename.with_suffix('.png'), dpi=300, bbox_inches='tight') #Make a tarfile with the condensed data files and plots output_name = Path(tarball_path).stem output_name = output_name[:output_name.rfind('.tar')]+'_SNOprocessed' output_path = Path(tarball_path).parent/(output_name+'.tar.gz') with tarfile.open(output_path, "w:gz") as tar: for file in tempdir.iterdir(): tar.add(file,arcname=output_name+'/'+file.name) logging.info(f'Created archive: {output_path}') return results
2.1875
2
tests/resources/mlflow-test-plugin/mlflow_test_plugin/file_store.py
iPieter/kiwi
0
4120
from six.moves import urllib

from kiwi.store.tracking.file_store import FileStore


class PluginFileStore(FileStore):
    """FileStore provided through entrypoints system"""

    def __init__(self, store_uri=None, artifact_uri=None):
        path = urllib.parse.urlparse(store_uri).path if store_uri else None
        self.is_plugin = True
        super(PluginFileStore, self).__init__(path, artifact_uri)
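

# Illustrative sketch (an addition, not part of the original plugin module):
# how this store behaves when handed a plugin-scheme tracking URI. The
# "file-plugin" scheme below is a hypothetical example; only the path
# component of the URI is passed on to FileStore.
if __name__ == "__main__":
    store = PluginFileStore(store_uri="file-plugin:/tmp/mlruns", artifact_uri=None)
    print(store.is_plugin)  # True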
1.203125
1
tests/test_dsl.py
goodreferences/ElasticQuery
0
4136
# ElasticQuery
# File: tests/test_dsl.py
# Desc: tests for ElasticQuery DSL objects (Filter, Query, Aggregate)

from os import path
from unittest import TestCase

from jsontest import JsonTest

from elasticquery import Query, Aggregate, Suggester
from elasticquery.exceptions import (
    NoQueryError, NoAggregateError, NoSuggesterError, MissingArgError
)

from .util import assert_equal

CLASS_NAMES = {
    '_query': Query
}


def _test_query(self, query, test_name, test_data):
    method = getattr(query, test_name)

    def parse_arg(arg):
        if isinstance(arg, list):
            return [parse_arg(a) for a in arg]
        else:
            return (
                CLASS_NAMES[arg](arg, {})
                if (isinstance(arg, basestring) and arg.startswith('_'))
                else arg
            )

    args = test_data.get('args', [])
    args = parse_arg(args)

    kwargs = test_data.get('kwargs', {})
    kwargs = {
        k: parse_arg(v) if isinstance(v, list) else parse_arg(v)
        for k, v in kwargs.iteritems()
    }

    output = method(*args, **kwargs).dict()
    assert_equal(self, output, test_data['output'])


class TestQueries(TestCase):
    __metaclass__ = JsonTest

    jsontest_files = path.join('tests', 'queries')
    jsontest_function = lambda self, test_name, test_data: (
        _test_query(self, Query, test_name, test_data)
    )


class TestAggregates(TestCase):
    __metaclass__ = JsonTest

    jsontest_files = path.join('tests', 'aggregates')
    jsontest_function = lambda self, test_name, test_data: (
        _test_query(self, Aggregate, test_name, test_data)
    )


class TestSuggesters(TestCase):
    __metaclass__ = JsonTest

    jsontest_files = path.join('tests', 'suggesters')
    jsontest_function = lambda self, test_name, test_data: (
        _test_query(self, Suggester, test_name, test_data)
    )


class TestFails(TestCase):
    def test_no_query(self):
        with self.assertRaises(NoQueryError):
            Query.doesnotexist()

    def test_no_aggregate(self):
        with self.assertRaises(NoAggregateError):
            Aggregate.doesnotexist()

    def test_no_suggester(self):
        with self.assertRaises(NoSuggesterError):
            Suggester.doesnotexist()

    def test_missing_arg(self):
        with self.assertRaises(MissingArgError):
            Query.term(None)

    def test_invalid_arg(self):
        # Test passing not a list
        with self.assertRaises(ValueError):
            Query.bool(must=set())

        # And now an invalid list
        with self.assertRaises(ValueError):
            Query.bool(must=[None])

        # And now an invalid list
        with self.assertRaises(ValueError):
            Query.bool(must=[Aggregate.terms('test', 'test')])

        # And now an invalid list
        with self.assertRaises(ValueError):
            Query.range('field', gte=['error'])

        # Empty list should be OK/ignored
        Query.bool(must=[])
1.5625
2
esercizi/areaSottesaCompareNumPy.py
gdv/python-alfabetizzazione
0
4144
import numpy as np
import timeit


def effe(x):
    y = -x * (x - 1.0)
    return y


numIntervalli = input('inserire il numero di intervalli in [0.0, 1.0] ')
deltaIntervallo = 1.0 / float(numIntervalli)
print "larghezza intervallo", deltaIntervallo

start = timeit.default_timer()

xIntervalli = []
yIntervalli = []
i = 0
while i < numIntervalli:
    xIntervallo = i*deltaIntervallo
    xIntervalli.append(xIntervallo)
    yIntervalli.append(effe(xIntervallo))
    i += 1

areaSottesa = 0.0
for altezza in yIntervalli:
    areaSottesa += altezza * deltaIntervallo
endOld = timeit.default_timer()
print "l'area sottesa dalla curva vale ", areaSottesa

xNPIntervalli = np.linspace(0.0, 1.0, numIntervalli, endpoint=False)
yNPIntervalli = -xNPIntervalli * (xNPIntervalli - 1.0)
npArea = np.sum(yNPIntervalli*deltaIntervallo)
endNP = timeit.default_timer()

# print xNPIntervalli
# print xIntervalli
# print yNPIntervalli
# print yIntervalli
print "area numpy = ", npArea
print "old timing = ", endOld - start, "numPy timing = ", endNP - endOld
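
# Illustrative cross-check (an addition, not in the original script): the exact
# integral of -x*(x-1) on [0, 1] is 1/6, so both sums above should approach
# 0.1666... as numIntervalli grows. numpy's trapezoidal rule gives the same check:
# print "area trapezi = ", np.trapz(yNPIntervalli, xNPIntervalli)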
2.109375
2
esperanto_analyzer/web/__init__.py
fidelisrafael/esperanto-analyzer
18
4192
from .api.server import run_app
0.15625
0
test/test_cursor_binding.py
rhlahuja/snowflake-connector-python
0
4200
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (c) 2012-2018 Snowflake Computing Inc. All right reserved. # import pytest from snowflake.connector.errors import (ProgrammingError) def test_binding_security(conn_cnx, db_parameters): """ SQL Injection Tests """ try: with conn_cnx() as cnx: cnx.cursor().execute( "CREATE OR REPLACE TABLE {name} " "(aa INT, bb STRING)".format( name=db_parameters['name'])) cnx.cursor().execute( "INSERT INTO {name} VALUES(%s, %s)".format( name=db_parameters['name']), (1, 'test1')) cnx.cursor().execute( "INSERT INTO {name} VALUES(%(aa)s, %(bb)s)".format( name=db_parameters['name']), {'aa': 2, 'bb': 'test2'}) for rec in cnx.cursor().execute( "SELECT * FROM {name} ORDER BY 1 DESC".format( name=db_parameters['name'])): break assert rec[0] == 2, 'First column' assert rec[1] == 'test2', 'Second column' for rec in cnx.cursor().execute( "SELECT * FROM {name} WHERE aa=%s".format( name=db_parameters['name']), (1,)): break assert rec[0] == 1, 'First column' assert rec[1] == 'test1', 'Second column' # SQL injection safe test # Good Example with pytest.raises(ProgrammingError): cnx.cursor().execute( "SELECT * FROM {name} WHERE aa=%s".format( name=db_parameters['name']), ("1 or aa>0",)) with pytest.raises(ProgrammingError): cnx.cursor().execute( "SELECT * FROM {name} WHERE aa=%(aa)s".format( name=db_parameters['name']), {"aa": "1 or aa>0"}) # Bad Example in application. DON'T DO THIS c = cnx.cursor() c.execute("SELECT * FROM {name} WHERE aa=%s".format( name=db_parameters['name']) % ("1 or aa>0",)) rec = c.fetchall() assert len(rec) == 2, "not raising error unlike the previous one." finally: with conn_cnx() as cnx: cnx.cursor().execute( "drop table if exists {name}".format( name=db_parameters['name'])) def test_binding_list(conn_cnx, db_parameters): """ SQL binding list type for IN """ try: with conn_cnx() as cnx: cnx.cursor().execute( "CREATE OR REPLACE TABLE {name} " "(aa INT, bb STRING)".format( name=db_parameters['name'])) cnx.cursor().execute( "INSERT INTO {name} VALUES(%s, %s)".format( name=db_parameters['name']), (1, 'test1')) cnx.cursor().execute( "INSERT INTO {name} VALUES(%(aa)s, %(bb)s)".format( name=db_parameters['name']), {'aa': 2, 'bb': 'test2'}) cnx.cursor().execute( "INSERT INTO {name} VALUES(3, 'test3')".format( name=db_parameters['name'])) for rec in cnx.cursor().execute(""" SELECT * FROM {name} WHERE aa IN (%s) ORDER BY 1 DESC """.format(name=db_parameters['name']), ([1, 3],)): break assert rec[0] == 3, 'First column' assert rec[1] == 'test3', 'Second column' for rec in cnx.cursor().execute( "SELECT * FROM {name} WHERE aa=%s".format( name=db_parameters['name']), (1,)): break assert rec[0] == 1, 'First column' assert rec[1] == 'test1', 'Second column' rec = cnx.cursor().execute(""" SELECT * FROM {name} WHERE aa IN (%s) ORDER BY 1 DESC """.format(name=db_parameters['name']), ((1,),)) finally: with conn_cnx() as cnx: cnx.cursor().execute( "drop table if exists {name}".format( name=db_parameters['name'])) def test_unsupported_binding(conn_cnx, db_parameters): """ Unsupported data binding """ try: with conn_cnx() as cnx: cnx.cursor().execute( "CREATE OR REPLACE TABLE {name} " "(aa INT, bb STRING)".format( name=db_parameters['name'])) cnx.cursor().execute( "INSERT INTO {name} VALUES(%s, %s)".format( name=db_parameters['name']), (1, 'test1')) sql = 'select count(*) from {name} where aa=%s'.format( name=db_parameters['name']) with cnx.cursor() as cur: rec = cur.execute(sql, (1,)).fetchone() assert rec[0] is not None, 'no value is returned' # dict with 
pytest.raises(ProgrammingError): cnx.cursor().execute(sql, ({'value': 1},)) finally: with conn_cnx() as cnx: cnx.cursor().execute( "drop table if exists {name}".format( name=db_parameters['name']))
1.6875
2
gogapi/api.py
tikki/pygogapi
23
4216
import json import re import logging import html.parser import zlib import requests from gogapi import urls from gogapi.base import NotAuthorizedError, logger from gogapi.product import Product, Series from gogapi.search import SearchResult DEBUG_JSON = False GOGDATA_RE = re.compile(r"gogData\.?(.*?) = (.+);") CLIENT_VERSION = "1.2.17.9" # Just for their statistics USER_AGENT = "GOGGalaxyClient/{} pygogapi/0.1".format(CLIENT_VERSION) REQUEST_RETRIES = 3 PRODUCT_EXPANDABLE = [ "downloads", "expanded_dlcs", "description", "screenshots", "videos", "related_products", "changelog" ] USER_EXPANDABLE = ["friendStatus", "wishlistStatus", "blockedStatus"] LOCALE_CODES = ["de-DE", "en-US", "fr-FR", "pt-BR", "pl-PL", "ru-RU", "zh-Hans"] CURRENCY_CODES = [ "USD", "EUR", "GBP", "AUD", "RUB", "PLN", "CAD", "CHF", "NOK", "SEK", "DKK" ] def find_scripts(site): parser = ScriptParser() parser.feed(site) return parser.scripts class ScriptParser(html.parser.HTMLParser): def __init__(self): super().__init__() self.last_tag = None self.scripts = [] def handle_starttag(self, tag, attrs): self.last_tag = tag def handle_data(self, data): if self.last_tag == "script": self.scripts.append(data) class GogApi: def __init__(self, token=None): self.token = token self.locale = (None, None, None) # TODO: replace tuple self.session = requests.Session() self.session.headers["User-Agent"] = USER_AGENT self.force_authorize = False # Helpers def request(self, method, url, authorized=True, allow_redirects=False, **kwargs): """ Wrapper around requests.request that also handles authorization, retries and logging """ if authorized or self.force_authorize: if self.token is None: raise NotAuthorizedError() if self.token.expired(): self.token.refresh() self.session.headers["Authorization"] = \ "Bearer " + self.token.access_token else: self.session.headers.pop("Authorization", None) # Retries retries = REQUEST_RETRIES while retries > 0: resp = self.session.request( method, url, allow_redirects=allow_redirects, **kwargs) if resp.status_code < 400: return resp elif 400 <= resp.status_code < 500: break else: retries -= 1 resp.raise_for_status() def get(self, *args, **kwargs): """ Wrapper around requests.get """ return self.request("GET", *args, **kwargs) def post(self, *args, **kwargs): """ Wrapper around requests.post """ return self.request("POST", *args, **kwargs) def request_json(self, *args, compressed=False, **kwargs): """ Wrapper around GogApi.request that automatically parses the JSON response. Also does zlib decompression because GOG decided to reinvent the wheel instead of using HTTP gzip encoding for their content system V2. """ resp = self.request(*args, **kwargs) if not compressed: if DEBUG_JSON: print(resp.text) return resp.json() else: json_comp = resp.content json_text = zlib.decompress(json_comp, 15).decode("utf-8") if DEBUG_JSON: print(json_text) return json.loads(json_text) def get_json(self, *args, **kwargs): """ Wrapper around GogApi.get with JSON parsing """ return self.request_json("GET", *args, **kwargs) def get_gogdata(self, url, *args, **kwargs): """ Downloads a page and returns the embedded JavaScript gogData variable. 
""" resp = self.get(url, *args, **kwargs) gogdata = {} for script in find_scripts(resp.text): matches = GOGDATA_RE.finditer(resp.text) for match in matches: subkey = match.group(1) value = match.group(2) value_parsed = json.loads(value) if subkey: data = {subkey: value_parsed} else: data = value_parsed gogdata.update(data) return gogdata def set_locale(self, country, currency, locale): """ country: ISO 3166 Alpha-2 currency: ISO 4217 locale: ISO 639 + ISO 3166 like language[_territory] """ if len(country) != 2: return AttributeError("Invalid country code {}".format(country)) elif currency not in CURRENCY_CODES: return AttributeError("Invalid currency code {}".format(locale)) elif locale not in LOCALE_CODES: return AttributeError("Invalid locale code {}".format(locale)) self.locale = (country, currency, locale) self.session.cookies["gog_lc"] = "_".join(self.locale) # Web APIs def web_game_gogdata(self, slug): return self.get_gogdata(urls.web("game", slug), authorized=False) def web_games_gogdata(self): return self.get_gogdata(urls.web("account.games")) def web_movies_gogdata(self): return self.get_gogdata(urls.web("account.movies")) def web_wishlist_gogdata(self): return self.get_gogdata(urls.web("account.wishlist")) def web_friends_gogdata(self): return self.get_gogdata(urls.web("account.friends")) def web_chat_gogdata(self): return self.get_gogdata(urls.web("account.chat")) def web_wallet_gogdata(self): return self.get_gogdata(urls.web("wallet")) def web_orders_gogdata(self): return self.get_gogdata(urls.web("settings.orders")) def web_account_gamedetails(self, game_id): return self.get_json(urls.web("account.gamedetails", game_id)) def web_account_search(self, **query): """ Allowed query keys: category: Genre feature: Feature hiddenFlag: Show hidden games language: Language mediaType: Game or movie page: Page number search: Search string sortBy: Sort order system: OS tags: Tags totalPages: Total Pages """ return self.get_json(urls.web("account.get_filtered"), params=query) def web_search(self, **query): """ Allowed query keys: category: Genre devpub: Developer or Published feature: Features language: Language mediaType: Game or movie page: Page number price: Price range release: Release timeframe search: Search string sort: Sort order system: OS limit: Max results """ return self.get_json( urls.web("search.filtering"), params=query, authorized=False) def web_user_data(self): return self.get_json(urls.web("user.data")) def web_user_games(self): return self.get_json(urls.web("user.games")) def web_user_wishlist(self): return self.get_json(urls.web("user.wishlist")) def web_user_wishlist_add(self, game_id): """Returns new wishlist""" return self.get_json(urls.web("user.wishlist.add", game_id)) def web_user_wishlist_remove(self, game_id): """Returns new wishlist""" return self.get_json(urls.web("user.wishlist.remove", game_id)) def web_user_ratings(self): return self.get_json(urls.web("user.ratings")) def web_user_review_votes(self): return self.get_json(urls.web("user.review_votes")) def web_user_change_currency(self, currency): return self.get_json(urls.web("user.change_currency", currency)) def web_user_change_language(self, lang): return self.get_json(urls.web("user.change_language", lang)) def web_user_set_redirect_url(self, url): """Set redirect url after login. 
Only know valid url: checkout""" return self.get(urls.web("user.set_redirect_url", params={"url": url})) def web_user_review_guidelines(self): return self.get_json(urls.web("user.review_guidelines")) def web_user_public_info(self, user_id, expand=None): if not expand: params = None elif expand == True: params = {"expand": ",".join(USER_EXPANDABLE)} else: params = {"expand": ",".join(expand)} return self.get_json( urls.web("user.public.info", user_id, params=params)) def web_user_public_block(self, user_id): return self.get_json(urls.web("user.public.block", user_id)) def web_user_public_unblock(self, user_id): return self.get_json(urls.web("user.public.unblock", user_id)) def web_friends_remove(self, user_id): return self.get_json(urls.web("friends.remove", user_id)) def web_friends_invite(self, user_id): return self.get_json(urls.web("friends.invite", user_id)) def web_friends_accept(self, user_id): return self.get_json(urls.web("friends.accept", user_id)) def web_friends_decline(self, user_id): return self.get_json(urls.web("friends.decline", user_id)) def web_cart_get(self): return self.get_json(urls.web("cart.get")) def web_cart_add(self, game_id): return self.get_json(urls.web("cart.add", game_id)) def web_cart_add_series(self, series_id): return self.get_json(urls.web("cart.add_series", series_id)) def web_cart_remove(self, game_id): return self.get_json(urls.web("cart.remove", game_id)) def web_reviews_search(self, game_id): return self.get_json(urls.web("reviews.search", game_id)) def web_reviews_vote(self, game_id): return self.get_json(urls.web("reviews.vote", game_id)) def web_reviews_report(self, game_id): return self.get_json(urls.web("reviews.report", game_id)) def web_reviews_rate(self, game_id): return self.get_json(urls.web("reviews.rate", game_id)) def web_reviews_add(self, game_id): return self.get_json(urls.web("reviews.add", game_id)) def web_order_change_currency(self, order_id, currency): return self.get_json( urls.web("order.change_currency", order_id, currency)) def web_order_add(self, order_id, game_id): return self.get_json(urls.web("order.add", order_id, game_id)) def web_order_remove(self, order_id, game_id): return self.get_json(urls.web("order.remove", order_id, game_id)) def web_order_enable_store_credit(self, order_id): return self.get_json(urls.web("order.enable_store_credit", order_id)) def web_order_disable_store_credit(self, order_id): return self.get_json(urls.web("order.disable_store_credit", order_id)) def web_order_set_as_gift(self, order_id): return self.get_json(urls.web("order.set_as_gift", order_id)) def web_order_set_as_not_gift(self, order_id): return self.get_json(urls.web("order.set_as_non_gift", order_id)) def web_order_process_order(self, order_id): return self.get_json(urls.web("order.process_order", order_id)) def web_order_payment_status(self, order_id): return self.get_json(urls.web("order.payment_status", order_id)) def web_order_check_status(self, order_id): return self.get_json(urls.web("order.check_status", order_id)) def web_checkout(self, order_id=None): if order_id is None: return self.get_json(urls.web("checkout")) else: return self.get_json(urls.web("checkout_id", order_id)) def web_checkout_manual(self, order_id): return self.get_json(urls.web("checkout_manual", order_id)) # Galaxy APIs def galaxy_file(self, game_id, dl_url): dl_url = dl_url.lstrip("/") return self.get_json(urls.galaxy("file", game_id, dl_url)) def galaxy_user(self, user_id=None): if user_id is None: user_id = self.token.user_id return 
self.get_json(urls.galaxy("user", user_id)) def galaxy_friends(self, user_id=None): if user_id is None: user_id = self.token.user_id return self.get_json(urls.galaxy("friends", user_id)) def galaxy_invitations(self, user_id=None): if user_id is None: user_id = self.token.user_id return self.get_json(urls.galaxy("invitations", user_id)) def galaxy_status(self, user_id=None): if user_id is None: user_id = self.token.user_id reqdata = {"version": CLIENT_VERSION} self.post(urls.galaxy("status", user_id), data=reqdata) def galaxy_statuses(self, user_ids): user_ids_str = ",".join(user_ids) params = {"user_id": user_ids_str} #self.request("OPTIONS", urls.galaxy("statuses"), params=params) return self.get_json(urls.galaxy("statuses"), params=params) def galaxy_achievements(self, game_id, user_id=None): if user_id is None: user_id = self.token.user_id return self.get_json(urls.galaxy("achievements", game_id, user_id)) def galaxy_sessions(self, game_id, user_id=None): if user_id is None: user_id = self.token.user_id return self.get_json(urls.galaxy("sessions", game_id, user_id)) def galaxy_friends_achievements(self, game_id, user_id=None): if user_id is None: user_id = self.token.user_id return self.get_json( urls.galaxy("friends.achievements", game_id, user_id)) def galaxy_friends_sessions(self, game_id, user_id=None): if user_id is None: user_id = self.token.user_id return self.get_json(urls.galaxy("friends.sessions", game_id, user_id)) def galaxy_product(self, game_id, expand=None): if not expand: params = {} elif expand is True: params = {"expand": ",".join(PRODUCT_EXPANDABLE)} else: params = {"expand": ",".join(expand)} if self.locale[2]: params["locale"] = self.locale[2] return self.get_json( urls.galaxy("product", game_id), params=params, authorized=False) def galaxy_products(self, game_ids, expand=None): if not expand: params = {} elif expand is True: params = {"expand": ",".join(PRODUCT_EXPANDABLE)} else: params = {"expand": ",".join(expand)} if self.locale[2]: params["locale"] = self.locale[2] ids_string = ",".join(str(game_id) for game_id in game_ids) params["ids"] = ids_string return self.get_json( urls.galaxy("products"), params=params, authorized=False) def galaxy_secure_link(self, game_id, path, generation): return self.get_json( urls.galaxy("cs.securelink", game_id), params={"path": path, "generation": generation}) def galaxy_builds(self, game_id, system): return self.get_json( urls.galaxy("cs.builds", game_id, system), authorized=False) def galaxy_cs_meta(self, meta_id): return self.get_json( urls.galaxy("cs.meta", meta_id[0:2], meta_id[2:4], meta_id), compressed=True, authorized=False) def galaxy_client_config(): return self.get_json(urls.galaxy("client-config"), authorized=False) def product(self, product_id, slug=None): return Product(self, product_id, slug) def search(self, **query): search_data = self.web_search(**query) return SearchResult(self, query, search_data)
1.40625
1
apps/notifications/tests/test_views.py
SCiO-systems/qcat
0
4248
import logging from unittest import mock from unittest.mock import call from django.conf import settings from django.contrib.auth import get_user_model from django.core.signing import Signer from django.urls import reverse from django.http import Http404 from django.test import RequestFactory from braces.views import LoginRequiredMixin from django.test import override_settings from model_mommy import mommy from apps.notifications.models import Log, StatusUpdate, MemberUpdate, ReadLog, \ ActionContextQuerySet from apps.notifications.views import LogListView, LogCountView, ReadLogUpdateView, \ LogQuestionnairesListView, LogInformationUpdateCreateView, \ LogSubscriptionPreferencesView, SignedLogSubscriptionPreferencesView from apps.qcat.tests import TestCase class LogListViewTest(TestCase): def setUp(self): self.view = LogListView() self.url_path = reverse('notification_partial_list') self.request = RequestFactory().get(self.url_path) self.user = {} self.request.user = self.user self.view_instance = self.setup_view( view=self.view, request=self.request ) member_add_log = mommy.make( _model=Log, id=8, action=settings.NOTIFICATIONS_ADD_MEMBER ) self.change_log = mommy.make( _model=Log, id=42, action=settings.NOTIFICATIONS_CHANGE_STATUS ) mommy.make(_model=StatusUpdate, log=self.change_log) mommy.make(_model=MemberUpdate, log=member_add_log) def get_view_with_get_querystring(self, param): request = RequestFactory().get( '{url}?{param}'.format(url=self.url_path, param=param) ) request.user = self.user return self.setup_view(view=self.view, request=request) def test_force_login(self): self.assertIsInstance(self.view_instance, LoginRequiredMixin) def test_queryset_method(self): self.assertEqual( self.view_instance.queryset_method, 'user_log_list' ) def test_queryset_method_pending(self): self.assertEqual( self.get_view_with_get_querystring('is_pending').queryset_method, 'user_pending_list' ) def test_get_paginate_by(self): self.assertEqual( self.view_instance.get_paginate_by(None), settings.NOTIFICATIONS_LIST_PAGINATE_BY ) def test_get_paginate_by_teaser(self): self.assertEqual( self.get_view_with_get_querystring('is_teaser').get_paginate_by(None), settings.NOTIFICATIONS_TEASER_PAGINATE_BY ) @mock.patch('apps.notifications.views.Log.actions.user_log_list') def test_get_queryset(self, mock_actions): self.view_instance.get_queryset() mock_actions.assert_called_once_with(user={}) @mock.patch('apps.notifications.views.Log.actions.user_pending_list') def test_get_queryset_pending(self, mock_actions): self.get_view_with_get_querystring('is_pending').get_queryset() mock_actions.assert_called_once_with(user={}) @mock.patch.object(LogListView, 'add_user_aware_data') def test_get_context_data_logs(self, mock_add_user_aware_data): self.view_instance.object_list = 'foo' self.view_instance.get_context_data() mock_add_user_aware_data.assert_called_once_with('foo') def _test_add_user_aware_data(self): # for faster tests, mock all the elements. elements are created here # as this makes the tests more readable. 
pth = 'apps.notifications.views.Log.actions' with mock.patch('{}.read_id_list'.format(pth)) as read_id_list: read_id_list.return_value = [42] with mock.patch('{}.user_pending_list'.format(pth)) as pending: pending.values_list.return_value = [8, 42] logs = Log.objects.all() return list(self.view_instance.add_user_aware_data(logs)) def test_add_user_aware_data_keys(self): data_keys = self._test_add_user_aware_data()[0].keys() for key in ['id', 'created', 'text', 'is_read', 'is_todo', 'edit_url']: self.assertTrue(key in data_keys) def test_add_user_aware_data_is_read(self): data = self._test_add_user_aware_data() # logs are ordered by creation date - 42 is the newer one self.assertTrue(data[0]['is_read']) def test_add_user_aware_data_is_not_read(self): data = self._test_add_user_aware_data() self.assertFalse(data[1]['is_read']) #def test_add_user_aware_data_is_todo(self): # data = self._test_add_user_aware_data() # self.assertTrue(data[1]['is_todo']) def test_add_user_aware_data_is_not_todo(self): data = self._test_add_user_aware_data() self.assertFalse(data[0]['is_todo']) @override_settings(NOTIFICATIONS_ACTIONS={'foo': 'bar', 'result': '42'}) def test_statuses_in_context(self): self.view_instance.object_list = [] context = self.view_instance.get_context_data() self.assertDictEqual( context['statuses'], {'foo': 'bar', 'result': '42'} ) @mock.patch('apps.notifications.views.Log.actions.user_log_list') def test_status_filter_queryset(self, mock_user_log_list): mock_user_log_list.return_value = [] self.assertEqual( [], self.view_instance.get_queryset() ) @mock.patch('apps.notifications.views.Log.actions.user_log_list') def test_status_filter_queryset_for_status(self, mock_user_log_list): mock_user_log_list.return_value = Log.objects.filter() view = self.view view.get_statuses = mock.MagicMock(return_value=[3]) view_instance = self.setup_view( view=view, request=self.request ) self.assertQuerysetEqual( view_instance.get_queryset(), [self.change_log.id], transform=lambda item: item.id ) def test_get_status_invalid(self): request = RequestFactory().get('{}?statuses=foo'.format(self.url_path)) view = self.setup_view(self.view, request) self.assertEqual(view.get_statuses(), []) @override_settings(NOTIFICATIONS_ACTIONS={'2': 'bar'}) def test_get_status_invalid_config(self): request = RequestFactory().get('{}?statuses=1'.format(self.url_path)) view = self.setup_view(self.view, request) self.assertEqual(view.get_statuses(), []) def test_get_status_valid(self): request = RequestFactory().get('{}?statuses=1,2,3'.format(self.url_path)) view = self.setup_view(self.view, request) self.assertEqual(view.get_statuses(), [1, 2, 3]) class ReadLogUpdateViewTest(TestCase): def setUp(self): self.view = ReadLogUpdateView() self.request = RequestFactory().post( reverse('notification_read'), data={'user': 123, 'log': 'log', 'checked': 'true'} ) self.user = mock.MagicMock(id=123) self.request.user = self.user self.view_instance = self.setup_view(view=self.view, request=self.request) def test_validate_data_all_keys(self): self.assertFalse( self.view_instance.validate_data() ) def test_validate_data_id_type(self): self.assertFalse( self.view_instance.validate_data(checked='1', log='1', user='foo') ) def test_validate_data_invalid_user(self): self.assertFalse( self.view_instance.validate_data(checked='456', log='1', user='456') ) def test_validate_data_valid(self): self.assertTrue( self.view_instance.validate_data(checked='1', log='1', user='123') ) @mock.patch('apps.notifications.views.ReadLog.objects.update_or_create') 
def test_post_valid_checked(self, mock_get_or_create): self.view_instance.post(request=self.request) mock_get_or_create.assert_called_once_with( user_id='123', log_id='log', defaults={'is_read': True} ) @mock.patch('apps.notifications.views.ReadLog.objects.update_or_create') def test_post_valid_unchecked(self, mock_get_or_create): request = RequestFactory().post( reverse('notification_read'), data={'user': 123, 'log': 'log', 'checked': 'false'} ) self.view_instance.post(request=request) mock_get_or_create.assert_called_once_with( user_id='123', log_id='log', defaults={'is_read': False} ) @mock.patch.object(ReadLogUpdateView, 'validate_data') def test_post_invalid(self, mock_validate_data): logging.disable(logging.CRITICAL) mock_validate_data.return_value = False with self.assertRaises(Http404): self.view_instance.post(request=self.request) class LogCountViewTest(TestCase): def setUp(self): super().setUp() self.request = RequestFactory().get(reverse('notification_new_count')) self.request.user = mommy.make(_model=get_user_model()) self.view = self.setup_view(view=LogCountView(), request=self.request) mommy.make( _model=Log, catalyst=self.request.user, action=settings.NOTIFICATIONS_CHANGE_STATUS, _quantity=4 ) mommy.make( _model=Log, catalyst=self.request.user, action=settings.NOTIFICATIONS_EDIT_CONTENT, _quantity=2 ) @mock.patch('apps.notifications.views.Log.actions.only_unread_logs') def test_get_unread_only(self, mock_only_unread_logs): self.view.get(request=self.request) mock_only_unread_logs.assert_called_once_with( user=self.request.user ) def test_log_count(self): response = self.view.get(request=self.request) self.assertEqual(response.content, b'4') def test_log_count_one_read(self): mommy.make( _model=ReadLog, log=Log.objects.filter(action=settings.NOTIFICATIONS_CHANGE_STATUS).first(), user=self.request.user, is_read=True ) response = self.view.get(request=self.request) self.assertEqual(response.content, b'3') class LogQuestionnairesListViewTest(TestCase): def setUp(self): super().setUp() self.request = RequestFactory().get(reverse('notification_questionnaire_logs')) self.request.user = 'foo' self.view = self.setup_view(view=LogQuestionnairesListView(), request=self.request) @mock.patch.object(ActionContextQuerySet, 'user_log_list') def test_get_questionnaire_logs(self, mock_user_log_list): self.view.get_questionnaire_logs('foo') mock_user_log_list.assert_called_once_with(user='foo') @mock.patch.object(LogQuestionnairesListView, 'get_questionnaire_logs') def test_get(self, mock_get_questionnaire_logs): mock_get_questionnaire_logs.return_value = ['foo_1', 'foo_2', 'bar_3'] response = self.view.get(self.request) self.assertEqual( response.content, b'{"questionnaires": ["bar_3", "foo_1", "foo_2"]}' ) class LogInformationUpdateCreateViewTest(TestCase): def setUp(self): super().setUp() self.url = reverse('notification_inform_compiler') self.view = LogInformationUpdateCreateView() self.request = RequestFactory().get(self.url) self.request.user = 'foo' self.view = self.setup_view(view=self.view, request=self.request) def test_get_compiler_query(self): questionnaire = mock.MagicMock() self.view.get_compiler(questionnaire) self.assertEqual( questionnaire.method_calls[0], call.questionnairemembership_set.get(role='compiler') ) def test_get_compiler(self): sentinel = mock.sentinel questionnaire = mock.MagicMock() questionnaire.questionnairemembership_set.get.return_value = sentinel self.assertEqual( self.view.get_compiler(questionnaire), sentinel.user ) 
@mock.patch('apps.notifications.views.query_questionnaire') def test_get_questionnaire(self, mock_query_questionnaire): one_questionnaire = mock.MagicMock() one_questionnaire.first = lambda : 'foo' mock_query_questionnaire.return_value = one_questionnaire self.assertEqual( self.view.get_questionnaire('foo'), 'foo' ) @mock.patch('apps.notifications.views.query_questionnaire') def test_get_questionnaire_raises(self, mock_query_questionnaire): not_exists = mock.MagicMock() not_exists.exists = lambda : False mock_query_questionnaire.return_value = not_exists with self.assertRaises(Http404): self.view.get_questionnaire('foo') @mock.patch('apps.notifications.views.query_questionnaire') def test_get_questionnaire_calls_filter(self, mock_query_questionnaire): self.view.get_questionnaire('foo') mock_query_questionnaire.assert_called_once_with( identifier='foo', request=self.request ) @override_settings(NOTIFICATIONS_FINISH_EDITING='setting') @mock.patch.object(LogInformationUpdateCreateView, 'get_questionnaire') @mock.patch.object(LogInformationUpdateCreateView, 'get_compiler') def test_post(self, mock_get_compiler, mock_get_questionnaire): compiler = mock.MagicMock() mock_get_questionnaire.return_value = mock.sentinel.questionnaire mock_get_compiler.return_value = compiler request = RequestFactory().post(self.url, data={ 'identifier': 'foo', 'message': 'bar' }) with mock.patch('apps.notifications.views.InformationLog') as mock_create: self.setup_view(view=self.view, request=self.request).post(request) mock_create.assert_called_once_with( action='setting', questionnaire=mock.sentinel.questionnaire, receiver=compiler, sender='foo' ) class LogSubscriptionPreferencesMixinTest(TestCase): def setUp(self): self.url = reverse('notification_preferences') self.view = LogSubscriptionPreferencesView() self.request = RequestFactory().get(self.url) self.user = mommy.make(_model=get_user_model()) self.obj = self.user.mailpreferences self.request.user = self.user self.request._messages = mock.MagicMock() self.view = self.setup_view(view=self.view, request=self.request) self.view.object = self.obj def test_get_initial(self): self.obj.wanted_actions = 'some,thing,yay' self.assertEqual( ['some', 'thing', 'yay'], self.view.get_initial()['wanted_actions'] ) def test_get_form_valid_changed_language(self): self.view.object = mock.MagicMock() self.view.object.has_changed_language = False form = mock.MagicMock() form.changed_data = ['language'] self.view.form_valid(form) self.assertTrue(self.view.object.has_changed_language) def test_get_form_valid_message(self): self.view.form_valid(mock.MagicMock()) self.assertTrue(self.request._messages.method_calls) class SignedLogSubscriptionPreferencesViewTest(TestCase): def setUp(self): self.user = mommy.make(_model=get_user_model()) self.obj = self.user.mailpreferences self.view = SignedLogSubscriptionPreferencesView() self.request = RequestFactory().get(str(self.obj.get_signed_url())) self.request._messages = mock.MagicMock() self.view = self.setup_view(view=self.view, request=self.request) self.view.object = self.obj def test_get_success_url_signed(self): mock_user = mock.MagicMock(return_value=self.user) mock_user.is_authenticated = False mock_user.id = self.user.id self.request.user = mock_user self.assertEqual( self.view.get_success_url(), self.obj.get_signed_url() ) def test_get_success_url_user(self): self.request.user = self.user self.assertEqual( self.view.get_success_url(), reverse('notification_preferences') ) def test_get_object_user(self): self.request.user = 
self.user self.assertEqual( self.view.get_object(), self.obj ) def test_get_signed_object(self): mock_user = mock.MagicMock(return_value=self.user) mock_user.is_authenticated = False mock_user.id=self.user.id self.request.user = mock_user self.view.kwargs['token'] = mock.MagicMock() with mock.patch.object(Signer, 'unsign') as mock_unsign: mock_unsign.return_value = self.obj.id self.assertEqual( self.view.get_object(), self.obj ) mock_unsign.assert_called_with(self.view.kwargs['token']) def test_get_signed_object_404(self): mock_user = mock.MagicMock(return_value=self.user) mock_user.is_authenticated = False mock_user.id = self.user.id self.request.user = mock_user self.view.kwargs['token'] = mock.MagicMock() with self.assertRaises(Http404): self.view.get_object()
1.59375
2
tests/components/airthings/test_config_flow.py
MrDelik/core
30,023
4256
"""Test the Airthings config flow.""" from unittest.mock import patch import airthings from homeassistant import config_entries from homeassistant.components.airthings.const import CONF_ID, CONF_SECRET, DOMAIN from homeassistant.core import HomeAssistant from homeassistant.data_entry_flow import RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM from tests.common import MockConfigEntry TEST_DATA = { CONF_ID: "client_id", CONF_SECRET: "secret", } async def test_form(hass: HomeAssistant) -> None: """Test we get the form.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) assert result["type"] == RESULT_TYPE_FORM assert result["errors"] is None with patch("airthings.get_token", return_value="test_token",), patch( "homeassistant.components.airthings.async_setup_entry", return_value=True, ) as mock_setup_entry: result2 = await hass.config_entries.flow.async_configure( result["flow_id"], TEST_DATA, ) await hass.async_block_till_done() assert result2["type"] == RESULT_TYPE_CREATE_ENTRY assert result2["title"] == "Airthings" assert result2["data"] == TEST_DATA assert len(mock_setup_entry.mock_calls) == 1 async def test_form_invalid_auth(hass: HomeAssistant) -> None: """Test we handle invalid auth.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "airthings.get_token", side_effect=airthings.AirthingsAuthError, ): result2 = await hass.config_entries.flow.async_configure( result["flow_id"], TEST_DATA, ) assert result2["type"] == RESULT_TYPE_FORM assert result2["errors"] == {"base": "invalid_auth"} async def test_form_cannot_connect(hass: HomeAssistant) -> None: """Test we handle cannot connect error.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "airthings.get_token", side_effect=airthings.AirthingsConnectionError, ): result2 = await hass.config_entries.flow.async_configure( result["flow_id"], TEST_DATA, ) assert result2["type"] == RESULT_TYPE_FORM assert result2["errors"] == {"base": "cannot_connect"} async def test_form_unknown_error(hass: HomeAssistant) -> None: """Test we handle unknown error.""" result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER} ) with patch( "airthings.get_token", side_effect=Exception, ): result2 = await hass.config_entries.flow.async_configure( result["flow_id"], TEST_DATA, ) assert result2["type"] == RESULT_TYPE_FORM assert result2["errors"] == {"base": "unknown"} async def test_flow_entry_already_exists(hass: HomeAssistant) -> None: """Test user input for config_entry that already exists.""" first_entry = MockConfigEntry( domain="airthings", data=TEST_DATA, unique_id=TEST_DATA[CONF_ID], ) first_entry.add_to_hass(hass) with patch("airthings.get_token", return_value="token"): result = await hass.config_entries.flow.async_init( DOMAIN, context={"source": config_entries.SOURCE_USER}, data=TEST_DATA ) assert result["type"] == "abort" assert result["reason"] == "already_configured"
1.648438
2
tests/services/test_rover_runner_service.py
dev-11/mars-rover-challenge
0
4264
import unittest
from services import RoverRunnerService
from tests.test_environment.marses import small_mars_with_one_rover_empty_commands
from tests.test_environment import mocks as m
from data_objects import Rover


class TestRoverRunnerService(unittest.TestCase):

    def test_rover_runner_moves_rover_forward(self):
        grid = small_mars_with_one_rover_empty_commands.grid
        rover = small_mars_with_one_rover_empty_commands.rover_setups[0].rover
        tss = m.get_mocked_turn_command_selector_turn_left_from_north_command_only()
        mss = m.get_mocked_move_command_selector_north_command_only()
        rrs = RoverRunnerService(grid, rover, mss, tss)
        final_pos = rrs.run(['M'])
        self.assertEqual(Rover(0, 1, 'N'), final_pos)

    def test_rover_runner_turns_rover_left(self):
        grid = small_mars_with_one_rover_empty_commands.grid
        rover = small_mars_with_one_rover_empty_commands.rover_setups[0].rover
        tss = m.get_mocked_turn_command_selector_turn_left_from_north_command_only()
        mss = m.get_mocked_move_command_selector_north_command_only()
        rrs = RoverRunnerService(grid, rover, mss, tss)
        final_pos = rrs.run(['L'])
        self.assertEqual(Rover(0, 0, 'W'), final_pos)

    def test_rover_runner_turns_rover_right(self):
        grid = small_mars_with_one_rover_empty_commands.grid
        rover = small_mars_with_one_rover_empty_commands.rover_setups[0].rover
        tss = m.get_mocked_turn_command_selector_turn_right_from_north_command_only()
        mss = m.get_mocked_move_command_selector_north_command_only()
        rrs = RoverRunnerService(grid, rover, mss, tss)
        final_pos = rrs.run(['R'])
        self.assertEqual(Rover(0, 0, 'E'), final_pos)

    def test_rover_runner_goes_off_gird_east(self):
        grid = small_mars_with_one_rover_empty_commands.grid
        rover = Rover(1, 1, "E")
        tss = m.get_mocked_turn_command_selector_turn_right_from_north_command_only()
        mss = m.get_mocked_move_command_selector_east_command_only()
        rrs = RoverRunnerService(grid, rover, mss, tss)
        self.assertRaises(ValueError, rrs.run, ['M'])

    def test_rover_runner_goes_off_gird_north(self):
        grid = small_mars_with_one_rover_empty_commands.grid
        rover = Rover(1, 1, "N")
        tss = m.get_mocked_turn_command_selector_turn_right_from_north_command_only()
        mss = m.get_mocked_move_command_selector_north_command_only()
        rrs = RoverRunnerService(grid, rover, mss, tss)
        self.assertRaises(ValueError, rrs.run, ['M'])

    def test_rover_runner_goes_off_gird_west(self):
        grid = small_mars_with_one_rover_empty_commands.grid
        rover = Rover(0, 1, "W")
        tss = m.get_mocked_turn_command_selector_turn_right_from_north_command_only()
        mss = m.get_mocked_move_command_selector_west_command_only()
        rrs = RoverRunnerService(grid, rover, mss, tss)
        self.assertRaises(ValueError, rrs.run, ['M'])

    def test_rover_runner_goes_off_gird_south(self):
        grid = small_mars_with_one_rover_empty_commands.grid
        rover = Rover(0, 0, "S")
        tss = m.get_mocked_turn_command_selector_turn_right_from_north_command_only()
        mss = m.get_mocked_move_command_selector_south_command_only()
        rrs = RoverRunnerService(grid, rover, mss, tss)
        self.assertRaises(ValueError, rrs.run, ['M'])

    def test_rover_runner_does_nothing_empty_command(self):
        grid = small_mars_with_one_rover_empty_commands.grid
        rover = small_mars_with_one_rover_empty_commands.rover_setups[0].rover
        tss = m.get_mocked_turn_command_selector_turn_left_from_north_command_only()
        mss = m.get_mocked_move_command_selector_north_command_only()
        rrs = RoverRunnerService(grid, rover, mss, tss)
        final_pos = rrs.run([])
        self.assertEqual(rover, final_pos)

    def test_rover_runner_raises_error_for_None_command(self):
        grid = small_mars_with_one_rover_empty_commands.grid
        rover = small_mars_with_one_rover_empty_commands.rover_setups[0].rover
        tss = m.get_mocked_turn_command_selector_turn_left_from_north_command_only()
        mss = m.get_mocked_move_command_selector_north_command_only()
        rrs = RoverRunnerService(grid, rover, mss, tss)
        self.assertRaises(TypeError, rrs.run, None)
1.539063
2
src/decisionengine/framework/modules/tests/test_module_decorators.py
moibenko/decisionengine
9
4280
# SPDX-FileCopyrightText: 2017 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0

import pytest

from decisionengine.framework.modules import Publisher, Source
from decisionengine.framework.modules.Module import verify_products
from decisionengine.framework.modules.Source import Parameter


def test_multiple_consumes_declarations():
    with pytest.raises(Exception, match="@consumes has already been called"):

        @Publisher.consumes(a=int)
        @Publisher.consumes(b=float)
        class _(Publisher.Publisher):
            pass


def test_multiple_produces_declarations():
    with pytest.raises(Exception, match="@produces has already been called"):

        @Source.produces(c=str)
        @Source.produces(d=bool)
        class _(Source.Source):
            pass


def test_wrong_product_names():
    @Source.produces(a=str)
    class BMaker(Source.Source):
        def __init__(self, config):
            super().__init__(config)

        def acquire(self):
            return {"b": ""}

    maker = BMaker({"channel_name": "test"})
    expected_err_msg = (
        "The following products were not produced:\n"
        + " - 'a' of type 'str'\n\n"
        + "The following products were not declared:\n"
        + " - 'b' of type 'str'"
    )
    with pytest.raises(Exception, match=expected_err_msg):
        verify_products(maker, maker.acquire())


def test_wrong_product_types():
    @Source.produces(a=str, b=int)
    class AMaker(Source.Source):
        def __init__(self, config):
            super().__init__(config)

        def acquire(self):
            return {"a": 42, "b": 17}

    maker = AMaker({"channel_name": "test"})
    expected_err_msg = "The following products have the wrong types:\n" + r" - 'a' \(expected 'str', got 'int'\)"
    with pytest.raises(Exception, match=expected_err_msg):
        verify_products(maker, maker.acquire())


def test_supports_config():
    expected_err_msg = (
        "An error occurred while processing the parameter 'conflicting_types':\n"
        + "The specified type 'int' conflicts with the type of the default value "
        + r"'hello' \(type 'str'\)"
    )
    with pytest.raises(Exception, match=expected_err_msg):

        @Source.supports_config(Parameter("conflicting_types", type=int, default="hello"))
        class _(Source.Source):
            pass
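

# Illustrative counterpart (an addition, not part of the original test module):
# a source whose declared and produced products agree, so verify_products passes.
def test_matching_product_declaration():
    @Source.produces(a=str)
    class GoodMaker(Source.Source):
        def __init__(self, config):
            super().__init__(config)

        def acquire(self):
            return {"a": "hello"}

    maker = GoodMaker({"channel_name": "test"})
    # Names and types match the @produces declaration, so no exception is raised.
    verify_products(maker, maker.acquire())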
1.445313
1
common/__init__.py
whyh/FavourDemo
1
4288
from . import (emoji as emj,
               keyboards as kb,
               telegram as tg,
               phrases as phr,
               finance as fin,
               utils,
               glossary,
               bots,
               gcp,
               sed,
               db)
0.030884
0
3-functions/pytest-exercises/test_functions.py
BaseCampCoding/python-fundamentals
0
4296
import functions
from pytest import approx
from bcca.test import should_print


def test_add_em_up():
    assert functions.add_em_up(1, 2, 3) == 6
    assert functions.add_em_up(4, 5, 6) == 15


def test_sub_sub_hubbub():
    assert functions.sub_sub_hubbub(1, 2, 3) == -4


def test_square_area():
    assert functions.square_area(5, 5) == 25
    assert functions.square_area(3, 5) == 15
    assert functions.square_area(2, 2) == 4


def test_circle_area():
    assert functions.circle_area(1) == approx(3.14)
    assert functions.circle_area(5) == approx(78.5)


def test_kilometers_to_miles():
    assert functions.kilometers_to_miles(1) == approx(0.6214)
    assert functions.kilometers_to_miles(.5) == approx(0.3107)
    assert functions.kilometers_to_miles(0) == approx(0.0)
    assert functions.kilometers_to_miles(40) == approx(24.855999999999998)


@should_print
def test_sales_tax_1(output):
    functions.sales_tax(1)

    assert output == """
Purchase Amount: 1
State Sales Tax: 0.04
County Sales Tax: 0.02
Total Sales Tax: 0.06
Total Cost: 1.06
"""


@should_print
def test_sales_tax_99_99(output):
    functions.sales_tax(99.99)

    assert output == """
Purchase Amount: 99.99
State Sales Tax: 3.9996
County Sales Tax: 1.9998
Total Sales Tax: 5.9994
Total Cost: 105.98939999999999
"""


@should_print
def test_sales_tax_5_95(output):
    functions.sales_tax(5.95)

    assert output == """
Purchase Amount: 5.95
State Sales Tax: 0.23800000000000002
County Sales Tax: 0.11900000000000001
Total Sales Tax: 0.35700000000000004
Total Cost: 6.307
"""


def test_min_insurance():
    assert functions.min_insurance(100000) == approx(80000.0)
    assert functions.min_insurance(123456789) == approx(98765431.2)
    assert functions.min_insurance(0) == approx(0.0)
    assert functions.min_insurance(-54317890) == approx(-43454312.0)


@should_print
def test_property_tax_10000(output):
    functions.property_tax(10000)

    assert output == '''
Assessment Value: 6000.0
Property Tax: 38.4
'''


@should_print
def test_property_tax_99999_95(output):
    functions.property_tax(99999.95)

    assert output == '''
Assessment Value: 59999.969999999994
Property Tax: 383.999808
'''


def test_bmi():
    assert functions.bmi(160, 67) == approx(25.05680552)
    assert functions.bmi(200, 72) == approx(27.12191358)
    assert functions.bmi(120, 60) == approx(23.43333333)


def test_calories():
    assert functions.calories(5, 20) == 125
    assert functions.calories(1, 1) == 13


def test_earnings():
    assert functions.earnings(100, 100, 100) == 3600
    assert functions.earnings(50, 75, 100) == 2550
    assert functions.earnings(0, 1000, 79) == 12711


@should_print
def test_paint_job_estimator(output):
    functions.paint_job_estimator(50, 10)

    assert output == '''
Gallons of paint required: 0.43478260869565216
Hours of labor required: 3.4782608695652173
Cost of paint: 4.3478260869565215
Cost of labor: 69.56521739130434
Total Cost: 73.91304347826086
'''


@should_print
def test_paint_job_estimator_2(output):
    functions.paint_job_estimator(750, 15.95)

    assert output == '''
Gallons of paint required: 6.521739130434782
Hours of labor required: 52.17391304347826
Cost of paint: 104.02173913043477
Cost of labor: 1043.4782608695652
Total Cost: 1147.5
'''


@should_print
def test_monthly_sales_tax(output):
    functions.monthly_sales_tax(123456.79)

    assert output == '''
Monthly sales: 123456.79
State sales tax: 4938.2716
County sales tax: 2469.1358
Total sales tax: 7407.4074
'''


@should_print
def test_monthly_sales_tax_2(output):
    functions.monthly_sales_tax(4321567.21)

    assert output == '''
Monthly sales: 4321567.21
State sales tax: 172862.6884
County sales tax: 86431.3442
Total sales tax: 259294.03260000004
'''
1.96875
2
saleor/core/jwt.py
autobotasia/saleor
1
4320
from datetime import datetime, timedelta
from typing import Any, Dict, Optional

import graphene
import jwt
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest

from ..account.models import User
from ..app.models import App
from .permissions import (
    get_permission_names,
    get_permissions_from_codenames,
    get_permissions_from_names,
)

JWT_ALGORITHM = "HS256"
SALEOR_AUTH_HEADER = "HTTP_AUTHORIZATION_BEARER"
DEFAULT_AUTH_HEADER = "HTTP_AUTHORIZATION"
AUTH_HEADER_PREFIXES = ["JWT", "BEARER"]

JWT_ACCESS_TYPE = "access"
JWT_REFRESH_TYPE = "refresh"
JWT_THIRDPARTY_ACCESS_TYPE = "thirdparty"
JWT_REFRESH_TOKEN_COOKIE_NAME = "refreshToken"

PERMISSIONS_FIELD = "permissions"
JWT_SALEOR_OWNER_NAME = "saleor"
JWT_OWNER_FIELD = "owner"


def jwt_base_payload(
    exp_delta: Optional[timedelta], token_owner: str
) -> Dict[str, Any]:
    utc_now = datetime.utcnow()
    payload = {"iat": utc_now, JWT_OWNER_FIELD: token_owner}
    if exp_delta:
        payload["exp"] = utc_now + exp_delta
    return payload


def jwt_user_payload(
    user: User,
    token_type: str,
    exp_delta: Optional[timedelta],
    additional_payload: Optional[Dict[str, Any]] = None,
    token_owner: str = JWT_SALEOR_OWNER_NAME,
) -> Dict[str, Any]:
    payload = jwt_base_payload(exp_delta, token_owner)
    payload.update(
        {
            "token": user.jwt_token_key,
            "email": user.email,
            "type": token_type,
            "user_id": graphene.Node.to_global_id("User", user.id),
            "is_staff": user.is_staff,
            "is_supplier": user.is_supplier,
        }
    )
    if additional_payload:
        payload.update(additional_payload)
    return payload


def jwt_encode(payload: Dict[str, Any]) -> str:
    return jwt.encode(
        payload,
        settings.SECRET_KEY,  # type: ignore
        JWT_ALGORITHM,
    )


def jwt_decode_with_exception_handler(
    token: str, verify_expiration=settings.JWT_EXPIRE
) -> Optional[Dict[str, Any]]:
    try:
        return jwt_decode(token, verify_expiration=verify_expiration)
    except jwt.PyJWTError:
        return None


def jwt_decode(token: str, verify_expiration=settings.JWT_EXPIRE) -> Dict[str, Any]:
    return jwt.decode(
        token,
        settings.SECRET_KEY,  # type: ignore
        algorithms=[JWT_ALGORITHM],
        options={"verify_exp": verify_expiration},
    )


def create_token(payload: Dict[str, Any], exp_delta: timedelta) -> str:
    payload.update(jwt_base_payload(exp_delta, token_owner=JWT_SALEOR_OWNER_NAME))
    return jwt_encode(payload)


def create_access_token(
    user: User, additional_payload: Optional[Dict[str, Any]] = None
) -> str:
    payload = jwt_user_payload(
        user, JWT_ACCESS_TYPE, settings.JWT_TTL_ACCESS, additional_payload
    )
    return jwt_encode(payload)


def create_refresh_token(
    user: User, additional_payload: Optional[Dict[str, Any]] = None
) -> str:
    payload = jwt_user_payload(
        user,
        JWT_REFRESH_TYPE,
        settings.JWT_TTL_REFRESH,
        additional_payload,
    )
    return jwt_encode(payload)


def get_token_from_request(request: WSGIRequest) -> Optional[str]:
    auth_token = request.META.get(SALEOR_AUTH_HEADER)
    if not auth_token:
        auth = request.META.get(DEFAULT_AUTH_HEADER, "").split(maxsplit=1)
        if len(auth) == 2 and auth[0].upper() in AUTH_HEADER_PREFIXES:
            auth_token = auth[1]
    return auth_token


def get_user_from_payload(payload: Dict[str, Any]) -> Optional[User]:
    user = User.objects.filter(email=payload["email"], is_active=True).first()
    user_jwt_token = payload.get("token")
    if not user_jwt_token or not user:
        raise jwt.InvalidTokenError(
            "Invalid token. Create new one by using tokenCreate mutation."
        )
    if user.jwt_token_key != user_jwt_token:
        raise jwt.InvalidTokenError(
            "Invalid token. Create new one by using tokenCreate mutation."
        )
    return user


def is_saleor_token(token: str) -> bool:
    """Confirm that token was generated by Saleor not by plugin."""
    try:
        payload = jwt.decode(token, options={"verify_signature": False})
    except jwt.PyJWTError:
        return False
    owner = payload.get(JWT_OWNER_FIELD)
    if not owner or owner != JWT_SALEOR_OWNER_NAME:
        return False
    return True


def get_user_from_access_token(token: str) -> Optional[User]:
    if not is_saleor_token(token):
        return None
    payload = jwt_decode(token)
    return get_user_from_access_payload(payload)


def get_user_from_access_payload(payload: dict) -> Optional[User]:
    jwt_type = payload.get("type")
    if jwt_type not in [JWT_ACCESS_TYPE, JWT_THIRDPARTY_ACCESS_TYPE]:
        raise jwt.InvalidTokenError(
            "Invalid token. Create new one by using tokenCreate mutation."
        )
    permissions = payload.get(PERMISSIONS_FIELD, None)
    user = get_user_from_payload(payload)
    if user and permissions is not None:
        token_permissions = get_permissions_from_names(permissions)
        token_codenames = [perm.codename for perm in token_permissions]
        user.effective_permissions = get_permissions_from_codenames(token_codenames)
        user.is_staff = True if user.effective_permissions else False
    return user


def create_access_token_for_app(app: "App", user: "User"):
    """Create access token for app.

    App can use user jwt token to proceed given operation on the Saleor side.
    The token which can be used by App has additional field defining the
    permissions assigned to it. The permissions set is the intersection of
    user permissions and app permissions.
    """
    app_permissions = app.permissions.all()
    app_permission_enums = get_permission_names(app_permissions)

    permissions = user.effective_permissions
    user_permission_enums = get_permission_names(permissions)
    app_id = graphene.Node.to_global_id("App", app.id)
    additional_payload = {
        "app": app_id,
        PERMISSIONS_FIELD: list(app_permission_enums & user_permission_enums),
    }
    payload = jwt_user_payload(
        user,
        JWT_THIRDPARTY_ACCESS_TYPE,
        exp_delta=settings.JWT_TTL_APP_ACCESS,
        additional_payload=additional_payload,
    )
    return jwt_encode(payload)
1.421875
1
main.py
JaekwangCha/my_pytorch_templet
0
4328
# version 0.1

# ================== IMPORT CUSTOM LEARNING LIBRARIES ===================== #
from customs.train import train, test
from customs.dataset import load_dataset
from customs.model import load_model

# ================== TRAINING SETTINGS ================== #
import argparse
import os

parser = argparse.ArgumentParser()
parser.add_argument('--train_method', default='supervised', type=str, help='type of training: supervised(default), unsupervised, reinforce')
parser.add_argument('--task', default='classification', type=str, help='task of training: classification(default), regression')
parser.add_argument('--dataset', default='mnist', type=str, help='dataset to use')
parser.add_argument('--model', default='CNN', type=str, help='model to use')
parser.add_argument('--seed', default=42, type=int, help='random seed (default: 42)')
parser.add_argument('--num_worker', default=1, type=int, help='number of dataloader worker')
parser.add_argument('--no_cuda', action='store_true', default=False, help='disables CUDA training')
parser.add_argument('--gpu', default=0, type=str, help='GPU-id for GPU to use')
parser.add_argument('--multi_gpu', default=0, type=str, help='GPU-ids for multi-GPU usage')
parser.add_argument('--pin_memory', default=True, type=bool, help='pin memory option selector')
parser.add_argument('--save_model', action='store_true', default=False, help='For Saving the current Model')
parser.add_argument('--save_path', default=os.getcwd()+'/weights', type=str, help='Where to save weights')
parser.add_argument('--log_path', default=os.getcwd()+'/Logs', type=str, help='Where to save Logs')

# data setting
parser.add_argument('--val_rate', default=0.2, type=float, help='split rate for the validation data')
parser.add_argument('--transform', default='default', type=str, help='choose the data transform type')

# training parameter setting
parser.add_argument('--n_epoch', default=10, type=int, help='number of total training iteration')
parser.add_argument('--batch_size', default=32, type=int, help='size of minibatch')
parser.add_argument('--test_batch_size', default=32, type=int, help='size of test-minibatch')

# optimizer & scheduler setting
parser.add_argument('--lr', default=0.03, type=float, help='training learning rate')
parser.add_argument('--optimizer', default='adam', type=str, help='optimizer select')
parser.add_argument('--scheduler', default='steplr', type=str, help='scheduler select')

opt = parser.parse_args()

# ===================== IMPORT PYTORCH LIBRARIES ================== #
import torch
from torch.utils.data import DataLoader

torch.manual_seed(opt.seed)

# ================== GPU SETTINGS ================== #
def gpu_setup(opt):
    use_cuda = not opt.no_cuda and torch.cuda.is_available()
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"

    if opt.multi_gpu != 0:
        print()
        print('Activating multi-gpu training mode')
        print(opt.multi_gpu)
        os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.multi_gpu)
        opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    else:
        print()
        print('Activating single-gpu training mode')
        os.environ['CUDA_VISIBLE_DEVICES'] = str(opt.gpu)
        opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        print('Using gpu number ' + str(opt.gpu))
    return use_cuda

# ======================= MAIN SCRIPT ============================= #
def main(opt):
    use_cuda = gpu_setup(opt)

    dataset_train, dataset_validation = load_dataset(opt, train=True)
    print('training data size: {}'.format(len(dataset_train)))
    print('validation data size: {}'.format(len(dataset_validation)))

    dataset_test = load_dataset(opt, train=False)
    print('test data size: {}'.format(len(dataset_test)))
    print()

    kwargs = {'num_workers': opt.num_worker, 'pin_memory': opt.pin_memory} if use_cuda else {}
    train_dataloader = DataLoader(dataset_train, batch_size=opt.batch_size, shuffle=True, **kwargs)
    validation_dataloader = DataLoader(dataset_validation, batch_size=opt.batch_size, shuffle=True, **kwargs)
    test_dataloader = DataLoader(dataset_test, batch_size=opt.test_batch_size, shuffle=True, **kwargs)

    model = load_model(opt)
    if opt.multi_gpu != 0:
        model = torch.nn.DataParallel(model)
    model.to(opt.device)

    train(opt, model, train_dataloader, validation_dataloader)
    test(opt, model, test_dataloader)

if __name__ == '__main__':
    main(opt)
1.75
2
src/tiden/tidenrunner.py
mshonichev/example_pkg
0
4336
#!/usr/bin/env python3 # # Copyright 2017-2020 GridGain Systems. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .tidenpluginmanager import PluginManager from .report.steps import step, InnerReportConfig, Step, add_attachment, AttachmentType from .util import log_print, unix_path, call_method, create_case, kill_stalled_java, exec_time from .result import Result from .util import write_yaml_file, should_be_skipped from .logger import * from .runner import get_test_modules, get_long_path_len, get_class_from_module, known_issue_str from .priority_decorator import get_priority_key from .sshpool import SshPool from uuid import uuid4 from traceback import format_exc from .runner import set_configuration_options, get_configuration_representation, get_actual_configuration from importlib import import_module from os import path, mkdir from time import time from shutil import copyfile from os.path import join, basename from glob import glob import traceback class TidenTestPlan: all_tests = None skipped_tests = None tests_to_execute = None def __init__(self): self.all_tests = {} self.skipped_tests = [] self.tests_to_execute = [] def update(self, other): self.all_tests.update(other.all_tests) self.skipped_tests.extend(other.skipped_tests) self.tests_to_execute.extend(other.tests_to_execute) class TidenRunner: # { # '<suite_name>.<test_file_name>': { # 'path': <full-path-to-test-file>, # 'module_short_name': <test_file_name>, # } # } modules = None # Tiden config dictionary config = None # Tiden SshPool instance ssh_pool = None # Tiden PluginManager instance pm = None # longest length of the test name long_path_len = 0 # instance of Result class result = None # current test module, a key to self.modules dictionary test_module = None # == TidenTestPlan for all modules: total = None # dictionary of TidenTestPlan indexed by test module name test_plan = {} # == for current test module: # a short name of test module, e.g. test module file name without .py extension module_short_name = None # a name of module' test class test_class_name = None # instance of current module' test case class test_class = None # == for current test within module: # test name, with all configuration options current_test_name = None # test method name only current_test_method = None def __init__(self, config, **kwargs): if kwargs.get('modules', None) is not None: self.modules = kwargs.get('modules') else: self.modules = get_test_modules(config, collect_only=kwargs.get('collect_only')) self.config = config self.long_path_len = get_long_path_len(self.modules) xunit_path_var = None if kwargs.get('xunit_path'): xunit_path_var = kwargs.get('xunit_path') elif config.get('var_dir') and config.get('xunit_file'): xunit_path_var = join(config.get('var_dir'), config.get('xunit_file')) self.result = Result(xunit_path=xunit_path_var) self.ssh_pool: SshPool = kwargs.get('ssh_pool') self.pm: PluginManager = kwargs.get('plugin_manager') def collect_tests(self): """ Collect tests from all modules. 
""" log_print("*** Collecting tests ***", color='blue') long_path_len = get_long_path_len(self.modules) from tiden.sshpool import AbstractSshPool self.ssh_pool = AbstractSshPool({'hosts': []}) def empty_init(self, config, ssh_pool): self.config = config self.ssh = ssh_pool self.__prepare_session_vars() for test_module in sorted(self.modules.keys()): # cleanup instance vars self.test_plan[test_module] = TidenTestPlan() self.__prepare_module_vars(test_module, fake_init=empty_init) self.__print_current_module_name() test_method_names = sorted(list(self.gen_tests(self.test_class))) self.create_test_module_attr_yaml(test_method_names) self.collect_tests0(test_method_names) self.total.update(self.test_plan[test_module]) log_print("*** Found %s tests. %s skipped. Going to 'run' %s tests ***" % ( len(self.total.all_tests), len(self.total.skipped_tests), len(self.total.tests_to_execute) ), color='blue') test_cnt = 0 # Skipped tests do not hit collect report # Now generate results for 'executed' tests for test_module in sorted(self.modules.keys()): self.__prepare_module_vars(test_module, fake_init=empty_init) test_plan = self.test_plan[self.test_module] for test_name in sorted(test_plan.tests_to_execute): test_param = test_plan.all_tests[test_name] self.__prepare_test_vars(**test_param) test_cnt = test_cnt + 1 self.result.start_testcase(self.test_class, self.current_test_name) self.__print_found_test_method_to_execute(long_path_len, test_cnt, test_module) self.result.stop_testcase('pass') def process_tests(self): """ Run all tests :return: """ log_print("*** Tests ***", color='blue') self.__prepare_session_vars() # Check requirements for applications for test_module in sorted(self.modules.keys()): module = import_module("suites.%s" % test_module) test_class_name = get_class_from_module(self.modules[test_module]['module_short_name']) test_class = getattr(module, test_class_name)(self.config, self.ssh_pool) if hasattr(test_class, 'check_requirements'): test_class.check_requirements() for test_module in sorted(self.modules.keys()): # cleanup instance vars self.test_plan[test_module] = TidenTestPlan() self.__prepare_module_vars(test_module) # find test methods: if hasattr(self.test_class, '__configurations__'): cfg_options = getattr(self.test_class, '__configuration_options__') configuration = get_actual_configuration(self.config, cfg_options) log_print("Configuration options for %s:\n%s" % (self.test_class.__class__.__name__, '\n'.join([ '\t' + cfg_option_name + '=' + str( configuration[i]) for i, cfg_option_name in enumerate(cfg_options) ])), color='blue') else: cfg_options = None configuration = None test_method_names = list(self.gen_tests(self.test_class)) self.collect_tests1(test_method_names, common_test_param={ 'configuration': configuration, 'cfg_options': cfg_options, }) test_plan = self.test_plan[self.test_module] if len(test_plan.skipped_tests) > 0: self._skip_tests() if len(test_plan.tests_to_execute) > 0: tests_to_execute = sorted(test_plan.tests_to_execute, key=get_priority_key(self.test_class)) log_print("*** Found %s tests in %s. %s skipped. 
Going to run %s tests ***\n%s" % ( len(test_plan.all_tests), self.test_class_name, len(test_plan.skipped_tests), len(test_plan.tests_to_execute), '\n'.join([ test_plan.all_tests[test_name]['test_method_name'] for test_name in tests_to_execute ])), color='blue') # Execute module setup setup_passed = self.__call_module_setup_teardown('setup') if setup_passed: self._run_tests(tests_to_execute) # Execute module teardown self.__call_module_setup_teardown('teardown') # this is for correct fail in Jenkins if not setup_passed: exit(1) def create_test_module_attr_yaml(self, test_method_names): # create attr.yaml for current_test_name in test_method_names: test_function = getattr(self.test_class, current_test_name) create_case(test_function) def __prepare_session_vars(self): self.test_plan = {} self.total = TidenTestPlan() def __prepare_module_vars(self, module_name, fake_init=None): """ Prepare per-module initialization of internal variables: Expects self.test_module be set to proper full name of module under 'suites' directory sets up self.test_class_name self.module_short_name self.test_class - creates instance of test case class resets self.all_tests, self.tests_to_execute, self.skipped_tests config fills in config['rt'], config['rt']['remote'] Creates test module working local and remote directories. Copies resources from suite directory to local test module working directory. :param module_name: name of the module to prepare :param fake_init: do not init module :return: """ self.test_module = module_name # fill new module vars self.module_short_name = self.modules[self.test_module]['module_short_name'] test_module_dir = "%s/%s" % (self.config['suite_var_dir'], self.module_short_name) remote_test_module_dir = "%s/%s" % (self.config['remote']['suite_var_dir'], self.module_short_name) self.test_class_name = get_class_from_module(self.module_short_name) # Update Tiden config self.config['rt'] = { 'test_class': self.test_class_name, 'test_method': None, 'test_module': self.test_module, 'test_module_name': self.module_short_name, 'test_module_dir': test_module_dir, 'remote': { 'test_module_dir': remote_test_module_dir, } } module = import_module("suites.%s" % self.test_module) # used for collect_only if fake_init: self.test_class = getattr(module, self.test_class_name) self.test_class.__init__ = fake_init self.test_class = getattr(module, self.test_class_name)(self.config, self.ssh_pool) else: # for process tests - prepare test directory and resources self.__create_test_module_directory(remote_test_module_dir, test_module_dir) self.test_class = getattr(module, self.test_class_name)(self.config, self.ssh_pool) if hasattr(self.test_class, 'tiden'): self.__copy_resources_to_local_test_module_directory() # Set ssh and config apps model classes self.test_class.tiden.config = self.config self.test_class.tiden.ssh = self.ssh_pool self.test_class.config = self.config self.test_class.ssh = self.ssh_pool self._save_config() def __prepare_test_vars(self, test_method_name=None, configuration=None, cfg_options=None, **kwargs): if not test_method_name: return self.test_iteration = 1 self.current_test_method = test_method_name if hasattr(self.test_class, '__configurations__'): if cfg_options is None: cfg_options = getattr(self.test_class, '__configuration_options__') if configuration is None: configuration = get_actual_configuration(self.config, cfg_options) configuration_representation = get_configuration_representation(cfg_options, configuration) self.current_test_name = self.current_test_method + 
configuration_representation else: self.current_test_name = self.current_test_method def collect_test0(self): # collect test params test_params = { 'test_name': self.current_test_name, } test_function = getattr(self.test_class, self.current_test_method) # first setup fixture if hasattr(test_function, "__setup__"): setup_fixture = getattr(test_function, "__setup__") if type(setup_fixture) == type(''): setup_method = getattr(self.test_class, setup_fixture) else: setup_method = setup_fixture test_params['setup_test_params'] = True test_params['setup_test_method'] = setup_method # next, teardown fixture if hasattr(test_function, "__teardown__"): teardown_fixture = getattr(test_function, "__teardown__") teardown_method = getattr(self.test_class, teardown_fixture) test_params['teardown_test_method'] = teardown_method # don't forget known issues if hasattr(test_function, "__known_issues__"): known_issue = getattr(test_function, "__known_issues__") test_params['known_issue'] = known_issue # test by default runs only once, # unless repeated_test_count set explicitly by decorator or framework option repeat_count = 1 # here, we check --to=repeated_test=N and --to=repeated_test.test_name=N options # and decorate test with @repeated_test automagically if that's required if self.config.get('repeated_test'): repeated_test_option = self.config['repeated_test'] re_decorate = False if type({}) != type(repeated_test_option): # if option was given as --to=repeated_test=N, re-decorate all tests re_decorate = True repeat_count = int(repeated_test_option) elif self.current_test_method in repeated_test_option.keys(): # otherwise re-decorate only if test name matches given option re_decorate = True repeat_count = int(repeated_test_option[self.current_test_method]) if re_decorate: from tiden.util import repeated_test original_test = test_function if hasattr(original_test, 'repeated_test_name'): # that test was previously decorated by @repeated_test, extract original test_names original_names = original_test.repeated_test_name decorated_test = repeated_test(repeat_count, test_names=original_names)(original_test.__func__) else: # that's a brand new decoration decorated_test = repeated_test(repeat_count)(original_test.__func__) # this magic required to convert decorated test function to method of a test class from types import MethodType setattr(self.test_class, self.current_test_method, MethodType(decorated_test, self.test_class)) test_function = getattr(self.test_class, self.current_test_method) if hasattr(test_function, 'repeated_test_count'): repeat_count = test_function.repeated_test_count repeated_test_name = test_function.repeated_test_name test_params['repeated_test_count'] = repeat_count test_params['repeated_test_name'] = repeated_test_name test_params['continue_on_fail'] = self.config.get('repeated_test_continue_on_fail', False) return test_params def _skip_tests(self): test_plan = self.test_plan[self.test_module] skipped_tests = sorted(test_plan.skipped_tests) try: for current_test in skipped_tests: test_param = test_plan.all_tests[current_test] self.__prepare_test_vars(**test_param) pad_string = self.__get_pad_string(msg=self.current_test_method) self.result.skip_testcase_no_start(self.test_class, self.current_test_name, skip_message=test_param['skip_msg'], skip_no_start=test_param['skip_no_start']) self.result.update_xunit() log_print("%s %s" % (pad_string, test_param['skip_msg']), color='yellow') finally: self.current_test_name = None self.current_test_method = None def _run_tests(self, 
tests_to_execute): test_plan = self.test_plan[self.test_module] try: for test_cnt, current_test in enumerate(tests_to_execute, start=1): test_param = test_plan.all_tests[current_test] self.__prepare_test_vars(**test_param) repeated_test_count = test_param.get('repeated_test_count', 1) repeated_test_continue_on_fail = test_param.get('continue_on_fail') test_with_iterations = True if repeated_test_count > 1 else False pad_string = self.__get_pad_string() log_print("%s started (%s from %s)" % (pad_string, test_cnt, len(tests_to_execute)), color='yellow') for self.test_iteration in range(repeated_test_count): if test_with_iterations: log_print("{} started (iteration {} from {})".format(pad_string, self.test_iteration + 1, repeated_test_count), color='yellow') test_status = self._run_test() if test_with_iterations and test_status != 'pass' and not repeated_test_continue_on_fail: self.result.update_test_name('{}_iteration_{}'.format(current_test, self.test_iteration + 1)) break finally: self.current_test_name = None self.current_test_method = None def _run_test(self): setattr(self, '_secret_report_storage', InnerReportConfig()) test_exception = None tb_msg = None test_status = 'pass' pad_string = self.__get_pad_string() started = int(time()) known_issue = self.test_plan[self.test_module].all_tests[self.current_test_name].get('known_issue') setattr(self.test_class, '_secret_report_storage', InnerReportConfig()) try: self.pm.do("before_test_method", test_module=self.test_module, test_name=self.current_test_name, artifacts=self.config.get('artifacts', {})) self.result.start_testcase(self.test_class, self.current_test_name) self.__update_config_and_save(current_method_name=self.current_test_name) # Execute test setup method self.__call_test_setup_teardown('setup') # self.__print_with_format() with Step(self, 'Execution'): try: call_method(self.test_class, self.current_test_method) finally: self.__set_child_steps_to_parent() self.__save_logs() log_print(f"{pad_string} passed {exec_time(started)}", color='green') except (AssertionError, TidenException) as e: test_status = 'fail' test_exception = e tb_msg = traceback.format_exc() except Exception as e: test_status = 'error' test_exception = e tb_msg = traceback.format_exc() finally: if test_status != 'pass': log_print(tb_msg, color='red') log_print("{} {} {}{}".format(pad_string, test_status, exec_time(started), known_issue_str(known_issue)), color='red') self.result.stop_testcase( test_status, e=test_exception, tb=tb_msg, known_issue=known_issue, run_info=self.test_class.get_run_info() if hasattr(self.test_class, 'get_run_info') else None ) # Execute test teardown method self.__call_test_setup_teardown('teardown') self.pm.do('after_test_method', test_status=test_status, exception=test_exception, stacktrace=tb_msg, known_issue=known_issue, description=getattr(self.test_class, self.current_test_method, lambda: None).__doc__, inner_report_config=getattr(self, '_secret_report_storage')) # Kill java process if teardown function didn't kill nodes if not hasattr(self.test_class, 'keep_ignite_between_tests'): kill_stalled_java(self.ssh_pool) return test_status @step('logs') def __save_logs(self): test_dir = self.config.get('rt', {}).get('remote', {}).get('test_dir') if 'WardReport' in self.config.get('plugins', []): report_config = self.config['plugins']['WardReport'] files_receiver_url = report_config['files_url'] upload_logs = report_config['upload_logs'] else: return if test_dir: try: for host_ip, output_lines in self.ssh_pool.exec([f"ls 
{test_dir}"]).items(): with Step(self, host_ip): for line in output_lines: file_name: str for file_name in line.split('\n'): if file_name and file_name.endswith('.log'): send_file_name = f'{uuid4()}_{file_name}' add_attachment(self, file_name, send_file_name, AttachmentType.FILE) if upload_logs: cmd = f'cd {test_dir}; ' \ f'curl -H "filename: {send_file_name}" ' \ f'-F "file=@{file_name};filename={file_name}" ' \ f'{files_receiver_url}/files/add' self.ssh_pool.exec_on_host(host_ip, [cmd]) except: log_print(f'Failed to send report. \n{format_exc()}', color='pink') def __copy_resources_to_local_test_module_directory(self): """ Copy resources in test resource directory :return: """ test_resource_dir = "%s/res" % self.config['rt']['test_module_dir'] if not path.exists(test_resource_dir): mkdir(test_resource_dir) self.config['rt']['resource_dir'] = "%s/res/%s" % (self.config['suite_dir'], self.module_short_name[5:]) for file in glob("%s/*" % self.config['rt']['resource_dir']): if path.isfile(file): copyfile(file, f"{test_resource_dir}/{basename(file)}") self.config['rt']['test_resource_dir'] = unix_path(test_resource_dir) def __create_test_module_directory(self, remote_test_module_dir, test_module_dir): mkdir(test_module_dir) self.ssh_pool.exec([f'mkdir -p {remote_test_module_dir}']) @step('{method_name}') def __call_test_setup_teardown(self, method_name): method_to_execute = None try: self._call_plugin_manager(f'before_test_method_{method_name}') all_tests = self.test_plan[self.test_module].all_tests if all_tests[self.current_test_name].get(f'{method_name}_test_method'): method_to_execute = all_tests[self.current_test_name].get(f'{method_name}_test_method') self.__print_with_format(msg=str(method_to_execute.__name__)) try: if all_tests[self.current_test_name].get(f'{method_name}_test_params'): method_to_execute(self.test_class) else: method_to_execute() except Exception as e: log_print(f'!!! Exception in {method_name} code !!!', color='red') log_print(traceback.format_exc()) try: self.__save_logs() except: log_print(f'Failed to get logs\n{traceback.format_exc()}', color='pink') # if exception in setup method then re-raise the exception as we should fail the test if method_name == 'setup': raise e finally: self.__set_child_steps_to_parent() self._call_plugin_manager(f'after_test_method_{method_name}') def __set_child_steps_to_parent(self): exec_report: InnerReportConfig = getattr(self.test_class, '_secret_report_storage', None) test_report: InnerReportConfig = getattr(self, '_secret_report_storage') idx_to_add = None for idx, test_step in enumerate(test_report.steps): if test_step['status'] is None: idx_to_add = idx break test_report.steps[idx_to_add]['children'] = exec_report.steps + test_report.steps[idx_to_add].get('children', []) title = getattr(getattr(self.test_class, self.current_test_method), '__report_title__', None) suites = getattr(getattr(self.test_class, self.current_test_method), '__report_suites__', None) if title: test_report.title = title test_report.suites = suites setattr(self, '_secret_report_storage', test_report) setattr(self.test_class, '_secret_report_storage', InnerReportConfig()) def __call_module_setup_teardown(self, fixture_name): """ Execute test module setup/teardown fixture. 
:param fixture_name: either 'setup' or 'teardown' :return: """ self._call_plugin_manager('before_test_class_%s' % fixture_name) fixture_passed = True try: if hasattr(self.test_class, fixture_name): started = time() try: self.__print_with_format('started', current_method_name=fixture_name) self.__update_config_and_save(current_method_name=fixture_name) # Execute setup or teardown method call_method(self.test_class, fixture_name) self.__print_with_format('finished in %s sec' % (int(time() - started)), current_method_name=fixture_name) # except (AssertionError, TidenException) as e: except Exception as e: fixture_passed = False self.__print_with_format('failed in %s sec' % (int(time() - started)), current_method_name=fixture_name) log_print('Exception in %s.%s.%s: %s\n%s' % (self.test_module, self.test_class_name, fixture_name, str(e), str(traceback.format_exc())), color='red') finally: self._call_plugin_manager('after_test_class_%s' % fixture_name) return fixture_passed def _call_plugin_manager(self, execution_point): args = [self.test_module, self.test_class] if self.current_test_method: args.append(self.current_test_method) self.pm.do(execution_point, *args) def __update_config_and_save(self, current_method_name=None): test_method = current_method_name if current_method_name else self.current_test_method test_method_name = test_method.split('(')[0] if '(' in test_method else test_method test_dir_name = test_method_name all_tests = self.test_plan[self.test_module].all_tests # cause of repeated_tests decorator if all_tests.get(test_method) and all_tests[test_method].get('repeated_test_name'): test_dir_name = '{}_{}'.format( test_method_name, all_tests[test_method].get('repeated_test_name')[self.test_iteration]) self.config['rt']['test_method'] = test_method_name self.config['rt']['remote']['test_dir'] = "{}/{}/{}".format( self.config['rt']['remote']['test_module_dir'], self.config['rt']['test_class'], test_dir_name ) self.config['rt']['test_dir'] = "{}/{}/{}".format( self.config['rt']['test_module_dir'], self.config['rt']['test_class'], test_dir_name) try: create_remote_dir = [ 'mkdir -p %s/%s/%s' % (self.config['rt']['remote']['test_module_dir'], self.test_class_name, str(test_dir_name)), 'ln -sfn %s %s/current_test_directory' % (self.config['rt']['remote']['test_module_dir'], self.config['environment']['home']) ] self.ssh_pool.exec(create_remote_dir) except Exception: log_print("Can't create symlink to current test", color='red') self._save_config() def _check_test_for_skip(self): attribs = [] skip_test = False skip_msg = None skip_no_start = False test_function = getattr(self.test_class, self.current_test_method) if hasattr(test_function, "__attrib__"): attribs = getattr(test_function, "__attrib__") attribs.append(str(self.current_test_method)) # if attr is passed to runner and test is not marked with one of the attribute # then skip it. 
if 'mute' in attribs: skip_msg = 'skipped cause test is MUTED' known_issue = None if hasattr(test_function, "__known_issues__"): known_issue = getattr(test_function, "__known_issues__") if known_issue: skip_msg = '{} cause of {}'.format(skip_msg, known_issue) skip_test = True skip_no_start = True elif self.config.get('attrib') and should_be_skipped(self.config.get('attrib'), attribs, self.config.get('attr_match', 'any')): skip_msg = 'skipped cause of attrib mismatch' skip_test = True skip_no_start = True if hasattr(test_function, "__skipped__"): skip_msg = 'skipped cause of %s' % test_function.__skipped_message__ skip_test = True if hasattr(test_function, "__skip_cond__"): skip_condition = getattr(test_function, "__skip_cond__") conditions_met, skip_message = skip_condition(self.config) if not conditions_met: skip_msg = 'skipped cause of %s' % skip_message skip_test = True if hasattr(test_function, "__skip_conds__") and \ len(test_function.__skip_conds__) > 0: skip_conditions = test_function.__skip_conds__ for skip_condition in skip_conditions: conditions_met, skip_message = skip_condition(self.test_class) if not conditions_met: skip_msg = 'skipped cause of %s' % skip_message skip_test = True return skip_test, skip_msg, skip_no_start def get_tests_results(self): return self.result def _save_config(self): write_yaml_file(self.config['config_path'], self.config) @staticmethod def gen_tests(test_class): """ Generates all test method of given test class :param test_class: :return: """ for class_attr in dir(test_class): if class_attr.startswith('test_'): yield class_attr def collect_tests0(self, test_method_names): """ Collect given set of tests from test module for all configurations :param test_method_names: :return: """ if not hasattr(self.test_class, '__configurations__'): self.collect_tests1(test_method_names) else: cfg_options = getattr(self.test_class, '__configuration_options__').copy() configurations = getattr(self.test_class, '__configurations__').copy() for configuration in configurations: # set configuration options from given configuration to Tiden config, # so that test can check options and skip itself set_configuration_options(cfg_options, self.config, configuration) self.collect_tests1(test_method_names, common_test_param={ 'configuration': configuration, 'cfg_options': cfg_options, }) def collect_tests1(self, test_method_names, common_test_param={}): """ Collect given tests from current test module :param test_method_names: :param common_test_param: :return: """ try: test_plan = self.test_plan[self.test_module] for test_method_name in test_method_names: self.__prepare_test_vars(test_method_name, **common_test_param) test_param = { 'test_method_name': test_method_name, } is_skipped, skip_msg, skip_no_start = self._check_test_for_skip() test_param.update(self.collect_test0()) repeat_count = test_param.get('repeated_test_count', 1) if repeat_count > 0: if repeat_count == 1: # don't rename tests when only one iteration requested test_param['repeated_test_name'] = [] else: # rare case, skip by --to=repeated_test.test_name=0 is_skipped = True skip_msg = 'skipped due to repeated_test iterations <= 0' skip_no_start = False if is_skipped: test_param.update({ 'skip_msg': skip_msg, 'skip_no_start': skip_no_start, }) test_plan.skipped_tests.append(self.current_test_name) else: if common_test_param: test_param.update(common_test_param) test_plan.tests_to_execute.append(self.current_test_name) test_plan.all_tests[self.current_test_name] = test_param.copy() finally: self.current_test_method 
= None self.current_test_name = None def __print_found_test_method_to_execute(self, long_path_len, test_cnt, test_module): method_long_name = "%s.%s.%s " % (test_module, self.test_class_name, self.current_test_name) pad_string = method_long_name.ljust(long_path_len, '.') log_print("%s found (%s from %s)" % (pad_string, test_cnt, len(self.total.tests_to_execute)), color='yellow') def __print_with_format(self, msg='', current_method_name=''): if not current_method_name: if self.current_test_method: current_method_name = self.current_test_method else: current_method_name = '' log_print("[{}][.{}.{}] {}".format( datetime.now().isoformat()[11:-7], self.test_class_name, current_method_name, msg)) def __print_current_module_name(self): log_print("[%s][%s]" % ( datetime.now().isoformat()[11:-7], self.test_module)) def __get_pad_string(self, msg=None): return ("%s.%s.%s " % ( self.test_module, self.test_class_name, msg if msg else self.current_test_method)) \ .ljust(self.long_path_len, '.')
1.34375
1
changes/api/serializer/models/logsource.py
alex/changes
1
4368
from changes.api.serializer import Serializer, register
from changes.models.log import LogSource


@register(LogSource)
class LogSourceSerializer(Serializer):
    def serialize(self, instance, attrs):
        return {
            'id': instance.id.hex,
            'job': {
                'id': instance.job_id.hex,
            },
            'name': instance.name,
            'step': instance.step,
            'dateCreated': instance.date_created,
        }
1.140625
1
agent.py
kapzlok2408/Pokemon-Showdown-Node-Bot
0
4384
import gym
import gym_pokemon
import random

if __name__ == "__main__":
    env = gym.make("Pokemon-v0")
    total_reward = 0.0
    total_steps = 0
    obs = env.reset()

    while True:
        action = random.randint(-1,8)
        obs, reward, done, _ = env.step(action)
        total_reward += reward
        total_steps += 1
        print("Currently %d steps, total reward of %.2f" % (total_steps, total_reward))
        if done:
            break
2.046875
2
test/spec/test_spec.py
raghu1121/SLM-Lab
1
4408
from flaky import flaky from slm_lab.experiment.control import Trial from slm_lab.experiment.monitor import InfoSpace from slm_lab.lib import util from slm_lab.spec import spec_util import os import pandas as pd import pytest import sys # helper method to run all tests in test_spec def run_trial_test(spec_file, spec_name=False): spec = spec_util.get(spec_file, spec_name) spec = spec_util.override_test_spec(spec) info_space = InfoSpace() info_space.tick('trial') trial = Trial(spec, info_space) trial_data = trial.run() assert isinstance(trial_data, pd.DataFrame) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_mlp_cartpole'), ('experimental/reinforce.json', 'reinforce_rnn_cartpole'), # ('experimental/reinforce.json', 'reinforce_conv_breakout'), ]) def test_reinforce(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_mlp_pendulum'), ('experimental/reinforce.json', 'reinforce_rnn_pendulum'), ]) def test_reinforce_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/a2c.json', 'a2c_mlp_shared_cartpole'), ('experimental/a2c.json', 'a2c_mlp_separate_cartpole'), ('experimental/a2c.json', 'a2c_rnn_shared_cartpole'), ('experimental/a2c.json', 'a2c_rnn_separate_cartpole'), # ('experimental/a2c.json', 'a2c_conv_shared_breakout'), # ('experimental/a2c.json', 'a2c_conv_separate_breakout'), ('experimental/a2c.json', 'a2c_mlp_concat_cartpole'), ]) def test_a2c(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/a2c.json', 'a2c_mlp_shared_pendulum'), ('experimental/a2c.json', 'a2c_mlp_separate_pendulum'), ('experimental/a2c.json', 'a2c_rnn_shared_pendulum'), ('experimental/a2c.json', 'a2c_rnn_separate_pendulum'), ]) def test_a2c_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo.json', 'ppo_mlp_shared_cartpole'), ('experimental/ppo.json', 'ppo_mlp_separate_cartpole'), ('experimental/ppo.json', 'ppo_rnn_shared_cartpole'), ('experimental/ppo.json', 'ppo_rnn_separate_cartpole'), # ('experimental/ppo.json', 'ppo_conv_shared_breakout'), # ('experimental/ppo.json', 'ppo_conv_separate_breakout'), ]) def test_ppo(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo.json', 'ppo_mlp_shared_pendulum'), ('experimental/ppo.json', 'ppo_mlp_separate_pendulum'), ('experimental/ppo.json', 'ppo_rnn_shared_pendulum'), ('experimental/ppo.json', 'ppo_rnn_separate_pendulum'), ]) def test_ppo_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_cartpole'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_cartpole'), ]) def test_ppo_sil(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ppo_sil.json', 'ppo_sil_mlp_shared_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_mlp_separate_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_shared_pendulum'), ('experimental/ppo_sil.json', 'ppo_sil_rnn_separate_pendulum'), ]) def test_ppo_sil_cont(spec_file, 
spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sil.json', 'sil_mlp_shared_cartpole'), ('experimental/sil.json', 'sil_mlp_separate_cartpole'), ('experimental/sil.json', 'sil_rnn_shared_cartpole'), ('experimental/sil.json', 'sil_rnn_separate_cartpole'), # ('experimental/sil.json', 'sil_conv_shared_breakout'), # ('experimental/sil.json', 'sil_conv_separate_breakout'), ]) def test_sil(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sil.json', 'sil_mlp_shared_pendulum'), ('experimental/sil.json', 'sil_mlp_separate_pendulum'), ('experimental/sil.json', 'sil_rnn_shared_pendulum'), ('experimental/sil.json', 'sil_rnn_separate_pendulum'), ]) def test_sil_cont(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/sarsa.json', 'sarsa_mlp_boltzmann_cartpole'), ('experimental/sarsa.json', 'sarsa_mlp_epsilon_greedy_cartpole'), ('experimental/sarsa.json', 'sarsa_rnn_boltzmann_cartpole'), ('experimental/sarsa.json', 'sarsa_rnn_epsilon_greedy_cartpole'), # ('experimental/sarsa.json', 'sarsa_conv_boltzmann_breakout'), # ('experimental/sarsa.json', 'sarsa_conv_epsilon_greedy_breakout'), ]) def test_sarsa(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dqn.json', 'vanilla_dqn_cartpole'), ('experimental/dqn.json', 'dqn_boltzmann_cartpole'), ('experimental/dqn.json', 'dqn_epsilon_greedy_cartpole'), ('experimental/dqn.json', 'drqn_boltzmann_cartpole'), ('experimental/dqn.json', 'drqn_epsilon_greedy_cartpole'), # ('experimental/dqn.json', 'dqn_boltzmann_breakout'), # ('experimental/dqn.json', 'dqn_epsilon_greedy_breakout'), ('experimental/dqn.json', 'dqn_stack_epsilon_greedy_lunar'), ]) def test_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/ddqn.json', 'ddqn_boltzmann_cartpole'), ('experimental/ddqn.json', 'ddqn_epsilon_greedy_cartpole'), ('experimental/ddqn.json', 'ddrqn_boltzmann_cartpole'), ('experimental/ddqn.json', 'ddrqn_epsilon_greedy_cartpole'), # ('experimental/ddqn.json', 'ddqn_boltzmann_breakout'), # ('experimental/ddqn.json', 'ddqn_epsilon_greedy_breakout'), ]) def test_ddqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_cartpole'), ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_cartpole'), # ('experimental/dueling_dqn.json', 'dueling_dqn_boltzmann_breakout'), # ('experimental/dueling_dqn.json', 'dueling_dqn_epsilon_greedy_breakout'), ]) def test_dueling_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/hydra_dqn.json', 'hydra_dqn_boltzmann_cartpole'), ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole'), # ('experimental/hydra_dqn.json', 'hydra_dqn_epsilon_greedy_cartpole_2dball'), ]) def test_hydra_dqn(spec_file, spec_name): run_trial_test(spec_file, spec_name) @flaky @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/dqn.json', 'dqn_pong'), # ('experimental/a2c.json', 'a2c_pong'), ]) def test_atari(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('experimental/reinforce.json', 'reinforce_conv_vizdoom'), ]) def 
test_reinforce_vizdoom(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('base.json', 'base_case_unity'), ('base.json', 'base_case_openai'), ('random.json', 'random_cartpole'), ('random.json', 'random_pendulum'), # ('base.json', 'multi_agent'), # ('base.json', 'multi_agent_multi_env'), ]) def test_base(spec_file, spec_name): run_trial_test(spec_file, spec_name) @pytest.mark.parametrize('spec_file,spec_name', [ ('base.json', 'multi_body'), ('base.json', 'multi_env'), ]) def test_base_multi(spec_file, spec_name): run_trial_test(spec_file, spec_name)
1.289063
1
FastLinear/generate_memory_bank.py
WangFeng18/dino
0
4440
import os from tqdm import tqdm import torch.backends.cudnn as cudnn import torch from datasets import ImageNetInstance, ImageNetInstanceLMDB from torchvision import transforms import argparse from BaseTaskModel.task_network import get_moco_network, get_swav_network, get_selfboost_network, get_minmaxent_network, get_simclr_network, get_sup_network, get_dino_network from torch.utils.data import DataLoader from PIL import ImageFile, Image import torch.distributed as dist from lars import * ImageFile.LOAD_TRUNCATED_IMAGES = True import warnings warnings.filterwarnings('ignore') def concat_all_gather(tensor): """ Performs all_gather operation on the provided tensors. *** Warning ***: torch.distributed.all_gather has no gradient. """ tensors_gather = [torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size())] torch.distributed.all_gather(tensors_gather, tensor, async_op=False) output = torch.cat(tensors_gather, dim=0) return output def main(): parser = argparse.ArgumentParser("The first stage of BoostrapSelfSup") parser.add_argument('--local_rank', default=-1, type=int, help='node rank for distributed parallel') parser.add_argument("--task", type=str, default="moco", help="the pretraining models") parser.add_argument("--pretrained_path", type=str, default="", help="the pretraining models") parser.add_argument("--save_path", type=str, default="", help="where to save the memory_bank") parser.add_argument("--backbone", type=str, default="resnet50") parser.add_argument("--data_path", type=str, default="~/ILSVRC2012/", help="the data path") parser.add_argument("--batch_size", type=int, default=32, help="batch size") parser.add_argument("--img_size", type=int, default=224, help="image size") parser.add_argument("--feat_dim", type=int, default=128, help="feat dimension") parser.add_argument("--feature_layer", type=str, default='lowdim', help="feature layer") parser.add_argument('--use-lmdb', action='store_true') args = parser.parse_args() pretrained_path = os.path.expanduser(args.pretrained_path) save_path = os.path.expanduser(args.save_path) data_path = os.path.expanduser(args.data_path) batch_size = args.batch_size feat_dim = args.feat_dim dist.init_process_group(backend='nccl') torch.cuda.set_device(args.local_rank) # network = ResNet(50, frozen_stages=4) if args.task == 'moco': network = get_moco_network(pretrained_path, feature_layer=args.feature_layer) elif args.task == 'swav': network = get_swav_network(pretrained_path, feature_layer=args.feature_layer) elif args.task == 'selfboost': network = get_selfboost_network(pretrained_path, feature_layer=args.feature_layer) elif args.task == 'minmaxent': network = get_minmaxent_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task == 'dino': network = get_dino_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task == 'simclr': network = get_simclr_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) elif args.task == 'sup': network = get_sup_network(args.backbone, pretrained_path, feature_layer=args.feature_layer) else: raise NotImplementedError network.cuda(args.local_rank) network = torch.nn.parallel.DistributedDataParallel(network, device_ids=[args.local_rank]) cudnn.benchmark = True augmentation = transforms.Compose([ transforms.Resize(int(256*args.img_size/224), interpolation=Image.BICUBIC), transforms.CenterCrop(args.img_size), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]), ]) if 
args.use_lmdb: train_dataset = ImageNetInstanceLMDB(root=data_path, list_file='train.lmdb', transform=augmentation) val_dataset = ImageNetInstanceLMDB(root=data_path, list_file='val.lmdb', transform=augmentation) else: train_dataset = ImageNetInstance(root=os.path.join(data_path, 'train'), transform=augmentation) val_dataset = ImageNetInstance(root=os.path.join(data_path, 'val'), transform=augmentation) train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=False, rank=args.local_rank) val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset, shuffle=False, rank=args.local_rank) n_train_points = len(train_dataset) n_val_points = len(val_dataset) train_dataloader = DataLoader(train_dataset, batch_size=batch_size, sampler=train_sampler, pin_memory=True, num_workers=4) val_dataloader = DataLoader(val_dataset, batch_size=batch_size, sampler=val_sampler, pin_memory=True, num_workers=4) print("Initializing train memory bank: {} points.".format(n_train_points)) train_memory_bank = torch.zeros(n_train_points, feat_dim).to("cpu").detach() print("Initializing val memory bank: {} points.".format(n_val_points)) val_memory_bank = torch.zeros(n_val_points, feat_dim).to("cpu").detach() network.eval() train_sampler.set_epoch(0) val_sampler.set_epoch(0) for data in tqdm(train_dataloader): idx, img, _ = data idx = idx.cuda(args.local_rank, non_blocking=True) img = img.cuda(args.local_rank, non_blocking=True) if True: #args.backbone.startswith('resnet'): feature = network(img) else: feature = network.module.get_intermediate_layers(img, 4) feature = [x[:, 0] for x in feature] feature = torch.cat(feature, dim=-1) feature = concat_all_gather(feature.contiguous()) idx = concat_all_gather(idx) with torch.no_grad(): train_memory_bank[idx,:] = feature.detach().cpu() for data in tqdm(val_dataloader): idx, img, _ = data idx = idx.cuda(args.local_rank, non_blocking=True) img = img.cuda(args.local_rank, non_blocking=True) if True: #args.backbone.startswith('resnet'): feature = network(img) else: feature = network.module.get_intermediate_layers(img, 4) feature = [x[:, 0] for x in feature] feature = torch.cat(feature, dim=-1) feature = concat_all_gather(feature.contiguous()) idx = concat_all_gather(idx) with torch.no_grad(): val_memory_bank[idx,:] = feature.detach().cpu() if args.local_rank == 0: torch.save( {'train_memory_bank': train_memory_bank, 'val_memory_bank': val_memory_bank }, args.save_path ) if __name__ == '__main__': main()
1.976563
2
examples/django_mongoengine/bike/models.py
pfrantz/graphene-mongo
260
4456
from mongoengine import Document
from mongoengine.fields import (
    FloatField,
    StringField,
    ListField,
    URLField,
    ObjectIdField,
)


class Shop(Document):
    meta = {"collection": "shop"}
    ID = ObjectIdField()
    name = StringField()
    address = StringField()
    website = URLField()


class Bike(Document):
    meta = {"collection": "bike"}
    ID = ObjectIdField()
    name = StringField()
    brand = StringField()
    year = StringField()
    size = ListField(StringField())
    wheel_size = FloatField()
    type = StringField()
1.3125
1
pyqt/getting_started/close_window.py
CospanDesign/python
5
4464
#!/usr/bin/python

import sys
from PyQt4 import QtGui
from PyQt4 import QtCore


class Example(QtGui.QWidget):

    def __init__(self):
        super(Example, self).__init__()
        self.initUI()

    def initUI(self):
        qbtn = QtGui.QPushButton('Quit', self)
        qbtn.clicked.connect(QtCore.QCoreApplication.instance().quit)
        qbtn.resize(qbtn.sizeHint())

        self.setGeometry(300, 300, 250, 150)
        self.setWindowTitle('Quit Button')
        self.show()


def main():
    app = QtGui.QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
1.59375
2
api/skill/serializer.py
zaubermaerchen/imas_cg_api
2
4472
# coding: utf-8
from rest_framework import serializers

from data.models import Skill, SkillValue


class ListSerializer(serializers.ModelSerializer):
    skill_value_list = serializers.SerializerMethodField(read_only=True)

    class Meta:
        model = Skill
        fields = [
            'skill_id',
            'target_unit',
            'target_member',
            'target_type',
            'target_num',
            'target_param',
            'skill_value_id',
            'skill_value_list',
            'comment'
        ]

    @staticmethod
    def get_skill_value_list(obj):
        return SkillValue.get_value_list(obj.skill_value_id)


class Costar(object):
    def __init__(self, name, count):
        self.name = name
        self.count = count


class CostarSerializer(serializers.Serializer):
    name = serializers.CharField(max_length=255)
    count = serializers.IntegerField()

    def create(self, validated_data):
        return Costar(**validated_data)

    def update(self, instance, validated_data):
        instance.name = validated_data.get('name', instance.name)
        instance.count = validated_data.get('count', instance.count)
        return instance
1.710938
2
tests/unit/utils/test_validators.py
kajusK/HiddenPlaces
0
4488
"""Unit tests for app.validators. """ from wtforms import ValidationError import flask from pytest import raises from app.utils.validators import password_rules, image_file, allowed_file class DummyField(object): """Dummy field object to emulate wtforms field.""" def __init__(self, data=None, errors=(), raw_data=None): self.data = data self.errors = list(errors) self.raw_data = raw_data def gettext(self, string): return string def ngettext(self, singular, plural, n): return singular class DummyForm(dict): """Dummy form object to emulate wtforms form.""" pass class DummyFile(object): """Dummy file like class to emulate uploaded file handler.""" def __init__(self, filename): self.filename = filename def __repr__(self): return self.filename def _run_validator_check(subtests, validator, valid, invalid): """Runs tests again validator with valid and invalid inputs. Args: subtest: Subtests fixture. validator: Validator instance to run tests against valid: List of valid inputs invalid: List of invalid inputs """ field = DummyField() for item in valid: field.data = item with subtests.test(item=item): validator(DummyForm(), field) for item in invalid: field.data = item with subtests.test(item=item): with raises(ValidationError): validator(DummyForm(), field) def test_allowed_file(subtests, req_context): validator = allowed_file() extensions = ['exe', 'html'] valid = ['foo.jpg', 'exe', 'foo.exe.zip', 'foo'] invalid = ['foo.exe', 'foo.EXE', 'foo.pdf.exe', 'foo.html'] valid = [DummyFile(x) for x in valid] invalid = [DummyFile(x) for x in invalid] flask.current_app.config['DISABLED_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_allowed_file_multiple(subtests, req_context): validator = allowed_file() extensions = ['exe', 'html'] valid = ['foo.jpg', 'exe', 'foo.exe.zip', 'foo'] invalid = ['foo.exe', 'foo.EXE', 'foo.pdf.exe', 'foo.html'] valid = [[DummyFile(x) for x in valid], [DummyFile(valid[0])], [DummyFile(valid[0]), DummyFile(valid[1])]] invalid = [[DummyFile(x) for x in invalid], [DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['DISABLED_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_allowed_file_message(req_context): validator = allowed_file(message="custom message") field = DummyField() field.data = DummyFile("blah.foo") flask.current_app.config['DISABLED_EXTENSIONS'] = ['foo'] with flask.current_app.test_request_context(): with raises(ValidationError) as e: validator(DummyForm(), field) assert str(e.value) == "custom message" def test_image_file(subtests, req_context): validator = image_file() extensions = ['jpg', 'png', 'tiff'] valid = ['foo.jpg', 'foo.JPG', 'bar.png', 'blah.tiff', 'a.foo.jpg'] invalid = ['foo', 'jpg', 'foo.pdf', 'foo.jpg.pdf', '', '.jpg', 'o.gif'] valid = [DummyFile(x) for x in valid] invalid = [DummyFile(x) for x in invalid] flask.current_app.config['IMAGE_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_image_file_multiple(subtests, req_context): validator = image_file() extensions = ['jpg', 'png', 'tiff'] valid = ['foo.jpg', 'foo.JPG', 'bar.png', 'blah.tiff', 'a.foo.jpg'] invalid = ['foo', 'jpg', 'foo.pdf', 'foo.jpg.pdf', '', '.jpg', 'o.gif'] valid = [[DummyFile(x) for x in valid], [DummyFile(valid[0])], [DummyFile(valid[0]), DummyFile(valid[1])]] invalid 
= [[DummyFile(x) for x in invalid], [DummyFile(invalid[0])], [DummyFile(invalid[0]), DummyFile(invalid[1])]] flask.current_app.config['IMAGE_EXTENSIONS'] = extensions with flask.current_app.test_request_context(): _run_validator_check(subtests, validator, valid, invalid) def test_image_file_message(req_context): validator = image_file(message="custom message") field = DummyField() field.data = DummyFile("blah") flask.current_app.config['IMAGE_EXTENSIONS'] = ['foo'] with flask.current_app.test_request_context(): with raises(ValidationError) as e: validator(DummyForm(), field) assert str(e.value) == "custom message" def test_password_rules_length(subtests): validator = password_rules(length=6, upper=None, lower=None, numeric=None, special=None) valid = ["as123.21", "abcdef", "sdadadaswasasa", "1234567", "...,.,..,", "AAAAAAA", "AbCdEf"] invalid = ["abc", "123", "....", "aBcDe", "a1.V3"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_upper(subtests): validator = password_rules(length=6, upper=2, lower=None, numeric=None, special=None) valid = ["abcDEf", "HellOO", "ABCDEZ", "A.b#3CZ", "ADSDSA"] invalid = ["abcdEf", "helloo", "A231sdsd"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_lower(subtests): validator = password_rules(length=6, upper=None, lower=3, numeric=None, special=None) valid = ["abcdefg", "axzBAR", "123abcdsa", "AbCdEfGh", "..as..2ds.."] invalid = ["foOBAR", "123ABcdSA", "1a2b.C#"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_numeric(subtests): validator = password_rules(length=6, upper=None, lower=None, numeric=2, special=None) valid = ["1bcd4A.d", "123456", "a?9#.0"] invalid = ["2ds.#<", "abcdef", "ABCDEF", "x2U.'Q"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_special(subtests): validator = password_rules(length=6, upper=None, lower=None, numeric=None, special=3) valid = ["ab.?123!", ".#@dS9", "abcdef123><?"] invalid = ["abcdef", ".23134", "AbCd123,]"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_all(subtests): validator = password_rules(length=6, upper=2, lower=1, numeric=1, special=1) valid = ["ABc1.2", "abcDEF123#%^", "a2B.C?"] invalid = ["helloo", "ABCDEF", "Ab1.?c"] _run_validator_check(subtests, validator, valid, invalid) def test_password_rules_message(subtests): validator = password_rules(length=100, message="custom message") field = DummyField() field.data = "wrong" with raises(ValidationError) as e: validator(DummyForm(), field) assert str(e.value) == "custom message"
1.851563
2
src/regrtest.py
ucsd-progsys/csolve-bak
0
4504
#!/usr/bin/python # Copyright (c) 2009 The Regents of the University of California. All rights reserved. # # Permission is hereby granted, without written agreement and without # license or royalty fees, to use, copy, modify, and distribute this # software and its documentation for any purpose, provided that the # above copyright notice and the following two paragraphs appear in # all copies of this software. # # IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY # FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES # ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN # IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY # OF SUCH DAMAGE. # # THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY # AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS # ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION # TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. import time, subprocess, optparse, sys, socket, os import misc.rtest as rtest solve = "./csolve -c".split() null = open("/dev/null", "w") now = (time.asctime(time.localtime(time.time()))).replace(" ","_") logfile = "../tests/logs/regrtest_results_%s_%s" % (socket.gethostname (), now) argcomment = "//! run with " def logged_sys_call(args, out=None, err=None): print "exec: " + " ".join(args) return subprocess.call(args, stdout=out, stderr=err) def solve_quals(file,bare,time,quiet,flags): if quiet: out = null else: out = None if time: time = ["time"] else: time = [] hygiene_flags = [("--csolveprefix=%s" % (file)), "-o", "/dev/null"] out = open(file + ".log", "w") rv = logged_sys_call(time + solve + flags + hygiene_flags + [file], out) out.close() return rv def run_script(file,quiet): if quiet: out = null else: out = None return logged_sys_call(file, out) def getfileargs(file): f = open(file) l = f.readline() f.close() if l.startswith(argcomment): return l[len(argcomment):].strip().split(" ") else: return [] class Config (rtest.TestConfig): def __init__ (self, dargs, testdirs, logfile, threadcount): rtest.TestConfig.__init__ (self, testdirs, logfile, threadcount) self.dargs = dargs if os.path.exists("../tests/postests/coreutils/"): logged_sys_call(["../tests/postests/coreutils/makeCoreUtil.sh", "init"], None) def run_test (self, file): os.environ['CSOLVEFLAGS'] = self.dargs if file.endswith(".c"): fargs = getfileargs(file) return solve_quals(file, True, False, True, fargs) elif file.endswith(".sh"): return run_script(file, True) def is_test (self, file): return (file.endswith(".sh") and os.access(file, os.X_OK)) \ or (file.endswith(".c") and not file.endswith(".csolve.save.c") and not file.endswith(".ssa.c")) ##################################################################################### #testdirs = [("../postests", 0)] #testdirs = [("../negtests", 1)] #testdirs = [("../slowtests", 1)] #DEFAULT testdirs = [("../tests/postests", 0), ("../tests/negtests", [1, 2])] #testdirs = [("../tests/microtests", 0)] parser = optparse.OptionParser() parser.add_option("-t", "--threads", dest="threadcount", default=1, type=int, help="spawn n threads") parser.add_option("-o", "--opts", dest="opts", default="", type=str, help="additional arguments to csolve") parser.disable_interspersed_args() options, args = parser.parse_args() runner = rtest.TestRunner (Config (options.opts, testdirs, logfile, options.threadcount)) exit (runner.run ())
1.523438
2
venv/Lib/site-packages/sklearn/linear_model/tests/test_least_angle.py
andywu113/fuhe_predict
3
4512
import warnings from distutils.version import LooseVersion import numpy as np import pytest from scipy import linalg from sklearn.model_selection import train_test_split from sklearn.utils.testing import assert_allclose from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_warns from sklearn.utils.testing import TempMemmap from sklearn.exceptions import ConvergenceWarning from sklearn import linear_model, datasets from sklearn.linear_model.least_angle import _lars_path_residues, LassoLarsIC # TODO: use another dataset that has multiple drops diabetes = datasets.load_diabetes() X, y = diabetes.data, diabetes.target G = np.dot(X.T, X) Xy = np.dot(X.T, y) n_samples = y.size def test_simple(): # Principle of Lars is to keep covariances tied and decreasing # also test verbose output from io import StringIO import sys old_stdout = sys.stdout try: sys.stdout = StringIO() _, _, coef_path_ = linear_model.lars_path( X, y, method='lar', verbose=10) sys.stdout = old_stdout for i, coef_ in enumerate(coef_path_.T): res = y - np.dot(X, coef_) cov = np.dot(X.T, res) C = np.max(abs(cov)) eps = 1e-3 ocur = len(cov[C - eps < abs(cov)]) if i < X.shape[1]: assert ocur == i + 1 else: # no more than max_pred variables can go into the active set assert ocur == X.shape[1] finally: sys.stdout = old_stdout def test_simple_precomputed(): # The same, with precomputed Gram matrix _, _, coef_path_ = linear_model.lars_path( X, y, Gram=G, method='lar') for i, coef_ in enumerate(coef_path_.T): res = y - np.dot(X, coef_) cov = np.dot(X.T, res) C = np.max(abs(cov)) eps = 1e-3 ocur = len(cov[C - eps < abs(cov)]) if i < X.shape[1]: assert ocur == i + 1 else: # no more than max_pred variables can go into the active set assert ocur == X.shape[1] def _assert_same_lars_path_result(output1, output2): assert_equal(len(output1), len(output2)) for o1, o2 in zip(output1, output2): assert_allclose(o1, o2) @pytest.mark.parametrize('method', ['lar', 'lasso']) @pytest.mark.parametrize('return_path', [True, False]) def test_lars_path_gram_equivalent(method, return_path): _assert_same_lars_path_result( linear_model.lars_path_gram( Xy=Xy, Gram=G, n_samples=n_samples, method=method, return_path=return_path), linear_model.lars_path( X, y, Gram=G, method=method, return_path=return_path)) def test_x_none_gram_none_raises_value_error(): # Test that lars_path with no X and Gram raises exception Xy = np.dot(X.T, y) assert_raises(ValueError, linear_model.lars_path, None, y, Gram=None, Xy=Xy) def test_all_precomputed(): # Test that lars_path with precomputed Gram and Xy gives the right answer G = np.dot(X.T, X) Xy = np.dot(X.T, y) for method in 'lar', 'lasso': output = linear_model.lars_path(X, y, method=method) output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method) for expected, got in zip(output, output_pre): assert_array_almost_equal(expected, got) @pytest.mark.filterwarnings('ignore: `rcond` parameter will change') # numpy deprecation def test_lars_lstsq(): # Test that Lars gives least square solution at the end # of the path X1 = 3 * X # use un-normalized dataset clf = linear_model.LassoLars(alpha=0.) 
clf.fit(X1, y) # Avoid FutureWarning about default value change when numpy >= 1.14 rcond = None if LooseVersion(np.__version__) >= '1.14' else -1 coef_lstsq = np.linalg.lstsq(X1, y, rcond=rcond)[0] assert_array_almost_equal(clf.coef_, coef_lstsq) @pytest.mark.filterwarnings('ignore:`rcond` parameter will change') # numpy deprecation def test_lasso_gives_lstsq_solution(): # Test that Lars Lasso gives least square solution at the end # of the path _, _, coef_path_ = linear_model.lars_path(X, y, method='lasso') coef_lstsq = np.linalg.lstsq(X, y)[0] assert_array_almost_equal(coef_lstsq, coef_path_[:, -1]) def test_collinearity(): # Check that lars_path is robust to collinearity in input X = np.array([[3., 3., 1.], [2., 2., 0.], [1., 1., 0]]) y = np.array([1., 0., 0]) rng = np.random.RandomState(0) f = ignore_warnings _, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01) assert not np.isnan(coef_path_).any() residual = np.dot(X, coef_path_[:, -1]) - y assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded n_samples = 10 X = rng.rand(n_samples, 5) y = np.zeros(n_samples) _, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False, copy_Gram=False, alpha_min=0., method='lasso', verbose=0, max_iter=500) assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_)) def test_no_path(): # Test that the ``return_path=False`` option returns the correct output alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lar') alpha_, _, coef = linear_model.lars_path( X, y, method='lar', return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] def test_no_path_precomputed(): # Test that the ``return_path=False`` option with Gram remains correct alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lar', Gram=G) alpha_, _, coef = linear_model.lars_path( X, y, method='lar', Gram=G, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] def test_no_path_all_precomputed(): # Test that the ``return_path=False`` option with Gram and Xy remains # correct X, y = 3 * diabetes.data, diabetes.target G = np.dot(X.T, X) Xy = np.dot(X.T, y) alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lasso', Xy=Xy, Gram=G, alpha_min=0.9) alpha_, _, coef = linear_model.lars_path( X, y, method='lasso', Gram=G, Xy=Xy, alpha_min=0.9, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] @pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22 @pytest.mark.parametrize( 'classifier', [linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC]) def test_lars_precompute(classifier): # Check for different values of precompute G = np.dot(X.T, X) clf = classifier(precompute=G) output_1 = ignore_warnings(clf.fit)(X, y).coef_ for precompute in [True, False, 'auto', None]: clf = classifier(precompute=precompute) output_2 = clf.fit(X, y).coef_ assert_array_almost_equal(output_1, output_2, decimal=8) def test_singular_matrix(): # Test when input is a singular matrix X1 = np.array([[1, 1.], [1., 1.]]) y1 = np.array([1, 1]) _, _, coef_path = linear_model.lars_path(X1, y1) assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]]) def test_rank_deficient_design(): # consistency test that checks that LARS Lasso is handling rank # deficient input data (with n_features < rank) in the same way # as coordinate descent Lasso y = [5, 0, 5] for X in ( [[5, 0], [0, 5], [10, 10]], [[10, 10, 0], [1e-32, 0, 0], [0, 0, 1]] ): # To be able to use 
the coefs to compute the objective function, # we need to turn off normalization lars = linear_model.LassoLars(.1, normalize=False) coef_lars_ = lars.fit(X, y).coef_ obj_lars = (1. / (2. * 3.) * linalg.norm(y - np.dot(X, coef_lars_)) ** 2 + .1 * linalg.norm(coef_lars_, 1)) coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False) coef_cd_ = coord_descent.fit(X, y).coef_ obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2 + .1 * linalg.norm(coef_cd_, 1)) assert_less(obj_lars, obj_cd * (1. + 1e-8)) def test_lasso_lars_vs_lasso_cd(): # Test that LassoLars and Lasso using coordinate descent give the # same results. X = 3 * diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) for c, a in zip(lasso_path.T, alphas): if a == 0: continue lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) # similar test, with the classifiers for alpha in np.linspace(1e-2, 1 - 1e-2, 20): clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y) clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8, normalize=False).fit(X, y) err = linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3) # same test, with normalized data X = diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8) for c, a in zip(lasso_path.T, alphas): if a == 0: continue lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_lasso_cd_early_stopping(): # Test that LassoLars and Lasso using coordinate descent give the # same results when early stopping is used. # (test : before, in the middle, and in the last part of the path) alphas_min = [10, 0.9, 1e-4] for alpha_min in alphas_min: alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y) error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert_less(error, 0.01) # same test, with normalization for alpha_min in alphas_min: alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y) error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_path_length(): # Test that the path length of the LassoLars is right lasso = linear_model.LassoLars() lasso.fit(X, y) lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2]) lasso2.fit(X, y) assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_) # Also check that the sequence of alphas is always decreasing assert np.all(np.diff(lasso.alphas_) < 0) def test_lasso_lars_vs_lasso_cd_ill_conditioned(): # Test lasso lars on a very ill-conditioned design, and check that # it does not blow up, and stays somewhat close to a solution given # by the coordinate descent solver # Also test that lasso_path (using lars_path output style) gives # the same result as lars_path and previous lasso output style # under these conditions. 
rng = np.random.RandomState(42) # Generate data n, m = 70, 100 k = 5 X = rng.randn(n, m) w = np.zeros((m, 1)) i = np.arange(0, m) rng.shuffle(i) supp = i[:k] w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1) y = np.dot(X, w) sigma = 0.2 y += sigma * rng.rand(*y.shape) y = y.squeeze() lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso') _, lasso_coef2, _ = linear_model.lasso_path(X, y, alphas=lars_alphas, tol=1e-6, fit_intercept=False) assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1) def test_lasso_lars_vs_lasso_cd_ill_conditioned2(): # Create an ill-conditioned situation in which the LARS has to go # far in the path to converge, and check that LARS and coordinate # descent give the same answers # Note it used to be the case that Lars had to use the drop for good # strategy for this but this is no longer the case with the # equality_tolerance checks X = [[1e20, 1e20, 0], [-1e-32, 0, 0], [1, 1, 1]] y = [10, 10, 1] alpha = .0001 def objective_function(coef): return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2 + alpha * linalg.norm(coef, 1)) lars = linear_model.LassoLars(alpha=alpha, normalize=False) assert_warns(ConvergenceWarning, lars.fit, X, y) lars_coef_ = lars.coef_ lars_obj = objective_function(lars_coef_) coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False) cd_coef_ = coord_descent.fit(X, y).coef_ cd_obj = objective_function(cd_coef_) assert_less(lars_obj, cd_obj * (1. + 1e-8)) def test_lars_add_features(): # assure that at least some features get added if necessary # test for 6d2b4c # Hilbert matrix n = 5 H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis]) clf = linear_model.Lars(fit_intercept=False).fit( H, np.arange(n)) assert np.all(np.isfinite(clf.coef_)) def test_lars_n_nonzero_coefs(verbose=False): lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose) lars.fit(X, y) assert_equal(len(lars.coef_.nonzero()[0]), 6) # The path should be of length 6 + 1 in a Lars going down to 6 # non-zero coefs assert_equal(len(lars.alphas_), 7) @ignore_warnings def test_multitarget(): # Assure that estimators receiving multidimensional y do the right thing Y = np.vstack([y, y ** 2]).T n_targets = Y.shape[1] estimators = [ linear_model.LassoLars(), linear_model.Lars(), # regression test for gh-1615 linear_model.LassoLars(fit_intercept=False), linear_model.Lars(fit_intercept=False), ] for estimator in estimators: estimator.fit(X, Y) Y_pred = estimator.predict(X) alphas, active, coef, path = (estimator.alphas_, estimator.active_, estimator.coef_, estimator.coef_path_) for k in range(n_targets): estimator.fit(X, Y[:, k]) y_pred = estimator.predict(X) assert_array_almost_equal(alphas[k], estimator.alphas_) assert_array_almost_equal(active[k], estimator.active_) assert_array_almost_equal(coef[k], estimator.coef_) assert_array_almost_equal(path[k], estimator.coef_path_) assert_array_almost_equal(Y_pred[:, k], y_pred) @pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22 def test_lars_cv(): # Test the LassoLarsCV object by checking that the optimal alpha # increases as the number of samples increases. # This property is not actually guaranteed in general and is just a # property of the given dataset, with the given steps chosen. 
old_alpha = 0 lars_cv = linear_model.LassoLarsCV() for length in (400, 200, 100): X = diabetes.data[:length] y = diabetes.target[:length] lars_cv.fit(X, y) np.testing.assert_array_less(old_alpha, lars_cv.alpha_) old_alpha = lars_cv.alpha_ assert not hasattr(lars_cv, 'n_nonzero_coefs') @pytest.mark.filterwarnings('ignore::FutureWarning') def test_lars_cv_max_iter(): with warnings.catch_warnings(record=True) as w: rng = np.random.RandomState(42) x = rng.randn(len(y)) X = diabetes.data X = np.c_[X, x, x] # add correlated features lars_cv = linear_model.LassoLarsCV(max_iter=5) lars_cv.fit(X, y) assert len(w) == 0 def test_lasso_lars_ic(): # Test the LassoLarsIC object by checking that # - some good features are selected. # - alpha_bic > alpha_aic # - n_nonzero_bic < n_nonzero_aic lars_bic = linear_model.LassoLarsIC('bic') lars_aic = linear_model.LassoLarsIC('aic') rng = np.random.RandomState(42) X = diabetes.data X = np.c_[X, rng.randn(X.shape[0], 5)] # add 5 bad features lars_bic.fit(X, y) lars_aic.fit(X, y) nonzero_bic = np.where(lars_bic.coef_)[0] nonzero_aic = np.where(lars_aic.coef_)[0] assert_greater(lars_bic.alpha_, lars_aic.alpha_) assert_less(len(nonzero_bic), len(nonzero_aic)) assert_less(np.max(nonzero_bic), diabetes.data.shape[1]) # test error on unknown IC lars_broken = linear_model.LassoLarsIC('<unknown>') assert_raises(ValueError, lars_broken.fit, X, y) def test_lars_path_readonly_data(): # When using automated memory mapping on large input, the # fold data is in read-only mode # This is a non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/4597 splitted_data = train_test_split(X, y, random_state=42) with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test): # The following should not fail despite copy=False _lars_path_residues(X_train, y_train, X_test, y_test, copy=False) @pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22 def test_lars_path_positive_constraint(): # this is the main test for the positive parameter on the lars_path method # the estimator classes just make use of this function # we do the test on the diabetes dataset # ensure that we get negative coefficients when positive=False # and all positive when positive=True # for method 'lar' (default) and lasso # Once deprecation of LAR + positive option is done use these: # assert_raises(ValueError, linear_model.lars_path, diabetes['data'], # diabetes['target'], method='lar', positive=True) with pytest.warns(DeprecationWarning, match='broken'): linear_model.lars_path(diabetes['data'], diabetes['target'], return_path=True, method='lar', positive=True) method = 'lasso' _, _, coefs = \ linear_model.lars_path(X, y, return_path=True, method=method, positive=False) assert coefs.min() < 0 _, _, coefs = \ linear_model.lars_path(X, y, return_path=True, method=method, positive=True) assert coefs.min() >= 0 # now we gonna test the positive option for all estimator classes default_parameter = {'fit_intercept': False} estimator_parameter_map = {'LassoLars': {'alpha': 0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} @pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22 def test_estimatorclasses_positive_constraint(): # testing the transmissibility for the positive option of all estimator # classes in this same function here default_parameter = {'fit_intercept': False} estimator_parameter_map = {'LassoLars': {'alpha': 0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} for estname in estimator_parameter_map: params = default_parameter.copy() 
params.update(estimator_parameter_map[estname]) estimator = getattr(linear_model, estname)(positive=False, **params) estimator.fit(X, y) assert estimator.coef_.min() < 0 estimator = getattr(linear_model, estname)(positive=True, **params) estimator.fit(X, y) assert min(estimator.coef_) >= 0 def test_lasso_lars_vs_lasso_cd_positive(): # Test that LassoLars and Lasso using coordinate descent give the # same results when using the positive option # This test is basically a copy of the above with additional positive # option. However for the middle part, the comparison of coefficient values # for a range of alphas, we had to make an adaptations. See below. # not normalized data X = 3 * diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True) for c, a in zip(lasso_path.T, alphas): if a == 0: continue lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) # The range of alphas chosen for coefficient comparison here is restricted # as compared with the above test without the positive option. This is due # to the circumstance that the Lars-Lasso algorithm does not converge to # the least-squares-solution for small alphas, see 'Least Angle Regression' # by Efron et al 2004. The coefficients are typically in congruence up to # the smallest alpha reached by the Lars-Lasso algorithm and start to # diverge thereafter. See # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff for alpha in np.linspace(6e-1, 1 - 1e-2, 20): clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha, normalize=False, positive=True).fit(X, y) clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8, normalize=False, positive=True).fit(X, y) err = linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3) # normalized data X = diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8, positive=True) for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0 lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_R_implementation(): # Test that sklearn LassoLars implementation agrees with the LassoLars # implementation available in R (lars library) under the following # scenarios: # 1) fit_intercept=False and normalize=False # 2) fit_intercept=True and normalize=True # Let's generate the data used in the bug report 7778 y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822, -19.42109366]) x = np.array([[0.47299829, 0, 0, 0, 0], [0.08239882, 0.85784863, 0, 0, 0], [0.30114139, -0.07501577, 0.80895216, 0, 0], [-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0], [-0.69363927, 0.06754067, 0.18064514, -0.0803561, 0.40427291]]) X = x.T ########################################################################### # Scenario 1: Let's compare R vs sklearn when fit_intercept=False and # normalize=False ########################################################################### # # The R result was obtained using the following code: # # library(lars) # model_lasso_lars = lars(X, t(y), type="lasso", intercept=FALSE, # trace=TRUE, normalize=FALSE) # r = t(model_lasso_lars$beta) # r = np.array([[0, 0, 0, 0, 0, -79.810362809499026, -83.528788732782829, -83.777653739190711, -83.784156932888934, -84.033390591756657], [0, 0, 0, 0, -0.476624256777266, 0, 0, 0, 
0, 0.025219751009936], [0, -3.577397088285891, -4.702795355871871, -7.016748621359461, -7.614898471899412, -0.336938391359179, 0, 0, 0.001213370600853, 0.048162321585148], [0, 0, 0, 2.231558436628169, 2.723267514525966, 2.811549786389614, 2.813766976061531, 2.817462468949557, 2.817368178703816, 2.816221090636795], [0, 0, -1.218422599914637, -3.457726183014808, -4.021304522060710, -45.827461592423745, -47.776608869312305, -47.911561610746404, -47.914845922736234, -48.039562334265717]]) model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False, normalize=False) model_lasso_lars.fit(X, y) skl_betas = model_lasso_lars.coef_path_ assert_array_almost_equal(r, skl_betas, decimal=12) ########################################################################### ########################################################################### # Scenario 2: Let's compare R vs sklearn when fit_intercept=True and # normalize=True # # Note: When normalize is equal to True, R returns the coefficients in # their original units, that is, they are rescaled back, whereas sklearn # does not do that, therefore, we need to do this step before comparing # their results. ########################################################################### # # The R result was obtained using the following code: # # library(lars) # model_lasso_lars2 = lars(X, t(y), type="lasso", intercept=TRUE, # trace=TRUE, normalize=TRUE) # r2 = t(model_lasso_lars2$beta) r2 = np.array([[0, 0, 0, 0, 0], [0, 0, 0, 8.371887668009453, 19.463768371044026], [0, 0, 0, 0, 9.901611055290553], [0, 7.495923132833733, 9.245133544334507, 17.389369207545062, 26.971656815643499], [0, 0, -1.569380717440311, -5.924804108067312, -7.996385265061972]]) model_lasso_lars2 = linear_model.LassoLars(alpha=0, fit_intercept=True, normalize=True) model_lasso_lars2.fit(X, y) skl_betas2 = model_lasso_lars2.coef_path_ # Let's rescale back the coefficients returned by sklearn before comparing # against the R result (read the note above) temp = X - np.mean(X, axis=0) normx = np.sqrt(np.sum(temp ** 2, axis=0)) skl_betas2 /= normx[:, np.newaxis] assert_array_almost_equal(r2, skl_betas2, decimal=12) ########################################################################### @pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_copyX_behaviour(copy_X): """ Test that user input regarding copy_X is not being overridden (it was until at least version 0.21) """ lasso_lars = LassoLarsIC(copy_X=copy_X, precompute=False) rng = np.random.RandomState(0) X = rng.normal(0, 1, (100, 5)) X_copy = X.copy() y = X[:, 2] lasso_lars.fit(X, y) assert copy_X == np.array_equal(X, X_copy) @pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_fit_copyX_behaviour(copy_X): """ Test that user input to .fit for copy_X overrides default __init__ value """ lasso_lars = LassoLarsIC(precompute=False) rng = np.random.RandomState(0) X = rng.normal(0, 1, (100, 5)) X_copy = X.copy() y = X[:, 2] lasso_lars.fit(X, y, copy_X=copy_X) assert copy_X == np.array_equal(X, X_copy)
2.265625
2
src/djangoreactredux/wsgi.py
noscripter/django-react-redux-jwt-base
4
4560
""" WSGI config for django-react-redux-jwt-base project. """ import os os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoreactredux.settings.dev") from django.core.wsgi import get_wsgi_application from whitenoise.django import DjangoWhiteNoise application = get_wsgi_application() application = DjangoWhiteNoise(application)
0.871094
1
3d_Vnet/3dvnet.py
GingerSpacetail/Brain-Tumor-Segmentation-and-Survival-Prediction-using-Deep-Neural-Networks
100
4568
import random import pandas as pd import numpy as np import matplotlib.pyplot as plt #%matplotlib inline import tensorflow as tf import keras.backend as K from keras.utils import to_categorical from keras import metrics from keras.models import Model, load_model from keras.layers import Input, BatchNormalization, Activation, Dense, Dropout,Maximum from keras.layers.core import Lambda, RepeatVector, Reshape from keras.layers.convolutional import Conv2D, Conv2DTranspose,Conv3D,Conv3DTranspose from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D,MaxPooling3D from keras.layers.merge import concatenate, add from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau from keras.optimizers import Adam from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img from skimage.io import imread, imshow, concatenate_images from skimage.transform import resize from sklearn.utils import class_weight from keras.callbacks import ModelCheckpoint from keras.callbacks import CSVLogger from keras.callbacks import EarlyStopping from keras.layers.advanced_activations import PReLU import os from skimage.io import imread, imshow, concatenate_images from skimage.transform import resize # from medpy.io import load import numpy as np #import cv2 import nibabel as nib from PIL import Image def conv_block(input_mat,num_filters,kernel_size,batch_norm): X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(input_mat) if batch_norm: X = BatchNormalization()(X) X = Activation('relu')(X) X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(X) if batch_norm: X = BatchNormalization()(X) X = Activation('relu')(X) X = add([input_mat,X]); return X def Vnet_3d(input_img, n_filters = 8, dropout = 0.2, batch_norm = True): #c1 = conv_block(input_img,n_filters,3,batch_norm) c1 = Conv3D(n_filters,kernel_size = (5,5,5) , strides = (1,1,1) , padding='same')(input_img) #c1 = add([c1,input_img]) c2 = Conv3D(n_filters*2,kernel_size = (2,2,2) , strides = (2,2,2) , padding = 'same' )(c1) c3 = conv_block(c2 , n_filters*2,5,True) p3 = Conv3D(n_filters*4,kernel_size = (2,2,2) , strides = (2,2,2), padding = 'same')(c3) p3 = Dropout(dropout)(p3) c4 = conv_block(p3, n_filters*4,5,True) p4 = Conv3D(n_filters*8,kernel_size = (2,2,2) , strides = (2,2,2) , padding='same')(c4) p4 = Dropout(dropout)(p4) c5 = conv_block(p4, n_filters*8,5,True) p6 = Conv3D(n_filters*16,kernel_size = (2,2,2) , strides = (2,2,2) , padding='same')(c5) p6 = Dropout(dropout)(p6) #c6 = conv_block(p5, n_filters*8,5,True) #p6 = Conv3D(n_filters*16,kernel_size = (2,2,2) , strides = (2,2,2) , padding='same')(c6) p7 = conv_block(p6,n_filters*16,5,True) u6 = Conv3DTranspose(n_filters*8, (2,2,2), strides=(2, 2, 2), padding='same')(p7); u6 = concatenate([u6,c5]); c7 = conv_block(u6,n_filters*16,5,True) c7 = Dropout(dropout)(c7) u7 = Conv3DTranspose(n_filters*4,(2,2,2),strides = (2,2,2) , padding= 'same')(c7); u8 = concatenate([u7,c4]); c8 = conv_block(u8,n_filters*8,5,True) c8 = Dropout(dropout)(c8) u9 = Conv3DTranspose(n_filters*2,(2,2,2),strides = (2,2,2) , padding= 'same')(c8); u9 = concatenate([u9,c3]); c9 = conv_block(u9,n_filters*4,5,True) c9 = Dropout(dropout)(c9) u10 = Conv3DTranspose(n_filters,(2,2,2),strides = (2,2,2) , padding= 'same')(c9); u10 = concatenate([u10,c1]); c10 = Conv3D(n_filters*2,kernel_size = (5,5,5),strides = (1,1,1) , padding = 'same')(u10); c10 = Dropout(dropout)(c10) c10 = add([c10,u10]); #c9 
= conv_block(u9,n_filters,3,batch_norm) outputs = Conv3D(4, (1,1,1), activation='softmax')(c10) model = Model(inputs=input_img, outputs=outputs) return model
2.015625
2
pytudes/_2021/educative/grokking_the_coding_interview/fast_and_slow_pointers/_1__linked_list_cycle__easy.py
TeoZosa/pytudes
1
4592
"""https://www.educative.io/courses/grokking-the-coding-interview/N7rwVyAZl6D Categories: - Binary - Bit Manipulation - Blind 75 See Also: - pytudes/_2021/leetcode/blind_75/linked_list/_141__linked_list_cycle__easy.py """ from pytudes._2021.utils.linked_list import ( ListNode, NodeType, convert_list_to_linked_list, ) def has_cycle(head: NodeType) -> bool: """ Args: head: head of a singly-linked list of nodes Returns: whether or not the linked list has a cycle Examples: >>> has_cycle(None) False >>> head = ListNode("self-edge") >>> head.next = head >>> has_cycle(head) True >>> head = convert_list_to_linked_list([1,2,3,4,5,6]) >>> has_cycle(head) False >>> head.next.next.next.next.next.next = head.next.next >>> has_cycle(head) True >>> head.next.next.next.next.next.next = head.next.next.next >>> has_cycle(head) True """ slow = fast = head while fast is not None and fast.next is not None: # since fast ≥ slow slow = slow.next fast = fast.next.next if slow == fast: return True # found the cycle else: return False def main(): head = convert_list_to_linked_list([1, 2, 3, 4, 5, 6]) print("LinkedList has cycle: " + str(has_cycle(head))) head.next.next.next.next.next.next = head.next.next print("LinkedList has cycle: " + str(has_cycle(head))) head.next.next.next.next.next.next = head.next.next.next print("LinkedList has cycle: " + str(has_cycle(head))) main()
3.1875
3
naplib/alignment/prosodylab_aligner/__main__.py
gavinmischler/naplib-python
1
4608
# Copyright (c) 2011-2014 <NAME> and <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ Command-line driver for the module """ import logging import os import sys import yaml from bisect import bisect from shutil import copyfile from textgrid import MLF from corpus import Corpus from aligner import Aligner from archive import Archive from utilities import splitname, resolve_opts, \ ALIGNED, CONFIG, HMMDEFS, MACROS, SCORES from argparse import ArgumentParser DICTIONARY = "eng.dict" MODEL = "eng.zip" LOGGING_FMT = "%(message)s" # parse arguments argparser = ArgumentParser(prog="{} -m aligner".format(sys.executable), description="Prosodylab-Aligner") argparser.add_argument("-c", "--configuration", help="config file") argparser.add_argument("-d", "--dictionary", metavar="DICT", action="append", help="dictionary file (default: {}) (can specify multiple)".format(DICTIONARY)) argparser.add_argument("-s", "--samplerate", type=int, help="analysis samplerate (in Hz)") argparser.add_argument("-e", "--epochs", type=int, help="# of epochs of training per round") input_group = argparser.add_argument_group() input_group.add_argument("-r", "--read", help="source for a precomputed acoustic model") input_group.add_argument("-t", "--train", help="directory containing data for training") output_group = argparser.add_mutually_exclusive_group(required=True) output_group.add_argument("-a", "--align", help="directory containing data to align") output_group.add_argument("-w", "--write", help="destination for computed acoustic model") verbosity_group = argparser.add_mutually_exclusive_group() verbosity_group.add_argument("-v", "--verbose", action="store_true", help="Verbose output") verbosity_group.add_argument("-V", "--extra-verbose", action="store_true", help="Even more verbose output") args = argparser.parse_args() # hack to allow proper override of default dictionary if not args.dictionary: args.dictionary = [DICTIONARY] # set up logging loglevel = logging.WARNING if args.extra_verbose: loglevel = logging.DEBUG elif args.verbose: loglevel = logging.INFO logging.basicConfig(format=LOGGING_FMT, level=loglevel) # input: pick one if args.train: if args.read: logging.error("Cannot train on persistent model.") exit(1) logging.info("Preparing corpus '{}'.".format(args.train)) opts = resolve_opts(args) corpus = Corpus(args.train, opts) logging.info("Preparing aligner.") aligner = Aligner(opts) logging.info("Training aligner on corpus '{}'.".format(args.train)) aligner.HTKbook_training_regime(corpus, opts["epochs"], 
flatstart=(args.read is None)) else: if not args.read: args.read = MODEL logging.info("Reading aligner from '{}'.".format(args.read)) # warn about irrelevant flags if args.configuration: logging.warning("Ignoring config flag (-c/--configuration).") args.configuration = None if args.epochs: logging.warning("Ignoring epochs flag (-e/--epochs).") if args.samplerate: logging.warning("Ignoring samplerate flag (-s/--samplerate).") args.samplerate = None # create archive from -r argument archive = Archive(args.read) # read configuration file therefrom, and resolve options with it args.configuration = os.path.join(archive.dirname, CONFIG) opts = resolve_opts(args) # initialize aligner and set it to point to the archive data aligner = Aligner(opts) aligner.curdir = archive.dirname # output: pick one if args.align: # check to make sure we're not aligning on the training data if (not args.train) or (os.path.realpath(args.train) != os.path.realpath(args.align)): logging.info("Preparing corpus '{}'.".format(args.align)) corpus = Corpus(args.align, opts) logging.info("Aligning corpus '{}'.".format(args.align)) aligned = os.path.join(args.align, ALIGNED) scores = os.path.join(args.align, SCORES) aligner.align_and_score(corpus, aligned, scores) logging.debug("Wrote MLF file to '{}'.".format(aligned)) logging.debug("Wrote likelihood scores to '{}'.".format(scores)) logging.info("Writing TextGrids.") size = MLF(aligned).write(args.align) if not size: logging.error("No paths found!") exit(1) logging.debug("Wrote {} TextGrids.".format(size)) elif args.write: # create and populate archive (_, basename, _) = splitname(args.write) archive = Archive.empty(basename) archive.add(os.path.join(aligner.curdir, HMMDEFS)) archive.add(os.path.join(aligner.curdir, MACROS)) # whatever this is, it's not going to work once you move the data if "dictionary" in opts: del opts["dictionary"] with open(os.path.join(archive.dirname, CONFIG), "w") as sink: yaml.dump(opts, sink) (basename, _) = os.path.splitext(args.write) archive_path = os.path.relpath(archive.dump(basename)) logging.info("Wrote aligner to '{}'.".format(archive_path)) # else unreachable logging.info("Success!")
1.59375
2
SAP/released_tr_email_sender/ui.py
botisko/personal_programs
0
4616
import json from tkinter import * from tkinter import ttk from tkinter import messagebox from tr_data import TRData, NO_DATA_MEETS_CRITERIA from email_text import email_body_template from helpers import send_email RECIPIENT = <email_address> EXCEPTION_FILE = "tr_number_exceptions.json" class TrEmailSender: def __init__(self, transport_requests: TRData): self.transport_requests = transport_requests self.exceptions = self.load_exceptions() # WINDOW CREATION self.window = Tk() self.window.title("Send email with import requests to TST") self.window.config(padx=20, pady=20) # TTILE LABEL self.title_lbl = Label( text="Please select TRs to be included into email: ", ) # BUTTONS self.refresh_btn = Button(text="REFRESH", command=self.refresh) self.exceptions_btn = Button(text="Add to exceptions", command=self.add_to_exceptions) self.select_all_btn = Button(text="Select All", command=self.select_all) self.send_btn = Button(text="SEND", command=self.send_email) # list of TRs columns_labels = { 'tr_number': ("TR Number", 100), 'description': ("Description", 350), 'tkt_type': ("Ticket Type", 80), 'ticket_num': ("Ticket Number", 80), 'module': ("SAP Module", 80), 'export_datetime': ("Export Timestamp", 150), 'owner': ("Owner", 80) } # TREE VIEW for list display self.tr_tree_view = ttk.Treeview(columns=tuple(columns_labels.keys()), show='headings') # Update columns for column, (label, field_length) in columns_labels.items(): self.tr_tree_view.column(column, minwidth=80, width=field_length, anchor='w', stretch=False) self.tr_tree_view.heading(column, text=label) # insert data self.populate_tree_view_lines() #LAYOUT PLACEMENT self.title_lbl.grid(row=0, column=0, sticky=W) self.tr_tree_view.grid(row=1, column=0, rowspan=4) self.refresh_btn.grid(row=1, column=1, sticky=N+S+E+W, padx=2, pady=2) self.exceptions_btn.grid(row=2, column=1, sticky=E+W+S, padx=1, pady=2) self.select_all_btn.grid(row=3, column=1, sticky=E+W+N, padx=1, pady=2) self.send_btn.grid(row=4, column=1, sticky=N+S+E+W, padx=1, pady=2) # DISPLAY WINDOW self.window.mainloop() def refresh(self): # delete all rows in tree view for item in self.tr_tree_view.get_children(): self.tr_tree_view.delete(item) # update with new data self.transport_requests.refresh() self.exceptions = self.load_exceptions() self.populate_tree_view_lines() def populate_tree_view_lines(self): all_are_in_exceptions = True for (tr_number, export_timestamp, owner, description, ticket_number, sap_module, ticket_type) in self.transport_requests.data: # check if not in exception if not tr_number in self.exceptions: year = export_timestamp[:4] month = export_timestamp[4:6] day = export_timestamp[6:8] time = f"{export_timestamp[8:10]}:{export_timestamp[10:12]}:{export_timestamp[12:]}" export_date_time = f"{day}/{month}/{year} - {time}" line_values = (tr_number, description, ticket_type, ticket_number, sap_module, export_date_time, owner) self.tr_tree_view.insert('', 'end', values=line_values) all_are_in_exceptions = False # if all TRs are in exceptions, insert only pre-defined information if all_are_in_exceptions: tr_number = NO_DATA_MEETS_CRITERIA[0][0] description = NO_DATA_MEETS_CRITERIA[0][3] no_data_information = (tr_number, description, "", "", "", "", "") self.tr_tree_view.insert('', 'end', values=no_data_information) def select_all(self): items = self.tr_tree_view.get_children() self.tr_tree_view.selection_add(items) def get_selected_item_ids(self): return self.tr_tree_view.selection() def send_email(self): # get selected lines selected_ids = self.get_selected_item_ids() # 
get data of each id if not selected_ids: messagebox.showinfo( title="Status Info", message="There is nothing to send.\n\nPlease refresh the page." ) return None email_details = self.prepare_email_details(selected_ids) # send email if send_email(**email_details): messagebox.showinfo( title="Status Info", message="Email has been sent!") # add trs into exceptions return self.add_to_exceptions() else: return None def prepare_email_details(self, selected_ids): transport_data = [self.tr_tree_view.item(id_tag, 'values') for id_tag in selected_ids] # prepare list of transports for email body html_list_of_trs = "" ticket_numbers = set() for (tr_number, description, ticket_type, ticket_number, sap_module, export_timestamp, owner) in transport_data: html_list_of_trs += f"<li>{tr_number} - {owner} - {description}</li>" ticket_numbers.add(ticket_number) # prepare email details email_details = { 'recipient': RECIPIENT, 'subject': f"Transport requests for: {', '.join(sorted(ticket_numbers)).rstrip(', ')}", 'html_body': email_body_template.format(html_list_of_trs) } return email_details def load_exceptions(self): try: with open(file=EXCEPTION_FILE, mode='r') as file: exception_list = set(json.load(file)['tr_numbers']) except FileNotFoundError: with open(file=EXCEPTION_FILE, mode='w') as file: exception_dict = {'tr_numbers': []} json.dump(exception_dict, file, indent=4) return set() else: return exception_list def add_to_exceptions(self): selected_ids = self.get_selected_item_ids() if not selected_ids: messagebox.showinfo( title="Status Info", message="Nothing has been selected.\n\nPlease refresh the page." ) return None transport_numbers = [self.tr_tree_view.item(id_tag, 'values')[0] for id_tag in selected_ids] # add TR number of selected items to exception json file for number in transport_numbers: self.exceptions.add(number) updated_data= {'tr_numbers': list(self.exceptions)} with open(file=EXCEPTION_FILE, mode='w') as file: json.dump(updated_data, file, indent=4) return self.refresh()
1.859375
2
ast_version/src/binop.py
lucassa3/CCompiler
1
4624
from node import Node


class BinOp(Node):
    def eval(self, st):
        a = self.children[0].eval(st)
        b = self.children[1].eval(st)

        if self.value == "MINUS":
            return a - b
        elif self.value == "PLUS":
            return a + b
        elif self.value == "MULT":
            return a * b
        elif self.value == "DIV":
            return a // b
        elif self.value == "GREATER":
            return a > b
        elif self.value == "LESS":
            return a < b
        elif self.value == "GE":
            return a >= b
        elif self.value == "LE":
            return a <= b
        elif self.value == "EQUALS":
            return a == b
        elif self.value == "AND":
            return a and b
        elif self.value == "OR":
            return a or b
1.78125
2
application/mod_user/forms.py
hackBCA/hackbcafour
2
4632
from wtforms import Form, TextField, PasswordField, SelectField, TextAreaField, BooleanField, validators, ValidationError, RadioField import re phone_regex = "(\+\d+-?)?((\(?\d{3}\)?)|(\d{3}))-?\d{3}-?\d{4}$" gender_choices = [ ("", "Gender"), ("male", "Male"), ("female", "Female"), ("other", "Other"), ("rns", "Rather Not Say") ] beginner_choices = [ ("", "Are you a beginner?"), ("yes", "Yes"), ("no", "No") ] ethnicity_choices = [ ("", "Ethnicity"), ("white", "White"), ("african_american", "African American"), ("asian_pacific", "Asian or Pacific Islander"), ("american_indian_alaskan_native", "American Indian or Alaskan Native"), ("multiracial", "Multiracial"), ("hispanic", "Hispanic origin"), ("other", "Other"), ("rns", "Rather Not Say") ] num_hackathons_choices = [ ("", "How many hackathons have you been to?"), ("0", "0"), ("1", "1"), ("2", "2"), ("3", "3"), ("4", "4"), ("5", "5+") ] num_hackathons_choices_mentor = [ ("", "How many hackathons have you mentored at?"), ("0", "0"), ("1", "1"), ("2", "2"), ("3", "3"), ("4", "4"), ("5", "5+") ] grade_choices = [ ("", "What grade are you in?"), ("9", "9th"), ("10", "10th"), ("11", "11th"), ("12", "12th") ] shirt_sizes = [ ("", "What is your shirt size?"), ("XS", "Extra Small"), ("S", "Small"), ("M", "Medium"), ("L", "Large"), ("XL", "Extra Large") ] type_account_choices = [ ("hacker", "Hacker"), ("mentor", "Mentor") ] free_response1_prompt = "Why do you want to come to hackBCA?" free_response1_prompt_mentor = "Please list languages/frameworks/technologies that you would like to mentor students in." free_response2_prompt_mentor = "Would you like to run a workshop? If so, please briefly describe your ideas." class HackerRegistrationForm(Form): email = TextField("Email", [ validators.Required(message = "Enter an email."), validators.Email(message = "Invalid email address.") ], render_kw={"class": 'text'}, description = "Email") first_name = TextField("First Name", [ validators.Required(message = "You must enter a first name.") ], render_kw={"class": 'text'}, description = "First Name") last_name = TextField("Last Name", [ validators.Required(message = "You must enter a last name.") ], render_kw={"class": 'text'}, description = "Last Name") school = TextField("School Name", [ validators.Required(message = "Enter your school's name.") ], render_kw={"class": 'text'}, description = "School Name") gender = SelectField("Gender", [validators.Required(message = "You must select an option.")], choices = gender_choices, render_kw={"class": 'text'}, description = "Gender") beginner = SelectField("Are you a beginner?", [validators.Required(message = "You must select an option.")], choices = beginner_choices, render_kw={"class": 'text'}, description = "Are you a beginner?") ethnicity = SelectField("Ethnicity", [validators.Required(message = "You must select an option.")], choices = ethnicity_choices, render_kw={"class": 'text'}, description = "Ethnicity") grade = SelectField("Grade", [validators.Required(message = "You must select an option.")], choices = grade_choices, render_kw={"class": 'text'}, description = "Grade") age = TextField("Age", [ validators.Required(message = "Enter your age") ], render_kw={"class": 'text'}, description = "Age") num_hackathons = SelectField("How many hackathons have you attended?", [validators.Required(message = "You must select an option.")], choices = num_hackathons_choices, render_kw={"class": 'text'}, description = "How many hackathons have you attended?") free_response1 = TextAreaField(free_response1_prompt, [ 
validators.Required(message = "You must answer this question."), validators.Length(max = 1500, message = "Response must be less than 1500 characters long.") ], render_kw={"class": 'text'}, description = "1500 characters maximum.") link1 = TextField("Link #1", [ validators.optional(), validators.URL(message = "Invalid URL.") ], render_kw={"class": 'text'}, description = "Link #1 (Optional)") link2 = TextField("Link #2", [ validators.optional(), validators.URL(message = "Invalid URL.") ], render_kw={"class": 'text'}, description = "Link #2 (Optional)") link3 = TextField("Link #3", [ validators.optional(), validators.URL(message = "Invalid URL.") ], render_kw={"class": 'text'}, description = "Link #3 (Optional)") password = PasswordField("Password", [ validators.Required(message = "You must enter a password."), validators.Length(min = 8, message = "Password must be at least 8 characters.") ], render_kw={"class": 'text'}, description = "Password") confirm_password = PasswordField("Confirm Password", render_kw={"class": 'text'}, description = "Confirm Password") mlh_coc = BooleanField("I agree", [ validators.Required(message = "Please read and agree to the MLH Code of Conduct.") ], description = "I have read & agree to the MLH Code of Conduct.", default = False) mlh_terms = BooleanField("I agree", [ validators.Required(message = "Please read and agree to the MLH Terms and Conditions.") ], description = "I agree to the MLH Contest Terms and Conditions and the MLH Privacy Policy.", default = False) def validate_confirm_password(form, field): password = form['password'].data if len(password) >= 8 and password != field.data: raise ValidationError("Passwords must match.") def validate(self): #Man I love validators.URL links = ["link1", "link2", "link3"] originalValues = {} for link in links: #Temporarily prefix all links with http:// if they are missing it attr = getattr(self, link) val = attr.data originalValues[link] = val if re.match("^(http|https)://", val) is None: val = "http://" + val attr.data = val setattr(self, link, attr) rv = Form.validate(self) for link in links: #Revert link values back to actual values attr = getattr(self, link) attr.data = originalValues[link] setattr(self, link, attr) if not rv: return False return True def validate_other_gender(form, field): if form['gender'].data == 'other' and field.data == "": raise ValidationError("Enter your gender.") class MentorRegistrationForm(Form): email = TextField("Email", [ validators.Required(message = "Enter an email."), validators.Email(message = "Invalid email address.") ], render_kw={"class": 'text'}, description = "Email") first_name = TextField("First Name", [ validators.Required(message = "You must enter a first name.") ], render_kw={"class": 'text'}, description = "First Name") last_name = TextField("Last Name", [ validators.Required(message = "You must enter a last name.") ], render_kw={"class": 'text'}, description = "Last Name") school = TextField("Company/School Name", [ validators.Required(message = "Enter your company/schools's name.") ], render_kw={"class": 'text'}, description = "Company/School Name") phone = TextField("Phone Number", [ validators.Required(message = "Enter your preferred contact number."), validators.Regexp(phone_regex, message = "Please enter a valid phone number.") ], render_kw={"class": 'text'}, description = "Phone Number") num_hackathons = SelectField("How many hackathons have you mentored at?", [validators.Required(message = "You must select an option.")], choices = num_hackathons_choices_mentor, 
render_kw={"class": 'text'}, description = "How many hackathons have you mentored at?") mentor_free_response1 = TextAreaField(free_response1_prompt_mentor, [ validators.Length(max = 1500, message = "Response must be less than 1500 characters long.") ], render_kw={"class": 'text'}, description = "1500 characters maximum.") mentor_free_response2 = TextAreaField(free_response2_prompt_mentor, [ validators.Length(max = 1500, message = "Response must be less than 1500 characters long.") ], render_kw={"class": 'text'}, description = "1500 characters maximum.") github_link = TextField("Github Link", [ validators.optional(), validators.URL(message = "Invalid URL.") ], render_kw={"class": 'text'}, description = "Github Link (Optional)") linkedin_link = TextField("LinkedIn", [ validators.optional(), validators.URL(message = "Invalid URL.") ], render_kw={"class": 'text'}, description = "LinkedIn Link (Optional)") site_link = TextField("Personal Site", [ validators.optional(), validators.URL(message = "Invalid URL.") ], render_kw={"class": 'text'}, description = "Personal Site Link (Optional)") other_link = TextField("other", [ validators.optional(), validators.URL(message = "Invalid URL.") ], render_kw={"class": 'text'}, description = "Other Link (Optional)") password = PasswordField("Password", [ validators.Required(message = "You must enter a password."), validators.Length(min = 8, message = "Password must be at least 8 characters.") ], render_kw={"class": 'text'}, description = "Password") confirm_password = PasswordField("<PASSWORD> Password", render_kw={"class": 'text'}, description = "Confirm Password") mlh_coc = BooleanField("I agree", [ validators.Required(message = "Please read and agree to the MLH Code of Conduct.") ], description = "I have read & agree to the MLH Code of Conduct.", default = False) mlh_terms = BooleanField("I agree", [ validators.Required(message = "Please read and agree to the MLH Terms and Conditions.") ], description = "I agree to the MLH Contest Terms and Conditions and the MLH Privacy Policy.", default = False) def validate(self): links = ["github_link", "linkedin_link", "site_link", "other_link"] originalValues = {} for link in links: #Temporarily prefix all links with http:// if they are missing it attr = getattr(self, link) val = attr.data originalValues[link] = val if re.match("^(http|https)://", val) is None: val = "http://" + val attr.data = val setattr(self, link, attr) rv = Form.validate(self) for link in links: #Revert link values back to actual values attr = getattr(self, link) attr.data = originalValues[link] setattr(self, link, attr) if not rv: return False return True class LoginForm(Form): email = TextField("Email", [ validators.Required(message = "Enter an email."), validators.Email(message = "Invalid email address." )], render_kw={"class": 'text'},description = "Email") password = PasswordField("Password", [], render_kw={"class": 'text'}, description = "Password") class EmailForm(Form): email = TextField("Email", [ validators.Required(message = "Enter an email."), validators.Email(message = "Invalid email address." 
)], render_kw={"class": 'text'}, description = "Email") class RecoverForm(Form): password = PasswordField("Password", [ validators.Required(message = "You must enter a password."), validators.Length(min = 8, message = "Password must be at least 8 characters.") ], render_kw={"class": 'text'}, description = "Password") confirm_password = PasswordField("Confirm Password", render_kw={"class": 'text'}, description = "Confirm Password") def validate_confirm_password(form, field): password = form['password'].data if len(password) >= 8 and password != field.data: raise ValidationError("Passwords must match.") class ChangeNameForm(Form): first_name = TextField("First Name", [ validators.Required(message = "You must enter a first name.") ], render_kw={"class": 'text'}, description = "First Name") last_name = TextField("Last Name", [ validators.Required(message = "You must enter a last name.") ], render_kw={"class": 'text'}, description = "Last Name") class ChangePasswordForm(Form): password = PasswordField("Password", [ validators.Required(message = "You must enter your current password."), validators.Length(min = 8, message = "Password must be at least 8 characters.") ], render_kw={"class": 'text'}, description = "Current Password") new_password = PasswordField("New Password", [ validators.Required(message = "You must choose a new password."), validators.Length(min = 8, message = "Password must be at least 8 characters.") ], render_kw={"class": 'text'}, description = "New Password") confirm_password = PasswordField("<PASSWORD> Password", render_kw={"class": 'text'}, description = "Confirm New Password") def validate_confirm_password(form, field): password = form['new_password'].data if len(password) >= 8 and password != field.data: raise ValidationError("Passwords must match.") attending_choices = [ ("Attending", "Yes, I will!"), ("Not Attending", "No, I won't.") ] class RsvpForm(Form): # attending = RadioField("Are you attending hackBCA III?", [validators.Required(message = "Please tell us if you are attending hackBCA III.")], render_kw={"class": 'text'}, choices = attending_choices, description = "Will you be at hackBCA?") # t_shirt_size = SelectField("What is your shirt size?", [validators.Required(message = "You must select an option.")], choices = shirt_sizes, description = "What is your shirt size?") dietary_restrictions = TextField("Dietary Restrictions", [ validators.optional(), ], render_kw={"class": 'text'}, description = "Do you have any dietary restrictions?") guardian_name = TextField("Guardian Full Name", [ validators.Required(message = "You must enter a name.") ], render_kw={"class": 'text'}, description = "Guardian Name") guardian_home_num = TextField("Guardian Home Number", [ validators.Required(message = "Enter your guardian's home number."), validators.Regexp(phone_regex, message = "Please enter a valid phone number.") ], render_kw={"class": 'text'}, description = "Guardian Home Number") guardian_cell_num = TextField("Guardian Cellphone", [ validators.Required(message = "Enter your guardian's cellphone number."), validators.Regexp(phone_regex, message = "Please enter a valid phone number.") ], render_kw={"class": 'text'}, description = "Guardian Cellphone") guardian_email = TextField("Guardian Email", [ validators.Required(message = "Enter an email."), validators.Email(message = "Invalid email address." 
)], render_kw={"class": 'text'}, description = "Guardian Email") emergency_name = TextField("Emergency Contact Full Name", [ validators.Required(message = "You must enter a name.") ], render_kw={"class": 'text'}, description = "Emergency Contact Name") emergency_home_num = TextField("Emergency Contact Home Number", [ validators.Required(message = "Enter your emergency contact's home number."), validators.Regexp(phone_regex, message = "Please enter a valid phone number.") ], render_kw={"class": 'text'}, description = "Emergency Contact Home Number") emergency_cell_num = TextField("Emergency Contact Cellphone", [ validators.Required(message = "Enter your emergency contact's cellphone."), validators.Regexp(phone_regex, message = "Please enter a valid phone number.") ], render_kw={"class": 'text'}, description = "Emergency Contact Cellphone") emergency_email = TextField("Emergency Contact Email", [ validators.Required(message = "Enter an email."), validators.Email(message = "Invalid email address." )], render_kw={"class": 'text'}, description = "Emergency Contact Email") school_address = TextField("School Address", [ validators.Required(message = "Enter your school address."), ], render_kw={"class": 'text'}, description = "School Address") school_town = TextField("School Town", [ validators.Required(message = "Enter your school town."), ], render_kw={"class": 'text'}, description = "School Town") school_state = TextField("School State", [ validators.Required(message = "Enter your school state."), ], render_kw={"class": 'text'}, description = "School State") school_phone_num = TextField("School Phone Number", [ validators.Required(message = "Enter school's home number."), validators.Regexp(phone_regex, message = "Please enter a valid phone number.") ], render_kw={"class": 'text'}, description = "School Phone Number") school_principal_name = TextField("Principal Name", [ validators.Required(message = "You must enter a name."), ], render_kw={"class": 'text'}, description = "Principal Name") school_principal_email = TextField("Principal Email", [ validators.Required(message = "Enter an email."), validators.Email(message = "Invalid email address." )], render_kw={"class": 'text'}, description = "Principal Email") cs_teacher_name = TextField("CS Teacher Name", [ validators.optional(), ], render_kw={"class": 'text'}, description = "CS Teacher Name (if applicable)") cs_teacher_email = TextField("CS Teacher Email", [ validators.optional(), validators.Email(message = "Invalid email address." )], render_kw={"class": 'text'}, description = "CS Teacher Email (if applicable)") # class MentorRsvpForm(Form): # attending = RadioField("Are you attending hackBCA III?", [validators.Required(message = "Please tell us if you are attending hackBCA III.")], choices = attending_choices) # phone = TextField("Phone Number", [ # validators.Required(message = "Confirm your preferred contact number."), # validators.Regexp(phone_regex, message = "Please enter a valid phone number.") # ], description = "Phone Number Confirmation") # t_shirt_size = SelectField("What is your shirt size?", [validators.Required(message = "You must select an option.")], choices = shirt_sizes, description = "What is your shirt size?") # food_allergies = TextAreaField("Allergies", [ # validators.optional(), # ], description = "Do you have any allergies?") # medical_information = TextAreaField("Medical Information", [ # validators.optional(), # ], description = "Are there any other medical issues that we should know about? (ex. 
Other allergies, illnesses, etc.)") # hackbca_rules = BooleanField("I agree",[ # validators.Required(message = "Please read and agree to our rules.") # ], description = "I agree to the rules set forth by hackBCA.", default = False) # mlh_terms = BooleanField("I agree",[ # validators.Required(message = "Please read and agree to the MLH Code of Conduct.") # ], description = "I agree to the MLH Code of Conduct.", default = False)
1.757813
2
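A minimal sketch of how forms like the ones above are typically driven from a Flask view; the route, the response strings, and the "forms" module name are illustrative assumptions, not code from this repository:

from flask import Flask, request
from forms import LoginForm  # module name is an assumption

app = Flask(__name__)

@app.route("/login", methods=["POST"])
def login():
    # Bind the posted data to the form and run every declared field validator.
    form = LoginForm(request.form)
    if form.validate():
        # field.data holds the cleaned value once validation passes.
        return "Logged in as " + form.email.data
    # form.errors maps each field name to its list of validation messages.
    return str(form.errors), 400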
src/model/ParseInput.py
slavi010/polyhash-2020
0
4640
import os from typing import List from src.model.Etape import Etape from src.model.Grille import Grille from src.model.ItemCase import ItemCase from src.model.PointMontage import PointMontage from src.model.Robot import Robot from src.model.Tache import Tache class ParseInput: """Parser that reads the input text file provided by Google and turns it into data and classes we can work with. """ grille: Grille def __init__(self): pass def parse(self, file_path: str) -> Grille: """Parses the Google input file and returns the corresponding Grille. :rtype: Grille """ # check that file_path is an existing file assert os.path.isfile(file_path) with open(file_path, 'r') as file: index: int = 0 # read every line of the file lines: List = file.readlines() # Turn each line into a list of integers for index_line in range(len(lines)): lines[index_line] = lines[index_line].split(' ') for index_val in range(len(lines[index_line])): lines[index_line][index_val] = int(lines[index_line][index_val]) # create a Grille instance grille = Grille(lines[0][0], lines[0][1]) # instantiate the requested number of robots in the grid # create the robots for idx_robot in range(lines[0][2]): grille.robots.append(Robot()) # Create the mount points and place them in the grid for idx_point_montage in range(lines[0][3]): index += 1 grille.add_point_montage(PointMontage(lines[index][0], lines[index][1])) # Read the number of clock steps allowed grille.step_simulation = lines[0][5] # Read the information of each task # instantiate the corresponding tasks in the grid # if an Etape (assembly point) has not yet been created in the grid at the matching coordinates, # instantiate it, put it in the grid and remember to link it to the task # Create the Tache and Etape instances for index_tache in range(lines[0][4]): index += 1 tache_tampon: Tache = Tache(lines[index][0], index_tache) index += 1 g_x = 0 g_y = 0 for index_etape in range(lines[index-1][1]): # add the etapes (assembly points) etape = Etape(lines[index][index_etape*2+0], lines[index][index_etape*2+1]) tache_tampon.add_etape(etape) g_x += (etape.x - g_x)/len(tache_tampon.etapes) g_y += (etape.y - g_y)/len(tache_tampon.etapes) # store the computed parameters on the Tache tache_tampon.centre_gravite = ItemCase(int(g_x), int(g_y)) tache_tampon.distance_centre_gravite = max(tache_tampon.etapes, key=lambda etape: tache_tampon.centre_gravite.distance(etape)) \ .distance(tache_tampon.centre_gravite) grille.add_tache(tache_tampon) # compute the approximate distance and area between consecutive etapes for etape_from, etape_to in zip(tache_tampon.etapes[0::1], tache_tampon.etapes[1::1]): tache_tampon.distance += etape_from.distance(etape_to) tache_tampon.surface += etape_from.distance(etape_to) return grille
1.953125
2
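A short, hedged usage sketch for the parser above; the input path is an assumption (any Hash Code-style input with the layout read by parse() would do), and only attributes that parse() itself populates are used:

from src.model.ParseInput import ParseInput

parser = ParseInput()
grille = parser.parse("inputs/a_example.txt")  # hypothetical input file
print(len(grille.robots), "robots,", grille.step_simulation, "simulation steps")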
otcextensions/tests/functional/osclient/vpc/v2/common.py
zsoltn/python-otcextensions
10
4656
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # import json import uuid from datetime import datetime from openstackclient.tests.functional import base class VpcTestCase(base.TestCase): """Common functional test bits for VPC commands""" CURR_TIME = datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f") def setUp(self): super(VpcTestCase, self).setUp() UUID = uuid.uuid4().hex[:8] self.LOCAL_ROUTER_NAME = 'test-local-router-otce-cli' + UUID self.PEER_ROUTER_NAME = 'test-peer-router-otce-cli' + UUID self.PEERING_NAME = 'test-peering-otce-cli-' + UUID self.LOCAL_ROUTER_ID = None self.PEER_ROUTER_ID = None self.PEERING_ID = None def create_vpc_peering(self, name=None): self._create_routers() name = name or self.PEERING_NAME json_output = json.loads(self.openstack( 'vpc peering create ' '{name} ' '--local-router-id "{local_router_id}" ' '--peer-router-id "{peer_router_id}" ' '-f json'.format( name=name, local_router_id=self.LOCAL_ROUTER_ID, peer_router_id=self.PEER_ROUTER_ID) )) self.assertIsNotNone(json_output) self.PEERING_ID = json_output['id'] return json_output def delete_vpc_peering(self): self.addCleanup(self._delete_routers) self.openstack('vpc peering delete {}'.format(self.PEERING_ID)) def _create_routers(self): local_router = json.loads(self.openstack( 'router create -f json ' + self.LOCAL_ROUTER_NAME )) self.LOCAL_ROUTER_ID = local_router['id'] peer_router = json.loads(self.openstack( 'router create -f json ' + self.PEER_ROUTER_NAME )) self.PEER_ROUTER_ID = peer_router['id'] def _delete_routers(self): self.openstack( 'router delete {} {}'.format( self.LOCAL_ROUTER_ID, self.PEER_ROUTER_ID ))
1.359375
1
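A hedged sketch of a concrete functional test built on the VpcTestCase helper above; the import path mirrors the file location in the metadata, and the body only calls helpers defined in that class:

from otcextensions.tests.functional.osclient.vpc.v2 import common

class TestVpcPeering(common.VpcTestCase):
    def test_create_and_delete_peering(self):
        # Provisions two routers plus the peering and records PEERING_ID.
        peering = self.create_vpc_peering()
        self.assertIsNotNone(peering['id'])
        # Removes the peering and schedules router cleanup via addCleanup.
        self.delete_vpc_peering()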
clarifai/rest/grpc/custom_converters/custom_message_to_dict.py
Taik/clarifai-python
322
4696
import typing # noqa from google.protobuf import descriptor from google.protobuf.json_format import _IsMapEntry, _Printer from google.protobuf.message import Message # noqa from clarifai.rest.grpc.proto.clarifai.api.utils import extensions_pb2 def protobuf_to_dict(object_protobuf, use_integers_for_enums=True, ignore_show_empty=False): # type: (Message, typing.Optional[bool], typing.Optional[bool]) -> dict # printer = _CustomPrinter( printer = _CustomPrinter( including_default_value_fields=False, preserving_proto_field_name=True, use_integers_for_enums=use_integers_for_enums, ignore_show_empty=ignore_show_empty) # pylint: disable=protected-access return printer._MessageToJsonObject(object_protobuf) class _CustomPrinter(_Printer): def __init__(self, including_default_value_fields, preserving_proto_field_name, use_integers_for_enums, ignore_show_empty): super(_CustomPrinter, self).__init__(including_default_value_fields, preserving_proto_field_name, use_integers_for_enums) self._ignore_show_empty = ignore_show_empty def _RegularMessageToJsonObject(self, message, js): """ Because of the fields with the custom extension `cl_show_if_empty`, we need to adjust the original's method's return JSON object and keep these fields. """ js = super(_CustomPrinter, self)._RegularMessageToJsonObject(message, js) message_descriptor = message.DESCRIPTOR for field in message_descriptor.fields: if (self._ignore_show_empty and not field.GetOptions().Extensions[extensions_pb2.cl_default_float]): continue if not field.GetOptions().Extensions[extensions_pb2.cl_show_if_empty]: continue # Singular message fields and oneof fields will not be affected. if ((field.label != descriptor.FieldDescriptor.LABEL_REPEATED and field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE) or field.containing_oneof): continue if self.preserving_proto_field_name: name = field.name else: name = field.json_name if name in js: # Skip the field which has been serialized already. continue if _IsMapEntry(field): js[name] = {} elif field.label == descriptor.FieldDescriptor.LABEL_REPEATED: js[name] = [] else: js[name] = self._FieldToJsonObject(field, field.default_value) return js def _StructMessageToJsonObject(self, message): """ Converts Struct message according to Proto3 JSON Specification. However, by default, empty objects {} get converted to null. We overwrite this behavior so {} get converted to {}. """ fields = message.fields ret = {} for key in fields: # When there's a Struct with an empty Struct field, this condition will hold True. # Far as I know this is the only case this condition will be true. If not, this condition # needs to be amended. if fields[key].WhichOneof('kind') is None: json_object = {} else: json_object = self._ValueMessageToJsonObject(fields[key]) ret[key] = json_object return ret
1.40625
1
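A small usage sketch for protobuf_to_dict above, using only the standard google.protobuf Struct type; it is an illustration, not a snippet from the Clarifai client:

from google.protobuf import struct_pb2
from clarifai.rest.grpc.custom_converters.custom_message_to_dict import protobuf_to_dict

msg = struct_pb2.Struct()
msg.update({"name": "cat", "empty": {}})
result = protobuf_to_dict(msg)
# Per the converter's docstrings, the empty nested object is kept as {} rather than null.
assert result == {"name": "cat", "empty": {}}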
sc2/unit.py
guliverza/AdditionalPylons
0
4720
from __future__ import annotations import warnings from typing import Any, Dict, List, Optional, Set, Tuple, Union, TYPE_CHECKING from .cache import property_immutable_cache, property_mutable_cache from .constants import ( transforming, IS_STRUCTURE, IS_LIGHT, IS_ARMORED, IS_BIOLOGICAL, IS_MECHANICAL, IS_MASSIVE, IS_PSIONIC, UNIT_BATTLECRUISER, UNIT_ORACLE, TARGET_GROUND, TARGET_AIR, TARGET_BOTH, IS_SNAPSHOT, IS_VISIBLE, IS_MINE, IS_ENEMY, IS_CLOAKED, IS_REVEALED, CAN_BE_ATTACKED, IS_CARRYING_MINERALS, IS_CARRYING_VESPENE, IS_CARRYING_RESOURCES, IS_ATTACKING, IS_PATROLLING, IS_GATHERING, IS_RETURNING, IS_COLLECTING, IS_CONSTRUCTING_SCV, IS_REPAIRING, IS_DETECTOR, UNIT_PHOTONCANNON, UNIT_COLOSSUS, ) from .data import Alliance, Attribute, CloakState, DisplayType, Race, TargetType, warpgate_abilities, TargetType, Target from .ids.ability_id import AbilityId from .ids.buff_id import BuffId from .ids.upgrade_id import UpgradeId from .ids.unit_typeid import UnitTypeId from .position import Point2, Point3 from .unit_command import UnitCommand warnings.simplefilter("once") if TYPE_CHECKING: from .bot_ai import BotAI from .game_data import AbilityData class UnitOrder: @classmethod def from_proto(cls, proto, bot_object: BotAI): return cls( bot_object._game_data.abilities[proto.ability_id], (proto.target_world_space_pos if proto.HasField("target_world_space_pos") else proto.target_unit_tag), proto.progress, ) def __init__(self, ability: AbilityData, target, progress: float = None): """ :param ability: :param target: :param progress: """ self.ability = ability self.target = target self.progress = progress def __repr__(self) -> str: return f"UnitOrder({self.ability}, {self.target}, {self.progress})" class Unit: def __init__(self, proto_data, bot_object: BotAI): """ :param proto_data: :param bot_object: """ self._proto = proto_data self._bot_object = bot_object # Used by property_immutable_cache self.cache = {} def __repr__(self) -> str: """ Returns string of this form: Unit(name='SCV', tag=4396941328). """ return f"Unit(name={self.name !r}, tag={self.tag})" @property_immutable_cache def type_id(self) -> UnitTypeId: """ UnitTypeId found in sc2/ids/unit_typeid. Caches all type_ids of the same unit type. """ unit_type = self._proto.unit_type if unit_type not in self._bot_object._game_data.unit_types: self._bot_object._game_data.unit_types[unit_type] = UnitTypeId(unit_type) return self._bot_object._game_data.unit_types[unit_type] @property_immutable_cache def _type_data(self) -> "UnitTypeData": """ Provides the unit type data. """ return self._bot_object._game_data.units[self._proto.unit_type] @property def name(self) -> str: """ Returns the name of the unit. """ return self._type_data.name @property def race(self) -> Race: """ Returns the race of the unit """ return Race(self._type_data._proto.race) @property def tag(self) -> int: """ Returns the unique tag of the unit. """ return self._proto.tag @property def is_structure(self) -> bool: """ Checks if the unit is a structure. """ return IS_STRUCTURE in self._type_data.attributes @property def is_light(self) -> bool: """ Checks if the unit has the 'light' attribute. """ return IS_LIGHT in self._type_data.attributes @property def is_armored(self) -> bool: """ Checks if the unit has the 'armored' attribute. """ return IS_ARMORED in self._type_data.attributes @property def is_biological(self) -> bool: """ Checks if the unit has the 'biological' attribute. 
""" return IS_BIOLOGICAL in self._type_data.attributes @property def is_mechanical(self) -> bool: """ Checks if the unit has the 'mechanical' attribute. """ return IS_MECHANICAL in self._type_data.attributes @property def is_massive(self) -> bool: """ Checks if the unit has the 'massive' attribute. """ return IS_MASSIVE in self._type_data.attributes @property def is_psionic(self) -> bool: """ Checks if the unit has the 'psionic' attribute. """ return IS_PSIONIC in self._type_data.attributes @property def tech_alias(self) -> Optional[List[UnitTypeId]]: """ Building tech equality, e.g. OrbitalCommand is the same as CommandCenter For Hive, this returns [UnitTypeId.Hatchery, UnitTypeId.Lair] For SCV, this returns None """ return self._type_data.tech_alias @property def unit_alias(self) -> Optional[UnitTypeId]: """ Building type equality, e.g. FlyingOrbitalCommand is the same as OrbitalCommand For flying OrbitalCommand, this returns UnitTypeId.OrbitalCommand For SCV, this returns None """ return self._type_data.unit_alias @property_immutable_cache def _weapons(self): """ Returns the weapons of the unit. """ try: return self._type_data._proto.weapons except: return None @property_immutable_cache def can_attack(self) -> bool: """ Checks if the unit can attack at all. """ # TODO BATTLECRUISER doesnt have weapons in proto?! return bool(self._weapons) or self.type_id in {UNIT_BATTLECRUISER, UNIT_ORACLE} @property_immutable_cache def can_attack_both(self) -> bool: """ Checks if the unit can attack both ground and air units. """ if self.type_id == UNIT_BATTLECRUISER: return True if self._weapons: return any(weapon.type in TARGET_BOTH for weapon in self._weapons) return False @property_immutable_cache def can_attack_ground(self) -> bool: """ Checks if the unit can attack ground units. """ if self.type_id in {UNIT_BATTLECRUISER, UNIT_ORACLE}: return True if self._weapons: return any(weapon.type in TARGET_GROUND for weapon in self._weapons) return False @property_immutable_cache def ground_dps(self) -> Union[int, float]: """ Returns the dps against ground units. Does not include upgrades. """ if self.can_attack_ground: weapon = next((weapon for weapon in self._weapons if weapon.type in TARGET_GROUND), None) if weapon: return (weapon.damage * weapon.attacks) / weapon.speed return 0 @property_immutable_cache def ground_range(self) -> Union[int, float]: """ Returns the range against ground units. Does not include upgrades. """ if self.type_id == UNIT_ORACLE: return 4 if self.type_id == UNIT_BATTLECRUISER: return 6 if self.can_attack_ground: weapon = next((weapon for weapon in self._weapons if weapon.type in TARGET_GROUND), None) if weapon: return weapon.range return 0 @property_immutable_cache def can_attack_air(self) -> bool: """ Checks if the unit can air attack at all. Does not include upgrades. """ if self.type_id == UNIT_BATTLECRUISER: return True if self._weapons: return any(weapon.type in TARGET_AIR for weapon in self._weapons) return False @property_immutable_cache def air_dps(self) -> Union[int, float]: """ Returns the dps against air units. Does not include upgrades. """ if self.can_attack_air: weapon = next((weapon for weapon in self._weapons if weapon.type in TARGET_AIR), None) if weapon: return (weapon.damage * weapon.attacks) / weapon.speed return 0 @property_immutable_cache def air_range(self) -> Union[int, float]: """ Returns the range against air units. Does not include upgrades. 
""" if self.type_id == UNIT_BATTLECRUISER: return 6 if self.can_attack_air: weapon = next((weapon for weapon in self._weapons if weapon.type in TARGET_AIR), None) if weapon: return weapon.range return 0 @property_immutable_cache def bonus_damage(self): """ Returns a tuple of form '(bonus damage, armor type)' if unit does 'bonus damage' against 'armor type'. Possible armor typs are: 'Light', 'Armored', 'Biological', 'Mechanical', 'Psionic', 'Massive', 'Structure'. """ # TODO: Consider units with ability attacks (Oracle, Baneling) or multiple attacks (Thor). if self._weapons: for weapon in self._weapons: if weapon.damage_bonus: b = weapon.damage_bonus[0] return (b.bonus, Attribute(b.attribute).name) else: return None @property def armor(self) -> Union[int, float]: """ Returns the armor of the unit. Does not include upgrades """ return self._type_data._proto.armor @property def sight_range(self) -> Union[int, float]: """ Returns the sight range of the unit. """ return self._type_data._proto.sight_range @property def movement_speed(self) -> Union[int, float]: """ Returns the movement speed of the unit. Does not include upgrades or buffs. """ return self._type_data._proto.movement_speed @property def is_mineral_field(self) -> bool: """ Checks if the unit is a mineral field. """ return self._type_data.has_minerals @property def is_vespene_geyser(self) -> bool: """ Checks if the unit is a non-empty vespene geyser or gas extraction building. """ return self._type_data.has_vespene @property def health(self) -> Union[int, float]: """ Returns the health of the unit. Does not include shields. """ return self._proto.health @property def health_max(self) -> Union[int, float]: """ Returns the maximum health of the unit. Does not include shields. """ return self._proto.health_max @property def health_percentage(self) -> Union[int, float]: """ Returns the percentage of health the unit has. Does not include shields. """ if self._proto.health_max == 0: return 0 return self._proto.health / self._proto.health_max @property def shield(self) -> Union[int, float]: """ Returns the shield points the unit has. Returns 0 for non-protoss units. """ return self._proto.shield @property def shield_max(self) -> Union[int, float]: """ Returns the maximum shield points the unit can have. Returns 0 for non-protoss units. """ return self._proto.shield_max @property def shield_percentage(self) -> Union[int, float]: """ Returns the percentage of shield points the unit has. Returns 0 for non-protoss units. """ if self._proto.shield_max == 0: return 0 return self._proto.shield / self._proto.shield_max @property def energy(self) -> Union[int, float]: """ Returns the amount of energy the unit has. Returns 0 for units without energy. """ return self._proto.energy @property def energy_max(self) -> Union[int, float]: """ Returns the maximum amount of energy the unit can have. Returns 0 for units without energy. """ return self._proto.energy_max @property def energy_percentage(self) -> Union[int, float]: """ Returns the percentage of amount of energy the unit has. Returns 0 for units without energy. """ if self._proto.energy_max == 0: return 0 return self._proto.energy / self._proto.energy_max @property def is_snapshot(self) -> bool: """ Checks if the unit is only available as a snapshot for the bot. Enemy buildings that have been scouted and are in the fog of war or attacking enemy units on higher, not visible ground appear this way. 
""" return self._proto.display_type == IS_SNAPSHOT @property def is_visible(self) -> bool: """ Checks if the unit is visible for the bot. NOTE: This means the bot has vision of the position of the unit! It does not give any information about the cloak status of the unit.""" return self._proto.display_type == IS_VISIBLE @property def alliance(self) -> Alliance: """ Returns the team the unit belongs to. """ return self._proto.alliance @property def is_mine(self) -> bool: """ Checks if the unit is controlled by the bot. """ return self._proto.alliance == IS_MINE @property def is_enemy(self) -> bool: """ Checks if the unit is hostile. """ return self._proto.alliance == IS_ENEMY @property def owner_id(self) -> int: """ Returns the owner of the unit. This is a value of 1 or 2 in a two player game. """ return self._proto.owner @property def position_tuple(self) -> Tuple[float, float]: """ Returns the 2d position of the unit as tuple without conversion to Point2. """ return self._proto.pos.x, self._proto.pos.y @property_immutable_cache def position(self) -> Point2: """ Returns the 2d position of the unit. """ return Point2.from_proto(self._proto.pos) @property_immutable_cache def position3d(self) -> Point3: """ Returns the 3d position of the unit. """ return Point3.from_proto(self._proto.pos) def distance_to(self, p: Union[Unit, Point2, Point3]) -> Union[int, float]: """ Using the 2d distance between self and p. To calculate the 3d distance, use unit.position3d.distance_to(p) :param p: """ if isinstance(p, Unit): return self._bot_object._distance_squared_unit_to_unit(self, p) ** 0.5 return self._bot_object.distance_math_hypot(self.position_tuple, p) def target_in_range(self, target: Unit, bonus_distance: Union[int, float] = 0) -> bool: """ Checks if the target is in range. Includes the target's radius when calculating distance to target. :param target: :param bonus_distance: """ # TODO: Fix this because immovable units (sieged tank, planetary fortress etc.) have a little lower range than this formula if self.can_attack_ground and not target.is_flying: unit_attack_range = self.ground_range elif self.can_attack_air and (target.is_flying or target.type_id == UNIT_COLOSSUS): unit_attack_range = self.air_range else: return False return ( self._bot_object._distance_squared_unit_to_unit(self, target) <= (self.radius + target.radius + unit_attack_range + bonus_distance) ** 2 ) def in_ability_cast_range( self, ability_id: AbilityId, target: Union[Unit, Point2], bonus_distance: float = 0 ) -> bool: """ Test if a unit is able to cast an ability on the target without checking ability cooldown (like stalker blink) or if ability is made available through research (like HT storm). 
:param ability_id: :param target: :param bonus_distance: """ cast_range = self._bot_object._game_data.abilities[ability_id.value]._proto.cast_range assert cast_range > 0, f"Checking for an ability ({ability_id}) that has no cast range" ability_target_type = self._bot_object._game_data.abilities[ability_id.value]._proto.target # For casting abilities that target other units, like transfuse, feedback, snipe, yamato if ability_target_type in {Target.Unit.value, Target.PointOrUnit.value} and isinstance(target, Unit): return ( self._bot_object._distance_squared_unit_to_unit(self, target) <= (cast_range + self.radius + target.radius + bonus_distance) ** 2 ) # For casting abilities on the ground, like queen creep tumor, ravager bile, HT storm if ability_target_type in {Target.Point.value, Target.PointOrUnit.value} and isinstance( target, (Point2, tuple) ): return ( self._bot_object._distance_pos_to_pos(self.position_tuple, target) <= cast_range + self.radius + bonus_distance ) return False @property def facing(self) -> Union[int, float]: """ Returns direction the unit is facing as a float in range [0,2π). 0 is in direction of x axis.""" return self._proto.facing # TODO: a function that checks if this unit is facing another unit def is_facing_unit(self, other_unit: Unit, angle_error: float = 1e-3) -> bool: """ Function not completed yet :param other_unit: :param angle_error: """ pass @property def radius(self) -> Union[int, float]: """ Half of unit size. See https://liquipedia.net/starcraft2/Unit_Statistics_(Legacy_of_the_Void) """ return self._proto.radius @property def build_progress(self) -> Union[int, float]: """ Returns completion in range [0,1].""" return self._proto.build_progress @property def is_ready(self) -> bool: """ Checks if the unit is completed. """ return self.build_progress == 1 @property def cloak(self) -> CloakState: """ Returns cloak state. See https://github.com/Blizzard/s2client-api/blob/d9ba0a33d6ce9d233c2a4ee988360c188fbe9dbf/include/sc2api/sc2_unit.h#L95 """ return self._proto.cloak @property def is_cloaked(self) -> bool: """ Checks if the unit is cloaked. """ return self._proto.cloak in IS_CLOAKED @property def is_revealed(self) -> bool: """ Checks if the unit is revealed. """ return self._proto.cloak is IS_REVEALED @property def can_be_attacked(self) -> bool: """ Checks if the unit is revealed or not cloaked and therefore can be attacked. """ return self._proto.cloak in CAN_BE_ATTACKED @property_immutable_cache def buffs(self) -> Set: """ Returns the set of current buffs the unit has. """ return {BuffId(buff_id) for buff_id in self._proto.buff_ids} @property_immutable_cache def is_carrying_minerals(self) -> bool: """ Checks if a worker or MULE is carrying (gold-)minerals. """ return not IS_CARRYING_MINERALS.isdisjoint(self.buffs) @property_immutable_cache def is_carrying_vespene(self) -> bool: """ Checks if a worker is carrying vespene gas. """ return not IS_CARRYING_VESPENE.isdisjoint(self.buffs) @property_immutable_cache def is_carrying_resource(self) -> bool: """ Checks if a worker is carrying a resource. """ return not IS_CARRYING_RESOURCES.isdisjoint(self.buffs) @property def detect_range(self) -> Union[int, float]: """ Returns the detection distance of the unit. """ return self._proto.detect_range @property_immutable_cache def is_detector(self) -> bool: """ Checks if the unit is a detector. Has to be completed in order to detect and Photoncannons also need to be powered. 
""" return self.is_ready and (self.type_id in IS_DETECTOR or self.type_id == UNIT_PHOTONCANNON and self.is_powered) @property def radar_range(self) -> Union[int, float]: return self._proto.radar_range @property def is_selected(self) -> bool: """ Checks if the unit is currently selected. """ return self._proto.is_selected @property def is_on_screen(self) -> bool: """ Checks if the unit is on the screen. """ return self._proto.is_on_screen @property def is_blip(self) -> bool: """ Checks if the unit is detected by a sensor tower. """ return self._proto.is_blip @property def is_powered(self) -> bool: """ Checks if the unit is powered by a pylon or warppism. """ return self._proto.is_powered @property def is_active(self) -> bool: """ Checks if the unit is currently training or researching. """ return self._proto.is_active # PROPERTIES BELOW THIS COMMENT ARE NOT POPULATED FOR SNAPSHOTS @property def mineral_contents(self) -> int: """ Returns the amount of minerals remaining in a mineral field. """ return self._proto.mineral_contents @property def vespene_contents(self) -> int: """ Returns the amount of gas remaining in a geyser. """ return self._proto.vespene_contents @property def has_vespene(self) -> bool: """ Checks if a geyser has any gas remaining. You can't build extractors on empty geysers. """ return bool(self._proto.vespene_contents) @property def is_flying(self) -> bool: """ Checks if the unit is flying. """ return self._proto.is_flying or self.has_buff(BuffId.GRAVITONBEAM) @property def is_burrowed(self) -> bool: """ Checks if the unit is burrowed. """ return self._proto.is_burrowed @property def is_hallucination(self) -> bool: """ Returns True if the unit is your own hallucination or detected. """ return self._proto.is_hallucination @property def attack_upgrade_level(self) -> int: """ Returns the upgrade level of the units attack. # NOTE: Returns 0 for units without a weapon. """ return self._proto.attack_upgrade_level @property def armor_upgrade_level(self) -> int: """ Returns the upgrade level of the units armor. """ return self._proto.armor_upgrade_level @property def shield_upgrade_level(self) -> int: """ Returns the upgrade level of the units shield. # NOTE: Returns 0 for units without a shield. """ return self._proto.shield_upgrade_level @property def buff_duration_remain(self) -> int: """ Returns the amount of remaining frames of the visible timer bar. # NOTE: Returns 0 for units without a timer bar. """ return self._proto.buff_duration_remain @property def buff_duration_max(self) -> int: """ Returns the maximum amount of frames of the visible timer bar. # NOTE: Returns 0 for units without a timer bar. """ return self._proto.buff_duration_max # PROPERTIES BELOW THIS COMMENT ARE NOT POPULATED FOR ENEMIES @property_mutable_cache def orders(self) -> List[UnitOrder]: """ Returns the a list of the current orders. """ return [UnitOrder.from_proto(order, self._bot_object) for order in self._proto.orders] @property_immutable_cache def order_target(self) -> Optional[Union[int, Point2]]: """ Returns the target tag (if it is a Unit) or Point2 (if it is a Position) from the first order, returns None if the unit is idle """ if self.orders: if isinstance(self.orders[0].target, int): return self.orders[0].target else: return Point2.from_proto(self.orders[0].target) return None @property def noqueue(self) -> bool: """ Checks if the unit is idle. 
""" warnings.warn("noqueue will be removed soon, please use is_idle instead", DeprecationWarning, stacklevel=2) return self.is_idle @property def is_idle(self) -> bool: """ Checks if unit is idle. """ return not self._proto.orders def is_using_ability(self, abilities: Union[AbilityId, Set[AbilityId]]) -> bool: """ Check if the unit is using one of the given abilities. Only works for own units. """ if not self.orders: return False if isinstance(abilities, AbilityId): abilities = {abilities} return self.orders[0].ability.id in abilities @property_immutable_cache def is_moving(self) -> bool: """ Checks if the unit is moving. Only works for own units. """ return self.is_using_ability(AbilityId.MOVE) @property_immutable_cache def is_attacking(self) -> bool: """ Checks if the unit is attacking. Only works for own units. """ return self.is_using_ability(IS_ATTACKING) @property_immutable_cache def is_patrolling(self) -> bool: """ Checks if a unit is patrolling. Only works for own units. """ return self.is_using_ability(IS_PATROLLING) @property_immutable_cache def is_gathering(self) -> bool: """ Checks if a unit is on its way to a mineral field or vespene geyser to mine. Only works for own units. """ return self.is_using_ability(IS_GATHERING) @property_immutable_cache def is_returning(self) -> bool: """ Checks if a unit is returning from mineral field or vespene geyser to deliver resources to townhall. Only works for own units. """ return self.is_using_ability(IS_RETURNING) @property_immutable_cache def is_collecting(self) -> bool: """ Checks if a unit is gathering or returning. Only works for own units. """ return self.is_using_ability(IS_COLLECTING) @property_immutable_cache def is_constructing_scv(self) -> bool: """ Checks if the unit is an SCV that is currently building. Only works for own units. """ return self.is_using_ability(IS_CONSTRUCTING_SCV) @property_immutable_cache def is_transforming(self) -> bool: """ Checks if the unit transforming. Only works for own units. """ return self.type_id in transforming and self.is_using_ability(transforming[self.type_id]) @property_immutable_cache def is_repairing(self) -> bool: """ Checks if the unit is an SCV or MULE that is currently repairing. Only works for own units. """ return self.is_using_ability(IS_REPAIRING) @property def add_on_tag(self) -> int: """ Returns the tag of the addon of unit. """ return self._proto.add_on_tag @property def has_add_on(self) -> bool: """ Checks if unit has an addon attached. """ return bool(self._proto.add_on_tag) @property_immutable_cache def add_on_land_position(self) -> Point2: """ If unit is addon (techlab or reactor), returns the position where a terran building has to land to connect to addon """ return self.position.offset(Point2((-2.5, 0.5))) @property_mutable_cache def passengers(self) -> Set[Unit]: """ Returns the units inside a Bunker, CommandCenter, PlanetaryFortress, Medivac, Nydus, Overlord or WarpPrism. """ return {Unit(unit, self._bot_object) for unit in self._proto.passengers} @property_mutable_cache def passengers_tags(self) -> Set[int]: """ Returns the tags of the units inside a Bunker, CommandCenter, PlanetaryFortress, Medivac, Nydus, Overlord or WarpPrism. """ return {unit.tag for unit in self._proto.passengers} @property def cargo_used(self) -> Union[float, int]: """ Returns how much cargo space is currently used in the unit. Note that some units take up more than one space. 
""" return self._proto.cargo_space_taken @property def has_cargo(self) -> bool: """ Checks if this unit has any units loaded. """ return bool(self._proto.cargo_space_taken) @property def cargo_size(self) -> Union[float, int]: """ Returns the amount of cargo space the unit needs. """ return self._type_data.cargo_size @property def cargo_max(self) -> Union[float, int]: """ How much cargo space is available at maximum. """ return self._proto.cargo_space_max @property def cargo_left(self) -> Union[float, int]: """ Returns how much cargo space is currently left in the unit. """ return self._proto.cargo_space_max - self._proto.cargo_space_taken @property def assigned_harvesters(self) -> int: """ Returns the number of workers currently gathering resources at a geyser or mining base.""" return self._proto.assigned_harvesters @property def ideal_harvesters(self) -> int: """ Returns the ideal harverster count for unit. 3 for gas buildings, 2*n for n mineral patches on that base.""" return self._proto.ideal_harvesters @property def surplus_harvesters(self) -> int: """ Returns a positive int if unit has too many harvesters mining, a negative int if it has too few mining.""" return self._proto.assigned_harvesters - self._proto.ideal_harvesters @property_immutable_cache def weapon_cooldown(self) -> Union[int, float]: """ Returns the time until the unit can fire again, returns -1 for units that can't attack. Usage: if unit.weapon_cooldown == 0: self.actions.append(unit.attack(target)) elif unit.weapon_cooldown < 0: self.actions.append(unit.move(closest_allied_unit_because_cant_attack)) else: self.actions.append(unit.move(retreatPosition)) """ if self.can_attack: return self._proto.weapon_cooldown return -1 @property def engaged_target_tag(self) -> int: # TODO What does this do? return self._proto.engaged_target_tag # Unit functions def has_buff(self, buff: BuffId) -> bool: """ Checks if unit has buff 'buff'. """ assert isinstance(buff, BuffId), f"{buff} is no BuffId" return buff in self.buffs def train(self, unit: UnitTypeId, queue: bool = False) -> UnitCommand: """ Orders unit to train another 'unit'. Usage: self.actions.append(COMMANDCENTER.train(SCV)) :param unit: :param queue: """ return self(self._bot_object._game_data.units[unit.value].creation_ability.id, queue=queue) def build(self, unit: UnitTypeId, position: Union[Point2, Point3] = None, queue: bool = False) -> UnitCommand: """ Orders unit to build another 'unit' at 'position'. Usage: self.actions.append(SCV.build(COMMANDCENTER, position)) :param unit: :param position: :param queue: """ return self(self._bot_object._game_data.units[unit.value].creation_ability.id, target=position, queue=queue) def research(self, upgrade: UpgradeId, queue: bool = False) -> UnitCommand: """ Orders unit to research 'upgrade'. Requires UpgradeId to be passed instead of AbilityId. :param upgrade: :param queue: """ return self(self._bot_object._game_data.upgrades[upgrade.value].research_ability.id, queue=queue) def warp_in(self, unit: UnitTypeId, position: Union[Point2, Point3]) -> UnitCommand: """ Orders Warpgate to warp in 'unit' at 'position'. :param unit: :param queue: """ normal_creation_ability = self._bot_object._game_data.units[unit.value].creation_ability.id return self(warpgate_abilities[normal_creation_ability], target=position) def attack(self, target: Union[Unit, Point2, Point3], queue: bool = False) -> UnitCommand: """ Orders unit to attack. Target can be a Unit or Point2. 
Attacking a position will make the unit move there and attack everything on its way. :param target: :param queue: """ return self(AbilityId.ATTACK, target=target, queue=queue) def gather(self, target: Unit, queue: bool = False) -> UnitCommand: """ Orders a unit to gather minerals or gas. 'Target' must be a mineral patch or a gas extraction building. :param target: :param queue: """ return self(AbilityId.HARVEST_GATHER, target=target, queue=queue) def return_resource(self, target: Unit = None, queue: bool = False) -> UnitCommand: """ Orders the unit to return resource. Does not need a 'target'. :param target: :param queue: """ return self(AbilityId.HARVEST_RETURN, target=target, queue=queue) def move(self, position: Union[Point2, Point3], queue: bool = False) -> UnitCommand: """ Orders the unit to move to 'position'. Target can be a Unit (to follow that unit) or Point2. :param position: :param queue: """ return self(AbilityId.MOVE_MOVE, target=position, queue=queue) def scan_move(self, *args, **kwargs) -> UnitCommand: """ Deprecated: This ability redirects to 'AbilityId.ATTACK' """ return self(AbilityId.SCAN_MOVE, *args, **kwargs) def hold_position(self, queue: bool = False) -> UnitCommand: """ Orders a unit to stop moving. It will not move until it gets new orders. :param queue: """ return self(AbilityId.HOLDPOSITION, queue=queue) def stop(self, queue: bool = False) -> UnitCommand: """ Orders a unit to stop, but can start to move on its own if it is attacked, enemy unit is in range or other friendly units need the space. :param queue: """ return self(AbilityId.STOP, queue=queue) def patrol(self, position: Union[Point2, Point3], queue: bool = False) -> UnitCommand: """ Orders a unit to patrol between position it has when the command starts and the target position. Can be queued up to seven patrol points. If the last point is the same as the starting point, the unit will patrol in a circle. :param position: :param queue: """ return self(AbilityId.PATROL, target=position, queue=queue) def repair(self, repair_target: Unit, queue: bool = False) -> UnitCommand: """ Order an SCV or MULE to repair. :param repair_target: :param queue: """ return self(AbilityId.EFFECT_REPAIR, target=repair_target, queue=queue) def __hash__(self): return self.tag def __eq__(self, other): try: return self.tag == other.tag except: return False def __call__(self, ability, target=None, queue: bool = False): return UnitCommand(ability, self, target=target, queue=queue)
1.640625
2
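A hedged sketch of how these Unit helpers are used from a bot's step loop; it follows the pattern given in the weapon_cooldown docstring above (appending commands to self.actions), while the BotAI-side names known_enemy_units and start_location are assumed from the python-sc2 API of the same vintage rather than taken from this file:

import sc2

class KiteBot(sc2.BotAI):
    async def on_step(self, iteration):
        enemies = self.known_enemy_units
        if not enemies:
            return
        for unit in self.units:
            target = enemies.closest_to(unit)
            if unit.weapon_cooldown == 0 and unit.target_in_range(target):
                # Weapon ready: attack (pattern from the weapon_cooldown docstring).
                self.actions.append(unit.attack(target))
            else:
                # Weapon on cooldown, or this unit cannot attack: fall back.
                self.actions.append(unit.move(self.start_location))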
dtr_code/shared/run_torch_trial.py
merrymercy/dtr-prototype
1
4760
""" To avoid any issues of memory hanging around between inputs, we run each input as a separate process. A little ugly but effective """ import gc import glob import json import os import random import time import numpy as np import torch from common import invoke_main, read_json, write_json, prepare_out_file, check_file_exists from validate_config import validate_trials_config from pt_trial_util import create_csv_writer from tqdm import tqdm import model_util def extend_simrd_config(dest_dir, sim_conf_filename, model_name, specific_params, log_name): if not check_file_exists(dest_dir, sim_conf_filename): prepare_out_file(dest_dir, sim_conf_filename) write_json(dest_dir, sim_conf_filename, dict()) conf = read_json(dest_dir, sim_conf_filename) if model_name not in conf: conf[model_name] = [] conf[model_name].append({ 'name': model_util.get_model_family(model_name), 'batch_size': str(specific_params['batch_size']), 'layers': specific_params.get('layers', model_util.get_model_layers(model_name)), 'type': model_util.get_model_type(model_name), 'log': log_name, 'has_start': True }) write_json(dest_dir, sim_conf_filename, conf) def save_trial_log(dest_dir, sim_conf_filename, model_name, specific_params, is_baseline=False): """ Find the last DTR log produced in the trial (if any exist) and move it to the directory """ all_logs = glob.glob(os.path.join(os.getcwd(), '*.log')) if not all_logs: return # if we delete all logs in advance, there should be at most one log assert len(all_logs) == 1 most_recent = all_logs[0] # rename and move # (new name just appends info to the old one) batch_size = specific_params['batch_size'] budget = specific_params['memory_budget'] if budget < 0: budget = 'inf' new_name = '{}-{}-{}-{}'.format(model_name, batch_size, budget, os.path.basename(most_recent)) filename = prepare_out_file(dest_dir, new_name) os.rename(most_recent, filename) if is_baseline and sim_conf_filename is not None: extend_simrd_config(dest_dir, sim_conf_filename, model_name, specific_params, filename) def delete_logs(): for log in glob.glob(os.path.join(os.getcwd(), '*.log')): os.remove(log) def run_single_measurement(model_name, produce_model, run_model, teardown, inp, criterion, extra_params, use_dtr, use_profiling): """ This function initializes a model and performs a single measurement of the model on the given input. While it might seem most reasonable to initialize the model outside of the loop, DTR's logs have shown that certain constants in the model persist between loop iterations; performing these actions in a separate *function scope* turned out to be the only way to prevent having those constants hang around. 
Returns a dict of measurements """ torch.cuda.reset_max_memory_allocated() # resetting means the count should be reset to # only what's in scope, meaning only the input input_mem = torch.cuda.max_memory_allocated() model = produce_model(extra_params=extra_params) params = [] for m in model: if hasattr(m, 'parameters'): params.extend(m.parameters()) model_mem = torch.cuda.max_memory_allocated() optimizer = torch.optim.SGD(model[0].parameters(), 1e-3, momentum=0.9, weight_decay=1e-4) start = torch.cuda.Event(enable_timing=True) end = torch.cuda.Event(enable_timing=True) # start timing torch.cuda.synchronize() start_time = time.time() if use_dtr: torch.reset_profile() start.record() # with torch.autograd.profiler.profile(use_cuda=True) as prof: run_model(criterion, *model, *inp, optimizer=optimizer) end.record() start_sync = time.time() torch.cuda.synchronize() end_sync = time.time() end_time = time.time() # end timing if use_dtr: # operators-only time, tracked by DTR cuda_time = torch.compute_time() base_compute_time = -1 remat_compute_time = -1 search_time = -1 cost_time = -1 if use_profiling: base_compute_time = torch.base_compute_time() remat_compute_time = torch.remat_compute_time() search_time = torch.search_time() cost_time = torch.cost_time() torch.reset_profile() total_mem = torch.cuda.max_memory_allocated() teardown(*model) torch.cuda.reset_max_memory_allocated() del model if use_dtr: torch.toggle_log(False) del params batch_size = len(inp[0]) ips = batch_size / (end_time - start_time) result = { 'time': end_time - start_time, 'sync_time': end_sync - start_sync, 'gpu_time': start.elapsed_time(end), 'input_mem': input_mem, 'model_mem': model_mem, 'total_mem': total_mem, 'base_compute_time': base_compute_time, 'remat_compute_time': remat_compute_time, 'search_time': search_time, 'cost_time': cost_time, 'batch_size': batch_size, 'ips': ips } if use_dtr: result['cuda_time'] = cuda_time else: result['cuda_time'] = -1.0 return result def timing_loop(model_name, i, config, use_dtr, specific_params, writer, trial_run=False, trial_run_outfile=None, memory_budget=-1.0): dry_run = config['dry_run'] measurements = [] print(f'Running {model_name} : {specific_params}') # remove any logs hanging around (so we only have to look for one) delete_logs() # we only save logs for the final input on DTR save_log = use_dtr and specific_params.get('save_logs', config['save_logs']) and i == config['n_inputs'] - 1 if use_dtr: torch.toggle_log(False) # whether to report profiling info use_profiling = use_dtr and specific_params.get('use_profiling', False) use_cudnn = model_util.use_cudnn(model_name) with torch.backends.cudnn.flags(enabled=use_cudnn, benchmark=use_cudnn): criterion = model_util.get_criterion(model_name) produce_model, gen_input, run_model, teardown = model_util.prepare_model(model_name, specific_params['batch_size'], use_dtr=use_dtr) inp = gen_input(i, specific_params.get('extra_params', dict())) n_reps = specific_params.get('n_reps', config['n_reps']) if use_profiling: torch.toggle_profile(use_profiling) progress = tqdm(range(dry_run + n_reps)) for j in progress: progress.set_description(f'Rep [{j}]' + '' if j > dry_run else f'Dry run [{j}]') gc.collect() # Annotate where the final run starts in the log if save_log and j == dry_run + n_reps - 1: torch.toggle_log(True) torch.annotate_log('START') res = run_single_measurement(model_name, produce_model, run_model, teardown, inp, criterion, extra_params=specific_params.get('extra_params', dict()), use_dtr=use_dtr, use_profiling=use_profiling) if j 
>= dry_run: measurements.append(res) # Dump results model_name_replace_dict = { 'tv_resnet152': 'resnet152', 'tv_resnet50': 'resnet50', } train_ips_list = [] batch_size = None for res in measurements: batch_size = res['batch_size'] train_ips_list.append(res['ips']) out_file = "speed_results.tsv" with open(out_file, "a") as fout: val_dict = { 'network': model_name_replace_dict.get(model_name, model_name), 'algorithm': 'dtr', 'budget': specific_params['memory_budget'], 'batch_size': batch_size, 'ips': np.median(train_ips_list) if train_ips_list else -1, } print(val_dict) fout.write(json.dumps(val_dict) + "\n") print(f"save results to {out_file}") # write to csv file only when this trial is not # for getting a baseline memory usage if trial_run: write_json(os.getcwd(), trial_run_outfile, { 'mem' : max(map(lambda data: data['total_mem'], measurements)) }) return if save_log: save_trial_log(config['log_dest'], config.get('simrd_config', None), model_name, specific_params, is_baseline=specific_params['memory_budget'] == -1) # clean up after ourselves delete_logs() # do all the writing after the trial is over for j in range(len(measurements)): data = measurements[j] # do unit conversions now: times in ms, # memory in MB writer.writerow({ 'time': data['time']*1e3, 'sync_time': data['sync_time']*1e3, # pytorch's cuda elapsed time is already in ms 'gpu_time': float(data['gpu_time']), # 'cuda_time' : float(data['cuda_time']) * 1e-6, 'input_mem': data['input_mem']*1e-6, 'model_mem': data['model_mem']*1e-6, 'total_mem': data['total_mem']*1e-6, 'memory_budget': memory_budget, # profiling (reported in nanoseconds) 'base_compute_time': data['base_compute_time']*1e-6, 'remat_compute_time': data['remat_compute_time']*1e-6, 'search_time': data['search_time']*1e-6, 'cost_time': data['cost_time']*1e-6, 'rep': j - dry_run, 'input': i, **specific_params }) def main(config_dir, experiment_mode, model_name, input_idx, params_file, out_file, trial_run=False, trial_run_outfile=None): if 'DTR_MODEL_NAME' in os.environ: model_name = os.environ['DTR_MODEL_NAME'] config, msg = validate_trials_config(config_dir) if config is None: print(msg) return 1 use_dtr = (experiment_mode == 'dtr') i = int(input_idx) is_trial = trial_run == 'True' if config['set_seed']: torch.manual_seed(config['seed'] + i) random.seed(config['seed'] + i) cwd = os.getcwd() # handle specific params, esp. for DTR specific_params = read_json(cwd, params_file) if 'DTR_MEMORY_BUDGET' in os.environ: specific_params['memory_budget'] = float(os.environ['DTR_MEMORY_BUDGET']) assert 'batch_size' in specific_params if use_dtr: assert 'memory_budget' in specific_params if specific_params['memory_budget'] > 0: print(f'Setting budget to {int(specific_params["memory_budget"])}') torch.set_memory_budget(int(specific_params['memory_budget'])) if is_trial: timing_loop(model_name, i, config, use_dtr, specific_params, None, True, trial_run_outfile) return with open(out_file, 'a', newline='') as csvfile: writer = create_csv_writer(csvfile, specific_params) timing_loop(model_name, i, config, use_dtr, specific_params, writer, memory_budget=specific_params.get('memory_budget', -1)) if __name__ == '__main__': invoke_main(main, 'config_dir', 'experiment_mode', 'model_name', 'input_idx', 'params_file', 'out_file', 'trial_run', 'trial_run_outfile')
1.304688
1
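For reference, a hedged example of the params_file JSON that main() and timing_loop() read: batch_size is always required, memory_budget is required for DTR runs (-1 means unlimited/baseline), and n_reps, save_logs, use_profiling, and extra_params are optional overrides. The concrete values below are illustrative assumptions:

{
  "batch_size": 32,
  "memory_budget": 8000000000,
  "n_reps": 10,
  "save_logs": true,
  "use_profiling": false
}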
test/e2e/tests/test_instance.py
acornett21/ack-ec2-controller
0
4768
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
#	 http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Integration tests for Instance API.
"""

import datetime
import pytest
import time
import logging

from acktest.resources import random_suffix_name
from acktest.k8s import resource as k8s

from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_ec2_resource
from e2e.replacement_values import REPLACEMENT_VALUES
from e2e.bootstrap_resources import get_bootstrap_resources

RESOURCE_PLURAL = "instances"

# highly available instance type for deterministic testing
INSTANCE_TYPE = "m4.large"
INSTANCE_AMI = "Amazon Linux 2 Kernel"
INSTANCE_TAG_KEY = "owner"
INSTANCE_TAG_VAL = "ack-controller"

CREATE_WAIT_AFTER_SECONDS = 10
DELETE_WAIT_AFTER_SECONDS = 10
TIMEOUT_SECONDS = 300


def get_instance(ec2_client, instance_id: str) -> dict:
    instance = None
    try:
        resp = ec2_client.describe_instances(
            InstanceIds=[instance_id]
        )
        instance = resp["Reservations"][0]["Instances"][0]
    except Exception as e:
        logging.debug(e)
    finally:
        return instance


def get_instance_state(ec2_client, instance_id):
    instance_state = None
    try:
        instance = get_instance(ec2_client, instance_id)
        instance_state = instance["State"]["Name"]
    except Exception as e:
        logging.debug(e)
    finally:
        return instance_state


def wait_for_instance_or_die(ec2_client, instance_id, desired_state, timeout_sec):
    # Compute the deadline once, before polling; recomputing it on every
    # iteration would mean the timeout never expires.
    timeout = datetime.datetime.now() + datetime.timedelta(seconds=timeout_sec)
    while True:
        if datetime.datetime.now() >= timeout:
            pytest.fail(f"Timed out waiting for Instance to enter {desired_state} state")
        time.sleep(DELETE_WAIT_AFTER_SECONDS)

        instance_state = get_instance_state(ec2_client, instance_id)
        if instance_state == desired_state:
            break


def get_ami_id(ec2_client):
    try:
        # Use latest AL2
        resp = ec2_client.describe_images(
            Owners=['amazon'],
            Filters=[
                {"Name": "architecture", "Values": ['x86_64']},
                {"Name": "state", "Values": ['available']},
                {"Name": "virtualization-type", "Values": ['hvm']},
            ],
        )
        for image in resp['Images']:
            if 'Description' in image:
                if INSTANCE_AMI in image['Description']:
                    return image['ImageId']
    except Exception as e:
        logging.debug(e)


@pytest.fixture
def instance(ec2_client):
    test_resource_values = REPLACEMENT_VALUES.copy()
    resource_name = random_suffix_name("instance-ack-test", 24)
    test_vpc = get_bootstrap_resources().SharedTestVPC
    subnet_id = test_vpc.public_subnets.subnet_ids[0]
    ami_id = get_ami_id(ec2_client)

    test_resource_values["INSTANCE_NAME"] = resource_name
    test_resource_values["INSTANCE_AMI_ID"] = ami_id
    test_resource_values["INSTANCE_TYPE"] = INSTANCE_TYPE
    test_resource_values["INSTANCE_SUBNET_ID"] = subnet_id
    test_resource_values["INSTANCE_TAG_KEY"] = INSTANCE_TAG_KEY
    test_resource_values["INSTANCE_TAG_VAL"] = INSTANCE_TAG_VAL

    # Load Instance CR
    resource_data = load_ec2_resource(
        "instance",
        additional_replacements=test_resource_values,
    )
    logging.debug(resource_data)

    # Create k8s resource
    ref = k8s.CustomResourceReference(
        CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
        resource_name, namespace="default",
    )
    k8s.create_custom_resource(ref, resource_data)
    cr = k8s.wait_resource_consumed_by_controller(ref)

    assert cr is not None
    assert k8s.get_resource_exists(ref)

    yield (ref, cr)

    # Delete the instance when tests complete
    try:
        _, deleted = k8s.delete_custom_resource(ref, 3, 10)
        assert deleted
    except:
        pass


@service_marker
@pytest.mark.canary
class TestInstance:
    def test_create_delete(self, ec2_client, instance):
        (ref, cr) = instance
        resource_id = cr["status"]["instanceID"]

        time.sleep(CREATE_WAIT_AFTER_SECONDS)

        # Check Instance exists
        instance = get_instance(ec2_client, resource_id)
        assert instance is not None

        # Give time for instance to come up
        wait_for_instance_or_die(ec2_client, resource_id, 'running', TIMEOUT_SECONDS)

        # Validate instance tags
        instance_tags = instance["Tags"]
        tag_present = False
        for t in instance_tags:
            if (t['Key'] == INSTANCE_TAG_KEY and
                    t['Value'] == INSTANCE_TAG_VAL):
                tag_present = True
        assert tag_present

        # Delete k8s resource
        _, deleted = k8s.delete_custom_resource(ref, 2, 5)
        assert deleted is True

        # Reservation still exists, but instance will commence termination
        # State needs to be 'terminated' in order to remove the dependency on the shared subnet
        # for successful test cleanup
        wait_for_instance_or_die(ec2_client, resource_id, 'terminated', TIMEOUT_SECONDS)
1.601563
2
jwt_auth/admin.py
alaraayan/todo-backend
0
4776
from django.contrib import admin
from django.contrib.auth import get_user_model

User = get_user_model()

admin.site.register(User)
0.6875
1
contrib/micronet/scripts/file2buf.py
pmalhaire/WireHub
337
4784
#!/usr/bin/env python3

import os
import sys

MAX = 8

fpath = sys.argv[1]
name = sys.argv[2]

with open(fpath, "rb") as fh:
    sys.stdout.write("char %s[] = {" % (name,))

    i = 0
    while True:
        if i > 0:
            sys.stdout.write(", ")

        if i % MAX == 0:
            sys.stdout.write("\n\t")

        c = fh.read(1)
        if not c:
            sys.stdout.write("\n")
            break

        sys.stdout.write("0x%.2x" % (ord(c), ))
        i = i + 1

print("};")
print("")
print("unsigned int %s_sz = %s;" % (name, i))
print("")
1.3125
1
mflops/model_info.py
shuncyu/mflops
1
4800
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Dec 14 17:38:48 2020 @author: luke """ import sys from functools import partial import torch import torch.nn as nn import prettytable as pt from .basic_hook import MODULES_MAPPING def get_model_compute_info(model, input_res, print_per_layer_stat=False, input_constructor=None, ost=sys.stdout, verbose=False, ignore_modules=[], custom_modules_hooks={}): assert type(input_res) is tuple assert len(input_res) >= 1 assert isinstance(model, nn.Module) global CUSTOM_MODULES_MAPPING CUSTOM_MODULES_MAPPING = custom_modules_hooks compute_model = add_computing_methods(model) compute_model.eval() compute_model.start_compute(ost=ost, verbose=verbose, ignore_list=ignore_modules) if input_constructor: input = input_constructor(input_res) _ = compute_model(**input) else: try: batch = torch.ones(()).new_empty((1, *input_res), dtype=next(compute_model.parameters()).dtype, device=next(compute_model.parameters()).device) except StopIteration: batch = torch.ones(()).new_empty((1, *input_res)) _ = compute_model(batch) flops_count, mac_count, params_count = compute_model.compute_average_compute_cost() if print_per_layer_stat: print_model_with_compute(compute_model, flops_count, mac_count, params_count, ost=ost) compute_model.stop_compute() CUSTOM_MODULES_MAPPING = {} tb = pt.PrettyTable() tb.field_names = ['Metrics', 'Value'] tb.add_row(['%s' %'Floating Point Operations (FLOPs)', '%8s' %to_string(flops_count)]) tb.add_row(['%s' %'Memory Access Cost (MAC)', '%8s' %to_string(mac_count)]) tb.add_row(['%s' %'Number of Parameters', '%8s' %to_string(params_count)]) print(tb) return flops_count, mac_count, params_count def to_string(params_num, units=None, precision=3): if units is None: if params_num // 10**9 > 0: return str(round(params_num / 10**9, 3)) + ' G' elif params_num // 10**6 > 0: return str(round(params_num / 10**6, 3)) + ' M' elif params_num // 10**3 > 0: return str(round(params_num / 10**3, 3)) + ' K' else: return str(params_num) else: if units == 'G': return str(round(params_num / 10**9, precision)) + ' ' + units if units == 'M': return str(round(params_num / 10**6, precision)) + ' ' + units elif units == 'K': return str(round(params_num / 10**3, precision)) + ' ' + units else: return str(params_num) def print_model_with_compute(model, total_flops, total_mac, total_params, units='M', precision=3, ost=sys.stdout): def accumulate_params(self): if is_supported_instance(self): return self.__params__ else: sum = 0 for m in self.children(): sum += m.accumulate_params() return sum def accumulate_flops(self): if is_supported_instance(self): return self.__flops__ / model.__batch_counter__ else: sum = 0 for m in self.children(): sum += m.accumulate_flops() return sum def accumulate_mac(self): if is_supported_instance(self): return self.__mac__ / model.__batch_counter__ else: sum = 0 for m in self.children(): sum += m.accumulate_mac() return sum def compute_repr(self): accumulated_params_num = self.accumulate_params() accumulated_flops_cost = self.accumulate_flops() accumulated_mac_cost = self.accumulate_mac() return ', '.join([to_string(accumulated_params_num, units=units, precision=precision), '{:.3%} Params'.format(accumulated_params_num / total_params), to_string(accumulated_flops_cost, units=units, precision=precision), '{:.3%} FLOPs'.format(accumulated_flops_cost / total_flops), to_string(accumulated_mac_cost, units=units, precision=precision), '{:.3%} MAC'.format(accumulated_mac_cost / total_mac), '{:.3} MAC/FLOPs'.format(accumulated_mac_cost / 
(accumulated_flops_cost + 1e-5) \ * total_flops / (total_mac + 1e-5)), self.original_extra_repr()]) def add_extra_repr(m): m.accumulate_flops = accumulate_flops.__get__(m) m.accumulate_mac = accumulate_mac.__get__(m) m.accumulate_params = accumulate_params.__get__(m) compute_extra_repr = compute_repr.__get__(m) if m.extra_repr != compute_extra_repr: m.original_extra_repr = m.extra_repr m.extra_repr = compute_extra_repr assert m.extra_repr != m.original_extra_repr def del_extra_repr(m): if hasattr(m, 'original_extra_repr'): m.extra_repr = m.original_extra_repr del m.original_extra_repr if hasattr(m, 'accumulate_flops'): del m.accumulate_flops if hasattr(m, 'accumulate_mac'): del m.accumulate_mac model.apply(add_extra_repr) print(repr(model), file=ost) model.apply(del_extra_repr) def get_model_parameters_number(model): params_num = sum(p.numel() for p in model.parameters() if p.requires_grad) return params_num def add_computing_methods(net_main_module): # adding additional methods to the existing module object, # this is done this way so that each function has access to self object net_main_module.start_compute = start_compute.__get__(net_main_module) net_main_module.stop_compute = stop_compute.__get__(net_main_module) net_main_module.reset_compute = reset_compute.__get__(net_main_module) net_main_module.compute_average_compute_cost = compute_average_compute_cost.__get__( net_main_module) net_main_module.reset_compute() return net_main_module def compute_average_compute_cost(self): """ A method that will be available after add_computing_methods() is called on a desired net object. Returns current mean flops/mac consumption per image. """ batches_count = self.__batch_counter__ flops_sum = 0 mac_sum = 0 params_sum = 0 for module in self.modules(): if is_supported_instance(module): flops_sum += module.__flops__ mac_sum += module.__mac__ params_sum = get_model_parameters_number(self) return flops_sum / batches_count, mac_sum / batches_count, params_sum def start_compute(self, **kwargs): """ A method that will be available after add_computing_methods() is called on a desired net object. Activates the computation of mean flops/mac consumption per image. Call it before you run the network. """ add_batch_counter_hook_function(self) seen_types = set() def add_compute_hook_function(module, ost, verbose, ignore_list): if type(module) in ignore_list: seen_types.add(type(module)) if is_supported_instance(module): module.__params__ = 0 elif is_supported_instance(module): if hasattr(module, '__flops_handle__'): return if type(module) in CUSTOM_MODULES_MAPPING: handle = module.register_forward_hook( CUSTOM_MODULES_MAPPING[type(module)]) else: handle = module.register_forward_hook(MODULES_MAPPING[type(module)]) module.__flops_handle__ = handle module.__mac_handle__ = handle seen_types.add(type(module)) else: if verbose and not type(module) in (nn.Sequential, nn.ModuleList) and \ not type(module) in seen_types: print('Warning: module ' + type(module).__name__ + ' is treated as a zero-op.', file=ost) seen_types.add(type(module)) self.apply(partial(add_compute_hook_function, **kwargs)) def stop_compute(self): """ A method that will be available after add_computing_methods() is called on a desired net object. Stops computing the mean flops consumption per image. Call whenever you want to pause the computation. 
""" remove_batch_counter_hook_function(self) self.apply(remove_compute_hook_function) def reset_compute(self): """ A method that will be available after add_computing_methods() is called on a desired net object. Resets statistics computed so far. """ add_batch_counter_variables_or_reset(self) self.apply(add_compute_variable_or_reset) def batch_counter_hook(module, input, output): batch_size = 1 if len(input) > 0: # Can have multiple inputs, getting the first one input = input[0] batch_size = len(input) else: pass print('Warning! No positional inputs found for a module,' ' assuming batch size is 1.') module.__batch_counter__ += batch_size def add_batch_counter_variables_or_reset(module): module.__batch_counter__ = 0 def add_batch_counter_hook_function(module): if hasattr(module, '__batch_counter_handle__'): return handle = module.register_forward_hook(batch_counter_hook) module.__batch_counter_handle__ = handle def remove_batch_counter_hook_function(module): if hasattr(module, '__batch_counter_handle__'): module.__batch_counter_handle__.remove() del module.__batch_counter_handle__ def add_compute_variable_or_reset(module): if is_supported_instance(module): if hasattr(module, '__flops__') or hasattr(module, '__mac__') or \ hasattr(module, '__params__'): print('Warning: variables __flops__ or __mac__ or __params__ are already ' 'defined for the module' + type(module).__name__ + ' ptflops can affect your code!') module.__flops__ = 0 module.__mac__ = 0 module.__params__ = get_model_parameters_number(module) def is_supported_instance(module): if type(module) in MODULES_MAPPING or type(module) in CUSTOM_MODULES_MAPPING: return True return False def remove_compute_hook_function(module): if is_supported_instance(module): if hasattr(module, '__flops_handle__'): module.__flops_handle__.remove() del module.__flops_handle__ if hasattr(module, '__mac_handle__'): module.__mac_handle__.remove() del module.__mac_handle__
2.015625
2
scrapy/clarinetear/spiders/pagina12.py
ramiror/clarinete
0
4832
from datetime import datetime
import scrapy
import lxml
from lxml.html.clean import Cleaner
import re

SOURCE = 'Página 12'
LANGUAGE = 'es'

cleaner = Cleaner(allow_tags=['p', 'br', 'b', 'a', 'strong', 'i', 'em'])


class Pagina12Spider(scrapy.Spider):
    name = 'pagina12'
    allowed_domains = ['www.pagina12.com.ar']
    start_urls = ['https://www.pagina12.com.ar/']

    def start_requests(self):
        url = getattr(self, 'article_url', None)
        if url is not None:
            yield scrapy.Request(url, callback=self.parse_article, cb_kwargs=dict(url=url))

    def parse(self, response):
        urls = []
        for article in response.css('article'):
            link = article.css('a')
            url = link.attrib['href']
            if not url:
                continue
            if not url.startswith('http'):
                url = 'https://www.pagina12.com.ar' + url
            urls.append(url)

            maybe_img = article.css('img.show-for-large-only')
            obj = {
                'title': article.css('.article-title a::text, a .title::text').get(),
                'volanta': (article.css('.article-title a .title-prefix::text').get() or '').strip(),
                'url': url,
                'image': maybe_img.attrib['src'] if maybe_img else None,
                'source': SOURCE,
                'source_language': LANGUAGE,
            }
            yield obj

            request = scrapy.Request(url, callback=self.parse_article, cb_kwargs=dict(url=url))
            yield request

        yield {'homepage': urls, 'source': SOURCE}

    def parse_article(self, response, url):
        html = ''.join(response.xpath('//div[@class="article-main-content article-text "]/p').extract())
        if not html:
            return
        content = lxml.html.tostring(cleaner.clean_html(lxml.html.fromstring(html))).decode('utf-8')

        date = response.css('div.date span::text').get().strip()
        date_fragments = re.match(r'^([0-9]{1,2}) de ([a-z]+) de ([0-9]{4})$', date)
        months = {
            'enero': 1,
            'febrero': 2,
            'marzo': 3,
            'abril': 4,
            'mayo': 5,
            'junio': 6,
            'julio': 7,
            'agosto': 8,
            'septiembre': 9,
            'octubre': 10,
            'noviembre': 11,
            'diciembre': 12,
        }
        day = int(date_fragments.group(1))
        month = months[date_fragments.group(2)]
        year = int(date_fragments.group(3))
        hour = 0
        minute = 0
        date = datetime(year, month, day, hour, minute)

        obj = {
            'url': url,
            'content': content,
            'date': date.isoformat()
        }
        yield obj
1.75
2
scripts/test_cache_size_vs_code_balance.py
tareqmalas/girih
7
4840
#!/usr/bin/env python

def igs_test(target_dir, exp_name, th, group='', dry_run=0):
    from scripts.conf.conf import machine_conf, machine_info
    from scripts.utils import run_test
    import itertools

    cs = 8192
    th = th

    # Test using reasonable time
    # T = scale * size / perf
    # scale = T*perf/size
    desired_time = 20
    if(machine_info['hostname']=='Haswell_18core'):
        k_perf_order = {0:150, 1:500, 4:40, 5:200 ,6:20}
    elif(machine_info['hostname']=='IVB_10core'):
        k_perf_order = {0:120, 1:300, 4:35, 5:150 ,6:20}
    k_time_scale = {n: desired_time*k_perf_order[n] for n in k_perf_order.keys()}

    #exp = is_dp, ts, k, N, bs_z, tb_l
    exp_l = []

    # spatial blocking
    exp_l = exp_l + \
          [(0, 0, 0, 960, 0, [-1])
          ,(1, 0, 0, 960, 0, [-1])
          ,(1, 0, 1, 960, 0, [-1])
          ,(1, 0, 4, 480, 0, [-1])
          ,(1, 0, 5, 680, 0, [-1])
          ]

    # 1WD
    exp_l = exp_l + \
          [(0, 2, 0, 960, 1, [1, 3, 5])
          ,(1, 2, 0, 960, 1, [1, 3, 5])
          ,(1, 2, 1, 960, 1, [1, 3, 5, 7, 9, 11, 15, 19, 23, 29])
          ,(1, 2, 4, 480, 1, [1, 3, 5])
          ,(1, 2, 5, 680, 1, [1, 3, 9, 19])
          ]

    # Solar kernel
    exp_l = exp_l + \
          [(1, 2, 6, 480, 1, [1, 3, 5, 7])
          ,(1, 2, 6, 480, 2, [1, 3, 5, 7])
          ,(1, 2, 6, 480, 3, [1, 3, 5, 7])
          ,(1, 2, 6, 480, 6, [1, 3, 5, 7])
          ,(1, 2, 6, 480, 9, [1, 3, 5, 7])]

    mwdt = 1
    tgs, thx, thy, thz = (1,1,1,1)
    count = 0
    for is_dp, ts, kernel, N, bs_z, tb_l in exp_l:
        for tb in tb_l:
            outfile = ('kernel%d_isdp%d_ts%d_bsz%d_tb%d_N%d_%s_%s.txt' %
                       (kernel, is_dp, ts, bs_z, tb, N, group, exp_name[-13:]))
            nt = max(int(k_time_scale[kernel]/(N**3/1e6)), 30)
            # print outfile, ts, kernel, tb, N
            run_test(ntests=1, dry_run=dry_run, is_dp=is_dp, th=th,
                     tgs=tgs, thx=thx, thy=thy, thz=thz,
                     kernel=kernel, ts=ts, nx=N, ny=N, nz=N, nt=nt,
                     outfile=outfile, target_dir=target_dir,
                     cs=cs, mwdt=mwdt, tb=tb, nwf=bs_z)
            count = count + 1
    return count


def main():
    from scripts.utils import create_project_tarball, get_stencil_num, parse_results
    from scripts.conf.conf import machine_conf, machine_info
    import os, sys
    import time, datetime

    # user params
    dry_run = 1 if len(sys.argv)<2 else int(sys.argv[1])  # dry run

    time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H_%M')
    exp_name = "cache_size_vs_code_balance_at_%s_%s" % (machine_info['hostname'], time_stamp)

    tarball_dir = 'results/'+exp_name
    if(dry_run==0):
        create_project_tarball(tarball_dir, "project_"+exp_name)

    target_dir = 'results/' + exp_name

    th = 1
    pin_str = "S0:0-%d "%(th-1)
    count = 0
    group = 'MEM'
    if( (machine_info['hostname']=='IVB_10core') and (group=='TLB_DATA') ): group='TLB'
    machine_conf['pinning_args'] = "-m -g " + group + " -C " + pin_str + ' -s 0x03 --'
    count = count + igs_test(target_dir, exp_name, th=th, group=group, dry_run=dry_run)

    print "experiments count =" + str(count)


if __name__ == "__main__":
    main()
1.234375
1
tensorflow_rnn/mnist_lstm.py
naoki009/samples
0
4848
import numpy as np
import tensorflow as tf

"""
Do an MNIST classification line by line by LSTM
"""

(x_train, y_train), \
    (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train/255.0, x_test/255.0

model = tf.keras.Sequential()
model.add(tf.keras.layers.LSTM(128, input_shape=(None, 28)))
#model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Dense(10))
model.add(tf.keras.layers.Activation("softmax"))

model.summary()

model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              optimizer="sgd",
              metrics=["accuracy"])

model.fit(x_train, y_train, validation_data=(x_test, y_test),
          batch_size=100, epochs=100)
2.015625
2
lattedb/project/formfac/migrations/0009_auto_20200528_0907.py
callat-qcd/lattedb
1
4864
# Generated by Django 3.0.6 on 2020-05-28 09:07 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('project_formfac', '0008_auto_20200408_0823'), ] operations = [ migrations.AlterField( model_name='concatenatedformfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='correlatormeta', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='diskconcatenatedformfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='diskcorrelatorh5dset', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='diskformfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='diskspectrum4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='disktslicedformfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='disktslicedsaveragedformfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='disktslicedsaveragedspectrum4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='disktslicedspectrum4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='formfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. 
Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='spectrum4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='tapeconcatenatedformfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='tapecorrelatorh5dset', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='tapetslicedsaveragedformfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='tapetslicedsaveragedspectrum4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='tslicedformfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='tslicedsaveragedformfactor4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='tslicedsaveragedspectrum4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AlterField( model_name='tslicedspectrum4dfile', name='user', field=models.ForeignKey(blank=True, help_text='User who updated this object. Set on save by connection to database. Anonymous if not found.', null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), ]
0.921875
1
google-cloud-sdk/lib/googlecloudsdk/third_party/apis/datacatalog/v1beta1/datacatalog_v1beta1_messages.py
bopopescu/Social-Lite
0
4872
"""Generated message classes for datacatalog version v1beta1. A fully managed and highly scalable data discovery and metadata management service. """ # NOTE: This file is autogenerated and should not be edited by hand. from apitools.base.protorpclite import messages as _messages from apitools.base.py import encoding package = 'datacatalog' class Binding(_messages.Message): r"""Associates `members` with a `role`. Fields: condition: The condition that is associated with this binding. NOTE: An unsatisfied condition will not allow user access via current binding. Different bindings, including their conditions, are examined independently. members: Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid}`: An email address that represents a specific Google account. For example, `<EMAIL>` . * `serviceAccount:{emailid}`: An email address that represents a service account. For example, `my-other- <EMAIL>`. * `group:{emailid}`: An email address that represents a Google group. For example, `<EMAIL>`. * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `<EMAIL>?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `my-other- <EMAIL>?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `<EMAIL>?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid}` and the recovered group retains the role in the binding. * `domain:{domain}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`. role: Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`. """ condition = _messages.MessageField('Expr', 1) members = _messages.StringField(2, repeated=True) role = _messages.StringField(3) class DatacatalogEntriesLookupRequest(_messages.Message): r"""A DatacatalogEntriesLookupRequest object. Fields: linkedResource: The full name of the Google Cloud Platform resource the Data Catalog entry represents. See: https://cloud.google.com/apis/design/resource_names#full_resource_name. Full names are case-sensitive. Examples: * //bigquery.googleapis.com/ projects/projectId/datasets/datasetId/tables/tableId * //pubsub.googleapis.com/projects/projectId/topics/topicId sqlResource: The SQL name of the entry. SQL names are case-sensitive. 
Examples: * `cloud_pubsub.project_id.topic_id` * ``pubsub.project_id.`topic.id.with.dots` `` * `bigquery.table.project_id.dataset_id.table_id` * `bigquery.dataset.project_id.dataset_id` * `datacatalog.entry.project_id.location_id.entry_group_id.entry_id` `*_id`s shoud satisfy the standard SQL rules for identifiers. https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical. """ linkedResource = _messages.StringField(1) sqlResource = _messages.StringField(2) class DatacatalogProjectsLocationsEntryGroupsCreateRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsCreateRequest object. Fields: entryGroupId: Required. The id of the entry group to create. The id must begin with a letter or underscore, contain only English letters, numbers and underscores, and be at most 64 characters. googleCloudDatacatalogV1beta1EntryGroup: A GoogleCloudDatacatalogV1beta1EntryGroup resource to be passed as the request body. parent: Required. The name of the project this entry group is in. Example: * projects/{project_id}/locations/{location} Note that this EntryGroup and its child resources may not actually be stored in the location in this name. """ entryGroupId = _messages.StringField(1) googleCloudDatacatalogV1beta1EntryGroup = _messages.MessageField('GoogleCloudDatacatalogV1beta1EntryGroup', 2) parent = _messages.StringField(3, required=True) class DatacatalogProjectsLocationsEntryGroupsDeleteRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsDeleteRequest object. Fields: force: Optional. If true, deletes all entries in the entry group. name: Required. The name of the entry group. For example, `projects/{project_id}/locations/{location}/entryGroups/{entry_group_id} `. """ force = _messages.BooleanField(1) name = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsEntryGroupsEntriesCreateRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesCreateRequest object. Fields: entryId: Required. The id of the entry to create. googleCloudDatacatalogV1beta1Entry: A GoogleCloudDatacatalogV1beta1Entry resource to be passed as the request body. parent: Required. The name of the entry group this entry is in. Example: * projects/{project_id}/locations/{location}/entryGroups/{entry_group_id} Note that this Entry and its child resources may not actually be stored in the location in this name. """ entryId = _messages.StringField(1) googleCloudDatacatalogV1beta1Entry = _messages.MessageField('GoogleCloudDatacatalogV1beta1Entry', 2) parent = _messages.StringField(3, required=True) class DatacatalogProjectsLocationsEntryGroupsEntriesDeleteRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesDeleteRequest object. Fields: name: Required. The name of the entry. Example: * projects/{project_id}/l ocations/{location}/entryGroups/{entry_group_id}/entries/{entry_id} """ name = _messages.StringField(1, required=True) class DatacatalogProjectsLocationsEntryGroupsEntriesGetIamPolicyRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesGetIamPolicyRequest object. Fields: getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the request body. resource: REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. 
""" getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1) resource = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsEntryGroupsEntriesGetRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesGetRequest object. Fields: name: Required. The name of the entry. Example: * projects/{project_id}/l ocations/{location}/entryGroups/{entry_group_id}/entries/{entry_id} Entry groups are logical groupings of entries. Currently, users cannot create/modify entry groups. They are created by Data Catalog; they include `@bigquery` for all BigQuery entries, and `@pubsub` for all Cloud Pub/Sub entries. """ name = _messages.StringField(1, required=True) class DatacatalogProjectsLocationsEntryGroupsEntriesPatchRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesPatchRequest object. Fields: googleCloudDatacatalogV1beta1Entry: A GoogleCloudDatacatalogV1beta1Entry resource to be passed as the request body. name: The Data Catalog resource name of the entry in URL format. Example: * projects/{project_id}/locations/{location}/entryGroups/{entry_group_id }/entries/{entry_id} Note that this Entry and its child resources may not actually be stored in the location in this name. updateMask: The fields to update on the entry. If absent or empty, all modifiable fields are updated. The following fields are modifiable: * For entries with type `DATA_STREAM`: * `schema` * For entries with type `FILESET` * `schema` * `display_name` * `description` * `gcs_fileset_spec` * `gcs_fileset_spec.file_patterns` """ googleCloudDatacatalogV1beta1Entry = _messages.MessageField('GoogleCloudDatacatalogV1beta1Entry', 1) name = _messages.StringField(2, required=True) updateMask = _messages.StringField(3) class DatacatalogProjectsLocationsEntryGroupsEntriesTagsCreateRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesTagsCreateRequest object. Fields: googleCloudDatacatalogV1beta1Tag: A GoogleCloudDatacatalogV1beta1Tag resource to be passed as the request body. parent: Required. The name of the resource to attach this tag to. Tags can be attached to Entries. Example: * projects/{project_id}/locations/{loc ation}/entryGroups/{entry_group_id}/entries/{entry_id} Note that this Tag and its child resources may not actually be stored in the location in this name. """ googleCloudDatacatalogV1beta1Tag = _messages.MessageField('GoogleCloudDatacatalogV1beta1Tag', 1) parent = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsEntryGroupsEntriesTagsDeleteRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesTagsDeleteRequest object. Fields: name: Required. The name of the tag to delete. Example: * projects/{proje ct_id}/locations/{location}/entryGroups/{entry_group_id}/entries/{entry_ id}/tags/{tag_id} """ name = _messages.StringField(1, required=True) class DatacatalogProjectsLocationsEntryGroupsEntriesTagsListRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesTagsListRequest object. Fields: pageSize: The maximum number of tags to return. Default is 10. Max limit is 1000. pageToken: Token that specifies which page is requested. If empty, the first page is returned. parent: Required. The name of the Data Catalog resource to list the tags of. The resource could be an Entry. 
""" pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32) pageToken = _messages.StringField(2) parent = _messages.StringField(3, required=True) class DatacatalogProjectsLocationsEntryGroupsEntriesTagsPatchRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesTagsPatchRequest object. Fields: googleCloudDatacatalogV1beta1Tag: A GoogleCloudDatacatalogV1beta1Tag resource to be passed as the request body. name: The resource name of the tag in URL format. Example: * projects/{pr oject_id}/locations/{location}/entrygroups/{entry_group_id}/entries/{ent ry_id}/tags/{tag_id} where `tag_id` is a system-generated identifier. Note that this Tag may not actually be stored in the location in this name. updateMask: The fields to update on the Tag. If absent or empty, all modifiable fields are updated. Currently the only modifiable field is the field `fields`. """ googleCloudDatacatalogV1beta1Tag = _messages.MessageField('GoogleCloudDatacatalogV1beta1Tag', 1) name = _messages.StringField(2, required=True) updateMask = _messages.StringField(3) class DatacatalogProjectsLocationsEntryGroupsEntriesTestIamPermissionsRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsEntriesTestIamPermissionsRequest object. Fields: resource: REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. testIamPermissionsRequest: A TestIamPermissionsRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2) class DatacatalogProjectsLocationsEntryGroupsGetIamPolicyRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsGetIamPolicyRequest object. Fields: getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the request body. resource: REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. """ getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1) resource = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsEntryGroupsGetRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsGetRequest object. Fields: name: Required. The name of the entry group. For example, `projects/{project_id}/locations/{location}/entryGroups/{entry_group_id} `. readMask: The fields to return. If not set or empty, all fields are returned. """ name = _messages.StringField(1, required=True) readMask = _messages.StringField(2) class DatacatalogProjectsLocationsEntryGroupsSetIamPolicyRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsSetIamPolicyRequest object. Fields: resource: REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2) class DatacatalogProjectsLocationsEntryGroupsTestIamPermissionsRequest(_messages.Message): r"""A DatacatalogProjectsLocationsEntryGroupsTestIamPermissionsRequest object. Fields: resource: REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. 
testIamPermissionsRequest: A TestIamPermissionsRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2) class DatacatalogProjectsLocationsTagTemplatesCreateRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesCreateRequest object. Fields: googleCloudDatacatalogV1beta1TagTemplate: A GoogleCloudDatacatalogV1beta1TagTemplate resource to be passed as the request body. parent: Required. The name of the project and the location this template is in. Example: * projects/{project_id}/locations/{location} TagTemplate and its child resources may not actually be stored in the location in this name. tagTemplateId: Required. The id of the tag template to create. """ googleCloudDatacatalogV1beta1TagTemplate = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagTemplate', 1) parent = _messages.StringField(2, required=True) tagTemplateId = _messages.StringField(3) class DatacatalogProjectsLocationsTagTemplatesDeleteRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesDeleteRequest object. Fields: force: Required. Currently, this field must always be set to `true`. This confirms the deletion of any possible tags using this template. `force = false` will be supported in the future. name: Required. The name of the tag template to delete. Example: * projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id } """ force = _messages.BooleanField(1) name = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsTagTemplatesFieldsCreateRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesFieldsCreateRequest object. Fields: googleCloudDatacatalogV1beta1TagTemplateField: A GoogleCloudDatacatalogV1beta1TagTemplateField resource to be passed as the request body. parent: Required. The name of the project this template is in. Example: * projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id } Note that this TagTemplateField may not actually be stored in the location in this name. tagTemplateFieldId: Required. The ID of the tag template field to create. Field ids can contain letters (both uppercase and lowercase), numbers (0-9), underscores (_) and dashes (-). Field IDs must be at least 1 character long and at most 128 characters long. Field IDs must also be unique within their template. """ googleCloudDatacatalogV1beta1TagTemplateField = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagTemplateField', 1) parent = _messages.StringField(2, required=True) tagTemplateFieldId = _messages.StringField(3) class DatacatalogProjectsLocationsTagTemplatesFieldsDeleteRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesFieldsDeleteRequest object. Fields: force: Required. Currently, this field must always be set to `true`. This confirms the deletion of this field from any tags using this field. `force = false` will be supported in the future. name: Required. The name of the tag template field to delete. Example: * projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id }/fields/{tag_template_field_id} """ force = _messages.BooleanField(1) name = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsTagTemplatesFieldsPatchRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesFieldsPatchRequest object. 
Fields: googleCloudDatacatalogV1beta1TagTemplateField: A GoogleCloudDatacatalogV1beta1TagTemplateField resource to be passed as the request body. name: Required. The name of the tag template field. Example: * projects/{ project_id}/locations/{location}/tagTemplates/{tag_template_id}/fields/{ tag_template_field_id} updateMask: Optional. The field mask specifies the parts of the template to be updated. Allowed fields: * `display_name` * `type.enum_type` * `is_required` If `update_mask` is not set or empty, all of the allowed fields above will be updated. When updating an enum type, the provided values will be merged with the existing values. Therefore, enum values can only be added, existing enum values cannot be deleted nor renamed. Updating a template field from optional to required is NOT allowed. """ googleCloudDatacatalogV1beta1TagTemplateField = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagTemplateField', 1) name = _messages.StringField(2, required=True) updateMask = _messages.StringField(3) class DatacatalogProjectsLocationsTagTemplatesFieldsRenameRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesFieldsRenameRequest object. Fields: googleCloudDatacatalogV1beta1RenameTagTemplateFieldRequest: A GoogleCloudDatacatalogV1beta1RenameTagTemplateFieldRequest resource to be passed as the request body. name: Required. The name of the tag template. Example: * projects/{projec t_id}/locations/{location}/tagTemplates/{tag_template_id}/fields/{tag_te mplate_field_id} """ googleCloudDatacatalogV1beta1RenameTagTemplateFieldRequest = _messages.MessageField('GoogleCloudDatacatalogV1beta1RenameTagTemplateFieldRequest', 1) name = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsTagTemplatesGetIamPolicyRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesGetIamPolicyRequest object. Fields: getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the request body. resource: REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. """ getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1) resource = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsTagTemplatesGetRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesGetRequest object. Fields: name: Required. The name of the tag template. Example: * projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id } """ name = _messages.StringField(1, required=True) class DatacatalogProjectsLocationsTagTemplatesPatchRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesPatchRequest object. Fields: googleCloudDatacatalogV1beta1TagTemplate: A GoogleCloudDatacatalogV1beta1TagTemplate resource to be passed as the request body. name: The resource name of the tag template in URL format. Example: * projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id } Note that this TagTemplate and its child resources may not actually be stored in the location in this name. updateMask: The field mask specifies the parts of the template to overwrite. Allowed fields: * `display_name` If absent or empty, all of the allowed fields above will be updated. 
""" googleCloudDatacatalogV1beta1TagTemplate = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagTemplate', 1) name = _messages.StringField(2, required=True) updateMask = _messages.StringField(3) class DatacatalogProjectsLocationsTagTemplatesSetIamPolicyRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesSetIamPolicyRequest object. Fields: resource: REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2) class DatacatalogProjectsLocationsTagTemplatesTestIamPermissionsRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTagTemplatesTestIamPermissionsRequest object. Fields: resource: REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. testIamPermissionsRequest: A TestIamPermissionsRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2) class DatacatalogProjectsLocationsTaxonomiesCreateRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesCreateRequest object. Fields: googleCloudDatacatalogV1beta1Taxonomy: A GoogleCloudDatacatalogV1beta1Taxonomy resource to be passed as the request body. parent: Required. Resource name of the project that the taxonomy will belong to. """ googleCloudDatacatalogV1beta1Taxonomy = _messages.MessageField('GoogleCloudDatacatalogV1beta1Taxonomy', 1) parent = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsTaxonomiesDeleteRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesDeleteRequest object. Fields: name: Required. Resource name of the taxonomy to be deleted. All policy tags in this taxonomy will also be deleted. """ name = _messages.StringField(1, required=True) class DatacatalogProjectsLocationsTaxonomiesExportRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesExportRequest object. Fields: parent: Required. Resource name of the project that taxonomies to be exported will share. serializedTaxonomies: Export taxonomies as serialized taxonomies. taxonomies: Required. Resource names of the taxonomies to be exported. """ parent = _messages.StringField(1, required=True) serializedTaxonomies = _messages.BooleanField(2) taxonomies = _messages.StringField(3, repeated=True) class DatacatalogProjectsLocationsTaxonomiesGetIamPolicyRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesGetIamPolicyRequest object. Fields: getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the request body. resource: REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. """ getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1) resource = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsTaxonomiesGetRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesGetRequest object. Fields: name: Required. Resource name of the requested taxonomy. 
""" name = _messages.StringField(1, required=True) class DatacatalogProjectsLocationsTaxonomiesImportRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesImportRequest object. Fields: googleCloudDatacatalogV1beta1ImportTaxonomiesRequest: A GoogleCloudDatacatalogV1beta1ImportTaxonomiesRequest resource to be passed as the request body. parent: Required. Resource name of project that the newly created taxonomies will belong to. """ googleCloudDatacatalogV1beta1ImportTaxonomiesRequest = _messages.MessageField('GoogleCloudDatacatalogV1beta1ImportTaxonomiesRequest', 1) parent = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsTaxonomiesListRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesListRequest object. Fields: pageSize: The maximum number of items to return. Must be a value between 1 and 1000. If not set, defaults to 50. pageToken: The next_page_token value returned from a previous list request, if any. If not set, defaults to an empty string. parent: Required. Resource name of the project to list the taxonomies of. """ pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32) pageToken = _messages.StringField(2) parent = _messages.StringField(3, required=True) class DatacatalogProjectsLocationsTaxonomiesPatchRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesPatchRequest object. Fields: googleCloudDatacatalogV1beta1Taxonomy: A GoogleCloudDatacatalogV1beta1Taxonomy resource to be passed as the request body. name: Output only. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". updateMask: The update mask applies to the resource. For the `FieldMask` definition, see https://developers.google.com/protocol- buffers/docs/reference/google.protobuf#fieldmask If not set, defaults to all of the fields that are allowed to update. """ googleCloudDatacatalogV1beta1Taxonomy = _messages.MessageField('GoogleCloudDatacatalogV1beta1Taxonomy', 1) name = _messages.StringField(2, required=True) updateMask = _messages.StringField(3) class DatacatalogProjectsLocationsTaxonomiesPolicyTagsCreateRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsCreateRequest object. Fields: googleCloudDatacatalogV1beta1PolicyTag: A GoogleCloudDatacatalogV1beta1PolicyTag resource to be passed as the request body. parent: Required. Resource name of the taxonomy that the policy tag will belong to. """ googleCloudDatacatalogV1beta1PolicyTag = _messages.MessageField('GoogleCloudDatacatalogV1beta1PolicyTag', 1) parent = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsTaxonomiesPolicyTagsDeleteRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsDeleteRequest object. Fields: name: Required. Resource name of the policy tag to be deleted. All of its descendant policy tags will also be deleted. """ name = _messages.StringField(1, required=True) class DatacatalogProjectsLocationsTaxonomiesPolicyTagsGetIamPolicyRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsGetIamPolicyRequest object. Fields: getIamPolicyRequest: A GetIamPolicyRequest resource to be passed as the request body. resource: REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field. 
""" getIamPolicyRequest = _messages.MessageField('GetIamPolicyRequest', 1) resource = _messages.StringField(2, required=True) class DatacatalogProjectsLocationsTaxonomiesPolicyTagsGetRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsGetRequest object. Fields: name: Required. Resource name of the requested policy tag. """ name = _messages.StringField(1, required=True) class DatacatalogProjectsLocationsTaxonomiesPolicyTagsListRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsListRequest object. Fields: pageSize: The maximum number of items to return. Must be a value between 1 and 1000. If not set, defaults to 50. pageToken: The next_page_token value returned from a previous List request, if any. If not set, defaults to an empty string. parent: Required. Resource name of the taxonomy to list the policy tags of. """ pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32) pageToken = _messages.StringField(2) parent = _messages.StringField(3, required=True) class DatacatalogProjectsLocationsTaxonomiesPolicyTagsPatchRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsPatchRequest object. Fields: googleCloudDatacatalogV1beta1PolicyTag: A GoogleCloudDatacatalogV1beta1PolicyTag resource to be passed as the request body. name: Output only. Resource name of this policy tag, whose format is: "pro jects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/ policyTags/{id}". updateMask: The update mask applies to the resource. Only display_name, description and parent_policy_tag can be updated and thus can be listed in the mask. If update_mask is not provided, all allowed fields (i.e. display_name, description and parent) will be updated. For more information including the `FieldMask` definition, see https://developers.google.com/protocol- buffers/docs/reference/google.protobuf#fieldmask If not set, defaults to all of the fields that are allowed to update. """ googleCloudDatacatalogV1beta1PolicyTag = _messages.MessageField('GoogleCloudDatacatalogV1beta1PolicyTag', 1) name = _messages.StringField(2, required=True) updateMask = _messages.StringField(3) class DatacatalogProjectsLocationsTaxonomiesPolicyTagsSetIamPolicyRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsSetIamPolicyRequest object. Fields: resource: REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2) class DatacatalogProjectsLocationsTaxonomiesPolicyTagsTestIamPermissionsRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesPolicyTagsTestIamPermissionsRequest object. Fields: resource: REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. testIamPermissionsRequest: A TestIamPermissionsRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2) class DatacatalogProjectsLocationsTaxonomiesSetIamPolicyRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesSetIamPolicyRequest object. 
Fields: resource: REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field. setIamPolicyRequest: A SetIamPolicyRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) setIamPolicyRequest = _messages.MessageField('SetIamPolicyRequest', 2) class DatacatalogProjectsLocationsTaxonomiesTestIamPermissionsRequest(_messages.Message): r"""A DatacatalogProjectsLocationsTaxonomiesTestIamPermissionsRequest object. Fields: resource: REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field. testIamPermissionsRequest: A TestIamPermissionsRequest resource to be passed as the request body. """ resource = _messages.StringField(1, required=True) testIamPermissionsRequest = _messages.MessageField('TestIamPermissionsRequest', 2) class Empty(_messages.Message): r"""A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`. """ class Expr(_messages.Message): r"""Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information. Fields: description: Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI. expression: Textual representation of an expression in Common Expression Language syntax. location: Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file. title: Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression. """ description = _messages.StringField(1) expression = _messages.StringField(2) location = _messages.StringField(3) title = _messages.StringField(4) class GetIamPolicyRequest(_messages.Message): r"""Request message for `GetIamPolicy` method. Fields: options: OPTIONAL: A `GetPolicyOptions` object for specifying options to `GetIamPolicy`. This field is only used by Cloud IAM. """ options = _messages.MessageField('GetPolicyOptions', 1) class GetPolicyOptions(_messages.Message): r"""Encapsulates settings provided to GetIamPolicy. Fields: requestedPolicyVersion: Optional. 
The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. """ requestedPolicyVersion = _messages.IntegerField(1, variant=_messages.Variant.INT32) class GoogleCloudDatacatalogV1beta1BigQueryDateShardedSpec(_messages.Message): r"""Spec for a group of BigQuery tables with name pattern `[prefix]YYYYMMDD`. Context: https://cloud.google.com/bigquery/docs /partitioned-tables#partitioning_versus_sharding Fields: dataset: Output only. The Data Catalog resource name of the dataset entry the current table belongs to, for example, `projects/{project_id}/locati ons/{location}/entrygroups/{entry_group_id}/entries/{entry_id}`. shardCount: Output only. Total number of shards. tablePrefix: Output only. The table name prefix of the shards. The name of any given shard is `[table_prefix]YYYYMMDD`, for example, for shard `MyTable20180101`, the `table_prefix` is `MyTable`. """ dataset = _messages.StringField(1) shardCount = _messages.IntegerField(2) tablePrefix = _messages.StringField(3) class GoogleCloudDatacatalogV1beta1BigQueryTableSpec(_messages.Message): r"""Describes a BigQuery table. Enums: TableSourceTypeValueValuesEnum: Output only. The table source type. Fields: tableSourceType: Output only. The table source type. tableSpec: Spec of a BigQuery table. This field should only be populated if `table_source_type` is `BIGQUERY_TABLE`. viewSpec: Table view specification. This field should only be populated if `table_source_type` is `BIGQUERY_VIEW`. """ class TableSourceTypeValueValuesEnum(_messages.Enum): r"""Output only. The table source type. Values: TABLE_SOURCE_TYPE_UNSPECIFIED: Default unknown type. BIGQUERY_VIEW: Table view. BIGQUERY_TABLE: BigQuery native table. """ TABLE_SOURCE_TYPE_UNSPECIFIED = 0 BIGQUERY_VIEW = 1 BIGQUERY_TABLE = 2 tableSourceType = _messages.EnumField('TableSourceTypeValueValuesEnum', 1) tableSpec = _messages.MessageField('GoogleCloudDatacatalogV1beta1TableSpec', 2) viewSpec = _messages.MessageField('GoogleCloudDatacatalogV1beta1ViewSpec', 3) class GoogleCloudDatacatalogV1beta1ColumnSchema(_messages.Message): r"""Representation of a column within a schema. Columns could be nested inside other columns. Fields: column: Required. Name of the column. description: Optional. Description of the column. Default value is an empty string. mode: Optional. A column's mode indicates whether the values in this column are required, nullable, etc. Only `NULLABLE`, `REQUIRED` and `REPEATED` are supported. Default mode is `NULLABLE`. subcolumns: Optional. Schema of sub-columns. A column can have zero or more sub-columns. type: Required. Type of the column. """ column = _messages.StringField(1) description = _messages.StringField(2) mode = _messages.StringField(3) subcolumns = _messages.MessageField('GoogleCloudDatacatalogV1beta1ColumnSchema', 4, repeated=True) type = _messages.StringField(5) class GoogleCloudDatacatalogV1beta1Entry(_messages.Message): r"""Entry Metadata. A Data Catalog Entry resource represents another resource in Google Cloud Platform, such as a BigQuery dataset or a Cloud Pub/Sub topic. Clients can use the `linked_resource` field in the Entry resource to refer to the original resource ID of the source system. An Entry resource contains resource details, such as its schema. An Entry can also be used to attach flexible metadata, such as a Tag. 
Enums: TypeValueValuesEnum: The type of the entry. Fields: bigqueryDateShardedSpec: Specification for a group of BigQuery tables with name pattern `[prefix]YYYYMMDD`. Context: https://cloud.google.com/bigquery/docs/partitioned- tables#partitioning_versus_sharding. bigqueryTableSpec: Specification that applies to a BigQuery table. This is only valid on entries of type `TABLE`. description: Entry description, which can consist of several sentences or paragraphs that describe entry contents. Default value is an empty string. displayName: Display information such as title and description. A short name to identify the entry, for example, "Analytics Data - Jan 2011". Default value is an empty string. gcsFilesetSpec: Specification that applies to a Cloud Storage fileset. This is only valid on entries of type FILESET. linkedResource: Output only. The resource this metadata entry refers to. For Google Cloud Platform resources, `linked_resource` is the [full name of the resource](https://cloud.google.com/apis/design/resource_names#ful l_resource_name). For example, the `linked_resource` for a table resource from BigQuery is: * //bigquery.googleapis.com/projects/project Id/datasets/datasetId/tables/tableId name: The Data Catalog resource name of the entry in URL format. Example: * projects/{project_id}/locations/{location}/entryGroups/{entry_group_id }/entries/{entry_id} Note that this Entry and its child resources may not actually be stored in the location in this name. schema: Schema of the entry. An entry might not have any schema attached to it. sourceSystemTimestamps: Output only. Timestamps about the underlying Google Cloud Platform resource, not about this Data Catalog Entry. type: The type of the entry. """ class TypeValueValuesEnum(_messages.Enum): r"""The type of the entry. Values: ENTRY_TYPE_UNSPECIFIED: Default unknown type TABLE: Output only. The type of entry that has a GoogleSQL schema, including logical views. MODEL: Output only. The type of models. DATA_STREAM: Output only. An entry type which is used for streaming entries. Example: Cloud Pub/Sub topic. FILESET: Alpha feature. An entry type which is a set of files or objects. Example: Cloud Storage fileset. """ ENTRY_TYPE_UNSPECIFIED = 0 TABLE = 1 MODEL = 2 DATA_STREAM = 3 FILESET = 4 bigqueryDateShardedSpec = _messages.MessageField('GoogleCloudDatacatalogV1beta1BigQueryDateShardedSpec', 1) bigqueryTableSpec = _messages.MessageField('GoogleCloudDatacatalogV1beta1BigQueryTableSpec', 2) description = _messages.StringField(3) displayName = _messages.StringField(4) gcsFilesetSpec = _messages.MessageField('GoogleCloudDatacatalogV1beta1GcsFilesetSpec', 5) linkedResource = _messages.StringField(6) name = _messages.StringField(7) schema = _messages.MessageField('GoogleCloudDatacatalogV1beta1Schema', 8) sourceSystemTimestamps = _messages.MessageField('GoogleCloudDatacatalogV1beta1SystemTimestamps', 9) type = _messages.EnumField('TypeValueValuesEnum', 10) class GoogleCloudDatacatalogV1beta1EntryGroup(_messages.Message): r"""EntryGroup Metadata. An EntryGroup resource represents a logical grouping of zero or more Data Catalog Entry resources. Fields: dataCatalogTimestamps: Output only. Timestamps about this EntryGroup. Default value is empty timestamps. description: Entry group description, which can consist of several sentences or paragraphs that describe entry group contents. Default value is an empty string. displayName: A short name to identify the entry group, for example, "analytics data - jan 2011". Default value is an empty string. 
name: The resource name of the entry group in URL format. Example: * projects/{project_id}/locations/{location}/entryGroups/{entry_group_id} Note that this EntryGroup and its child resources may not actually be stored in the location in this name. """ dataCatalogTimestamps = _messages.MessageField('GoogleCloudDatacatalogV1beta1SystemTimestamps', 1) description = _messages.StringField(2) displayName = _messages.StringField(3) name = _messages.StringField(4) class GoogleCloudDatacatalogV1beta1ExportTaxonomiesResponse(_messages.Message): r"""Response message for ExportTaxonomies. Fields: taxonomies: List of taxonomies and policy tags in a tree structure. """ taxonomies = _messages.MessageField('GoogleCloudDatacatalogV1beta1SerializedTaxonomy', 1, repeated=True) class GoogleCloudDatacatalogV1beta1FieldType(_messages.Message): r"""A GoogleCloudDatacatalogV1beta1FieldType object. Enums: PrimitiveTypeValueValuesEnum: Represents primitive types - string, bool etc. Fields: enumType: Represents an enum type. primitiveType: Represents primitive types - string, bool etc. """ class PrimitiveTypeValueValuesEnum(_messages.Enum): r"""Represents primitive types - string, bool etc. Values: PRIMITIVE_TYPE_UNSPECIFIED: This is the default invalid value for a type. DOUBLE: A double precision number. STRING: An UTF-8 string. BOOL: A boolean value. TIMESTAMP: A timestamp. """ PRIMITIVE_TYPE_UNSPECIFIED = 0 DOUBLE = 1 STRING = 2 BOOL = 3 TIMESTAMP = 4 enumType = _messages.MessageField('GoogleCloudDatacatalogV1beta1FieldTypeEnumType', 1) primitiveType = _messages.EnumField('PrimitiveTypeValueValuesEnum', 2) class GoogleCloudDatacatalogV1beta1FieldTypeEnumType(_messages.Message): r"""A GoogleCloudDatacatalogV1beta1FieldTypeEnumType object. Fields: allowedValues: Required on create; optional on update. The set of allowed values for this enum. This set must not be empty, the display names of the values in this set must not be empty and the display names of the values must be case-insensitively unique within this set. Currently, enum values can only be added to the list of allowed values. Deletion and renaming of enum values are not supported. Can have up to 500 allowed values. """ allowedValues = _messages.MessageField('GoogleCloudDatacatalogV1beta1FieldTypeEnumTypeEnumValue', 1, repeated=True) class GoogleCloudDatacatalogV1beta1FieldTypeEnumTypeEnumValue(_messages.Message): r"""A GoogleCloudDatacatalogV1beta1FieldTypeEnumTypeEnumValue object. Fields: displayName: Required. The display name of the enum value. Must not be an empty string. """ displayName = _messages.StringField(1) class GoogleCloudDatacatalogV1beta1GcsFileSpec(_messages.Message): r"""Specifications of a single file in Cloud Storage. Fields: filePath: Required. The full file path. Example: `gs://bucket_name/a/b.txt`. gcsTimestamps: Output only. Timestamps about the Cloud Storage file. sizeBytes: Output only. The size of the file, in bytes. """ filePath = _messages.StringField(1) gcsTimestamps = _messages.MessageField('GoogleCloudDatacatalogV1beta1SystemTimestamps', 2) sizeBytes = _messages.IntegerField(3) class GoogleCloudDatacatalogV1beta1GcsFilesetSpec(_messages.Message): r"""Describes a Cloud Storage fileset entry. Fields: filePatterns: Required. Patterns to identify a set of files in Google Cloud Storage. See [Cloud Storage documentation](/storage/docs/gsutil/addlhelp/WildcardNames) for more information. Note that bucket wildcards are currently not supported. 
Examples of valid file_patterns: * `gs://bucket_name/dir/*`: matches all files within `bucket_name/dir` directory. * `gs://bucket_name/dir/**`: matches all files in `bucket_name/dir` spanning all subdirectories. * `gs://bucket_name/file*`: matches files prefixed by `file` in `bucket_name` * `gs://bucket_name/??.txt`: matches files with two characters followed by `.txt` in `bucket_name` * `gs://bucket_name/[aeiou].txt`: matches files that contain a single vowel character followed by `.txt` in `bucket_name` * `gs://bucket_name/[a-m].txt`: matches files that contain `a`, `b`, ... or `m` followed by `.txt` in `bucket_name` * `gs://bucket_name/a/*/b`: matches all files in `bucket_name` that match `a/*/b` pattern, such as `a/c/b`, `a/d/b` * `gs://another_bucket/a.txt`: matches `gs://another_bucket/a.txt` You can combine wildcards to provide more powerful matches, for example: * `gs://bucket_name/[a-m]??.j*g` sampleGcsFileSpecs: Output only. Sample files contained in this fileset, not all files contained in this fileset are represented here. """ filePatterns = _messages.StringField(1, repeated=True) sampleGcsFileSpecs = _messages.MessageField('GoogleCloudDatacatalogV1beta1GcsFileSpec', 2, repeated=True) class GoogleCloudDatacatalogV1beta1ImportTaxonomiesRequest(_messages.Message): r"""Request message for ImportTaxonomies. Fields: inlineSource: Inline source used for taxonomies import """ inlineSource = _messages.MessageField('GoogleCloudDatacatalogV1beta1InlineSource', 1) class GoogleCloudDatacatalogV1beta1ImportTaxonomiesResponse(_messages.Message): r"""Response message for ImportTaxonomies. Fields: taxonomies: Taxonomies that were imported. """ taxonomies = _messages.MessageField('GoogleCloudDatacatalogV1beta1Taxonomy', 1, repeated=True) class GoogleCloudDatacatalogV1beta1InlineSource(_messages.Message): r"""Inline source used for taxonomies import. Fields: taxonomies: Required. Taxonomies to be imported. """ taxonomies = _messages.MessageField('GoogleCloudDatacatalogV1beta1SerializedTaxonomy', 1, repeated=True) class GoogleCloudDatacatalogV1beta1ListPolicyTagsResponse(_messages.Message): r"""Response message for ListPolicyTags. Fields: nextPageToken: Token used to retrieve the next page of results, or empty if there are no more results in the list. policyTags: The policy tags that are in the requested taxonomy. """ nextPageToken = _messages.StringField(1) policyTags = _messages.MessageField('GoogleCloudDatacatalogV1beta1PolicyTag', 2, repeated=True) class GoogleCloudDatacatalogV1beta1ListTagsResponse(_messages.Message): r"""Response message for ListTags. Fields: nextPageToken: Token to retrieve the next page of results. It is set to empty if no items remain in results. tags: Tag details. """ nextPageToken = _messages.StringField(1) tags = _messages.MessageField('GoogleCloudDatacatalogV1beta1Tag', 2, repeated=True) class GoogleCloudDatacatalogV1beta1ListTaxonomiesResponse(_messages.Message): r"""Response message for ListTaxonomies. Fields: nextPageToken: Token used to retrieve the next page of results, or empty if there are no more results in the list. taxonomies: Taxonomies that the project contains. """ nextPageToken = _messages.StringField(1) taxonomies = _messages.MessageField('GoogleCloudDatacatalogV1beta1Taxonomy', 2, repeated=True) class GoogleCloudDatacatalogV1beta1PolicyTag(_messages.Message): r"""Denotes one policy tag in a taxonomy (e.g. ssn). Policy Tags can be defined in a hierarchy. For example, consider the following hierarchy: Geolocation -> (LatLong, City, ZipCode).
PolicyTag "Geolocation" contains three child policy tags: "LatLong", "City", and "ZipCode". Fields: childPolicyTags: Output only. Resource names of child policy tags of this policy tag. description: Description of this policy tag. It must: contain only unicode characters, tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes long when encoded in UTF-8. If not set, defaults to an empty description. displayName: Required. User defined name of this policy tag. It must: be unique within the parent taxonomy; contain only unicode letters, numbers, underscores, dashes and spaces; not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8. name: Output only. Resource name of this policy tag, whose format is: "pro jects/{project_number}/locations/{location_id}/taxonomies/{taxonomy_id}/ policyTags/{id}". parentPolicyTag: Resource name of this policy tag's parent policy tag (e.g. for the "LatLong" policy tag in the example above, this field contains the resource name of the "Geolocation" policy tag). If empty, it means this policy tag is a top level policy tag (e.g. this field is empty for the "Geolocation" policy tag in the example above). If not set, defaults to an empty string. """ childPolicyTags = _messages.StringField(1, repeated=True) description = _messages.StringField(2) displayName = _messages.StringField(3) name = _messages.StringField(4) parentPolicyTag = _messages.StringField(5) class GoogleCloudDatacatalogV1beta1RenameTagTemplateFieldRequest(_messages.Message): r"""Request message for RenameTagTemplateField. Fields: newTagTemplateFieldId: Required. The new ID of this tag template field. For example, `my_new_field`. """ newTagTemplateFieldId = _messages.StringField(1) class GoogleCloudDatacatalogV1beta1Schema(_messages.Message): r"""Represents a schema (e.g. BigQuery, GoogleSQL, Avro schema). Fields: columns: Required. Schema of columns. A maximum of 10,000 columns and sub- columns can be specified. """ columns = _messages.MessageField('GoogleCloudDatacatalogV1beta1ColumnSchema', 1, repeated=True) class GoogleCloudDatacatalogV1beta1SearchCatalogRequest(_messages.Message): r"""Request message for SearchCatalog. Fields: orderBy: Specifies the ordering of results, currently supported case- sensitive choices are: * `relevance`, only supports descending * `last_access_timestamp [asc|desc]`, defaults to descending if not specified * `last_modified_timestamp [asc|desc]`, defaults to descending if not specified If not specified, defaults to `relevance` descending. pageSize: Number of results in the search page. If <=0 then defaults to 10. Max limit for page_size is 1000. Throws an invalid argument for page_size > 1000. pageToken: Optional. Pagination token returned in an earlier SearchCatalogResponse.next_page_token, which indicates that this is a continuation of a prior SearchCatalogRequest call, and that the system should return the next page of data. If empty, the first page is returned. query: Required. The query string in search query syntax. The query must be non-empty. Query strings can be as simple as "x" or more qualified as: * name:x * column:x * description:y Note: Query tokens need to have a minimum of 3 characters for substring matching to work correctly. See [Data Catalog Search Syntax](/data-catalog/docs/how-to/search-reference) for more information. scope: Required. The scope of this search request.
""" orderBy = _messages.StringField(1) pageSize = _messages.IntegerField(2, variant=_messages.Variant.INT32) pageToken = _messages.StringField(3) query = _messages.StringField(4) scope = _messages.MessageField('GoogleCloudDatacatalogV1beta1SearchCatalogRequestScope', 5) class GoogleCloudDatacatalogV1beta1SearchCatalogRequestScope(_messages.Message): r"""A GoogleCloudDatacatalogV1beta1SearchCatalogRequestScope object. Fields: includeGcpPublicDatasets: If `true`, include Google Cloud Platform (GCP) public datasets in the search results. Info on GCP public datasets is available at https://cloud.google.com/public-datasets/. By default, GCP public datasets are excluded. includeOrgIds: Data Catalog tries to automatically choose the right corpus of data to search through. You can ensure an organization is included by adding it to `include_org_ids`. You can ensure a project's org is included with `include_project_ids`. You must specify at least one organization using `include_org_ids` or `include_project_ids` in all search requests. List of organization IDs to search within. To find your organization ID, follow instructions in https://cloud.google.com /resource-manager/docs/creating-managing-organization. includeProjectIds: List of project IDs to search within. To learn more about the distinction between project names/IDs/numbers, go to https://cloud.google.com/docs/overview/#projects. """ includeGcpPublicDatasets = _messages.BooleanField(1) includeOrgIds = _messages.StringField(2, repeated=True) includeProjectIds = _messages.StringField(3, repeated=True) class GoogleCloudDatacatalogV1beta1SearchCatalogResponse(_messages.Message): r"""Response message for SearchCatalog. Fields: nextPageToken: The token that can be used to retrieve the next page of results. results: Search results. """ nextPageToken = _messages.StringField(1) results = _messages.MessageField('GoogleCloudDatacatalogV1beta1SearchCatalogResult', 2, repeated=True) class GoogleCloudDatacatalogV1beta1SearchCatalogResult(_messages.Message): r"""A result that appears in the response of a search request. Each result captures details of one entry that matches the search. Enums: SearchResultTypeValueValuesEnum: Type of the search result. This field can be used to determine which Get method to call to fetch the full resource. Fields: linkedResource: The full name of the cloud resource the entry belongs to. See: https://cloud.google.com/apis/design/resource_names#full_resource_name. Example: * `//bigquery.googleapis.com/projects/projectId/datasets/data setId/tables/tableId` relativeResourceName: The relative resource name of the resource in URL format. Examples: * `projects/{project_id}/locations/{location_id}/ent ryGroups/{entry_group_id}/entries/{entry_id}` * `projects/{project_id}/tagTemplates/{tag_template_id}` searchResultSubtype: Sub-type of the search result. This is a dot- delimited description of the resource's full type, and is the same as the value callers would provide in the "type" search facet. Examples: `entry.table`, `entry.dataStream`, `tagTemplate`. searchResultType: Type of the search result. This field can be used to determine which Get method to call to fetch the full resource. """ class SearchResultTypeValueValuesEnum(_messages.Enum): r"""Type of the search result. This field can be used to determine which Get method to call to fetch the full resource. Values: SEARCH_RESULT_TYPE_UNSPECIFIED: Default unknown type. ENTRY: An Entry. TAG_TEMPLATE: A TagTemplate. ENTRY_GROUP: An EntryGroup. 
""" SEARCH_RESULT_TYPE_UNSPECIFIED = 0 ENTRY = 1 TAG_TEMPLATE = 2 ENTRY_GROUP = 3 linkedResource = _messages.StringField(1) relativeResourceName = _messages.StringField(2) searchResultSubtype = _messages.StringField(3) searchResultType = _messages.EnumField('SearchResultTypeValueValuesEnum', 4) class GoogleCloudDatacatalogV1beta1SerializedPolicyTag(_messages.Message): r"""Message representing one policy tag when exported as a nested proto. Fields: childPolicyTags: Children of the policy tag if any. description: Description of the serialized policy tag. The length of the description is limited to 2000 bytes when encoded in UTF-8. If not set, defaults to an empty description. displayName: Required. Display name of the policy tag. Max 200 bytes when encoded in UTF-8. """ childPolicyTags = _messages.MessageField('GoogleCloudDatacatalogV1beta1SerializedPolicyTag', 1, repeated=True) description = _messages.StringField(2) displayName = _messages.StringField(3) class GoogleCloudDatacatalogV1beta1SerializedTaxonomy(_messages.Message): r"""Message capturing a taxonomy and its policy tag hierarchy as a nested proto. Used for taxonomy import/export and mutation. Fields: description: Description of the serialized taxonomy. The length of the description is limited to 2000 bytes when encoded in UTF-8. If not set, defaults to an empty description. displayName: Required. Display name of the taxonomy. Max 200 bytes when encoded in UTF-8. policyTags: Top level policy tags associated with the taxonomy if any. """ description = _messages.StringField(1) displayName = _messages.StringField(2) policyTags = _messages.MessageField('GoogleCloudDatacatalogV1beta1SerializedPolicyTag', 3, repeated=True) class GoogleCloudDatacatalogV1beta1SystemTimestamps(_messages.Message): r"""Timestamps about this resource according to a particular system. Fields: createTime: The creation time of the resource within the given system. expireTime: Output only. The expiration time of the resource within the given system. Currently only apllicable to BigQuery resources. updateTime: The last-modified time of the resource within the given system. """ createTime = _messages.StringField(1) expireTime = _messages.StringField(2) updateTime = _messages.StringField(3) class GoogleCloudDatacatalogV1beta1TableSpec(_messages.Message): r"""Normal BigQuery table spec. Fields: groupedEntry: Output only. If the table is a dated shard, i.e., with name pattern `[prefix]YYYYMMDD`, `grouped_entry` is the Data Catalog resource name of the date sharded grouped entry, for example, `projects/{project_ id}/locations/{location}/entrygroups/{entry_group_id}/entries/{entry_id} `. Otherwise, `grouped_entry` is empty. """ groupedEntry = _messages.StringField(1) class GoogleCloudDatacatalogV1beta1Tag(_messages.Message): r"""Tags are used to attach custom metadata to Data Catalog resources. Tags conform to the specifications within their tag template. See [Data Catalog IAM](/data-catalog/docs/concepts/iam) for information on the permissions needed to create or view tags. Messages: FieldsValue: Required. This maps the ID of a tag field to the value of and additional information about that field. Valid field IDs are defined by the tag's template. A tag must have at least 1 field and at most 500 fields. Fields: column: Resources like Entry can have schemas associated with them. This scope allows users to attach tags to an individual column based on that schema. For attaching a tag to a nested column, use `.` to separate the column names. 
Example: * `outer_column.inner_column` fields: Required. This maps the ID of a tag field to the value of and additional information about that field. Valid field IDs are defined by the tag's template. A tag must have at least 1 field and at most 500 fields. name: The resource name of the tag in URL format. Example: * projects/{pr oject_id}/locations/{location}/entrygroups/{entry_group_id}/entries/{ent ry_id}/tags/{tag_id} where `tag_id` is a system-generated identifier. Note that this Tag may not actually be stored in the location in this name. template: Required. The resource name of the tag template that this tag uses. Example: * projects/{project_id}/locations/{location}/tagTemplate s/{tag_template_id} This field cannot be modified after creation. templateDisplayName: Output only. The display name of the tag template. """ @encoding.MapUnrecognizedFields('additionalProperties') class FieldsValue(_messages.Message): r"""Required. This maps the ID of a tag field to the value of and additional information about that field. Valid field IDs are defined by the tag's template. A tag must have at least 1 field and at most 500 fields. Messages: AdditionalProperty: An additional property for a FieldsValue object. Fields: additionalProperties: Additional properties of type FieldsValue """ class AdditionalProperty(_messages.Message): r"""An additional property for a FieldsValue object. Fields: key: Name of the additional property. value: A GoogleCloudDatacatalogV1beta1TagField attribute. """ key = _messages.StringField(1) value = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagField', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) column = _messages.StringField(1) fields = _messages.MessageField('FieldsValue', 2) name = _messages.StringField(3) template = _messages.StringField(4) templateDisplayName = _messages.StringField(5) class GoogleCloudDatacatalogV1beta1TagField(_messages.Message): r"""Contains the value and supporting information for a field within a Tag. Fields: boolValue: Holds the value for a tag field with boolean type. displayName: Output only. The display name of this field. doubleValue: Holds the value for a tag field with double type. enumValue: Holds the value for a tag field with enum type. This value must be one of the allowed values in the definition of this enum. stringValue: Holds the value for a tag field with string type. timestampValue: Holds the value for a tag field with timestamp type. """ boolValue = _messages.BooleanField(1) displayName = _messages.StringField(2) doubleValue = _messages.FloatField(3) enumValue = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagFieldEnumValue', 4) stringValue = _messages.StringField(5) timestampValue = _messages.StringField(6) class GoogleCloudDatacatalogV1beta1TagFieldEnumValue(_messages.Message): r"""Holds an enum value. Fields: displayName: The display name of the enum value. """ displayName = _messages.StringField(1) class GoogleCloudDatacatalogV1beta1TagTemplate(_messages.Message): r"""A tag template defines a tag, which can have one or more typed fields. The template is used to create and attach the tag to GCP resources. [Tag template roles](/iam/docs/understanding-roles#data-catalog-roles) provide permissions to create, edit, and use the template (see, for example, the [TagTemplate User](/data-catalog/docs/how-to/template-user) role, which includes permission to use the tag template to tag resources. Messages: FieldsValue: Required. 
Map of tag template field IDs to the settings for the field. This map is an exhaustive list of the allowed fields. This map must contain at least one field and at most 500 fields. The keys to this map are tag template field IDs. Field IDs can contain letters (both uppercase and lowercase), numbers (0-9) and underscores (_). Field IDs must be at least 1 character long and at most 64 characters long. Field IDs must start with a letter or underscore. Fields: displayName: The display name for this template. Defaults to an empty string. fields: Required. Map of tag template field IDs to the settings for the field. This map is an exhaustive list of the allowed fields. This map must contain at least one field and at most 500 fields. The keys to this map are tag template field IDs. Field IDs can contain letters (both uppercase and lowercase), numbers (0-9) and underscores (_). Field IDs must be at least 1 character long and at most 64 characters long. Field IDs must start with a letter or underscore. name: The resource name of the tag template in URL format. Example: * projects/{project_id}/locations/{location}/tagTemplates/{tag_template_id } Note that this TagTemplate and its child resources may not actually be stored in the location in this name. """ @encoding.MapUnrecognizedFields('additionalProperties') class FieldsValue(_messages.Message): r"""Required. Map of tag template field IDs to the settings for the field. This map is an exhaustive list of the allowed fields. This map must contain at least one field and at most 500 fields. The keys to this map are tag template field IDs. Field IDs can contain letters (both uppercase and lowercase), numbers (0-9) and underscores (_). Field IDs must be at least 1 character long and at most 64 characters long. Field IDs must start with a letter or underscore. Messages: AdditionalProperty: An additional property for a FieldsValue object. Fields: additionalProperties: Additional properties of type FieldsValue """ class AdditionalProperty(_messages.Message): r"""An additional property for a FieldsValue object. Fields: key: Name of the additional property. value: A GoogleCloudDatacatalogV1beta1TagTemplateField attribute. """ key = _messages.StringField(1) value = _messages.MessageField('GoogleCloudDatacatalogV1beta1TagTemplateField', 2) additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True) displayName = _messages.StringField(1) fields = _messages.MessageField('FieldsValue', 2) name = _messages.StringField(3) class GoogleCloudDatacatalogV1beta1TagTemplateField(_messages.Message): r"""The template for an individual field within a tag template. Fields: displayName: The display name for this field. Defaults to an empty string. isRequired: Whether this is a required field. Defaults to false. name: Output only. The resource name of the tag template field in URL format. Example: * projects/{project_id}/locations/{location}/tagTempla tes/{tag_template}/fields/{field} Note that this TagTemplateField may not actually be stored in the location in this name. type: Required. The type of value this tag field can contain. """ displayName = _messages.StringField(1) isRequired = _messages.BooleanField(2) name = _messages.StringField(3) type = _messages.MessageField('GoogleCloudDatacatalogV1beta1FieldType', 4) class GoogleCloudDatacatalogV1beta1Taxonomy(_messages.Message): r"""A taxonomy is a collection of policy tags that classify data along a common axis. 
For instance a data *sensitivity* taxonomy could contain policy tags denoting PII such as age, zipcode, and SSN. A data *origin* taxonomy could contain policy tags to distinguish user data, employee data, partner data, public data. Enums: ActivatedPolicyTypesValueListEntryValuesEnum: Fields: activatedPolicyTypes: Optional. A list of policy types that are activated for this taxonomy. If not set, defaults to an empty list. description: Optional. Description of this taxonomy. It must: contain only unicode characters, tabs, newlines, carriage returns and page breaks; and be at most 2000 bytes long when encoded in UTF-8. If not set, defaults to an empty description. displayName: Required. User defined name of this taxonomy. It must: contain only unicode letters, numbers, underscores, dashes and spaces; not start or end with spaces; and be at most 200 bytes long when encoded in UTF-8. name: Output only. Resource name of this taxonomy, whose format is: "projects/{project_number}/locations/{location_id}/taxonomies/{id}". """ class ActivatedPolicyTypesValueListEntryValuesEnum(_messages.Enum): r"""ActivatedPolicyTypesValueListEntryValuesEnum enum type. Values: POLICY_TYPE_UNSPECIFIED: <no description> FINE_GRAINED_ACCESS_CONTROL: <no description> """ POLICY_TYPE_UNSPECIFIED = 0 FINE_GRAINED_ACCESS_CONTROL = 1 activatedPolicyTypes = _messages.EnumField('ActivatedPolicyTypesValueListEntryValuesEnum', 1, repeated=True) description = _messages.StringField(2) displayName = _messages.StringField(3) name = _messages.StringField(4) class GoogleCloudDatacatalogV1beta1ViewSpec(_messages.Message): r"""Table view specification. Fields: viewQuery: Output only. The query that defines the table view. """ viewQuery = _messages.StringField(1) class Policy(_messages.Message): r"""An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. Optionally, a `binding` can specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. **JSON example:** { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:<EMAIL>", "group:<EMAIL>", "domain:google.com", "serviceAccount:<EMAIL>- <EMAIL>" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": ["user:<EMAIL>"], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 } **YAML example:** bindings: - members: - user:<EMAIL> - group:<EMAIL> - domain:google.com - serviceAccount :<EMAIL> role: roles/resourcemanager.organizationAdmin - members: - user:<EMAIL> role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/). Fields: bindings: Associates a list of `members` to a `role`. 
Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member. etag: `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read- modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. version: Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. """ bindings = _messages.MessageField('Binding', 1, repeated=True) etag = _messages.BytesField(2) version = _messages.IntegerField(3, variant=_messages.Variant.INT32) class SetIamPolicyRequest(_messages.Message): r"""Request message for `SetIamPolicy` method. Fields: policy: REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them. """ policy = _messages.MessageField('Policy', 1) class StandardQueryParameters(_messages.Message): r"""Query parameters accepted by all methods. Enums: FXgafvValueValuesEnum: V1 error format. AltValueValuesEnum: Data format for response. Fields: f__xgafv: V1 error format. access_token: OAuth access token. alt: Data format for response. callback: JSONP fields: Selector specifying which fields to include in a partial response. key: API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token. oauth_token: OAuth 2.0 token for the current user. prettyPrint: Returns response with indentations and line breaks. quotaUser: Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. trace: A tracing token of the form "token:<tokenid>" to include in api requests. uploadType: Legacy upload protocol for media (e.g. "media", "multipart"). upload_protocol: Upload protocol for media (e.g. "raw", "multipart"). """ class AltValueValuesEnum(_messages.Enum): r"""Data format for response. 
Values: json: Responses with Content-Type of application/json media: Media download with context-dependent Content-Type proto: Responses with Content-Type of application/x-protobuf """ json = 0 media = 1 proto = 2 class FXgafvValueValuesEnum(_messages.Enum): r"""V1 error format. Values: _1: v1 error format _2: v2 error format """ _1 = 0 _2 = 1 f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1) access_token = _messages.StringField(2) alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json') callback = _messages.StringField(4) fields = _messages.StringField(5) key = _messages.StringField(6) oauth_token = _messages.StringField(7) prettyPrint = _messages.BooleanField(8, default=True) quotaUser = _messages.StringField(9) trace = _messages.StringField(10) uploadType = _messages.StringField(11) upload_protocol = _messages.StringField(12) class TestIamPermissionsRequest(_messages.Message): r"""Request message for `TestIamPermissions` method. Fields: permissions: The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions). """ permissions = _messages.StringField(1, repeated=True) class TestIamPermissionsResponse(_messages.Message): r"""Response message for `TestIamPermissions` method. Fields: permissions: A subset of `TestPermissionsRequest.permissions` that the caller is allowed. """ permissions = _messages.StringField(1, repeated=True) encoding.AddCustomJsonFieldMapping( StandardQueryParameters, 'f__xgafv', '$.xgafv') encoding.AddCustomJsonEnumMapping( StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1') encoding.AddCustomJsonEnumMapping( StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
1.359375
1
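The generated protorpclite message classes in the row above are normally built and serialized through apitools. The sketch below is not part of the dataset row; it is a minimal illustration, assuming the google-apitools package is installed, and it uses simplified stand-in classes rather than the full generated module.

from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding

class ColumnSchema(_messages.Message):
    """Simplified stand-in for GoogleCloudDatacatalogV1beta1ColumnSchema."""
    column = _messages.StringField(1)
    type = _messages.StringField(2)
    mode = _messages.StringField(3)

class Schema(_messages.Message):
    """Simplified stand-in for GoogleCloudDatacatalogV1beta1Schema."""
    columns = _messages.MessageField(ColumnSchema, 1, repeated=True)

# Build a nested message and round-trip it through JSON, the same pattern used
# with the generated Data Catalog request/response classes.
schema = Schema(columns=[
    ColumnSchema(column="ssn", type="STRING", mode="REQUIRED"),
    ColumnSchema(column="dob", type="TIMESTAMP", mode="NULLABLE"),
])
as_json = encoding.MessageToJson(schema)
print(as_json)
print(encoding.JsonToMessage(Schema, as_json) == schema)  # True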
widgets/tree_item.py
tarsa129/j3d-animation-editor
6
4896
from PyQt5.QtWidgets import QAction, QTreeWidget, QTreeWidgetItem, QFileDialog from PyQt5.QtGui import QIcon from PyQt5.QtCore import Qt import animations.general_animation as j3d from widgets.yaz0 import compress, compress_slow, compress_fast from io import BytesIO class tree_item(QTreeWidgetItem): def __init__(self, parent): QTreeWidgetItem.__init__(self, parent,1000) self.display_info = [] self.filepath = "" self.compressed = 1 self.bmd_file = None self.sound_data = None self.changed = False def set_values(self, display_info, filepath, compressed ): self.display_info = display_info self.filepath = filepath.replace("|", ".") self.compressed = compressed forward_i = filepath.rfind("/") + 1 backwad_i = filepath.rfind("\\") + 1 self.setText(0, self.filepath[max(forward_i, backwad_i):]) def set_sound(self, sound_data): self.sound_data = sound_data if sound_data is not None: icon = QIcon("icons/sound.png") self.setIcon(0, icon) else: self.setIcon(0, QIcon() ) def save_animation(self, other_filepath = "", compress_dis = 1, save_all = False): if save_all and not self.changed: print("skipping " + self.filepath + " because nothing has changed") return if other_filepath != "": working_filepath = other_filepath else: working_filepath = self.filepath if (working_filepath.endswith("a") and not working_filepath.endswith(".bva") ): info = j3d.fix_array( self.display_info) self.convert_to_a(info) else: info = j3d.fix_array( self.display_info) j3d.sort_filepath(working_filepath, info, self.sound_data) compress_status = self.compressed if compress_dis != 0: compress_status = compress_dis print(compress_status) if compress_status > 1: out = BytesIO() with open(working_filepath, "rb") as f: if compress_status == 2: out = compress_fast(f) elif compress_status == 3: out = compress(f) elif compress_status == 4: out = compress_slow(f) with open(working_filepath, "wb") as f: f.write(out.getbuffer()) self.changed = False def convert_to_k(self): filepath = self.filepath[:-1] + "k" info = j3d.fix_array(self.display_info) if self.filepath.endswith(".bca"): bck = j3d.sort_filepath(filepath, info) elif filepath.endswith(".bla"): blk = j3d.sort_filepath(filepath, info) def convert_to_a(self, info): info = j3d.fix_array( info ) if self.filepath.endswith(".bck") or self.filepath.endswith(".bca"): bca = j3d.convert_to_a(self.filepath, info) #this is a pure bck, no saving filepath = self.filepath[:-1] + "a" with open(filepath, "wb") as f: bca.write_bca(f) f.close() elif self.filepath.endswith(".blk") or self.filepath.endswith(".bla"): bla = j3d.convert_to_a(self.filepath, info) #this is a pure bck, no saving filepath = self.filepath[:-1] + "a" with open(filepath, "wb") as f: bla.write_bla(f) f.close() def export_anim(self): info = j3d.fix_array(self.display_info) filepath = self.filepath[0:-4] + ".anim" if self.bmd_file is None: bmd_file, choosentype = QFileDialog.getOpenFileName( None, "Open File","" , "Model files (*.bmd *.bdl)") if bmd_file: bck = j3d.export_anim(filepath, info, bmd_file) else: bck = j3d.export_anim(filepath, info, self.bmd_file) def add_children(self, strings): self.takeChildren() for name in strings: child = QTreeWidgetItem(self) child.setText(0, name) child.setDisabled(True)
2.046875
2
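A minimal sketch of the pattern in the tree_item.py row above: subclass QTreeWidgetItem so each tree row carries its own state (source filepath, dirty flag). It assumes PyQt5 is installed and a Qt platform is available (for headless runs, set QT_QPA_PLATFORM=offscreen); the class and file names are illustrative, not taken from the repo.

import sys
from PyQt5.QtWidgets import QApplication, QTreeWidget, QTreeWidgetItem

class AnimItem(QTreeWidgetItem):
    def __init__(self, parent, filepath):
        super().__init__(parent, 1000)  # custom item type id, as in the repo class
        self.filepath = filepath
        self.changed = False
        # display only the file name, mirroring set_values() above
        cut = max(filepath.rfind("/"), filepath.rfind("\\")) + 1
        self.setText(0, filepath[cut:])

app = QApplication(sys.argv)
tree = QTreeWidget()
item = AnimItem(tree, "animations/walk.bck")
print(item.text(0), item.changed)  # -> walk.bck False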
tests/runner.py
crnbaker/MONAI
1
4944
# Copyright 2020 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import inspect import os import sys import time import unittest from monai.utils import PerfContext results: dict = dict() class TimeLoggingTestResult(unittest.TextTestResult): """Overload the default results so that we can store the results.""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.timed_tests = dict() def startTest(self, test): # noqa: N802 """Start timer, print test name, do normal test.""" self.start_time = time.time() name = self.getDescription(test) self.stream.write(f"Starting test: {name}...\n") super().startTest(test) def stopTest(self, test): # noqa: N802 """On test end, get time, print, store and do normal behaviour.""" elapsed = time.time() - self.start_time name = self.getDescription(test) self.stream.write(f"Finished test: {name} ({elapsed:.03}s)\n") if name in results: raise AssertionError("expected all keys to be unique") results[name] = elapsed super().stopTest(test) def print_results(results, discovery_time, thresh, status): # only keep results >= threshold results = dict(filter(lambda x: x[1] > thresh, results.items())) if len(results) == 0: return print(f"\n\n{status}, printing completed times >{thresh}s in ascending order...\n") timings = dict(sorted(results.items(), key=lambda item: item[1])) for r in timings: if timings[r] >= thresh: print(f"{r} ({timings[r]:.03}s)") print(f"test discovery time: {discovery_time:.03}s") print(f"total testing time: {sum(results.values()):.03}s") print("Remember to check above times for any errors!") def parse_args(default_pattern): parser = argparse.ArgumentParser(description="Runner for MONAI unittests with timing.") parser.add_argument( "-s", action="store", dest="path", default=".", help="Directory to start discovery (default: '%(default)s')" ) parser.add_argument( "-p", action="store", dest="pattern", default=default_pattern, help="Pattern to match tests (default: '%(default)s')", ) parser.add_argument( "-t", "--thresh", dest="thresh", default=10.0, type=float, help="Display tests longer than given threshold (default: %(default)d)", ) parser.add_argument( "-v", "--verbosity", action="store", dest="verbosity", type=int, default=1, help="Verbosity level (default: %(default)d)", ) parser.add_argument("-q", "--quick", action="store_true", dest="quick", default=False, help="Only do quick tests") parser.add_argument( "-f", "--failfast", action="store_true", dest="failfast", default=False, help="Stop testing on first failure" ) args = parser.parse_args() print(f"Running tests in folder: '{args.path}'") if args.pattern: print(f"With file pattern: '{args.pattern}'") return args def get_default_pattern(loader): signature = inspect.signature(loader.discover) params = {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty} return params["pattern"] if __name__ == "__main__": loader = unittest.TestLoader() default_pattern = get_default_pattern(loader) # Parse input arguments args = 
parse_args(default_pattern) # If quick is desired, set environment variable if args.quick: os.environ["QUICKTEST"] = "True" # Get all test names (optionally from some path with some pattern) with PerfContext() as pc: tests = loader.discover(args.path, args.pattern) discovery_time = pc.total_time print(f"time to discover tests: {discovery_time}s") test_runner = unittest.runner.TextTestRunner( resultclass=TimeLoggingTestResult, verbosity=args.verbosity, failfast=args.failfast ) # Use try catches to print the current results if encountering exception or keyboard interruption try: test_result = test_runner.run(tests) print_results(results, discovery_time, args.thresh, "tests finished") sys.exit(not test_result.wasSuccessful()) except KeyboardInterrupt: print_results(results, discovery_time, args.thresh, "tests cancelled") sys.exit(1) except Exception: print_results(results, discovery_time, args.thresh, "exception reached") raise
1.953125
2
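The MONAI runner above relies on unittest's resultclass hook to time each test. The self-contained sketch below shows the same idea in miniature using only the standard library; the class and test names are illustrative, not MONAI code.

import time
import unittest

durations = {}

class TimingResult(unittest.TextTestResult):
    def startTest(self, test):
        self._t0 = time.time()
        super().startTest(test)

    def stopTest(self, test):
        durations[self.getDescription(test)] = time.time() - self._t0
        super().stopTest(test)

class ExampleCase(unittest.TestCase):
    def test_sleep(self):
        time.sleep(0.1)

if __name__ == "__main__":
    suite = unittest.defaultTestLoader.loadTestsFromTestCase(ExampleCase)
    unittest.TextTestRunner(resultclass=TimingResult, verbosity=2).run(suite)
    for name, secs in sorted(durations.items(), key=lambda kv: kv[1]):
        print(f"{name}: {secs:.3f}s")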
examples/bicycle/bicycle_dynamics.py
lujieyang/irs_lqr
6
4960
import numpy as np import pydrake.symbolic as ps import torch import time from irs_lqr.dynamical_system import DynamicalSystem class BicycleDynamics(DynamicalSystem): def __init__(self, h): super().__init__() """ x = [x pos, y pos, heading, speed, steering_angle] u = [acceleration, steering_velocity] """ self.h = h self.dim_x = 5 self.dim_u = 2 """Jacobian computations""" self.x_sym = np.array([ps.Variable("x_{}".format(i)) for i in range(self.dim_x)]) self.u_sym = np.array([ps.Variable("u_{}".format(i)) for i in range(self.dim_u)]) self.f_sym = self.dynamics_sym(self.x_sym, self.u_sym) self.jacobian_xu_sym = ps.Jacobian(self.f_sym, np.hstack((self.x_sym, self.u_sym))) def dynamics_sym(self, x, u): """ Symbolic expression for dynamics. Used to compute linearizations of the system. x (np.array, dim: n): state u (np.array, dim: m): action """ heading = x[2] v = x[3] steer = x[4] dxdt = np.array([ v * ps.cos(heading), v * ps.sin(heading), v * ps.tan(steer), u[0], u[1] ]) x_new = x + self.h * dxdt return x_new def dynamics(self, x, u): """ Numeric expression for dynamics. x (np.array, dim: n): state u (np.array, dim: m): action """ heading = x[2] v = x[3] steer = x[4] dxdt = np.array([ v * np.cos(heading), v * np.sin(heading), v * np.tan(steer), u[0], u[1] ]) x_new = x + self.h * dxdt return x_new def dynamics_batch(self, x, u): """ Batch dynamics. Uses numpy for vectorized evaluation. -args: x (np.array, dim: B x n): batched state u (np.array, dim: B x m): batched input -returns: xnext (np.array, dim: B x n): batched next state """ heading = x[:,2] v = x[:,3] steer = x[:,4] dxdt = np.vstack(( v * np.cos(heading), v * np.sin(heading), v * np.tan(steer), u[:,0], u[:,1] )).transpose() x_new = x + self.h * dxdt return x_new def dynamics_batch_torch(self, x, u): """ Batch dynamics. Uses pytorch (CUDA) for batched evaluation. -args: x (np.array, dim: B x n): batched state u (np.array, dim: B x m): batched input -returns: xnext (np.array, dim: B x n): batched next state """ x = torch.Tensor(x).cuda() u = torch.Tensor(u).cuda() heading = x[:,2] v = x[:,3] steer = x[:,4] dxdt = torch.vstack(( v * torch.cos(heading), v * torch.sin(heading), v * torch.tan(steer), u[:,0], u[:,1] )).T x_new = x + self.h * dxdt return x_new def jacobian_xu(self, x, u): """ Recover linearized dynamics dfdx as a function of x, u """ env = {self.x_sym[i]: x[i] for i in range(self.dim_x)} env.update({self.u_sym[i]: u[i] for i in range(self.dim_u)}) f_x = ps.Evaluate(self.jacobian_xu_sym, env) return f_x def jacobian_xu_batch(self, x, u): """ Recover linearized dynamics dfd(xu) as a function of x, u """ dxdu_batch = np.zeros(( x.shape[0], x.shape[1], x.shape[1] + u.shape[1])) for i in range(x.shape[0]): dxdu_batch[i] = self.jacobian_xu(x[i], u[i]) return dxdu_batch
2.59375
3
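The discrete bicycle update in the row above needs only numpy once the pydrake/torch machinery is stripped away. The sketch below restates that update and sanity-checks the Jacobian idea with central finite differences; the time step, perturbation size, and test point are arbitrary assumptions, not values from the repo.

import numpy as np

h = 0.1  # integration time step, assumed

def step(x, u):
    # x = [x pos, y pos, heading, speed, steering_angle], u = [accel, steering rate]
    _, _, heading, v, steer = x
    dxdt = np.array([
        v * np.cos(heading),
        v * np.sin(heading),
        v * np.tan(steer),
        u[0],
        u[1],
    ])
    return x + h * dxdt

def fd_jacobian(x, u, eps=1e-6):
    # d(next state)/d(x, u) via central differences, shape (5, 7)
    xu = np.concatenate([x, u])
    J = np.zeros((5, 7))
    for i in range(7):
        d = np.zeros(7)
        d[i] = eps
        plus, minus = xu + d, xu - d
        J[:, i] = (step(plus[:5], plus[5:]) - step(minus[:5], minus[5:])) / (2 * eps)
    return J

x = np.array([0.0, 0.0, 0.1, 1.0, 0.05])
u = np.array([0.2, 0.0])
print(step(x, u))
print(fd_jacobian(x, u).round(3))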
unitcap/unit_cap.py
fintelia/habitationi
1
4984
#!/usr/bin/python # Copyright 2019 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer from urlparse import urlparse, parse_qs from jinja2 import Template import sqlite3 import urllib def get_caps(options): far = {} for i in ['A-1', 'A-2', 'B', 'SD-2']: far[i] = 0.5 for i in ['C', 'SD-9', 'SD-10F', 'SD-10H']: far[i] = 0.6 for i in ['C-1', 'BA-3', 'IB-2', 'O-1']: far[i] = .75 for i in ['BA-1', 'SD-12']: far[i] = 1.0 for i in ['C-1A', 'SD-5']: far[i] = 1.25 for i in ['IA-1', 'IA', 'O-2A', 'SD-4A', 'SD-13']: far[i] = 1.5 for i in ['C-2', 'C-2B', 'BA', 'BA-2', 'SD-8']: far[i] = 1.75 for i in ['BC', 'O-2']: far[i] = 2.0 for i in ['C-2A']: far[i] = 2.50 for i in ['C-3', 'C-3A', 'C-3B', 'BB', 'BB-2', 'BC-1', 'IB-1', 'O-3', 'O-3A', 'SD-1', 'SD-6', 'SD-7']: far[i] = 3.0 for i in ['IA-2', 'IB']: far[i] = 4.0 far['BB-1'] = 3.25 far['SD-11'] = 1.7 far['SD-15'] = 3.5 lot_area = { 'A-1': 6000, 'A-2': 4500, 'C-1A': 1000, 'BC': 500, 'BC-1': 450, 'IA-1': 700, 'SD-8': 650, 'SD-14': 800, } for i in ['IB-2', 'BA-1']: lot_area[i] = 1200 for i in ['B', 'SD-2', 'SD-3']: lot_area[i] = 2500 for i in ['C', 'SD-10F', 'SD-10H', 'SD-9']: lot_area[i] = 1800 for i in ['C-1', 'BA-3']: lot_area[i] = 1500 for i in ['C-2', 'C-2B', 'O-2', 'BA', 'BA-2', 'SD-4', 'SD-4A', 'SD-5', 'SD-11', 'SD-13']: lot_area[i] = 600 for i in ['C-2A', 'C-3', 'C-3A', 'C-3B', 'BB', 'BB-1', 'BB-2', 'SD-1', 'SD-6', 'SD-7']: lot_area[i] = 300 for i in lot_area: if options and 'lot_explicit' in options: lot_area[i] = options['lot_explicit'] elif options and 'lot_factor' in options: lot_area[i] = int(lot_area[i] / float(options['lot_factor'])) if 'no_lot' in options: lot_area = {} for i in far: if options and 'far_explicit' in options: far[i] = options['far_explicit'] elif options and 'far_factor' in options: far[i] = far[i] * float(options['far_factor']) if 'no_far' in options: far = {} return far, lot_area def table(options): far, lot_area = get_caps(options) table = [] for i in ['A-1', 'A-2', 'B', 'C', 'C-1', 'C-1A', 'C-2', 'C-2A', 'C-2B', 'C-3', 'C-3A', 'C-3B']: table.append("<tr><td>%s</td><td>%s</td><td>%s</td></tr>" % (i, far.get(i, ""), lot_area.get(i,""))) return "\n".join(table) def unit_cap(row, options=None): if not options: options = {} far, lot_area = get_caps(options) zone = row['zone'] if (not zone.startswith("C") and not zone in ("A-1", "A-2", "B")) or zone == "CRDD": return -1 if zone in ['A-1', 'A-2'] and not 'no_a' in options: return 1 #print row area = float(row.get('gis_lot_size',0) or 0) if zone in lot_area and area: m = max(area/(lot_area[zone]), 1) else: m = 100000 max_building = area * far[zone] * 1 if max(int(max_building/800), 1) < m: m = max(int(max_building/800), 1) if zone == "B" and not 'no_b' in options: m = min(m, 2) return m def dict_factory(cursor, row): d = {} for idx, col in enumerate(cursor.description): d[col[0]] = row[idx] return d def compute_count(options = None): conn = sqlite3.connect("prop.db") if options == None: options = {} c = conn.cursor() c.row_factory = 
dict_factory m = 0 current = 0 for row in c.execute("SELECT * FROM lots"): t = unit_cap(row, options=options) if t == -1: continue m += int(t) return m def describe(options): changes = [] if 'no_lot' in options: changes.append("eliminate lot size/unit minimums") elif 'lot_explicit' in options: changes.append("set all lot size/unit minimums to %s" % options['lot_explicit']) elif 'lot_factor' in options and options['lot_factor'] != 1.0: changes.append('decrease lot size minimums by a factor of %s' % options['lot_factor']) if 'no_a' in options: changes.append('eliminate single family zoning in A-1 and A-2 zones') if 'no_b' in options: changes.append('eliminate two-family zoning limits in B zones') if 'far_explicit' in options: changes.append("set all FAR maximums to %s" % options['far_explicit']) elif 'far_factor' in options and options['far_factor'] != 1.0: changes.append('increase FAR maximums by a factor of %s' % options['far_factor']) if len(changes): return ", ".join(changes) else: return "" def serve(options): d = open("unit_template.html") template = Template( d.read() ) unit_count = int(compute_count(options)) data = {} data['changes'] = describe(options) data['unit_count'] = unit_count data['increase'] = unit_count-37453 data['table'] = table(options) data['options'] = options s = template.render(**data) return s PORT_NUMBER = 8080 class myHandler(BaseHTTPRequestHandler): def do_GET(self): self.send_response(200) self.send_header('Content-type','text/html') self.end_headers() # Send the html message form = parse_qs(urlparse(self.path).query) options = {} for i in ['far_factor', 'lot_factor']: if i in form: options[i] = float(form[i][0]) else: options[i] = 1.0 if 'far_explicit' in form and form['far_explicit']: options['far_explicit'] = float(form['far_explicit'][0]) if 'lot_explicit' in form and form['lot_explicit']: options['lot_explicit'] = int(form['lot_explicit'][0]) if 'lot' in form: options['no_lot'] = True if 'singlefamily' in form: options['no_a'] = True if 'twofamily' in form: options['no_b'] = True self.wfile.write(serve(options)) return def run(): try: #Create a web server and define the handler to manage the #incoming request server = HTTPServer(('', PORT_NUMBER), myHandler) print 'Started httpserver on port ' , PORT_NUMBER #Wait forever for incoming http requests server.serve_forever() except KeyboardInterrupt: print '^C received, shutting down the web server' server.socket.close() if __name__ == "__main__": print run()
1.421875
1
sdk/videoanalyzer/azure-mgmt-videoanalyzer/azure/mgmt/videoanalyzer/models/_models.py
praveenkuttappan/azure-sdk-for-python
2728
4992
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from azure.core.exceptions import HttpResponseError import msrest.serialization class Resource(msrest.serialization.Model): """Common fields that are returned in the response for all Azure Resource Manager resources. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, } def __init__( self, **kwargs ): super(Resource, self).__init__(**kwargs) self.id = None self.name = None self.type = None self.system_data = None class ProxyResource(Resource): """The resource model definition for a Azure Resource Manager proxy resource. It will not have tags and a location. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, } def __init__( self, **kwargs ): super(ProxyResource, self).__init__(**kwargs) class AccessPolicyEntity(ProxyResource): """Access policies help define the authentication rules, and control access to specific video resources. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. 
:vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param role: Defines the access level granted by this policy. Possible values include: "Reader". :type role: str or ~video_analyzer.models.AccessPolicyRole :param authentication: Authentication method to be used when validating client API access. :type authentication: ~video_analyzer.models.AuthenticationBase """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'role': {'key': 'properties.role', 'type': 'str'}, 'authentication': {'key': 'properties.authentication', 'type': 'AuthenticationBase'}, } def __init__( self, **kwargs ): super(AccessPolicyEntity, self).__init__(**kwargs) self.role = kwargs.get('role', None) self.authentication = kwargs.get('authentication', None) class AccessPolicyEntityCollection(msrest.serialization.Model): """A collection of AccessPolicyEntity items. :param value: A collection of AccessPolicyEntity items. :type value: list[~video_analyzer.models.AccessPolicyEntity] :param next_link: A link to the next page of the collection (when the collection contains too many results to return in one response). :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[AccessPolicyEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(AccessPolicyEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class AccountEncryption(msrest.serialization.Model): """Defines how the Video Analyzer account is (optionally) encrypted. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param type: Required. The type of key used to encrypt the Account Key. Possible values include: "SystemKey", "CustomerKey". :type type: str or ~video_analyzer.models.AccountEncryptionKeyType :param key_vault_properties: The properties of the key used to encrypt the account. :type key_vault_properties: ~video_analyzer.models.KeyVaultProperties :param identity: The Key Vault identity. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current status of the Key Vault mapping. :vartype status: str """ _validation = { 'type': {'required': True}, 'status': {'readonly': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'key_vault_properties': {'key': 'keyVaultProperties', 'type': 'KeyVaultProperties'}, 'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, 'status': {'key': 'status', 'type': 'str'}, } def __init__( self, **kwargs ): super(AccountEncryption, self).__init__(**kwargs) self.type = kwargs['type'] self.key_vault_properties = kwargs.get('key_vault_properties', None) self.identity = kwargs.get('identity', None) self.status = None class AudioEncoderBase(msrest.serialization.Model): """Base type for all audio encoder presets, which define the recipe or instructions on how audio should be processed. 
You probably want to use the sub-classes and not this class directly. Known sub-classes are: AudioEncoderAac. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which audio should be encoded (2-channel stereo audio at a sampling rate of 48 kHz). Allowed values are 96, 112, 128, 160, 192, 224, and 256. If omitted, the bitrate of the input audio is used. :type bitrate_kbps: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.AudioEncoderAac': 'AudioEncoderAac'} } def __init__( self, **kwargs ): super(AudioEncoderBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.bitrate_kbps = kwargs.get('bitrate_kbps', None) class AudioEncoderAac(AudioEncoderBase): """A custom preset for encoding audio with the AAC codec. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param bitrate_kbps: Bitrate, in kilobits per second or Kbps, at which audio should be encoded (2-channel stereo audio at a sampling rate of 48 kHz). Allowed values are 96, 112, 128, 160, 192, 224, and 256. If omitted, the bitrate of the input audio is used. :type bitrate_kbps: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, } def __init__( self, **kwargs ): super(AudioEncoderAac, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.AudioEncoderAac' # type: str class AuthenticationBase(msrest.serialization.Model): """Base class for access policies authentication methods. You probably want to use the sub-classes and not this class directly. Known sub-classes are: JwtAuthentication. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.JwtAuthentication': 'JwtAuthentication'} } def __init__( self, **kwargs ): super(AuthenticationBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class CertificateSource(msrest.serialization.Model): """Base class for certificate sources. You probably want to use the sub-classes and not this class directly. Known sub-classes are: PemCertificateList. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.PemCertificateList': 'PemCertificateList'} } def __init__( self, **kwargs ): super(CertificateSource, self).__init__(**kwargs) self.type = None # type: Optional[str] class CheckNameAvailabilityRequest(msrest.serialization.Model): """The check availability request body. :param name: The name of the resource for which availability needs to be checked. :type name: str :param type: The resource type. 
:type type: str """ _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, } def __init__( self, **kwargs ): super(CheckNameAvailabilityRequest, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.type = kwargs.get('type', None) class CheckNameAvailabilityResponse(msrest.serialization.Model): """The check availability result. :param name_available: Indicates if the resource name is available. :type name_available: bool :param reason: The reason why the given name is not available. Possible values include: "Invalid", "AlreadyExists". :type reason: str or ~video_analyzer.models.CheckNameAvailabilityReason :param message: Detailed reason why the given name is available. :type message: str """ _attribute_map = { 'name_available': {'key': 'nameAvailable', 'type': 'bool'}, 'reason': {'key': 'reason', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, } def __init__( self, **kwargs ): super(CheckNameAvailabilityResponse, self).__init__(**kwargs) self.name_available = kwargs.get('name_available', None) self.reason = kwargs.get('reason', None) self.message = kwargs.get('message', None) class CredentialsBase(msrest.serialization.Model): """Base class for credential objects. You probably want to use the sub-classes and not this class directly. Known sub-classes are: UsernamePasswordCredentials. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.UsernamePasswordCredentials': 'UsernamePasswordCredentials'} } def __init__( self, **kwargs ): super(CredentialsBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class TokenKey(msrest.serialization.Model): """Key properties for JWT token validation. You probably want to use the sub-classes and not this class directly. Known sub-classes are: EccTokenKey, RsaTokenKey. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param kid: Required. JWT token key id. Validation keys are looked up based on the key id present on the JWT token header. :type kid: str """ _validation = { 'type': {'required': True}, 'kid': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'kid': {'key': 'kid', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EccTokenKey': 'EccTokenKey', '#Microsoft.VideoAnalyzer.RsaTokenKey': 'RsaTokenKey'} } def __init__( self, **kwargs ): super(TokenKey, self).__init__(**kwargs) self.type = None # type: Optional[str] self.kid = kwargs['kid'] class EccTokenKey(TokenKey): """Required validation properties for tokens generated with Elliptical Curve algorithm. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param kid: Required. JWT token key id. Validation keys are looked up based on the key id present on the JWT token header. :type kid: str :param alg: Required. Elliptical curve algorithm to be used: ES256, ES384 or ES512. Possible values include: "ES256", "ES384", "ES512". :type alg: str or ~video_analyzer.models.AccessPolicyEccAlgo :param x: Required. X coordinate. :type x: str :param y: Required. 
Y coordinate. :type y: str """ _validation = { 'type': {'required': True}, 'kid': {'required': True}, 'alg': {'required': True}, 'x': {'required': True}, 'y': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'kid': {'key': 'kid', 'type': 'str'}, 'alg': {'key': 'alg', 'type': 'str'}, 'x': {'key': 'x', 'type': 'str'}, 'y': {'key': 'y', 'type': 'str'}, } def __init__( self, **kwargs ): super(EccTokenKey, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EccTokenKey' # type: str self.alg = kwargs['alg'] self.x = kwargs['x'] self.y = kwargs['y'] class EdgeModuleEntity(ProxyResource): """The representation of an edge module. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :ivar edge_module_id: Internal ID generated for the instance of the Video Analyzer edge module. :vartype edge_module_id: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'edge_module_id': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'edge_module_id': {'key': 'properties.edgeModuleId', 'type': 'str'}, } def __init__( self, **kwargs ): super(EdgeModuleEntity, self).__init__(**kwargs) self.edge_module_id = None class EdgeModuleEntityCollection(msrest.serialization.Model): """A collection of EdgeModuleEntity items. :param value: A collection of EdgeModuleEntity items. :type value: list[~video_analyzer.models.EdgeModuleEntity] :param next_link: A link to the next page of the collection (when the collection contains too many results to return in one response). :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[EdgeModuleEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(EdgeModuleEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class EdgeModuleProvisioningToken(msrest.serialization.Model): """Provisioning token properties. A provisioning token allows for a single instance of Azure Video analyzer IoT edge module to be initialized and authorized to the cloud account. The provisioning token itself is short lived and it is only used for the initial handshake between IoT edge module and the cloud. After the initial handshake, the IoT edge module will agree on a set of authentication keys which will be auto-rotated as long as the module is able to periodically connect to the cloud. A new provisioning token can be generated for the same IoT edge module in case the module state lost or reset. Variables are only populated by the server, and will be ignored when sending a request. :ivar expiration_date: The expiration date of the registration token. 
The Azure Video Analyzer IoT edge module must be initialized and connected to the Internet prior to the token expiration date. :vartype expiration_date: ~datetime.datetime :ivar token: The token blob to be provided to the Azure Video Analyzer IoT edge module through the Azure IoT Edge module twin properties. :vartype token: str """ _validation = { 'expiration_date': {'readonly': True}, 'token': {'readonly': True}, } _attribute_map = { 'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'}, 'token': {'key': 'token', 'type': 'str'}, } def __init__( self, **kwargs ): super(EdgeModuleProvisioningToken, self).__init__(**kwargs) self.expiration_date = None self.token = None class EncoderPresetBase(msrest.serialization.Model): """Base type for all encoder presets, which define the recipe or instructions on how the input content should be processed. You probably want to use the sub-classes and not this class directly. Known sub-classes are: EncoderCustomPreset, EncoderSystemPreset. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EncoderCustomPreset': 'EncoderCustomPreset', '#Microsoft.VideoAnalyzer.EncoderSystemPreset': 'EncoderSystemPreset'} } def __init__( self, **kwargs ): super(EncoderPresetBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class EncoderCustomPreset(EncoderPresetBase): """Describes a custom preset for encoding the input content using the encoder processor. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param audio_encoder: Describes a custom preset for encoding audio. :type audio_encoder: ~video_analyzer.models.AudioEncoderBase :param video_encoder: Describes a custom preset for encoding video. :type video_encoder: ~video_analyzer.models.VideoEncoderBase """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'audio_encoder': {'key': 'audioEncoder', 'type': 'AudioEncoderBase'}, 'video_encoder': {'key': 'videoEncoder', 'type': 'VideoEncoderBase'}, } def __init__( self, **kwargs ): super(EncoderCustomPreset, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderCustomPreset' # type: str self.audio_encoder = kwargs.get('audio_encoder', None) self.video_encoder = kwargs.get('video_encoder', None) class NodeBase(msrest.serialization.Model): """Base class for nodes. You probably want to use the sub-classes and not this class directly. Known sub-classes are: ProcessorNodeBase, SinkNodeBase, SourceNodeBase. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. 
:type name: str """ _validation = { 'type': {'required': True}, 'name': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.ProcessorNodeBase': 'ProcessorNodeBase', '#Microsoft.VideoAnalyzer.SinkNodeBase': 'SinkNodeBase', '#Microsoft.VideoAnalyzer.SourceNodeBase': 'SourceNodeBase'} } def __init__( self, **kwargs ): super(NodeBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.name = kwargs['name'] class ProcessorNodeBase(NodeBase): """Base class for topology processor nodes. You probably want to use the sub-classes and not this class directly. Known sub-classes are: EncoderProcessor. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str :param inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput] """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.EncoderProcessor': 'EncoderProcessor'} } def __init__( self, **kwargs ): super(ProcessorNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.ProcessorNodeBase' # type: str self.inputs = kwargs['inputs'] class EncoderProcessor(ProcessorNodeBase): """Encoder processor allows for encoding of the input content. For example, it can used to change the resolution from 4K to 1280x720. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str :param inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput] :param preset: Required. The encoder preset, which defines the recipe or instructions on how the input content should be processed. :type preset: ~video_analyzer.models.EncoderPresetBase """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, 'preset': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, 'preset': {'key': 'preset', 'type': 'EncoderPresetBase'}, } def __init__( self, **kwargs ): super(EncoderProcessor, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderProcessor' # type: str self.preset = kwargs['preset'] class EncoderSystemPreset(EncoderPresetBase): """Describes a built-in preset for encoding the input content using the encoder processor. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Name of the built-in encoding preset. Possible values include: "SingleLayer_540p_H264_AAC", "SingleLayer_720p_H264_AAC", "SingleLayer_1080p_H264_AAC", "SingleLayer_2160p_H264_AAC". 
:type name: str or ~video_analyzer.models.EncoderSystemPresetType """ _validation = { 'type': {'required': True}, 'name': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } def __init__( self, **kwargs ): super(EncoderSystemPreset, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.EncoderSystemPreset' # type: str self.name = kwargs['name'] class Endpoint(msrest.serialization.Model): """The endpoint details. All required parameters must be populated in order to send to Azure. :param endpoint_url: The URL of the endpoint. :type endpoint_url: str :param type: Required. The type of the endpoint. Possible values include: "ClientApi". :type type: str or ~video_analyzer.models.VideoAnalyzerEndpointType """ _validation = { 'type': {'required': True}, } _attribute_map = { 'endpoint_url': {'key': 'endpointUrl', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, } def __init__( self, **kwargs ): super(Endpoint, self).__init__(**kwargs) self.endpoint_url = kwargs.get('endpoint_url', None) self.type = kwargs['type'] class EndpointBase(msrest.serialization.Model): """Base class for endpoints. You probably want to use the sub-classes and not this class directly. Known sub-classes are: TlsEndpoint, UnsecuredEndpoint. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Required. Credentials to be presented to the endpoint. :type credentials: ~video_analyzer.models.CredentialsBase :param url: Required. The endpoint URL for Video Analyzer to connect to. :type url: str :param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint URL. This is an optional property, typically used when the endpoint is behind a firewall. :type tunnel: ~video_analyzer.models.TunnelBase """ _validation = { 'type': {'required': True}, 'credentials': {'required': True}, 'url': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'credentials': {'key': 'credentials', 'type': 'CredentialsBase'}, 'url': {'key': 'url', 'type': 'str'}, 'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.TlsEndpoint': 'TlsEndpoint', '#Microsoft.VideoAnalyzer.UnsecuredEndpoint': 'UnsecuredEndpoint'} } def __init__( self, **kwargs ): super(EndpointBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.credentials = kwargs['credentials'] self.url = kwargs['url'] self.tunnel = kwargs.get('tunnel', None) class ErrorAdditionalInfo(msrest.serialization.Model): """The resource management error additional info. Variables are only populated by the server, and will be ignored when sending a request. :ivar type: The additional info type. :vartype type: str :ivar info: The additional info. :vartype info: any """ _validation = { 'type': {'readonly': True}, 'info': {'readonly': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'info': {'key': 'info', 'type': 'object'}, } def __init__( self, **kwargs ): super(ErrorAdditionalInfo, self).__init__(**kwargs) self.type = None self.info = None class ErrorDetail(msrest.serialization.Model): """The error detail. Variables are only populated by the server, and will be ignored when sending a request. :ivar code: The error code. :vartype code: str :ivar message: The error message. :vartype message: str :ivar target: The error target. 
:vartype target: str :ivar details: The error details. :vartype details: list[~video_analyzer.models.ErrorDetail] :ivar additional_info: The error additional info. :vartype additional_info: list[~video_analyzer.models.ErrorAdditionalInfo] """ _validation = { 'code': {'readonly': True}, 'message': {'readonly': True}, 'target': {'readonly': True}, 'details': {'readonly': True}, 'additional_info': {'readonly': True}, } _attribute_map = { 'code': {'key': 'code', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, 'target': {'key': 'target', 'type': 'str'}, 'details': {'key': 'details', 'type': '[ErrorDetail]'}, 'additional_info': {'key': 'additionalInfo', 'type': '[ErrorAdditionalInfo]'}, } def __init__( self, **kwargs ): super(ErrorDetail, self).__init__(**kwargs) self.code = None self.message = None self.target = None self.details = None self.additional_info = None class ErrorResponse(msrest.serialization.Model): """Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.). :param error: The error object. :type error: ~video_analyzer.models.ErrorDetail """ _attribute_map = { 'error': {'key': 'error', 'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(ErrorResponse, self).__init__(**kwargs) self.error = kwargs.get('error', None) class GroupLevelAccessControl(msrest.serialization.Model): """Group level network access control. :param public_network_access: Whether or not public network access is allowed for specified resources under the Video Analyzer account. Possible values include: "Enabled", "Disabled". :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess """ _attribute_map = { 'public_network_access': {'key': 'publicNetworkAccess', 'type': 'str'}, } def __init__( self, **kwargs ): super(GroupLevelAccessControl, self).__init__(**kwargs) self.public_network_access = kwargs.get('public_network_access', None) class IotHub(msrest.serialization.Model): """The IoT Hub details. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param id: Required. The IoT Hub resource identifier. :type id: str :param identity: Required. The IoT Hub identity. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current status of the Iot Hub mapping. :vartype status: str """ _validation = { 'id': {'required': True}, 'identity': {'required': True}, 'status': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, 'status': {'key': 'status', 'type': 'str'}, } def __init__( self, **kwargs ): super(IotHub, self).__init__(**kwargs) self.id = kwargs['id'] self.identity = kwargs['identity'] self.status = None class JwtAuthentication(AuthenticationBase): """Properties for access validation based on JSON Web Tokens (JWT). All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param issuers: List of expected token issuers. Token issuer is valid if it matches at least one of the given values. :type issuers: list[str] :param audiences: List of expected token audiences. Token audience is valid if it matches at least one of the given values. :type audiences: list[str] :param claims: List of additional token claims to be validated. 
Token must contains all claims and respective values for it to be valid. :type claims: list[~video_analyzer.models.TokenClaim] :param keys: List of keys which can be used to validate access tokens. Having multiple keys allow for seamless key rotation of the token signing key. Token signature must match exactly one key. :type keys: list[~video_analyzer.models.TokenKey] """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'issuers': {'key': 'issuers', 'type': '[str]'}, 'audiences': {'key': 'audiences', 'type': '[str]'}, 'claims': {'key': 'claims', 'type': '[TokenClaim]'}, 'keys': {'key': 'keys', 'type': '[TokenKey]'}, } def __init__( self, **kwargs ): super(JwtAuthentication, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.JwtAuthentication' # type: str self.issuers = kwargs.get('issuers', None) self.audiences = kwargs.get('audiences', None) self.claims = kwargs.get('claims', None) self.keys = kwargs.get('keys', None) class KeyVaultProperties(msrest.serialization.Model): """The details for accessing the encryption keys in Key Vault. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param key_identifier: Required. The URL of the Key Vault key used to encrypt the account. The key may either be versioned (for example https://vault/keys/mykey/version1) or reference a key without a version (for example https://vault/keys/mykey). :type key_identifier: str :ivar current_key_identifier: The current key used to encrypt Video Analyzer account, including the key version. :vartype current_key_identifier: str """ _validation = { 'key_identifier': {'required': True}, 'current_key_identifier': {'readonly': True}, } _attribute_map = { 'key_identifier': {'key': 'keyIdentifier', 'type': 'str'}, 'current_key_identifier': {'key': 'currentKeyIdentifier', 'type': 'str'}, } def __init__( self, **kwargs ): super(KeyVaultProperties, self).__init__(**kwargs) self.key_identifier = kwargs['key_identifier'] self.current_key_identifier = None class ListProvisioningTokenInput(msrest.serialization.Model): """The input parameters to generate registration token for the Azure Video Analyzer IoT edge module. All required parameters must be populated in order to send to Azure. :param expiration_date: Required. The desired expiration date of the registration token. The Azure Video Analyzer IoT edge module must be initialized and connected to the Internet prior to the token expiration date. :type expiration_date: ~datetime.datetime """ _validation = { 'expiration_date': {'required': True}, } _attribute_map = { 'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'}, } def __init__( self, **kwargs ): super(ListProvisioningTokenInput, self).__init__(**kwargs) self.expiration_date = kwargs['expiration_date'] class LivePipeline(ProxyResource): """Live pipeline represents a unique instance of a live topology, used for real-time ingestion, archiving and publishing of content for a unique RTSP camera. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. 
"Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: The reference to an existing pipeline topology defined for real-time content processing. When activated, this live pipeline will process content according to the pipeline topology definition. :type topology_name: str :param description: An optional description for the pipeline. :type description: str :param bitrate_kbps: Maximum bitrate capacity in Kbps reserved for the live pipeline. The allowed range is from 500 to 3000 Kbps in increments of 100 Kbps. If the RTSP camera exceeds this capacity, then the service will disconnect temporarily from the camera. It will retry to re-establish connection (with exponential backoff), checking to see if the camera bitrate is now below the reserved capacity. Doing so will ensure that one 'noisy neighbor' does not affect other live pipelines in your account. :type bitrate_kbps: int :ivar state: Current state of the pipeline (read-only). Possible values include: "Inactive", "Activating", "Active", "Deactivating". :vartype state: str or ~video_analyzer.models.LivePipelineState :param parameters: List of the instance level parameter values for the user-defined topology parameters. A pipeline can only define or override parameters values for parameters which have been declared in the referenced topology. Topology parameters without a default value must be defined. Topology parameters with a default value can be optionally be overridden. :type parameters: list[~video_analyzer.models.ParameterDefinition] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'topology_name': {'key': 'properties.topologyName', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(LivePipeline, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description', None) self.bitrate_kbps = kwargs.get('bitrate_kbps', None) self.state = None self.parameters = kwargs.get('parameters', None) class LivePipelineCollection(msrest.serialization.Model): """A collection of LivePipeline items. :param value: A collection of LivePipeline items. :type value: list[~video_analyzer.models.LivePipeline] :param next_link: A link to the next page of the collection (when the collection contains too many results to return in one response). :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[LivePipeline]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(LivePipelineCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class LivePipelineOperationStatus(msrest.serialization.Model): """Used for tracking the status of an operation on the live pipeline. 
Variables are only populated by the server, and will be ignored when sending a request. :ivar name: The name of the live pipeline operation. :vartype name: str :ivar status: The status of the live pipeline operation. :vartype status: str :ivar error: The error details for the live pipeline operation. :vartype error: ~video_analyzer.models.ErrorDetail """ _validation = { 'name': {'readonly': True}, 'status': {'readonly': True}, 'error': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'status': {'key': 'status', 'type': 'str'}, 'error': {'key': 'error', 'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(LivePipelineOperationStatus, self).__init__(**kwargs) self.name = None self.status = None self.error = None class LivePipelineUpdate(ProxyResource): """Live pipeline represents a unique instance of a live topology, used for real-time ingestion, archiving and publishing of content for a unique RTSP camera. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: The reference to an existing pipeline topology defined for real-time content processing. When activated, this live pipeline will process content according to the pipeline topology definition. :type topology_name: str :param description: An optional description for the pipeline. :type description: str :param bitrate_kbps: Maximum bitrate capacity in Kbps reserved for the live pipeline. The allowed range is from 500 to 3000 Kbps in increments of 100 Kbps. If the RTSP camera exceeds this capacity, then the service will disconnect temporarily from the camera. It will retry to re-establish connection (with exponential backoff), checking to see if the camera bitrate is now below the reserved capacity. Doing so will ensure that one 'noisy neighbor' does not affect other live pipelines in your account. :type bitrate_kbps: int :ivar state: Current state of the pipeline (read-only). Possible values include: "Inactive", "Activating", "Active", "Deactivating". :vartype state: str or ~video_analyzer.models.LivePipelineState :param parameters: List of the instance level parameter values for the user-defined topology parameters. A pipeline can only define or override parameters values for parameters which have been declared in the referenced topology. Topology parameters without a default value must be defined. Topology parameters with a default value can be optionally be overridden. 
:type parameters: list[~video_analyzer.models.ParameterDefinition] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'topology_name': {'key': 'properties.topologyName', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'bitrate_kbps': {'key': 'properties.bitrateKbps', 'type': 'int'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(LivePipelineUpdate, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description', None) self.bitrate_kbps = kwargs.get('bitrate_kbps', None) self.state = None self.parameters = kwargs.get('parameters', None) class LogSpecification(msrest.serialization.Model): """A diagnostic log emitted by service. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: The diagnostic log category name. :vartype name: str :ivar display_name: The diagnostic log category display name. :vartype display_name: str :ivar blob_duration: The time range for requests in each blob. :vartype blob_duration: str """ _validation = { 'name': {'readonly': True}, 'display_name': {'readonly': True}, 'blob_duration': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'blob_duration': {'key': 'blobDuration', 'type': 'str'}, } def __init__( self, **kwargs ): super(LogSpecification, self).__init__(**kwargs) self.name = None self.display_name = None self.blob_duration = None class MetricDimension(msrest.serialization.Model): """A metric dimension. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: The metric dimension name. :vartype name: str :ivar display_name: The display name for the dimension. :vartype display_name: str :ivar to_be_exported_for_shoebox: Whether to export metric to shoebox. :vartype to_be_exported_for_shoebox: bool """ _validation = { 'name': {'readonly': True}, 'display_name': {'readonly': True}, 'to_be_exported_for_shoebox': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'}, } def __init__( self, **kwargs ): super(MetricDimension, self).__init__(**kwargs) self.name = None self.display_name = None self.to_be_exported_for_shoebox = None class MetricSpecification(msrest.serialization.Model): """A metric emitted by service. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: The metric name. :vartype name: str :ivar display_name: The metric display name. :vartype display_name: str :ivar display_description: The metric display description. :vartype display_description: str :ivar unit: The metric unit. Possible values include: "Bytes", "Count", "Milliseconds". :vartype unit: str or ~video_analyzer.models.MetricUnit :ivar aggregation_type: The metric aggregation type. Possible values include: "Average", "Count", "Total". 
:vartype aggregation_type: str or ~video_analyzer.models.MetricAggregationType :ivar lock_aggregation_type: The metric lock aggregation type. Possible values include: "Average", "Count", "Total". :vartype lock_aggregation_type: str or ~video_analyzer.models.MetricAggregationType :param supported_aggregation_types: Supported aggregation types. :type supported_aggregation_types: list[str] :ivar dimensions: The metric dimensions. :vartype dimensions: list[~video_analyzer.models.MetricDimension] :ivar enable_regional_mdm_account: Indicates whether regional MDM account is enabled. :vartype enable_regional_mdm_account: bool :ivar source_mdm_account: The source MDM account. :vartype source_mdm_account: str :ivar source_mdm_namespace: The source MDM namespace. :vartype source_mdm_namespace: str :ivar supported_time_grain_types: The supported time grain types. :vartype supported_time_grain_types: list[str] """ _validation = { 'name': {'readonly': True}, 'display_name': {'readonly': True}, 'display_description': {'readonly': True}, 'unit': {'readonly': True}, 'aggregation_type': {'readonly': True}, 'lock_aggregation_type': {'readonly': True}, 'dimensions': {'readonly': True}, 'enable_regional_mdm_account': {'readonly': True}, 'source_mdm_account': {'readonly': True}, 'source_mdm_namespace': {'readonly': True}, 'supported_time_grain_types': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display_name': {'key': 'displayName', 'type': 'str'}, 'display_description': {'key': 'displayDescription', 'type': 'str'}, 'unit': {'key': 'unit', 'type': 'str'}, 'aggregation_type': {'key': 'aggregationType', 'type': 'str'}, 'lock_aggregation_type': {'key': 'lockAggregationType', 'type': 'str'}, 'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'}, 'dimensions': {'key': 'dimensions', 'type': '[MetricDimension]'}, 'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'}, 'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'}, 'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'}, 'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'}, } def __init__( self, **kwargs ): super(MetricSpecification, self).__init__(**kwargs) self.name = None self.display_name = None self.display_description = None self.unit = None self.aggregation_type = None self.lock_aggregation_type = None self.supported_aggregation_types = kwargs.get('supported_aggregation_types', None) self.dimensions = None self.enable_regional_mdm_account = None self.source_mdm_account = None self.source_mdm_namespace = None self.supported_time_grain_types = None class NetworkAccessControl(msrest.serialization.Model): """Network access control for video analyzer account. :param integration: Public network access for integration group. :type integration: ~video_analyzer.models.GroupLevelAccessControl :param ingestion: Public network access for ingestion group. :type ingestion: ~video_analyzer.models.GroupLevelAccessControl :param consumption: Public network access for consumption group. 
:type consumption: ~video_analyzer.models.GroupLevelAccessControl """ _attribute_map = { 'integration': {'key': 'integration', 'type': 'GroupLevelAccessControl'}, 'ingestion': {'key': 'ingestion', 'type': 'GroupLevelAccessControl'}, 'consumption': {'key': 'consumption', 'type': 'GroupLevelAccessControl'}, } def __init__( self, **kwargs ): super(NetworkAccessControl, self).__init__(**kwargs) self.integration = kwargs.get('integration', None) self.ingestion = kwargs.get('ingestion', None) self.consumption = kwargs.get('consumption', None) class NodeInput(msrest.serialization.Model): """Describes an input signal to be used on a pipeline node. All required parameters must be populated in order to send to Azure. :param node_name: Required. The name of the upstream node in the pipeline which output is used as input of the current node. :type node_name: str """ _validation = { 'node_name': {'required': True}, } _attribute_map = { 'node_name': {'key': 'nodeName', 'type': 'str'}, } def __init__( self, **kwargs ): super(NodeInput, self).__init__(**kwargs) self.node_name = kwargs['node_name'] class Operation(msrest.serialization.Model): """An operation. All required parameters must be populated in order to send to Azure. :param name: Required. The operation name. :type name: str :param display: The operation display name. :type display: ~video_analyzer.models.OperationDisplay :param origin: Origin of the operation. :type origin: str :param properties: Operation properties format. :type properties: ~video_analyzer.models.Properties :param is_data_action: Whether the operation applies to data-plane. :type is_data_action: bool :param action_type: Indicates the action type. Possible values include: "Internal". :type action_type: str or ~video_analyzer.models.ActionType """ _validation = { 'name': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'display': {'key': 'display', 'type': 'OperationDisplay'}, 'origin': {'key': 'origin', 'type': 'str'}, 'properties': {'key': 'properties', 'type': 'Properties'}, 'is_data_action': {'key': 'isDataAction', 'type': 'bool'}, 'action_type': {'key': 'actionType', 'type': 'str'}, } def __init__( self, **kwargs ): super(Operation, self).__init__(**kwargs) self.name = kwargs['name'] self.display = kwargs.get('display', None) self.origin = kwargs.get('origin', None) self.properties = kwargs.get('properties', None) self.is_data_action = kwargs.get('is_data_action', None) self.action_type = kwargs.get('action_type', None) class OperationCollection(msrest.serialization.Model): """A collection of Operation items. :param value: A collection of Operation items. :type value: list[~video_analyzer.models.Operation] """ _attribute_map = { 'value': {'key': 'value', 'type': '[Operation]'}, } def __init__( self, **kwargs ): super(OperationCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) class OperationDisplay(msrest.serialization.Model): """Operation details. :param provider: The service provider. :type provider: str :param resource: Resource on which the operation is performed. :type resource: str :param operation: The operation type. :type operation: str :param description: The operation description. 
:type description: str """ _attribute_map = { 'provider': {'key': 'provider', 'type': 'str'}, 'resource': {'key': 'resource', 'type': 'str'}, 'operation': {'key': 'operation', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, } def __init__( self, **kwargs ): super(OperationDisplay, self).__init__(**kwargs) self.provider = kwargs.get('provider', None) self.resource = kwargs.get('resource', None) self.operation = kwargs.get('operation', None) self.description = kwargs.get('description', None) class ParameterDeclaration(msrest.serialization.Model): """Single topology parameter declaration. Declared parameters can and must be referenced throughout the topology and can optionally have default values to be used when they are not defined in the pipelines. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the parameter. :type name: str :param type: Required. Type of the parameter. Possible values include: "String", "SecretString", "Int", "Double", "Bool". :type type: str or ~video_analyzer.models.ParameterType :param description: Description of the parameter. :type description: str :param default: The default value for the parameter to be used if the pipeline does not specify a value. :type default: str """ _validation = { 'name': {'required': True}, 'type': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'default': {'key': 'default', 'type': 'str'}, } def __init__( self, **kwargs ): super(ParameterDeclaration, self).__init__(**kwargs) self.name = kwargs['name'] self.type = kwargs['type'] self.description = kwargs.get('description', None) self.default = kwargs.get('default', None) class ParameterDefinition(msrest.serialization.Model): """Defines the parameter value of an specific pipeline topology parameter. See pipeline topology parameters for more information. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the parameter declared in the pipeline topology. :type name: str :param value: Parameter value to be applied on this specific pipeline. :type value: str """ _validation = { 'name': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'value': {'key': 'value', 'type': 'str'}, } def __init__( self, **kwargs ): super(ParameterDefinition, self).__init__(**kwargs) self.name = kwargs['name'] self.value = kwargs.get('value', None) class PemCertificateList(CertificateSource): """A list of PEM formatted certificates. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param certificates: Required. PEM formatted public certificates. One certificate per entry. :type certificates: list[str] """ _validation = { 'type': {'required': True}, 'certificates': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'certificates': {'key': 'certificates', 'type': '[str]'}, } def __init__( self, **kwargs ): super(PemCertificateList, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.PemCertificateList' # type: str self.certificates = kwargs['certificates'] class PipelineJob(ProxyResource): """Pipeline job represents a unique instance of a batch topology, used for offline processing of selected portions of archived content. 
Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: Reference to an existing pipeline topology. When activated, this pipeline job will process content according to the pipeline topology definition. :type topology_name: str :param description: An optional description for the pipeline. :type description: str :ivar state: Current state of the pipeline (read-only). Possible values include: "Processing", "Canceled", "Completed", "Failed". :vartype state: str or ~video_analyzer.models.PipelineJobState :ivar expiration: The date-time by when this pipeline job will be automatically deleted from your account. :vartype expiration: ~datetime.datetime :ivar error: Details about the error, in case the pipeline job fails. :vartype error: ~video_analyzer.models.PipelineJobError :param parameters: List of the instance level parameter values for the user-defined topology parameters. A pipeline can only define or override parameters values for parameters which have been declared in the referenced topology. Topology parameters without a default value must be defined. Topology parameters with a default value can be optionally be overridden. :type parameters: list[~video_analyzer.models.ParameterDefinition] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'state': {'readonly': True}, 'expiration': {'readonly': True}, 'error': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'topology_name': {'key': 'properties.topologyName', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'state': {'key': 'properties.state', 'type': 'str'}, 'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'}, 'error': {'key': 'properties.error', 'type': 'PipelineJobError'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'}, } def __init__( self, **kwargs ): super(PipelineJob, self).__init__(**kwargs) self.topology_name = kwargs.get('topology_name', None) self.description = kwargs.get('description', None) self.state = None self.expiration = None self.error = None self.parameters = kwargs.get('parameters', None) class PipelineJobCollection(msrest.serialization.Model): """A collection of PipelineJob items. :param value: A collection of PipelineJob items. :type value: list[~video_analyzer.models.PipelineJob] :param next_link: A link to the next page of the collection (when the collection contains too many results to return in one response). 
:type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[PipelineJob]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(PipelineJobCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class PipelineJobError(msrest.serialization.Model): """Details about the error for a failed pipeline job. :param code: The error code. :type code: str :param message: The error message. :type message: str """ _attribute_map = { 'code': {'key': 'code', 'type': 'str'}, 'message': {'key': 'message', 'type': 'str'}, } def __init__( self, **kwargs ): super(PipelineJobError, self).__init__(**kwargs) self.code = kwargs.get('code', None) self.message = kwargs.get('message', None) class PipelineJobOperationStatus(msrest.serialization.Model): """Used for tracking the status of an operation on the pipeline job. Variables are only populated by the server, and will be ignored when sending a request. :ivar name: The name of the pipeline job operation. :vartype name: str :ivar status: The status of the pipeline job operation. :vartype status: str :ivar error: The error details for the pipeline job operation. :vartype error: ~video_analyzer.models.ErrorDetail """ _validation = { 'name': {'readonly': True}, 'status': {'readonly': True}, 'error': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'status': {'key': 'status', 'type': 'str'}, 'error': {'key': 'error', 'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(PipelineJobOperationStatus, self).__init__(**kwargs) self.name = None self.status = None self.error = None class PipelineJobUpdate(ProxyResource): """Pipeline job represents a unique instance of a batch topology, used for offline processing of selected portions of archived content. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param topology_name: Reference to an existing pipeline topology. When activated, this pipeline job will process content according to the pipeline topology definition. :type topology_name: str :param description: An optional description for the pipeline. :type description: str :ivar state: Current state of the pipeline (read-only). Possible values include: "Processing", "Canceled", "Completed", "Failed". :vartype state: str or ~video_analyzer.models.PipelineJobState :ivar expiration: The date-time by when this pipeline job will be automatically deleted from your account. :vartype expiration: ~datetime.datetime :ivar error: Details about the error, in case the pipeline job fails. :vartype error: ~video_analyzer.models.PipelineJobError :param parameters: List of the instance level parameter values for the user-defined topology parameters. A pipeline can only define or override parameters values for parameters which have been declared in the referenced topology. Topology parameters without a default value must be defined. 
     Topology parameters with a default value can optionally be overridden.
    :type parameters: list[~video_analyzer.models.ParameterDefinition]
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'state': {'readonly': True},
        'expiration': {'readonly': True},
        'error': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'topology_name': {'key': 'properties.topologyName', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'state': {'key': 'properties.state', 'type': 'str'},
        'expiration': {'key': 'properties.expiration', 'type': 'iso-8601'},
        'error': {'key': 'properties.error', 'type': 'PipelineJobError'},
        'parameters': {'key': 'properties.parameters', 'type': '[ParameterDefinition]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(PipelineJobUpdate, self).__init__(**kwargs)
        self.topology_name = kwargs.get('topology_name', None)
        self.description = kwargs.get('description', None)
        self.state = None
        self.expiration = None
        self.error = None
        self.parameters = kwargs.get('parameters', None)


class PipelineTopology(ProxyResource):
    """Pipeline topology describes the processing steps to be applied when processing content for
    a particular outcome. The topology should be defined according to the scenario to be achieved
    and can be reused across many pipeline instances which share the same processing
    characteristics. For instance, a pipeline topology which captures content from an RTSP camera
    and archives the content can be reused across many different cameras, as long as the same
    processing is to be applied across all the cameras. Individual instance properties can be
    defined through the use of user-defined parameters, which allow for a topology to be
    parameterized. This allows individual pipelines to refer to different values, such as
    individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the
    following:

    * Parameters: list of user-defined parameters that can be referenced across the topology
      nodes.
    * Sources: list of one or more data source nodes such as an RTSP source which allows for
      content to be ingested from cameras.
    * Processors: list of nodes which perform data analysis or transformations.
    * Sinks: list of one or more data sinks which allow for data to be stored or exported to
      other destinations.

    Variables are only populated by the server, and will be ignored when sending a request.

    All required parameters must be populated in order to send to Azure.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~video_analyzer.models.SystemData
    :param kind: Required. Topology kind. Possible values include: "Live", "Batch".
    :type kind: str or ~video_analyzer.models.Kind
    :param sku: Required. Describes the properties of a SKU.
    :type sku: ~video_analyzer.models.Sku
    :param description: An optional description of the pipeline topology. It is recommended that
     the expected use of the topology be described here.
    :type description: str
    :param parameters: List of the topology parameter declarations. Parameters declared here can
     be referenced throughout the topology nodes through the use of "${PARAMETER_NAME}" string
     pattern. Parameters can have optional default values and can later be defined in individual
     instances of the pipeline.
    :type parameters: list[~video_analyzer.models.ParameterDeclaration]
    :param sources: List of the topology source nodes. Source nodes enable external data to be
     ingested by the pipeline.
    :type sources: list[~video_analyzer.models.SourceNodeBase]
    :param processors: List of the topology processor nodes. Processor nodes enable pipeline data
     to be analyzed, processed or transformed.
    :type processors: list[~video_analyzer.models.ProcessorNodeBase]
    :param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or
     exported.
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'system_data': {'readonly': True},
        'kind': {'required': True},
        'sku': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'system_data': {'key': 'systemData', 'type': 'SystemData'},
        'kind': {'key': 'kind', 'type': 'str'},
        'sku': {'key': 'sku', 'type': 'Sku'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'},
        'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'},
        'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'},
        'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(PipelineTopology, self).__init__(**kwargs)
        self.kind = kwargs['kind']
        self.sku = kwargs['sku']
        self.description = kwargs.get('description', None)
        self.parameters = kwargs.get('parameters', None)
        self.sources = kwargs.get('sources', None)
        self.processors = kwargs.get('processors', None)
        self.sinks = kwargs.get('sinks', None)


class PipelineTopologyCollection(msrest.serialization.Model):
    """A collection of PipelineTopology items.

    :param value: A collection of PipelineTopology items.
    :type value: list[~video_analyzer.models.PipelineTopology]
    :param next_link: A link to the next page of the collection (when the collection contains too
     many results to return in one response).
    :type next_link: str
    """

    _attribute_map = {
        'value': {'key': 'value', 'type': '[PipelineTopology]'},
        'next_link': {'key': '@nextLink', 'type': 'str'},
    }

    def __init__(
        self,
        **kwargs
    ):
        super(PipelineTopologyCollection, self).__init__(**kwargs)
        self.value = kwargs.get('value', None)
        self.next_link = kwargs.get('next_link', None)


class PipelineTopologyUpdate(ProxyResource):
    """Pipeline topology describes the processing steps to be applied when processing content for
    a particular outcome. The topology should be defined according to the scenario to be achieved
    and can be reused across many pipeline instances which share the same processing
    characteristics. For instance, a pipeline topology which captures content from an RTSP camera
    and archives the content can be reused across many different cameras, as long as the same
    processing is to be applied across all the cameras. Individual instance properties can be
    defined through the use of user-defined parameters, which allow for a topology to be
    parameterized. This allows individual pipelines to refer to different values, such as
    individual cameras' RTSP endpoints and credentials. Overall a topology is composed of the
    following:

    * Parameters: list of user-defined parameters that can be referenced across the topology
      nodes.
    * Sources: list of one or more data source nodes such as an RTSP source which allows for
      content to be ingested from cameras.
    * Processors: list of nodes which perform data analysis or transformations.
    * Sinks: list of one or more data sinks which allow for data to be stored or exported to
      other destinations.

    Variables are only populated by the server, and will be ignored when sending a request.

    :ivar id: Fully qualified resource ID for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or
     "Microsoft.Storage/storageAccounts".
    :vartype type: str
    :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy
     information.
    :vartype system_data: ~video_analyzer.models.SystemData
    :param kind: Topology kind. Possible values include: "Live", "Batch".
    :type kind: str or ~video_analyzer.models.Kind
    :param sku: Describes the properties of a SKU.
    :type sku: ~video_analyzer.models.Sku
    :param description: An optional description of the pipeline topology. It is recommended that
     the expected use of the topology be described here.
    :type description: str
    :param parameters: List of the topology parameter declarations. Parameters declared here can
     be referenced throughout the topology nodes through the use of "${PARAMETER_NAME}" string
     pattern. Parameters can have optional default values and can later be defined in individual
     instances of the pipeline.
    :type parameters: list[~video_analyzer.models.ParameterDeclaration]
    :param sources: List of the topology source nodes. Source nodes enable external data to be
     ingested by the pipeline.
    :type sources: list[~video_analyzer.models.SourceNodeBase]
    :param processors: List of the topology processor nodes. Processor nodes enable pipeline data
     to be analyzed, processed or transformed.
    :type processors: list[~video_analyzer.models.ProcessorNodeBase]
    :param sinks: List of the topology sink nodes. Sink nodes allow pipeline data to be stored or
     exported.
:type sinks: list[~video_analyzer.models.SinkNodeBase] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'kind': {'key': 'kind', 'type': 'str'}, 'sku': {'key': 'sku', 'type': 'Sku'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'parameters': {'key': 'properties.parameters', 'type': '[ParameterDeclaration]'}, 'sources': {'key': 'properties.sources', 'type': '[SourceNodeBase]'}, 'processors': {'key': 'properties.processors', 'type': '[ProcessorNodeBase]'}, 'sinks': {'key': 'properties.sinks', 'type': '[SinkNodeBase]'}, } def __init__( self, **kwargs ): super(PipelineTopologyUpdate, self).__init__(**kwargs) self.kind = kwargs.get('kind', None) self.sku = kwargs.get('sku', None) self.description = kwargs.get('description', None) self.parameters = kwargs.get('parameters', None) self.sources = kwargs.get('sources', None) self.processors = kwargs.get('processors', None) self.sinks = kwargs.get('sinks', None) class PrivateEndpoint(msrest.serialization.Model): """The Private Endpoint resource. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: The ARM identifier for Private Endpoint. :vartype id: str """ _validation = { 'id': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, } def __init__( self, **kwargs ): super(PrivateEndpoint, self).__init__(**kwargs) self.id = None class PrivateEndpointConnection(Resource): """The Private Endpoint Connection resource. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param private_endpoint: The resource of private end point. :type private_endpoint: ~video_analyzer.models.PrivateEndpoint :param private_link_service_connection_state: A collection of information about the state of the connection between service consumer and provider. :type private_link_service_connection_state: ~video_analyzer.models.PrivateLinkServiceConnectionState :ivar provisioning_state: The provisioning state of the private endpoint connection resource. Possible values include: "Succeeded", "Creating", "Deleting", "Failed". 
:vartype provisioning_state: str or ~video_analyzer.models.PrivateEndpointConnectionProvisioningState """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'provisioning_state': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'private_endpoint': {'key': 'properties.privateEndpoint', 'type': 'PrivateEndpoint'}, 'private_link_service_connection_state': {'key': 'properties.privateLinkServiceConnectionState', 'type': 'PrivateLinkServiceConnectionState'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, } def __init__( self, **kwargs ): super(PrivateEndpointConnection, self).__init__(**kwargs) self.private_endpoint = kwargs.get('private_endpoint', None) self.private_link_service_connection_state = kwargs.get('private_link_service_connection_state', None) self.provisioning_state = None class PrivateEndpointConnectionListResult(msrest.serialization.Model): """List of private endpoint connection associated with the specified storage account. :param value: Array of private endpoint connections. :type value: list[~video_analyzer.models.PrivateEndpointConnection] """ _attribute_map = { 'value': {'key': 'value', 'type': '[PrivateEndpointConnection]'}, } def __init__( self, **kwargs ): super(PrivateEndpointConnectionListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) class PrivateLinkResource(Resource): """A private link resource. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :ivar group_id: The private link resource group id. :vartype group_id: str :ivar required_members: The private link resource required member names. :vartype required_members: list[str] :param required_zone_names: The private link resource Private link DNS zone name. :type required_zone_names: list[str] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'group_id': {'readonly': True}, 'required_members': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'group_id': {'key': 'properties.groupId', 'type': 'str'}, 'required_members': {'key': 'properties.requiredMembers', 'type': '[str]'}, 'required_zone_names': {'key': 'properties.requiredZoneNames', 'type': '[str]'}, } def __init__( self, **kwargs ): super(PrivateLinkResource, self).__init__(**kwargs) self.group_id = None self.required_members = None self.required_zone_names = kwargs.get('required_zone_names', None) class PrivateLinkResourceListResult(msrest.serialization.Model): """A list of private link resources. 
:param value: Array of private link resources. :type value: list[~video_analyzer.models.PrivateLinkResource] """ _attribute_map = { 'value': {'key': 'value', 'type': '[PrivateLinkResource]'}, } def __init__( self, **kwargs ): super(PrivateLinkResourceListResult, self).__init__(**kwargs) self.value = kwargs.get('value', None) class PrivateLinkServiceConnectionState(msrest.serialization.Model): """A collection of information about the state of the connection between service consumer and provider. :param status: Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service. Possible values include: "Pending", "Approved", "Rejected". :type status: str or ~video_analyzer.models.PrivateEndpointServiceConnectionStatus :param description: The reason for approval/rejection of the connection. :type description: str :param actions_required: A message indicating if changes on the service provider require any updates on the consumer. :type actions_required: str """ _attribute_map = { 'status': {'key': 'status', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'actions_required': {'key': 'actionsRequired', 'type': 'str'}, } def __init__( self, **kwargs ): super(PrivateLinkServiceConnectionState, self).__init__(**kwargs) self.status = kwargs.get('status', None) self.description = kwargs.get('description', None) self.actions_required = kwargs.get('actions_required', None) class Properties(msrest.serialization.Model): """Metric properties. Variables are only populated by the server, and will be ignored when sending a request. :ivar service_specification: The service specifications. :vartype service_specification: ~video_analyzer.models.ServiceSpecification """ _validation = { 'service_specification': {'readonly': True}, } _attribute_map = { 'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'}, } def __init__( self, **kwargs ): super(Properties, self).__init__(**kwargs) self.service_specification = None class ResourceIdentity(msrest.serialization.Model): """The user assigned managed identity to use when accessing a resource. All required parameters must be populated in order to send to Azure. :param user_assigned_identity: Required. The user assigned managed identity's resource identifier to use when accessing a resource. :type user_assigned_identity: str """ _validation = { 'user_assigned_identity': {'required': True}, } _attribute_map = { 'user_assigned_identity': {'key': 'userAssignedIdentity', 'type': 'str'}, } def __init__( self, **kwargs ): super(ResourceIdentity, self).__init__(**kwargs) self.user_assigned_identity = kwargs['user_assigned_identity'] class RsaTokenKey(TokenKey): """Required validation properties for tokens generated with RSA algorithm. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param kid: Required. JWT token key id. Validation keys are looked up based on the key id present on the JWT token header. :type kid: str :param alg: Required. RSA algorithm to be used: RS256, RS384 or RS512. Possible values include: "RS256", "RS384", "RS512". :type alg: str or ~video_analyzer.models.AccessPolicyRsaAlgo :param n: Required. RSA public key modulus. :type n: str :param e: Required. RSA public key exponent. 
:type e: str """ _validation = { 'type': {'required': True}, 'kid': {'required': True}, 'alg': {'required': True}, 'n': {'required': True}, 'e': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'kid': {'key': 'kid', 'type': 'str'}, 'alg': {'key': 'alg', 'type': 'str'}, 'n': {'key': 'n', 'type': 'str'}, 'e': {'key': 'e', 'type': 'str'}, } def __init__( self, **kwargs ): super(RsaTokenKey, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.RsaTokenKey' # type: str self.alg = kwargs['alg'] self.n = kwargs['n'] self.e = kwargs['e'] class SourceNodeBase(NodeBase): """Base class for topology source nodes. You probably want to use the sub-classes and not this class directly. Known sub-classes are: RtspSource, VideoSource. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str """ _validation = { 'type': {'required': True}, 'name': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.RtspSource': 'RtspSource', '#Microsoft.VideoAnalyzer.VideoSource': 'VideoSource'} } def __init__( self, **kwargs ): super(SourceNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SourceNodeBase' # type: str class RtspSource(SourceNodeBase): """RTSP source allows for media from an RTSP camera or generic RTSP server to be ingested into a pipeline. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str :param transport: Network transport utilized by the RTSP and RTP exchange: TCP or HTTP. When using TCP, the RTP packets are interleaved on the TCP RTSP connection. When using HTTP, the RTSP messages are exchanged through long lived HTTP connections, and the RTP packages are interleaved in the HTTP connections alongside the RTSP messages. Possible values include: "Http", "Tcp". :type transport: str or ~video_analyzer.models.RtspTransport :param endpoint: Required. RTSP endpoint information for Video Analyzer to connect to. This contains the required information for Video Analyzer to connect to RTSP cameras and/or generic RTSP servers. :type endpoint: ~video_analyzer.models.EndpointBase """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'endpoint': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'transport': {'key': 'transport', 'type': 'str'}, 'endpoint': {'key': 'endpoint', 'type': 'EndpointBase'}, } def __init__( self, **kwargs ): super(RtspSource, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.RtspSource' # type: str self.transport = kwargs.get('transport', None) self.endpoint = kwargs['endpoint'] class TunnelBase(msrest.serialization.Model): """Base class for tunnel objects. You probably want to use the sub-classes and not this class directly. Known sub-classes are: SecureIotDeviceRemoteTunnel. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. 
:type type: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel': 'SecureIotDeviceRemoteTunnel'} } def __init__( self, **kwargs ): super(TunnelBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class SecureIotDeviceRemoteTunnel(TunnelBase): """A remote tunnel securely established using IoT Hub device information. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param iot_hub_name: Required. Name of the IoT Hub. :type iot_hub_name: str :param device_id: Required. The IoT device id to use when establishing the remote tunnel. This string is case-sensitive. :type device_id: str """ _validation = { 'type': {'required': True}, 'iot_hub_name': {'required': True}, 'device_id': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'iot_hub_name': {'key': 'iotHubName', 'type': 'str'}, 'device_id': {'key': 'deviceId', 'type': 'str'}, } def __init__( self, **kwargs ): super(SecureIotDeviceRemoteTunnel, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SecureIotDeviceRemoteTunnel' # type: str self.iot_hub_name = kwargs['iot_hub_name'] self.device_id = kwargs['device_id'] class ServiceSpecification(msrest.serialization.Model): """The service metric specifications. Variables are only populated by the server, and will be ignored when sending a request. :ivar log_specifications: List of log specifications. :vartype log_specifications: list[~video_analyzer.models.LogSpecification] :ivar metric_specifications: List of metric specifications. :vartype metric_specifications: list[~video_analyzer.models.MetricSpecification] """ _validation = { 'log_specifications': {'readonly': True}, 'metric_specifications': {'readonly': True}, } _attribute_map = { 'log_specifications': {'key': 'logSpecifications', 'type': '[LogSpecification]'}, 'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecification]'}, } def __init__( self, **kwargs ): super(ServiceSpecification, self).__init__(**kwargs) self.log_specifications = None self.metric_specifications = None class SinkNodeBase(NodeBase): """Base class for topology sink nodes. You probably want to use the sub-classes and not this class directly. Known sub-classes are: VideoSink. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str :param inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput] """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoSink': 'VideoSink'} } def __init__( self, **kwargs ): super(SinkNodeBase, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.SinkNodeBase' # type: str self.inputs = kwargs['inputs'] class Sku(msrest.serialization.Model): """The SKU details. 
Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param name: Required. The SKU name. Possible values include: "Live_S1", "Batch_S1". :type name: str or ~video_analyzer.models.SkuName :ivar tier: The SKU tier. Possible values include: "Standard". :vartype tier: str or ~video_analyzer.models.SkuTier """ _validation = { 'name': {'required': True}, 'tier': {'readonly': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'tier': {'key': 'tier', 'type': 'str'}, } def __init__( self, **kwargs ): super(Sku, self).__init__(**kwargs) self.name = kwargs['name'] self.tier = None class StorageAccount(msrest.serialization.Model): """The details about the associated storage account. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :param id: Required. The ID of the storage account resource. Video Analyzer relies on tables, queues, and blobs. The primary storage account must be a Standard Storage account (either Microsoft.ClassicStorage or Microsoft.Storage). :type id: str :param identity: A managed identity that Video Analyzer will use to access the storage account. :type identity: ~video_analyzer.models.ResourceIdentity :ivar status: The current status of the storage account mapping. :vartype status: str """ _validation = { 'id': {'required': True}, 'status': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'ResourceIdentity'}, 'status': {'key': 'status', 'type': 'str'}, } def __init__( self, **kwargs ): super(StorageAccount, self).__init__(**kwargs) self.id = kwargs['id'] self.identity = kwargs.get('identity', None) self.status = None class SystemData(msrest.serialization.Model): """Metadata pertaining to creation and last modification of the resource. :param created_by: The identity that created the resource. :type created_by: str :param created_by_type: The type of identity that created the resource. Possible values include: "User", "Application", "ManagedIdentity", "Key". :type created_by_type: str or ~video_analyzer.models.CreatedByType :param created_at: The timestamp of resource creation (UTC). :type created_at: ~datetime.datetime :param last_modified_by: The identity that last modified the resource. :type last_modified_by: str :param last_modified_by_type: The type of identity that last modified the resource. Possible values include: "User", "Application", "ManagedIdentity", "Key". :type last_modified_by_type: str or ~video_analyzer.models.CreatedByType :param last_modified_at: The timestamp of resource last modification (UTC). 
:type last_modified_at: ~datetime.datetime """ _attribute_map = { 'created_by': {'key': 'createdBy', 'type': 'str'}, 'created_by_type': {'key': 'createdByType', 'type': 'str'}, 'created_at': {'key': 'createdAt', 'type': 'iso-8601'}, 'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'}, 'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'}, 'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'}, } def __init__( self, **kwargs ): super(SystemData, self).__init__(**kwargs) self.created_by = kwargs.get('created_by', None) self.created_by_type = kwargs.get('created_by_type', None) self.created_at = kwargs.get('created_at', None) self.last_modified_by = kwargs.get('last_modified_by', None) self.last_modified_by_type = kwargs.get('last_modified_by_type', None) self.last_modified_at = kwargs.get('last_modified_at', None) class TimeSequenceBase(msrest.serialization.Model): """A sequence of datetime ranges as a string. You probably want to use the sub-classes and not this class directly. Known sub-classes are: VideoSequenceAbsoluteTimeMarkers. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers': 'VideoSequenceAbsoluteTimeMarkers'} } def __init__( self, **kwargs ): super(TimeSequenceBase, self).__init__(**kwargs) self.type = None # type: Optional[str] class TlsEndpoint(EndpointBase): """TLS endpoint describes an endpoint that the pipeline can connect to over TLS transport (data is encrypted in transit). All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Required. Credentials to be presented to the endpoint. :type credentials: ~video_analyzer.models.CredentialsBase :param url: Required. The endpoint URL for Video Analyzer to connect to. :type url: str :param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint URL. This is an optional property, typically used when the endpoint is behind a firewall. :type tunnel: ~video_analyzer.models.TunnelBase :param trusted_certificates: List of trusted certificate authorities when authenticating a TLS connection. A null list designates that Azure Video Analyzer's list of trusted authorities should be used. :type trusted_certificates: ~video_analyzer.models.CertificateSource :param validation_options: Validation options to use when authenticating a TLS connection. By default, strict validation is used. 
:type validation_options: ~video_analyzer.models.TlsValidationOptions """ _validation = { 'type': {'required': True}, 'credentials': {'required': True}, 'url': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'credentials': {'key': 'credentials', 'type': 'CredentialsBase'}, 'url': {'key': 'url', 'type': 'str'}, 'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, 'trusted_certificates': {'key': 'trustedCertificates', 'type': 'CertificateSource'}, 'validation_options': {'key': 'validationOptions', 'type': 'TlsValidationOptions'}, } def __init__( self, **kwargs ): super(TlsEndpoint, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.TlsEndpoint' # type: str self.trusted_certificates = kwargs.get('trusted_certificates', None) self.validation_options = kwargs.get('validation_options', None) class TlsValidationOptions(msrest.serialization.Model): """Options for controlling the validation of TLS endpoints. :param ignore_hostname: When set to 'true' causes the certificate subject name validation to be skipped. Default is 'false'. :type ignore_hostname: str :param ignore_signature: When set to 'true' causes the certificate chain trust validation to be skipped. Default is 'false'. :type ignore_signature: str """ _attribute_map = { 'ignore_hostname': {'key': 'ignoreHostname', 'type': 'str'}, 'ignore_signature': {'key': 'ignoreSignature', 'type': 'str'}, } def __init__( self, **kwargs ): super(TlsValidationOptions, self).__init__(**kwargs) self.ignore_hostname = kwargs.get('ignore_hostname', None) self.ignore_signature = kwargs.get('ignore_signature', None) class TokenClaim(msrest.serialization.Model): """Properties for expected token claims. All required parameters must be populated in order to send to Azure. :param name: Required. Name of the claim which must be present on the token. :type name: str :param value: Required. Expected value of the claim to be present on the token. :type value: str """ _validation = { 'name': {'required': True}, 'value': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'value': {'key': 'value', 'type': 'str'}, } def __init__( self, **kwargs ): super(TokenClaim, self).__init__(**kwargs) self.name = kwargs['name'] self.value = kwargs['value'] class TrackedResource(Resource): """The resource model definition for an Azure Resource Manager tracked top level resource which has 'tags' and a 'location'. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param location: Required. The geo-location where the resource lives. 
:type location: str """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'location': {'required': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'}, } def __init__( self, **kwargs ): super(TrackedResource, self).__init__(**kwargs) self.tags = kwargs.get('tags', None) self.location = kwargs['location'] class UnsecuredEndpoint(EndpointBase): """Unsecured endpoint describes an endpoint that the pipeline can connect to over clear transport (no encryption in transit). All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param credentials: Required. Credentials to be presented to the endpoint. :type credentials: ~video_analyzer.models.CredentialsBase :param url: Required. The endpoint URL for Video Analyzer to connect to. :type url: str :param tunnel: Describes the tunnel through which Video Analyzer can connect to the endpoint URL. This is an optional property, typically used when the endpoint is behind a firewall. :type tunnel: ~video_analyzer.models.TunnelBase """ _validation = { 'type': {'required': True}, 'credentials': {'required': True}, 'url': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'credentials': {'key': 'credentials', 'type': 'CredentialsBase'}, 'url': {'key': 'url', 'type': 'str'}, 'tunnel': {'key': 'tunnel', 'type': 'TunnelBase'}, } def __init__( self, **kwargs ): super(UnsecuredEndpoint, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UnsecuredEndpoint' # type: str class UserAssignedManagedIdentity(msrest.serialization.Model): """The details of the user assigned managed identity used by the Video Analyzer resource. Variables are only populated by the server, and will be ignored when sending a request. :ivar client_id: The client ID. :vartype client_id: str :ivar principal_id: The principal ID. :vartype principal_id: str """ _validation = { 'client_id': {'readonly': True}, 'principal_id': {'readonly': True}, } _attribute_map = { 'client_id': {'key': 'clientId', 'type': 'str'}, 'principal_id': {'key': 'principalId', 'type': 'str'}, } def __init__( self, **kwargs ): super(UserAssignedManagedIdentity, self).__init__(**kwargs) self.client_id = None self.principal_id = None class UsernamePasswordCredentials(CredentialsBase): """Username and password credentials. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param username: Required. Username to be presented as part of the credentials. :type username: str :param password: Required. Password to be presented as part of the credentials. It is recommended that this value is parameterized as a secret string in order to prevent this value to be returned as part of the resource on API requests. 
:type password: str """ _validation = { 'type': {'required': True}, 'username': {'required': True}, 'password': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'username': {'key': 'username', 'type': 'str'}, 'password': {'key': 'password', 'type': 'str'}, } def __init__( self, **kwargs ): super(UsernamePasswordCredentials, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.UsernamePasswordCredentials' # type: str self.username = kwargs['username'] self.password = kwargs['password'] class VideoAnalyzer(TrackedResource): """The Video Analyzer account. Variables are only populated by the server, and will be ignored when sending a request. All required parameters must be populated in order to send to Azure. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param location: Required. The geo-location where the resource lives. :type location: str :param identity: The identities associated to the Video Analyzer resource. :type identity: ~video_analyzer.models.VideoAnalyzerIdentity :param storage_accounts: The storage accounts for this resource. :type storage_accounts: list[~video_analyzer.models.StorageAccount] :ivar endpoints: The endpoints associated with this resource. :vartype endpoints: list[~video_analyzer.models.Endpoint] :param encryption: The account encryption properties. :type encryption: ~video_analyzer.models.AccountEncryption :param iot_hubs: The IoT Hubs for this resource. :type iot_hubs: list[~video_analyzer.models.IotHub] :param public_network_access: Whether or not public network access is allowed for resources under the Video Analyzer account. Possible values include: "Enabled", "Disabled". :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess :param network_access_control: Network access control for Video Analyzer. :type network_access_control: ~video_analyzer.models.NetworkAccessControl :ivar provisioning_state: Provisioning state of the Video Analyzer account. Possible values include: "Failed", "InProgress", "Succeeded". :vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState :ivar private_endpoint_connections: Private Endpoint Connections created under Video Analyzer account. 
:vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection] """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'location': {'required': True}, 'endpoints': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'private_endpoint_connections': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'tags': {'key': 'tags', 'type': '{str}'}, 'location': {'key': 'location', 'type': 'str'}, 'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'}, 'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'}, 'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'}, 'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'}, 'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'}, 'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'}, 'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'}, } def __init__( self, **kwargs ): super(VideoAnalyzer, self).__init__(**kwargs) self.identity = kwargs.get('identity', None) self.storage_accounts = kwargs.get('storage_accounts', None) self.endpoints = None self.encryption = kwargs.get('encryption', None) self.iot_hubs = kwargs.get('iot_hubs', None) self.public_network_access = kwargs.get('public_network_access', None) self.network_access_control = kwargs.get('network_access_control', None) self.provisioning_state = None self.private_endpoint_connections = None class VideoAnalyzerCollection(msrest.serialization.Model): """A collection of VideoAnalyzer items. :param value: A collection of VideoAnalyzer items. :type value: list[~video_analyzer.models.VideoAnalyzer] """ _attribute_map = { 'value': {'key': 'value', 'type': '[VideoAnalyzer]'}, } def __init__( self, **kwargs ): super(VideoAnalyzerCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) class VideoAnalyzerIdentity(msrest.serialization.Model): """The managed identity for the Video Analyzer resource. All required parameters must be populated in order to send to Azure. :param type: Required. The identity type. :type type: str :param user_assigned_identities: The User Assigned Managed Identities. :type user_assigned_identities: dict[str, ~video_analyzer.models.UserAssignedManagedIdentity] """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': 'type', 'type': 'str'}, 'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{UserAssignedManagedIdentity}'}, } def __init__( self, **kwargs ): super(VideoAnalyzerIdentity, self).__init__(**kwargs) self.type = kwargs['type'] self.user_assigned_identities = kwargs.get('user_assigned_identities', None) class VideoAnalyzerOperationStatus(msrest.serialization.Model): """Status of video analyzer operation. All required parameters must be populated in order to send to Azure. :param name: Required. Operation identifier. :type name: str :param id: Operation resource ID. :type id: str :param start_time: Operation start time. :type start_time: str :param end_time: Operation end time. 
:type end_time: str :param status: Operation status. :type status: str :param error: The error detail. :type error: ~video_analyzer.models.ErrorDetail """ _validation = { 'name': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'start_time': {'key': 'startTime', 'type': 'str'}, 'end_time': {'key': 'endTime', 'type': 'str'}, 'status': {'key': 'status', 'type': 'str'}, 'error': {'key': 'error', 'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(VideoAnalyzerOperationStatus, self).__init__(**kwargs) self.name = kwargs['name'] self.id = kwargs.get('id', None) self.start_time = kwargs.get('start_time', None) self.end_time = kwargs.get('end_time', None) self.status = kwargs.get('status', None) self.error = kwargs.get('error', None) class VideoAnalyzerPrivateEndpointConnectionOperationStatus(msrest.serialization.Model): """Status of private endpoint connection operation. All required parameters must be populated in order to send to Azure. :param name: Required. Operation identifier. :type name: str :param id: Operation resource ID. :type id: str :param start_time: Operation start time. :type start_time: str :param end_time: Operation end time. :type end_time: str :param status: Operation status. :type status: str :param error: The error detail. :type error: ~video_analyzer.models.ErrorDetail """ _validation = { 'name': {'required': True}, } _attribute_map = { 'name': {'key': 'name', 'type': 'str'}, 'id': {'key': 'id', 'type': 'str'}, 'start_time': {'key': 'startTime', 'type': 'str'}, 'end_time': {'key': 'endTime', 'type': 'str'}, 'status': {'key': 'status', 'type': 'str'}, 'error': {'key': 'error', 'type': 'ErrorDetail'}, } def __init__( self, **kwargs ): super(VideoAnalyzerPrivateEndpointConnectionOperationStatus, self).__init__(**kwargs) self.name = kwargs['name'] self.id = kwargs.get('id', None) self.start_time = kwargs.get('start_time', None) self.end_time = kwargs.get('end_time', None) self.status = kwargs.get('status', None) self.error = kwargs.get('error', None) class VideoAnalyzerUpdate(msrest.serialization.Model): """The update operation for a Video Analyzer account. Variables are only populated by the server, and will be ignored when sending a request. :param tags: A set of tags. Resource tags. :type tags: dict[str, str] :param identity: The identities associated to the Video Analyzer resource. :type identity: ~video_analyzer.models.VideoAnalyzerIdentity :param storage_accounts: The storage accounts for this resource. :type storage_accounts: list[~video_analyzer.models.StorageAccount] :ivar endpoints: The endpoints associated with this resource. :vartype endpoints: list[~video_analyzer.models.Endpoint] :param encryption: The account encryption properties. :type encryption: ~video_analyzer.models.AccountEncryption :param iot_hubs: The IoT Hubs for this resource. :type iot_hubs: list[~video_analyzer.models.IotHub] :param public_network_access: Whether or not public network access is allowed for resources under the Video Analyzer account. Possible values include: "Enabled", "Disabled". :type public_network_access: str or ~video_analyzer.models.PublicNetworkAccess :param network_access_control: Network access control for Video Analyzer. :type network_access_control: ~video_analyzer.models.NetworkAccessControl :ivar provisioning_state: Provisioning state of the Video Analyzer account. Possible values include: "Failed", "InProgress", "Succeeded". 
:vartype provisioning_state: str or ~video_analyzer.models.ProvisioningState :ivar private_endpoint_connections: Private Endpoint Connections created under Video Analyzer account. :vartype private_endpoint_connections: list[~video_analyzer.models.PrivateEndpointConnection] """ _validation = { 'endpoints': {'readonly': True}, 'provisioning_state': {'readonly': True}, 'private_endpoint_connections': {'readonly': True}, } _attribute_map = { 'tags': {'key': 'tags', 'type': '{str}'}, 'identity': {'key': 'identity', 'type': 'VideoAnalyzerIdentity'}, 'storage_accounts': {'key': 'properties.storageAccounts', 'type': '[StorageAccount]'}, 'endpoints': {'key': 'properties.endpoints', 'type': '[Endpoint]'}, 'encryption': {'key': 'properties.encryption', 'type': 'AccountEncryption'}, 'iot_hubs': {'key': 'properties.iotHubs', 'type': '[IotHub]'}, 'public_network_access': {'key': 'properties.publicNetworkAccess', 'type': 'str'}, 'network_access_control': {'key': 'properties.networkAccessControl', 'type': 'NetworkAccessControl'}, 'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'}, 'private_endpoint_connections': {'key': 'properties.privateEndpointConnections', 'type': '[PrivateEndpointConnection]'}, } def __init__( self, **kwargs ): super(VideoAnalyzerUpdate, self).__init__(**kwargs) self.tags = kwargs.get('tags', None) self.identity = kwargs.get('identity', None) self.storage_accounts = kwargs.get('storage_accounts', None) self.endpoints = None self.encryption = kwargs.get('encryption', None) self.iot_hubs = kwargs.get('iot_hubs', None) self.public_network_access = kwargs.get('public_network_access', None) self.network_access_control = kwargs.get('network_access_control', None) self.provisioning_state = None self.private_endpoint_connections = None class VideoArchival(msrest.serialization.Model): """Video archival properties. :param retention_period: Video retention period indicates the maximum age of the video archive segments which are intended to be kept in storage. It must be provided in the ISO8601 duration format in the granularity of days, up to a maximum of 10 years. For example, if this is set to P30D (30 days), content older than 30 days will be periodically deleted. This value can be updated at any time and the new desired retention period will be effective within 24 hours. :type retention_period: str """ _attribute_map = { 'retention_period': {'key': 'retentionPeriod', 'type': 'str'}, } def __init__( self, **kwargs ): super(VideoArchival, self).__init__(**kwargs) self.retention_period = kwargs.get('retention_period', None) class VideoContentToken(msrest.serialization.Model): """"Video content token grants access to the video content URLs.". Variables are only populated by the server, and will be ignored when sending a request. :ivar expiration_date: The content token expiration date in ISO8601 format (eg. 2021-01-01T00:00:00Z). :vartype expiration_date: ~datetime.datetime :ivar token: The content token value to be added to the video content URL as the value for the "token" query string parameter. The token is specific to a single video. 
:vartype token: str """ _validation = { 'expiration_date': {'readonly': True}, 'token': {'readonly': True}, } _attribute_map = { 'expiration_date': {'key': 'expirationDate', 'type': 'iso-8601'}, 'token': {'key': 'token', 'type': 'str'}, } def __init__( self, **kwargs ): super(VideoContentToken, self).__init__(**kwargs) self.expiration_date = None self.token = None class VideoContentUrls(msrest.serialization.Model): """Set of URLs to the video content. :param download_url: Video file download URL. This URL can be used in conjunction with the video content authorization token to download the video MP4 file. The resulting MP4 file can be played on any standard media player. It is available when the video type is 'file' and video file is available for consumption. :type download_url: str :param archive_base_url: Video archive streaming base URL. The archived content can be automatically played by the Azure Video Analyzer player widget. Alternatively, this URL can be used in conjunction with the video content authorization token on any compatible DASH or HLS players by appending the following to the base URL: .. code-block:: - HLSv4: /manifest(format=m3u8-aapl).m3u8 - HLS CMAF: /manifest(format=m3u8-cmaf) - DASH CMAF: /manifest(format=mpd-time-cmaf) Moreover, an ongoing video recording can be played in "live mode" with latencies which are approximately double of the chosen video segment length. It is available when the video type is 'archive' and video archiving is enabled. :type archive_base_url: str :param rtsp_tunnel_url: Video low-latency streaming URL. The live content can be automatically played by the Azure Video Analyzer player widget. Alternatively, this URL can be used in conjunction with the video content authorization token to expose a WebSocket tunneled RTSP stream. It is available when the video type is 'archive' and a live, low-latency feed is available from the source. :type rtsp_tunnel_url: str :param preview_image_urls: Video preview image URLs. These URLs can be used in conjunction with the video content authorization token to download the most recent still image from the video archive in different resolutions. They are available when the video type is 'archive' and preview images are enabled. :type preview_image_urls: ~video_analyzer.models.VideoPreviewImageUrls """ _attribute_map = { 'download_url': {'key': 'downloadUrl', 'type': 'str'}, 'archive_base_url': {'key': 'archiveBaseUrl', 'type': 'str'}, 'rtsp_tunnel_url': {'key': 'rtspTunnelUrl', 'type': 'str'}, 'preview_image_urls': {'key': 'previewImageUrls', 'type': 'VideoPreviewImageUrls'}, } def __init__( self, **kwargs ): super(VideoContentUrls, self).__init__(**kwargs) self.download_url = kwargs.get('download_url', None) self.archive_base_url = kwargs.get('archive_base_url', None) self.rtsp_tunnel_url = kwargs.get('rtsp_tunnel_url', None) self.preview_image_urls = kwargs.get('preview_image_urls', None) class VideoCreationProperties(msrest.serialization.Model): """Optional properties to be used in case a new video resource needs to be created on the service. These will not take effect if the video already exists. :param title: Optional title provided by the user. Value can be up to 256 characters long. :type title: str :param description: Optional description provided by the user. Value can be up to 2048 characters long. :type description: str :param segment_length: Segment length indicates the length of individual content files (segments) which are persisted to storage. 
Smaller segments provide lower archive playback latency but generate larger volume of storage transactions. Larger segments reduce the amount of storage transactions while increasing the archive playback latency. Value must be specified in ISO8601 duration format (i.e. "PT30S" equals 30 seconds) and can vary between 30 seconds to 5 minutes, in 30 seconds increments. Changing this value after the initial call to create the video resource can lead to errors when uploading content to the archive. Default value is 30 seconds. This property is only allowed for topologies where "kind" is set to "live". :type segment_length: str :param retention_period: Video retention period indicates how long the video is kept in storage. Value must be specified in ISO8601 duration format (i.e. "P1D" equals 1 day) and can vary between 1 day to 10 years, in 1 day increments. When absent (null), all video content is retained indefinitely. This property is only allowed for topologies where "kind" is set to "live". :type retention_period: str """ _attribute_map = { 'title': {'key': 'title', 'type': 'str'}, 'description': {'key': 'description', 'type': 'str'}, 'segment_length': {'key': 'segmentLength', 'type': 'str'}, 'retention_period': {'key': 'retentionPeriod', 'type': 'str'}, } def __init__( self, **kwargs ): super(VideoCreationProperties, self).__init__(**kwargs) self.title = kwargs.get('title', None) self.description = kwargs.get('description', None) self.segment_length = kwargs.get('segment_length', None) self.retention_period = kwargs.get('retention_period', None) class VideoEncoderBase(msrest.serialization.Model): """Base type for all video encoding presets, which define the recipe or instructions on how the input video should be processed. You probably want to use the sub-classes and not this class directly. Known sub-classes are: VideoEncoderH264. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param bitrate_kbps: The maximum bitrate, in kilobits per second or Kbps, at which video should be encoded. If omitted, encoder sets it automatically to try and match the quality of the input video. :type bitrate_kbps: str :param frame_rate: The frame rate (in frames per second) of the encoded video. The value must be greater than zero, and less than or equal to 300. If omitted, the encoder uses the average frame rate of the input video. :type frame_rate: str :param scale: Describes the resolution of the encoded video. If omitted, the encoder uses the resolution of the input video. :type scale: ~video_analyzer.models.VideoScale """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, 'frame_rate': {'key': 'frameRate', 'type': 'str'}, 'scale': {'key': 'scale', 'type': 'VideoScale'}, } _subtype_map = { 'type': {'#Microsoft.VideoAnalyzer.VideoEncoderH264': 'VideoEncoderH264'} } def __init__( self, **kwargs ): super(VideoEncoderBase, self).__init__(**kwargs) self.type = None # type: Optional[str] self.bitrate_kbps = kwargs.get('bitrate_kbps', None) self.frame_rate = kwargs.get('frame_rate', None) self.scale = kwargs.get('scale', None) class VideoEncoderH264(VideoEncoderBase): """A custom preset for encoding video with the H.264 (AVC) codec. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. 
:type type: str :param bitrate_kbps: The maximum bitrate, in kilobits per second or Kbps, at which video should be encoded. If omitted, encoder sets it automatically to try and match the quality of the input video. :type bitrate_kbps: str :param frame_rate: The frame rate (in frames per second) of the encoded video. The value must be greater than zero, and less than or equal to 300. If omitted, the encoder uses the average frame rate of the input video. :type frame_rate: str :param scale: Describes the resolution of the encoded video. If omitted, the encoder uses the resolution of the input video. :type scale: ~video_analyzer.models.VideoScale """ _validation = { 'type': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'bitrate_kbps': {'key': 'bitrateKbps', 'type': 'str'}, 'frame_rate': {'key': 'frameRate', 'type': 'str'}, 'scale': {'key': 'scale', 'type': 'VideoScale'}, } def __init__( self, **kwargs ): super(VideoEncoderH264, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoEncoderH264' # type: str class VideoEntity(ProxyResource): """Represents a video resource within Azure Video Analyzer. Videos can be ingested from RTSP cameras through live pipelines or can be created by exporting sequences from existing captured video through a pipeline job. Videos ingested through live pipelines can be streamed through Azure Video Analyzer Player Widget or compatible players. Exported videos can be downloaded as MP4 files. Variables are only populated by the server, and will be ignored when sending a request. :ivar id: Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}. :vartype id: str :ivar name: The name of the resource. :vartype name: str :ivar type: The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts". :vartype type: str :ivar system_data: Azure Resource Manager metadata containing createdBy and modifiedBy information. :vartype system_data: ~video_analyzer.models.SystemData :param title: Optional video title provided by the user. Value can be up to 256 characters long. :type title: str :param description: Optional video description provided by the user. Value can be up to 2048 characters long. :type description: str :ivar type_properties_type: Video content type. Different content types are suitable for different applications and scenarios. Possible values include: "Archive", "File". :vartype type_properties_type: str or ~video_analyzer.models.VideoType :ivar flags: Video flags contain information about the available video actions and its dynamic properties based on the current video state. :vartype flags: ~video_analyzer.models.VideoFlags :ivar content_urls: Set of URLs to the video content. :vartype content_urls: ~video_analyzer.models.VideoContentUrls :param media_info: Contains information about the video and audio content. :type media_info: ~video_analyzer.models.VideoMediaInfo :param archival: Video archival properties. 
:type archival: ~video_analyzer.models.VideoArchival """ _validation = { 'id': {'readonly': True}, 'name': {'readonly': True}, 'type': {'readonly': True}, 'system_data': {'readonly': True}, 'type_properties_type': {'readonly': True}, 'flags': {'readonly': True}, 'content_urls': {'readonly': True}, } _attribute_map = { 'id': {'key': 'id', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'type': {'key': 'type', 'type': 'str'}, 'system_data': {'key': 'systemData', 'type': 'SystemData'}, 'title': {'key': 'properties.title', 'type': 'str'}, 'description': {'key': 'properties.description', 'type': 'str'}, 'type_properties_type': {'key': 'properties.type', 'type': 'str'}, 'flags': {'key': 'properties.flags', 'type': 'VideoFlags'}, 'content_urls': {'key': 'properties.contentUrls', 'type': 'VideoContentUrls'}, 'media_info': {'key': 'properties.mediaInfo', 'type': 'VideoMediaInfo'}, 'archival': {'key': 'properties.archival', 'type': 'VideoArchival'}, } def __init__( self, **kwargs ): super(VideoEntity, self).__init__(**kwargs) self.title = kwargs.get('title', None) self.description = kwargs.get('description', None) self.type_properties_type = None self.flags = None self.content_urls = None self.media_info = kwargs.get('media_info', None) self.archival = kwargs.get('archival', None) class VideoEntityCollection(msrest.serialization.Model): """A collection of VideoEntity items. :param value: A collection of VideoEntity items. :type value: list[~video_analyzer.models.VideoEntity] :param next_link: A link to the next page of the collection (when the collection contains too many results to return in one response). :type next_link: str """ _attribute_map = { 'value': {'key': 'value', 'type': '[VideoEntity]'}, 'next_link': {'key': '@nextLink', 'type': 'str'}, } def __init__( self, **kwargs ): super(VideoEntityCollection, self).__init__(**kwargs) self.value = kwargs.get('value', None) self.next_link = kwargs.get('next_link', None) class VideoFlags(msrest.serialization.Model): """Video flags contain information about the available video actions and its dynamic properties based on the current video state. All required parameters must be populated in order to send to Azure. :param can_stream: Required. Value indicating whether or not the video can be streamed. Only "archive" type videos can be streamed. :type can_stream: bool :param has_data: Required. Value indicating whether or not there has ever been data recorded or uploaded into the video. Newly created videos have this value set to false. :type has_data: bool :param is_in_use: Required. Value indicating whether or not the video is currently being referenced be an active pipeline. The fact that is being referenced, doesn't necessarily indicate that data is being received. For example, video recording may be gated on events or camera may not be accessible at the time. :type is_in_use: bool """ _validation = { 'can_stream': {'required': True}, 'has_data': {'required': True}, 'is_in_use': {'required': True}, } _attribute_map = { 'can_stream': {'key': 'canStream', 'type': 'bool'}, 'has_data': {'key': 'hasData', 'type': 'bool'}, 'is_in_use': {'key': 'isInUse', 'type': 'bool'}, } def __init__( self, **kwargs ): super(VideoFlags, self).__init__(**kwargs) self.can_stream = kwargs['can_stream'] self.has_data = kwargs['has_data'] self.is_in_use = kwargs['is_in_use'] class VideoMediaInfo(msrest.serialization.Model): """Contains information about the video and audio content. 
:param segment_length: Video segment length indicates the length of individual video files (segments) which are persisted to storage. Smaller segments provide lower archive playback latency but generate larger volume of storage transactions. Larger segments reduce the amount of storage transactions while increasing the archive playback latency. Value must be specified in ISO8601 duration format (i.e. "PT30S" equals 30 seconds) and can vary between 30 seconds to 5 minutes, in 30 seconds increments. :type segment_length: str """ _attribute_map = { 'segment_length': {'key': 'segmentLength', 'type': 'str'}, } def __init__( self, **kwargs ): super(VideoMediaInfo, self).__init__(**kwargs) self.segment_length = kwargs.get('segment_length', None) class VideoPreviewImageUrls(msrest.serialization.Model): """Video preview image URLs. These URLs can be used in conjunction with the video content authorization token to download the most recent still image from the video archive in different resolutions. They are available when the video type is 'archive' and preview images are enabled. :param small: Low resolution preview image URL. :type small: str :param medium: Medium resolution preview image URL. :type medium: str :param large: High resolution preview image URL. :type large: str """ _attribute_map = { 'small': {'key': 'small', 'type': 'str'}, 'medium': {'key': 'medium', 'type': 'str'}, 'large': {'key': 'large', 'type': 'str'}, } def __init__( self, **kwargs ): super(VideoPreviewImageUrls, self).__init__(**kwargs) self.small = kwargs.get('small', None) self.medium = kwargs.get('medium', None) self.large = kwargs.get('large', None) class VideoPublishingOptions(msrest.serialization.Model): """Optional flags used to change how video is published. These are only allowed for topologies where "kind" is set to "live". :param disable_archive: When set to 'true' content will not be archived or recorded. This is used, for example, when the topology is used only for low latency video streaming. Default is 'false'. If set to 'true', then "disableRtspPublishing" must be set to 'false'. :type disable_archive: str :param disable_rtsp_publishing: When set to 'true' the RTSP playback URL will not be published, disabling low latency streaming. This is used, for example, when the topology is used only for archiving content. Default is 'false'. If set to 'true', then "disableArchive" must be set to 'false'. :type disable_rtsp_publishing: str """ _attribute_map = { 'disable_archive': {'key': 'disableArchive', 'type': 'str'}, 'disable_rtsp_publishing': {'key': 'disableRtspPublishing', 'type': 'str'}, } def __init__( self, **kwargs ): super(VideoPublishingOptions, self).__init__(**kwargs) self.disable_archive = kwargs.get('disable_archive', None) self.disable_rtsp_publishing = kwargs.get('disable_rtsp_publishing', None) class VideoScale(msrest.serialization.Model): """The video scaling information. :param height: The desired output video height. :type height: str :param width: The desired output video width. :type width: str :param mode: Describes the video scaling mode to be applied. Default mode is 'Pad'. If the mode is 'Pad' or 'Stretch' then both width and height must be specified. Else if the mode is 'PreserveAspectRatio' then only one of width or height need be provided. Possible values include: "Pad", "PreserveAspectRatio", "Stretch". 
:type mode: str or ~video_analyzer.models.VideoScaleMode """ _attribute_map = { 'height': {'key': 'height', 'type': 'str'}, 'width': {'key': 'width', 'type': 'str'}, 'mode': {'key': 'mode', 'type': 'str'}, } def __init__( self, **kwargs ): super(VideoScale, self).__init__(**kwargs) self.height = kwargs.get('height', None) self.width = kwargs.get('width', None) self.mode = kwargs.get('mode', None) class VideoSequenceAbsoluteTimeMarkers(TimeSequenceBase): """A sequence of absolute datetime ranges as a string. The datetime values should follow IS08601, and the sum of the ranges should add up to 24 hours or less. Currently, there can be only one range specified in the sequence. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param ranges: Required. The sequence of datetime ranges. Example: '[["2021-10-05T03:30:00Z", "2021-10-05T03:40:00Z"]]'. :type ranges: str """ _validation = { 'type': {'required': True}, 'ranges': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'ranges': {'key': 'ranges', 'type': 'str'}, } def __init__( self, **kwargs ): super(VideoSequenceAbsoluteTimeMarkers, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSequenceAbsoluteTimeMarkers' # type: str self.ranges = kwargs['ranges'] class VideoSink(SinkNodeBase): """Video sink in a live topology allows for video and audio to be captured, optionally archived, and published via a video resource. If archiving is enabled, this results in a video of type 'archive'. If used in a batch topology, this allows for video and audio to be stored as a file, and published via a video resource of type 'file'. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str :param inputs: Required. An array of upstream node references within the topology to be used as inputs for this node. :type inputs: list[~video_analyzer.models.NodeInput] :param video_name: Required. Name of a new or existing video resource used to capture and publish content. Note: if downstream of RTSP source, and if disableArchive is set to true, then no content is archived. :type video_name: str :param video_creation_properties: Optional video properties to be used in case a new video resource needs to be created on the service. :type video_creation_properties: ~video_analyzer.models.VideoCreationProperties :param video_publishing_options: Options to change how the video sink publishes content via the video resource. This property is only allowed for topologies where "kind" is set to "live". 
:type video_publishing_options: ~video_analyzer.models.VideoPublishingOptions """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'inputs': {'required': True}, 'video_name': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'inputs': {'key': 'inputs', 'type': '[NodeInput]'}, 'video_name': {'key': 'videoName', 'type': 'str'}, 'video_creation_properties': {'key': 'videoCreationProperties', 'type': 'VideoCreationProperties'}, 'video_publishing_options': {'key': 'videoPublishingOptions', 'type': 'VideoPublishingOptions'}, } def __init__( self, **kwargs ): super(VideoSink, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSink' # type: str self.video_name = kwargs['video_name'] self.video_creation_properties = kwargs.get('video_creation_properties', None) self.video_publishing_options = kwargs.get('video_publishing_options', None) class VideoSource(SourceNodeBase): """Video source allows for content from a Video Analyzer video resource to be ingested into a pipeline. Currently supported only with batch pipelines. All required parameters must be populated in order to send to Azure. :param type: Required. The discriminator for derived types.Constant filled by server. :type type: str :param name: Required. Node name. Must be unique within the topology. :type name: str :param video_name: Required. Name of the Video Analyzer video resource to be used as the source. :type video_name: str :param time_sequences: Required. Describes a sequence of datetime ranges. The video source only picks up recorded media within these ranges. :type time_sequences: ~video_analyzer.models.TimeSequenceBase """ _validation = { 'type': {'required': True}, 'name': {'required': True}, 'video_name': {'required': True}, 'time_sequences': {'required': True}, } _attribute_map = { 'type': {'key': '@type', 'type': 'str'}, 'name': {'key': 'name', 'type': 'str'}, 'video_name': {'key': 'videoName', 'type': 'str'}, 'time_sequences': {'key': 'timeSequences', 'type': 'TimeSequenceBase'}, } def __init__( self, **kwargs ): super(VideoSource, self).__init__(**kwargs) self.type = '#Microsoft.VideoAnalyzer.VideoSource' # type: str self.video_name = kwargs['video_name'] self.time_sequences = kwargs['time_sequences']
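
# Hedged illustration, not part of the generated SDK module above: a minimal
# sketch of how these kwargs-based msrest models compose, using only classes
# defined in this file. The __main__ guard keeps it out of normal imports.
if __name__ == "__main__":
    sample_encoder = VideoEncoderH264(
        bitrate_kbps="2000",
        frame_rate="30",
        scale=VideoScale(height="720", width="1280", mode="Pad"),
    )
    # The '@type' discriminator is filled in by the subclass constructor.
    print(sample_encoder.type)        # '#Microsoft.VideoAnalyzer.VideoEncoderH264'
    print(sample_encoder.scale.mode)  # 'Pad'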
1.140625
1
tests/test_app_settings_dict.py
wheelercj/app_settings
0
5008
import pytest import re from typing import Any, Tuple from dataclasses import dataclass from app_settings_dict import Settings def test_simple_settings() -> None: settings = Settings( settings_file_path="C:/Users/chris/Documents/sample_settings_file_name.json", default_factories={ "key1": lambda: "value1", }, data={ "key1": "hello", "key2": "world", }, ) assert settings["key1"] == "hello" assert settings["key2"] == "world" del settings["key1"] del settings["key2"] assert "key1" not in settings assert "key2" not in settings assert settings["key1"] == "value1" with pytest.raises(KeyError): settings["key2"] def test_default_settings() -> None: settings = Settings( settings_file_path="sample settings file name.json", default_factories={ "key1": lambda: "value1", "key2": lambda: "value2", "key3": lambda: "value3", }, default_settings={ "key3": [], }, data={ "key1": "hello", "key2": "world", }, ) assert settings["key1"] == "hello" assert settings["key2"] == "world" assert settings["key3"] == "value3" del settings["key3"] assert settings["key3"] == "value3" settings.reset("key3") assert settings["key3"] == [] settings["key3"] = "something" assert settings["key3"] == "something" settings.reset_all() assert settings["key1"] == "hello" assert settings["key2"] == "world" assert settings["key3"] == [] def test_load_without_file() -> None: def sample_prompt_function(settings: Settings) -> Settings: # s = input("Enter the settings: ") return settings.update({"key1": "a", "key2": "b"}) settings = Settings( settings_file_path="not a real file.yaml", prompt_user_for_all_settings=sample_prompt_function, default_factories={ "key1": lambda: "value1", "key2": lambda: "value2", "key3": lambda: "value3", }, default_settings={ "key3": [], "key4": "value4", }, data={ "key1": "hello", "key2": "world", }, ) assert settings["key1"] == "hello" assert settings["key2"] == "world" assert settings["key3"] == "value3" settings.load(fallback_option="prompt user") assert settings["key1"] == "a" assert settings["key2"] == "b" assert settings["key3"] == "value3" with pytest.raises(KeyError): settings["key4"] settings.load(fallback_option="default settings") assert settings["key1"] == "a" assert settings["key2"] == "b" assert settings["key3"] == "value3" assert settings["key4"] == "value4" settings.clear() settings.load(fallback_option="default settings") assert settings["key1"] == "hello" assert settings["key2"] == "world" assert settings["key3"] == [] assert settings["key4"] == "value4" with pytest.raises(ValueError): settings.load(fallback_option="invalid option") def test_load_after_empty() -> None: settings = Settings( settings_file_path="sample settings file name.json", prompt_user_for_all_settings=lambda: 1 / 0, default_factories={ "key1": lambda: "value1", }, default_settings={ "key1": [], }, data={ "key1": "hello", }, ) assert settings["key1"] == "hello" settings.clear() assert settings["key1"] == "value1" def test_prompt() -> None: def sample_prompt_function() -> Any: # s = input("Enter a setting: ") return "a" settings = Settings( settings_file_path="sample settings file name.json", prompt_user_for_all_settings=lambda: {"key1": "a", "key2": "b"}, default_factories={ "key1": sample_prompt_function, "key2": lambda: "value2", "key3": lambda: "value3", }, default_settings={ "key3": [], }, data={ "key1": "hello", "key2": "world", }, ) assert settings["key1"] == "hello" settings.prompt("key1") assert settings["key1"] == "a" def test_changing_settings_before_load() -> None: settings = Settings( settings_file_path="sample 
settings file name.json", default_factories={ "key1": lambda: "value1", }, default_settings={ "key1": [], }, data={ "key1": "hello", }, ) assert settings["key1"] == "hello" settings.load(fallback_option="default settings") assert settings["key1"] == "hello" settings["key1"] = "a" settings.load(fallback_option="default settings") assert settings["key1"] == "a" def test_update() -> None: settings = Settings( settings_file_path="sample settings file name.json", default_factories={ "key1": lambda: "value1", }, default_settings={ "key1": [], }, data={ "key1": "hello", }, ) assert settings["key1"] == "hello" settings.update({"key1": "a"}) assert settings["key1"] == "a" settings.update({"key2": "b"}) assert settings["key2"] == "b" def test_Settings__is_using_json() -> None: settings = Settings( settings_file_path="sample_settings_file_name.json", default_factories={ "key1": lambda: "value1", }, data={ "key1": "hello", "key2": "world", }, ) assert settings._Settings__is_using_json() settings.settings_file_path = "sample_settings_file_name.yaml" assert not settings._Settings__is_using_json() def test_load_from_dict() -> None: settings = Settings() settings.load_from_dict( { "key1": "hello", "key2": "world", } ) assert len(settings.data) == 0 settings = Settings( data={ "key1": "a", "key2": "b", } ) settings.load_from_dict( { "key1": "c", "key2": "d", } ) assert settings.data["key1"] == "c" assert settings.data["key2"] == "d" def test_dump_to_dict() -> None: settings = Settings( settings_file_path="sample_settings_file_name.json", data={ "key1": "hello", "key2": "world", }, ) assert settings.dump_to_dict() == { "key1": "hello", "key2": "world", } def test_nested_Settings() -> None: settings = Settings( settings_file_path="sample_settings_file_name.json", default_settings={ "key6": [], "key7": Settings( data={ "key8": "value8", } ), }, data={ "key1": "hello", "key2": "world", "key3": "value3", "key4": Settings( settings_file_path="why would anyone want an inner file though.yaml", data={ "key5": "value5", }, ), }, ) assert settings.dump_to_dict() == { "key1": "hello", "key2": "world", "key3": "value3", "key4": { "key5": "value5", }, } def test_creating_setting_after_init() -> None: settings = Settings( settings_file_path="sample_settings_file_name.json", default_settings={ "key1": [], "key2": "value2", }, ) with pytest.raises(KeyError): settings["key3"] = "value3" def test_prompt_error() -> None: settings = Settings( settings_file_path="nonexistent file.json", default_settings={ "key1": [], "key2": "value2", }, ) with pytest.raises(ValueError): settings.load(fallback_option="prompt user") def test_nested_setting_loaders_and_dumpers() -> None: @dataclass class Coords: x: int y: int def __init__(self, x_and_y: Tuple[int, int]) -> None: self.x = x_and_y[0] self.y = x_and_y[1] settings = Settings( setting_loader=Coords, setting_dumper=lambda obj: (obj.x, obj.y), data={ "location 1": Coords(x_and_y=(1, 2)), "location 2": Coords(x_and_y=(3, 4)), "patterns": Settings( setting_loader=re.compile, setting_dumper=lambda x: x.pattern, data={ "phone number pattern": re.compile(r"\d{3}-?\d{3}-?\d{4}"), "email address pattern": re.compile( r"[\w\d.+-]+@[\w\d.-]+\.[\w\d]+" ), }, ), }, ) settings_dict = settings.dump_to_dict() assert settings_dict["location 1"] == (1, 2) assert settings_dict["location 2"] == (3, 4) assert settings_dict["patterns"]["phone number pattern"] == r"\d{3}-?\d{3}-?\d{4}" assert ( settings_dict["patterns"]["email address pattern"] == r"[\w\d.+-]+@[\w\d.-]+\.[\w\d]+" ) 
settings.load_from_dict(settings_dict) assert settings["location 1"] == Coords(x_and_y=(1, 2)) assert settings["location 2"] == Coords(x_and_y=(3, 4)) assert settings["patterns"]["phone number pattern"] == re.compile( r"\d{3}-?\d{3}-?\d{4}" ) assert settings["patterns"]["email address pattern"] == re.compile( r"[\w\d.+-]+@[\w\d.-]+\.[\w\d]+" ) def test_init_without_keywords() -> None: with pytest.raises(TypeError): Settings("sample settings file path.json")
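
# Hedged addition, not from the original test module: a small round-trip check
# built only on the Settings API already exercised above (dump_to_dict and
# load_from_dict); nothing new is assumed about the library.
def test_dump_then_load_round_trip() -> None:
    settings = Settings(
        settings_file_path="sample_settings_file_name.json",
        data={
            "key1": "hello",
            "key2": "world",
        },
    )
    dumped = settings.dump_to_dict()
    settings.load_from_dict(dumped)
    assert settings["key1"] == "hello"
    assert settings["key2"] == "world"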
1.804688
2
freqtrade/strategy/informative_decorator.py
Fractate/freqbot
1
5016
from typing import Any, Callable, NamedTuple, Optional, Union from pandas import DataFrame from freqtrade.exceptions import OperationalException from freqtrade.strategy.strategy_helper import merge_informative_pair PopulateIndicators = Callable[[Any, DataFrame, dict], DataFrame] class InformativeData(NamedTuple): asset: Optional[str] timeframe: str fmt: Union[str, Callable[[Any], str], None] ffill: bool def informative(timeframe: str, asset: str = '', fmt: Optional[Union[str, Callable[[Any], str]]] = None, ffill: bool = True) -> Callable[[PopulateIndicators], PopulateIndicators]: """ A decorator for populate_indicators_Nn(self, dataframe, metadata), allowing these functions to define informative indicators. Example usage: @informative('1h') def populate_indicators_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame: dataframe['rsi'] = ta.RSI(dataframe, timeperiod=14) return dataframe :param timeframe: Informative timeframe. Must always be equal or higher than strategy timeframe. :param asset: Informative asset, for example BTC, BTC/USDT, ETH/BTC. Do not specify to use current pair. :param fmt: Column format (str) or column formatter (callable(name, asset, timeframe)). When not specified, defaults to: * {base}_{quote}_{column}_{timeframe} if asset is specified. * {column}_{timeframe} if asset is not specified. Format string supports these format variables: * {asset} - full name of the asset, for example 'BTC/USDT'. * {base} - base currency in lower case, for example 'eth'. * {BASE} - same as {base}, except in upper case. * {quote} - quote currency in lower case, for example 'usdt'. * {QUOTE} - same as {quote}, except in upper case. * {column} - name of dataframe column. * {timeframe} - timeframe of informative dataframe. :param ffill: ffill dataframe after merging informative pair. """ _asset = asset _timeframe = timeframe _fmt = fmt _ffill = ffill def decorator(fn: PopulateIndicators): informative_pairs = getattr(fn, '_ft_informative', []) informative_pairs.append(InformativeData(_asset, _timeframe, _fmt, _ffill)) setattr(fn, '_ft_informative', informative_pairs) return fn return decorator def _format_pair_name(config, pair: str) -> str: return pair.format(stake_currency=config['stake_currency'], stake=config['stake_currency']).upper() def _create_and_merge_informative_pair(strategy, dataframe: DataFrame, metadata: dict, inf_data: InformativeData, populate_indicators: PopulateIndicators): asset = inf_data.asset or '' timeframe = inf_data.timeframe fmt = inf_data.fmt config = strategy.config if asset: # Insert stake currency if needed. asset = _format_pair_name(config, asset) else: # Not specifying an asset will define informative dataframe for current pair. asset = metadata['pair'] if '/' in asset: base, quote = asset.split('/') else: # When futures are supported this may need reevaluation. # base, quote = asset, '' raise OperationalException('Not implemented.') # Default format. This optimizes for the common case: informative pairs using same stake # currency. When quote currency matches stake currency, column name will omit base currency. # This allows easily reconfiguring strategy to use different base currency. In a rare case # where it is desired to keep quote currency in column name at all times user should specify # fmt='{base}_{quote}_{column}_{timeframe}' format or similar. 
if not fmt: fmt = '{column}_{timeframe}' # Informatives of current pair if inf_data.asset: fmt = '{base}_{quote}_' + fmt # Informatives of other pairs inf_metadata = {'pair': asset, 'timeframe': timeframe} inf_dataframe = strategy.dp.get_pair_dataframe(asset, timeframe) inf_dataframe = populate_indicators(strategy, inf_dataframe, inf_metadata) formatter: Any = None if callable(fmt): formatter = fmt # A custom user-specified formatter function. else: formatter = fmt.format # A default string formatter. fmt_args = { 'BASE': base.upper(), 'QUOTE': quote.upper(), 'base': base.lower(), 'quote': quote.lower(), 'asset': asset, 'timeframe': timeframe, } inf_dataframe.rename(columns=lambda column: formatter(column=column, **fmt_args), inplace=True) date_column = formatter(column='date', **fmt_args) if date_column in dataframe.columns: raise OperationalException(f'Duplicate column name {date_column} exists in ' f'dataframe! Ensure column names are unique!') dataframe = merge_informative_pair(dataframe, inf_dataframe, strategy.timeframe, timeframe, ffill=inf_data.ffill, append_timeframe=False, date_column=date_column) return dataframe
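
# Hedged usage sketch, not part of this module: how the decorator above is
# typically applied inside a strategy class, following the docstring example.
# `IStrategy` and `ta` (freqtrade strategy interface and TA-Lib wrapper) are
# assumed imports, not provided by this file, so the sketch stays commented.
#
# class SampleStrategy(IStrategy):
#     timeframe = '5m'
#
#     @informative('1h')
#     def populate_indicators_1h(self, dataframe: DataFrame, metadata: dict) -> DataFrame:
#         # Merged back into the 5m dataframe as 'rsi_1h'.
#         dataframe['rsi'] = ta.RSI(dataframe, timeperiod=14)
#         return dataframe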
2.078125
2
copy_reg.py
rtbo/vkdgen
2
5024
#! /usr/bin/env python3

import os
from os import path

root_dir = path.dirname(path.realpath(__file__))
local_reg_dir = path.join(root_dir, 'registry')
os.makedirs(local_reg_dir, exist_ok=True)


def copy_reg(reg_dir, files):
    import shutil
    for f in files:
        file_path = path.join(reg_dir, f)
        if not path.isfile(file_path):
            raise RuntimeError(file_path + ' could not be found')
        shutil.copy2(file_path, path.join(local_reg_dir, path.basename(f)))


vk_files = [
    'registry/vk.xml',
    'registry/reg.py',
    'registry/generator.py'
]

copy_reg(path.join(root_dir, 'Vulkan-Headers'), vk_files)
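
# Hedged note, not part of the original script: copy_reg() can be reused for
# any other registry checkout that ships the listed files; the names below are
# purely illustrative.
# copy_reg(path.join(root_dir, 'Some-Other-Headers'), ['registry/extra.xml'])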
1.453125
1
examples/plots/plot_pass_network.py
DymondFormation/mplsoccer
0
5032
""" ============ Pass Network ============ This example shows how to plot passes between players in a set formation. """ import pandas as pd from mplsoccer.pitch import Pitch from matplotlib.colors import to_rgba import numpy as np from mplsoccer.statsbomb import read_event, EVENT_SLUG ############################################################################## # Set team and match info, and get event and tactics dataframes for the defined match_id match_id = 15946 team = 'Barcelona' opponent = 'Alavés (A), 2018/19 La Liga' event_dict = read_event(f'{EVENT_SLUG}/{match_id}.json', warn=False) players = event_dict['tactics_lineup'] events = event_dict['event'] ############################################################################## # Adding on the last tactics id and formation for the team for each event events.loc[events.tactics_formation.notnull(), 'tactics_id'] = events.loc[ events.tactics_formation.notnull(), 'id'] events[['tactics_id', 'tactics_formation']] = events.groupby('team_name')[[ 'tactics_id', 'tactics_formation']].ffill() ############################################################################## # Add the abbreviated player position to the players dataframe formation_dict = {1: 'GK', 2: 'RB', 3: 'RCB', 4: 'CB', 5: 'LCB', 6: 'LB', 7: 'RWB', 8: 'LWB', 9: 'RDM', 10: 'CDM', 11: 'LDM', 12: 'RM', 13: 'RCM', 14: 'CM', 15: 'LCM', 16: 'LM', 17: 'RW', 18: 'RAM', 19: 'CAM', 20: 'LAM', 21: 'LW', 22: 'RCF', 23: 'ST', 24: 'LCF', 25: 'SS'} players['position_abbreviation'] = players.player_position_id.map(formation_dict) ############################################################################## # Add on the subsitutions to the players dataframe, i.e. where players are subbed on # but the formation doesn't change sub = events.loc[events.type_name == 'Substitution', ['tactics_id', 'player_id', 'substitution_replacement_id', 'substitution_replacement_name']] players_sub = players.merge(sub.rename({'tactics_id': 'id'}, axis='columns'), on=['id', 'player_id'], how='inner', validate='1:1') players_sub = (players_sub[['id', 'substitution_replacement_id', 'position_abbreviation']] .rename({'substitution_replacement_id': 'player_id'}, axis='columns')) players = pd.concat([players, players_sub]) players.rename({'id': 'tactics_id'}, axis='columns', inplace=True) players = players[['tactics_id', 'player_id', 'position_abbreviation']] ############################################################################## # Add player position information to the events dataframe # add on the position the player was playing in the formation to the events dataframe events = events.merge(players, on=['tactics_id', 'player_id'], how='left', validate='m:1') # add on the position the receipient was playing in the formation to the events dataframe events = events.merge(players.rename({'player_id': 'pass_recipient_id'}, axis='columns'), on=['tactics_id', 'pass_recipient_id'], how='left', validate='m:1', suffixes=['', '_receipt']) ############################################################################## # Create dataframes for passes and player locations # get a dataframe with all passes mask_pass = (events.team_name == team) & (events.type_name == 'Pass') to_keep = ['id', 'match_id', 'player_id', 'player_name', 'outcome_name', 'pass_recipient_id', 'pass_recipient_name', 'x', 'y', 'end_x', 'end_y', 'tactics_id', 'tactics_formation', 'position_abbreviation', 'position_abbreviation_receipt'] passes = events.loc[mask_pass, to_keep].copy() print('Formations used by {} in match: '.format(team), 
passes['tactics_formation'].unique()) ############################################################################## # Filter passes by chosen formation, then group all passes and receipts to # calculate avg x, avg y, count of events for each slot in the formation formation = 433 passes_formation = passes[(passes.tactics_formation == formation) & (passes.position_abbreviation_receipt.notnull())].copy() passer_passes = passes_formation[['position_abbreviation', 'x', 'y']].copy() recipient_passes = passes_formation[['position_abbreviation_receipt', 'end_x', 'end_y']].copy() # rename columns to match those in passer_passes recipient_passes.rename({'position_abbreviation_receipt': 'position_abbreviation', 'end_x': 'x', 'end_y': 'y'}, axis='columns', inplace=True) # create a new dataframe containing all individual passes and receipts from passes_formation appended_passes = pd.concat(objs=[passer_passes, recipient_passes], ignore_index=True) average_locs_and_count = appended_passes.groupby('position_abbreviation').agg({ 'x': ['mean'], 'y': ['mean', 'count']}) average_locs_and_count.columns = ['x', 'y', 'count'] ############################################################################## # Group the passes by unique pairings of players and add the avg player positions to this dataframe # calculate the number of passes between each position (using min/ max so we get passes both ways) passes_formation['pos_max'] = passes_formation[['position_abbreviation', 'position_abbreviation_receipt']].max(axis='columns') passes_formation['pos_min'] = passes_formation[['position_abbreviation', 'position_abbreviation_receipt']].min(axis='columns') passes_between = passes_formation.groupby(['pos_min', 'pos_max']).id.count().reset_index() passes_between.rename({'id': 'pass_count'}, axis='columns', inplace=True) # add on the location of each player so we have the start and end positions of the lines passes_between = passes_between.merge(average_locs_and_count, left_on='pos_min', right_index=True) passes_between = passes_between.merge(average_locs_and_count, left_on='pos_max', right_index=True, suffixes=['', '_end']) ############################################################################## # Calculate the line width and marker sizes relative to the largest counts max_line_width = 18 max_marker_size = 3000 passes_between['width'] = passes_between.pass_count / passes_between.pass_count.max() * max_line_width average_locs_and_count['marker_size'] = (average_locs_and_count['count'] / average_locs_and_count['count'].max() * max_marker_size) ############################################################################## # Set color to make the lines more transparent when fewer passes are made min_transparency = 0.3 color = np.array(to_rgba('white')) color = np.tile(color, (len(passes_between), 1)) c_transparency = passes_between.pass_count / passes_between.pass_count.max() c_transparency = (c_transparency * (1 - min_transparency)) + min_transparency color[:, 3] = c_transparency ############################################################################## # Plotting pitch = Pitch(pitch_type='statsbomb', orientation='horizontal', pitch_color='#22312b', line_color='#c7d5cc', figsize=(16, 11), constrained_layout=True, tight_layout=False) fig, ax = pitch.draw() pass_lines = pitch.lines(passes_between.x, passes_between.y, passes_between.x_end, passes_between.y_end, lw=passes_between.width, color=color, zorder=1, ax=ax) pass_nodes = pitch.scatter(average_locs_and_count.x, average_locs_and_count.y, 
s=average_locs_and_count.marker_size, color='red', edgecolors='black', linewidth=1, alpha=1, ax=ax) for index, row in average_locs_and_count.iterrows(): pitch.annotate(row.name, xy=(row.x, row.y), c='white', va='center', ha='center', size=16, weight='bold', ax=ax) title = ax.set_title("{} {} Formation vs {}".format(team, formation, opponent), size=28, y=0.97, color='#c7d5cc') fig.set_facecolor("#22312b")
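
##############################################################################
# Hedged addition, not in the original example: persisting the figure uses
# plain matplotlib; the output filename is illustrative only.
fig.savefig('pass_network.png', facecolor=fig.get_facecolor(), dpi=150)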
2.09375
2
python/johnstarich/interval.py
JohnStarich/dotfiles
3
5040
import time


class Interval(object):
    def __init__(self, delay_time: int):
        self.delay_time = delay_time
        self.current_time = 0

    @staticmethod
    def now():
        # Current second within the minute (0-59).
        return time.gmtime().tm_sec

    def should_run(self) -> bool:
        # The first call always runs and records the starting second.
        if self.current_time == 0:
            self.current_time = Interval.now()
            return True
        return self.is_done()

    def is_done(self) -> bool:
        # True once the delay has elapsed, or when the seconds counter has
        # wrapped around past the recorded start.
        timestamp = Interval.now()
        return self.current_time + self.delay_time < timestamp or \
            self.current_time > timestamp

    def start(self) -> int:
        self.current_time = Interval.now()
        return self.current_time
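
# Hedged usage sketch, not part of the original module: exercises only the
# Interval API defined above; the 5-second delay is an illustrative value.
if __name__ == "__main__":
    interval = Interval(delay_time=5)
    if interval.should_run():
        # The first call always runs; later calls wait until is_done() is True.
        interval.start()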
2.140625
2
flametree/utils.py
Edinburgh-Genome-Foundry/Flametree
165
5056
import os
import shutil

from .ZipFileManager import ZipFileManager
from .DiskFileManager import DiskFileManager
from .Directory import Directory

import string

printable = set(string.printable) - set("\x0b\x0c")


def is_hex(s):
    return any(c not in printable for c in s)


def file_tree(target, replace=False):
    """Open a connection to a file tree which can be either a disk folder, a
    zip archive, or an in-memory zip archive.

    Parameters
    ----------
    target
      Either the path to a target folder, or a zip file, or '@memory' to write
      a zip file in memory (at which case a string of the zip file is returned).
      If the target is already a flametree directory, it is returned as-is.

    replace
      If True, will remove the target if it already exists. If False, new files
      will be written inside the target and some files may be overwritten.
    """
    if isinstance(target, Directory):
        return target
    if (not isinstance(target, str)) or is_hex(target):
        return Directory(file_manager=ZipFileManager(source=target))
    elif target == "@memory":
        return Directory("@memory", file_manager=ZipFileManager("@memory"))
    elif target.lower().endswith(".zip"):
        return Directory(target, file_manager=ZipFileManager(target, replace=replace))
    else:
        return Directory(target, file_manager=DiskFileManager(target))
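
# Hedged usage sketch, not part of the original module: the calls below only
# exercise file_tree() as defined above; 'archive.zip' and 'output_folder' are
# example targets. Kept commented so importing this module has no side effects.
#
# mem_root = file_tree("@memory")                       # in-memory zip archive
# zip_root = file_tree("archive.zip")                   # zip archive on disk
# dir_root = file_tree("output_folder", replace=True)   # fresh disk folder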
2.328125
2
api-server/server/core/key.py
TK-IBM-Call-for-Code-Challange-2021/call-for-code-challenge-2021
75
5088
""" Api Key validation """ from typing import Optional from fastapi.security.api_key import APIKeyHeader from fastapi import HTTPException, Security, Depends from starlette.status import HTTP_401_UNAUTHORIZED, HTTP_400_BAD_REQUEST, HTTP_403_FORBIDDEN from server.core.security import verify_key from server.db.mongodb import AsyncIOMotorClient, get_database from server.models.user import User from server.db.crud.user import get_user_by_email from pydantic import EmailStr api_key_scheme = APIKeyHeader(name="X-API-KEY", auto_error=False) email_scheme = APIKeyHeader(name="X-EMAIL-ID", auto_error=False) async def validate_request( api_key: Optional[str] = Security(api_key_scheme), email_id: Optional[EmailStr] = Security(email_scheme), db: AsyncIOMotorClient = Depends(get_database) ) -> Optional[User]: """Validate a request with given email and api key to any endpoint resource """ if api_key is None: raise HTTPException( status_code=HTTP_400_BAD_REQUEST, detail="X-API-KEY is missing", headers={} ) if email_id is None: raise HTTPException( status_code=HTTP_400_BAD_REQUEST, detail="X-EMAIL-ID is missing", headers={} ) user = await get_user_by_email(db, email_id) # verify email & API key if user: api_key = str(user.salt) + str(api_key) if not verify_key(api_key, user.hashed_api_key): # api key mismatch raise HTTPException( status_code=HTTP_401_UNAUTHORIZED, detail="Access not allowed", headers={} ) if user.disabled: # disabled user raise HTTPException( status_code=HTTP_403_FORBIDDEN, detail="User is disabled", headers={} ) if not user.is_active: # user's email is not verified raise HTTPException( status_code=HTTP_401_UNAUTHORIZED, detail="Email not verified", headers={} ) # All verified return User(**user.dict()) else: # not a valid email provided raise HTTPException( status_code=HTTP_400_BAD_REQUEST, detail="Unknown Email", headers={} )
2.0625
2
tests/test_mate_hashes_methods.py
MacHu-GWU/pathlib_mate-project
9
5096
# -*- coding: utf-8 -*-

import pytest

from pathlib_mate.pathlib2 import Path


class TestHashesMethods(object):
    def test(self):
        p = Path(__file__)
        assert len({
            p.md5,
            p.get_partial_md5(nbytes=1 << 20),
            p.sha256,
            p.get_partial_sha256(nbytes=1 << 20),
            p.sha512,
            p.get_partial_sha512(nbytes=1 << 20),
        }) == 3


if __name__ == "__main__":
    import os

    basename = os.path.basename(__file__)
    pytest.main([basename, "-s", "--tb=native"])
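
# Hedged addition, not in the original test module: when nbytes covers the
# whole file, each partial hash above should equal its full counterpart, which
# is what the set-of-3 assertion in TestHashesMethods already implies.
def test_partial_hash_matches_full_hash_for_small_file():
    p = Path(__file__)
    assert p.get_partial_md5(nbytes=1 << 20) == p.md5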
1.109375
1
collection/cp/algorithms-master/python/binary_tree.py
daemonslayer/Notebook
1
5120
""" Binary Tree and basic properties 1. In-Order Traversal 2. Pre-Order Traversal 3. Post-Order Traversal 4. Level-Order Traversal """ from collections import deque class BinaryTree(object): """ Representation of a general binary tree data: value of element left: Left subtree right: Right subtree """ def __init__(self, data, left=None, right=None): if data is None: raise ValueError('data cannot be null') self.data = data self.left = left self.right = right def insert(self, data): raise NotImplementedError('Method insert is not Implemented') def delete(self, data): raise NotImplementedError('Method delete is not implemented') def inorder_traversal(self, write=True): """ Return list of node data as inorder traversal. If write is True then print as well. This is a iterative tree inorder traversal. Algorithm: 1. Create a stack of nodes node_stack 2. Mark root as current 3. While current is not none or node_stack is not empty a. While current is not empty push current to nde_stack and reassign current to current->left b. If current is empty and node_stack is not empty then pop the top of stack and print that node c. mark current as poped_node->right """ traversal_lis = [] node_stack = [] current = self while current or node_stack: while current: node_stack.append(current) current = current.left if node_stack: node = node_stack.pop() traversal_lis.append(node.data) current = node.right if write: for item in traversal_lis: print(item, end=' ') return traversal_lis def preorder_traversal(self, write=True): """ Return list of node data as preorder traversal. If write is true then print as well. Algorithm: 1. Create stack of nodes as node_stack 2. Mark root as current 3. While current is not none or node_stack is not empty a. While current is not empty i. Push current to node_stack ii. Add current->data to traversal_list iii. Reassign current to current->left b. If node_stack is not empty then pop the topmost node from node_stack and assign current to poped_node->right """ traversal_lis = [] node_stack = [] current = self while current or node_stack: while current: node_stack.append(current) traversal_lis.append(current.data) current = current.left if node_stack: node = node_stack.pop() current = node.right if write: for item in traversal_lis: print(item, end=' ') return traversal_lis def postorder_traversal(self, write=True): """ Return list of node data as postorder traversal. If write is true then print as well. Algorithm: 1. Create stack of nodes as node_stack 2. Mark root as current 3. While current is not None or node_stack is not empty a. While current is not None i. Push current to node_stack ii. Append current->data to traversal_list iii. Reassign current as current->right !IMPORTANT: Here we're iterating on current-right as we're doing postorder traversal b. If node_stack is not empty then pop top node and assign poped_node->left to current """ traversal_lis = [] node_stack = [] current = self while current or node_stack: while current: node_stack.append(current) traversal_lis.append(current.data) current = current.right if node_stack: node = node_stack.pop() current = node.left if write: for item in traversal_lis: print(item, end=' ') return traversal_lis def levelorder_traversal(self, write=True): """ Return list of node data as level order traversal. If write is true then print as well. Algorithm: 1. Maintain a queue of nodes to process as node_queue 2. Push root to node_queue 3. While node_queue is not empty a. Get top node of node_queue as top b. Push top->data to traversal_list c. 
Append top->left and top->right into node_queue if they are not null """ traversal_list = [] node_queue = deque() node_queue.append(self) while node_queue: top = node_queue.popleft() traversal_list.append(top.data) if top.left: node_queue.append(top.left) if top.right: node_queue.append(top.right) if write: for item in traversal_list: print(item, end=' ') return traversal_list def main(): """ Tree Structure: 1 / \ 2 3 / \ 4 5 """ tree = BinaryTree(1) tree.left = BinaryTree(2) tree.right = BinaryTree(3) tree.left.left = BinaryTree(4) tree.left.right = BinaryTree(5) assert tree.inorder_traversal(write=False) == [4, 2, 5, 1, 3] assert tree.preorder_traversal(write=False) == [1, 2, 4, 5, 3] assert tree.postorder_traversal(write=False) == [1, 3, 2, 5, 4] assert tree.levelorder_traversal(write=False) == [1, 2, 3, 4, 5] if __name__ == '__main__': main()
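
# Hedged addition, not part of the original module: a recursive postorder
# traversal as a cross-check. For the tree built in main() it yields
# [4, 5, 2, 3, 1], whereas the iterative postorder_traversal() above yields
# [1, 3, 2, 5, 4] (root-right-left, i.e. a reversed postorder).
def postorder_recursive(node):
    """Return the left-right-root sequence of a BinaryTree rooted at `node`."""
    if node is None:
        return []
    return (postorder_recursive(node.left)
            + postorder_recursive(node.right)
            + [node.data])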
3.71875
4
src/py/gee/utils.py
openforis/collectearthonline
0
5128
import datetime import os import ee import math import sys import json from ee.ee_exception import EEException from gee.inputs import getLandsat, getS1 ########## Helper functions ########## def initialize(ee_account='', ee_key_path=''): try: if ee_account and ee_key_path and os.path.exists(ee_key_path): credentials = ee.ServiceAccountCredentials(ee_account, ee_key_path) ee.Initialize(credentials) else: ee.Initialize() except Exception as e: print(e) def getReducer(reducer): reducerName = reducer.lower() if(reducerName == 'min'): return ee.Reducer.min() elif (reducerName == 'max'): return ee.Reducer.max() elif (reducerName == 'mean'): return ee.Reducer.mean() elif (reducerName == 'mode'): return ee.Reducer.mode() elif (reducerName == 'first'): return ee.Reducer.first() elif (reducerName == 'last'): return ee.Reducer.last() elif (reducerName == 'sum'): return ee.Reducer.sum() else: return ee.Reducer.median() def reduceIC(imageCollection, reducer): reducerName = reducer.lower() if(reducerName == 'min'): return imageCollection.min() elif (reducerName == 'max'): return imageCollection.max() elif (reducerName == 'mean'): return imageCollection.mean() elif (reducerName == 'mode'): return imageCollection.mode() elif (reducerName == 'mosaic'): return imageCollection.mosaic() elif (reducerName == 'first'): return imageCollection.first() elif (reducerName == 'sum'): return imageCollection.sum() else: return imageCollection.median() def safeParseJSON(val): if isinstance(val, dict): return val else: try: return json.loads(val) except Exception as e: try: return json.loads(val.replace("'", "\"")) except Exception as e: return {} ########## Helper routes ########## def listAvailableBands(name, assetType): eeImage = None if assetType == "imageCollection": eeImage = ee.ImageCollection(name).first() else: eeImage = ee.Image(name) return { 'bands': eeImage.bandNames().getInfo(), 'imageName': name } ########## ee.Image ########## def imageToMapId(image, visParams): eeImage = ee.Image(image) mapId = eeImage.getMapId(visParams) # TODO, just return URL so the routes are easier to deduce whats being returned. return { 'url': mapId['tile_fetcher'].url_format } ########## ee.ImageCollection ########## def imageCollectionToMapId(assetId, visParams, reducer, startDate, endDate): eeCollection = ee.ImageCollection(assetId) if (startDate and endDate): eeFilterDate = ee.Filter.date(startDate, endDate) eeCollection = eeCollection.filter(eeFilterDate) reducedImage = ee.Image(reduceIC(eeCollection, reducer)) return imageToMapId(reducedImage, visParams) # TODO, should we allow user to select first cloud free image again? 
def firstCloudFreeImageInMosaicToMapId(assetId, visParams, startDate, endDate): skipCloudMask = False eeCollection = ee.ImageCollection(assetId) lowerAsset = assetId.lower() if("b2" not in visParams["bands"].lower()): skipCloudMask = True elif ("lc8" in lowerAsset): skipCloudMask = False elif ("le7" in lowerAsset): skipCloudMask = False elif ("lt5" in lowerAsset): skipCloudMask = False else: skipCloudMask = True if (startDate and endDate): eeFilterDate = ee.Filter.date(startDate, endDate) eeCollection = eeCollection.filter(eeFilterDate) eeFirstImage = ee.Image(eeCollection.mosaic()) try: if(skipCloudMask == False): sID = '' if ("lc8" in lowerAsset): sID = 'OLI_TIRS' elif ("le7" in lowerAsset): sID = 'ETM' elif ("lt5" in lowerAsset): sID = 'TM' scored = ee.Algorithms.Landsat.simpleCloudScore( eeFirstImage.set('SENSOR_ID', sID)) mask = scored.select(['cloud']).lte(20) masked = eeFirstImage.updateMask(mask) values = imageToMapId(masked, visParams) else: values = imageToMapId(eeFirstImage, visParams) except EEException as ine: imageToMapId(eeFirstImage, visParams) return values ########## ee.FeatureCollection ########## def getFeatureCollectionTileUrl(featureCollection, field, matchID, visParams): fc = ee.FeatureCollection(featureCollection) single = fc.filter(ee.Filter.equals(field, matchID)) mapId = ee.Image().paint(single, 0, 2).getMapId(visParams) return mapId['tile_fetcher'].url_format ########## Pre defined ee.ImageCollection ########## # Index Image Collection def lsMaskClouds(img, cloudThresh=10): score = ee.Image(1.0) # Clouds are reasonably bright in the blue band. blue_rescale = img.select('blue').subtract(ee.Number(0.1)).divide( ee.Number(0.3).subtract(ee.Number(0.1))) score = score.min(blue_rescale) # Clouds are reasonably bright in all visible bands. visible = img.select('red').add( img.select('green')).add(img.select('blue')) visible_rescale = visible.subtract(ee.Number(0.2)).divide( ee.Number(0.8).subtract(ee.Number(0.2))) score = score.min(visible_rescale) # Clouds are reasonably bright in all infrared bands. infrared = img.select('nir').add( img.select('swir1')).add(img.select('swir2')) infrared_rescale = infrared.subtract(ee.Number(0.3)).divide( ee.Number(0.8).subtract(ee.Number(0.3))) score = score.min(infrared_rescale) # Clouds are reasonably cool in temperature. temp_rescale = img.select('temp').subtract(ee.Number(300)).divide( ee.Number(290).subtract(ee.Number(300))) score = score.min(temp_rescale) # However, clouds are not snow. ndsi = img.normalizedDifference(['green', 'swir1']) ndsi_rescale = ndsi.subtract(ee.Number(0.8)).divide( ee.Number(0.6).subtract(ee.Number(0.8))) score = score.min(ndsi_rescale).multiply(100).byte() mask = score.lt(cloudThresh).rename(['cloudMask']) img = img.updateMask(mask) return img.addBands(score) def s2MaskClouds(img): qa = img.select('QA60') # Bits 10 and 11 are clouds and cirrus, respectively. cloudBitMask = int(math.pow(2, 10)) cirrusBitMask = int(math.pow(2, 11)) # clear if both flags set to zero. 
    clear = qa.bitwiseAnd(cloudBitMask).eq(0).And(
        qa.bitwiseAnd(cirrusBitMask).eq(0))
    return img.divide(10000).updateMask(clear).set(
        'system:time_start', img.get('system:time_start'))


def bandPassAdjustment(img):
    keep = img.select(['temp'])
    bands = ['blue', 'green', 'red', 'nir', 'swir1', 'swir2']
    # Linear regression coefficients for the band-pass adjustment.
    gain = ee.Array([[0.977], [1.005], [0.982], [1.001], [1.001], [0.996]])
    bias = ee.Array([[-0.00411], [-0.00093], [0.00094],
                     [-0.00029], [-0.00015], [-0.00097]])
    # Make an Array Image, with a 2-D Array per pixel.
    arrayImage2D = img.select(bands).toArray().toArray(1)
    # Apply the correction factors and reproject the array to a geographic image.
    componentsImage = ee.Image(gain).multiply(arrayImage2D).add(ee.Image(bias)) \
        .arrayProject([0]).arrayFlatten([bands]).float()
    # .set('system:time_start', img.get('system:time_start'))
    return keep.addBands(componentsImage)


def getLandSatMergedCollection():
    sensorBandDictLandsatTOA = {'L8': [1, 2, 3, 4, 5, 9, 6],
                                'L7': [0, 1, 2, 3, 4, 5, 7],
                                'L5': [0, 1, 2, 3, 4, 5, 6],
                                'L4': [0, 1, 2, 3, 4, 5, 6],
                                'S2': [1, 2, 3, 7, 11, 10, 12]}
    bandNamesLandsatTOA = ['blue', 'green', 'red',
                           'nir', 'swir1', 'temp', 'swir2']
    metadataCloudCoverMax = 100
    lt4 = ee.ImageCollection('LANDSAT/LT4_L1T_TOA') \
        .filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \
        .select(sensorBandDictLandsatTOA['L4'], bandNamesLandsatTOA) \
        .map(lsMaskClouds)
    lt5 = ee.ImageCollection('LANDSAT/LT5_L1T_TOA') \
        .filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \
        .select(sensorBandDictLandsatTOA['L5'], bandNamesLandsatTOA) \
        .map(lsMaskClouds)
    le7 = ee.ImageCollection('LANDSAT/LE7_L1T_TOA') \
        .filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \
        .select(sensorBandDictLandsatTOA['L7'], bandNamesLandsatTOA) \
        .map(lsMaskClouds)
    lc8 = ee.ImageCollection('LANDSAT/LC08/C01/T1_TOA') \
        .filterMetadata('CLOUD_COVER', 'less_than', metadataCloudCoverMax) \
        .select(sensorBandDictLandsatTOA['L8'], bandNamesLandsatTOA) \
        .map(lsMaskClouds)
    s2 = ee.ImageCollection('COPERNICUS/S2') \
        .filterMetadata('CLOUDY_PIXEL_PERCENTAGE', 'less_than', metadataCloudCoverMax) \
        .map(s2MaskClouds) \
        .select(sensorBandDictLandsatTOA['S2'], bandNamesLandsatTOA) \
        .map(bandPassAdjustment)
    return ee.ImageCollection(lt4.merge(lt5).merge(le7).merge(lc8).merge(s2))


def filteredImageNDVIToMapId(startDate, endDate):
    def calcNDVI(img):
        return img.expression('(i.nir - i.red) / (i.nir + i.red)', {'i': img}) \
            .rename(['NDVI']) \
            .set('system:time_start', img.get('system:time_start'))
    eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
    colorPalette = 'c9c0bf,435ebf,eee8aa,006400'
    visParams = {'opacity': 1, 'max': 1, 'min': -1, 'palette': colorPalette}
    ndviImage = ee.Image(eeCollection.map(calcNDVI).mean())
    return imageToMapId(ndviImage, visParams)


def filteredImageEVIToMapId(startDate, endDate):
    def calcEVI(img):
        return img.expression(
            '2.5 * (i.nir - i.red) / (i.nir + 6.0 * i.red - 7.5 * i.blue + 1)',
            {'i': img}) \
            .rename(['EVI']) \
            .set('system:time_start', img.get('system:time_start'))
    eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
    colorPalette = 'F5F5F5,E6D3C5,C48472,B9CF63,94BF3D,6BB037,42A333,00942C,008729,007824,004A16'
    visParams = {'opacity': 1, 'max': 1, 'min': -1, 'palette': colorPalette}
    eviImage = ee.Image(eeCollection.map(calcEVI).mean())
    return imageToMapId(eviImage, visParams)


def filteredImageEVI2ToMapId(startDate, endDate):
    def calcEVI2(img):
        return img.expression(
            '2.5 * (i.nir - i.red) / (i.nir + 2.4 * i.red + 1)',
            {'i': img}) \
            .rename(['EVI2']) \
            .set('system:time_start', img.get('system:time_start'))
    eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
    colorPalette = 'F5F5F5,E6D3C5,C48472,B9CF63,94BF3D,6BB037,42A333,00942C,008729,007824,004A16'
    visParams = {'opacity': 1, 'max': 1, 'min': -1, 'palette': colorPalette}
    evi2Image = ee.Image(eeCollection.map(calcEVI2).mean())
    return imageToMapId(evi2Image, visParams)


def filteredImageNDMIToMapId(startDate, endDate):
    def calcNDMI(img):
        return img.expression('(i.nir - i.swir1) / (i.nir + i.swir1)', {'i': img}) \
            .rename(['NDMI']) \
            .set('system:time_start', img.get('system:time_start'))
    eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
    colorPalette = '0000FE,2E60FD,31B0FD,00FEFE,50FE00,DBFE66,FEFE00,FFBB00,FF6F00,FE0000'
    visParams = {'opacity': 1, 'max': 1, 'min': -1, 'palette': colorPalette}
    ndmiImage = ee.Image(eeCollection.map(calcNDMI).mean())
    return imageToMapId(ndmiImage, visParams)


def filteredImageNDWIToMapId(startDate, endDate):
    def calcNDWI(img):
        return img.expression('(i.green - i.nir) / (i.green + i.nir)', {'i': img}) \
            .rename(['NDWI']) \
            .set('system:time_start', img.get('system:time_start'))
    eeCollection = getLandSatMergedCollection().filterDate(startDate, endDate)
    colorPalette = '505050,E8E8E8,00FF33,003300'
    visParams = {'opacity': 1, 'max': 1, 'min': -1, 'palette': colorPalette}
    ndwiImage = ee.Image(eeCollection.map(calcNDWI).mean())
    return imageToMapId(ndwiImage, visParams)


def filteredImageByIndexToMapId(startDate, endDate, index):
    lowerIndex = index.lower()
    if lowerIndex == 'ndvi':
        return filteredImageNDVIToMapId(startDate, endDate)
    elif lowerIndex == 'evi':
        return filteredImageEVIToMapId(startDate, endDate)
    elif lowerIndex == 'evi2':
        return filteredImageEVI2ToMapId(startDate, endDate)
    elif lowerIndex == 'ndmi':
        return filteredImageNDMIToMapId(startDate, endDate)
    elif lowerIndex == 'ndwi':
        return filteredImageNDWIToMapId(startDate, endDate)


def filteredImageCompositeToMapId(assetId, visParams, startDate, endDate,
                                  metadataCloudCoverMax, simpleCompositeVariable):
    eeCollection = ee.ImageCollection(assetId)
    if startDate and endDate:
        eeCollection = eeCollection.filterDate(startDate, endDate)
    # Keep only scenes below the cloud-cover threshold.
    eeCollection = eeCollection.filterMetadata(
        'CLOUD_COVER', 'less_than', metadataCloudCoverMax)
    eeMosaicImage = ee.Algorithms.Landsat.simpleComposite(
        eeCollection, simpleCompositeVariable, 10, 40, True)
    return imageToMapId(eeMosaicImage, visParams)


def filteredSentinelComposite(visParams, startDate, endDate, metadataCloudCoverMax):
    def cloudScore(img):
        def rescale(img, exp, thresholds):
            return img.expression(exp, {'img': img}) \
                .subtract(thresholds[0]).divide(thresholds[1] - thresholds[0])
        score = ee.Image(1.0)
        score = score.min(rescale(img, 'img.B2', [0.1, 0.3]))
        score = score.min(rescale(img, 'img.B4 + img.B3 + img.B2', [0.2, 0.8]))
        score = score.min(
            rescale(img, 'img.B8 + img.B11 + img.B12', [0.3, 0.8]))
        ndsi = img.normalizedDifference(['B3', 'B11'])
        return score.min(rescale(ndsi, 'img', [0.8, 0.6]))

    def cloudScoreS2(img):
        rescale = img.divide(10000)
        score = cloudScore(rescale).multiply(100).rename('cloudscore')
        return img.addBands(score)

    sentinel2 = ee.ImageCollection('COPERNICUS/S2')
    f2017s2 = sentinel2.filterDate(startDate, endDate).filterMetadata(
        'CLOUDY_PIXEL_PERCENTAGE', 'less_than', metadataCloudCoverMax)
    m2017s2 = f2017s2.map(cloudScoreS2)
    m2017s3 = m2017s2.median()
    return imageToMapId(m2017s3, visParams)


def filteredSentinelSARComposite(visParams, startDate, endDate):
    def toNatural(img):
        return ee.Image(10).pow(img.divide(10))

    def addRatioBands(img):
        # Not using the angle band.
        vv = img.select('VV')
        vh = img.select('VH')
        vv_vh = vv.divide(vh).rename('VV/VH')
        vh_vv = vh.divide(vv).rename('VH/VV')
        return vv.addBands(vh).addBands(vv_vh).addBands(vh_vv)

    sentinel1 = ee.ImageCollection('COPERNICUS/S1_GRD')
    sentinel1 = sentinel1.filterDate(startDate, endDate) \
        .filter(ee.Filter.listContains('transmitterReceiverPolarisation', 'VV')) \
        .filter(ee.Filter.listContains('transmitterReceiverPolarisation', 'VH')) \
        .filter(ee.Filter.eq('instrumentMode', 'IW'))
    sentinel1 = sentinel1.map(toNatural)
    sentinel1 = sentinel1.map(addRatioBands)
    median = sentinel1.median()
    return imageToMapId(median, visParams)


########## Time Series ##########


def getTimeSeriesByCollectionAndIndex(assetId, indexName, scale, coords, startDate, endDate, reducer):
    if isinstance(coords[0], list):
        geometry = ee.Geometry.Polygon(coords)
    else:
        geometry = ee.Geometry.Point(coords)
    if indexName is not None:
        indexCollection = ee.ImageCollection(assetId).filterDate(
            startDate, endDate).select(indexName)
    else:
        indexCollection = ee.ImageCollection(
            assetId).filterDate(startDate, endDate)

    def getIndex(image):
        theReducer = getReducer(reducer)
        if indexName is not None:
            indexValue = image.reduceRegion(
                theReducer, geometry, scale).get(indexName)
        else:
            indexValue = image.reduceRegion(theReducer, geometry, scale)
        date = image.get('system:time_start')
        indexImage = ee.Image().set(
            'indexValue', [ee.Number(date), indexValue])
        return indexImage

    def getClipped(image):
        return image.clip(geometry)

    clippedCollection = indexCollection.map(getClipped)
    indexCollection1 = clippedCollection.map(getIndex)
    indexCollection2 = indexCollection1.aggregate_array('indexValue')
    return indexCollection2.getInfo()


def getTimeSeriesByIndex(indexName, scale, coords, startDate, endDate, reducer):
    bandsByCollection = {
        'LANDSAT/LC08/C01/T1_TOA': ['B2', 'B3', 'B4', 'B5', 'B6', 'B7'],
        'LANDSAT/LC08/C01/T2_TOA': ['B2', 'B3', 'B4', 'B5', 'B6', 'B7'],
        'LANDSAT/LE07/C01/T1_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
        'LANDSAT/LE07/C01/T2_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
        'LANDSAT/LT05/C01/T1_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
        'LANDSAT/LT05/C01/T2_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
        'LANDSAT/LT04/C01/T1_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7'],
        'LANDSAT/LT04/C01/T2_TOA': ['B1', 'B2', 'B3', 'B4', 'B5', 'B7']
    }
    indexes = {
        'NDVI': '(nir - red) / (nir + red)',
        'EVI': '2.5 * (nir - red) / (nir + 6.0 * red - 7.5 * blue + 1)',
        'EVI2': '2.5 * (nir - red) / (nir + 2.4 * red + 1)',
        'NDMI': '(nir - swir1) / (nir + swir1)',
        'NDWI': '(green - nir) / (green + nir)',
        'NBR': '(nir - swir2) / (nir + swir2)',
        'LSAVI': '((nir - red) / (nir + red + 0.5)) * (1 + 0.5)'
    }

    def create(name):
        def maskClouds(image):
            def isSet(types):
                """ https://landsat.usgs.gov/collectionqualityband """
                typeByValue = {
                    'badPixels': 15,
                    'cloud': 16,
                    'shadow': 256,
                    'snow': 1024,
                    'cirrus': 4096
                }
                anySet = ee.Image(0)
                for Type in types:
                    anySet = anySet.Or(image.select(
                        'BQA').bitwiseAnd(typeByValue[Type]).neq(0))
                return anySet
            return image.updateMask(isSet(['badPixels', 'cloud', 'shadow', 'cirrus']).Not())

        def toIndex(image):
            bands = bandsByCollection[name]
            return image.expression(indexes[indexName], {
                'blue': image.select(bands[0]),
                'green': image.select(bands[1]),
                'red': image.select(bands[2]),
                'nir': image.select(bands[3]),
                'swir1': image.select(bands[4]),
                'swir2': image.select(bands[5]),
            }).clamp(-1, 1).rename(['index'])

        def toIndexWithTimeStart(image):
            time = image.get('system:time_start')
            image = maskClouds(image)
            return toIndex(image).set('system:time_start', time)

        if startDate and endDate:
            return ee.ImageCollection(name).filterDate(startDate, endDate) \
                .filterBounds(geometry).map(toIndexWithTimeStart, True)
        else:
            return ee.ImageCollection(name).filterBounds(geometry) \
                .map(toIndexWithTimeStart, True)

    def reduceRegion(image):
        theReducer = getReducer(reducer)
        reduced = image.reduceRegion(
            theReducer, geometry=geometry, scale=scale, maxPixels=1e6)
        return ee.Feature(None, {
            'index': reduced.get('index'),
            'timeIndex': [image.get('system:time_start'), reduced.get('index')]
        })

    if isinstance(coords[0], list) or isinstance(coords[0], tuple):
        geometry = ee.Geometry.Polygon(coords)
    else:
        geometry = ee.Geometry.Point(coords)
    collection = ee.ImageCollection([])
    for name in bandsByCollection:
        collection = collection.merge(create(name))
    return ee.ImageCollection(ee.ImageCollection(collection)
                              .sort('system:time_start')
                              .distinct('system:time_start')) \
        .map(reduceRegion) \
        .filterMetadata('index', 'not_equals', None) \
        .aggregate_array('timeIndex') \
        .getInfo()


########## Degradation ##########


def getDegradationTileUrlByDateS1(geometry, date, visParams):
    imDate = datetime.datetime.strptime(date, "%Y-%m-%d")
    befDate = imDate - datetime.timedelta(days=1)
    aftDate = imDate + datetime.timedelta(days=1)
    if isinstance(geometry[0], list):
        geometry = ee.Geometry.Polygon(geometry)
    else:
        geometry = ee.Geometry.Point(geometry)
    sentinel1Data = getS1({
        "targetBands": ['VV', 'VH', 'VV/VH'],
        "region": geometry
    })
    start = befDate.strftime('%Y-%m-%d')
    end = aftDate.strftime('%Y-%m-%d')
    selectedImage = sentinel1Data.filterDate(start, end).first()
    selectedImage = ee.Image(selectedImage)
    mapparams = selectedImage.getMapId(visParams)
    return mapparams['tile_fetcher'].url_format


def getDegradationPlotsByPointS1(geometry, start, end):
    if isinstance(geometry[0], list):
        geometry = ee.Geometry.Polygon(geometry)
    else:
        geometry = ee.Geometry.Point(geometry)
    sentinel1Data = getS1({
        "targetBands": ['VV', 'VH', 'VV/VH'],
        "region": geometry
    }).filterDate(start, end)

    def myimageMapper(img):
        theReducer = ee.Reducer.mean()
        indexValue = img.reduceRegion(theReducer, geometry, 30)
        date = img.get('system:time_start')
        visParams = {'bands': ['VV', 'VH', 'ratioVVVH'],
                     'min': [-15, -25, .40],
                     'max': [0, -10, 1],
                     'gamma': 1.6}
        indexImage = ee.Image().set(
            'indexValue', [ee.Number(date), indexValue])
        return indexImage

    lsd = sentinel1Data.map(myimageMapper, True)
    indexCollection2 = lsd.aggregate_array('indexValue')
    values = indexCollection2.getInfo()
    return values


def getDegradationTileUrlByDate(geometry, date, visParams):
    imDate = datetime.datetime.strptime(date, "%Y-%m-%d")
    startDate = imDate - datetime.timedelta(days=1)
    endDate = imDate + datetime.timedelta(days=1)
    if isinstance(geometry[0], list):
        geometry = ee.Geometry.Polygon(geometry)
    else:
        geometry = ee.Geometry.Point(geometry)
    landsatData = getLandsat({
        "start": startDate.strftime('%Y-%m-%d'),
        "end": endDate.strftime('%Y-%m-%d'),
        "targetBands": ['RED', 'GREEN', 'BLUE', 'SWIR1', 'NIR'],
        "region": geometry,
        "sensors": {"l4": False, "l5": False, "l7": False, "l8": True}
    })
    selectedImage = landsatData.first()
    unmasked = ee.Image(selectedImage).multiply(10000).toInt16().unmask()
    mapparams = unmasked.getMapId(visParams)
    return mapparams['tile_fetcher'].url_format


def getDegradationPlotsByPoint(geometry, start, end, band):
    if isinstance(geometry[0], list):
        geometry = ee.Geometry.Polygon(geometry)
    else:
        geometry = ee.Geometry.Point(geometry)
    landsatData = getLandsat({
        "start": start,
        "end": end,
        "targetBands": [band],
        "region": geometry,
        "sensors": {"l4": True, "l5": True, "l7": True, "l8": True}
    })

    def myImageMapper(img):
        theReducer = ee.Reducer.mean()
        indexValue = img.reduceRegion(theReducer, geometry, 30)
        date = img.get('system:time_start')
        indexImage = ee.Image().set(
            'indexValue', [ee.Number(date), indexValue])
        return indexImage

    lsd = landsatData.map(myImageMapper, True)
    indexCollection2 = lsd.aggregate_array('indexValue')
    values = indexCollection2.getInfo()
    return values


########## Stats ##########


def getStatistics(extent):
    extentGeom = ee.Geometry.Polygon(extent)
    elev = ee.Image('USGS/GTOPO30')
    minmaxElev = elev.reduceRegion(
        ee.Reducer.minMax(), extentGeom, 1000, maxPixels=500000000)
    minElev = minmaxElev.get('elevation_min').getInfo()
    maxElev = minmaxElev.get('elevation_max').getInfo()
    ciesinPopGrid = ee.Image('CIESIN/GPWv4/population-count/2020')
    popDict = ciesinPopGrid.reduceRegion(
        ee.Reducer.sum(), extentGeom, maxPixels=500000000)
    pop = popDict.get('population-count').getInfo()
    pop = int(pop)
    return {
        'minElev': minElev,
        'maxElev': maxElev,
        'pop': pop
    }
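

# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of how the index and time-series helpers above might be
# called. Assumptions: `ee` is the module-level Earth Engine import used
# throughout this file, valid Earth Engine credentials are available for
# ee.Initialize(), and 'mean' is an accepted name for the getReducer()
# helper defined earlier in the module (that helper is not shown here, so
# the reducer name is an assumption). Dates and coordinates are arbitrary.
if __name__ == '__main__':
    ee.Initialize()
    # Tile information for a mean NDVI composite over one year.
    ndviLayer = filteredImageNDVIToMapId('2018-01-01', '2018-12-31')
    print(ndviLayer)
    # NDVI time series for a single point ([lon, lat]) at 30 m scale.
    series = getTimeSeriesByIndex('NDVI', 30, [-62.0, -3.0],
                                  '2018-01-01', '2018-12-31', 'mean')
    print(series[:5])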
1.671875
2