max_stars_repo_path (string, length 4 to 197) | max_stars_repo_name (string, length 6 to 120) | max_stars_count (int64, 0 to 191k) | id (string, length 1 to 8) | content (string, length 6 to 964k) | score (float64, -0.88 to 3.95) | int_score (int64, 0 to 4)
---|---|---|---|---|---|---|
hth/shows/tests/factories.py | roperi/myband | 1 | 12804 | from datetime import date
from random import randrange
import factory
import factory.fuzzy
from hth.core.tests.utils import from_today
class VenueFactory(factory.django.DjangoModelFactory):
class Meta:
model = 'shows.Venue'
name = factory.Sequence(lambda n: 'Venue %d' % n)
city = factory.Sequence(lambda n: 'City %d' % n)
website = factory.Sequence(lambda n: 'http://venue-%d.dev' % n)
class GigFactory(factory.django.DjangoModelFactory):
class Meta:
model = 'shows.Gig'
date = factory.fuzzy.FuzzyDate(date(2000, 1, 1))
venue = factory.SubFactory(VenueFactory)
description = factory.fuzzy.FuzzyText(length=100)
details = factory.fuzzy.FuzzyText(length=100)
class PublishedGigFactory(GigFactory):
publish = True
class UpcomingGigFactory(PublishedGigFactory):
# Pick a random date from today through next year
date = factory.LazyAttribute(lambda obj: from_today(days=randrange(365)))
@classmethod
def create_batch(cls, size, **kwargs):
batch = super().create_batch(size, **kwargs)
return sorted(batch, key=lambda x: x.date)
class PastGigFactory(PublishedGigFactory):
# Pick a random date from 10 years ago through yesterday
date = factory.LazyAttribute(lambda obj: from_today(randrange(-3650, 0)))
@classmethod
def create_batch(cls, size, **kwargs):
batch = super().create_batch(size, **kwargs)
return sorted(batch, key=lambda x: x.date, reverse=True)
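# Illustrative usage (editor's sketch, not part of the original module): assuming Django is
# configured with the 'shows' app, these factories can seed test data, e.g.:
#
#     gig = UpcomingGigFactory()              # published gig with a date in the next year
#     gigs = PastGigFactory.create_batch(5)   # five published past gigs, newest first
#     assert gigs[0].date >= gigs[-1].date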
| 1.6875 | 2 |
frames.py | mppc12/special_subject_tea | 0 | 12932 |
import pandas as pd
from group import Group
class Frames:
def __init__(self, frame=None):
self.cleanups = Cleanup()
self.groups = Group()
class Cleanup:
def __init__(self, frame=None):
self.frame = frame
def __call__(self, frame):
self.frame = frame
return self
def dropcol(self):
column = ['貨品號列', '重量(公噸)', '英文貨名', '數量(限11碼貨品)', '數量單位']
frame = self.frame.drop(column, axis=1, inplace=False)
return frame
def droprow(self):
rowitem = ['普洱茶,每包不超過3公斤',
'普洱茶,每包超過3公斤',
'茶或馬黛茶之萃取物、精、濃縮物及以茶、馬黛茶之萃取物、精、濃縮物或以茶、馬黛茶為主要成分之調製品']
frame = self.frame[self.frame['中文貨名'].isin(rowitem) == False]
return frame
    def modifydate(self):
        # map Republic of China (Minguo) calendar years, e.g. '92年', to Gregorian years
rc_to_vi = {'92年':'2003', '93年':'2004', '94年':'2005', '95年':'2006',
'96年':'2007', '97年':'2008', '98年':'2009', '99年':'2010',
'100年':'2011', '101年':'2012', '102年':'2013', '103年':'2014',
'104年':'2015', '105年':'2016', '106年':'2017', '107年':'2018',
'108年':'2019'}
frame = self.frame.replace(rc_to_vi, inplace = False)
return frame
def dtypeint(self):
dtypes = ['重量(公斤)', '美元(千元)']
for i in dtypes:
self.frame[i] = pd.to_numeric(self.frame[i])
frame = self.frame
return frame
def modifyitem(self):
item = {'其他綠茶(未發酵),每包超過3公斤': '綠茶(未發酵),每包超過3公斤',
'薰芬綠茶,每包超過3公斤' : '綠茶(未發酵),每包超過3公斤'}
frame = self.frame.replace(item, inplace = False)
return frame
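# Illustrative usage (editor's sketch, not part of the original module): assuming `df` is a
# pandas DataFrame of the tea trade statistics with the Chinese column names used above, the
# cleanup steps can be chained through the callable wrapper, e.g.:
#
#     frames = Frames()
#     df = frames.cleanups(df).dropcol()
#     df = frames.cleanups(df).modifydate()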
| 1.796875 | 2 |
dumplogs/bin.py | xinhuagu/dumplogs | 1 | 13060 | import boto3
import argparse
import os,sys
def main(argv=None):
argv = (argv or sys.argv)[1:]
parser = argparse.ArgumentParser(description='dump all aws log streams into files')
parser.add_argument("--profile",
dest="aws_profile",
type=str,
default=os.environ.get('AWS_PROFILE', None),
help="aws profile")
parser.add_argument("-o", "--output",
type=str,
dest='output',
default=".",
help="output folder")
parser.add_argument('group_name',help='aws loggroup name')
options,args = parser.parse_known_args(argv)
    """
    main logic
    """
    aws_profile = options.aws_profile
    # Use the requested AWS profile if one was given, otherwise the default credential chain.
    session = boto3.Session(profile_name=aws_profile) if aws_profile else boto3.Session()
    client = session.client('logs')
group_name = options.group_name
output_folder = options.output
stream_list=[]
stream_response = client.describe_log_streams(
logGroupName=group_name,
orderBy='LastEventTime',
limit=50,
)
while True:
stream_name_arr = stream_response['logStreams']
for stream_elm in stream_name_arr:
stream_name = stream_elm['logStreamName']
stream_list.append(stream_name)
if "nextToken" in stream_response:
next_token = stream_response['nextToken']
stream_response = client.describe_log_streams(
logGroupName=group_name,
orderBy='LastEventTime',
nextToken=next_token,
limit=50,
)
else:
break
print("loggroup {} has total {} streams".format(group_name,len(stream_list)))
for s_name in stream_list:
file_name=s_name.replace("[$LATEST]", "").replace("/","-")
stream_content= client.get_log_events(
logGroupName=group_name,
logStreamName=s_name,
)
print("{} ==> {}".format(s_name,file_name))
completeName = os.path.join(output_folder, file_name)
with open(completeName, "w") as text_file:
text_file.write("{}".format(stream_content))
print("Done.")
| 1.71875 | 2 |
data_utils/dataset/kodak_dataset.py | hieu1999210/image_compression | 0 | 13188 |
# Copyright 2020 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
from glob import glob
from PIL import Image
from torch.utils.data import Dataset
from ..transforms import get_transforms
from .build import DATASET_REGISTRY
@DATASET_REGISTRY.register()
class KodakDataset(Dataset):
def __init__(self, data_folder, mode, cfg, **kwargs):
"""
"""
super().__init__()
self.cfg = cfg
self.paths = sorted(glob(f"{data_folder}/*"))
print(f"There are {len(self)} image in {mode} dataset")
self.transforms = get_transforms(cfg, mode)
def __len__(self):
return len(self.paths)
def __getitem__(self, idx):
"""
"""
path = self.paths[idx]
image_id = os.path.split(path)[-1].replace(".png", "")
img = self._load_img(idx)
img = self.transforms(img)
return image_id, img
def _load_img(self, idx):
"""
        args: idx: index of the image in self.paths
return: pillow image
"""
image = Image.open(self.paths[idx]).convert('RGB')
return image
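# Illustrative usage (editor's sketch): assuming a config object understood by get_transforms()
# and a folder of Kodak PNGs, the dataset behaves like any torch Dataset, e.g.:
#
#     ds = KodakDataset("data/kodak", mode="val", cfg=cfg)
#     image_id, img = ds[0]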
| 1.75 | 2 |
pytorch/torch/_utils_internal.py | raghavnauhria/whatmt | 15 | 13316 |
from __future__ import absolute_import, division, print_function, unicode_literals
import os
# this arbitrary-looking assortment of functionality is provided here
# to have a central place for overrideable behavior. The motivating
# use is the FB build environment, where this source file is replaced
# by an equivalent.
if os.path.basename(os.path.dirname(__file__)) == 'shared':
torch_parent = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
else:
torch_parent = os.path.dirname(os.path.dirname(__file__))
def get_file_path(*path_components):
return os.path.join(torch_parent, *path_components)
def get_file_path_2(*path_components):
return os.path.join(*path_components)
def get_writable_path(path):
return path
def prepare_multiprocessing_environment(path):
pass
def resolve_library_path(path):
return os.path.realpath(path)
TEST_MASTER_ADDR = '127.0.0.1'
TEST_MASTER_PORT = 29500
| 1.203125 | 1 |
src/houdini_package_runner/items/base.py | captainhammy/houdini_package_runner | 3 | 13444 |
"""This module contains a base runnable item."""
# =============================================================================
# IMPORTS
# =============================================================================
# Future
from __future__ import annotations
# Standard Library
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, List
# Imports for type checking.
if TYPE_CHECKING:
import pathlib
import houdini_package_runner.runners.base
# =============================================================================
# CLASSES
# =============================================================================
class BaseItem(ABC):
"""Base class for a runnable item.
:param write_back: Whether the item should write itself back to disk.
"""
def __init__(self, write_back: bool = False) -> None:
self._contents_changed = False
self._ignored_builtins: List[str] = []
self._is_single_line = False
self._is_test_item = False
self._write_back = write_back
def __repr__(self):
return f"<{self.__class__.__name__}>"
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def contents_changed(self) -> bool:
"""Whether the contents of the item have changed."""
return self._contents_changed
@contents_changed.setter
def contents_changed(self, contents_changed: bool):
self._contents_changed = contents_changed
# -------------------------------------------------------------------------
@property
def ignored_builtins(self) -> List[str]:
"""A list of known builtins to ignore for checks which look for imports."""
return self._ignored_builtins
# -------------------------------------------------------------------------
@property
def is_single_line(self) -> bool:
"""Whether the item code on a single line."""
return self._is_single_line
# -------------------------------------------------------------------------
@property
def is_test_item(self) -> bool:
"""Whether the item is a test related item."""
return self._is_test_item
@is_test_item.setter
def is_test_item(self, is_test_item: bool):
self._is_test_item = is_test_item
# -------------------------------------------------------------------------
@property
def write_back(self) -> bool:
"""Whether the item should write changes back."""
return self._write_back
@write_back.setter
def write_back(self, write_back):
self._write_back = write_back
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
@abstractmethod
def process(
self, runner: houdini_package_runner.runners.base.HoudiniPackageRunner
) -> int:
"""Process an item.
:param runner: The package runner processing the item.
:return: The process return code.
"""
class BaseFileItem(BaseItem):
"""Base class for a runnable item.
:param path: The path for the item.
:param write_back: Whether the item should write itself back to disk.
"""
def __init__(self, path: pathlib.Path, write_back: bool = False) -> None:
super().__init__(write_back=write_back)
self._path = path
def __repr__(self):
return f"<{self.__class__.__name__} {self.path}>"
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def path(self) -> pathlib.Path:
"""The path on disk."""
return self._path
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
@abstractmethod
def process(
self, runner: houdini_package_runner.runners.base.HoudiniPackageRunner
) -> int:
"""Process an item.
:param runner: The package runner processing the item.
:return: The process return code.
"""
| 1.140625 | 1 |
utils/extractor.py | nwoodward/twarc | 20 | 13572 |
#!/usr/bin/env python3
from datetime import datetime
import json
import os
import re
import argparse
import csv
import copy
import sys
import gzip
strptime = datetime.strptime
class attriObject:
"""Class object for attribute parser."""
def __init__(self, string):
self.value = re.split(":", string)
self.title = self.value[-1]
def getElement(self, json_object):
found = [json_object]
for entry in self.value:
for index in range(len(found)):
try:
found[index] = found[index][entry]
except (TypeError, KeyError):
print("'{0}' is not a valid json entry.".format(":".join(self.value)))
sys.exit()
#If single search object is a list, search entire list. Error if nested lists.
if isinstance(found[index], list):
if len(found) > 1:
raise Exception("Extractor currently does not handle nested lists.")
found = found[index]
return found
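# Illustrative behaviour (editor's note): an attribute path is colon-separated, so
# attriObject("user:screen_name").getElement({"user": {"screen_name": "nasa"}}) returns
# ["nasa"], and list-valued fields such as entities:hashtags:text fan out into one result
# per list element.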
def tweets_files(string, path):
"""Iterates over json files in path."""
for filename in os.listdir(path):
if re.match(string, filename) and ".jsonl" in filename:
f = gzip.open if ".gz" in filename else open
yield path + filename, f
def parse(args):
with open(args.output, 'w+', encoding="utf-8") as output:
csv_writer = csv.writer(output, dialect=args.dialect)
csv_writer.writerow([a.title for a in args.attributes])
count = 0
tweets = set()
for filename, f in tweets_files(args.string, args.path):
print("parsing", filename)
with f(filename, 'rb') as data_file:
for line in data_file:
try:
json_object = json.loads(line.decode("utf-8"))
except ValueError:
print("Error in", filename, "entry incomplete.")
continue
#Check for duplicates
identity = json_object['id']
if identity in tweets:
continue
tweets.add(identity)
#Check for time restrictions.
if args.start or args.end:
tweet_time = strptime(json_object['created_at'],'%a %b %d %H:%M:%S +0000 %Y')
if args.start and args.start > tweet_time:
continue
if args.end and args.end < tweet_time:
continue
#Check for hashtag.
if args.hashtag:
for entity in json_object['entities']["hashtags"]:
if entity['text'].lower() == args.hashtag:
break
else:
continue
count += extract(json_object, args, csv_writer)
print("Searched", len(tweets), "tweets and recorded", count, "items.")
print("largest id:", max(tweets))
def extract(json_object, args, csv_writer):
"""Extract and write found attributes."""
found = [[]]
for attribute in args.attributes:
item = attribute.getElement(json_object)
if len(item) == 0:
for row in found:
row.append("NA")
else:
found1 = []
for value in item:
if value is None:
value = "NA"
new = copy.deepcopy(found)
for row in new:
row.append(value)
found1.extend(new)
found = found1
for row in found:
csv_writer.writerow(row)
return len(found)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Extracts attributes from tweets.')
parser.add_argument("attributes", nargs='*', help="Attributes to search for. Attributes inside nested inside other attributes should be seperated by a colon. Example: user:screen_name, entities:hashtags:text.")
parser.add_argument("-dialect", default="excel", help="Sets dialect for csv output. Defaults to excel. See python module csv.list_dialects()")
parser.add_argument("-string", default="", help="Regular expression for files to parse. Defaults to empty string.")
parser.add_argument("-path", default="./", help="Optional path to folder containing tweets. Defaults to current folder.")
parser.add_argument("-output", default="output.csv", help="Optional file to output results. Defaults to output.csv.")
parser.add_argument("-start", default="", help="Define start date for tweets. Format (mm:dd:yyyy)")
parser.add_argument("-end", default="", help="Define end date for tweets. Format (mm:dd:yyyy)")
parser.add_argument("-hashtag", default="", help="Define a hashtag that must be in parsed tweets.")
args = parser.parse_args()
if not args.path.endswith("/"):
args.path += "/"
args.start = strptime(args.start, '%m:%d:%Y') if args.start else False
args.end = strptime(args.end, '%m:%d:%Y') if args.end else False
args.attributes = [attriObject(i) for i in args.attributes]
args.string = re.compile(args.string)
args.hashtag = args.hashtag.lower()
parse(args)
| 1.90625 | 2 |
env/lib/python2.7/site-packages/billiard/py2/reduction.py | jlwysf/onduty | 39 | 13700 |
#
# Module to allow connection and socket objects to be transferred
# between processes
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, <NAME>
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
import os
import sys
import socket
import threading
from pickle import Pickler
from .. import current_process
from .._ext import _billiard, win32
from ..util import register_after_fork, debug, sub_debug
is_win32 = sys.platform == 'win32'
is_pypy = hasattr(sys, 'pypy_version_info')
is_py3k = sys.version_info[0] == 3
if not(is_win32 or is_pypy or is_py3k or hasattr(_billiard, 'recvfd')):
raise ImportError('pickling of connections not supported')
close = win32.CloseHandle if sys.platform == 'win32' else os.close
__all__ = []
# globals set later
_listener = None
_lock = None
_cache = set()
#
# ForkingPickler
#
class ForkingPickler(Pickler): # noqa
dispatch = Pickler.dispatch.copy()
@classmethod
def register(cls, type, reduce):
def dispatcher(self, obj):
rv = reduce(obj)
self.save_reduce(obj=obj, *rv)
cls.dispatch[type] = dispatcher
def _reduce_method(m): # noqa
if m.__self__ is None:
return getattr, (m.__self__.__class__, m.__func__.__name__)
else:
return getattr, (m.__self__, m.__func__.__name__)
ForkingPickler.register(type(ForkingPickler.save), _reduce_method)
def _reduce_method_descriptor(m):
return getattr, (m.__objclass__, m.__name__)
ForkingPickler.register(type(list.append), _reduce_method_descriptor)
ForkingPickler.register(type(int.__add__), _reduce_method_descriptor)
try:
from functools import partial
except ImportError:
pass
else:
def _reduce_partial(p):
return _rebuild_partial, (p.func, p.args, p.keywords or {})
def _rebuild_partial(func, args, keywords):
return partial(func, *args, **keywords)
ForkingPickler.register(partial, _reduce_partial)
def dump(obj, file, protocol=None):
ForkingPickler(file, protocol).dump(obj)
#
# Platform specific definitions
#
if sys.platform == 'win32':
# XXX Should this subprocess import be here?
import _subprocess # noqa
def send_handle(conn, handle, destination_pid):
from ..forking import duplicate
process_handle = win32.OpenProcess(
win32.PROCESS_ALL_ACCESS, False, destination_pid
)
try:
new_handle = duplicate(handle, process_handle)
conn.send(new_handle)
finally:
close(process_handle)
def recv_handle(conn):
return conn.recv()
else:
def send_handle(conn, handle, destination_pid): # noqa
_billiard.sendfd(conn.fileno(), handle)
def recv_handle(conn): # noqa
return _billiard.recvfd(conn.fileno())
#
# Support for a per-process server thread which caches pickled handles
#
def _reset(obj):
global _lock, _listener, _cache
for h in _cache:
close(h)
_cache.clear()
_lock = threading.Lock()
_listener = None
_reset(None)
register_after_fork(_reset, _reset)
def _get_listener():
global _listener
if _listener is None:
_lock.acquire()
try:
if _listener is None:
from ..connection import Listener
debug('starting listener and thread for sending handles')
_listener = Listener(authkey=current_process().authkey)
t = threading.Thread(target=_serve)
t.daemon = True
t.start()
finally:
_lock.release()
return _listener
def _serve():
from ..util import is_exiting, sub_warning
while 1:
try:
conn = _listener.accept()
handle_wanted, destination_pid = conn.recv()
_cache.remove(handle_wanted)
send_handle(conn, handle_wanted, destination_pid)
close(handle_wanted)
conn.close()
except:
if not is_exiting():
sub_warning('thread for sharing handles raised exception',
exc_info=True)
#
# Functions to be used for pickling/unpickling objects with handles
#
def reduce_handle(handle):
from ..forking import Popen, duplicate
if Popen.thread_is_spawning():
return (None, Popen.duplicate_for_child(handle), True)
dup_handle = duplicate(handle)
_cache.add(dup_handle)
sub_debug('reducing handle %d', handle)
return (_get_listener().address, dup_handle, False)
def rebuild_handle(pickled_data):
from ..connection import Client
address, handle, inherited = pickled_data
if inherited:
return handle
sub_debug('rebuilding handle %d', handle)
conn = Client(address, authkey=current_process().authkey)
conn.send((handle, os.getpid()))
new_handle = recv_handle(conn)
conn.close()
return new_handle
#
# Register `_billiard.Connection` with `ForkingPickler`
#
def reduce_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_connection, (rh, conn.readable, conn.writable)
def rebuild_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _billiard.Connection(
handle, readable=readable, writable=writable
)
# Register `socket.socket` with `ForkingPickler`
#
def fromfd(fd, family, type_, proto=0):
s = socket.fromfd(fd, family, type_, proto)
if s.__class__ is not socket.socket:
s = socket.socket(_sock=s)
return s
def reduce_socket(s):
reduced_handle = reduce_handle(s.fileno())
return rebuild_socket, (reduced_handle, s.family, s.type, s.proto)
def rebuild_socket(reduced_handle, family, type_, proto):
fd = rebuild_handle(reduced_handle)
_sock = fromfd(fd, family, type_, proto)
close(fd)
return _sock
ForkingPickler.register(socket.socket, reduce_socket)
#
# Register `_billiard.PipeConnection` with `ForkingPickler`
#
if sys.platform == 'win32':
def reduce_pipe_connection(conn):
rh = reduce_handle(conn.fileno())
return rebuild_pipe_connection, (rh, conn.readable, conn.writable)
def rebuild_pipe_connection(reduced_handle, readable, writable):
handle = rebuild_handle(reduced_handle)
return _billiard.PipeConnection(
handle, readable=readable, writable=writable
)
| 1.453125 | 1 |
barry/convert.py | jyotiska/barry | 0 | 13828 | from exceptions import BarryFileException, BarryConversionException, BarryExportException, BarryDFException
import pandas as pd
import requests
from StringIO import StringIO
def detect_file_extension(filename):
"""Extract and return the extension of a file given a filename.
Args:
filename (str): name of the file
Returns:
str: extension of the file
Raises:
BarryFileException: if extension not present in filename
"""
if filename is None:
raise BarryFileException("Input file name cannot be None")
split_filename = filename.split(".")
if len(split_filename) > 1:
return str(split_filename[-1]).lower()
else:
raise BarryFileException("Could not determine input file type from file extension")
def xls_to_df(filename, skip_rows, skip_header, columns):
"""Converts a XLS file to Pandas dataframe.
Args:
filename (str): name of the file
skip_rows (int): number of rows to skip from top
skip_header (bool): whether to skip header
columns (list or None): list of column names
Returns:
dataframe: a pandas dataframe
Raises:
BarryConversionException: if file cannot be converted to dataframe
"""
try:
# Check if columns names has been passed
if columns is not None and len(columns) > 0:
skip_header = 0
# Check if header needs to be skipped
if skip_header is True:
skip_header = None
else:
skip_header = 0
return pd.read_excel(filename, skiprows=skip_rows, header=skip_header, names=columns)
except Exception as e:
raise BarryConversionException("Could not convert file %s to dataframe" % (filename))
def xlsx_to_df(filename, skip_rows, skip_header, columns):
"""Converts a XLSX file to Pandas dataframe.
Args:
filename (str): name of the file
skip_rows (int): number of rows to skip from top
skip_header (bool): whether to skip header
columns (list or None): list of column names
Returns:
dataframe: a pandas dataframe
Raises:
BarryConversionException: if file cannot be converted to dataframe
"""
try:
# Check if columns names has been passed
if columns is not None and len(columns) > 0:
skip_header = 0
# Check if header needs to be skipped
if skip_header is True:
skip_header = None
else:
skip_header = 0
return pd.read_excel(filename, skiprows=skip_rows, header=skip_header, names=columns)
except Exception as e:
raise BarryConversionException("Could not convert file %s to dataframe" % (filename))
def csv_to_df(filename, skip_rows, skip_header, columns):
"""Converts a CSV file to Pandas dataframe.
Args:
filename (str): name of the file
skip_rows (int): number of rows to skip from top
skip_header (bool): whether to skip header
columns (list or None): list of column names
Returns:
dataframe: a pandas dataframe
Raises:
BarryConversionException: if file cannot be converted to dataframe
"""
try:
# Check if columns names has been passed
if columns is not None and len(columns) > 0:
skip_header = 0
# Check if header needs to be skipped
if skip_header is True:
skip_header = None
else:
skip_header = 0
return pd.read_csv(filename, skiprows=skip_rows, header=skip_header, names=columns)
except Exception as e:
raise BarryConversionException("Could not convert file %s to dataframe" % (filename))
def url_to_df(url, skip_rows, skip_header, columns):
"""Converts a CSV from HTTP URL to Pandas dataframe.
Args:
url (str): http url of the csv
skip_rows (int): number of rows to skip from top
skip_header (bool): whether to skip header
columns (list or None): list of column names
Returns:
dataframe: a pandas dataframe
Raises:
BarryConversionException: if file cannot be converted to dataframe
"""
try:
# Check if columns names has been passed
if columns is not None and len(columns) > 0:
skip_header = 0
# Check if header needs to be skipped
if skip_header is True:
skip_header = None
else:
skip_header = 0
url_content = requests.get(url).content
return pd.read_csv(StringIO(url_content), skiprows=skip_rows, header=skip_header, names=columns)
except Exception as e:
raise BarryConversionException("Could not convert file %s to dataframe" % (filename))
def df_to_xls(df, out_filename):
"""Writes a Pandas dataframe to a XLS file.
Args:
df (dataframe): dataframe to be written to file
        out_filename (str): name of the output file
    Raises:
        BarryExportException: if the dataframe cannot be written to the file
"""
try:
df.to_excel(out_filename)
except Exception as e:
raise BarryExportException("Could not write dataframe to file %s" % (out_filename))
def df_to_xlsx(df, out_filename):
"""Writes a Pandas dataframe to a XLS file.
Args:
df (dataframe): dataframe to be written to file
        out_filename (str): name of the output file
    Raises:
        BarryExportException: if the dataframe cannot be written to the file
"""
try:
df.to_excel(out_filename)
except Exception as e:
raise BarryExportException("Could not write dataframe to file %s" % (out_filename))
def df_to_json(df, out_filename):
"""Writes a Pandas dataframe to a JSON file.
Args:
df (dataframe): dataframe to be written to file
        out_filename (str): name of the output file
    Raises:
        BarryExportException: if the dataframe cannot be written to the file
"""
try:
df.to_json(out_filename)
except Exception as e:
raise BarryExportException("Could not write dataframe to file %s" % (out_filename))
def df_to_csv(df, out_filename):
"""Writes a Pandas dataframe to a CSV file.
Args:
df (dataframe): dataframe to be written to file
        out_filename (str): name of the output file
    Raises:
        BarryExportException: if the dataframe cannot be written to the file
"""
try:
df.to_csv(out_filename)
except Exception as e:
raise BarryExportException("Could not write dataframe to file %s" % (out_filename))
def sort_df(df, sort_column, ascending):
"""Sort a DataFrame with the column name passed in ascending/descending order.
Args:
df (dataframe): dataframe that needs to be sorted
sort_column (str): column to be sorted on
ascending (bool): sort order, ascending if True, descending if False
Returns:
dataframe: a pandas dataframe
Raises:
BarryDFException: if there is any error while sorting the dataframe
"""
try:
return df.sort(columns=sort_column, ascending=ascending)
except Exception as e:
raise BarryDFException("Could not sort dataframe on columns %s" % (sort_column))
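# Illustrative pipeline (editor's sketch, not part of the original module): assuming an input
# CSV with a header row, the helpers above chain naturally (file and column names are
# placeholders), e.g.:
#
#     df = csv_to_df("sales.csv", skip_rows=0, skip_header=False, columns=None)
#     df = sort_df(df, sort_column="amount", ascending=False)
#     df_to_xlsx(df, "sales_sorted.xlsx")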
| 2.34375 | 2 |
src/vargenpath/pipeline.py | AldisiRana/VarGenPath | 0 | 13956 | # -*- coding: utf-8 -*-
"""Pipeline for VarGenPath"""
from typing import Optional
from .constants import LINKSET_PATH, FILE_TYPES
from .utils import (
get_cytoscape_connection, get_associated_genes, var_genes_network, extend_vargen_network, save_session, save_image,
save_network
)
def get_vargenpath_network(
*,
variant_list: list,
network_name: Optional[str] = 'VarGenPath network',
linkset_path: Optional[str] = LINKSET_PATH,
session_path: Optional[str] = None,
image_path: Optional[str] = None,
extend_network: bool = True,
image_type: Optional[str] = 'svg',
network_path: Optional[str] = None,
network_file_path: Optional[str] = 'cyjs',
) -> dict:
"""
Pipeline for creating vargenpath network.
:param network_file_path: the type of network file to be saved.
:param network_path: if input path, the cytoscape network will be saved to this path.
:param variant_list: list of variants.
:param network_name: the name of the network.
:param linkset_path: the path to the linkset to extend network.
:param session_path: if input path, the cytoscape session will be saved to this path.
:param image_path: if input path, the image of the network will be saved to this path.
:param extend_network: if true, the network will be extended.
:param image_type: the type of the image to be saved.
:return: cytoscape network
"""
try:
cy = get_cytoscape_connection()
except Exception:
raise Exception('Uh-oh! Make sure that cytoscape is open then try again.')
vargen_df = get_associated_genes(variant_list)
network = var_genes_network(variants_genes_df=vargen_df, client=cy, network_name=network_name)
if extend_network:
network = extend_vargen_network(linkset_path, client=cy)
if session_path is not None:
save_session(session_file=session_path, client=cy)
if image_path is not None:
save_image(network_image=image_path, image_type=FILE_TYPES[image_type])
if network_path is not None:
save_network(network_path=network_path, file_type=FILE_TYPES[network_file_path])
return network
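# Illustrative call (editor's sketch): assuming Cytoscape is running with the required apps and
# the default linkset available, a typical invocation might look like (variant IDs are
# placeholders):
#
#     network = get_vargenpath_network(
#         variant_list=["rs123", "rs456"],
#         session_path="vargenpath.cys",
#         image_path="vargenpath.svg",
#     )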
| 1.53125 | 2 |
scripts/010_smultixcan/utils/ukb_gtex_variants_intersection/compute_intersection_ukb_gtex_variants.py | miltondp/phenomexcan | 3 | 14084 | #!/usr/bin/env python
import os
import argparse
import sqlite3
from glob import glob
import pandas as pd
parser = argparse.ArgumentParser()
parser.add_argument('--gtex-models-dir', type=str, required=True)
parser.add_argument('--variants-file-with-gtex-id', type=str, required=True)
parser.add_argument('--output-file', type=str, required=True)
args = parser.parse_args()
all_models = glob(os.path.join(args.gtex_models_dir, '*.db'))
assert len(all_models) == 49, len(all_models)
all_variants_ids = set()
for m in all_models:
print(f'Processing {m}')
with sqlite3.connect(m) as conn:
df = pd.read_sql('select varID from weights', conn)['varID']
all_variants_ids.update(set(df.values))
print(f'Read {len(all_variants_ids)} unique variants in GTEx models')
print(f'Reading {args.variants_file_with_gtex_id}')
variants_gtexid = pd.read_csv(args.variants_file_with_gtex_id, sep='\t', usecols=['panel_variant_id'], squeeze=True).dropna()
variants_gtexid = set(variants_gtexid.values)
print(f' Read {len(variants_gtexid)} variants')
print('Merging GTEx and other variants')
merged_variants = variants_gtexid.intersection(all_variants_ids)
print(f'Final number of merged variants: {len(merged_variants)}')
print(f'Coverage of GTEx variants: {(len(merged_variants) / len(all_variants_ids)) * 100:.2f}%')
print(f'Writing to {args.output_file}')
pd.DataFrame({'rsid': list(merged_variants)}).to_csv(args.output_file, index=False)
| 1.671875 | 2 |
point_to_box/model.py | BavarianToolbox/point_to_box | 0 | 14212 |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_model.ipynb (unless otherwise specified).
__all__ = ['EfficientLoc', 'CIoU']
# Cell
#export
from efficientnet_pytorch import EfficientNet
import copy
import time
import math
import torch
import torch.optim as opt
from torch.utils.data import DataLoader
from torchvision import transforms
# Cell
class EfficientLoc():
def __init__(self, version = 'efficientnet-b0', in_channels = 4, out_features = 4, export = False):
"""
EfficientLoc model class for loading, training, and exporting models
"""
self.version = version
# self.inter_channels = versoin_dict([version])
# TODO
# check version is compliant
self.in_channels = in_channels
self.out_features = out_features
self.export = export
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.data_parallel = False
self.model = self.get_model(version = self.version,
in_channels = self.in_channels, out_features = self.out_features)
def get_model(self, version, in_channels, out_features):
"""
Adjusts efficient net model architecture for point-to-box data
"""
version_chnls = {
'efficientnet-b0': 1280,
'efficientnet-b1': 1280,
'efficientnet-b2': 1408,
'efficientnet-b3': 1536,
'efficientnet-b4': 1792
# 'efficientnet-b5': 456
# 'efficientnet-b6': 528
# 'efficientnet-b7': 600
# 'efficientnet-b8': 672
# 'efficientnet-l2': 800
}
inter_channel = version_chnls[version]
model = EfficientNet.from_pretrained(version, include_top = False)
# adjust in channels in conv stem
model._change_in_channels(in_channels)
# if self.export:
model.set_swish(memory_efficient= (not self.export))
model = torch.nn.Sequential(
model,
# torch.nn.AdaptiveAvgPool2d(),
torch.nn.Dropout(0.2),
torch.nn.Flatten(),
torch.nn.Linear(inter_channel, out_features),
# torch.nn.Linear(100, out_features),
torch.nn.Sigmoid()
)
for param in model.parameters():
param.requires_grad = True
if torch.cuda.device_count() > 1:
print(f'Using {torch.cuda.device_count()} GPUs')
model = torch.nn.DataParallel(model)
self.data_parallel = True
model.to(self.device)
return model
def train(self, dataloaders, criterion, optimizer, num_epochs, ds_sizes, print_every = 100, scheduler=None):
"""
Training function for model
**Params**
        dataloaders : dict of train/val DataLoaders
criterion : loss function
optimizer : training optimizer
num_epochs : number of training epochs
        ds_sizes : dict of the number of samples in each phase ('train'/'val')
print_every : batch_interval for intermediate loss printing
scheduler : Optional learning rate scheduler
"""
train_start = time.time()
best_model_wts = copy.deepcopy(self.model.state_dict())
best_loss = 10000000.0
for epoch in range(num_epochs):
print(f'Epoch {epoch + 1}/{num_epochs}')
print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
phase_start = time.time()
if phase == 'train':
self.model.train()
else:
self.model.eval()
inter_loss = 0.
running_loss = 0.
batches_past = 0
# Iterate over data.
for i, (inputs, labels) in enumerate(dataloaders[phase]):
inputs = inputs.to(self.device)
labels = labels.to(self.device)
# zero the parameter gradients
optimizer.zero_grad()
# forward, only track history in train phase
with torch.set_grad_enabled(phase == 'train'):
outputs = self.model(inputs)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
running_loss += loss.item()
inter_loss += loss.item()
if (i+1) % print_every == 0:
inter_loss = inter_loss / ((i+1-batches_past) * inputs.shape[0])
print(f'Intermediate loss: {inter_loss:.6f}')
inter_loss = 0.
batches_past = i+1
if phase == 'train' and scheduler is not None:
scheduler.step()
epoch_loss = running_loss / ds_sizes[phase]
phase_duration = time.time() - phase_start
phase_duration = f'{(phase_duration // 60):.0f}m {(phase_duration % 60):.0f}s'
print('-' * 5)
            print(f'{phase} Phase Duration: {phase_duration} Average Loss: {epoch_loss:.6f}')
print('-' * 5)
# deep copy the model
if phase == 'val' and epoch_loss < best_loss:
best_loss = epoch_loss
best_model_wts = copy.deepcopy(self.model.state_dict())
time_elapsed = time.time() - train_start
print(f'Training complete in {(time_elapsed // 60):.0f}m {(time_elapsed % 60):.0f}s')
print(f'Best val Loss: {best_loss:.4f}')
# load best model weights
self.model.load_state_dict(best_model_wts)
def save(self, dst, info = None):
"""Save model and optimizer state dict
**Params**
dst : destination file path including .pth file name
info : Optional dictionary with model info
"""
if info:
torch.save(info, dst)
else:
model_dict = self.model.state_dict()
if self.data_parallel:
model_dict = self.model.module.state_dict()
torch.save({
'base_arch' : self.version,
'model_state_dict' : model_dict,
}, dst)
def load(self, model_state_dict):
"""Load model weights from state-dict"""
self.model.load_state_dict(model_state_dict)
def _export(self, dst, dummy, verbose = True):
"""Export model as onnx graph
**Params**
dst : destination including .onnx file name
dummy : dummy variable for export structure, shape (B,C,W,H)
"""
self.model.eval()
torch.onnx.export(self.model, dummy, dst, verbose = verbose)
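# Illustrative usage (editor's sketch, not part of the original module): assuming the pretrained
# EfficientNet weights can be downloaded and `dataloaders`/`sizes` are prepared elsewhere:
#
#     el = EfficientLoc('efficientnet-b0', in_channels=4, out_features=4)
#     criterion = torch.nn.SmoothL1Loss()          # or the CIoU loss defined below
#     optimizer = opt.Adam(el.model.parameters(), lr=1e-4)
#     el.train(dataloaders, criterion, optimizer, num_epochs=10, ds_sizes=sizes)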
# Cell
class CIoU(torch.nn.Module):
"""Complete IoU loss class"""
def __init__(self) -> None:
super(CIoU, self).__init__()
def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
return self.ciou(input, target)
# return F.l1_loss(input, target, reduction=self.reduction)
def ciou(self, bboxes1, bboxes2):
bboxes1 = torch.sigmoid(bboxes1)
bboxes2 = torch.sigmoid(bboxes2)
rows = bboxes1.shape[0]
cols = bboxes2.shape[0]
cious = torch.zeros((rows, cols))
if rows * cols == 0:
return cious
exchange = False
if bboxes1.shape[0] > bboxes2.shape[0]:
bboxes1, bboxes2 = bboxes2, bboxes1
cious = torch.zeros((cols, rows))
exchange = True
w1 = torch.exp(bboxes1[:, 2])
h1 = torch.exp(bboxes1[:, 3])
w2 = torch.exp(bboxes2[:, 2])
h2 = torch.exp(bboxes2[:, 3])
area1 = w1 * h1
area2 = w2 * h2
center_x1 = bboxes1[:, 0]
center_y1 = bboxes1[:, 1]
center_x2 = bboxes2[:, 0]
center_y2 = bboxes2[:, 1]
inter_l = torch.max(center_x1 - w1 / 2,center_x2 - w2 / 2)
inter_r = torch.min(center_x1 + w1 / 2,center_x2 + w2 / 2)
inter_t = torch.max(center_y1 - h1 / 2,center_y2 - h2 / 2)
inter_b = torch.min(center_y1 + h1 / 2,center_y2 + h2 / 2)
inter_area = torch.clamp((inter_r - inter_l),min=0) * torch.clamp((inter_b - inter_t),min=0)
c_l = torch.min(center_x1 - w1 / 2,center_x2 - w2 / 2)
c_r = torch.max(center_x1 + w1 / 2,center_x2 + w2 / 2)
c_t = torch.min(center_y1 - h1 / 2,center_y2 - h2 / 2)
c_b = torch.max(center_y1 + h1 / 2,center_y2 + h2 / 2)
inter_diag = (center_x2 - center_x1)**2 + (center_y2 - center_y1)**2
c_diag = torch.clamp((c_r - c_l),min=0)**2 + torch.clamp((c_b - c_t),min=0)**2
union = area1+area2-inter_area
u = (inter_diag) / c_diag
iou = inter_area / union
v = (4 / (math.pi ** 2)) * torch.pow((torch.atan(w2 / h2) - torch.atan(w1 / h1)), 2)
with torch.no_grad():
S = (iou>0.5).float()
alpha= S*v/(1-iou+v)
cious = iou - u - alpha * v
cious = torch.clamp(cious,min=-1.0,max = 1.0)
if exchange:
cious = cious.T
        return torch.sum(1-cious)
| 1.960938 | 2 |
HackerEarth/Python/BasicProgramming/InputOutput/BasicsOfInputOutput/SeatingArrangement.py | cychitivav/programming_exercises | 0 | 14340 | #!/usr/bin/env python
"""
Akash and Vishal are quite fond of travelling. They mostly travel by railways. They were travelling in a train one day and they got interested in the seating arrangement of their compartment. The compartment looked something like
So they got interested to know the seat number facing them and the seat type facing them. The seats are denoted as follows :
Window Seat : WS
Middle Seat : MS
Aisle Seat : AS
You will be given a seat number, find out the seat number facing you and the seat type, i.e. WS, MS or AS.
INPUT:
First line of input will consist of a single integer T denoting number of test-cases. Each test-case consists of a single integer N denoting the seat-number.
OUTPUT:
For each test case, print the facing seat-number and the seat-type, separated by a single space in a new line.
CONSTRAINTS:
1 ≤ T ≤ 10^5
1 ≤ N ≤ 10^8
"""
__author__ = "<NAME>"
__date__ = "March 17, 2019"
__email__ = "<EMAIL>"
T = int(input())
while T > 0:
N = int(input())
position = N % 12
section = N//12
if position == 1:
word = str((position + 11) + 12*section)
print(word + ' WS')
elif position == 2:
word = str((position + 9) + 12*section)
print(word + ' MS')
elif position == 3:
word = str((position + 7) + 12*section)
print(word + ' AS')
elif position == 4:
word = str((position + 5) + 12*section)
print(word + ' AS')
elif position == 5:
word = str((position + 3) + 12*section)
print(word + ' MS')
elif position == 6:
word = str((position + 1) + 12*section)
print(word + ' WS')
elif position == 7:
word = str((position - 1) + 12*section)
print(word + ' WS')
elif position == 8:
word = str((position - 3) + 12*section)
print(word + ' MS')
elif position == 9:
word = str((position - 5) + 12*section)
print(word + ' AS')
elif position == 10:
word = str((position - 7) + 12*section)
print(word + ' AS')
elif position == 11:
word = str((position - 9) + 12*section)
print(word + ' MS')
else:
word = str((position - 11) + 12*section)
print(word + ' WS')
T -= 1
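# Editor's note (illustrative alternative, same behaviour as the chain above): within each
# 12-seat block, seat p faces seat 13 - p, and facing seats share the same seat type, so:
#
#     p = (N - 1) % 12 + 1                              # position within the block (1..12)
#     facing = (N - 1) // 12 * 12 + (13 - p)
#     kind = 'WS' if p in (1, 6, 7, 12) else 'MS' if p in (2, 5, 8, 11) else 'AS'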
| 2.78125 | 3 |
Aulas/aula14.py | adonaifariasdev/cursoemvideo-python3 | 0 | 14468 |
'''for c in range(1, 10):
print(c)
print('FIM')'''
'''c = 1
while c < 10:
print(c)
c += 1
print('FIM')'''
'''n = 1
while n != 0: #flag / stopping condition
n = int(input('Digite um valor: '))
print('FIM')'''
'''r = 'S'
while r == 'S':
n = int(input('Digite um valor: '))
r = str(input('Quer continuar? [S/N]')).upper()
print('FIM')'''
n = 1
totPar = totaImpar = 0
while n != 0:
n = int(input('Digite um valor: '))
    if n != 0: # does not count the final 0 in the tally
if n % 2 ==0:
totPar += 1
else:
totaImpar += 1
print('Você digitou {} numeros pares e {} numeros impares.'.format(totPar, totaImpar))
# NOTE: in this case the terminating 0 is not counted as a number!!!!
| 2.703125 | 3 |
paddlex/ppdet/modeling/heads/detr_head.py | xiaolao/PaddleX | 3,655 | 14596 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddlex.ppdet.core.workspace import register
import pycocotools.mask as mask_util
from ..initializer import linear_init_, constant_
from ..transformers.utils import inverse_sigmoid
__all__ = ['DETRHead', 'DeformableDETRHead']
class MLP(nn.Layer):
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.LayerList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
self._reset_parameters()
def _reset_parameters(self):
for l in self.layers:
linear_init_(l)
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
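# Editor's note (illustrative): in DETRHead below, this MLP maps each decoder query embedding to
# 4 box parameters; e.g. MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3) applies
# two ReLU-activated linear layers followed by a final linear projection.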
class MultiHeadAttentionMap(nn.Layer):
"""This is a 2D attention module, which only returns the attention softmax (no multiplication by value)"""
def __init__(self,
query_dim,
hidden_dim,
num_heads,
dropout=0.0,
bias=True):
super().__init__()
self.num_heads = num_heads
self.hidden_dim = hidden_dim
self.dropout = nn.Dropout(dropout)
weight_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.XavierUniform())
bias_attr = paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.Constant()) if bias else False
self.q_proj = nn.Linear(query_dim, hidden_dim, weight_attr, bias_attr)
self.k_proj = nn.Conv2D(
query_dim,
hidden_dim,
1,
weight_attr=weight_attr,
bias_attr=bias_attr)
self.normalize_fact = float(hidden_dim / self.num_heads)**-0.5
def forward(self, q, k, mask=None):
q = self.q_proj(q)
k = self.k_proj(k)
bs, num_queries, n, c, h, w = q.shape[0], q.shape[1], self.num_heads,\
self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]
qh = q.reshape([bs, num_queries, n, c])
kh = k.reshape([bs, n, c, h, w])
# weights = paddle.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh)
qh = qh.transpose([0, 2, 1, 3]).reshape([-1, num_queries, c])
kh = kh.reshape([-1, c, h * w])
weights = paddle.bmm(qh * self.normalize_fact, kh).reshape(
[bs, n, num_queries, h, w]).transpose([0, 2, 1, 3, 4])
if mask is not None:
weights += mask
# fix a potenial bug: https://github.com/facebookresearch/detr/issues/247
weights = F.softmax(weights.flatten(3), axis=-1).reshape(weights.shape)
weights = self.dropout(weights)
return weights
class MaskHeadFPNConv(nn.Layer):
"""
Simple convolutional head, using group norm.
Upsampling is done using a FPN approach
"""
def __init__(self, input_dim, fpn_dims, context_dim, num_groups=8):
super().__init__()
inter_dims = [input_dim,
] + [context_dim // (2**i) for i in range(1, 5)]
weight_attr = paddle.ParamAttr(
initializer=paddle.nn.initializer.KaimingUniform())
bias_attr = paddle.framework.ParamAttr(
initializer=paddle.nn.initializer.Constant())
self.conv0 = self._make_layers(input_dim, input_dim, 3, num_groups,
weight_attr, bias_attr)
self.conv_inter = nn.LayerList()
for in_dims, out_dims in zip(inter_dims[:-1], inter_dims[1:]):
self.conv_inter.append(
self._make_layers(in_dims, out_dims, 3, num_groups,
weight_attr, bias_attr))
self.conv_out = nn.Conv2D(
inter_dims[-1],
1,
3,
padding=1,
weight_attr=weight_attr,
bias_attr=bias_attr)
self.adapter = nn.LayerList()
for i in range(len(fpn_dims)):
self.adapter.append(
nn.Conv2D(
fpn_dims[i],
inter_dims[i + 1],
1,
weight_attr=weight_attr,
bias_attr=bias_attr))
def _make_layers(self,
in_dims,
out_dims,
kernel_size,
num_groups,
weight_attr=None,
bias_attr=None):
return nn.Sequential(
nn.Conv2D(
in_dims,
out_dims,
kernel_size,
padding=kernel_size // 2,
weight_attr=weight_attr,
bias_attr=bias_attr),
nn.GroupNorm(num_groups, out_dims),
nn.ReLU())
def forward(self, x, bbox_attention_map, fpns):
x = paddle.concat([
x.tile([bbox_attention_map.shape[1], 1, 1, 1]),
bbox_attention_map.flatten(0, 1)
], 1)
x = self.conv0(x)
for inter_layer, adapter_layer, feat in zip(self.conv_inter[:-1],
self.adapter, fpns):
feat = adapter_layer(feat).tile(
[bbox_attention_map.shape[1], 1, 1, 1])
x = inter_layer(x)
x = feat + F.interpolate(x, size=feat.shape[-2:])
x = self.conv_inter[-1](x)
x = self.conv_out(x)
return x
@register
class DETRHead(nn.Layer):
__shared__ = ['num_classes', 'hidden_dim', 'use_focal_loss']
__inject__ = ['loss']
def __init__(self,
num_classes=80,
hidden_dim=256,
nhead=8,
num_mlp_layers=3,
loss='DETRLoss',
fpn_dims=[1024, 512, 256],
with_mask_head=False,
use_focal_loss=False):
super(DETRHead, self).__init__()
# add background class
self.num_classes = num_classes if use_focal_loss else num_classes + 1
self.hidden_dim = hidden_dim
self.loss = loss
self.with_mask_head = with_mask_head
self.use_focal_loss = use_focal_loss
self.score_head = nn.Linear(hidden_dim, self.num_classes)
self.bbox_head = MLP(hidden_dim,
hidden_dim,
output_dim=4,
num_layers=num_mlp_layers)
if self.with_mask_head:
self.bbox_attention = MultiHeadAttentionMap(hidden_dim, hidden_dim,
nhead)
self.mask_head = MaskHeadFPNConv(hidden_dim + nhead, fpn_dims,
hidden_dim)
self._reset_parameters()
def _reset_parameters(self):
linear_init_(self.score_head)
@classmethod
def from_config(cls, cfg, hidden_dim, nhead, input_shape):
return {
'hidden_dim': hidden_dim,
'nhead': nhead,
'fpn_dims': [i.channels for i in input_shape[::-1]][1:]
}
@staticmethod
def get_gt_mask_from_polygons(gt_poly, pad_mask):
out_gt_mask = []
for polygons, padding in zip(gt_poly, pad_mask):
height, width = int(padding[:, 0].sum()), int(padding[0, :].sum())
masks = []
for obj_poly in polygons:
rles = mask_util.frPyObjects(obj_poly, height, width)
rle = mask_util.merge(rles)
masks.append(
paddle.to_tensor(mask_util.decode(rle)).astype('float32'))
masks = paddle.stack(masks)
masks_pad = paddle.zeros(
[masks.shape[0], pad_mask.shape[1], pad_mask.shape[2]])
masks_pad[:, :height, :width] = masks
out_gt_mask.append(masks_pad)
return out_gt_mask
def forward(self, out_transformer, body_feats, inputs=None):
r"""
Args:
out_transformer (Tuple): (feats: [num_levels, batch_size,
num_queries, hidden_dim],
memory: [batch_size, hidden_dim, h, w],
src_proj: [batch_size, h*w, hidden_dim],
src_mask: [batch_size, 1, 1, h, w])
body_feats (List(Tensor)): list[[B, C, H, W]]
inputs (dict): dict(inputs)
"""
feats, memory, src_proj, src_mask = out_transformer
outputs_logit = self.score_head(feats)
outputs_bbox = F.sigmoid(self.bbox_head(feats))
outputs_seg = None
if self.with_mask_head:
bbox_attention_map = self.bbox_attention(feats[-1], memory,
src_mask)
fpn_feats = [a for a in body_feats[::-1]][1:]
outputs_seg = self.mask_head(src_proj, bbox_attention_map,
fpn_feats)
outputs_seg = outputs_seg.reshape([
feats.shape[1], feats.shape[2], outputs_seg.shape[-2],
outputs_seg.shape[-1]
])
if self.training:
assert inputs is not None
assert 'gt_bbox' in inputs and 'gt_class' in inputs
gt_mask = self.get_gt_mask_from_polygons(
inputs['gt_poly'],
inputs['pad_mask']) if 'gt_poly' in inputs else None
return self.loss(
outputs_bbox,
outputs_logit,
inputs['gt_bbox'],
inputs['gt_class'],
masks=outputs_seg,
gt_mask=gt_mask)
else:
return (outputs_bbox[-1], outputs_logit[-1], outputs_seg)
@register
class DeformableDETRHead(nn.Layer):
__shared__ = ['num_classes', 'hidden_dim']
__inject__ = ['loss']
def __init__(self,
num_classes=80,
hidden_dim=512,
nhead=8,
num_mlp_layers=3,
loss='DETRLoss'):
super(DeformableDETRHead, self).__init__()
self.num_classes = num_classes
self.hidden_dim = hidden_dim
self.nhead = nhead
self.loss = loss
self.score_head = nn.Linear(hidden_dim, self.num_classes)
self.bbox_head = MLP(hidden_dim,
hidden_dim,
output_dim=4,
num_layers=num_mlp_layers)
self._reset_parameters()
def _reset_parameters(self):
linear_init_(self.score_head)
constant_(self.score_head.bias, -4.595)
constant_(self.bbox_head.layers[-1].weight)
bias = paddle.zeros_like(self.bbox_head.layers[-1].bias)
bias[2:] = -2.0
self.bbox_head.layers[-1].bias.set_value(bias)
@classmethod
def from_config(cls, cfg, hidden_dim, nhead, input_shape):
return {'hidden_dim': hidden_dim, 'nhead': nhead}
def forward(self, out_transformer, body_feats, inputs=None):
r"""
Args:
out_transformer (Tuple): (feats: [num_levels, batch_size,
num_queries, hidden_dim],
memory: [batch_size,
\sum_{l=0}^{L-1} H_l \cdot W_l, hidden_dim],
reference_points: [batch_size, num_queries, 2])
body_feats (List(Tensor)): list[[B, C, H, W]]
inputs (dict): dict(inputs)
"""
feats, memory, reference_points = out_transformer
reference_points = inverse_sigmoid(reference_points.unsqueeze(0))
outputs_bbox = self.bbox_head(feats)
# It's equivalent to "outputs_bbox[:, :, :, :2] += reference_points",
# but the gradient is wrong in paddle.
outputs_bbox = paddle.concat(
[
outputs_bbox[:, :, :, :2] + reference_points,
outputs_bbox[:, :, :, 2:]
],
axis=-1)
outputs_bbox = F.sigmoid(outputs_bbox)
outputs_logit = self.score_head(feats)
if self.training:
assert inputs is not None
assert 'gt_bbox' in inputs and 'gt_class' in inputs
return self.loss(outputs_bbox, outputs_logit, inputs['gt_bbox'],
inputs['gt_class'])
else:
return (outputs_bbox[-1], outputs_logit[-1], None)
| 2.265625 | 2 |
foo/pictureR/wordsTemplate.py | MangetsuC/arkHelper | 147 | 14724 |
from PIL import Image, ImageDraw, ImageFont
from numpy import asarray
from cv2 import cvtColor, COLOR_RGB2BGR, imshow, waitKey
from os import getcwd
def getFontSize_name(resolution):
x = resolution[0]
if x <= 1024:
return (16, (1024,576))
elif x <= 1280:
return (21, (1280,720))
elif x <= 1440:
return (23, (1440,810))
elif x <= 1600:
return (26, (1600,900))
else:
return (31, (1920,1080))
def getTemplatePic_CH(words, fontsize):
    # Typical font sizes: RIIC operator name 23, occupancy-overview room name 28 (1440*810); operator name 30, room name 38 (1920*1080)
    ttf = ImageFont.truetype(getcwd() + "/res/fonts/SourceHanSansCN-Regular.otf", fontsize) # font: Source Han Sans CN
    wordsPic = Image.new('RGB', ttf.getsize(words))
    wordsDraw = ImageDraw.Draw(wordsPic)
    wordsDraw.text((0, 0), words, font=ttf, fill=(255,255,255)) # render the matching text template
#temp = cvtColor(asarray(wordsPic), COLOR_RGB2BGR)
#imshow('test', temp)
#waitKey(0)
return cvtColor(asarray(wordsPic), COLOR_RGB2BGR)
def getTemplatePic_NUM(num, fontsize):
    # Typical font size: occupancy-overview operator morale 28
    num = str(num)
    ttf = ImageFont.truetype(getcwd() + "/res/fonts/Bender.otf", fontsize) # font: Bender
    wordsPic = Image.new('RGB', ttf.getsize(num), color = (255, 255, 255))
    wordsDraw = ImageDraw.Draw(wordsPic)
    wordsDraw.text((0, 0), num, font=ttf, fill=(0,0,0)) # render the matching number template
    return cvtColor(asarray(wordsPic), COLOR_RGB2BGR)
| 1.820313 | 2 |
sorting/insertion_sort.py | src24/algos | 0 | 14852 | from typing import List
# O(n^2)
def insertion_sort(arr: List[int], desc: bool = False) -> None:
for i, item in enumerate(arr):
if i == 0:
continue
j: int = i - 1
while j >= 0 and (arr[j] > item) ^ desc:
arr[j + 1] = arr[j]
j -= 1
arr[j + 1] = item
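# Editor's addition (illustrative usage): the sort is in-place; desc=True flips the comparison.
if __name__ == "__main__":
    data = [5, 2, 9, 1, 5]
    insertion_sort(data)
    assert data == [1, 2, 5, 5, 9]
    insertion_sort(data, desc=True)
    assert data == [9, 5, 5, 2, 1]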
| 2.3125 | 2 |
Day 7/Day 7.py | Dullstar/Advent-Of-Code-2020 | 0 | 14980 | import re
class Rule:
def __init__(self, line):
line = line.strip().split(" contain ")
line[1] = line[1].strip(".").split(", ")
self.contents = {}
for item in line[1]:
# Grab that number out in front
regex = re.compile(r"[0-9]+")
# If we didn't find one that means it's no bags inside
if match := regex.match(item):
quantity = int(item[match.span()[0]:match.span()[1]])
# The +1 deals with the space
bag_type = item[match.span()[1] + 1:]
if quantity > 1:
# This gets rid of the s if it's plural
bag_type = bag_type[:-1]
self.contents[bag_type] = quantity
# The s makes things irritating so I want it gone
self.bag_type = line[0][:-1]
def contains_directly(self, bag_type: str):
return bag_type in self.contents
# Warning: recursive
def contains(self, bag_type: str, rule_dict: dict):
if self.contains_directly(bag_type):
return True
else:
for bag in self.contents:
if bag in rule_dict:
if rule_dict[bag].contains(bag_type, rule_dict):
return True
else:
print("An unexpected bag was discovered!")
return False
def count_internal_bags(self, rule_dict: dict):
internal_bags = 0
for bag in self.contents:
# count these bags...
internal_bags += self.contents[bag] # recall that this value represents the quantity
# ...and count the bags inside of it
internal_bags += rule_dict[bag].count_internal_bags(rule_dict) * self.contents[bag]
return internal_bags
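# Editor's note (illustrative): a rule line such as
#   "light red bags contain 1 bright white bag, 2 muted yellow bags."
# parses into bag_type "light red bag" with contents
#   {"bright white bag": 1, "muted yellow bag": 2}.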
def parse_input(filename: str):
with open(filename, "r") as file:
rules = {}
for line in file:
rule = Rule(line)
print(f"{rule.bag_type} contains {rule.contents}")
rules[rule.bag_type] = rule
return rules
def main():
rule_dict = parse_input("input.txt")
shiny_gold = 0
for rule_entry in rule_dict.keys():
rule = rule_dict[rule_entry]
if rule.contains("shiny gold bag", rule_dict):
print(f"Found {rule.contents} in {rule.bag_type}")
shiny_gold += 1
print("\n")
print(f"Found {shiny_gold} bags containing at least one shiny gold bag.")
print(f"A shiny gold bag contains {rule_dict['shiny gold bag'].count_internal_bags(rule_dict)} bags.")
if __name__ == "__main__":
main()
| 2.828125 | 3 |
Examples/AcceptAllRevisions.py | aspose-words-cloud/aspose-words-cloud-python | 14 | 15108 | import os
import asposewordscloud
import asposewordscloud.models.requests
from asposewordscloud.rest import ApiException
from shutil import copyfile
words_api = asposewordscloud.WordsApi(client_id = '####-####-####-####-####', client_secret = '##################')
file_name = 'test_doc.docx'
# Upload original document to cloud storage.
my_var1 = open(file_name, 'rb')
my_var2 = file_name
upload_file_request = asposewordscloud.models.requests.UploadFileRequest(file_content=my_var1, path=my_var2)
words_api.upload_file(upload_file_request)
# Calls AcceptAllRevisions method for document in cloud.
my_var3 = file_name
request = asposewordscloud.models.requests.AcceptAllRevisionsRequest(name=my_var3)
words_api.accept_all_revisions(request)
| 1.25 | 1 |
pextant/sextant.py | norheim/pextant | 0 | 15236 |
from flask_settings import GEOTIFF_FULL_PATH
import sys
import traceback
sys.path.append('../')
import numpy as np
import json
from datetime import timedelta
from functools import update_wrapper
from pextant.EnvironmentalModel import GDALMesh
from pextant.explorers import Astronaut
from pextant.analysis.loadWaypoints import JSONloader
from pextant.lib.geoshapely import GeoPolygon, LAT_LONG
from pextant.solvers.astarMesh import astarSolver
from flask import Flask
from flask import make_response, request, current_app
app = Flask(__name__)
def crossdomain(origin=None, methods=None, headers=None,
max_age=21600, attach_to_all=True,
automatic_options=True):
if methods is not None:
methods = ', '.join(sorted(x.upper() for x in methods))
if headers is not None and not isinstance(headers, basestring):
headers = ', '.join(x.upper() for x in headers)
if not isinstance(origin, basestring):
origin = ', '.join(origin)
if isinstance(max_age, timedelta):
max_age = max_age.total_seconds()
def get_methods():
if methods is not None:
return methods
options_resp = current_app.make_default_options_response()
return options_resp.headers['allow']
def decorator(f):
def wrapped_function(*args, **kwargs):
if automatic_options and request.method == 'OPTIONS':
resp = current_app.make_default_options_response()
else:
resp = make_response(f(*args, **kwargs))
if not attach_to_all and request.method != 'OPTIONS':
return resp
h = resp.headers
h['Access-Control-Allow-Origin'] = origin
h['Access-Control-Allow-Methods'] = get_methods()
h['Access-Control-Max-Age'] = str(max_age)
if headers is not None:
h['Access-Control-Allow-Headers'] = headers
return resp
f.provide_automatic_options = False
return update_wrapper(wrapped_function, f)
return decorator
def main(argv):
print 'STARTING SEXTANT'
geotiff_full_path = ""
try:
geotiff_full_path = argv[0]
except IndexError:
# print 'Syntax is "sextant <inputfile>"'
pass
if not geotiff_full_path or geotiff_full_path == 'sextant:app':
geotiff_full_path = GEOTIFF_FULL_PATH
print geotiff_full_path
gdal_mesh = GDALMesh(geotiff_full_path)
explorer = Astronaut(80)
solver, waypoints, environmental_model = None, None, None
@app.route('/test', methods=['GET', 'POST'])
@crossdomain(origin='*')
def test():
print str(request)
return json.dumps({'test':'test'})
@app.route('/setwaypoints', methods=['GET', 'POST'])
@crossdomain(origin='*')
def set_waypoints():
try:
global solver, waypoints, environmental_model
print('in set waypoints')
request_data = request.get_json(force=True)
xp_json = request_data['xp_json']
json_loader = JSONloader(xp_json['sequence'])
print 'loaded xp json'
waypoints = json_loader.get_waypoints()
print 'gdal mesh is built from %s' % str(geotiff_full_path)
environmental_model = gdal_mesh.loadSubSection(waypoints.geoEnvelope(), cached=True)
solver = astarSolver(environmental_model, explorer, optimize_on='Energy')
print('loaded fine')
return json.dumps({'loaded': True})
except Exception, e:
traceback.print_exc()
response = {'error': str(e),
'status_code': 400}
return response
@app.route('/solve', methods=['GET', 'POST'])
@crossdomain(origin='*')
def solve():
global solver, waypoints, environmental_model
print 'in solve'
request_data = request.get_json(force=True)
return_type = request_data['return']
if 'xp_json' in request_data:
xp_json = request_data['xp_json']
json_loader = JSONloader(xp_json['sequence'])
waypoints = json_loader.get_waypoints()
print(waypoints.to(LAT_LONG))
environmental_model = gdal_mesh.loadSubSection(waypoints.geoEnvelope(), cached=True)
solver = astarSolver(environmental_model, explorer, optimize_on='Energy')
search_results, rawpoints, _ = solver.solvemultipoint(waypoints)
return_json = {
'latlong':[]
}
if return_type == 'segmented':
for search_result in search_results.list:
lat, lon = GeoPolygon(environmental_model.ROW_COL, *np.array(search_result.raw).transpose()).to(LAT_LONG)
return_json['latlong'].append({'latitudes': list(lat), 'longitudes': list(lon)})
else:
lat, lon = GeoPolygon(environmental_model.ROW_COL, *np.array(rawpoints).transpose()).to(LAT_LONG)
return_json['latlong'].append({'latitudes': list(lat), 'longitudes': list(lon)})
return json.dumps(return_json)
# OLD Stuff: delete
@app.route('/', methods=['GET', 'POST'])
@crossdomain(origin='*')
def get_waypoints():
print('got request')
data = request.get_json(force=True)
data_np = np.array(data['waypoints']).transpose()
#json_waypoints = JSONloader(xpjson)
waypoints = GeoPolygon(LAT_LONG, *data_np)
print waypoints.to(LAT_LONG)
environmental_model = gdal_mesh.loadSubSection(waypoints.geoEnvelope(), cached=True)
explorer = Astronaut(80)
solver = astarSolver(environmental_model, explorer, optimize_on='Energy', cached=True)
_, rawpoints, _ = solver.solvemultipoint(waypoints)
lat, lon = GeoPolygon(environmental_model.ROW_COL, *np.array(rawpoints).transpose()).to(LAT_LONG)
print((lat, lon))
return json.dumps({'latitudes': list(lat), 'longitudes': list(lon)})
if argv[0] != 'sextant:app':
app.run(host='localhost', port=5000)
# if __name__ == "__main__":
main(sys.argv[1:])
#main(['../data/maps/dem/HI_air_imagery.tif']) | 1.492188 | 1 |
utils/warmup.py | hengwei-chan/3D_SBDD | 67 | 15364 | <gh_stars>10-100
"""
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from torch.optim.lr_scheduler import _LRScheduler
from torch.optim.lr_scheduler import ReduceLROnPlateau
class GradualWarmupScheduler(_LRScheduler):
""" Gradually warm-up(increasing) learning rate in optimizer.
Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.
Args:
optimizer (Optimizer): Wrapped optimizer.
multiplier: target learning rate = base lr * multiplier if multiplier > 1.0. if multiplier = 1.0, lr starts from 0 and ends up with the base_lr.
total_epoch: target learning rate is reached at total_epoch, gradually
after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau)
"""
def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):
self.multiplier = multiplier
if self.multiplier < 1.:
            raise ValueError('multiplier should be greater than or equal to 1.')
self.total_epoch = total_epoch
self.after_scheduler = after_scheduler
self.finished = False
super(GradualWarmupScheduler, self).__init__(optimizer)
def get_lr(self):
if self.last_epoch > self.total_epoch:
if self.after_scheduler:
if not self.finished:
self.after_scheduler.base_lrs = [base_lr * self.multiplier for base_lr in self.base_lrs]
self.finished = True
return self.after_scheduler.get_last_lr()
return [base_lr * self.multiplier for base_lr in self.base_lrs]
if self.multiplier == 1.0:
return [base_lr * (float(self.last_epoch) / self.total_epoch) for base_lr in self.base_lrs]
else:
return [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]
def step_ReduceLROnPlateau(self, metrics, epoch=None):
if epoch is None:
epoch = self.last_epoch + 1
self.last_epoch = epoch if epoch != 0 else 1 # ReduceLROnPlateau is called at the end of epoch, whereas others are called at beginning
if self.last_epoch <= self.total_epoch:
warmup_lr = [base_lr * ((self.multiplier - 1.) * self.last_epoch / self.total_epoch + 1.) for base_lr in self.base_lrs]
for param_group, lr in zip(self.optimizer.param_groups, warmup_lr):
param_group['lr'] = lr
else:
if epoch is None:
self.after_scheduler.step(metrics, None)
else:
self.after_scheduler.step(metrics, epoch - self.total_epoch)
def step(self, epoch=None, metrics=None):
if type(self.after_scheduler) != ReduceLROnPlateau:
if self.finished and self.after_scheduler:
if epoch is None:
self.after_scheduler.step(None)
else:
self.after_scheduler.step(epoch - self.total_epoch)
self._last_lr = self.after_scheduler.get_last_lr()
else:
return super(GradualWarmupScheduler, self).step(epoch)
else:
self.step_ReduceLROnPlateau(metrics, epoch)
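
if __name__ == "__main__":
    # Minimal usage sketch added for illustration -- not part of the original module.
    # The toy linear model and the 5-epoch warmup are assumptions chosen only to show
    # how the scheduler wraps an optimizer and then hands off to another schedule.
    import torch
    from torch.optim.lr_scheduler import CosineAnnealingLR

    model = torch.nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    cosine = CosineAnnealingLR(optimizer, T_max=95)
    scheduler = GradualWarmupScheduler(optimizer, multiplier=1.0,
                                       total_epoch=5, after_scheduler=cosine)
    for epoch in range(100):
        optimizer.step()        # stand-in for one training epoch
        scheduler.step(epoch)   # warm up for 5 epochs, then follow the cosine schedule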
| 2.03125 | 2 |
gui(12102018).py | hanhydro/T2H | 0 | 15492 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'gui.ui'
#
# Created by: PyQt5 UI code generator 5.6
#
# WARNING! All changes made in this file will be lost!
import os
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import (QApplication, QDialog,
QProgressBar, QPushButton, QMessageBox)
import matplotlib.pyplot as plt
from matplotlib import style
import T2H, PLOT
import flopy
from matplotlib.backends.qt_compat import QtCore, QtWidgets, is_pyqt5
if is_pyqt5():
from matplotlib.backends.backend_qt5agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
else:
from matplotlib.backends.backend_qt4agg import (
FigureCanvas, NavigationToolbar2QT as NavigationToolbar)
from matplotlib.figure import Figure
#%%
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("T2H Graphical User Interface")
MainWindow.resize(1280, 800)
self.centralWidget = QtWidgets.QWidget(MainWindow)
self.centralWidget.setObjectName("centralWidget")
#%% QFrames
self.frame_1 = QtWidgets.QFrame(self.centralWidget)
self.frame_1.setGeometry(QtCore.QRect(810, 70, 461, 201))
self.frame_1.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_1.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_1.setObjectName("frame_2")
self.frame_2 = QtWidgets.QFrame(self.centralWidget)
self.frame_2.setGeometry(QtCore.QRect(810, 280, 461, 101))
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.frame_3 = QtWidgets.QFrame(self.centralWidget)
self.frame_3.setGeometry(QtCore.QRect(810, 390, 461, 31))
self.frame_3.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_3.setObjectName("frame_3")
#%% QLabels
self.sedK = QtWidgets.QLabel(self.frame_2)
self.sedK.setGeometry(QtCore.QRect(30, 10, 141, 16))
self.sedK.setObjectName("sedK")
self.aqK = QtWidgets.QLabel(self.frame_2)
self.aqK.setGeometry(QtCore.QRect(30, 40, 141, 16))
self.aqK.setObjectName("aqK")
self.faultK = QtWidgets.QLabel(self.frame_2)
self.faultK.setGeometry(QtCore.QRect(30, 70, 141, 16))
self.faultK.setObjectName("faultK")
self.sedKN = QtWidgets.QLabel(self.centralWidget)
self.sedKN.setGeometry(QtCore.QRect(910, 500, 141, 16))
self.sedKN.setObjectName("sedKN")
self.sedKNlabel = QtWidgets.QLabel(self.centralWidget)
self.sedKNlabel.setGeometry(QtCore.QRect(1100, 500, 61, 16))
self.sedKNlabel.setObjectName("sedKNlabel")
self.aquiferKNlabel = QtWidgets.QLabel(self.centralWidget)
self.aquiferKNlabel.setGeometry(QtCore.QRect(1100, 520, 61, 16))
self.aquiferKNlabel.setObjectName("aquiferKNlabel")
self.aqKN = QtWidgets.QLabel(self.centralWidget)
self.aqKN.setGeometry(QtCore.QRect(910, 520, 81, 16))
self.aqKN.setObjectName("aqKN")
self.faultKN = QtWidgets.QLabel(self.centralWidget)
self.faultKN.setGeometry(QtCore.QRect(910, 540, 81, 16))
self.faultKN.setObjectName("faultKN")
self.faultKNlabel = QtWidgets.QLabel(self.centralWidget)
self.faultKNlabel.setGeometry(QtCore.QRect(1100, 540, 61, 16))
self.faultKNlabel.setObjectName("faultKNlabel")
self.label_21 = QtWidgets.QLabel(self.frame_3)
self.label_21.setGeometry(QtCore.QRect(10, 7, 141, 16))
self.label_21.setObjectName("label_21")
self.visoptionsLabel = QtWidgets.QLabel(self.centralWidget)
self.visoptionsLabel.setGeometry(QtCore.QRect(20, 540, 141, 16))
self.visoptionsLabel.setObjectName("visoptionsLabel")
self.fileLabel = QtWidgets.QLabel(self.centralWidget)
self.fileLabel.setGeometry(QtCore.QRect(810, 4, 60, 16))
self.fileLabel.setObjectName("fileLabel")
self.fileLabel_path = QtWidgets.QLabel(self.centralWidget)
self.fileLabel_path.setGeometry(QtCore.QRect(880, 4, 320, 16))
self.fileLabel_path.setObjectName("fileLabel_path")
self.label = QtWidgets.QLabel(self.centralWidget)
self.label.setGeometry(QtCore.QRect(814, 51, 241, 16))
self.label.setObjectName("label")
self.nz = QtWidgets.QLabel(self.centralWidget)
self.nz.setGeometry(QtCore.QRect(840, 104, 141, 16))
self.nz.setObjectName("nz")
self.targetperiod = QtWidgets.QLabel(self.centralWidget)
self.targetperiod.setGeometry(QtCore.QRect(840, 80, 151, 16))
self.targetperiod.setObjectName("targetperiod")
self.nzfixed = QtWidgets.QLabel(self.centralWidget)
self.nzfixed.setGeometry(QtCore.QRect(840, 128, 141, 16))
self.nzfixed.setObjectName("nzfixed")
self.constrecharge = QtWidgets.QLabel(self.centralWidget)
self.constrecharge.setGeometry(QtCore.QRect(840, 176, 151, 16))
self.constrecharge.setObjectName("constrecharge")
#
self.hiniratio = QtWidgets.QLabel(self.centralWidget)
self.hiniratio.setGeometry(QtCore.QRect(840, 242, 151, 16))
self.hiniratio.setObjectName("hiniratio")
self.datvar = QtWidgets.QLabel(self.centralWidget)
self.datvar.setGeometry(QtCore.QRect(840, 152, 161, 16))
self.datvar.setObjectName("datvar")
# Recharge input
self.constrecharge_2 = QtWidgets.QLabel(self.centralWidget)
self.constrecharge_2.setGeometry(QtCore.QRect(840, 200, 151, 16))
self.constrecharge_2.setObjectName("constrecharge_2")
# Image pane
self.image = QtWidgets.QLabel(self.centralWidget)
self.image.setGeometry(QtCore.QRect(10, 10, 780, 520))
self.image.setObjectName("image")
self.pixmap = QtGui.QPixmap("logo.png")
self.image.setPixmap(self.pixmap)
#%% QLineEdits
self.sedKlineEdit = QtWidgets.QLineEdit(self.frame_2)
self.sedKlineEdit.setGeometry(QtCore.QRect(260, 10, 113, 21))
self.sedKlineEdit.setObjectName("sedKlineEdit")
self.sedKlineEdit.setText("547.5")
#
self.aqKlineEdit = QtWidgets.QLineEdit(self.frame_2)
self.aqKlineEdit.setGeometry(QtCore.QRect(260, 40, 113, 21))
self.aqKlineEdit.setObjectName("aqKlineEdit")
self.aqKlineEdit.setText("36.5")
#
self.faultKlineEdit = QtWidgets.QLineEdit(self.frame_2)
self.faultKlineEdit.setGeometry(QtCore.QRect(260, 70, 113, 21))
self.faultKlineEdit.setObjectName("faultKlineEdit")
self.faultKlineEdit.setText("0.0365")
#
self.nzfline = QtWidgets.QLineEdit(self.centralWidget)
self.nzfline.setGeometry(QtCore.QRect(1070, 128, 113, 21))
self.nzfline.setObjectName("nzfline")
self.nzfline.setText("10")
#
self.nzline = QtWidgets.QLineEdit(self.centralWidget)
self.nzline.setGeometry(QtCore.QRect(1070, 104, 113, 21))
self.nzline.setObjectName("nzline")
self.nzline.setText("40")
#
self.datline = QtWidgets.QLineEdit(self.centralWidget)
self.datline.setGeometry(QtCore.QRect(1070, 152, 113, 21))
self.datline.setObjectName("datline")
self.datline.setText("-10000")
#
self.hiniratioLineEdit = QtWidgets.QLineEdit(self.centralWidget)
self.hiniratioLineEdit.setGeometry(QtCore.QRect(1070, 242, 113, 21))
self.hiniratioLineEdit.setObjectName("hiniratioLineEdit")
self.hiniratioLineEdit.setText("0.9")
#
self.datvarline = QtWidgets.QLineEdit(self.centralWidget)
self.datvarline.setGeometry(QtCore.QRect(1070, 176, 113, 21))
self.datvarline.setObjectName("datvarline")
self.datvarline.setText("-3000")
self.rchline = QtWidgets.QLineEdit(self.centralWidget)
self.rchline.setGeometry(QtCore.QRect(1070, 200, 113, 21))
self.rchline.setObjectName("rchline")
self.rchline.setText("0.05")
# Ma input lineedit
self.maline = QtWidgets.QLineEdit(self.centralWidget)
self.maline.setGeometry(QtCore.QRect(1070, 80, 113, 21))
self.maline.setObjectName("maline")
self.maline.setText("12.5")
#%% QPushButtons
self.load = QtWidgets.QPushButton(self.centralWidget)
self.load.setGeometry(QtCore.QRect(1100, -1, 71, 32))
self.load.setObjectName("loadButton")
self.load.clicked.connect(self.fileloader)
self.load1 = QtWidgets.QPushButton(self.centralWidget)
self.load1.setGeometry(QtCore.QRect(1170, -1, 101, 32))
self.load1.setObjectName("loadButton1")
self.load1.clicked.connect(self.fileloader)
self.applyButton = QtWidgets.QPushButton(self.frame_1)
self.applyButton.setGeometry(QtCore.QRect(380, 60, 81, 81))
self.applyButton.setObjectName("applyButton")
self.applyButton.clicked.connect(self.applyclicked)
self.fileDialog_3 = QtWidgets.QPushButton(self.frame_2)
self.fileDialog_3.setGeometry(QtCore.QRect(380, 20, 81, 71))
self.fileDialog_3.setObjectName("fileDialog_3")
self.fileDialog_3.clicked.connect(self.applyCalClicked)
# Model run button
self.ModelRunButton = QtWidgets.QPushButton(self.centralWidget)
self.ModelRunButton.setGeometry(QtCore.QRect(640, 620, 113, 32))
self.ModelRunButton.setObjectName("ModelRunButton")
self.ModelRunButton.clicked.connect(self.run)
self.QuitButton = QtWidgets.QPushButton(self.centralWidget)
self.QuitButton.setGeometry(QtCore.QRect(760, 620, 113, 32))
self.QuitButton.setObjectName("QuitButton")
self.QuitButton.clicked.connect(QCoreApplication.instance().quit)
self.VtkOutputButton = QtWidgets.QPushButton(self.centralWidget)
self.VtkOutputButton.setGeometry(QtCore.QRect(880, 620, 113, 32))
self.VtkOutputButton.setObjectName("VtkOutputButton")
# self.VtkOutputButton.clicked.connect(self.vtk)
self.PlotButton = QtWidgets.QPushButton(self.centralWidget)
self.PlotButton.setGeometry(QtCore.QRect(460, 560, 113, 32))
self.PlotButton.setObjectName("PlotButton")
self.PlotButton.clicked.connect(self.plot)
#%% QGraphicsViews
self.figure = plt.figure(figsize=(12,12))
self.canvas = FigureCanvas(self.figure)
#%% QComboBoxes
# File combo box
self.fileBox = QtWidgets.QComboBox(self.centralWidget)
self.fileBox.setGeometry(QtCore.QRect(808, 25, 461, 26))
self.fileBox.setObjectName("fileBox")
# Solver selection combo box
self.solverBox = QtWidgets.QComboBox(self.frame_3)
self.solverBox.setGeometry(QtCore.QRect(63, 2, 281, 26))
self.solverBox.setObjectName("solverBox")
self.solverBox.addItem("xMD")
self.solverBox.addItem("GMRES")
#
self.visComboBox = QtWidgets.QComboBox(self.centralWidget)
self.visComboBox.setGeometry(QtCore.QRect(10, 560, 441, 26))
self.visComboBox.setObjectName("visComboBox")
self.visComboBox.addItem("Cross Section")
self.visComboBox.addItem("Fault Plane")
self.visComboBox.addItem("Vertical Flow Barriers (VFB)")
self.visComboBox.addItem("Horizontal Flow Barriers (HFB)")
#%% QCheckBoxes
#
self.elevdependentChecker = QtWidgets.QCheckBox(self.centralWidget)
self.elevdependentChecker.setGeometry(QtCore.QRect(860, 220, 231, 20))
self.elevdependentChecker.setObjectName("elevdependentChecker")
#%% QProgressBars
self.progress = QProgressBar(self.centralWidget)
self.progress.setGeometry(10, 620, 600, 25)
self.progress.setMaximum(100)
#%% Mainwindows
MainWindow.setCentralWidget(self.centralWidget)
self.menuBar = QtWidgets.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 1024, 22))
self.menuBar.setObjectName("menuBar")
self.menuT2H_Main = QtWidgets.QMenu(self.menuBar)
self.menuT2H_Main.setObjectName("menuT2H_Main")
self.menuT2H_Checker = QtWidgets.QMenu(self.menuBar)
self.menuT2H_Checker.setObjectName("menuT2H_Checker")
self.menuT2H_Plot = QtWidgets.QMenu(self.menuBar)
self.menuT2H_Plot.setObjectName("menuT2H_Plot")
MainWindow.setMenuBar(self.menuBar)
self.mainToolBar = QtWidgets.QToolBar(MainWindow)
self.mainToolBar.setObjectName("mainToolBar")
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.mainToolBar)
self.statusBar = QtWidgets.QStatusBar(MainWindow)
self.statusBar.setObjectName("statusBar")
MainWindow.setStatusBar(self.statusBar)
self.menuBar.addAction(self.menuT2H_Main.menuAction())
self.menuBar.addAction(self.menuT2H_Checker.menuAction())
self.menuBar.addAction(self.menuT2H_Plot.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
#%% Functions
def applyclicked(self):
self.Ma = float(self.maline.text())
self.Ma = format(self.Ma, '.1f')
self.nz = int(self.nzline.text())
self.nz_fixed = int(self.nzfline.text())
self.dx = 1000
self.dy = 1000
self.inz = self.nz - self.nz_fixed
self.dat = int(self.datline.text())
self.dat_var = int(self.datvarline.text())
self.idat = self.dat - self.dat_var
self.rech = float(self.rchline.text())
self.perm_sed = float(self.sedKlineEdit.text())
self.hratio = float(self.hiniratioLineEdit.text())
self.Kconst = float(self.aqKlineEdit.text())
self.hydchr = self.Kconst/1000
self.target_row = 101
self.iskip = 4
self.ivtk = 1
self.h_tol = 1e-4
self.fileLabel_path.setText("/tisc_output/topo_" + self.Ma +"0Ma.txt")
self.ans = QMessageBox.question(self.centralWidget, "Confirmation",\
"Are these correct?\n" + "Period: " + self.Ma\
+ "Ma\n" + "Nz: " + str(self.nz) +"\n" + "Datum: "\
+ str(self.dat) + " m\n", QMessageBox.Yes, QMessageBox.No)
if self.ans == QMessageBox.Yes:
self.rchline.setEnabled(False)
self.maline.setEnabled(False)
self.nzline.setEnabled(False)
self.nzfline.setEnabled(False)
self.datline.setEnabled(False)
self.datvarline.setEnabled(False)
self.hiniratioLineEdit.setEnabled(False)
QMessageBox.about(self.centralWidget, "Confirmed", "Properties confirmed")
else:
QMessageBox.about(self.centralWidget, "Check values", "Check values again!")
def applyCalClicked(self):
self.perm_sed = self.sedKlineEdit.text()
self.Kconst = self.aqKlineEdit.text()
self.hydchr = self.faultKlineEdit.text()
self.sedKNlabel.setText(str(float(self.perm_sed)/float(self.rchline.text())))
self.aquiferKNlabel.setText(str(float(self.Kconst)/float(self.rchline.text())))
self.faultKNlabel.setText(str(float(self.hydchr)/float(self.rchline.text())))
self.ans = QMessageBox.question(self.centralWidget, "Confirmation",\
"Are these correct?\n" + "Period: " + self.Ma\
+ "Ma\n" + "Nz: " + str(self.nz) +"\n" + "Datum: "\
+ str(self.dat) + " m\n", QMessageBox.Yes, QMessageBox.No)
if self.ans == QMessageBox.Yes:
self.sedKlineEdit.setEnabled(False)
self.aqKlineEdit.setEnabled(False)
self.faultKlineEdit.setEnabled(False)
QMessageBox.about(self.centralWidget, "Confirmed", "Properties confirmed")
else:
QMessageBox.about(self.centralWidget, "Check values", "Check values again!")
#%%
def run(self):
self.Ma = float(self.maline.text())
self.Ma = format(self.Ma, '.1f')
self.nz = int(self.nzline.text())
self.nz_fixed = int(self.nzfline.text())
self.dx = 1000
self.dy = 1000
self.inz = self.nz - self.nz_fixed
self.dat = int(self.datline.text())
self.dat_var = int(self.datvarline.text())
self.idat = self.dat - self.dat_var
self.rech = float(self.rchline.text())
self.perm_sed = float(self.sedKlineEdit.text())
self.hratio = float(self.hiniratioLineEdit.text())
self.Kconst = float(self.aqKlineEdit.text())
self.hydchr = self.Kconst/1000
self.target_row = 101
self.iskip = 4
self.ivtk = 1
self.h_tol = 1e-4
self.model = T2H.main(self.Ma, self.nz, self.nz_fixed, self.inz, self.dx,\
self.dy, self.dat, self.dat_var, self.idat\
, self.rech, self.perm_sed, self.target_row,\
self.Kconst, self.hratio, self.hydchr,\
self.iskip, self.ivtk, self.h_tol)
self.mf = self.model.mf
self.mf.dis.check()
self.mf.write_input()
self.mf.run_model()
return self.mf
def plot(self):
try:
self.mf
except AttributeError:
QMessageBox.about(self.centralWidget, "Warning", "Please run a model first")
else:
            self.vcb = self.visComboBox.currentText()
print(self.vcb)
if self.vcb == "Cross Section":
figheadxsect, axheadxsect = plt.subplots(figsize=(40,5))
self.mfxsect = PLOT.fmfxsect(self.mf, self.model.mfdis, self.target_row, axheadxsect).mfxsect
self.a = PLOT.head(self.mf, self.model.fdirmodel).a
self.headc = PLOT.headc(self.mfxsect, self.a)
self.headcontour = self.headc.headcontour
self.gdplot = self.mfxsect.plot_grid(color='r', linewidths=0.2)
self.BCplot = self.mfxsect.plot_ibound(self.model.ibound, color_noflow = 'black',\
color_ch = 'blue', head = self.a)
self.canvas.draw()
print("plot")
def fileloader(self):
self.path = os.getcwd() + "/tisc_output/"
self.l = os.listdir(self.path)
self.bdtopo = [0]*len(self.l)
self.topo = [0]*len(self.l)
self.fault = [0]*len(self.l)
self.sedthick = [0]*len(self.l)
for file in range(len(self.l)):
if self.l[file].startswith("bdtopo"):
if os.stat(self.path+self.l[file]).st_size > 5: # greater than 5 bytes
self.bdtopo[file] = float(self.l[file][7:]\
.split("Ma.txt")[0])
elif self.l[file].startswith("topo"):
if os.stat(self.path+self.l[file]).st_size > 5: # greater than 5 bytes
self.topo[file] = float(self.l[file][5:]\
.split("Ma.txt")[0])
elif self.l[file].startswith("fault"):
if os.stat(self.path+self.l[file]).st_size > 5: # greater than 5 bytes
self.fault[file] = float(self.l[file][6:]\
.split("Ma.txt")[0])
elif self.l[file].startswith("sedthick"):
if os.stat(self.path+self.l[file]).st_size > 5: # greater than 5 bytes
self.sedthick[file] = float(self.l[file][9:]\
.split("Ma.txt")[0])
self.a = list(filter((0).__ne__, self.topo))
self.a.sort()
self.b = list(filter((0).__ne__, self.bdtopo))
self.b.sort()
self.c = list(filter((0).__ne__, self.fault))
self.c.sort()
self.d = list(filter((0).__ne__, self.sedthick))
self.d.sort()
self.df = []
for nfile in range(len(self.a)):
if self.b.count(self.a[nfile]) == 1:
if self.c.count(self.a[nfile]) == 1:
if self.d.count(self.a[nfile]) == 1:
data = [self.a[nfile], "y", "y", "y", "y"]
self.df.append(data)
elif self.d.count(self.a[nfile]) == 0:
data = [self.a[nfile], "y", "y", "y", "n"]
self.df.append(data)
elif self.c.count(self.a[nfile]) == 0:
if self.d.count(self.a[nfile]) == 1:
data = [self.a[nfile], "y", "y", "n", "y"]
self.df.append(data)
elif self.d.count(self.a[nfile]) == 0:
data = [self.a[nfile], "y", "y", "n", "n"]
self.df.append(data)
elif self.b.count(self.a[nfile]) == 0:
if self.c.count(self.a[nfile]) == 1:
if self.d.count(self.a[nfile]) == 1:
data = [self.a[nfile], "y", "n", "y", "y"]
self.df.append(data)
elif self.d.count(self.a[nfile]) == 0:
data = [self.a[nfile], "y", "n", "y", "n"]
self.df.append(data)
elif self.c.count(self.a[nfile]) == 0:
if self.d.count(self.a[nfile]) == 1:
data = [self.a[nfile], "y", "n", "n", "y"]
self.df.append(data)
elif self.d.count(self.a[nfile]) == 0:
data = [self.a[nfile], "y", "n", "n", "n"]
self.df.append(data)
for age in range(len(self.a)):
if self.df[age][2] == "y" and self.df[age][3] == "y" and self.df[age][4] == "y":
self.fileBox.addItem("Snapshot:" + str(self.df[age][0]) + "Ma | Faults | Sediments")
elif self.df[age][2] == "y" and self.df[age][3] == "y" and self.df[age][4] == "n":
self.fileBox.addItem("Snapshot:" + str(self.df[age][0]) + "Ma | Faults | No Sediments")
elif self.df[age][2] == "y" and self.df[age][3] == "n" and self.df[age][4] == "y":
self.fileBox.addItem("Snapshot:" + str(self.df[age][0]) + "Ma | No Faults | Sediments")
elif self.df[age][2] == "y" and self.df[age][3] == "n" and self.df[age][4] == "n":
self.fileBox.addItem("Snapshot:" + str(self.df[age][0]) + "Ma | No Faults | No Sediments")
#%%
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "T2H Graphical User Interface"))
self.applyButton.setText(_translate("MainWindow", "Apply"))
self.sedK.setText(_translate("MainWindow", "Sediment K (m/yr)"))
self.aqK.setText(_translate("MainWindow", "Aquifer K (m/yr)"))
self.faultK.setText(_translate("MainWindow", "Fault zone K (m/yr)"))
self.fileDialog_3.setText(_translate("MainWindow", "Apply"))
self.sedKN.setText(_translate("MainWindow", "Sediment K / N:"))
self.sedKNlabel.setText(_translate("MainWindow", "N/A"))
self.aquiferKNlabel.setText(_translate("MainWindow", "N/A"))
self.aqKN.setText(_translate("MainWindow", "Aquifer K / N:"))
self.faultKN.setText(_translate("MainWindow", "Fault K / N:"))
self.faultKNlabel.setText(_translate("MainWindow", "N/A"))
self.label_21.setText(_translate("MainWindow", "Solver"))
self.ModelRunButton.setText(_translate("MainWindow", "Execute"))
self.load.setText(_translate("MainWindow", "Load"))
self.load1.setText(_translate("MainWindow", "Set selected"))
self.QuitButton.setText(_translate("MainWindow", "Abort"))
self.VtkOutputButton.setText(_translate("MainWindow", "VTK output"))
self.PlotButton.setText(_translate("MainWindow", "Plot"))
self.visoptionsLabel.setText(_translate("MainWindow", "Visualization options"))
self.fileLabel.setText(_translate("MainWindow", "File: "))
self.fileLabel_path.setText(_translate("MainWindow", "path"))
self.label.setText(_translate("MainWindow", "*dx = dy = 1,000 m fixed in this version"))
self.nz.setText(_translate("MainWindow", "Number of layers (nz)"))
self.targetperiod.setText(_translate("MainWindow", "Target period (Ma)"))
self.nzfixed.setText(_translate("MainWindow", "Fixed layers (nz_fixed)"))
self.constrecharge.setText(_translate("MainWindow", "Datum of variable dz (m)"))
self.hiniratio.setText(_translate("MainWindow", "Initial head ratio to topo."))
self.elevdependentChecker.setText(_translate("MainWindow", "Elevation-dependent recharge"))
self.datvar.setText(_translate("MainWindow", "Model datum (m)"))
self.constrecharge_2.setText(_translate("MainWindow", "Const. Recharge (m/yr)"))
self.menuT2H_Main.setTitle(_translate("MainWindow", "T2H Main"))
self.menuT2H_Checker.setTitle(_translate("MainWindow", "T2H Checker"))
self.menuT2H_Plot.setTitle(_translate("MainWindow", "T2H Plot"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 1.726563 | 2 |
codes/utils.py | epfml/byzantine-robust-noniid-optimizer | 7 | 15620 | import os
import shutil
import logging
class BColors(object):
HEADER = "\033[95m"
OK_BLUE = "\033[94m"
OK_CYAN = "\033[96m"
OK_GREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
END_C = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
def touch(fname: str, times=None, create_dirs: bool = False):
if create_dirs:
base_dir = os.path.dirname(fname)
if not os.path.exists(base_dir):
os.makedirs(base_dir)
with open(fname, "a"):
os.utime(fname, times)
def touch_dir(base_dir: str) -> None:
if not os.path.exists(base_dir):
os.makedirs(base_dir)
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def top1_accuracy(output, target):
return accuracy(output, target, topk=(1,))[0].item()
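# Illustrative sanity check for the helpers above, kept as a comment so importing this
# module stays side-effect free (the tensors are made-up values):
#   import torch
#   logits = torch.tensor([[0.1, 0.9], [0.2, 0.8]])  # both samples predict class 1
#   targets = torch.tensor([1, 0])                    # only the first one is correct
#   top1_accuracy(logits, targets)                    # -> 50.0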
def log(*args, **kwargs):
pass
def log_dict(*args, **kwargs):
pass
def initialize_logger(log_root):
if not os.path.exists(log_root):
os.makedirs(log_root)
else:
shutil.rmtree(log_root)
os.makedirs(log_root)
print(f"Logging files to {log_root}")
# Only to file; One dict per line; Easy to process
json_logger = logging.getLogger("stats")
json_logger.setLevel(logging.INFO)
fh = logging.FileHandler(os.path.join(log_root, "stats"))
fh.setLevel(logging.INFO)
fh.setFormatter(logging.Formatter("%(message)s"))
json_logger.addHandler(fh)
debug_logger = logging.getLogger("debug")
debug_logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(logging.Formatter("%(message)s"))
debug_logger.addHandler(ch)
fh = logging.FileHandler(os.path.join(log_root, "debug"))
fh.setLevel(logging.INFO)
debug_logger.addHandler(fh)
| 1.984375 | 2 |
models.py | zhangjingqiang/qiang-tools | 0 | 15748 | <gh_stars>0
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from app import db
class User(UserMixin, db.Model):
"""
User who can use this application.
"""
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), unique=True, index=True)
password_hash = db.Column(db.String(128))
def __init__(self, username, password):
self.username = username
self.password = password
@property
def password(self):
raise AttributeError('password is not readable')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def __repr__(self):
return '<User %r>' % self.username
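# Illustrative behaviour of the write-only password pattern above (example values only):
#   u = User("alice", "s3cret")   # stores only a salted hash, never the plain text
#   u.verify_password("s3cret")   # -> True
#   u.password                    # -> AttributeError: password is not readable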
class Tool(db.Model):
"""
Tools details.
"""
__tablename__ = 'tools'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String())
def __repr__(self):
return '<Tool {}>'.format(self.id)
| 1.695313 | 2 |
courspider/department_calendar.py | Zylphrex/courspider | 0 | 15876 | <filename>courspider/department_calendar.py
import re
from courspider.faculty_calendar_resources.department import Department
from courspider.faculty_calendar_resources.url import URL
from courspider.course import Course
class DepartmentCalendar:
def __init__(self, session, url):
"""
Initialize a new Department Calendar for the given url
:param session: The session of the calendar
:type session: Session
:param url: The url to the specified year's calendar
:type url: URL
"""
self.session = session
self.url = url
self.department = DepartmentCalendar.find_department_name(url)
self.courses = []
# regex used for the _find_department method
_department_name = re.compile(r"<h1>(.*)<\/h1>")
@staticmethod
def find_department_name(url):
"""
Return the Department found at the given url
:param url: The url of the department.
:type url: URL
:return: The Department
:rtype: Department
"""
matches = DepartmentCalendar._department_name.\
findall(url.raw_html)
# only a single h1 tag in the html, and it is the department name
return Department(matches[0])
# please don't touch this regular expression without fully understanding it
# it has been adjusted after many iterations after finding strange
# formatting in the raw html, so any changes is not advised
# regular expression used to filter out the course data
regex = r'<a name="([A-Z]{3}\d\d\d[A-Z]\d)"><\/a><span class="strong">\1\s*(.*?)<\/span>(\s*<div>)?\s*(<\/p>)?\s*<\/?(p|div)(.*?)?>(.*?)<\/?(p|div)>(\s*<\/div>)?\s*(<p>)?(\s*<(p|div)>(.*?)<\/(p|div)>)?(\s*<(p|div)>(.*?)<\/(p|div)>)?\s*(<p>)?\s*(Exclusion:\s*(.*?)|Prerequisite:\s*(.*?)|Corequisite:\s*(.*?)|Recommended Preparation:\s*(.*?))?(\s*<br>\s*)?(Exclusion:\s*(.*?)|Prerequisite:\s*(.*?)|Corequisite:\s*(.*?)|Recommended Preparation:\s*(.*?))?(\s*<br>\s*)?(Exclusion:\s*(.*?)|Prerequisite:\s*(.*?)|Corequisite:\s*(.*?)|Recommended Preparation:\s*(.*?))?(\s*<br>\s*)?(Exclusion:\s*(.*?)|Prerequisite:\s*(.*?)|Corequisite:\s*(.*?)|Recommended Preparation:\s*(.*?))?(\s*<br>\s*)?\s*(Distribution Requirement Status:\s*(.*?)\s*)?(<br>)?\s*(Breadth Requirement:\s*(.*?)\s*)?(<br>|<\/?p>)'
_course = re.compile(regex, re.DOTALL)
def get_courses(self):
"""
Returns a list of all the courses in this Department Calendar.
:return: list of all courses in this DepartmentCalendar
:rtype: list[Course]
"""
# if the list has been generated
if self.courses:
return self.courses
# generate list if necessary
courses_data = DepartmentCalendar._course.findall(self.url.raw_html)
for course_data in courses_data:
self.courses.append(self._create_course(course_data))
return self.courses.copy()
def _create_course(self, data):
"""
Create a course object from the data extracted using the above regex
:param data: The data extracted using the above regex
:type data: tuple(str, ...)
:return: A course object
:rtype: Course
"""
# these numbers come from the group numbers from the regex above
# '_course' count them if you wanna
course_code = DepartmentCalendar._erase_html(data[0])
course_name = DepartmentCalendar._erase_html(data[1])
course_description = DepartmentCalendar._erase_html(
data[6] + data[11] + data[15])
exclusion = DepartmentCalendar._erase_html(
DepartmentCalendar._select_data(data, 20))
prerequisite = DepartmentCalendar._erase_html(
DepartmentCalendar._select_data(data, 21))
corequisite = DepartmentCalendar._erase_html(
DepartmentCalendar._select_data(data, 22))
recommended = DepartmentCalendar._erase_html(
DepartmentCalendar._select_data(data, 23))
distribution_requirement = DepartmentCalendar._erase_html(
data[44])
breath_requirement = DepartmentCalendar._erase_html(data[47])
print("found course: {}".format(course_code))
return Course(course_code, course_name, course_description,
exclusion, prerequisite, corequisite, recommended,
distribution_requirement, breath_requirement,
self.department)
    @staticmethod
    def _select_data(data, start):
result = ""
for i in range(4):
result += data[start + i * 6]
return result
_tags = re.compile('<.*?>', re.DOTALL)
@staticmethod
def _erase_html(data):
"""
Erases any remaining html tags in the text.
:param data: The raw data
:type data: str
:return: The data after removing remaining html tags
:rtype: str
"""
return DepartmentCalendar._tags.sub('', data)
| 2.03125 | 2 |
sarpy_gui_apps/apps/canvas_demo/panels/canvas_demo_button_panel.py | spowlas/sarpy | 0 | 16004 | from tkinter_gui_builder.panel_templates.widget_panel.widget_panel import AbstractWidgetPanel
from tkinter_gui_builder.widgets import basic_widgets
class CanvasDemoButtonPanel(AbstractWidgetPanel):
fname_select = basic_widgets.Button
zoom_in = basic_widgets.Button
zoom_out = basic_widgets.Button
rect_select = basic_widgets.Button
update_rect_image = basic_widgets.Button
pan = basic_widgets.Button
draw_line_w_drag = basic_widgets.Button
draw_line_w_click = basic_widgets.Button
draw_arrow_w_drag = basic_widgets.Button
draw_arrow_w_click = basic_widgets.Button
draw_rect_w_drag = basic_widgets.Button
draw_rect_w_click = basic_widgets.Button
draw_polygon_w_click = basic_widgets.Button
draw_point_w_click = basic_widgets.Button
modify_existing_shape = basic_widgets.Button
color_selector = basic_widgets.Button
save_kml = basic_widgets.Button
select_existing_shape = basic_widgets.Combobox # type: basic_widgets.Combobox
remap_dropdown = basic_widgets.Combobox # type: basic_widgets.Combobox
def __init__(self, parent):
AbstractWidgetPanel.__init__(self, parent)
controls = ["fname_select",
"zoom_in",
"zoom_out",
"pan",
"draw_line_w_drag",
"draw_line_w_click",
"draw_arrow_w_drag",
"draw_arrow_w_click",
"draw_rect_w_drag",
"draw_rect_w_click",
"draw_polygon_w_click",
"draw_point_w_click",
"select_existing_shape",
"modify_existing_shape",
"save_kml",
"color_selector",
"rect_select",
"update_rect_image",
"remap_dropdown"]
self.init_w_box_layout(controls, 4, column_widths=20)
self.remap_dropdown.update_combobox_values(["density",
"brighter",
"darker",
"high contrast",
"linear",
"log",
"pedf",
"nrl"])
self.set_label_text("taser buttons")
if __name__ == '__main__':
print(dir(AbstractWidgetPanel))
| 1.625 | 2 |
test.py | pnawalramka/cowin | 0 | 16132 | <reponame>pnawalramka/cowin
import json
from unittest import mock, TestCase
import check_availability
json_data = \
"""
{
"centers": [
{
"center_id": 1234,
"name": "District General Hostpital",
"name_l": "",
"address": "45 M G Road",
"address_l": "",
"state_name": "Maharashtra",
"state_name_l": "",
"district_name": "Satara",
"district_name_l": "",
"block_name": "Jaoli",
"block_name_l": "",
"pincode": "413608",
"lat": 28.7,
"long": 77.1,
"from": "09:00:00",
"to": "18:00:00",
"fee_type": "Free",
"vaccine_fees": [
{
"vaccine": "COVISHIELD",
"fee": "250"
}
],
"sessions": [
{
"session_id": "3fa85f64-5717-4562-b3fc-2c963f66afa6",
"date": "31-05-2021",
"available_capacity": 50,
"available_capacity_dose1": 25,
"available_capacity_dose2": 25,
"min_age_limit": 18,
"vaccine": "COVISHIELD",
"slots": [
"FORENOON",
"AFTERNOON"
]
}
]
}
]
}
"""
def mock_get(*args, **kwargs):
mock_res = mock.Mock()
mock_res.json.return_value = json.loads(json_data)
return mock_res
class TestCheck(TestCase):
@mock.patch('requests.get', side_effect=mock_get)
def test_check(self, mock_get):
got = check_availability.check('123', 18)
self.assertEqual(1, len(got))
self.assertEqual(1234, got[0]['center_id'])
| 1.804688 | 2 |
lims/models/shipping.py | razorlabs/BRIMS-backend | 1 | 16260 | from django.db import models
"""
ShipmentModels have a one to many relationship with boxes and aliquot
Aliquot and Box foreign keys to a ShipmentModel determine manifest contents
for shipping purposes (resolved in schema return for manifest view)
"""
class ShipmentModel(models.Model):
carrier = models.ForeignKey('CarrierModel',
on_delete=models.SET_NULL,
blank=True,
null=True)
shipment_number = models.CharField(max_length=255, blank=True, null=True)
# TODO What should we do if a destination is removed?
destination = models.ForeignKey('DestinationModel',
on_delete=models.SET_NULL,
blank=True,
null=True)
sent_date = models.DateTimeField(blank=True, null=True)
received_date = models.DateTimeField(blank=True, null=True)
notes = models.CharField(max_length=255, blank=True, null=True)
class DestinationModel(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
class CarrierModel(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
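# Illustrative usage sketch (not part of the original models; all field values invented):
#   carrier = CarrierModel.objects.create(name="FedEx")
#   destination = DestinationModel.objects.create(name="Central Lab")
#   shipment = ShipmentModel.objects.create(
#       carrier=carrier,
#       destination=destination,
#       shipment_number="SHP-0001",
#       notes="First manifest of the week",
#   )
#   Boxes and aliquot whose foreign keys point at `shipment` then make up its manifest.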
| 1.46875 | 1 |
test/test_vlan_group.py | nrfta/python-netbox-client | 0 | 16388 | <filename>test/test_vlan_group.py
# coding: utf-8
"""
NetBox API
API to access NetBox # noqa: E501
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import netbox_client
from netbox_client.models.vlan_group import VLANGroup # noqa: E501
from netbox_client.rest import ApiException
class TestVLANGroup(unittest.TestCase):
"""VLANGroup unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testVLANGroup(self):
"""Test VLANGroup"""
# FIXME: construct object with mandatory attributes with example values
# model = netbox_client.models.vlan_group.VLANGroup() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 1.085938 | 1 |
session11.py | sahanashetty31/session_11_epai3_assignment | 0 | 16516 | import math
from functools import lru_cache
class Polygon:
def __init__(self, n, R):
if n < 3:
raise ValueError('Polygon must have at least 3 vertices.')
self._n = n
self._R = R
def __repr__(self):
return f'Polygon(n={self._n}, R={self._R})'
@property
def count_vertices(self):
return self._n
@property
def count_edges(self):
return self._n
@property
def circumradius(self):
return self._R
@property
def interior_angle(self):
return (self._n - 2) * 180 / self._n
@property
def side_length(self):
return 2 * self._R * math.sin(math.pi / self._n)
@property
def apothem(self):
return self._R * math.cos(math.pi / self._n)
@property
def area(self):
return self._n / 2 * self.side_length * self.apothem
@property
def perimeter(self):
return self._n * self.side_length
def __eq__(self, other):
if isinstance(other, self.__class__):
return (self.count_edges == other.count_edges and self.circumradius == other.circumradius)
else:
return NotImplemented
def __gt__(self, other):
if isinstance(other, self.__class__):
return self.count_vertices > other.count_vertices
else:
return NotImplemented
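# Quick worked check of the formulas above (illustrative only): a regular hexagon with
# circumradius 1 has side length 2*sin(pi/6) ~ 1.0, apothem cos(pi/6) ~ 0.866 and
# interior angle (6 - 2) * 180 / 6 = 120 degrees, e.g. Polygon(6, 1).interior_angle == 120.0.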
class Polygons:
def __init__(self, m, R):
if m < 3:
            raise ValueError('m must be at least 3')
self._m = m
self._R = R
self._polygons = [Polygon(i, R) for i in range(3, m+1)]
def __len__(self):
return self._m - 2
def __repr__(self):
return f'Polygons(m={self._m}, R={self._R})'
def __getitem__(self, s):
return self._polygons[s]
def __iter__(self):
        return self.PolyIterator(self)
@property
def max_efficiency_polygon(self):
sorted_polygons = sorted(self._polygons,
key=lambda p: p.area/p.perimeter,
reverse=True)
return sorted_polygons[0]
class PolyIterator:
def __init__(self, poly_obj):
self._poly_obj = poly_obj
self._index = 0
def __iter__(self):
return self
def __next__(self):
if self._index >= len(self._poly_obj):
raise StopIteration
else:
item = self._poly_obj._polygons[self._index]
self._index += 1
return item | 2.921875 | 3 |
verbose.py | lowrey/myjsonstore | 1 | 16644 | import sys
verbose = False
def set_v(v):
global verbose
verbose = v
def print_v(s):
if verbose:
print(s)
def write_v(s):
if verbose:
sys.stdout.write(s)
| 1.179688 | 1 |
code/python3/index_values_with_geo.py | jaylett/xapian-docsprint | 47 | 16772 | #!/usr/bin/env python
import json
from support import parse_states
import sys
import xapian
def index(datapath, dbpath):
# Create or open the database we're going to be writing to.
db = xapian.WritableDatabase(dbpath, xapian.DB_CREATE_OR_OPEN)
# Set up a TermGenerator that we'll use in indexing.
termgenerator = xapian.TermGenerator()
termgenerator.set_stemmer(xapian.Stem("en"))
for fields in parse_states(datapath):
# 'fields' is a dictionary mapping from field name to value.
# Pick out the fields we're going to index.
name = fields.get('name', u'')
description = fields.get('description', u'')
motto = fields.get('motto', u'')
admitted = fields.get('admitted', None)
population = fields.get('population', None)
order = fields.get('order', u'')
# We make a document and tell the term generator to use this.
doc = xapian.Document()
termgenerator.set_document(doc)
# index each field with a suitable prefix
termgenerator.index_text(name, 1, 'S')
termgenerator.index_text(description, 1, 'XD')
termgenerator.index_text(motto, 1, 'XM')
# Index fields without prefixes for general search.
termgenerator.index_text(name)
termgenerator.increase_termpos()
termgenerator.index_text(description)
termgenerator.increase_termpos()
termgenerator.index_text(motto)
# Add document values.
if admitted is not None:
doc.add_value(1, xapian.sortable_serialise(int(admitted[:4])))
doc.add_value(2, admitted) # YYYYMMDD
if population is not None:
doc.add_value(3, xapian.sortable_serialise(int(population)))
### Start of example code.
midlat = fields['midlat']
midlon = fields['midlon']
if midlat and midlon:
doc.add_value(4, "%f,%f" % (float(midlat), float(midlon)))
### End of example code.
# Store all the fields for display purposes.
doc.set_data(json.dumps(fields))
# We use the order to ensure each object ends up in the
# database only once no matter how many times we run the
# indexer.
idterm = u"Q" + order
doc.add_boolean_term(idterm)
db.replace_document(idterm, doc)
if len(sys.argv) != 3:
print("Usage: %s DATAPATH DBPATH" % sys.argv[0])
sys.exit(1)
index(datapath = sys.argv[1], dbpath = sys.argv[2])
| 1.796875 | 2 |
sa/profiles/Alcatel/AOS/get_inventory.py | prorevizor/noc | 84 | 16900 | <filename>sa/profiles/Alcatel/AOS/get_inventory.py
# ----------------------------------------------------------------------
# Alcatel.AOS.get_inventory
# ----------------------------------------------------------------------
# Copyright (C) 2007-2014 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import re
# NOC modules
from noc.core.script.base import BaseScript
from noc.sa.interfaces.igetinventory import IGetInventory
class Script(BaseScript):
name = "Alcatel.AOS.get_inventory"
interface = IGetInventory
rx_ni = re.compile(
r"^\s+GBIC\s+(?P<int_number>\d+)\n"
r"\s+Manufacturer Name:\s+(?P<vendor>\S+)(|\s+),\n"
r"^\s+Part Number:\s+(?P<part_number>\S+)(|\s+),\n"
r"^\s+Hardware Revision:\s+(|(?P<hw_rev>\S+))(|\s+),\n"
r"^\s+Serial Number:\s+(?P<serial>\S+)(|\s+)(|\s+),\n",
re.IGNORECASE | re.MULTILINE | re.DOTALL,
)
def execute(self):
objects = []
# Chassis info
p = self.scripts.get_version()
objects += [
{
"type": "CHASSIS",
"number": None,
"vendor": "ALU",
"serial": p["attributes"].get("Serial Number"),
"description": "%s %s" % (p["vendor"], p["platform"]),
"part_no": p["platform"],
"revision": p["attributes"].get("HW version"),
"builtin": False,
}
]
# Transiver Detected
iface = self.cli("show ni")
for match in self.rx_ni.finditer(iface):
number = match.group("int_number")
# type = match.group("int")
# vendor = match.group("vendor")
serial = match.group("serial")
hw_rev = match.group("hw_rev")
if not hw_rev:
hw_rev = "None"
part_no = match.group("part_number")
if "XFP-10G-LR" in part_no:
part = "NoName | Transceiver | 10G | XFP LR"
elif "SFP-LX" in part_no:
part = "NoName | Transceiver | 1G | SFP LX"
elif "SFP-LH" in part_no:
part = "NoName | Transceiver | 1G | SFP LH"
elif "GLC-BX" in part_no:
part = "Cisco | Transceiver | 1G | GLC-BX-D"
else:
part = "NoName | Transceiver | 1G | SFP SX"
objects += [
{
"type": "XCVR",
"number": number,
"vendor": "NONAME",
"serial": serial,
"description": "SFP Transceiver " + part_no,
"part_no": [part],
"revision": hw_rev,
"builtin": False,
}
]
return objects
| 1.234375 | 1 |
Leetcode/89.grayCode.py | Song2017/Leetcode_python | 1 | 17028 | <filename>Leetcode/89.grayCode.py
class Solution:
'''
    Gray code is a binary numeral system in which two successive values differ in exactly one bit.
    Given a non-negative integer n representing the total number of bits, print its Gray code sequence; the sequence must begin with 0.
    Input: 2
    Output: [0,1,3,2]
    Explanation: 00 - 0, 01 - 1, 11 - 3, 10 - 2
'''
def grayCode(self, n: int):
        # Observe how the Gray code list for n bits relates to the list for n-1 bits:
        # prepend one binary digit -- a leading 0 keeps the previous values unchanged, and
        # a leading 1 adds 2**(n-1) to each element of the previous list (taken in reverse).
ans, cnt = [0], 0
while cnt < n:
ad = 2**cnt
tmp = list(map(lambda x: x ^ ad, ans))
tmp.reverse()
ans += tmp
cnt += 1
return ans
def grayCodeF(self, n: int):
        '''
        The key is the Gray code construction rule G(i) = i ^ (i // 2), i.e. i ^ (i >> 1);
        for example, with n = 3:
G(0) = 000
G(1) = 1 ^ 0 = 001 ^ 000 = 001
G(2) = 2 ^ 1 = 010 ^ 001 = 011
G(3) = 3 ^ 1 = 011 ^ 001 = 010
G(4) = 4 ^ 2 = 100 ^ 010 = 110
G(5) = 5 ^ 2 = 101 ^ 010 = 111
G(6) = 6 ^ 3 = 110 ^ 011 = 101
G(7) = 7 ^ 3 = 111 ^ 011 = 100
'''
return [i ^ i >> 1 for i in range(2**n)]
s = Solution()
print(s.grayCode(3))
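# Added for illustration: the formula-based variant yields the same sequence.
print(s.grayCodeF(3))  # [0, 1, 3, 2, 6, 7, 5, 4]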
| 2.359375 | 2 |
const.py | TakosukeGH/pmx_bone_importer | 0 | 17156 | ADDON_NAME = "pmx_bone_importer"
LOG_FILE_NAME = "pmx_bone_importer.log"
| -0.46875 | 0 |
aqg/utils/summarizer.py | Sicaida/Automatic_Question_Generation | 134 | 17284 | <gh_stars>100-1000
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals
from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
#from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.summarizers.lex_rank import LexRankSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
class TextSummarizer:
def __init__(self, count=10):
self.LANGUAGE = "czech"
self.SENTENCES_COUNT = count
def summarize_from_url(self,url):
parser = HtmlParser.from_url(url, Tokenizer(self.LANGUAGE))
stemmer = Stemmer(self.LANGUAGE)
summarizer = Summarizer(stemmer)
file_1 = open("summarizer_output.txt","w+")
file_2 = open("summarizer_output2.txt","w+")
for sentence in summarizer(parser.document, self.SENTENCES_COUNT):
file_2.write(str(sentence))
file_1.write(str(sentence))
file_1.write("\n")
file_1.close()
file_2.close()
def summarize_from_text(self,text):
parser = PlaintextParser.from_string(text, Tokenizer(self.LANGUAGE))
stemmer = Stemmer(self.LANGUAGE)
summarizer = Summarizer(stemmer)
file_1 = open("summarizer_output.txt","w+")
file_2 = open("summarizer_output2.txt","w+")
for sentence in summarizer(parser.document, self.SENTENCES_COUNT):
file_2.write(str(sentence))
file_1.write(str(sentence))
file_1.write("\n")
file_1.close()
file_2.close()
def summarize_from_file(self,file_name):
parser = PlaintextParser.from_file(file_name, Tokenizer(self.LANGUAGE))
stemmer = Stemmer(self.LANGUAGE)
summarizer = Summarizer(stemmer)
file_1 = open("summarizer_output.txt","w+")
file_2 = open("summarizer_output2.txt","w+")
for sentence in summarizer(parser.document, self.SENTENCES_COUNT):
file_2.write(str(sentence))
file_1.write(str(sentence))
file_1.write("\n")
file_1.close()
file_2.close()
# t = TextSummarizer()
# t.summarize_from_file("obama_short.txt")
# pdf = pdfgeneration()
# pdf.generate_pdf_summarizer("summarizer_output2.txt")
| 2.21875 | 2 |
app/email.py | DXYyang/shenNeng_gasAnalysis | 1 | 17412 | from threading import Thread
from flask import current_app,render_template
from flask_mail import Message
from . import mail
def send_async_email(app,msg):
with app.app_context():
mail.send(msg)
def send_email(to,subject,template,**kwargs):
app=current_app._get_current_object()
msg=Message(app.config['FLASKY_MAIL_SUBJECT_PREFIX']+' '+subject,
sender=app.config['FLASKY_MAIL_SENDER'],recipients=[to])
msg.body=render_template(template+'.txt',**kwargs)
msg.html=render_template(template+'.html',**kwargs)
thr=Thread(target=send_async_email,args=[app,msg])
thr.start()
return thr | 1.46875 | 1 |
src/masonite/contracts/AuthContract.py | holic-cl/masonite | 95 | 17540 | from abc import ABC as Contract, abstractmethod
class AuthContract(Contract):
@abstractmethod
def user(self):
pass
@abstractmethod
def save(self):
pass
@abstractmethod
def delete(self):
pass
| 1.335938 | 1 |
test/integrationMyndFskr.py | redhog/ferenda | 18 | 17668 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from builtins import *
import os
import sys
import shutil
import inspect
from ferenda import TextReader, util
from ferenda.testutil import RepoTester, file_parametrize
from ferenda.compat import unittest
# SUT
from ferenda.sources.legal.se import myndfskr
class Parse(RepoTester):
repoclass = myndfskr.MyndFskrBase # in some cases we might need to get a
# specific one like SOSFS, see below
aliases = {} # setUpClass fills this in
@classmethod
def setUpClass(cls):
super(Parse, cls).setUpClass()
# enumerate all classes defined in the module where
# MyndFskrBase is defined, check their static property 'alias'
# and use it to add to cls.aliases
for name, obj in inspect.getmembers(myndfskr):
if inspect.isclass(obj) and hasattr(obj, 'alias'):
cls.aliases[obj.alias] = obj
def parse_filename(self, filename):
# a few of the subclasses have specialized rules. make sure we
# instantiate the correct class
alias = os.path.basename(filename).split("-")[0]
basefile = os.path.splitext(
os.path.basename(filename))[0].replace("-",
"/", 1).replace("-", ":")
repoclass = self.aliases[alias]
repo = repoclass(datadir=self.datadir,
storelocation=self.datadir + "/ferenda.sqlite",
indexlocation=self.datadir + "/whoosh",)
return repo, basefile
def parametric_test(self, filename):
# these options adjusts the constructed URIs. by default, the
# official rpubl URIs are minted.
#
# self.repo.config.localizeuri = True
# self.repo.config.url = "http://example.org/"
# self.repo.config.urlpath = ''
# a few of the subclasses have specialized rules. make sure we
# instantiate the correct class
repo, basefile = self.parse_filename(filename)
doc = repo.make_document(basefile)
text = repo.sanitize_text(util.readfile(filename), basefile)
reader = TextReader(string=text, encoding='utf-8')
props = repo.extract_metadata(reader, basefile)
props = repo.sanitize_metadata(props, basefile)
resource = repo.polish_metadata(props, basefile)
repo.infer_metadata(resource, basefile)
wantfile = filename.replace(".txt", ".n3")
if os.path.exists(wantfile):
self.assertEqualGraphs(wantfile, resource.graph, exact=False)
else:
self.fail("Expected a %s with the following content:\n\n%s" %
(wantfile, doc.meta.serialize(format="n3").decode("utf-8")))
file_parametrize(Parse, "test/files/myndfskr", ".txt")
| 1.585938 | 2 |
object_detection/det_heads/retinaNet_head/retinanet_head.py | no-name-xiaosheng/PaddleViT | 993 | 17796 | <reponame>no-name-xiaosheng/PaddleViT<gh_stars>100-1000
# Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
import paddle.nn as nn
from paddle.nn.initializer import Normal, Constant
from retinanet_loss import RetinaNetLoss
from post_process import RetinaNetPostProcess
from det_utils.generator_utils import AnchorGenerator
class RetinaNetHead(nn.Layer):
'''
The head used in RetinaNet for object classification and box regression.
It has two subnets for the two tasks, with a common structure but separate parameters.
'''
def __init__(self, config):
'''
Args:
input_shape (List[ShapeSpec]): input shape.
num_classes (int): number of classes. Used to label background proposals.
num_anchors (int): number of generated anchors.
conv_dims (List[int]): dimensions for each convolution layer.
norm (str or callable):
Normalization for conv layers except for the two output layers.
See :func:`detectron2.layers.get_norm` for supported types.
loss_func (class): the class is used to compute loss.
prior_prob (float): Prior weight for computing bias.
'''
super(RetinaNetHead, self).__init__()
num_convs = config.RETINANET.NUM_CONVS
input_channels = config.RETINANET.INPUT_CHANNELS
norm = config.RETINANET.NORM
prior_prob = config.RETINANET.PRIOR_PROB
self.num_classes = config.RETINANET.NUM_CLASSES
self.get_loss = RetinaNetLoss(
focal_loss_alpha=config.RETINANET.FOCAL_LOSS_ALPHA,
focal_loss_gamma=config.RETINANET.FOCAL_LOSS_GAMMA,
smoothl1_loss_delta=config.RETINANET.SMOOTHL1_LOSS_DELTA,
positive_thresh=config.RETINANET.POSITIVE_THRESH,
negative_thresh=config.RETINANET.NEGATIVE_THRESH,
allow_low_quality=config.RETINANET.ALLOW_LOW_QUALITY,
num_classes=config.RETINANET.NUM_CLASSES,
weights=config.RETINANET.WEIGHTS
)
self.postprocess = RetinaNetPostProcess(
score_threshold=config.RETINANET.SCORE_THRESH,
keep_top_k=config.RETINANET.KEEP_TOPK,
nms_top_k=config.RETINANET.NMS_TOPK,
nms_threshold=config.RETINANET.NMS_THRESH,
bbox_reg_weights=config.RETINANET.WEIGHTS
)
self.anchor_generator = AnchorGenerator(anchor_sizes=config.RETINANET.ANCHOR_SIZE,
aspect_ratios=config.RETINANET.ASPECT_RATIOS,
strides=config.RETINANET.STRIDES,
offset=config.RETINANET.OFFSET)
num_anchors = self.anchor_generator.num_anchors
conv_dims = [input_channels] * num_convs
cls_net = []
reg_net = []
for in_channels, out_channels in zip(
[input_channels] + list(conv_dims), conv_dims
):
cls_net.append(
nn.Conv2D(in_channels, out_channels, kernel_size=3, stride=1, padding=1,
weight_attr=paddle.ParamAttr(initializer=Normal(mean=0., std=0.01)))
)
if norm == "bn":
cls_net.append(nn.BatchNorm2D(out_channels))
cls_net.append(nn.ReLU())
reg_net.append(
nn.Conv2D(in_channels, out_channels, kernel_size=3, stride=1, padding=1,
weight_attr=paddle.ParamAttr(initializer=Normal(mean=0., std=0.01)))
)
if norm == "bn":
reg_net.append(nn.BatchNorm2D(out_channels))
reg_net.append(nn.ReLU())
self.cls_net = nn.Sequential(*cls_net)
self.reg_net = nn.Sequential(*reg_net)
bias_value = -math.log((1 - prior_prob) / prior_prob)
self.cls_score = nn.Conv2D(
conv_dims[-1], num_anchors * self.num_classes, kernel_size=3, stride=1, padding=1,
weight_attr=paddle.ParamAttr(initializer=Normal(mean=0., std=0.01)),
bias_attr=paddle.ParamAttr(initializer=Constant(bias_value))
)
self.bbox_pred = nn.Conv2D(
conv_dims[-1], num_anchors * 4, kernel_size=3, stride=1, padding=1,
weight_attr=paddle.ParamAttr(initializer=Normal(mean=0., std=0.01))
)
def forward(self, feats, inputs):
'''
Returns:
loss_dict (dict) | pred_result(tensor), bbox_num(tensor):
loss_dict: contains cls_losses and reg_losses.
pred_result: the shape is [M, 6], M is the number of final preds,
Each row has 6 values: [label, score, xmin, ymin, xmax, ymax]
bbox_num: the shape is [N], N is the num of batch_size,
bbox_num[i] means the i'th img have bbox_num[i] boxes.
'''
anchors = self.anchor_generator(feats)
pred_scores = []
pred_boxes = []
for feat in feats:
pred_scores.append(self.cls_score(self.cls_net(feat)))
pred_boxes.append(self.bbox_pred(self.reg_net(feat)))
pred_scores_list = [
transpose_to_bs_hwa_k(s, self.num_classes) for s in pred_scores
]
pred_boxes_list = [
transpose_to_bs_hwa_k(s, 4) for s in pred_boxes
]
if self.training:
anchors = paddle.concat(anchors)
loss_dict = self.get_loss(anchors, [pred_scores_list, pred_boxes_list], inputs)
return loss_dict
else:
img_whwh = paddle.concat([inputs["imgs_shape"][:, 1:2],
inputs["imgs_shape"][:, 0:1]], axis=-1)
pred_result, bbox_num = self.postprocess(
pred_scores_list,
pred_boxes_list,
anchors,
inputs["scale_factor_wh"],
img_whwh
)
return pred_result, bbox_num
def transpose_to_bs_hwa_k(tensor, k):
assert tensor.dim() == 4
bs, _, h, w = tensor.shape
tensor = tensor.reshape([bs, -1, k, h, w])
tensor = tensor.transpose([0, 3, 4, 1, 2])
return tensor.reshape([bs, -1, k])
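# --- Illustrative shape check (added sketch, not part of the original PaddleViT file) ---
# transpose_to_bs_hwa_k flattens one FPN level's predictions from [bs, A*K, H, W]
# into [bs, H*W*A, K]. The sizes below (batch=2, A=9 anchors, K=80 classes, 32x32
# feature map) are arbitrary example values, not values taken from any config.
if __name__ == '__main__':
    dummy = paddle.zeros([2, 9 * 80, 32, 32])   # [bs, A*K, H, W], as produced by cls_score
    flat = transpose_to_bs_hwa_k(dummy, 80)     # -> [bs, H*W*A, K]
    print(flat.shape)                           # expected: [2, 9216, 80]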
| 1.664063 | 2 |
telemanom/_globals.py | tonyzeng2019/telemanom | 0 | 17924 | #!/usr/bin/env python
# coding: utf-8
import yaml
import json
import sys
import os
sys.path.append('../venv/lib/python3.5/site-packages')
from elasticsearch import Elasticsearch
sys.path.append('../telemanom')
class Config:
'''Loads parameters from config.yaml into global object'''
def __init__(self, path_to_config):
if os.path.isfile(path_to_config):
pass
else:
path_to_config = '../%s' %path_to_config
setattr(self, "path_to_config", path_to_config)
dictionary = None
with open(path_to_config, "r") as f:
            # use safe_load: yaml.load without a Loader is deprecated/removed in newer PyYAML
            dictionary = yaml.safe_load(f.read())
try:
for k,v in dictionary.items():
setattr(self, k, v)
except:
for k,v in dictionary.iteritems():
setattr(self, k, v)
def build_group_lookup(self, path_to_groupings):
channel_group_lookup = {}
with open(path_to_groupings, "r") as f:
groupings = json.loads(f.read())
for subsystem in groupings.keys():
for subgroup in groupings[subsystem].keys():
for chan in groupings[subsystem][subgroup]:
channel_group_lookup[chan["key"]] = {}
channel_group_lookup[chan["key"]]["subsystem"] = subsystem
channel_group_lookup[chan["key"]]["subgroup"] = subgroup
return channel_group_lookup | 1.632813 | 2 |
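# --- Usage sketch (added for illustration; file names and keys are assumptions) ---
# Every top-level key in config.yaml becomes an attribute on the Config object:
#
#   config = Config("config.yaml")                    # e.g. config.batch_size, ...
#   lookup = config.build_group_lookup("groupings.json")
#   # lookup["CHAN-1"] -> {"subsystem": ..., "subgroup": ...}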
setup.py | leandron/steinlib | 4 | 18052 | from setuptools import setup
tests_require = [
'cov-core',
'mock',
'nose2',
]
setup(name='steinlib',
version='0.1',
description='Python bindings for Steinlib format.',
url='http://github.com/leandron/steinlib',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
packages=['steinlib'],
tests_require=tests_require,
test_suite='nose2.collector.collector',
zip_safe=False)
| 0.871094 | 1 |
komapy/decorators.py | bpptkg/komapy | 0 | 18180 | from functools import partial
class counter:
"""
A counter decorator to track how many times a function is called.
"""
def __init__(self, func):
self.func = func
self.count = 0
def __call__(self, *args, **kwargs):
self.count += 1
return self.func(*args, **kwargs)
def register_as_decorator(func):
"""
Register extensions, transforms, or addons function as decorator.
"""
def wrapper(*args, **kwargs):
        # If fewer than two positional args are given, the caller only supplied
        # the function name (decorator form), so return a partial to be applied
        # later. Otherwise call the wrapped function immediately (direct form).
if len(args) < 2:
return partial(func, *args, **kwargs)
return partial(func, *args, **kwargs)()
return wrapper
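# --- Hedged usage sketch (added; the names below are invented for illustration) ---
# `counter` records how often a function is called; `register_as_decorator` lets a
# registration function be used both as `@register("name")` and as a direct call
# `register("name", resolver)`.
if __name__ == '__main__':
    @counter
    def ping():
        return 'pong'

    ping(); ping()
    print(ping.count)  # -> 2

    @register_as_decorator
    def register(name, resolver, **kwargs):
        print('registering', name, '->', resolver.__name__)
        return resolver

    @register('demo')                    # decorator form: partial(register, 'demo')
    def demo_resolver(series):
        return series

    register('direct', demo_resolver)    # direct form: called immediately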
| 2.59375 | 3 |
occam_utils/occam_datasets.py | dschinagl/occam | 1 | 18308 | <gh_stars>1-10
import numpy as np
import torch
from spconv.pytorch.utils import PointToVoxel
from scipy.spatial.transform import Rotation
from pcdet.datasets import DatasetTemplate
class BaseDataset(DatasetTemplate):
"""
OpenPCDet dataset to load and preprocess the point cloud
"""
def __init__(self, data_config, class_names, occam_config):
"""
Parameters
----------
data_config : EasyDict
dataset cfg including data preprocessing properties (OpenPCDet)
class_names :
list of class names (OpenPCDet)
occam_config: EasyDict
sampling properties for attribution map generation, see cfg file
"""
super().__init__(dataset_cfg=data_config, class_names=class_names,
training=False)
self.occam_config = occam_config
def load_and_preprocess_pcl(self, source_file_path):
"""
load given point cloud file and preprocess data according OpenPCDet cfg
Parameters
----------
source_file_path : str
path to point cloud to analyze (bin or npy)
Returns
-------
pcl : ndarray (N, 4)
preprocessed point cloud (x, y, z, intensity)
"""
if source_file_path.split('.')[-1] == 'bin':
points = np.fromfile(source_file_path, dtype=np.float32)
points = points.reshape(-1, 4)
elif source_file_path.split('.')[-1] == 'npy':
points = np.load(source_file_path)
else:
raise NotImplementedError
# FOV crop is usually done using the image
if self.occam_config.FOV_CROP:
angles = np.abs(np.degrees(np.arctan2(points[:, 1], points[:, 0])))
mask = angles <= self.occam_config.FOV_ANGLE
points = points[mask, :]
input_dict = {
'points': points
}
data_dict = self.prepare_data(data_dict=input_dict)
pcl = data_dict['points']
return pcl
class OccamInferenceDataset(DatasetTemplate):
"""
OpenPCDet dataset for occam inference; in each iteration a sub-sampled
point cloud according occam config is generated
"""
def __init__(self, data_config, class_names, occam_config, pcl, nr_it, logger):
"""
Parameters
----------
data_config : EasyDict
dataset cfg including data preprocessing properties (OpenPCDet)
class_names :
list of class names (OpenPCDet)
occam_config: EasyDict
sampling properties for attribution map generation, see cfg file
pcl : ndarray (N, 4)
preprocessed full point cloud
nr_it : int
number of sub-sampling iterations
logger : Logger
"""
super().__init__(
dataset_cfg=data_config, class_names=class_names, training=False,
root_path=None, logger=logger
)
self.occam_config = occam_config
self.pcl = pcl
self.logger = logger
self.nr_it = nr_it
self.sampling_rand_rot = self.occam_config.SAMPLING.RANDOM_ROT
self.sampling_vx_size = np.array(self.occam_config.SAMPLING.VOXEL_SIZE)
self.lbda = self.occam_config.SAMPLING.LAMBDA # see paper
self.sampling_density_coeff = np.array(
self.occam_config.SAMPLING.DENSITY_DISTR_COEFF)
self.sampling_range = self.get_sampling_range(
rand_rot=self.sampling_rand_rot,
pcl=self.pcl,
vx_size=self.sampling_vx_size
)
self.voxel_generator = PointToVoxel(
vsize_xyz=list(self.sampling_vx_size),
coors_range_xyz=list(self.sampling_range),
num_point_features=3,
max_num_points_per_voxel=self.occam_config.SAMPLING.MAX_PTS_PER_VOXEL,
max_num_voxels=self.occam_config.SAMPLING.MAX_VOXELS
)
def get_sampling_range(self, rand_rot, pcl, vx_size):
"""
compute min/max sampling range for given random rotation
Parameters
----------
rand_rot : float
max random rotation before sampling (+/-) in degrees
pcl : ndarray (N, 4)
full point cloud
vx_size : ndarray (3)
voxel size for sampling in x, y, z
Returns
-------
sampling_range : ndarray (6)
min/max sampling range for given rotation
"""
rotmat_pos = Rotation.from_rotvec([0, 0, rand_rot], degrees=True)
rotmat_neg = Rotation.from_rotvec([0, 0, -rand_rot], degrees=True)
rot_pts = np.concatenate(
(np.matmul(rotmat_pos.as_matrix(), pcl[:, :3].T),
np.matmul(rotmat_neg.as_matrix(), pcl[:, :3].T)), axis=1)
min_grid = np.floor(np.min(rot_pts, axis=1) / vx_size) * vx_size - vx_size
max_grid = np.ceil(np.max(rot_pts, axis=1) / vx_size) * vx_size + vx_size
sampling_range = np.concatenate((min_grid, max_grid))
return sampling_range
def __len__(self):
return self.nr_it
def __getitem__(self, index):
if index == self.nr_it:
raise IndexError
# randomly rotate and translate full pcl
rand_transl = np.random.rand(1, 3) * (self.sampling_vx_size[None, :])
rand_transl -= self.sampling_vx_size[None, :] / 2
rand_rot_ = np.random.rand(1) * self.sampling_rand_rot * 2 \
- self.sampling_rand_rot
rand_rot_mat = Rotation.from_rotvec([0, 0, rand_rot_[0]], degrees=True)
rand_rot_mat = rand_rot_mat.as_matrix()
rand_rot_pcl = np.matmul(rand_rot_mat, self.pcl[:, :3].T).T
rand_rot_transl_pcl = rand_rot_pcl + rand_transl
rand_rot_transl_pcl = np.ascontiguousarray(rand_rot_transl_pcl)
# voxelixe full pcl
_, vx_coord, _, pt_vx_id = self.voxel_generator.generate_voxel_with_id(
torch.from_numpy(rand_rot_transl_pcl))
vx_coord, pt_vx_id = vx_coord.numpy(), pt_vx_id.numpy()
vx_coord = vx_coord[:, [2, 1, 0]]
# compute voxel center in original pcl
vx_orig_coord = vx_coord * self.sampling_vx_size[None, :]
vx_orig_coord += self.sampling_range[:3][None, :]
vx_orig_coord += self.sampling_vx_size[None, :] / 2
vx_orig_coord -= rand_transl
vx_orig_coord = np.matmul(np.linalg.inv(rand_rot_mat), vx_orig_coord.T).T
vx_dist = np.linalg.norm(vx_orig_coord, axis=1)
vx_keep_prob = self.lbda * (
np.power(vx_dist, 2) * self.sampling_density_coeff[0]
+ vx_dist * self.sampling_density_coeff[1]
+ self.sampling_density_coeff[2])
vx_keep_ids = np.where(np.random.rand(vx_keep_prob.shape[0]) < vx_keep_prob)[0]
pt_keep_mask = np.in1d(pt_vx_id, vx_keep_ids)
input_dict = {
'points': self.pcl[pt_keep_mask, :],
'mask': pt_keep_mask
}
data_dict = self.prepare_data(data_dict=input_dict)
return data_dict
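# --- Hedged wiring sketch (added; config objects and paths below are assumptions) ---
# BaseDataset prepares the full point cloud once, OccamInferenceDataset then yields
# `nr_it` randomly sub-sampled versions of it for attribution-map estimation:
#
#   base = BaseDataset(cfg.DATA_CONFIG, cfg.CLASS_NAMES, cfg.OCCAM)
#   pcl = base.load_and_preprocess_pcl('path/to/frame.bin')
#   infer_ds = OccamInferenceDataset(cfg.DATA_CONFIG, cfg.CLASS_NAMES, cfg.OCCAM,
#                                    pcl, nr_it=3000, logger=logger)
#   loader = torch.utils.data.DataLoader(infer_ds, batch_size=8,
#                                        collate_fn=infer_ds.collate_batch)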
| 2.0625 | 2 |
application.py | milindvb/python-docs-hello-world | 0 | 18436 | from flask import Flask
# import pyodbc
app = Flask(__name__)
@app.route("/")
def hello():
# Some other example server values are
# server = 'localhost\sqlexpress' # for a named instance
# server = 'myserver,port' # to specify an alternate port
# server = 'tcp:mytest.centralus.cloudapp.azure.com'
# database = 'test'
# username = 'ndb'
# password = '<PASSWORD>###'
# cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER='+server+';DATABASE='+database+';UID='+username+';PWD='+ password)
# cursor = cnxn.cursor()
# cursor.execute('SELECT * FROM dbo.Users')
# s = ' '
# for row in cursor:
# s += ''.join(row)
# print(row)
s = '!! Azure'
return "hello"+s
| 1.367188 | 1 |
helper/validation_scripts/launch-lm-profile.py | NanoMembers/DeepFlow | 3 | 18564 | <gh_stars>1-10
#!/tools/python/python3.8.3/bin/python
import os
import shutil
import subprocess
import numpy as np
batch_list=[i*1024 for i in range(2,7)]
seq_list=[10]
hidden_list=[i*1024 for i in range(2,7)]
vocab_list=[2048] #[int(i) for i in (2**np.linspace(10,13,20)//2*2)]
layer_list=[1]
bpe_list=[10]
epoch_list=[3]
def run_command(cmd, var, result):
try:
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True).decode("utf-8")
output = output.strip().replace(',','')
result[var] = float(output) if output != "" else output
except:
print("command for {} did not work".format(var))
output_dir="/mnt/home/newsha/baidu/developement/MechaFlow/validation/benchmarks/rnnlm/profile_gemm"
result_file="{}/result.csv".format(output_dir)
if os.path.exists(output_dir):
shutil.rmtree(output_dir, ignore_errors=True)
os.makedirs(output_dir)
print("Created {}".format(output_dir))
with open(result_file, "w") as f:
f.write("Batch Seq Hidden Vocab Layers Epoch BPE core_util dram_util l2_util dram_read dram_write l2_access fp16_inst fma_inst\n\n")
print("Batch Seq Hidden Vocab Layers Epoch BPE core_util dram_util l2_util dram_read dram_write l2_access fp16_inst fma_inst\n\n")
for b in batch_list:
for s in seq_list:
for d in hidden_list:
for v in vocab_list:
for l in layer_list:
for bpe in bpe_list:
for e in epoch_list:
bpe = min(bpe, 25000//b)
fname = "B{}-S{}-D{}-V{}-L{}-E{}-P{}".format(b,s,d,v,l,e,bpe)
output_file = "{}/{}.out".format(output_dir, fname)
command1="/tools/cuda/cuda-11.0.1/bin/ncu --metrics \"regex:.*\" -k volta_fp16_s884gemm_fp16_... -s 0 -c 1 '/tools/venvs/tensorflow/tensorflow-2.2.0/bin/python' lm-fp16.py -m train -train data/test-index.txt -test data/test-index.txt -valid data/test-index.txt -b{} -s{} -d{} -v{} -l{} -p{} -e{} > {} 2>&1".format(b, s, d, v, l, bpe, e, output_file)
#command1 = "/tools/cuda/cuda-11.0.1/bin/nsys profile -t cuda,osrt,cudnn,cublas,nvtx,mpi -o profile/{} --stats=true -f true python lm-fp16.py -b{} -s{} -d{} -v{} -l{} -p{} -e{} -m train -train data/test-index.txt -test data/test-index.txt -valid data/test-index.txt > {} 2>&1".format(fname, b, s, d, v, l, bpe, e, output_file)
command2 = "cat {} | grep \"sm__pipe_tensor_op_hmma_cycles_active.avg.pct_of_peak_sustained_active\"| awk {{'print $3'}}".format(output_file) #unit
command3 = "cat {} | grep \"dram__throughput.avg.pct_of_peak_sustained_active\"| awk {{'print $3'}}".format(output_file) #unit
command4 = "cat {} | grep lts__t_sectors.avg.pct_of_peak_sustained_active | awk {{'print $3'}}".format(output_file) #unit
command5 = "cat {} | grep dram_read_bytes | grep sum | head -n 1 | awk {{'print $3'}}".format(output_file) #unit
command6 = "cat {} | grep dram_write_bytes | grep sum | head -n 1 | awk {{'print $3'}}".format(output_file) #unit
command7 = "cat {} | grep lts__t_bytes.sum | head -n 1 | awk {{'print $3'}}".format(output_file) #unit
command8 = "cat {} | grep sm__sass_thread_inst_executed_op_fp16_pred_on.sum | head -n 1 | awk {{'print $3'}}".format(output_file) #unit
command9 = "cat {} | grep sm__sass_thread_inst_executed_ops_fadd_fmul_ffma_pred_on.sum | head -n 1 | awk {{'print $3'}}".format(output_file) #unit
result = {'ncu':-1, 'core_util':-1, 'dram_util':-1,
'l2_util':-1, 'dram_read':-1, 'dram_write':-1,
'l2_access':-1, 'fp16_inst':-1, 'fma_inst':-1}
run_command(command1, 'ncu', result)
run_command(command2, 'core_util', result)
run_command(command3, 'dram_util', result)
run_command(command4, 'l2_util', result)
run_command(command5, 'dram_read', result)
run_command(command6, 'dram_write', result)
run_command(command7, 'l2_access', result)
run_command(command8, 'fp16_inst', result)
run_command(command9, 'fma_inst', result)
with open(result_file, "a+") as f:
f.write("{0:d} {1:d} {2:d} {3:d} {4:d} {5:d} {6:d} {7:.2f} {8:.2f} {9:.2f} {10:,} {11:,} {12:,} {13:,} {14:,}\n".format(b, s, d, v, l, e, bpe, result['core_util'], result['dram_util'], result['l2_util'], result['dram_read'], result['dram_write'], result['l2_access'], int(result['fp16_inst']), int(result['fma_inst'])))
print("{0:d} {1:d} {2:d} {3:d} {4:d} {5:d} {6:d} {7:.2f} {8:.2f} {9:.2f} {10:,} {11:,} {12:,} {13:,} {14:,}\n".format(b, s, d, v, l, e, bpe, result['core_util'], result['dram_util'], result['l2_util'], result['dram_read'], result['dram_write'], result['l2_access'], int(result['fp16_inst']), int(result['fma_inst'])))
| 1.554688 | 2 |
capstone/rl/utils/linear_annealing.py | davidrobles/mlnd-capstone-code | 2 | 18692 | from .callbacks import Callback
class LinearAnnealing(Callback):
def __init__(self, obj, param, init, final, n_episodes):
self.doing = 'inc' if init < final else 'dec'
self.obj = obj
self.param = param
self.init = init
self.final = final
self.n_episodes = n_episodes
self.change_rate = (final - init) / n_episodes
def on_episode_end(self, episode, qf):
if ((self.doing == 'inc' and getattr(self.obj, self.param) < self.final) or
(self.doing == 'dec' and getattr(self.obj, self.param) > self.final)):
prev = getattr(self.obj, self.param)
setattr(self.obj, self.param, prev + self.change_rate)
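# --- Hedged usage sketch (added; the agent object and qf are placeholders) ---
# Anneal an exploration rate from 1.0 down to 0.1 over 100 episodes:
#
#   anneal = LinearAnnealing(agent, 'epsilon', init=1.0, final=0.1, n_episodes=100)
#   for episode in range(100):
#       ...run the episode...
#       anneal.on_episode_end(episode, qf)
#
# Each call moves agent.epsilon by (final - init) / n_episodes until the target is
# reached, in whichever direction init and final imply.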
| 1.90625 | 2 |
mini_cluster_07.py | jgpattis/Desres-sars-cov-2-apo-mpro | 0 | 18820 | <filename>mini_cluster_07.py
#cluster data into a small amount of clusters to later pull out structures
import pyemma.coordinates as coor
import numpy as np
sys = 'back'
tica_data = coor.load('tica_data_05/back_tica_data.h5')
n_clusters = 50
cl = coor.cluster_kmeans(tica_data, k=n_clusters, max_iter=50)
cl.save(f'{sys}_{n_clusters}_mini_cluster_object.h5', overwrite=True)
cl.write_to_hdf5(f'{sys}_{n_clusters}_cluster_dtrajs.h5')
| 1.898438 | 2 |
resources.py | slowiklukasz/qgis-inventories | 0 | 18948 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.15.2)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x02\x05\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x20\x00\x00\x00\x20\x08\x06\x00\x00\x00\x73\x7a\x7a\xf4\
\x00\x00\x00\x01\x73\x52\x47\x42\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04\x67\x41\x4d\x41\x00\x00\xb1\x8f\x0b\xfc\x61\x05\x00\x00\x00\
\x09\x70\x48\x59\x73\x00\x00\x12\x74\x00\x00\x12\x74\x01\xde\x66\
\x1f\x78\x00\x00\x01\x9a\x49\x44\x41\x54\x58\x47\xc5\x94\x3b\x4e\
\x03\x41\x10\x44\x7d\x01\x22\x12\x02\x9c\x20\x0e\x40\xc2\x2d\xe0\
\x42\xdc\x84\x63\x10\x70\x25\x32\x62\x42\xa3\xb2\x54\xab\x47\x6f\
\xf5\x78\x96\x9f\x83\x27\xe1\xe9\xea\xee\xb7\xe3\xc5\xbb\xd7\xb7\
\xfd\xe1\x9c\x4c\x0b\xdc\x3f\xdd\xc5\x73\x32\x93\xa9\x4c\x09\x68\
\xb0\x49\x75\x31\x93\x49\xfc\x89\xc0\xe3\xf3\x65\xcc\x24\x4e\x0a\
\x6c\x19\xcc\xec\xcd\xcb\xc3\x42\xca\x9a\x4d\x02\xa9\x4e\x98\x95\
\xec\xc5\xc7\xd5\x91\x91\xc4\xbf\x08\x8c\x24\x86\x02\x75\x60\xca\
\x54\xd8\xf3\xab\x02\xa9\x9e\x60\xcf\xd9\x05\xfc\x35\x74\xcb\xdf\
\xaf\x6f\xd7\x02\x0a\x8b\x3a\xa8\xe6\x46\xb0\x77\xb4\x7c\x25\xa0\
\xb0\xaf\x8c\x43\x98\x99\xe1\x54\xaf\x97\xeb\xef\x45\x80\xcb\xab\
\x40\xf7\x14\x1d\xec\x4d\x75\x2f\x17\x51\x80\x03\x74\xfd\x3f\x11\
\x10\xac\xf1\xe9\xc5\x49\x01\x7d\xde\x2a\x20\x38\x43\xfd\xa2\x2e\
\x17\xab\x77\x80\x8d\x6e\x66\x66\x16\xce\xf0\x62\x51\xe7\x7d\x11\
\x10\x6c\xdc\xfa\xf6\x13\xce\x11\x5a\xee\x1b\xa6\xc4\x50\xa0\xd6\
\xcc\x4c\x46\x30\xe7\x1b\x18\x0a\xb0\x41\xb0\xd6\x65\xba\x9c\x60\
\x46\x8b\x2d\xc1\x4c\x2b\x90\xae\x9f\xf5\x4a\xcd\xa6\xbc\x9e\xbc\
\x4a\xb4\x02\x3c\xaf\xb5\x0e\xe6\xb5\x44\x0f\x91\xea\x94\x58\x04\
\x18\x64\x38\xd5\x7c\x3b\x75\x81\xe1\x02\x9e\x73\xa6\x33\x51\x80\
\xd7\xcf\x73\xe1\x73\xd3\x49\xb8\x9e\xce\x4c\x2b\x90\xce\x78\x5e\
\x19\x49\xd4\x5a\xed\x3d\x0a\x30\xe0\xa7\xe7\x99\x60\x93\xd0\x0b\
\x45\xd4\xd7\x89\x90\x3a\x67\x25\x50\x3f\xfb\x8c\x68\xa1\x7f\x54\
\xcc\xac\x44\x9d\xb5\x12\xa8\xd4\x86\xb4\xdc\xa8\xa6\xcc\x16\x89\
\x5d\x0a\x18\x06\xcd\x8c\x80\x18\xdd\x06\xe7\xb5\x02\x0c\x91\x59\
\x01\xd1\x49\x30\x13\xbf\x02\x06\x12\x49\xa2\x2e\x37\x49\x82\xf5\
\xe5\xdf\x70\x2b\x5a\x48\x52\x66\x86\x6f\x0b\xfc\x0e\xfb\xc3\x27\
\x2f\x90\x9e\xc6\xb7\x8c\xf7\x21\x00\x00\x00\x00\x49\x45\x4e\x44\
\xae\x42\x60\x82\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x13\
\x0e\xb7\x46\xa2\
\x00\x69\
\x00\x6e\x00\x76\x00\x65\x00\x6e\x00\x74\x00\x6f\x00\x72\x00\x79\x00\x5f\x00\x76\x00\x61\x00\x6c\x00\x69\x00\x64\x00\x61\x00\x74\
\x00\x6f\x00\x72\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x40\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x40\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x7e\xb7\x66\x8e\xd2\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 0.863281 | 1 |
bfgame/components/equipment.py | ChrisLR/BasicDungeonRL | 3 | 19076 | <gh_stars>1-10
from bflib import units
from core import contexts
from core.components import Component, listing
from core.messaging import StringBuilder, Actor, Target, Verb
@listing.register
class Equipment(Component):
NAME = "equipment"
__slots__ = ["armor_restrictions", "weapon_restrictions", "weapon_size_restrictions",
"wear_locations", "wield_locations", "empty_wield_locations"
"worn_items", "wielded_items"]
"""
    This component attaches itself to anything with a body.
    It represents equipment worn or wielded.
"""
def __init__(self):
super().__init__()
self.armor_restrictions = None
self.weapon_restrictions = None
self.weapon_size_restrictions = None
def on_register(self, host):
super().on_register(host)
host_restrictions = self.host.restrictions
if host_restrictions:
self.armor_restrictions = host_restrictions.armor
self.weapon_restrictions = host_restrictions.weapons
self.weapon_size_restrictions = host_restrictions.weapon_size
def copy(self):
return Equipment()
def remove(self, item):
found_slots = False
for item_slot in self.get_worn_item_slots():
if item_slot.item == item:
found_slots = True
item_slot.item = None
if found_slots:
return True
for item_slot in self.get_wielded_grasp_slots():
if item_slot.item == item:
item_slot.item = None
found_slots = True
if found_slots:
return True
return False
def wear(self, item):
if self.armor_restrictions and not self.armor_restrictions.can_wear(item.base):
return False
if not item.wearable:
return False
empty_item_slots = self.get_empty_item_slots()
for wear_location_set in item.wearable.wear_locations:
if hasattr(wear_location_set, '__iter__'):
# Multiple Location Slot
for slot in wear_location_set:
proper_slot = next((item_slot for item_slot in empty_item_slots
if item_slot.keyword == slot), None)
if proper_slot is not None:
proper_slot.item = item
else:
return False
context = contexts.Action(self.host, item)
message = StringBuilder(Actor, Verb("wear", Actor), Target, ".")
self.host.game.echo.see(self.host, message, context)
return True
else:
# Single Location Slot
proper_slot = next((item_slot for item_slot in empty_item_slots
if item_slot.keyword == wear_location_set), None)
if proper_slot is not None:
proper_slot.item = item
context = contexts.Action(self.host, item)
message = StringBuilder(Actor, Verb("wear", Actor), Target, ".")
self.host.game.echo.see(self.host, message, context)
return True
return False
def wield(self, item):
if self.weapon_restrictions and not self.weapon_restrictions.can_wield(item.base):
return False
hands = 1
if self.weapon_size_restrictions:
keyword = self.weapon_size_restrictions.can_wield(item.base)
if not keyword:
return False
else:
if keyword == self.weapon_size_restrictions.keywords.NeedsTwoHands:
hands = 2
empty_grasp_slots = self.get_empty_grasp_slots()
if len(empty_grasp_slots) >= hands:
while hands > 0:
item_slot = empty_grasp_slots.pop(0)
item_slot.item = item
hands -= 1
context = contexts.Action(self.host, item)
message = StringBuilder(Actor, Verb("wield", Actor), Target, ".")
self.host.game.echo.see(self.host, message, context)
return True
return False
def get_melee_total_armor_class(self):
all_items = self.get_all_items()
armor_ac = sum([item.armor.armor_class for item in all_items if item.armor])
shield_ac = sum([item.shield.armor_class_melee for item in all_items if item.shield])
return armor_ac + shield_ac
def get_ranged_total_armor_class(self):
all_items = self.get_all_items()
armor_ac = sum([item.armor.armor_class for item in all_items if item.armor])
shield_ac = sum([item.shield.armor_class_missile for item in all_items if item.shield])
return armor_ac + shield_ac
def get_all_items(self):
items = self.get_worn_items()
items.extend(self.get_wielded_items())
return items
def get_empty_item_slots(self):
body_parts = self.host.body.get_body_parts()
return [item_slot for body_part in body_parts for item_slot in body_part.item_slots if not item_slot.item]
def get_empty_grasp_slots(self):
body_parts = self.host.body.get_body_parts()
return [item_slot for body_part in body_parts for item_slot in body_part.grasp_slots if not item_slot.item]
def get_worn_items(self):
return [item_slot.item for item_slot in self.get_worn_item_slots()]
def get_worn_item_slots(self):
body_parts = self.host.body.get_body_parts()
return [item_slot for body_part in body_parts for item_slot in body_part.item_slots if item_slot.item]
def get_wielded_items(self):
return [item_slot.item for item_slot in self.get_wielded_grasp_slots()]
def get_wielded_grasp_slots(self):
body_parts = self.host.body.get_body_parts()
return [grasp_slot for body_part in body_parts for grasp_slot in body_part.grasp_slots if grasp_slot.item]
def get_load_of_worn_items(self):
worn_items = self.get_worn_items()
total_weight = units.Pound(0)
for item in worn_items:
total_weight += item.weight.score
return total_weight
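# --- Hedged illustration (added; plate/shield values are stand-ins, not bflib data) ---
# How the two armor-class totals combine: armor counts for both, a shield may protect
# differently against melee and missile attacks.
#
#   plate:  item.armor.armor_class = 6, item.shield = None
#   shield: item.armor = None, item.shield.armor_class_melee = 1,
#                              item.shield.armor_class_missile = 2
#
#   get_melee_total_armor_class()  -> 6 + 1 = 7
#   get_ranged_total_armor_class() -> 6 + 2 = 8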
| 2.25 | 2 |
hcat/backends/spatial_embedding.py | buswinka/hcat | 4 | 19204 | import torch
import hcat.lib.functional
from hcat.lib.functional import IntensityCellReject
from hcat.backends.backend import Backend
from hcat.models.r_unet import embed_model as RUnet
from hcat.train.transforms import median_filter, erosion
import hcat.lib.utils
from hcat.lib.utils import graceful_exit
import os.path
import wget
from typing import Dict, Optional
class SpatialEmbedding(Backend):
def __init__(self,
sigma: Optional[torch.Tensor] = torch.tensor([0.02, 0.02, 0.02]),
device: Optional[str] = 'cuda',
model_loc: Optional[str] = None,
postprocessing: Optional[bool] = True,
scale: Optional[int] = 25,
figure: Optional[str] = None,
archetecture: Optional[RUnet] = RUnet):
"""
Initialize Spatial embedding Algorithm.
:param sigma: torch.Tensor[sigma_x, sigma_y, sigma_z] values for gaussian probability estimation.
:param device: String value for torch device by which to run segmentation backbone on.
:param model_loc: Path to trained model files.
        :param postprocessing: if True, apply segmentation postprocessing (intensity-based rejection of candidate cell masks)
:param scale: scale factor based on max diameter of object
:param figure: filename and path of diagnostic figure which may be rendered
"""
super(SpatialEmbedding, self).__init__()
self.url = 'https://github.com/buswinka/hcat/blob/master/modelfiles/spatial_embedding.trch?raw=true'
# self.url = None
self.scale = torch.tensor(scale)
self.device = device
self.sigma = sigma.to(device)
self.postprocessing = postprocessing
self.figure = figure
if self.url:
self.model = self._model_loader_url(self.url, archetecture, device)
else:
self.model = self._model_loader_path(model_loc, archetecture, device)
self.vector_to_embedding = torch.jit.script(
hcat.lib.functional.VectorToEmbedding(scale=self.scale).requires_grad_(False).eval())
self.embedding_to_probability = torch.jit.script(
hcat.lib.functional.EmbeddingToProbability(scale=self.scale).requires_grad_(False).eval())
self.estimate_centroids = hcat.lib.functional.EstimateCentroids(scale=self.scale).requires_grad_(False)
self.filter = median_filter(kernel_targets=3, rate=1, device=device)
self.binary_erosion = erosion(device=device)
self.intensity_rejection = IntensityCellReject()
self.nms = hcat.lib.functional.nms().requires_grad_(False)
self.centroids = None
self.vec = None
self.embed = None
self.prob = None
@graceful_exit('\x1b[1;31;40m' + 'ERROR: Spatial Embedding Failed. Aborting...' + '\x1b[0m')
def forward(self, image: torch.Tensor) -> torch.Tensor:
"""
Inputs an image and outputs a probability mask of everything seen in the image.
.. note::
Call the module as a function to execute this method (similar to torch.nn.module).
        .. warning::
Will not raise an error upon failure, instead returns None and prints to standard out
Example:
>>> from hcat.backends.spatial_embedding import SpatialEmbedding
>>> import torch
>>> backend = SpatialEmbedding()
>>> image = torch.load('path/to/my/image.trch')
>>> assert image.ndim == 5 # Shape should be [B, C, X, Y, Z]
>>> masks = backend(image)
:param image: [B, C=4, X, Y, Z] input image
:return: [B, 1, X, Y, Z] output segmentation mask where each pixel value is a cell id (0 is background)
"""
assert image.ndim == 5
assert image.shape[1] == 1
assert image.min() >= -1
assert image.max() <= 1
# image = self.filter(image.to(self.device))
image = image.to(self.device)
b, c, x, y, z = image.shape
if self.image_reject and self._is_image_bad(image):
return torch.zeros((b, 0, x, y, z), device=self.device)
# Evaluate Neural Network Model
out: torch.Tensor = self.model(image)
# Assign Outputs
probability_map = out[:, [-1], ...]
out = out[:, 0:3:1, ...]
self.prob = probability_map.cpu()
self.vec = out.cpu()
out: torch.Tensor = self.vector_to_embedding(out)
self.embed = out.cpu()
centroids: Dict[str, torch.Tensor] = self.estimate_centroids(out, probability_map)
self.centroids = centroids
out: torch.Tensor = self.embedding_to_probability(out, centroids, self.sigma)
# Reject cell masks that overlap or meet min Myo7a criteria
if self.postprocessing:
out: torch.Tensor = self.intensity_rejection(out, image)
# print(centroids.shape, out.shape)
if out.numel() == 0:
return torch.zeros((b, 0, x, y, z), device=self.device)
ind = self.nms(out, 0.5)
out = out[:, ind, ...]
# Take probabilities and generate masks!
probability_map = probability_map.lt(0.8).squeeze(1)
for i in range(out.shape[1]):
out[:, i, ...][probability_map] = 0
self.zero_grad()
return out
def load(self, model_loc: str) -> None:
"""
Initializes model weights from a url or filepath.
Example:
>>> from hcat.backends.spatial_embedding import SpatialEmbedding
>>> backend = SpatialEmbedding()
>>>
>>> url = 'https://www.model_location.com/model.trch'
>>> backend.load(url) # Works with url
>>>
>>> model_path = 'path/to/my/model.trch'
>>> backend.load(model_path) # Also works with path
:param model_loc: url or filepath
:return: None
"""
if self._is_url(model_loc):
return self._model_loader_url(model_loc, RUnet(in_channels=1).requires_grad_(False), self.device)
else:
return self._model_loader_path(model_loc, RUnet(in_channels=1).requires_grad_(False), self.device)
| 2.1875 | 2 |
bazel/antlr4_cc.bzl | kyle-winkelman/fhir | 0 | 19332 | <gh_stars>0
"""Build rules to create C++ code from an Antlr4 grammar."""
def antlr4_cc_lexer(name, src, namespaces = None, imports = None, deps = None, lib_import = None):
"""Generates the C++ source corresponding to an antlr4 lexer definition.
Args:
name: The name of the package to use for the cc_library.
src: The antlr4 g4 file containing the lexer rules.
namespaces: The namespace used by the generated files. Uses an array to
support nested namespaces. Defaults to [name].
imports: A list of antlr4 source imports to use when building the lexer.
deps: Dependencies for the generated code.
lib_import: Optional target for importing grammar and token files.
"""
namespaces = namespaces or [name]
imports = imports or []
deps = deps or []
if not src.endswith(".g4"):
fail("Grammar must end with .g4", "src")
if (any([not imp.endswith(".g4") for imp in imports])):
fail("Imported files must be Antlr4 grammar ending with .g4", "imports")
file_prefix = src[:-3]
base_file_prefix = _strip_end(file_prefix, "Lexer")
out_files = [
"%sLexer.h" % base_file_prefix,
"%sLexer.cpp" % base_file_prefix,
]
native.java_binary(
name = "antlr_tool",
jvm_flags = ["-Xmx256m"],
main_class = "org.antlr.v4.Tool",
runtime_deps = ["@maven//:org_antlr_antlr4_4_7_1"],
)
command = ";\n".join([
# Use the first namespace, we'll add the others afterwards.
_make_tool_invocation_command(namespaces[0], lib_import),
_make_namespace_adjustment_command(namespaces, out_files),
])
native.genrule(
name = name + "_source",
srcs = [src] + imports,
outs = out_files,
cmd = command,
heuristic_label_expansion = 0,
tools = ["antlr_tool"],
)
native.cc_library(
name = name,
srcs = [f for f in out_files if f.endswith(".cpp")],
hdrs = [f for f in out_files if f.endswith(".h")],
deps = ["@antlr_cc_runtime//:antlr4_runtime"] + deps,
copts = [
"-fexceptions",
],
features = ["-use_header_modules"], # Incompatible with -fexceptions.
)
def antlr4_cc_parser(
name,
src,
namespaces = None,
token_vocab = None,
imports = None,
listener = True,
visitor = False,
deps = None,
lib_import = None):
"""Generates the C++ source corresponding to an antlr4 parser definition.
Args:
name: The name of the package to use for the cc_library.
src: The antlr4 g4 file containing the parser rules.
namespaces: The namespace used by the generated files. Uses an array to
support nested namespaces. Defaults to [name].
token_vocab: The antlr g4 file containing the lexer tokens.
imports: A list of antlr4 source imports to use when building the parser.
listener: Whether or not to include listener generated files.
visitor: Whether or not to include visitor generated files.
deps: Dependencies for the generated code.
lib_import: Optional target for importing grammar and token files.
"""
suffixes = ()
if listener:
suffixes += (
"%sBaseListener.cpp",
"%sListener.cpp",
"%sBaseListener.h",
"%sListener.h",
)
if visitor:
suffixes += (
"%sBaseVisitor.cpp",
"%sVisitor.cpp",
"%sBaseVisitor.h",
"%sVisitor.h",
)
namespaces = namespaces or [name]
imports = imports or []
deps = deps or []
if not src.endswith(".g4"):
fail("Grammar must end with .g4", "src")
if token_vocab != None and not token_vocab.endswith(".g4"):
fail("Token Vocabulary must end with .g4", "token_vocab")
if (any([not imp.endswith(".g4") for imp in imports])):
fail("Imported files must be Antlr4 grammar ending with .g4", "imports")
file_prefix = src[:-3]
base_file_prefix = _strip_end(file_prefix, "Parser")
out_files = [
"%sParser.h" % base_file_prefix,
"%sParser.cpp" % base_file_prefix,
] + _make_outs(file_prefix, suffixes)
if token_vocab:
imports.append(token_vocab)
command = ";\n".join([
        # Use the first namespace, we'll add the others afterwards.
_make_tool_invocation_command(namespaces[0], lib_import, listener, visitor),
_make_namespace_adjustment_command(namespaces, out_files),
])
native.genrule(
name = name + "_source",
srcs = [src] + imports,
outs = out_files,
cmd = command,
heuristic_label_expansion = 0,
tools = [
":antlr_tool",
],
)
native.cc_library(
name = name,
srcs = [f for f in out_files if f.endswith(".cpp")],
hdrs = [f for f in out_files if f.endswith(".h")],
deps = ["@antlr_cc_runtime//:antlr4_runtime"] + deps,
copts = [
"-fexceptions",
# FIXME: antlr generates broken C++ code that attempts to construct
# a std::string from nullptr. It's not clear whether the relevant
# constructs are reachable.
"-Wno-nonnull",
],
features = ["-use_header_modules"], # Incompatible with -fexceptions.
)
def _make_outs(file_prefix, suffixes):
return [file_suffix % file_prefix for file_suffix in suffixes]
def _strip_end(text, suffix):
if not text.endswith(suffix):
return text
return text[:len(text) - len(suffix)]
def _to_c_macro_name(filename):
# Convert the filenames to a format suitable for C preprocessor definitions.
char_list = [filename[i].upper() for i in range(len(filename))]
return "ANTLR4_GEN_" + "".join(
[a if (("A" <= a) and (a <= "Z")) else "_" for a in char_list],
)
def _make_tool_invocation_command(package, lib_import, listener = False, visitor = False):
return "$(location :antlr_tool) " + \
"$(SRCS)" + \
(" -visitor" if visitor else " -no-visitor") + \
(" -listener" if listener else " -no-listener") + \
(" -lib $$(dirname $(location " + lib_import + "))" if lib_import else "") + \
" -Dlanguage=Cpp" + \
" -package " + package + \
" -o $(@D)" + \
" -Xexact-output-dir"
def _make_namespace_adjustment_command(namespaces, out_files):
if len(namespaces) == 1:
return "true"
commands = []
extra_header_namespaces = "\\\n".join(["namespace %s {" % namespace for namespace in namespaces[1:]])
for filepath in out_files:
if filepath.endswith(".h"):
commands.append("sed -i '/namespace %s {/ a%s' $(@D)/%s" % (namespaces[0], extra_header_namespaces, filepath))
for namespace in namespaces[1:]:
commands.append("sed -i '/} \/\/ namespace %s/i} \/\/ namespace %s' $(@D)/%s" % (namespaces[0], namespace, filepath))
else:
commands.append("sed -i 's/using namespace %s;/using namespace %s;/' $(@D)/%s" % (namespaces[0], "::".join(namespaces), filepath))
return ";\n".join(commands)
| 1.96875 | 2 |
examples/finetune-bert/02-BERT-sst2-DeepSpeed.py | ceshine/pytorch-helper-bot | 10 | 19460 | <reponame>ceshine/pytorch-helper-bot
""" Finetuning BERT using DeepSpeed's ZeRO-Offload
"""
import json
import dataclasses
from pathlib import Path
from functools import partial
import nlp
import torch
import typer
import deepspeed
import numpy as np
from transformers import BertTokenizerFast
from transformers import BertForSequenceClassification
from sklearn.model_selection import train_test_split
from pytorch_helper_bot import (
DeepSpeedBot, MovingAverageStatsTrackerCallback, CheckpointCallback,
LearningRateSchedulerCallback, MultiStageScheduler, Top1Accuracy,
LinearLR, CosineAnnealingScheduler
)
CACHE_DIR = Path("cache/")
CACHE_DIR.mkdir(exist_ok=True)
APP = typer.Typer()
class SST2Dataset(torch.utils.data.Dataset):
def __init__(self, entries_dict):
super().__init__()
self.entries_dict = entries_dict
def __len__(self):
return len(self.entries_dict["label"])
def __getitem__(self, idx):
return (
self.entries_dict["input_ids"][idx],
self.entries_dict["attention_mask"][idx],
self.entries_dict["token_type_ids"][idx],
self.entries_dict["label"][idx]
)
@dataclasses.dataclass
class SST2Bot(DeepSpeedBot):
log_dir = CACHE_DIR / "logs"
def __post_init__(self):
super().__post_init__()
self.loss_format = "%.6f"
@staticmethod
def extract_prediction(output):
return output[0]
class Object(object):
pass
def convert_to_features(tokenizer, example_batch):
# Tokenize contexts and questions (as pairs of inputs)
encodings = tokenizer.batch_encode_plus(
example_batch['sentence'], padding='max_length', max_length=64, truncation=True)
return encodings
@APP.command(
context_settings={"allow_extra_args": True, "ignore_unknown_options": True}
)
def main(arch="bert-base-uncased", config="gpu.json"):
# Reference:
#
# * https://github.com/huggingface/nlp/blob/master/notebooks/Overview.ipynb
with open(config) as fin:
config_params = json.load(fin)
dataset = nlp.load_dataset('glue', "sst2")
print(set([x['label'] for x in dataset["train"]]))
tokenizer = BertTokenizerFast.from_pretrained(arch)
# Format our dataset to outputs torch.Tensor to train a pytorch model
columns = ['input_ids', 'token_type_ids', 'attention_mask', "label"]
for subset in ("train", "validation"):
dataset[subset] = dataset[subset].map(
partial(convert_to_features, tokenizer), batched=True)
dataset[subset].set_format(type='torch', columns=columns)
print(tokenizer.decode(dataset['train'][6]["input_ids"].numpy()))
print(dataset['train'][0]["attention_mask"])
valid_idx, test_idx = train_test_split(
list(range(len(dataset["validation"]))), test_size=0.5, random_state=42)
train_dict = {
"input_ids": dataset['train']["input_ids"],
"attention_mask": dataset['train']["attention_mask"],
"token_type_ids": dataset['train']["token_type_ids"],
"label": dataset['train']["label"]
}
valid_dict = {
"input_ids": dataset['validation']["input_ids"][valid_idx],
"attention_mask": dataset['validation']["attention_mask"][valid_idx],
"token_type_ids": dataset['validation']["token_type_ids"][valid_idx],
"label": dataset['validation']["label"][valid_idx]
}
test_dict = {
"input_ids": dataset['validation']["input_ids"][test_idx],
"attention_mask": dataset['validation']["attention_mask"][test_idx],
"token_type_ids": dataset['validation']["token_type_ids"][test_idx],
"label": dataset['validation']["label"][test_idx]
}
# Instantiate a PyTorch Dataloader around our dataset
train_loader = torch.utils.data.DataLoader(
SST2Dataset(train_dict), batch_size=config_params["train_batch_size"], shuffle=True)
valid_loader = torch.utils.data.DataLoader(
SST2Dataset(valid_dict), batch_size=config_params["train_batch_size"], drop_last=False)
test_loader = torch.utils.data.DataLoader(
SST2Dataset(test_dict), batch_size=config_params["train_batch_size"], drop_last=False)
model = BertForSequenceClassification.from_pretrained(arch)
# torch.nn.init.kaiming_normal_(model.classifier.weight)
# torch.nn.init.constant_(model.classifier.bias, 0)
# torch.nn.init.kaiming_normal_(model.bert.pooler.dense.weight)
# torch.nn.init.constant_(model.bert.pooler.dense.bias, 0);
args = Object()
setattr(args, "local_rank", 0)
setattr(args, "deepspeed_config", config)
if config[:3] == "cpu":
if "optimizer" in config_params:
model, optimizer, _, _ = deepspeed.initialize(
args=args,
model=model,
model_parameters=model.parameters()
)
else:
from deepspeed.ops.adam import DeepSpeedCPUAdam
optimizer = DeepSpeedCPUAdam(model.parameters(), lr=2e-5)
model, optimizer, _, _ = deepspeed.initialize(
args=args,
model=model,
model_parameters=model.parameters(),
optimizer=optimizer
)
else:
model, optimizer, _, _ = deepspeed.initialize(
args=args,
model=model,
model_parameters=model.parameters()
# optimizer=optimizer
)
total_steps = len(train_loader) * 3
# checkpoints = CheckpointCallback(
# keep_n_checkpoints=1,
# checkpoint_dir=CACHE_DIR / "model_cache/",
# monitor_metric="accuracy"
# )
lr_durations = [
int(total_steps*0.2),
int(np.ceil(total_steps*0.8))
]
break_points = [0] + list(np.cumsum(lr_durations))[:-1]
callbacks = [
MovingAverageStatsTrackerCallback(
avg_window=len(train_loader) // 8,
log_interval=len(train_loader) // 10
),
LearningRateSchedulerCallback(
MultiStageScheduler(
[
LinearLR(optimizer, 0.01, lr_durations[0]),
CosineAnnealingScheduler(optimizer, lr_durations[1])
],
start_at_epochs=break_points
)
),
# checkpoints
]
bot = SST2Bot(
model=model,
train_loader=train_loader,
valid_loader=valid_loader,
clip_grad=10.,
optimizer=optimizer, echo=True,
criterion=torch.nn.CrossEntropyLoss(),
callbacks=callbacks,
pbar=False,
use_tensorboard=False,
# use_amp=APEX_AVAILABLE,
metrics=(Top1Accuracy(),)
)
print(total_steps)
bot.train(
total_steps=total_steps,
checkpoint_interval=len(train_loader) // 2
)
# bot.load_model(checkpoints.best_performers[0][1])
# checkpoints.remove_checkpoints(keep=0)
# TARGET_DIR = CACHE_DIR / "sst2_bert_uncased"
# TARGET_DIR.mkdir(exist_ok=True)
# bot.model.save_pretrained(TARGET_DIR)
bot.eval(valid_loader)
bot.eval(test_loader)
if __name__ == "__main__":
APP()
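# --- Hedged example of a minimal gpu.json (added; all values are placeholders) ---
# The script itself only reads "train_batch_size"; if an "optimizer" block is present
# DeepSpeed builds the optimizer, otherwise the cpu.json branch constructs
# DeepSpeedCPUAdam explicitly.
#
#   {
#       "train_batch_size": 32,
#       "fp16": {"enabled": true},
#       "optimizer": {"type": "Adam", "params": {"lr": 2e-5}},
#       "zero_optimization": {"stage": 2}
#   }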
| 2.078125 | 2 |
scripts/WIPS2015/WIPS_anydiag_time.py | eclee25/flu-SDI-exploratory-age | 3 | 19588 | <filename>scripts/WIPS2015/WIPS_anydiag_time.py
#!/usr/bin/python
##############################################
###Python template
###Author: <NAME>
###Date: 10/11/14
###Function: Any diagnosis per 100,000 population vs. week number for flu weeks (wks 40-20). Population size is from the calendar year of the week of calculation.
###Import data: SQL_export/anydiag_outpatient_allweeks.csv
### branch from v2/Supp_anydiag_time.py
###Command Line: python WIPS_anydiag_time.py
##############################################
### notes ###
# Incidence per 100,000 is normalized by total population by second calendar year of the flu season
### packages/modules ###
import csv
import matplotlib.pyplot as plt
## local modules ##
import functions_v5 as fxn
### data structures ###
### functions ###
### data files ###
anydiagin = open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/explore/SQL_export/anydiag_allweeks_outpatient.csv','r')
anydiagin.readline() # rm header
anydiag = csv.reader(anydiagin, delimiter=',')
### called/local plotting parameters ###
ps = fxn.pseasons
fw = fxn.gp_fluweeks
sl = fxn.gp_seasonlabels
colvec = fxn.gp_colors
wklab = fxn.gp_weeklabels
fs = 24
fssml = 16
### program ###
# dict_wk[week] = seasonnum, dict_any[week] = visits per 100,000 in US population in calendar year of week,d_any53ls[seasonnum] = [anydiag wk 40 per 100000, anydiag wk 41 per 100000,...]
d_wk, d_any, d_any53ls = fxn.week_anydiag_processing(anydiag)
# plot values
for s in ps:
plt.plot(xrange(53), d_any53ls[s], marker = fxn.gp_marker, color = colvec[s-2], label = sl[s-2], linewidth = fxn.gp_linewidth)
plt.fill([7, 8, 8, 7], [0, 0, 4000, 4000], facecolor='grey', alpha=0.4)
plt.fill([12, 14, 14, 12], [0, 0, 4000, 4000], facecolor='grey', alpha=0.4)
plt.xlim([0, fw-1])
plt.xticks(range(53)[::5], wklab[::5])
plt.ylim([0, 4000])
plt.xlabel('Week Number', fontsize=fs)
plt.ylabel('Outpatient Visit per 100,000', fontsize=fs)
plt.legend(loc='upper right')
plt.savefig('/home/elee/Dropbox/Department/Presentations/2015_WIPS/Figures/anydiag_time.png', transparent=False, bbox_inches='tight', pad_inches=0)
plt.close()
# plt.show()
| 1.664063 | 2 |
meiduo_mall/apps/orders/urls.py | MarioKarting/Django_meiduo_project | 0 | 19716 | # !/usr/bin/env python
# _*_ coding:utf-8 _*_
from django.conf.urls import url
from . import views
urlpatterns = [
    # 1. Order settlement page -- orders/settlement/
url(r'^orders/settlement/$', views.OrdersSettlementView.as_view(), name='settlement'),
    # 2. orders/commit/ -- submit an order
url(r'^orders/commit/$', views.OrdersCommitView.as_view(), name='commit'),
    # 3. Order success page -- orders/success/
url(r'^orders/success/$', views.OrdersSuccessView.as_view(), name='sucess'),
]
| 0.847656 | 1 |
hadoop/hadoop/node.py | DropletProbe/shellscripts | 0 | 19844 | import re
from functools import reduce  # reduce is not a builtin on Python 3
class Node:
def __init__(self, id, ip, hostname, type):
self.id = id
self.ip = ip
self.hostname = hostname
self.type = type
self.validate()
def validate(self):
self.illegal = False
if re.match("^(\d{1,3}\.){3}\d{1,3}$", self.ip):
self.illegal = reduce(lambda x, y : x and y, map(lambda x : True if int(x) <= 255 else False, self.ip.split(".")), True)
if self.illegal == False:
raise Exception("IP Format Error, " + self.ip + " is illegal.")
def __repr__(self):
return str(self)
def __str__(self):
return "<IP: %s, id: %s, hostname: %s, type: %s>" % (self.ip, self.id, self.hostname, self.type)
# if __name__ == "__main__":
# a = Node(1, "192.168.1.300", 1, 1)
# a.validate()
| 2.515625 | 3 |
dockerfiles/igv/igv.py | leipzig/gatk-sv | 76 | 19972 | <reponame>leipzig/gatk-sv
import sys
[_, varfile] = sys.argv
plotdir = "plots"
igvfile = "igv.txt"
igvsh = "igv.sh"
with open(varfile, 'r') as f:
for line in f:
dat = line.split('\t')
chr = dat[0]
start = dat[1]
end = dat[2]
data = dat[3].split(',')
| 1.101563 | 1 |
core/power_status_monitor.py | kangyifei/CloudSimPy | 0 | 20100 | <gh_stars>0
import json
class PowerStateMonitor(object):
def __init__(self, simulation):
self.simulation = simulation
self.env = simulation.env
self.event_file = simulation.event_file + "_power"
self.events = []
def __cal_machine_power(self):
machines = self.simulation.cluster.machines
sum = 0
for machine in machines:
power = 100 * machine.state['cpu_usage_percent'] + 2 * machine.state['memory_usage_percent']
sum += power
return sum
def __cal_cooling_equipment_power(self):
cooling_equipment = self.simulation.cluster.cooling_equipment
if ((cooling_equipment.state['inlet_temp'] - cooling_equipment.state['setting_temp']) < 0):
power = 0
else:
power = 100 * (cooling_equipment.state['inlet_temp'] - cooling_equipment.state['setting_temp'])
return power
def run(self):
machine_power_sum = 0
cooling_power_sum = 0
while not self.simulation.finished:
machine_power = round(self.__cal_machine_power(), 2)
cooling_power = round(self.__cal_cooling_equipment_power(), 2)
machine_power_sum += machine_power
cooling_power_sum += cooling_power
state = {
'timestamp': self.env.now,
'machine_power': machine_power,
'cooling_power': cooling_power
}
self.events.append(state)
yield self.env.timeout(1)
state = {
'timestamp': self.env.now,
'machine_power_sum': machine_power_sum,
'cooling_power_sum': cooling_power_sum
}
self.events.append(state)
self.__write_to_file()
def __write_to_file(self):
with open(self.event_file, 'w') as f:
json.dump(self.events, f, indent=4)
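# --- Hedged wiring sketch (added; assumes a simpy-style simulation object) ---
# The monitor samples machine and cooling power once per simulated time unit, so it
# is normally registered as a process on the simulation's environment:
#
#   monitor = PowerStateMonitor(simulation)
#   simulation.env.process(monitor.run())
#   # once simulation.finished is True, per-step records plus the accumulated sums
#   # are written to "<event_file>_power" as JSON.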
| 2.484375 | 2 |
lib/python3.8/site-packages/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py | cjsteel/python3-venv-ansible-2.10.5 | 0 | 20228 | <filename>lib/python3.8/site-packages/ansible_collections/community/postgresql/plugins/modules/postgresql_user_obj_stat_info.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2020, <NAME> (@Andersson007) <<EMAIL>>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: postgresql_user_obj_stat_info
short_description: Gather statistics about PostgreSQL user objects
description:
- Gathers statistics about PostgreSQL user objects.
version_added: '0.2.0'
options:
filter:
description:
- Limit the collected information by comma separated string or YAML list.
- Allowable values are C(functions), C(indexes), C(tables).
- By default, collects all subsets.
- Unsupported values are ignored.
type: list
elements: str
schema:
description:
- Restrict the output by certain schema.
type: str
db:
description:
- Name of database to connect.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting. The specified session_role must
be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
trust_input:
description:
- If C(no), check the value of I(session_role) is potentially dangerous.
- It makes sense to use C(no) only when SQL injections via I(session_role) are possible.
type: bool
default: yes
version_added: '0.2.0'
notes:
- C(size) and C(total_size) returned values are presented in bytes.
- For tracking function statistics the PostgreSQL C(track_functions) parameter must be enabled.
See U(https://www.postgresql.org/docs/current/runtime-config-statistics.html) for more information.
seealso:
- module: community.postgresql.postgresql_info
- module: community.postgresql.postgresql_ping
- name: PostgreSQL statistics collector reference
description: Complete reference of the PostgreSQL statistics collector documentation.
link: https://www.postgresql.org/docs/current/monitoring-stats.html
author:
- <NAME> (@Andersson007)
- <NAME> (@andytom)
extends_documentation_fragment:
- community.postgresql.postgres
'''
EXAMPLES = r'''
- name: Collect information about all supported user objects of the acme database
community.postgresql.postgresql_user_obj_stat_info:
db: acme
- name: Collect information about all supported user objects in the custom schema of the acme database
community.postgresql.postgresql_user_obj_stat_info:
db: acme
schema: custom
- name: Collect information about user tables and indexes in the acme database
community.postgresql.postgresql_user_obj_stat_info:
db: acme
filter: tables, indexes
'''
RETURN = r'''
indexes:
description: User index statistics
returned: always
type: dict
sample: {"public": {"test_id_idx": {"idx_scan": 0, "idx_tup_fetch": 0, "idx_tup_read": 0, "relname": "test", "size": 8192, ...}}}
tables:
description: User table statistics.
returned: always
type: dict
sample: {"public": {"test": {"analyze_count": 3, "n_dead_tup": 0, "n_live_tup": 0, "seq_scan": 2, "size": 0, "total_size": 8192, ...}}}
functions:
description: User function statistics.
returned: always
type: dict
sample: {"public": {"inc": {"calls": 1, "funcid": 26722, "self_time": 0.23, "total_time": 0.23}}}
'''
try:
from psycopg2.extras import DictCursor
except ImportError:
# psycopg2 is checked by connect_to_db()
# from ansible.module_utils.postgres
pass
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.postgresql.plugins.module_utils.database import (
check_input,
)
from ansible_collections.community.postgresql.plugins.module_utils.postgres import (
connect_to_db,
exec_sql,
get_conn_params,
postgres_common_argument_spec,
)
from ansible.module_utils.six import iteritems
# ===========================================
# PostgreSQL module specific support methods.
#
class PgUserObjStatInfo():
"""Class to collect information about PostgreSQL user objects.
Args:
module (AnsibleModule): Object of AnsibleModule class.
cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
Attributes:
module (AnsibleModule): Object of AnsibleModule class.
cursor (cursor): Cursor object of psycopg2 library to work with PostgreSQL.
executed_queries (list): List of executed queries.
info (dict): Statistics dictionary.
obj_func_mapping (dict): Mapping of object types to corresponding functions.
schema (str): Name of a schema to restrict stat collecting.
"""
def __init__(self, module, cursor):
self.module = module
self.cursor = cursor
self.info = {
'functions': {},
'indexes': {},
'tables': {},
}
self.obj_func_mapping = {
'functions': self.get_func_stat,
'indexes': self.get_idx_stat,
'tables': self.get_tbl_stat,
}
self.schema = None
def collect(self, filter_=None, schema=None):
"""Collect statistics information of user objects.
Kwargs:
filter_ (list): List of subsets which need to be collected.
schema (str): Restrict stat collecting by certain schema.
Returns:
``self.info``.
"""
if schema:
self.set_schema(schema)
if filter_:
for obj_type in filter_:
obj_type = obj_type.strip()
obj_func = self.obj_func_mapping.get(obj_type)
if obj_func is not None:
obj_func()
else:
self.module.warn("Unknown filter option '%s'" % obj_type)
else:
for obj_func in self.obj_func_mapping.values():
obj_func()
return self.info
def get_func_stat(self):
"""Get function statistics and fill out self.info dictionary."""
query = "SELECT * FROM pg_stat_user_functions"
if self.schema:
query = "SELECT * FROM pg_stat_user_functions WHERE schemaname = %s"
result = exec_sql(self, query, query_params=(self.schema,),
add_to_executed=False)
if not result:
return
self.__fill_out_info(result,
info_key='functions',
schema_key='schemaname',
name_key='funcname')
def get_idx_stat(self):
"""Get index statistics and fill out self.info dictionary."""
query = "SELECT * FROM pg_stat_user_indexes"
if self.schema:
query = "SELECT * FROM pg_stat_user_indexes WHERE schemaname = %s"
result = exec_sql(self, query, query_params=(self.schema,),
add_to_executed=False)
if not result:
return
self.__fill_out_info(result,
info_key='indexes',
schema_key='schemaname',
name_key='indexrelname')
def get_tbl_stat(self):
"""Get table statistics and fill out self.info dictionary."""
query = "SELECT * FROM pg_stat_user_tables"
if self.schema:
query = "SELECT * FROM pg_stat_user_tables WHERE schemaname = %s"
result = exec_sql(self, query, query_params=(self.schema,),
add_to_executed=False)
if not result:
return
self.__fill_out_info(result,
info_key='tables',
schema_key='schemaname',
name_key='relname')
def __fill_out_info(self, result, info_key=None, schema_key=None, name_key=None):
# Convert result to list of dicts to handle it easier:
result = [dict(row) for row in result]
for elem in result:
# Add schema name as a key if not presented:
if not self.info[info_key].get(elem[schema_key]):
self.info[info_key][elem[schema_key]] = {}
# Add object name key as a subkey
# (they must be uniq over a schema, so no need additional checks):
self.info[info_key][elem[schema_key]][elem[name_key]] = {}
            # Add the remaining attributes of this object:
for key, val in iteritems(elem):
if key not in (schema_key, name_key):
self.info[info_key][elem[schema_key]][elem[name_key]][key] = val
if info_key in ('tables', 'indexes'):
schemaname = elem[schema_key]
if self.schema:
schemaname = self.schema
relname = '%s.%s' % (schemaname, elem[name_key])
result = exec_sql(self, "SELECT pg_relation_size (%s)",
query_params=(relname,),
add_to_executed=False)
self.info[info_key][elem[schema_key]][elem[name_key]]['size'] = result[0][0]
if info_key == 'tables':
result = exec_sql(self, "SELECT pg_total_relation_size (%s)",
query_params=(relname,),
add_to_executed=False)
self.info[info_key][elem[schema_key]][elem[name_key]]['total_size'] = result[0][0]
def set_schema(self, schema):
"""If schema exists, sets self.schema, otherwise fails."""
query = ("SELECT 1 FROM information_schema.schemata "
"WHERE schema_name = %s")
result = exec_sql(self, query, query_params=(schema,),
add_to_executed=False)
if result and result[0][0]:
self.schema = schema
else:
self.module.fail_json(msg="Schema '%s' does not exist" % (schema))
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
db=dict(type='str', aliases=['login_db']),
filter=dict(type='list', elements='str'),
session_role=dict(type='str'),
schema=dict(type='str'),
trust_input=dict(type="bool", default=True),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
filter_ = module.params["filter"]
schema = module.params["schema"]
if not module.params["trust_input"]:
check_input(module, module.params['session_role'])
# Connect to DB and make cursor object:
pg_conn_params = get_conn_params(module, module.params)
    # We don't need to commit anything, so set autocommit to False:
db_connection = connect_to_db(module, pg_conn_params, autocommit=False)
cursor = db_connection.cursor(cursor_factory=DictCursor)
############################
# Create object and do work:
pg_obj_info = PgUserObjStatInfo(module, cursor)
info_dict = pg_obj_info.collect(filter_, schema)
# Clean up:
cursor.close()
db_connection.close()
# Return information:
module.exit_json(**info_dict)
if __name__ == '__main__':
main()
| 1.601563 | 2 |
train_end2end.py | lyn1874/daml | 0 | 20356 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 13 18:23:08 2019
This script is for training the experiment end2end
@author: li
"""
import tensorflow as tf
import models.AE as AE
import optimization.loss_tf as loss_tf
from data import read_frame_temporal as rft
import numpy as np
import os
import math
import cv2
import shutil
import const
def train_end2end(args, data_set, model_type, motion_method, version=0, bg_ind=None, augment_opt="none"):
model_mom_for_load_data = args.datadir
path_mom = args.expdir
if data_set == "ucsd1":
stat = [8,6,2,5]
train_ucsd1(stat, model_type, motion_method, version)
elif data_set == "ucsd2":
stat = [8,6,2,4]
train_ucsd2(stat, model_type, motion_method, version)
elif data_set == "avenue":
stat = [6,6,2,4]
        train_avenue(stat, model_type, motion_method, augment_opt, version)
elif data_set == "shanghaitech_allinone":
stat = [6,6,2,4]
train_shanghaitech_allinone(stat, model_type, version)
elif data_set == "shanghaitech_multiple":
stat = [6,6,2,4]
train_shanghaitech_multiple(stat, model_type, motion_method,
version, bg_ind)
# elif data_set is "moving_mnist":
# # 6, 6, 1, 4
# train_moving_mnist(model_mom_for_load_data, path_mom, stat, model_type, version)
def train_fps(model_mom_for_load_data, path_mom):
# 31,32,33,34
version = 0
interval_group = np.arange(11)[1:] * 2
learn_opt = "learn_fore"
data_set = "ucsd2"
motion_method = "conv3d"
model_type = "2d_2d_pure_unet"
time_step = 6
args.z_mse_ratio = 0.001
for single_interval in interval_group:
delta = single_interval
train_model(args.datadir, args.expdir, data_set, time_step, delta, model_type, motion_method,
single_interval, version,
None, 4, learn_opt)
def train_ucsd1_group():
stat = [8, 6, 2, 5]
model_type = "2d_2d_pure_unet"
motion_method = "convlstm"
version = [0, 1, 2, 3]
for single_version in version:
train_ucsd1(stat, model_type, motion_method, single_version)
def train_ucsd1(stat, model_type, motion_method, version):
data_set = "ucsd1"
time_step, delta, interval, num_enc_layer = stat
train_model(args.datadir, args.expdir, data_set, time_step, delta, model_type, motion_method,
interval, version,
None,
num_enc_layer, "learn_fore")
def train_ucsd2_group():
stat = [8, 6, 2, 4]
model_type = "2d_2d_pure_unet"
motion_method = "convlstm"
for single_version in [2, 3]:
train_ucsd2(stat, model_type, motion_method, single_version)
def train_ucsd2(stat, model_type, motion_method, version):
data_set = "ucsd2"
time_step, delta, interval, num_enc_layer = stat
train_model(args.datadir, args.expdir, data_set, time_step, delta, model_type, motion_method, interval,
version, None, num_enc_layer, "learn_fore")
def train_avenue_group():
data_dir = args.datadir
model_dir = args.expdir
stat = [6, 6, 2, 4]
motion_method = "conv3d"
augment_opt = "none"
for single_version in [2, 3]:
        train_avenue(stat, "2d_2d_pure_unet", motion_method,
                     augment_opt, single_version)
def train_avenue(stat, model_type, motion_method, augment_opt, version):
data_set = "avenue"
args.augment_option = augment_opt
if augment_opt == "add_dark_auto":
learn_opt = "learn_full"
else:
learn_opt = "learn_fore"
time_step, delta, interval, num_enc_layer = stat
train_model(args.datadir, args.expdir, data_set, time_step, delta, model_type, motion_method,
interval, version,
None,
num_enc_layer, learn_opt)
def train_shanghaitech_allinone(stat, model_type, version):
motion_method = "conv3d"
time_step, delta, interval, num_enc_layer = stat
data_set = "shanghaitech"
train_model(args.datadir, args.expdir, data_set, time_step, delta, model_type,
motion_method, interval, version, None, num_enc_layer, "learn_fore")
def train_shanghaitech_multiple(stat, model_type, motion_method, version,
bg_ind=None):
    if not bg_ind or bg_ind[0] == 0:
bg_ind = [2, 3, 7, 9, 11]
for single_bg_ind in bg_ind:
train_shanghaitech_for_per_bg(args.datadir, args.expdir, stat, model_type, motion_method,
single_bg_ind, version)
def train_shanghaitech_for_per_bg(model_mom_for_load_data, path_mom, stat, model_type, motion_method, bg_ind, version):
time_step, delta, interval, num_enc_layer = stat
data_set = "shanghaitech"
train_model(model_mom_for_load_data, path_mom, data_set, time_step, delta, model_type,
motion_method, interval, version, None, num_enc_layer, "learn_fore",
bg_index_pool=[bg_ind])
def train_moving_mnist():
motion_method = "conv3d"
data_set = "moving_mnist"
version = 2
model_type = "2d_2d_unet_no_shortcut"
z_mse_ratio = 0.001
args.z_mse_ratio = z_mse_ratio
num_layer = [5]
stat_group = [[6, 2, 1]]
for single_layer in num_layer:
for single_stat in stat_group:
time_step, delta, interval = single_stat
num_enc_layer = single_layer
train_model(args.datadir, args.expdir, data_set, time_step, delta, model_type,
motion_method, interval, version, None, num_enc_layer, "learn_full")
def train_moving_mnist_single_digit(model_group):
"""This function train a pure autoencoder for moving mnist single digit dataset
The goal of this type of experiments is to hope the latent can show some pattern between
anomalies and normal"""
motion_method = "conv3d"
data_set = "moving_mnist_single_digit"
version = 1 # version 1 means the activation layer in the last convolutional block is changed from
    # leaky-relu to tanh
args.z_mse_ratio = 0.001
num_layer = [5, 4]
stat = [6, 2, 1]
for model_type in model_group:
for single_layer in num_layer:
time_step, delta, interval = stat
num_enc_layer = single_layer
train_model(args.datadir, args.expdir, data_set, time_step, delta, model_type,
motion_method, interval, version, None, num_enc_layer, "learn_full")
def train_seq2seq(version):
data_set = "ucsd2"
motion_method = "conv3d"
model_type = "many_to_one"
for time_step in [4, 6, 8]:
stat = [time_step, 2, 2, 4]
train_model(args.datadir, args.expdir, data_set, stat[0], stat[1], model_type,
motion_method, stat[2], version, None, stat[-1], "learn_fore", None)
def train_model(model_mom_for_load_data, path_mom, data_set, time_step, delta, model_type, motion_method,
single_interval, version, ckpt_dir, num_enc_layer, learn_opt, bg_index_pool=None):
print("-------------------Start to train the model------------------------------")
args.data_set = data_set
interval_input = np.array([single_interval])
bg_index = None
args.num_encoder_layer = num_enc_layer
args.num_decoder_layer = num_enc_layer
args.time_step = time_step
args.single_interval = single_interval
args.delta = delta
args.learn_opt = learn_opt
args.bg_index_pool = bg_index_pool
model_dir = path_mom + "ano_%s_motion_end2end/" % args.data_set
if not bg_index_pool:
model_dir = model_dir + "time_%d_delta_%d_gap_%d_%s_%s_%s_enc_%d_version_%d" % (time_step,
delta, single_interval, model_type, motion_method, learn_opt, num_enc_layer, version)
else:
model_dir = model_dir + "time_%d_delta_%d_gap_%d_%s_%s_%s_enc_%d_bg_%d_version_%d" % (
time_step,
delta, single_interval, model_type, motion_method,
learn_opt, num_enc_layer, bg_index_pool[0], version)
tmf = TrainMainFunc(args, model_mom_for_load_data, model_dir, ckpt_dir, time_step, interval_input, delta,
train_index=bg_index,
bg_index_pool=bg_index_pool)
tmf.build_running()
def read_data(model_mom, data_set, concat_option, time_step, interval_input, delta, bg_index_pool=None):
if data_set != "shanghaitech":
train_im, test_im, imshape, targ_shape = rft.get_video_data(model_mom, data_set).forward()
train_im_interval, in_shape, out_shape = rft.read_frame_interval_by_dataset(data_set, train_im,
time_step, concat_option,
interval_input, delta)
else:
train_im_group = []
if not bg_index_pool:
bg_index_pool = np.arange(13)[1:]
for single_bg_index in bg_index_pool:
if single_bg_index < 10:
bg_index = "bg_index_0%d" % single_bg_index
else:
bg_index = "bg_index_%d" % single_bg_index
print("--------loading data from bg %s---------------" % bg_index)
test_im, test_la, imshape, targ_shape = rft.get_video_data(model_mom, args.data_set).forward(bg_index)
test_im_interval, in_shape, out_shape = rft.read_frame_interval_by_dataset(data_set, test_im,
time_step, concat_option,
interval=interval_input,
delta=delta)
train_im_group.append(test_im_interval)
train_im_interval = np.array([v for j in train_im_group for v in j])
return train_im_interval, imshape, targ_shape, in_shape, out_shape
class TrainMainFunc(object):
def __init__(self, args, model_mom, model_dir, ckpt_dir, time_step, interval_input=np.array([1]), delta=None,
train_index=None, bg_index_pool=None):
if not os.path.exists(model_dir):
os.makedirs(model_dir)
concat_option = "conc_tr"
train_im_interval, imshape, targ_shape, in_shape, out_shape = read_data(model_mom, args.data_set,
concat_option, time_step,
interval_input, delta,
bg_index_pool=bg_index_pool)
args.output_dim = targ_shape[-1]
if concat_option == "conc_tr":
args.num_prediction = 1
else:
args.num_prediction = out_shape[0]
self.args = args
self.model_mom = model_mom
self.model_dir = model_dir
self.ckpt_dir = ckpt_dir
self.data_set = args.data_set
self.train_index = train_index
self.temp_shape = [in_shape, out_shape]
self.targ_shape = targ_shape
self.imshape = imshape
self.output_dim = args.output_dim
self.concat = "conc_tr"
self.time_step = time_step
self.delta = delta
self.interval = interval_input[0]
self.test_im = train_im_interval
self.input_option = args.input_option
self.augment_option = args.augment_option
self.darker_value = args.darker_value
self.learn_opt = args.learn_opt
self.model_type = args.model_type
self.z_mse_ratio = args.z_mse_ratio
[lrate_g_step, lrate_g], [lrate_z_step, lrate_z], [epoch, batch_size] = const.give_learning_rate_for_init_exp(self.args)
self.lrate_g_decay_step = lrate_g_step
self.lrate_g_init = lrate_g
self.lrate_z_decay_step = lrate_z_step
self.lrate_z_init = lrate_z
self.batch_size = batch_size
self.max_epoch = epoch
print(args)
def read_tensor(self):
imh, imw, ch = self.targ_shape
placeholder_shape = [None, 2, self.temp_shape[0][0]]
shuffle_option = True
if "/project/" in self.model_dir:
repeat = 20
else:
repeat = 1
images_in = tf.placeholder(tf.string, shape=placeholder_shape, name='tr_im_path')
image_queue = rft.dataset_input(self.model_mom, self.data_set, images_in, self.learn_opt,
self.temp_shape, self.imshape, self.targ_shape[:2], self.batch_size,
augment_option=self.augment_option,
darker_value=self.darker_value,
conc_option=self.concat, shuffle=shuffle_option,
train_index=None,
epoch_size=repeat)
image_init = image_queue.make_initializable_iterator()
image_batch = image_init.get_next()
x_input = image_batch[0] # [batch_size, num_input_channel, imh, imw, ch]
x_output = image_batch[1] # [batch_size, self.output_dim, imh, imw, ch]
im_background = image_batch[-1]
print("=========================================")
print("The input of the model", x_input)
print("The output of the model", x_output)
print("The background of the data", im_background)
print("=========================================")
        x_input = tf.concat([x_input, x_output], axis=1)  # the background is already subtracted from these frames.
if self.learn_opt == "learn_fore":
x_real_input = x_input + im_background
else:
x_real_input = x_input
self.x_real_input = tf.transpose(x_real_input, perm=(1, 0, 2, 3, 4))
x_input = tf.transpose(x_input, perm=(1, 0, 2, 3, 4)) # num_frame, batch_size, imh, imw, ch
# the last input of x_input is for prediction
im_background = tf.transpose(im_background, perm=(1, 0, 2, 3, 4)) # num_frame, batch_size, imh, imw, ch
if "crop" in self.input_option:
x_input = tf.reshape(x_input, shape=[(self.time_step + 1) * self.batch_size, imh, imw, ch])
crop_size = self.input_option.strip().split("crop_")[1]
crop_h, crop_w = crop_size.strip().split("_")
crop_h, crop_w = int(crop_h), int(crop_w)
x_input_crop, stride_size, crop_box_h_w = rft.get_crop_image(x_input, crop_h, crop_w)
x_input_crop = tf.concat([x_input_crop],
axis=0) # [num_regions, (num_time+1)*batch_size, crop_height, crop_weight,ch]
num_box = x_input_crop.get_shape().as_list()[0]
x_input_crop = tf.reshape(x_input_crop,
shape=[num_box, self.time_step + 1, self.batch_size, crop_h, crop_w, ch])
x_input_crop = tf.transpose(x_input_crop, perm=(1, 0, 2, 3, 4, 5))
x_input_crop = tf.reshape(x_input_crop,
shape=[self.time_step + 1, num_box * self.batch_size, crop_h, crop_w, ch])
x_input = x_input_crop # [time, num_box*batch, croph, cropw, ch]
x_input = tf.transpose(x_input, perm=(1, 0, 2, 3, 4)) # [batch, time, c_h, c_w, ch]
x_input = tf.random.shuffle(x_input)
if crop_h >= 128:
x_input = x_input[:4] # this is for batch size
print("The actual number of box", num_box)
x_input = tf.transpose(x_input, perm=(1, 0, 2, 3, 4)) # [time, batch, c_h, c_w, ch]
self.x_real_input = x_input
return images_in, x_input, image_init, im_background
def build_graph(self):
num_recons_output = self.time_step
image_placeholder, x_input, image_init, im_background = self.read_tensor()
# --build encoder-------------#
model_use = AE.DAML(self.args)
p_x_recons, p_x_pred, latent_space_gt, latent_space_pred = model_use.forward(x_input)
if "crop" not in self.input_option:
if self.learn_opt == "learn_full":
print("====the reconstruction is full frame=============")
elif self.learn_opt == "learn_fore":
print("====the reconstruction is frame - background=====")
if self.model_type != "many_to_one":
p_x_recons = p_x_recons + im_background
p_x_pred = p_x_pred + im_background
if self.model_type == "2d_2d_pure_unet":
x_recons_gt = self.x_real_input[1:self.time_step] # [num_recons, batch_size, imh, imw, ch]
elif self.model_type == "2d_2d_unet_no_shortcut":
x_recons_gt = self.x_real_input[:self.time_step]
else:
x_recons_gt = []
x_pred_gt = self.x_real_input[-1:]
print("=============================================================")
print("----the input for the model-----------------", x_input)
print("----the groundtruth for reconstruction------", x_recons_gt)
print("----the reconstructed frames----------------", p_x_recons)
print("----the groundtruth for prediction----------", x_pred_gt)
print("----the predicted frame---------------------", p_x_pred)
print("----the gt latent space---------------------", latent_space_gt)
print("----the predicted latent space--------------", latent_space_pred)
print("=============================================================")
        if self.model_type == "2d_2d_pure_unet" or self.model_type == "2d_2d_unet_no_shortcut":
if "moving_mnist" not in self.data_set:
mse_pixel = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(x_recons_gt, p_x_recons), (-1, -2, -3)))
else:
mse_pixel = tf.keras.losses.binary_crossentropy(y_true=x_recons_gt, y_pred=p_x_recons,
from_logits=False)
mse_pixel = tf.reduce_mean(tf.reduce_sum(mse_pixel, (-1, -2, -3)))
mse_latent = tf.reduce_mean(
tf.reduce_sum(tf.squared_difference(latent_space_gt, latent_space_pred), (-1, -2, -3)))
        elif self.model_type == "many_to_one":
mse_pixel = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(x_pred_gt-im_background, p_x_pred-im_background), (-1, -2, -3)))
mse_latent = tf.constant(0.0)
z_mse_ratio_placeholder = tf.placeholder(tf.float32, name="ratio_for_z_mse")
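        # Total training objective: the pixel reconstruction/prediction MSE plus the
        # latent-space prediction MSE weighted by z_mse_ratio (fed in through this placeholder).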
if self.model_type != "many_to_one":
loss_tot = mse_pixel + mse_latent * z_mse_ratio_placeholder
else:
loss_tot = mse_pixel
var_tot = tf.trainable_variables()
[print(v) for v in var_tot if 'kernel' in v.name]
# print("==========================================")
# print("encoder decoder trainable variables")
# [print(v) for v in var_tot if 'motion_latent' not in v.name]
# print("==========================================")
# print("motion trainable variables")
# [print(v) for v in var_tot if 'motion_latent' in v.name]
var_0 = var_tot
loss_tot = tf.add_n([loss_tot, tf.add_n(
[tf.nn.l2_loss(v) for v in var_0 if 'kernel' in v.name or 'weight' in v.name]) * args.regu_par])
g_lrate = tf.placeholder(tf.float32, name='g_lrate')
train_op_0 = loss_tf.train_op(loss_tot, g_lrate, var_opt=var_0, name='train_op_tot')
z_lrate = tf.placeholder(tf.float32, name='z_lrate')
if self.model_type != "many_to_one":
var_motion = [v for v in var_tot if 'motion_latent' in v.name]
loss_motion = mse_latent
loss_motion = tf.add_n([loss_motion, tf.add_n(
[tf.nn.l2_loss(v) for v in var_motion if 'kernel' in v.name or 'weight' in v.name]) * args.regu_par])
train_op_z = loss_tf.train_op(loss_motion, z_lrate, var_opt=var_motion, name='train_latent_z')
train_z_group = [z_lrate, train_op_z]
else:
train_z_group = [z_lrate, []]
saver_set_all = tf.train.Saver(tf.trainable_variables(), max_to_keep=1)
input_group = [image_init, image_placeholder, z_mse_ratio_placeholder]
loss_group = [mse_pixel, mse_latent, loss_tot]
train_group = [g_lrate, train_op_0, saver_set_all]
        if self.model_type == "2d_2d_pure_unet" or self.model_type == "2d_2d_unet_no_shortcut":
im_stat = [p_x_recons, x_recons_gt, p_x_pred, x_pred_gt]
else:
im_stat = [p_x_pred, x_pred_gt]
return input_group, loss_group, train_group, train_z_group, im_stat
def build_train_op(self, sess, image_init, placeholder_group,
x_train, single_epoch, num_epoch_for_full, loss_group, train_op_group):
train_op_0, train_op_z = train_op_group
image_placeholder, z_mse_placeholder, g_lrate_placeholder, z_lrate_placeholder = placeholder_group
sess.run(image_init.initializer, feed_dict={image_placeholder: x_train})
num_tr_iter_per_epoch = np.shape(x_train)[0] // self.batch_size
lrate_g_npy = self.lrate_g_init * math.pow(0.1, math.floor(float(single_epoch) / float(self.lrate_g_decay_step)))
lrate_z_npy = self.lrate_z_init * math.pow(0.1, math.floor(float(single_epoch - num_epoch_for_full) / float(self.lrate_z_decay_step)))
loss_per_epoch = []
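        # Two-phase schedule: for the first `num_epoch_for_full` epochs the whole network is
        # optimized with train_op_0; after that only the motion/latent module is updated via train_op_z.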
if single_epoch <= num_epoch_for_full:
fetches_tr = [train_op_0]
else:
fetches_tr = [train_op_z]
fetches_tr.append(loss_group)
for single_iter in range(num_tr_iter_per_epoch):
_, _loss_group = sess.run(fetches=fetches_tr, feed_dict={z_mse_placeholder: self.z_mse_ratio,
g_lrate_placeholder: lrate_g_npy,
z_lrate_placeholder: lrate_z_npy})
loss_per_epoch.append(_loss_group)
return np.mean(loss_per_epoch, axis=0)
def build_val_op(self, sess, image_init, image_placeholder, x_val, loss_group, image_stat, image_path, single_epoch):
sess.run(image_init.initializer, feed_dict={image_placeholder: x_val})
num_val_iter_per_epoch = np.shape(x_val)[0] // self.batch_size
# image_stat: [p_x_recons, p_x_pred, x_recons_gt, x_pred_gt]
# or
# image_stat: [p_x_pred, x_pred_gt]
loss_val_per_epoch = []
for single_val_iter in range(num_val_iter_per_epoch):
if single_val_iter != num_val_iter_per_epoch - 1:
_loss_val = sess.run(fetches=loss_group)
else:
_loss_val, _stat_use = sess.run(fetches=[loss_group, image_stat])
for single_input, single_path in zip(_stat_use, image_path):
for j in range(np.shape(single_input)[0]):
im_use = single_input[j, :]
shape_use = np.array(np.shape(im_use)[1:])
cv2.imwrite(os.path.join(single_path, "epoch_%d_frame_%d.jpg" % (single_epoch, j)),
(plot_canvas(im_use, shape_use)).astype('uint8')[:, :, ::-1])
loss_val_per_epoch.append(_loss_val)
return np.mean(loss_val_per_epoch, axis=0)
def build_running(self):
im_path = os.path.join(self.model_dir, 'recons_gt')
recons_path = os.path.join(self.model_dir, 'p_x_recons')
im_pred_path = os.path.join(self.model_dir, 'pred_gt')
pred_path = os.path.join(self.model_dir, 'p_x_pred')
        if self.model_type == "2d_2d_pure_unet" or self.model_type == "2d_2d_unet_no_shortcut":
path_group = [recons_path, im_path, pred_path, im_pred_path]
else:
path_group = [pred_path, im_pred_path]
for i in path_group:
if not os.path.exists(i):
os.makedirs(i)
with tf.Graph().as_default():
input_group, loss_group, train_group, train_z_group, im_stat = self.build_graph()
image_init, image_placeholder, z_mse_ratio_placeholder = input_group
mse_pixel_loss, mse_latent_loss, mse_tot = loss_group
g_lrate, train_op, saver = train_group
#z_lrate, train_z_op = train_z_group
saver_restore = None
tot_num_frame = np.shape(self.test_im)[0]
test_im_shuffle = self.test_im[np.random.choice(np.arange(tot_num_frame),
tot_num_frame,
replace=False)]
placeholder_group = [image_placeholder, z_mse_ratio_placeholder, g_lrate, train_z_group[0]]
loss_group = [mse_pixel_loss, mse_latent_loss]
train_group = [train_op, train_z_group[-1]]
if "ucsd" in self.data_set:
x_train = test_im_shuffle[:-self.batch_size * 4]
x_val = test_im_shuffle[-self.batch_size * 4:]
elif "avenue" in self.data_set or "shanghaitech" in self.data_set:
x_train = test_im_shuffle[:-self.batch_size * 20]
x_val = test_im_shuffle[-self.batch_size * 20:]
else:
x_train = test_im_shuffle[:-self.batch_size * 2]
x_val = test_im_shuffle[-self.batch_size * 2:]
            if self.data_set == "ucsd1" and self.model_type != "many_to_one":
num_epoch_for_full = 25
else:
num_epoch_for_full = self.lrate_g_decay_step
checkpoint_path = self.model_dir + '/model.ckpt'
print("====================================================================================")
print("There are %d frames in total" % np.shape(self.test_im)[0])
print("The shape of training and validation images", np.shape(x_train), np.shape(x_val))
print(
"%d input frames are loaded with %d stride for predicting furture frame at time t+%d" % (self.time_step,
self.interval,
self.delta))
print("The lr for whole process start from %.4f and decay 0.1 every %d epoch" % (
self.lrate_g_init, self.lrate_g_decay_step))
print("The lr for motion process start from %.4f and decay 0.1 every %d epoch" % (
self.lrate_z_init, self.lrate_z_decay_step))
print("The ratio for the latent space mse loss== ", self.z_mse_ratio)
print("The used background index is:", self.train_index)
print("I am only focusing on the reconstruction for the first %d epochs" % num_epoch_for_full)
print("====================================================================================")
            with tf.Session() as sess:
                if self.ckpt_dir is None:
                    sess.run(tf.global_variables_initializer())
                    sess.run(tf.local_variables_initializer())
                else:
                    sess.run(tf.global_variables_initializer())
                    sess.run(tf.local_variables_initializer())
                    saver_restore.restore(sess, self.ckpt_dir)
                    print("restore parameter from ", self.ckpt_dir)
                loss_tr_tot = np.zeros([self.max_epoch, 2])
                loss_val_tot = []
                try:
                    for single_epoch in range(self.max_epoch):
                        loss_per_epoch = self.build_train_op(sess, image_init, placeholder_group, x_train,
                                                             single_epoch, num_epoch_for_full, loss_group,
                                                             train_group)
                        loss_tr_tot[single_epoch, :] = loss_per_epoch
                        print("Epoch %d with training pixel mse loss %.3f z mse %.3f" % (single_epoch,
                                                                                         loss_tr_tot[single_epoch, 0],
                                                                                         loss_tr_tot[single_epoch, 1]))
                        if single_epoch % 5 == 0 or single_epoch == self.max_epoch - 1:
                            # sess, image_init, image_placeholder, x_val, loss_group, image_stat, image_path, single_epoch)
                            loss_val_per_epoch = self.build_val_op(sess, image_init, image_placeholder, x_val, loss_group, im_stat,
                                                                   path_group, single_epoch)
                            loss_val_tot.append(loss_val_per_epoch)
                            print("Epoch %d with validation pixel mse loss %.3f z mse %.3f" % (single_epoch,
                                                                                               loss_val_tot[-1][0],
                                                                                               loss_val_tot[-1][1]))
                        if np.isnan(loss_tr_tot[single_epoch, 0]):
                            np.save(self.model_dir + '/tr_loss', loss_tr_tot)
                            np.save(self.model_dir + '/val_loss', np.array(loss_val_tot))
                        if single_epoch % 5 == 0 and single_epoch != 0:
                            np.save(self.model_dir + '/tr_loss', loss_tr_tot)
                            np.save(self.model_dir + '/val_loss', np.array(loss_val_tot))
                            saver.save(sess, checkpoint_path, global_step=single_epoch)
                        if single_epoch == self.max_epoch - 1:
                            saver.save(sess, checkpoint_path, global_step=single_epoch)
                            np.save(self.model_dir + '/tr_loss', loss_tr_tot)
                            np.save(self.model_dir + '/val_loss', np.array(loss_val_tot))
                except tf.errors.OutOfRangeError:
                    print("---oh my god, my model again couldn't read the data----")
                    print("training stopped early at epoch", single_epoch)
                    np.save(os.path.join(self.model_dir, 'tr_loss'), loss_tr_tot)
                    np.save(os.path.join(self.model_dir, 'val_loss'), np.array(loss_val_tot))
                    saver.save(sess, checkpoint_path, global_step=single_epoch)
                    pass
def plot_canvas(image, imshape, ny=8):
if np.shape(image)[0] < ny:
ny = np.shape(image)[0]
nx = np.shape(image)[0] // ny
x_values = np.linspace(-3, 3, nx)
y_values = np.linspace(-3, 3, ny)
targ_height, targ_width = imshape[0], imshape[1]
if np.shape(image)[-1] == 1:
image = np.repeat(image, 3, -1)
imshape[-1] = 3
canvas = np.empty((targ_height * nx, targ_width * ny, 3))
for i, yi in enumerate(x_values):
for j, xi in enumerate(y_values):
canvas[(nx - i - 1) * targ_height:(nx - i) * targ_height,
j * targ_width:(j + 1) * targ_width, :] = np.reshape(image[i * ny + j], imshape)
return (canvas * 255.0).astype('uint8')
if __name__ == '__main__':
args = const.args
print("-------------------------------------------------------------------")
print("------------------argument for current experiment------------------")
print("-------------------------------------------------------------------")
for arg in vars(args):
print(arg, getattr(args, arg))
print("-------------------------------------------------------------------")
print(type(args.version), args.version)
if args.version == 0:
print("only running experiment once")
train_end2end(args, args.data_set, args.model_type,
args.motion_method, version=args.version, bg_ind=None, augment_opt="none")
else:
for s_version in range(args.version):
print("running experiment for version %d" % s_version)
train_end2end(args, args.data_set, args.model_type,
args.motion_method, version=s_version, bg_ind=None, augment_opt="none")
| 1.773438 | 2 |
scripts/pretty-printers/gdb/install.py | tobireinhard/cbmc | 412 | 20484 | <gh_stars>100-1000
#!/usr/bin/env python3
import os
from shutil import copyfile
def create_gdbinit_file():
"""
Create and insert into a .gdbinit file the python code to set-up cbmc pretty-printers.
"""
print("Attempting to enable cbmc-specific pretty-printers.")
home_folder = os.path.expanduser("~")
if not home_folder:
print(home_folder + " is an invalid home folder, can't auto-configure .gdbinit.")
return
# This is the code that should be copied if you're applying the changes by hand.
gdb_directory = os.path.dirname(os.path.abspath(__file__))
code_block_start = "cbmc_printers_folder = "
code_block = \
[
"{0}'{1}'".format(code_block_start, gdb_directory),
"if os.path.exists(cbmc_printers_folder):",
" sys.path.insert(1, cbmc_printers_folder)",
" from pretty_printers import load_cbmc_printers",
" load_cbmc_printers()",
]
gdbinit_file = os.path.join(home_folder, ".gdbinit")
lines = []
imports = { "os", "sys" }
if os.path.exists(gdbinit_file):
with open(gdbinit_file, 'r') as file:
lines = [ line.rstrip() for line in file ]
line_no = 0
while line_no < len(lines):
if lines[line_no].startswith('import '):
imports.add(lines[line_no][len("import "):].strip())
lines.pop(line_no)
else:
if lines[line_no].startswith(code_block_start):
print(".gdbinit already contains our pretty printers, not changing it")
return
line_no += 1
while len(lines) != 0 and (lines[0] == "" or lines[0] == "python"):
lines.pop(0)
backup_file = os.path.join(home_folder, "backup.gdbinit")
if os.path.exists(backup_file):
print("backup.gdbinit file already exists. Type 'y' if you would like to overwrite it or any other key to exit.")
choice = input().lower()
if choice != 'y':
return
print("Backing up {0}".format(gdbinit_file))
copyfile(gdbinit_file, backup_file)
lines = [ "python" ] + list(map("import {}".format, sorted(imports))) + [ "", "" ] + code_block + [ "", "" ] + lines + [ "" ]
print("Adding pretty-print commands to {0}.".format(gdbinit_file))
try:
with open(gdbinit_file, 'w+') as file:
file.write('\n'.join(lines))
print("Commands added.")
except:
print("Exception occured writing to file. Please apply changes manually.")
if __name__ == "__main__":
create_gdbinit_file()
| 1.460938 | 1 |
OpenAI-Gym/agents/ddpg.py | stmobo/Machine-Learning | 2 | 20612 | import tensorflow as tf
import prettytensor as pt
import numpy as np
import gym
import math
import random
from collections import deque
from agents import mixed_network, spaces, replay_buffer
tensorType = tf.float32
"""
Implements a Deep Deterministic Policy Gradient agent.
Adjustable parameters:
- Actor / Critic learning rates
- Temporal Difference discount factor
- Experience Replay buffer / batch sizes
"""
class DDPGAgent:
"""
Creates a new DDPG agent.
Args:
- actorGen and criticGen should be functions that create new
neural networks with supplied Placeholder input Tensors.
- state_shape will be the shape of the state input Placeholder.
- action_shape should be the shape of the tensors output by the
actor neural network.
- buf_sz is the size of the agent's internal experience replay buffer.
- batch_sz will be the size of each training batch (drawn from the replay buffer)
"""
def __init__(self, actorGen, criticGen, state_shape, action_shape, buf_sz=100000,
batch_sz=64, critic_learning_rate=0.001, actor_learning_rate=0.0001,
discount_factor=0.99, actor_mix_factor=0.001,
critic_mix_factor=0.001, actor_gradient_clipping=None, critic_gradient_clipping=None):
self.graph = tf.Graph()
self.session = tf.Session(graph=self.graph)
self.discount_factor = discount_factor
self.replay_buf = deque(maxlen=buf_sz)
self.batch_size = batch_sz
self.state_shape = state_shape
self.action_shape = action_shape
self.__single_state_shape = self.state_shape[:]
self.__single_state_shape[0] = 1
with self.graph.as_default():
self.state_in = tf.placeholder(tensorType, state_shape, name='state-in')
self.action_in = tf.placeholder(tensorType, action_shape, name='action-in')
with tf.variable_scope('critic'):
self.critic = mixed_network.MixedNetwork(self.graph, self.session,
tf.concat_v2([self.state_in, self.action_in], axis=1),
criticGen, target_mix_factor=critic_mix_factor,
prefix='critic/')
self.critic_prediction = tf.placeholder(tensorType, [None])
self.critic_loss = tf.reduce_mean( tf.square( self.critic_prediction - tf.squeeze(self.critic.main_out) ) )
critic_optimizer = tf.train.AdamOptimizer(critic_learning_rate)
if isinstance(critic_gradient_clipping, tuple):
critic_gradients = critic_optimizer.compute_gradients(self.critic_loss, self.critic.main_parameters)
clipped_grads = [ \
( tf.clip_by_value(gv[0], critic_gradient_clipping[0], critic_gradient_clipping[1]), gv[1]) \
for gv in critic_gradients ]
self.critic_optimize = critic_optimizer.apply_gradients(clipped_grads)
else:
self.critic_optimize = critic_optimizer.minimize(self.critic_loss, var_list=self.critic.main_parameters)
# gradient of the critic network w.r.t. the actions, averaged over all (s,a) pairs in batch
self.action_gradient = tf.div(tf.gradients(self.critic.main_out, self.action_in), tf.constant(self.batch_size, tensorType))
with tf.variable_scope('actor'):
self.actor = mixed_network.MixedNetwork(self.graph,
self.session, self.state_in, actorGen, prefix='actor/',
target_mix_factor=actor_mix_factor)
#self.aGrad_pl = tf.placeholder(tensorType, action_shape, name='action-gradient-placeholder')
self.actor_gradients = tf.gradients(self.actor.main_out, self.actor.main_parameters, self.action_gradient)
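            # Deterministic policy gradient: dQ/da (the action gradient computed from the critic
            # above) is passed as the initial gradient, so these are effectively dQ/da * da/dtheta.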
#self.actor_optimize = [p.assign(p + actor_learning_rate*g) \
#for p, g in zip(self.actor.main_parameters, self.actor_gradients)]
#self.actor_optimize = tf.train.GradientDescentOptimizer(actor_learning_rate).apply_gradients(
# zip(self.actor_gradients, self.actor.main_parameters)
#)
if isinstance(actor_gradient_clipping, tuple):
self.actor_gradients = [tf.clip_by_value(g, actor_gradient_clipping[0], actor_gradient_clipping[1]) for g in self.actor_gradients]
self.actor_gradients = [tf.negative(g) for g in self.actor_gradients]
self.actor_optimize = tf.train.AdamOptimizer(actor_learning_rate).apply_gradients(
zip(self.actor_gradients, self.actor.main_parameters)
)
self.session.run(tf.global_variables_initializer())
def act(self, observation):
return self.actor.get_main({ self.state_in: np.reshape(observation, self.__single_state_shape)})
def add_experience(self, state, action, reward, done, next_state):
self.replay_buf.append( (state, action, reward, done, next_state) )
def train(self):
sm = random.sample(self.replay_buf, min(len(self.replay_buf), self.batch_size))
state_shape = self.state_shape[:]
action_shape = self.action_shape[:]
state_shape[0] = action_shape[0] = len(sm)
states = np.reshape([ ts[0] for ts in sm ], state_shape)
actions = np.reshape([ ts[1] for ts in sm ], action_shape)
rewards = np.reshape([ ts[2] for ts in sm ], [len(sm)])
term_state = np.reshape([ ts[3] for ts in sm ], [len(sm)])
next_states = np.reshape([ ts[4] for ts in sm ], state_shape)
# Use target actor and critic networks to estimate TD targets
target_a = np.reshape(self.actor.get_target({self.state_in:next_states}), action_shape)
target_q = np.reshape(self.critic.get_target({ self.state_in:next_states, self.action_in:target_a }), [len(sm)])
td_targets = []
for i, t in enumerate(target_q):
if term_state[i]:
td_targets.append(rewards[i])
else:
td_targets.append(rewards[i] + (self.discount_factor * t))
_, crit_loss, predicted_q = self.session.run([self.critic_optimize, self.critic_loss, self.critic.main_out], {
self.state_in: states,
self.action_in: actions,
self.critic_prediction: np.squeeze(td_targets)
})
net_actions = np.reshape(self.actor.get_main({self.state_in: states}), action_shape)
self.session.run(self.actor_optimize, {self.state_in:states, self.action_in:net_actions})
#self.session.run(self.actor_optimize, {self.state_in:states, self.action_in:actions})
#actor_grad = self.session.run(self.actor_gradients, {self.state_in:states, self.action_in:net_actions})[0]
#assert not np.isnan(np.sum(actor_grad))
return np.squeeze(predicted_q), crit_loss
def update_targets(self):
self.actor.update_target()
self.critic.update_target()
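# Example training loop (a sketch only; `actorGen`/`criticGen` are user-supplied network
# builders and `env` is an OpenAI Gym environment -- neither is defined in this module):
#
#     agent = DDPGAgent(actorGen, criticGen, state_shape=[None, 3], action_shape=[None, 1])
#     state = env.reset()
#     while True:
#         action = agent.act(state)
#         next_state, reward, done, _ = env.step(action)
#         agent.add_experience(state, action, reward, done, next_state)
#         agent.train()
#         agent.update_targets()
#         state = env.reset() if done else next_state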
| 1.9375 | 2 |
Latest/venv/Lib/site-packages/apptools/io/h5/utils.py | adamcvj/SatelliteTracker | 1 | 20740 | from contextlib import contextmanager
from .file import H5File
@contextmanager
def open_h5file(filename, mode='r+', **kwargs):
"""Context manager for reading an HDF5 file as an H5File object.
Parameters
----------
filename : str
HDF5 file name.
mode : str
Mode to open the file:
'r' : Read-only
'w' : Write; create new file (an existing file would be deleted).
'a' : Read and write to file; create if not existing
'r+': Read and write to file; must already exist
See `H5File` for additional keyword arguments.
"""
h5 = H5File(filename, mode=mode, **kwargs)
try:
yield h5
finally:
h5.close()
| 2.015625 | 2 |
shop_website/users/views.py | omar00070/django-shopping-website | 0 | 20868 | from django.shortcuts import render
from .forms import RegistrationForm, UserUpdateForm, ProfileUpdateForm
from django.shortcuts import redirect
from .models import Profile
from django.contrib.auth.decorators import login_required
def registration(request):
if request.method == 'POST':
form = RegistrationForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
return redirect('login')
else:
form = RegistrationForm()
return render(request, 'users/register.html', {'form': form})
@login_required()
def profile(request):
if request.method == 'POST':
u_form = UserUpdateForm(request.POST, instance=request.user)
p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
return redirect('profile')
else:
u_form = UserUpdateForm(instance=request.user)
p_form = ProfileUpdateForm(instance=request.user.profile)
context = {'u_form':u_form, 'p_form':p_form}
return render(request, 'users/profile.html', context) | 1.367188 | 1 |
pysparkling/sql/expressions/literals.py | ptallada/pysparkling | 260 | 20996 | from ..utils import AnalysisException
from .expressions import Expression
class Literal(Expression):
def __init__(self, value):
super().__init__()
self.value = value
def eval(self, row, schema):
return self.value
def __str__(self):
if self.value is True:
return "true"
if self.value is False:
return "false"
if self.value is None:
return "NULL"
return str(self.value)
def get_literal_value(self):
if hasattr(self.value, "expr") or isinstance(self.value, Expression):
raise AnalysisException("Value should not be a Column or an Expression,"
f" but got {type(self)}: {self}")
return self.value
def args(self):
return (self.value, )
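# Example (sketch): Literal(42).eval(row=None, schema=None) returns 42,
# and str(Literal(None)) renders as "NULL".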
__all__ = ["Literal"]
| 1.984375 | 2 |
django_watermark_images/items/migrations/0001_initial.py | abarto/django-watermark-images | 11 | 21124 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-10 16:15
from __future__ import unicode_literals
from django.db import migrations, models
import django_extensions.db.fields
import items.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created')),
('modified', django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified')),
('title', models.CharField(max_length=255, verbose_name='title')),
('description', models.TextField(blank=True, null=True, verbose_name='description')),
('image', models.ImageField(upload_to=items.models.image_upload_to, verbose_name='original image')),
],
options={
'abstract': False,
},
),
]
| 0.957031 | 1 |
HUGGINGFACE.py | mkingopng/NBME_score_clinical_patient_notes | 1 | 21252 | import os
TRANSFORMERS = '/home/noone/documents/github/transformers'
TOKENIZERS = '/home/noone/documents/github/tokenizers'
DATASETS = '/home/noone/documents/github/datasets'
MODELS = os.path.join(TRANSFORMERS, 'src/transformers/models')
DEBERTA_V2 = os.path.join(MODELS, 'deberta_v2')
DEBERTA_V3 = os.path.join(MODELS, 'deberta-v3-base')
ENCODER_DECODER = os.path.join(MODELS, 'encoder_decoder')
HUGGINGFACE_HUB = '/home/noone/documents/github/huggingface_hub'
"""
Huggingface Repos Cloned:
- transformers
- tokenizers
    - optimum
- datasets
- huggingface_hub
- accelerate
- notebooks
- blog
- huggingface sagemaker snowflake example
- education toolkit
- evaluate
- knockknock
- neuralcoref
- mongoku
- data-measurements-tool
- neural compressor
- allennlp
- pytorch-openai-transformer-lm
- pytorch pretrained bigGAN
- awesome NLP discussion papers
- torchMoji
- naacl_transfer_learning_tutorial
-
""" | 1.09375 | 1 |
grr/core/grr_response_core/lib/rdfvalue_test.py | khanhgithead/grr | 4,238 | 21380 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""Tests for utility classes."""
import datetime
import sys
import unittest
from absl import app
from absl.testing import absltest
from grr_response_core.lib import rdfvalue
from grr.test_lib import test_lib
long_string = (
"迎欢迎\n"
"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Morbi luctus "
"ex sed dictum volutpat. Integer maximus, mauris at tincidunt iaculis, "
"felis magna scelerisque ex, in scelerisque est odio non nunc. "
"Suspendisse et lobortis augue. Donec faucibus tempor massa, sed dapibus"
" erat iaculis ut. Vestibulum eu elementum nulla. Nullam scelerisque "
"hendrerit lorem. Integer vitae semper metus. Suspendisse accumsan "
"dictum felis. Etiam viverra, felis sed ullamcorper vehicula, libero "
"nisl tempus dui, a porta lacus erat et erat. Morbi mattis elementum "
"efficitur. Pellentesque aliquam placerat mauris non accumsan.")
class RDFValueTest(absltest.TestCase):
"""RDFValue tests."""
def testStr(self):
"""Test RDFValue.__str__."""
self.assertEqual(str(rdfvalue.RDFInteger(1)), "1")
self.assertEqual(str(rdfvalue.RDFString(long_string)), long_string)
# TODO(hanuszczak): Current implementation of `repr` for RDF values is broken
# and not in line with Python guidelines. For example, `repr` should be
# unambiguous whereas current implementation will trim long representations
# with `...`. Moreover, the representation for most types is questionable at
# best.
#
# The implementation should be fixed and proper tests should be written.
class RDFBytesTest(absltest.TestCase):
def testFromHumanReadable(self):
string = u"zażółć gęślą jaźń"
result = rdfvalue.RDFBytes.FromHumanReadable(string)
expected = rdfvalue.RDFBytes.FromSerializedBytes(string.encode("utf-8"))
self.assertEqual(result, expected)
class RDFStringTest(absltest.TestCase):
def testFromHumanReadable(self):
string = u"pchnąć w tę łódź jeża lub ośm skrzyń fig"
result = rdfvalue.RDFString.FromHumanReadable(string)
self.assertEqual(str(result), string)
def testEqualWithBytes(self):
self.assertEqual(rdfvalue.RDFString(u"foo"), b"foo")
self.assertNotEqual(rdfvalue.RDFString(u"foo"), b"\x80\x81\x82")
def testLessThanWithBytes(self):
self.assertLess(rdfvalue.RDFString(u"abc"), b"def")
self.assertGreater(rdfvalue.RDFString(u"xyz"), b"ghi")
self.assertLess(rdfvalue.RDFString(u"012"), b"\x80\x81\x81")
# TODO: Python on Windows ships with UCS-2 by default, which does
# not properly support unicode.
@unittest.skipIf(
sys.maxunicode <= 65535,
"Your Python installation does not properly support Unicode (likely: "
"Python with no UCS4 support on Windows.")
def testLenOfEmoji(self):
self.assertLen(rdfvalue.RDFString("🚀🚀"), 2)
class RDFIntegerTest(absltest.TestCase):
def testFromHumanReadable(self):
result = rdfvalue.RDFInteger.FromHumanReadable(u"42")
self.assertEqual(result, rdfvalue.RDFInteger(42))
def testFromHumanReadablePositive(self):
result = rdfvalue.RDFInteger.FromHumanReadable(u"+108")
self.assertEqual(result, rdfvalue.RDFInteger(108))
def testFromHumanReadableNegative(self):
result = rdfvalue.RDFInteger.FromHumanReadable(u"-1337")
self.assertEqual(result, rdfvalue.RDFInteger(-1337))
def testFromHumanReadableZero(self):
result = rdfvalue.RDFInteger.FromHumanReadable(u"0")
self.assertEqual(result, rdfvalue.RDFInteger(0))
def testFromHumanReadableRaisesOnNonInteger(self):
with self.assertRaises(ValueError):
rdfvalue.RDFInteger.FromHumanReadable(u"12.3")
def testFromHumanReadableRaisesOnNonDecimal(self):
with self.assertRaises(ValueError):
rdfvalue.RDFInteger.FromHumanReadable(u"12A")
class RDFDateTimeTest(absltest.TestCase):
def testLerpMiddle(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2010-01-01")
end_time = start_time + rdfvalue.Duration.From(10, rdfvalue.DAYS)
lerped_time = rdfvalue.RDFDatetime.Lerp(
0.5, start_time=start_time, end_time=end_time)
self.assertEqual(lerped_time,
start_time + rdfvalue.Duration.From(5, rdfvalue.DAYS))
def testLerpZero(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2000-01-01")
end_time = rdfvalue.RDFDatetime.FromHumanReadable("2020-01-01")
lerped_time = rdfvalue.RDFDatetime.Lerp(
0.0, start_time=start_time, end_time=end_time)
self.assertEqual(lerped_time, start_time)
def testLerpOne(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2000-01-01")
end_time = rdfvalue.RDFDatetime.FromHumanReadable("2020-01-01")
lerped_time = rdfvalue.RDFDatetime.Lerp(
1.0, start_time=start_time, end_time=end_time)
self.assertEqual(lerped_time, end_time)
def testLerpQuarter(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2000-01-01")
end_time = start_time + rdfvalue.Duration.From(4, rdfvalue.DAYS)
lerped_time = rdfvalue.RDFDatetime.Lerp(
0.25, start_time=start_time, end_time=end_time)
self.assertEqual(lerped_time,
start_time + rdfvalue.Duration.From(1, rdfvalue.DAYS))
def testLerpRaisesTypeErrorIfTimesAreNotRDFDatetime(self):
now = rdfvalue.RDFDatetime.Now()
with self.assertRaisesRegex(TypeError, "non-datetime"):
rdfvalue.RDFDatetime.Lerp(0.0, start_time=10, end_time=now)
with self.assertRaisesRegex(TypeError, "non-datetime"):
rdfvalue.RDFDatetime.Lerp(
0.0,
start_time=now,
end_time=rdfvalue.Duration.From(1, rdfvalue.DAYS))
def testLerpRaisesValueErrorIfProgressIsNotNormalized(self):
start_time = rdfvalue.RDFDatetime.FromHumanReadable("2010-01-01")
end_time = rdfvalue.RDFDatetime.FromHumanReadable("2011-01-01")
with self.assertRaises(ValueError):
rdfvalue.RDFDatetime.Lerp(1.5, start_time=start_time, end_time=end_time)
with self.assertRaises(ValueError):
rdfvalue.RDFDatetime.Lerp(-0.5, start_time=start_time, end_time=end_time)
def testFloorToMinutes(self):
dt = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34:56")
expected = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34")
self.assertEqual(
dt.Floor(rdfvalue.Duration.From(60, rdfvalue.SECONDS)), expected)
def testFloorToHours(self):
dt = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34")
expected = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:00")
self.assertEqual(
dt.Floor(rdfvalue.Duration.From(1, rdfvalue.HOURS)), expected)
def testFloorToDays(self):
dt = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34")
expected = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11")
self.assertEqual(
dt.Floor(rdfvalue.Duration.From(1, rdfvalue.DAYS)), expected)
def testFloorExact(self):
dt = rdfvalue.RDFDatetime.FromHumanReadable("2011-11-11 12:34:56")
self.assertEqual(dt.Floor(rdfvalue.Duration.From(1, rdfvalue.SECONDS)), dt)
class RDFDatetimeSecondsTest(absltest.TestCase):
def testFromDatetime_withMicroSeconds(self):
dt_with_micros = datetime.datetime(2000, 1, 1, microsecond=5000)
dt = datetime.datetime(2000, 1, 1)
self.assertEqual(
rdfvalue.RDFDatetimeSeconds.FromDatetime(dt_with_micros),
rdfvalue.RDFDatetimeSeconds.FromDatetime(dt))
def testBug122716179(self):
d = rdfvalue.RDFDatetimeSeconds.FromSecondsSinceEpoch(1)
self.assertEqual(d.AsMicrosecondsSinceEpoch(), 1000000)
diff = rdfvalue.RDFDatetimeSeconds(10) - rdfvalue.Duration("3s")
self.assertEqual(diff.AsMicrosecondsSinceEpoch(), 7000000)
class DurationSecondsTest(absltest.TestCase):
def testPublicAttributes(self):
duration = rdfvalue.DurationSeconds.FromHumanReadable("1h")
self.assertEqual(duration.ToInt(rdfvalue.SECONDS), 3600)
self.assertEqual(duration.ToInt(rdfvalue.MILLISECONDS), 3600 * 1000)
self.assertEqual(duration.microseconds, 3600 * 1000 * 1000)
def testFromDays(self):
self.assertEqual(
rdfvalue.DurationSeconds.From(2, rdfvalue.DAYS),
rdfvalue.DurationSeconds.FromHumanReadable("2d"))
self.assertEqual(
rdfvalue.DurationSeconds.From(31, rdfvalue.DAYS),
rdfvalue.DurationSeconds.FromHumanReadable("31d"))
def testFromHours(self):
self.assertEqual(
rdfvalue.DurationSeconds.From(48, rdfvalue.HOURS),
rdfvalue.DurationSeconds.FromHumanReadable("48h"))
self.assertEqual(
rdfvalue.DurationSeconds.From(24, rdfvalue.HOURS),
rdfvalue.DurationSeconds.FromHumanReadable("24h"))
def testFromSeconds(self):
self.assertEqual(
rdfvalue.DurationSeconds.From(1337,
rdfvalue.SECONDS).ToInt(rdfvalue.SECONDS),
1337)
def testFromMicroseconds(self):
duration = rdfvalue.DurationSeconds.From(3000000, rdfvalue.MICROSECONDS)
self.assertEqual(duration.microseconds, 3000000)
self.assertEqual(duration.ToInt(rdfvalue.SECONDS), 3)
def testFloatConstructorRaises(self):
with self.assertRaises(TypeError):
rdfvalue.DurationSeconds(3.14)
def testSerializeToBytes(self):
self.assertEqual(
b"0",
rdfvalue.DurationSeconds.From(0, rdfvalue.WEEKS).SerializeToBytes())
self.assertEqual(
b"1",
rdfvalue.DurationSeconds.From(1, rdfvalue.SECONDS).SerializeToBytes())
self.assertEqual(
b"2",
rdfvalue.DurationSeconds.From(2, rdfvalue.SECONDS).SerializeToBytes())
self.assertEqual(
b"999",
rdfvalue.DurationSeconds.From(999, rdfvalue.SECONDS).SerializeToBytes())
self.assertEqual(
b"1000",
rdfvalue.DurationSeconds.From(1000,
rdfvalue.SECONDS).SerializeToBytes())
def testFromWireFormat(self):
for i in [0, 7, 1337]:
val = rdfvalue.DurationSeconds.FromWireFormat(i)
self.assertEqual(i, val.ToInt(rdfvalue.SECONDS))
val2 = rdfvalue.DurationSeconds.FromWireFormat(
val.SerializeToWireFormat())
self.assertEqual(val, val2)
MAX_UINT64 = 18446744073709551615
class DurationTest(absltest.TestCase):
def testInitializationFromMicroseconds(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64]:
val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
self.assertEqual(i, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} us".format(i)))
self.assertEqual(val, rdfvalue.Duration(i))
def testInitializationFromMilliseconds(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 1000]:
val = rdfvalue.Duration.From(i, rdfvalue.MILLISECONDS)
self.assertEqual(i * 1000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} ms".format(i)))
def testInitializationFromSeconds(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 1000000]:
val = rdfvalue.Duration.From(i, rdfvalue.SECONDS)
self.assertEqual(i * 1000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} s".format(i)))
def testInitializationFromMinutes(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 60000000]:
val = rdfvalue.Duration.From(i, rdfvalue.MINUTES)
self.assertEqual(i * 60000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} m".format(i)))
def testInitializationFromHours(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 3600000000]:
val = rdfvalue.Duration.From(i, rdfvalue.HOURS)
self.assertEqual(i * 3600000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} h".format(i)))
def testInitializationFromDays(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 86400000000]:
val = rdfvalue.Duration.From(i, rdfvalue.DAYS)
self.assertEqual(i * 86400000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} d".format(i)))
def testInitializationFromWeeks(self):
for i in [0, 1, 7, 60, 1337, MAX_UINT64 // 604800000000]:
val = rdfvalue.Duration.From(i, rdfvalue.WEEKS)
self.assertEqual(i * 604800000000, val.microseconds)
self.assertEqual(val,
rdfvalue.Duration.FromHumanReadable("{} w".format(i)))
def testConversionToInt(self):
for i in [0, 1, 7, 60, 1337, 12345, 123456, 1234567, MAX_UINT64]:
val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
self.assertEqual(val.ToInt(rdfvalue.MICROSECONDS), i)
self.assertEqual(val.ToInt(rdfvalue.MILLISECONDS), i // 1000)
self.assertEqual(val.ToInt(rdfvalue.SECONDS), i // (1000 * 1000))
self.assertEqual(val.ToInt(rdfvalue.MINUTES), i // (60 * 1000 * 1000))
self.assertEqual(val.ToInt(rdfvalue.HOURS), i // (60 * 60 * 1000 * 1000))
self.assertEqual(
val.ToInt(rdfvalue.DAYS), i // (24 * 60 * 60 * 1000 * 1000))
self.assertEqual(
val.ToInt(rdfvalue.WEEKS), i // (7 * 24 * 60 * 60 * 1000 * 1000))
def testConversionToFractional(self):
for i in [0, 1, 7, 60, 1337, 12345, 123456, 1234567, MAX_UINT64]:
val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
self.assertAlmostEqual(val.ToFractional(rdfvalue.MICROSECONDS), i)
self.assertAlmostEqual(val.ToFractional(rdfvalue.MILLISECONDS), i / 1000)
self.assertAlmostEqual(
val.ToFractional(rdfvalue.SECONDS), i / (1000 * 1000))
self.assertAlmostEqual(
val.ToFractional(rdfvalue.MINUTES), i / (60 * 1000 * 1000))
self.assertAlmostEqual(
val.ToFractional(rdfvalue.HOURS), i / (60 * 60 * 1000 * 1000))
self.assertAlmostEqual(
val.ToFractional(rdfvalue.DAYS), i / (24 * 60 * 60 * 1000 * 1000))
self.assertAlmostEqual(
val.ToFractional(rdfvalue.WEEKS),
i / (7 * 24 * 60 * 60 * 1000 * 1000))
def testStringDeserialization(self):
for i in [0, 1, 7, 60, 1337, 12345, 123456, 1234567, MAX_UINT64]:
val = rdfvalue.Duration.From(i, rdfvalue.MICROSECONDS)
self.assertEqual(
rdfvalue.Duration.FromSerializedBytes(val.SerializeToBytes()), val)
def testHumanReadableStringSerialization(self):
self.assertEqual("0 us", str(rdfvalue.Duration.From(0, rdfvalue.WEEKS)))
self.assertEqual("1 us",
str(rdfvalue.Duration.From(1, rdfvalue.MICROSECONDS)))
self.assertEqual("2 us",
str(rdfvalue.Duration.From(2, rdfvalue.MICROSECONDS)))
self.assertEqual("999 us",
str(rdfvalue.Duration.From(999, rdfvalue.MICROSECONDS)))
self.assertEqual("1 ms",
str(rdfvalue.Duration.From(1000, rdfvalue.MICROSECONDS)))
self.assertEqual("1 ms",
str(rdfvalue.Duration.From(1, rdfvalue.MILLISECONDS)))
self.assertEqual(
"{} us".format(MAX_UINT64),
str(rdfvalue.Duration.From(MAX_UINT64, rdfvalue.MICROSECONDS)))
self.assertEqual("3 s", str(rdfvalue.Duration.From(3, rdfvalue.SECONDS)))
self.assertEqual("3 m", str(rdfvalue.Duration.From(3, rdfvalue.MINUTES)))
self.assertEqual("3 h", str(rdfvalue.Duration.From(3, rdfvalue.HOURS)))
self.assertEqual("3 d", str(rdfvalue.Duration.From(3, rdfvalue.DAYS)))
self.assertEqual("3 w", str(rdfvalue.Duration.From(21, rdfvalue.DAYS)))
def testSerializeToBytes(self):
self.assertEqual(
b"0",
rdfvalue.Duration.From(0, rdfvalue.WEEKS).SerializeToBytes())
self.assertEqual(
b"1",
rdfvalue.Duration.From(1, rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
b"2",
rdfvalue.Duration.From(2, rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
b"999",
rdfvalue.Duration.From(999, rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
b"1000",
rdfvalue.Duration.From(1000, rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
str(MAX_UINT64).encode("utf-8"),
rdfvalue.Duration.From(MAX_UINT64,
rdfvalue.MICROSECONDS).SerializeToBytes())
self.assertEqual(
b"3000000",
rdfvalue.Duration.From(3, rdfvalue.SECONDS).SerializeToBytes())
def testAdditionOfDurationsIsEqualToIntegerAddition(self):
for a in [0, 1, 7, 60, 1337, MAX_UINT64 // 2]:
for b in [0, 1, 7, 60, 1337, MAX_UINT64 // 2]:
self.assertEqual(
rdfvalue.Duration(a) + rdfvalue.Duration(b),
rdfvalue.Duration(a + b))
def testSubtractionOfDurationsIsEqualToIntegerSubtraction(self):
for a in [0, 1, 7, 60, 1337, MAX_UINT64]:
for b in [0, 1, 7, 60, 1337, MAX_UINT64]:
self.assertEqual(
rdfvalue.Duration(a) - rdfvalue.Duration(min(a, b)),
rdfvalue.Duration(a - min(a, b)))
def testFromWireFormat(self):
for i in [0, 7, 1337, MAX_UINT64]:
val = rdfvalue.Duration.FromWireFormat(i)
self.assertEqual(i, val.microseconds)
def testSubtractionFromDateTimeIsEqualToIntegerSubtraction(self):
for a in [0, 1, 7, 60, 1337]:
for b in [0, 1, 7, 60, 1337]:
lhs = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(a)
rhs = rdfvalue.Duration(min(a, b))
result = lhs - rhs
self.assertEqual(result.AsMicrosecondsSinceEpoch(), a - min(a, b))
def testAdditionToDateTimeIsEqualToIntegerAddition(self):
for a in [0, 1, 7, 60, 1337]:
for b in [0, 1, 7, 60, 1337]:
lhs = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(a)
rhs = rdfvalue.Duration(b)
result = lhs + rhs
self.assertEqual(result.AsMicrosecondsSinceEpoch(), a + b)
def testComparisonIsEqualToIntegerComparison(self):
for a in [0, 1, 7, 60, 1337, MAX_UINT64 - 1, MAX_UINT64]:
for b in [0, 1, 7, 60, 1337, MAX_UINT64 - 1, MAX_UINT64]:
dur_a = rdfvalue.Duration(a)
dur_b = rdfvalue.Duration(b)
if a > b:
self.assertGreater(dur_a, dur_b)
if a >= b:
self.assertGreaterEqual(dur_a, dur_b)
if a == b:
self.assertEqual(dur_a, dur_b)
if a <= b:
self.assertLessEqual(dur_a, dur_b)
if a < b:
self.assertLess(dur_a, dur_b)
if a != b:
self.assertNotEqual(dur_a, dur_b)
class DocTest(test_lib.DocTest):
module = rdfvalue
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| 1.5625 | 2 |
users/models.py | uoe-compsci-grp30/campusgame | 0 | 21508 | <filename>users/models.py<gh_stars>0
import uuid
from django.contrib.auth.models import AbstractUser
from django.db import models
"""
The user model that represents a user participating in the game.
Implemented using the built-in Django user model: AbstractUser.
"""
class User(AbstractUser):
""" The User class that represents a user that has created an account.
Implemented using the built-in Django user model 'AbstractUser'.
The User class consists of an id that uniquely identifies a user. It uses a uuid in order to be more secure.
It also contains a profile picture that is uploaded by the user.
"""
id = models.UUIDField(default=uuid.uuid4, primary_key=True) # id uniquely identifies a user
is_gamekeeper = models.BooleanField(default=False) # is the user a gamekeeper?
class GameParticipation(models.Model):
"""
    Game Participation class represents information about a user currently participating in a game.
    This is useful because it provides an easy way to store data about users currently playing a game.
    The class consists of: a User that is currently playing the game, the Game that the user is
    currently participating in, the current Zone that the user is in, a boolean value of whether the
    user is alive, and a boolean value of whether the user is eliminated.
"""
user = models.ForeignKey(User, on_delete=models.CASCADE) # User that is currently participating in a game
game = models.ForeignKey("games.Game", on_delete=models.CASCADE) # What game is the user currently participating in
current_zone = models.ForeignKey("games.Zone", on_delete=models.DO_NOTHING) # What zone is the user currently in
score = models.IntegerField(default=0) # User score
is_alive = models.BooleanField(default=False) # Is the player alive
is_eliminated = models.BooleanField(default=False) # Is the player eliminated
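# Editor's sketch (not part of the original app): a small helper showing how a
# participation row might be created; `user`, `game` and `zone` are assumed to be
# existing model instances.
def start_participation(user, game, zone):
    """Record that `user` has joined `game`, starting in `zone`."""
    return GameParticipation.objects.create(
        user=user, game=game, current_zone=zone, is_alive=True)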
| 2.3125 | 2 |
morpheus/algorithms/kmeans.py | amirsh/MorpheusPy | 12 | 21636 | # Copyright 2018 <NAME> and <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import scipy.sparse as sp
from sklearn.base import BaseEstimator, ClusterMixin
class NormalizedKMeans(BaseEstimator, ClusterMixin):
def __init__(self, iterations=20, center_num=5):
self.center_num = center_num
self.iterations = iterations
def fit(self, X, k_center):
self.k_center, self.ya = self.k_means(X, self.iterations, self.center_num, k_center, X.shape[0])
return self
def k_means(self, data, iterations, center_num, k_center, rows):
all_one = np.matrix([1] * rows).T
all_one_k = np.matrix([1] * center_num)
all_one_c = np.matrix([1] * k_center.shape[0]).T
if sp.issparse(data):
t2 = (data.power(2)).sum(axis=1).dot(all_one_k)
else:
t2 = (np.power(data, 2)).sum(axis=1).reshape((-1, 1)) * all_one_k
t22 = data * 2
ya = None
for _ in range(iterations):
dist = t2 - t22 * k_center + all_one * np.power(k_center, 2).sum(axis=0)
            # assign each point to its nearest center (row-wise minimum, not the global minimum)
            ya = (dist == (np.amin(dist, axis=1) * all_one_k))
k_center = (data.T * ya) / (all_one_c * ya.sum(axis=0))
return k_center, ya
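# Editor's usage sketch (not part of the original library): cluster random dense
# data with 3 centers. The data matrix X and the (d x k) initial center matrix are
# made up for the example.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X = np.matrix(rng.rand(100, 4))
    init_centers = np.matrix(rng.rand(4, 3))
    model = NormalizedKMeans(iterations=10, center_num=3).fit(X, init_centers)
    print(model.ya.sum(axis=0))  # number of points assigned to each center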
| 2.421875 | 2 |
glow/generate_data_sources.py | tomcent-tom/glow | 0 | 21764 | from connectors.tableau.tableau import TableauConnector
from posixpath import join
from typing import List, Dict, Tuple
import argparse
import connectors.tableau
import os
import utils
import logging
import sys
import yaml
logging.basicConfig(level=logging.INFO)
MAIN_PATH = '/Users/tomevers/projects/airglow'
CONNECTIONS_CONF_FILE = 'airglow_connections.yml'
DS_FILENAME = 'data sources.yml'
DS_TEMPLATE = 'templates/data_source.md'
class ConnectionValidationError(Exception):
pass
def get_connections_config(yaml_format=True) -> dict:
yaml_file = os.path.join(MAIN_PATH, CONNECTIONS_CONF_FILE)
try:
return utils.get_file(yaml_file, yaml_format)
except FileNotFoundError:
logging.exception(FileNotFoundError('Airglow connections file can not be found.'))
sys.exit(1)
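# Editor's note (assumption inferred from how `tableau_config` is used below): the
# airglow_connections.yml file is expected to look roughly like this, with
# placeholder values:
#
#   connections:
#     tableau:
#       server: https://tableau.example.com
#       sitename: my-site
#       username: service-account
#       password: secret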
def store_ds(events_md: str, event: dict, docs_dir: str):
file_dir = os.path.join(docs_dir, 'data sources', event['category'])
file_name = event['name'] + '.md'
if not os.path.isdir(file_dir):
os.makedirs(file_dir)
with open(os.path.join(file_dir, file_name), 'w') as file:
file.write(events_md)
def generate_datasources_yaml():
conn_config = get_connections_config()
if 'connections' not in conn_config.keys():
logging.exception('connections info not found in airglow_connections config file.')
sys.exit(1)
tableau_config = conn_config['connections']['tableau']
tableau_connector = TableauConnector(server=tableau_config['server'],
sitename=tableau_config['sitename'],
password=tableau_config['password'],
username=tableau_config['username'])
ds = tableau_connector.fetch_datasources()
ds = [tableau_connector.generate_datasource_dag(datasource) for datasource in ds]
logging.info("storing data source")
with open(r'/Users/tomevers/projects/airglow/definitions/data sources.yml', 'w') as file:
documents = yaml.dump(ds, file, sort_keys=False)
return ds
def generate_markdown(datasource):
template_path = os.path.join(MAIN_PATH, DS_TEMPLATE)
with open(template_path, 'r') as file:
ds_md = file.read()
ds_md = ds_md.replace('{<yaml_header>}', yaml.dump(datasource))
return ds_md
def get_datasource_definitions(yaml_format=True) -> dict:
""" returns the data source definition yaml file as a dict.
Returns:
a dict with all data sources defined in the yaml file.
"""
yaml_file = os.path.join(MAIN_PATH, 'definitions', DS_FILENAME)
try:
return utils.get_file(yaml_file, yaml_format)
except FileNotFoundError:
logging.exception(FileNotFoundError('Datasource definition file can not be found.'))
sys.exit(1)
def main(args):
logging.info('Starting datasource generation script..')
logging.info('****************************************')
logging.info('** Step 1: Get all information')
logging.info('****************************************')
if args.use_local_definitions.lower() in ('true', '1', 't'):
logging.info('** Retrieving data source definitions from local yaml file')
datasource_defs = get_datasource_definitions()
else:
logging.info('** Retrieving data source definitions from Tableau')
datasource_defs = generate_datasources_yaml()
logging.info('****************************************')
logging.info('** Step 2: Generate and store event files.')
logging.info('****************************************')
for datasource in datasource_defs:
logging.info('generating datasource md file for {}'.format(datasource['data_source_name']))
ds_md = generate_markdown(datasource)
utils.store_md(ds_md, 'data sources', datasource['data_source_project'], datasource['data_source_name'], args.docs_dir)
if __name__ == "__main__":
parser = argparse.ArgumentParser('Script to convert event definitions file into markdown format.')
parser.add_argument('--docs_dir', type=str,
help='path to the folder where the generated docs should be stored. The script will need write access to this folder. Defaults to "./docs/"')
    parser.add_argument('--use_local_definitions', type=str,
                        help='if "true", load data source definitions from the local yaml file instead of fetching them from Tableau')
args = parser.parse_args()
main(args)
| 1.429688 | 1 |
utils/image.py | ariel415el/Efficient-GPNN | 7 | 21892 | import os
import cv2
import torch
from torch.nn import functional as F
from torchvision import transforms
import torchvision.utils
def save_image(img, path):
os.makedirs(os.path.dirname(path), exist_ok=True)
torchvision.utils.save_image(torch.clip(img, -1, 1), path, normalize=True)
def cv2pt(img):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img / 255.
img = img * 2 - 1
img = torch.from_numpy(img.transpose(2, 0, 1)).float()
return img
def aspect_ratio_resize(img, max_dim=256):
h, w, c = img.shape
if max(h, w) / max_dim > 1:
img = cv2.blur(img, ksize=(5, 5))
if w > h:
h = int(h/w*max_dim)
w = max_dim
else:
w = int(w/h*max_dim)
h = max_dim
return cv2.resize(img, (w, h), interpolation=cv2.INTER_AREA)
def downscale(img, pyr_factor):
assert 0 < pyr_factor < 1
new_w = int(pyr_factor * img.shape[-1])
new_h = int(pyr_factor * img.shape[-2])
return transforms.Resize((new_h, new_w), antialias=True)(img)
def blur(img, pyr_factor):
"""Blur image by downscaling and then upscaling it back to original size"""
if pyr_factor < 1:
d_img = downscale(img, pyr_factor)
img = transforms.Resize(img.shape[-2:], antialias=True)(d_img)
return img
def get_pyramid(img, min_height, pyr_factor):
res = [img]
while True:
img = downscale(img, pyr_factor)
if img.shape[-2] < min_height:
break
res = [img] + res
# ensure smallest size is of min_height
if res[0].shape[-2] != min_height:
new_width = int(min_height * res[0].shape[-1] / float(res[0].shape[-2]))
res[0] = transforms.Resize((min_height, new_width), antialias=True)(res[0])
res = [x.unsqueeze(0) for x in res]
return res
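# Editor's sketch (not part of the original repo): building a pyramid from a random
# 3x256x256 image; the sizes and values are made up for illustration.
def _pyramid_demo():
    img = torch.rand(3, 256, 256) * 2 - 1
    pyramid = get_pyramid(img, min_height=64, pyr_factor=0.75)
    return [level.shape[-2] for level in pyramid]  # coarsest-to-finest heights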
def match_image_sizes(input, target):
"""resize and crop input image so that it has the same aspect ratio as target"""
assert(len(input.shape) == len(target.shape) and len(target.shape) == 4)
input_h, input_w = input.shape[-2:]
target_h, target_w = target.shape[-2:]
input_scale_factor = input_h / input_w
target_scale_factor = target_h / target_w
if target_scale_factor > input_scale_factor:
input = transforms.Resize((target_h, int(input_w/input_h*target_h)), antialias=True)(input)
pixels_to_cut = input.shape[-1] - target_w
if pixels_to_cut > 0:
input = input[:, :, :, int(pixels_to_cut / 2):-int(pixels_to_cut / 2)]
else:
input = transforms.Resize((int(input_h/input_w*target_w), target_w), antialias=True)(input)
pixels_to_cut = input.shape[-2] - target_h
if pixels_to_cut > 1:
input = input[:, :, int(pixels_to_cut / 2):-int(pixels_to_cut / 2)]
input = transforms.Resize(target.shape[-2:], antialias=True)(input)
return input
def extract_patches(src_img, patch_size, stride):
"""
Splits the image to overlapping patches and returns a pytorch tensor of size (N_patches, 3*patch_size**2)
"""
channels = 3
patches = F.unfold(src_img, kernel_size=patch_size, dilation=(1, 1), stride=stride, padding=(0, 0)) # shape (b, 3*p*p, N_patches)
patches = patches.squeeze(dim=0).permute((1, 0)).reshape(-1, channels * patch_size**2)
return patches
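# Editor's sketch (not part of the original repo): with a patch grid that tiles the
# image exactly (31x31 image, patch 7, stride 3), folding the patches back with
# combine_patches (defined just below) reproduces the input up to numerical error.
def _patch_roundtrip_demo():
    img = torch.rand(1, 3, 31, 31)
    patches = extract_patches(img, patch_size=7, stride=3)
    rebuilt = combine_patches(patches, patch_size=7, stride=3, img_shape=img.shape)
    return (img - rebuilt).abs().max()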
def combine_patches(patches, patch_size, stride, img_shape):
"""
Combines patches into an image by averaging overlapping pixels
:param patches: patches to be combined. pytorch tensor of shape (N_patches, 3*patch_size**2)
:param img_shape: an image of a shape that if split into patches with the given stride and patch_size will give
the same number of patches N_patches
returns an image of shape img_shape
"""
patches = patches.permute(1,0).unsqueeze(0)
combined = F.fold(patches, output_size=img_shape[-2:], kernel_size=patch_size, stride=stride)
# normal fold matrix
input_ones = torch.ones(img_shape, dtype=patches.dtype, device=patches.device)
divisor = F.unfold(input_ones, kernel_size=patch_size, dilation=(1, 1), stride=stride, padding=(0, 0))
divisor = F.fold(divisor, output_size=img_shape[-2:], kernel_size=patch_size, stride=stride)
divisor[divisor == 0] = 1.0
return (combined / divisor).squeeze(dim=0).unsqueeze(0) | 2.3125 | 2 |
my_oop/oop05.py | xxwqlee/pylearn | 1 | 22020 | """
How method calls are resolved under inheritance
"""
class A:
def a_say(self):
        print('Running A:', self)
class B(A):
def b_say(self):
A.a_say(self) # 效果与下面的语句相同
super().a_say() # super()方法调用父类的定义,
# 默认传入当前对象的引用self
A().a_say() # 类对象的直接使用,先创建一个类对象A
print('执行B:', self)
a = A()
b = B()
a.a_say()
b.a_say()
print("*" * 50)
b.b_say()  # self still refers to the object instantiated from the subclass
print("*" * 50)
B().b_say()
| 1.859375 | 2 |
clifun.py | tdimiduk/clifun | 1 | 22148
import datetime as dt
import importlib.util
import inspect
import itertools
import json
import os
import pathlib
import sys
import types
import typing
from typing import (
Any,
Callable,
Dict,
Generic,
Iterable,
Iterator,
List,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
S = TypeVar("S")
T = TypeVar("T")
O = TypeVar("O", Any, None)
StringInterpreters = Dict[Type[T], Callable[[str], T]]
def call(
c: Callable[..., T],
args: Optional[List[str]] = None,
string_interpreters: Optional[StringInterpreters] = None,
) -> T:
"""
Call a function from the command line
Assembles the inputs to a function from command line arguments, environment variables, and config files and call it.
"""
argv = sys.argv if args is None else args
interpreters = (
string_interpreters
if string_interpreters is not None
else default_string_interpreters()
)
annotated = annotate_callable(c, interpreters, [])
provided_inputs = assemble_input_sources(argv)
if provided_inputs.args.help:
print_usage(annotated, header=True)
sys.exit(0)
needed_inputs = all_needed_inputs(annotated)
unknown = invalid_args(provided_inputs.args.keyword.keys(), needed_inputs)
if unknown:
print(f"Unknown arguments: {unknown}")
print_usage(annotated)
sys.exit(1)
resolved_inputs, missing_inputs = resolve_inputs(needed_inputs, provided_inputs)
if missing_inputs:
print(f"Missing arguments: {missing_inputs}")
print_usage(annotated)
sys.exit(1)
return annotated(resolved_inputs)
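# Editor's sketch (not part of the original module): a minimal callable driven by
# `call` from an explicit argv list; the function, flags and values are made up.
def _example_greet(name: str, repeat: int = 1) -> str:
  return " ".join(["hello " + name] * repeat)
def _demo_call() -> str:
  # argv[0] is skipped by interpret_arguments, so a dummy program name is included.
  return call(_example_greet, args=["prog", "--name", "world", "--repeat", "2"])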
################################################################################
# Interpreting strings into python types
################################################################################
def default_string_interpreters() -> StringInterpreters:
return {
int: int,
float: float,
str: str,
bool: interpret_bool,
dt.datetime: interpret_datetime,
dt.date: interpret_date,
}
class InterpretationError(ValueError):
def __init__(self, s: str, t: T):
self.s = s
self.t = t
def __str__(self):
return f"Could not interpret '{self.s}' as {self.t}"
def interpret_bool(s: str) -> bool:
"""
Slightly more intuitive bool iterpretation
Raw python's `bool("false")==True` since it is a non-empty string
"""
if s.lower() in {"t", "true", "yes", "y"}:
return True
elif s.lower() in {"f", "false", "no", "n"}:
return False
else:
raise InterpretationError(s, bool)
def interpret_datetime(s: str) -> dt.datetime:
"""
Date and time in isoformat
"""
if hasattr(dt.datetime, "fromisoformat"):
return dt.datetime.fromisoformat(s)
else:
# for python 3.6 where `fromisoformat` doesn't exist
import isodate # type: ignore
return isodate.parse_datetime(s)
def interpret_date(s: str) -> dt.date:
"""
Dates in YYYY-MM-DD format
"""
return dt.date(*[int(i) for i in s.split("-")])
def interpret_string_as_type(
s: str, t: Type[T], type_converters: StringInterpreters
) -> T:
try:
return (
type_converters[unwrap_optional(t)](s)
if is_optional(t)
else type_converters[t](s)
)
except KeyError:
raise InterpretationError(s, t)
################################################################################
# Data classes
#
# these should really be dataclasses, and will be converted when clifun drops compatability
# with python 3.6
################################################################################
class Arguments:
def __init__(
self, positional: List[str], keyword: Dict[str, str], help: bool = False
):
self.positional = positional
self.keyword = keyword
self.help = help
class ConfigFiles:
def __init__(self, configs: List[Dict[str, str]]):
self.configs = configs
def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
for config in self.configs:
if key in config:
return config[key]
return default
Annotated = Union["AnnotatedParameter", "AnnotatedCallable"]
class AnnotatedCallable(Generic[T]):
def __init__(
      self, callable: Callable[..., T], name: str, needed_inputs: List[Annotated]
):
self.callable = callable
self.name = name
self.needed_inputs = needed_inputs
def __call__(self, inputs: Dict[str, str]):
def collect(needed: Annotated):
if isinstance(needed, AnnotatedParameter):
value = inputs[needed.prefixed_name]
if value is None:
if is_optional(needed.t):
return None
raise ValueError(
f"Somehow got None for non optional parameter {needed}"
)
return needed(value)
return needed(inputs)
collected_inputs = {
needed.name: collect(needed) for needed in self.needed_inputs
}
return self.callable(**collected_inputs)
def __str__(self) -> str:
return f"<callable: {self.name} {[str(i) for i in self.needed_inputs]}>"
class AnnotatedParameter(Generic[T]):
def __init__(
self, parameter: inspect.Parameter, from_string: Callable[[str], T], prefix
):
self.parameter = parameter
self.from_string = from_string
self.prefix = prefix
@property
def name(self):
return self.parameter.name
@property
def prefixed_name(self):
return ".".join(self.prefix + [self.name])
@property
def t(self):
return self.parameter.annotation
@property
def default(self):
return self.parameter.default
def __call__(self, input: Optional[str]) -> T:
return self.from_string(input)
def __str__(self) -> str:
return f"<parameter: {self.name}: {self.t}>"
class InputSources:
def __init__(self, args: Arguments, config_files: ConfigFiles):
self.args = args
self.config_files = config_files
def get(self, key: str, default: Optional[T] = None) -> Union[str, T, None]:
env_value = os.environ.get(key.upper(), default)
return self.args.keyword.get(key, self.config_files.get(key, env_value))
def get_value(self, value: AnnotatedParameter) -> Union[str, T, None]:
return self.get(value.prefixed_name, value.default)
################################################################################
# Assemble inputs from the "outside world"
################################################################################
def assemble_input_sources(args: List[str]) -> InputSources:
args_object = interpret_arguments(args)
return InputSources(args_object, load_config_files(args_object.positional))
def interpret_arguments(args: Optional[List[str]] = None) -> Arguments:
if args is None:
args = sys.argv
i = 1
keyword = {}
positional = []
while i < len(args):
arg = args[i]
key = arg[2:]
if arg in {"-h", "--help"}:
return Arguments([], {}, True)
if arg[:2] == "--":
if len(args) < i + 2:
raise ValueError(f"Missing value for argument: {key}")
keyword[key] = args[i + 1]
i += 2
else:
positional.append(arg)
i += 1
return Arguments(positional, keyword, not (keyword or positional))
def load_config_files(filenames: List[str]) -> ConfigFiles:
# reverse the order so that later config files override earlier ones
def load(name):
if not pathlib.Path(name).exists():
raise ValueError(f"Could not find config file {name}")
return json.load(open(name))
return ConfigFiles([load(name) for name in filenames[::-1]])
NOT_SPECIFIED = inspect._empty
def resolve_inputs(
needed_inputs: List[AnnotatedParameter], provided_inputs: InputSources
) -> Tuple[Dict[str, Optional[str]], Set[str]]:
missing = set()
def resolve(v):
s = provided_inputs.get_value(v)
if s is None:
if is_optional(v.t):
return None
else:
missing.add(v.prefixed_name)
if s == NOT_SPECIFIED:
missing.add(v.prefixed_name)
return s
collected = {value.prefixed_name: resolve(value) for value in needed_inputs}
return collected, missing
################################################################################
# Input validation and help
################################################################################
def check_usage(provided_inputs, needed_inputs) -> None:
check_help(provided_inputs, needed_inputs)
check_invalid_args(provided_inputs, needed_inputs)
def valid_args(values: List[AnnotatedParameter]) -> Set[str]:
return {v.prefixed_name for v in values}
def invalid_args(args, allowed_args):
return set(args) - valid_args(allowed_args)
def print_usage(annotated: AnnotatedCallable, header: bool = False) -> None:
needed_inputs = all_needed_inputs(annotated)
if header:
print(f"{annotated.name}\n")
doc = inspect.getdoc(annotated.callable)
if doc:
print(f"{doc}\n")
print(f"Usage: {sys.argv[0]} [config_file] [--key: value]")
print("\n".join(describe_needed(needed_inputs)))
def describe_needed(needed_inputs: List[AnnotatedParameter]) -> List[str]:
def desc(v):
base = f" --{v.prefixed_name}: {type_to_string(v.t)}"
if v.default != NOT_SPECIFIED:
default = f'"{v.default}"' if isinstance(v.default, str) else v.default
return f"{base} (default: {default})"
return base
return [desc(v) for v in needed_inputs]
################################################################################
# Determine what inputs a function needs
################################################################################
def all_needed_inputs(c: AnnotatedCallable) -> List[AnnotatedParameter]:
def inner():
for needed in c.needed_inputs:
if isinstance(needed, AnnotatedParameter):
yield needed
else:
yield from all_needed_inputs(needed)
return list(inner())
def inspect_parameters(t: Type[T]) -> Iterable[inspect.Parameter]:
return inspect.signature(t).parameters.values()
def is_optional(t: Type[T]) -> bool:
return Union[t, None] == t
def unwrap_optional(t: Optional[Type[T]]) -> Type[T]:
if hasattr(typing, "get_args"):
args = typing.get_args(t)
if len(args) == 0:
return t
else:
return args[0]
# fallback for python < 3.8. May be brittle since it depends on an `_`'d interface
# this should use typing.get_args, but that is not available until python 3.8
if type(t) != typing._GenericAlias:
return t
for s in t.__args__: # type: ignore
if s != type(None):
return s
def type_to_string(t: Type[O]) -> str:
if is_optional(t):
return f"Optional[{unwrap_optional(t).__name__}]"
return t.__name__
def annotate_parameter(
parameter: inspect.Parameter, interpreter: StringInterpreters, prefix: List[str]
) -> Union[AnnotatedParameter, AnnotatedCallable]:
if parameter.annotation == NOT_SPECIFIED:
raise Exception(f"Missing type annotation for {parameter}")
t = unwrap_optional(parameter.annotation)
if t in interpreter:
# We have found a "basic" value we know how to interpret
return AnnotatedParameter(parameter, from_string=interpreter[t], prefix=prefix)
# This is some kind of composite
prefix = prefix + [parameter.name]
return annotate_callable(parameter.annotation, interpreter, prefix, parameter.name)
def annotate_callable(
    callable: Callable[..., T],
interpreter: StringInterpreters,
prefix: List[str],
name: Optional[str] = None,
) -> AnnotatedCallable[T]:
needed = [
annotate_parameter(p, interpreter, prefix) for p in inspect_parameters(callable)
]
return AnnotatedCallable(
callable, name if name is not None else callable.__name__, needed
)
################################################################################
# Make clifun.py usable as a script to call functions in any module
################################################################################
def import_module_by_path(path: pathlib.Path) -> types.ModuleType:
  spec = importlib.util.spec_from_file_location(path.name, str(path))
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
if __name__ == "__main__":
print(sys.argv)
if len(sys.argv) < 3:
print("Usage: clifun.py path_to_module function_name ...")
sys.exit(1)
target = pathlib.Path(sys.argv[1]).resolve()
function_name = sys.argv[2]
arguments = sys.argv[2:]
module = import_module_by_path(target)
function = getattr(module, function_name)
print(call(function, arguments))
| 2.125 | 2 |
challenge/eval.py | CodeCrawl/deep_learning | 8 | 22276 | ##
## Evaluation Script
##
import numpy as np
import time
from sample_model import Model
from data_loader import data_loader
from generator import Generator
def evaluate(label_indices = {'brick': 0, 'ball': 1, 'cylinder': 2},
channel_means = np.array([147.12697, 160.21092, 167.70029]),
data_path = '../data',
minibatch_size = 32,
num_batches_to_test = 10,
checkpoint_dir = 'tf_data/sample_model'):
print("1. Loading data")
data = data_loader(label_indices = label_indices,
channel_means = channel_means,
train_test_split = 0.5,
data_path = data_path)
print("2. Instantiating the model")
M = Model(mode = 'test')
#Evaluate on test images:
GT = Generator(data.test.X, data.test.y, minibatch_size = minibatch_size)
num_correct = 0
num_total = 0
print("3. Evaluating on test images")
for i in range(num_batches_to_test):
GT.generate()
yhat = M.predict(X = GT.X, checkpoint_dir = checkpoint_dir)
correct_predictions = (np.argmax(yhat, axis = 1) == np.argmax(GT.y, axis = 1))
num_correct += np.sum(correct_predictions)
num_total += len(correct_predictions)
accuracy = round(num_correct/num_total,4)
return accuracy
def calculate_score(accuracy):
score = 0
if accuracy >= 0.92:
score = 10
elif accuracy >= 0.9:
score = 9
elif accuracy >= 0.85:
score = 8
elif accuracy >= 0.8:
score = 7
elif accuracy >= 0.75:
score = 6
elif accuracy >= 0.70:
score = 5
else:
score = 4
return score
if __name__ == '__main__':
program_start = time.time()
accuracy = evaluate()
score = calculate_score(accuracy)
program_end = time.time()
total_time = round(program_end - program_start,2)
print()
print("Execution time (seconds) = ", total_time)
print('Accuracy = ' + str(accuracy))
print("Score = ", score)
print()
| 2.03125 | 2 |
mayan/apps/web_links/migrations/0004_make_labes_unique.py | nattangwiwat/Mayan-EDMS-recitation | 343 | 22404
from django.db import migrations
def operation_make_labels_unique(apps, schema_editor):
WebLink = apps.get_model(app_label='web_links', model_name='WebLink')
for web_link in WebLink.objects.using(schema_editor.connection.alias).all():
# Look for instances with the same label
duplicate_queryset = WebLink.objects.using(
schema_editor.connection.alias
).filter(label=web_link.label).exclude(pk=web_link.pk)
if duplicate_queryset:
# If a duplicate is found, append the id to the original instance
# label
web_link.label = '{}__{}'.format(web_link.label, web_link.pk)
web_link.save()
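# Editor's illustrative example (the label value is made up): if two web links are
# both labelled "Docs", one of them keeps "Docs" while the other is renamed to
# "Docs__<pk>"; the reverse operation below strips that "__<pk>" suffix again.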
def operation_make_labels_unique_reverse(apps, schema_editor):
WebLink = apps.get_model(app_label='web_links', model_name='WebLink')
for web_link in WebLink.objects.using(schema_editor.connection.alias).all():
if web_link.label.endswith('__{}'.format(web_link.pk)):
web_link.label = web_link.label.replace(
'__{}'.format(web_link.pk), ''
)
web_link.save()
class Migration(migrations.Migration):
dependencies = [
('web_links', '0003_auto_20191211_0233'),
]
operations = [
migrations.RunPython(
code=operation_make_labels_unique,
reverse_code=operation_make_labels_unique_reverse
),
]
| 1.835938 | 2 |
youwol/backends/cdn/resources_initialization.py | youwol/py-youwol | 0 | 22532 | import asyncio
import os
from youwol_utils import WhereClause, QueryBody, Query, Path, flatten
from .configurations import Configuration
from .utils import format_download_form, post_storage_by_chunk, md5_from_folder
from .utils_indexing import format_doc_db_record, post_indexes, get_version_number_str
async def init_resources(config: Configuration):
print("### Ensure database resources ###")
headers = await config.admin_headers if config.admin_headers else {}
doc_db = config.doc_db
storage = config.storage
table_ok, bucket_ok = await asyncio.gather(
doc_db.ensure_table(headers=headers),
storage.ensure_bucket(headers=headers)
)
if bucket_ok and not table_ok:
print("Need to re-index stuffs of bucket")
raise Exception("The table index is not up-to-date w/ bucket content, manual index-synchronisation needed")
clauses = [[WhereClause(column="library_name", relation="eq", term=lib.split("#")[0]),
WhereClause(column="version_number", relation="eq", term=get_version_number_str(lib.split("#")[1]))
]
for lib in Configuration.required_libs]
bodies = [QueryBody(query=Query(where_clause=c)) for c in clauses]
responses = await asyncio.gather(*[doc_db.query(query_body=b, owner=Configuration.owner, headers=headers)
for b in bodies])
if all([len(r['documents']) == 1 for r in responses]):
print("Found required resources")
return
print("post initial resources")
await synchronize(Path(__file__).parent / "initial_resources", "", config, headers=headers)
print("### resources initialization done ###")
async def synchronize(dir_path: Path, zip_dir_name: str, configuration: any, headers: any):
paths = flatten([[Path(root) / f for f in files] for root, _, files in os.walk(str(dir_path))])
paths = list(paths)
forms = await asyncio.gather(*[format_download_form(path, Path(), dir_path / zip_dir_name, False)
for path in paths])
await post_storage_by_chunk(configuration.storage, list(forms), 1, headers)
paths_index = flatten([[Path(root) / f for f in files if f == "package.json"]
for root, _, files in os.walk(str(dir_path))])
check_dum = md5_from_folder(dir_path)
indexes = [format_doc_db_record(package_path=path, fingerprint=check_dum) for path in paths_index]
namespaces = {d["namespace"] for d in indexes}
await post_indexes(configuration.doc_db, indexes, 25, headers)
return len(forms), len(indexes), namespaces
| 1.265625 | 1 |
ros/src/waypoint_updater/waypoint_updater.py | dan-fern/CarND-Capstone-P9 | 0 | 22660 | #!/usr/bin/env python
import rospy as rp
import numpy as np
import math as math
from geometry_msgs.msg import PoseStamped, TwistStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
from std_msgs.msg import Int32
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
# Number of waypoints we will publish.
LOOKAHEAD_WPS = 150
MAX_DECEL = 0.5
class MotionState( ):
Go, Stop = range( 2 )
class WaypointUpdater( object ):
def __init__( self ):
rp.init_node( 'waypoint_updater' )
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
rp.Subscriber( '/current_pose', PoseStamped, self.pose_cb )
rp.Subscriber( '/base_waypoints', Lane, self.waypoints_cb )
rp.Subscriber( '/traffic_waypoint', Int32, self.traffic_cb )
rp.Subscriber( '/current_velocity', TwistStamped, self.velocity_cb )
self.final_waypoints_pub = rp.Publisher(
'final_waypoints',
Lane,
queue_size=1 )
# TODO: Add other member variables you need below
self.base_lane = None
self.pose = None
self.waypoints_2d = None
self.waypoint_tree = None
self.nearest_light = None
self.vehicle_velocity = None # in m/s
self.motion_state = MotionState.Go
self.deceleration_rate = None
self.acceleration_rate = 0.75 # m/s
self.previous_velocity = None
self.loop( )
def loop( self ):
rate = rp.Rate( 10 )
while not rp.is_shutdown( ):
if self.pose and self.base_lane and self.waypoint_tree:
# get closest waypoint
#closest_waypoint_index = self.get_closest_waypoint_id( )
self.publish_waypoints( )
self.previous_velocity = self.vehicle_velocity
rate.sleep( )
def publish_waypoints( self ):
self.final_waypoints_pub.publish( self.generate_lane( ) )
def generate_lane( self ):
lane = Lane( )
closest_idx = self.get_closest_waypoint_id( )
farthest_idx = closest_idx + LOOKAHEAD_WPS
base_waypoints = self.base_lane[ closest_idx:farthest_idx ]
if self.nearest_light != None and \
self.nearest_light <= farthest_idx and \
self.nearest_light >= closest_idx:
self.motion_state = MotionState.Stop
base_waypoints = self.decelerate( base_waypoints, closest_idx )
elif self.motion_state == MotionState.Stop:
self.motion_state = MotionState.Go
self.deceleration_rate = None
if self.motion_state == MotionState.Go:
if abs( self.vehicle_velocity - self.get_waypoint_velocity( \
base_waypoints[ 0 ] ) ) > 1.0:
if self.previous_velocity == None:
start_vel = self.vehicle_velocity
else:
start_vel = max(
self.previous_velocity + 0.2,
self.vehicle_velocity )
base_waypoints = self.accelerate( base_waypoints, start_vel )
else:
self.acceleration_start_velocity = None
lane.waypoints = base_waypoints
return lane
def accelerate( self, waypoints, start_velocity ):
new_waypoints = [ ]
for i, wp in enumerate( waypoints ):
p = Waypoint( )
p.pose = wp.pose
distance = self.distance( waypoints, 0, i )
target_vel = start_velocity + distance * self.acceleration_rate
if target_vel < 0.5:
target_vel = 0.5
p.twist.twist.linear.x = min(
target_vel,
self.get_waypoint_velocity( wp ) )
new_waypoints.append( p )
return new_waypoints
def decelerate( self, waypoints, start_idx ):
new_waypoints = [ ]
speed = self.vehicle_velocity
# two waypoints back from line so front of car stops earlier
stop_idx = self.nearest_light - start_idx - 2
for i, wp in enumerate( waypoints ):
p = Waypoint( )
p.pose = wp.pose
dist = self.distance( waypoints, i, stop_idx )
if i >= stop_idx:
target_vel = 0
elif dist < 15:
if self.deceleration_rate == None:
self.deceleration_rate = self.vehicle_velocity / dist
target_vel = self.deceleration_rate * dist
if target_vel <= 1.0:
target_vel = 0.0
target_vel = min( target_vel, self.get_waypoint_velocity( wp ) )
else:
target_vel = self.get_waypoint_velocity( wp )
p.twist.twist.linear.x = target_vel
new_waypoints.append( p )
return new_waypoints
def get_closest_waypoint_id( self ):
x = self.pose.pose.position.x
y = self.pose.pose.position.y
closest_idx = self.waypoint_tree.query( [x, y], 1 )[1]
# Check if closest waypoint is ahead or behind the vehicle
closest_wp = np.array( self.waypoints_2d[ closest_idx ] )
previous_wp = np.array( self.waypoints_2d[ closest_idx - 1 ] )
# Equation for hyperplane through closest_coords
waypoint_vector = closest_wp - previous_wp
position_vector = np.array( [x, y] ) - closest_wp
val = np.dot( waypoint_vector, position_vector )
if val > 0:
closest_idx = ( closest_idx + 1 ) % len( self.waypoints_2d )
return closest_idx
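    # Editor's worked example for the hyperplane check above (illustrative numbers):
    # with previous_wp = (0, 0), closest_wp = (1, 0) and the car at (1.5, 0), the dot
    # product of waypoint_vector = (1, 0) and position_vector = (0.5, 0) is positive,
    # so the closest waypoint lies behind the vehicle and the index is advanced by one.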
def pose_cb(self, msg):
# TODO: Implement
self.pose = msg
def waypoints_cb( self, waypoints ):
# TODO: Implement
self.base_lane = waypoints.waypoints
if not self.waypoints_2d:
self.waypoints_2d = [ [ waypoint.pose.pose.position.x,
waypoint.pose.pose.position.y ]
for waypoint in waypoints.waypoints ]
self.waypoint_tree = KDTree( self.waypoints_2d )
def traffic_cb( self, msg ):
# TODO: Callback for /traffic_waypoint message. Implement
if( msg.data == -1 ):
self.nearest_light = None
else:
self.nearest_light = msg.data
def velocity_cb( self, velocity ):
self.vehicle_velocity = velocity.twist.linear.x
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
pass
def get_waypoint_velocity( self, waypoint ):
return waypoint.twist.twist.linear.x
def set_waypoint_velocity( self, waypoints, waypoint, velocity ):
waypoints[ waypoint ].twist.twist.linear.x = velocity
def distance(self, waypoints, wp1, wp2):
dist = 0
dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
for i in range(wp1, wp2+1):
dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
wp1 = i
return dist
if __name__ == '__main__':
try:
WaypointUpdater()
except rp.ROSInterruptException:
rp.logerr('Could not start waypoint updater node.')
| 2.125 | 2 |
fewshot/clis/score_simple.py | armancohan/flex | 63 | 22788
import json
from typing import TextIO
from functools import partial
import click
import numpy as np
from scipy.stats import sem
import pandas as pd
from fewshot.bootstrap import bootstrap
from fewshot.bootstrap import ci
from fewshot.challenges.utils import get_gold_dataset
from . import score_utils as su
def statistics(a, estimator=np.mean, conf_interval=95, n_boot=1000, seed=0):
"""With 95% CI"""
[ci_lower, ci_upper] = ci(
bootstrap(
a,
func=estimator,
n_boot=n_boot,
seed=seed,
),
conf_interval
)
stat = estimator(a)
return {
'stat': stat,
'stat_ci_lower': stat - ci_lower,
'stat_ci_upper': ci_upper - stat,
'stat_ci_sem': sem(a, ddof=1) * 1.96,
'std': np.std(a),
'n': len(a),
}
@click.command()
@click.option('--challenge_name', type=click.STRING, required=True)
@click.option(
'--predictions',
type=click.File('r'),
help='Path to the file containing system predictions',
required=True,
)
@click.option(
'--output',
'-o',
type=click.File('w'),
help='Output results to this file.',
)
@click.option('--by_way_shot', is_flag=True, default=False)
@click.option('--by_few', is_flag=True, default=False)
@click.option('--for_leaderboard', is_flag=True, default=False)
def score(
challenge_name: str,
predictions: TextIO,
output: TextIO,
by_way_shot: bool,
by_few: bool,
for_leaderboard: bool,
):
"""Score a predictions.json file."""
gold_data = pd.DataFrame(get_gold_dataset(challenge_name))
joined_data = su.join_predictions_and_gold(
predictions=predictions,
gold_data=gold_data,
)
df, metrics = su.score_joined_data(data=joined_data)
if by_way_shot:
df['shot'] = df.apply(lambda row: str(int(row['n_train'] / row['way']))
if row['balanced_train'] else '', axis=1)
grouped = df.groupby(by=['dataset', 'way', 'shot'])['accuracy'].apply(partial(statistics, estimator=np.mean))
grouped.index = grouped.index.set_names('stat', level=3)
res = grouped
elif by_few or for_leaderboard:
df['few'] = df['n_train'].map(lambda v: v > 0)
grouped = df.groupby(by=['dataset', 'few'])['accuracy'].apply(partial(statistics, estimator=np.mean))
grouped.index = grouped.index.set_names('stat', level=2)
ways = df.groupby(by=['dataset', 'few'])['way'].apply(lambda x: '/'.join(str(i) for i in sorted(x.unique())))
res = pd.merge(
grouped.reset_index(),
ways.reset_index(),
on=['dataset', 'few']
).set_index(['dataset', 'way', 'few', 'stat'])
else:
grouped = df.groupby(by=['dataset'])['accuracy'].apply(partial(statistics, estimator=np.mean))
means = grouped.xs('stat', level=1)
stds = grouped.xs('std', level=1)
cis_upper = grouped.xs('stat_ci_upper', level=1)
cis_lower = grouped.xs('stat_ci_lower', level=1)
cis_lower.index = cis_lower.index + '_acc_ci_lower'
cis_upper.index = cis_upper.index + '_acc_ci_upper'
means.index = means.index + '_acc'
stds.index = stds.index + '_acc_std'
res = pd.concat([means, cis_upper, cis_lower, stds], axis=0)
res.loc['overall_acc'] = means.mean()
res.loc['overall_acc_std'] = stds.mean()
if for_leaderboard:
res = res.reset_index()
res['few_string'] = res['few'].map(lambda v: 'few' if v else '0')
res['name'] = res['dataset'] + '-' + res['few_string']
accuracies = res[res.stat == 'stat']
overall_0_acc = accuracies[~accuracies.few]['accuracy'].mean()
overall_few_acc = accuracies[accuracies.few]['accuracy'].mean()
accuracies = accuracies.append([
{'name': 'overall-0', 'accuracy': overall_0_acc},
{'name': 'overall-few', 'accuracy': overall_few_acc},
{'name': 'overall', 'accuracy': 0.5 * (overall_0_acc + overall_few_acc)},
])
uppers = res[res.stat == 'stat_ci_upper']
uppers = uppers.assign(name=lambda x: x['name'] + '_ci_upper')
lowers = res[res.stat == 'stat_ci_lower']
lowers = lowers.assign(name=lambda x: x['name'] + '_ci_lower')
stds = res[res.stat == 'std']
stds = stds.assign(name=lambda x: x['name'] + '_std')
res = pd.concat([accuracies, uppers, lowers, stds], axis=0)
res = res[['name', 'accuracy']].set_index('name')
res = res['accuracy']
print(type(res))
if output:
if for_leaderboard:
# Add episode-level accuracy values under 'episode_accuracies' key
res = json.loads(res.to_json())
grouped = (
df.groupby(by=['few', 'dataset'])[['task_id', 'accuracy']]
.apply(lambda x: x.sort_values('task_id')['accuracy'].tolist())
.reset_index(name='accuracies')
)
grouped['few_string'] = grouped['few'].map(lambda v: 'few' if v else '0')
grouped['name'] = grouped['dataset'] + '-' + grouped['few_string']
res['episode_accuracies'] = grouped.set_index('name')[['accuracies']].to_dict()['accuracies']
json.dump(res, output)
elif output.name.endswith('.json'):
res.to_json(output)
else:
res.to_csv(output)
else:
pd.set_option("display.max_rows", None)
print(res.sort_index())
| 1.609375 | 2 |
test/auth/test_client_credentials.py | membranepotential/mendeley-python-sdk | 103 | 22916 | from oauthlib.oauth2 import InvalidClientError, MissingTokenError
import pytest
from test import configure_mendeley, cassette
def test_should_get_authenticated_session():
mendeley = configure_mendeley()
auth = mendeley.start_client_credentials_flow()
with cassette('fixtures/auth/client_credentials/get_authenticated_session.yaml'):
session = auth.authenticate()
assert session.token['access_token']
assert session.host == 'https://api.mendeley.com'
def test_should_throw_exception_on_incorrect_credentials():
mendeley = configure_mendeley()
mendeley.client_secret += '-invalid'
auth = mendeley.start_client_credentials_flow()
# We should never get an access token back
# and the OAuth library should be unhappy about that
with cassette('fixtures/auth/client_credentials/incorrect_credentials.yaml'), pytest.raises(MissingTokenError):
auth.authenticate()
| 1.382813 | 1 |
advisor/api/urls.py | Sachin-c/api-test | 0 | 23044
from django.urls import path
from advisor.api.views import (
# api_advisor_view,
api_advisor_view_post,
)
app_name = 'advisor'
urlpatterns = [
path('admin/advisor/', api_advisor_view_post, name="post"),
# path('user/<int:id>/advisor/', api_advisor_view, name="detail"),
]
| 0.984375 | 1 |
mindhome_alpha/erpnext/accounts/doctype/fiscal_year/test_fiscal_year.py | Mindhome/field_service | 1 | 23172 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, unittest
from erpnext.accounts.doctype.fiscal_year.fiscal_year import FiscalYearIncorrectDate
test_records = frappe.get_test_records('Fiscal Year')
test_ignore = ["Company"]
class TestFiscalYear(unittest.TestCase):
def test_extra_year(self):
if frappe.db.exists("Fiscal Year", "_Test Fiscal Year 2000"):
frappe.delete_doc("Fiscal Year", "_Test Fiscal Year 2000")
fy = frappe.get_doc({
"doctype": "Fiscal Year",
"year": "_Test Fiscal Year 2000",
"year_end_date": "2002-12-31",
"year_start_date": "2000-04-01"
})
self.assertRaises(FiscalYearIncorrectDate, fy.insert)
| 1.359375 | 1 |
tools/multiscale_shape.py | marvin-eisenberger/hamiltonian-interpolation | 5 | 23300 | import torch
from shape_utils import Shape, load_shape_pair, scatter_shape_pair
from torch_geometric.nn import knn
from param import *
from arap_potential import arap_vert
def load_multiscale_shapes(folder_path, file_name, scales, offset=0.5*torch.ones([3], device=device, dtype=torch.float32)):
"""Like 'load_shape_pair' but for shapes with different resolutions"""
vert_x_array = []
triv_x_array = []
vert_y_array = []
triv_y_array = []
for i_scale in range(len(scales)):
file_load = folder_path + "sub_" + str(scales[i_scale]) + "/" + file_name
shape_x, shape_y = load_shape_pair(file_load, offset)
vert_x_array.append(shape_x.vert)
vert_y_array.append(shape_y.vert)
triv_x_array.append(shape_x.triv)
triv_y_array.append(shape_y.triv)
shape_x = MultiscaleShape(vert_x_array, triv_x_array)
shape_y = MultiscaleShape(vert_y_array, triv_y_array)
return shape_x, shape_y
class MultiscaleShape(Shape):
"""Class for shapes with multiple resolutions.
Attributes beyond the base class 'Shape' are:
vert_array: List of vertices with different resolutions
triv_array: List of triangles with different resolutions
scale_idx: The index describing the current resolution --
The current vertices are vert_array[scale_idx]
ass_[array/vecs/weights]: attributes needed to apply an interpolation
on scale 'scale_idx' to the next resolution '(scale_idx+1)'
"""
def __init__(self, vert_array, triv_array):
super().__init__(vert_array[0], triv_array[0])
self.vert_array = vert_array
self.triv_array = triv_array
self.scale_idx = 0
self.scale_idx_len = len(vert_array)
self.ass_array = None
self.ass_vecs = None
self.ass_weights = None
self.init_upscale()
def set_scale_idx(self, scale_idx):
assert scale_idx >= 0 and scale_idx < self.scale_idx_len, "new index out of bounds"
self.vert_array[self.scale_idx] = self.vert
self.scale_idx = scale_idx
self.vert = self.vert_array[scale_idx]
self.triv = self.triv_array[scale_idx]
self.samples = list(range(self.vert.shape[0]))
self.neigh = None
def increase_scale_idx(self):
self.set_scale_idx(self.scale_idx+1)
def next_resolution(self):
return self.vert_array[self.scale_idx+1].shape
def init_upscale(self, num_knn=3):
self.ass_array = []
self.ass_vecs = []
self.ass_weights = []
for idx in range(self.scale_idx_len-1):
vert_i = self.vert_array[idx].to(device_cpu)
vert_ip1 = self.vert_array[idx+1].to(device_cpu)
ass_curr = knn(vert_i, vert_ip1, num_knn)
ass_curr = ass_curr[1, :].view(-1, num_knn)
self.ass_array.append(ass_curr.to(device)) #[n_vert_tp1, num_knn]
vec_curr = vert_ip1.unsqueeze(1) - vert_i[ass_curr, :]
self.ass_vecs.append(vec_curr.to(device)) #[n_vert_tp1, num_knn, 3]
weights_curr = 1/(torch.norm(vec_curr, dim=2, keepdim=True)+1e-5)
weights_curr = weights_curr / torch.sum(weights_curr, dim=1, keepdim=True)
self.ass_weights.append(weights_curr.to(device)) #[n_vert_tp1, num_knn, 1]
def apply_upsampling(self, vert_t):
R = arap_vert(vert_t, self.vert, self.get_neigh()) #[n_vert_tp1, 3, 3]
ass_curr = self.ass_array[self.scale_idx]
vec_curr = self.ass_vecs[self.scale_idx]
weights_curr = self.ass_weights[self.scale_idx]
vert_tp1 = vert_t[ass_curr, :] + torch.matmul(R[ass_curr], vec_curr.unsqueeze(3)).squeeze() #[n_vert_tp1, num_knn, 3]
vert_tp1 = torch.sum(weights_curr * vert_tp1, dim=1)
return vert_tp1
def rotate(self, R):
for i in range(self.scale_idx_len):
self.vert_array[i] = torch.mm(self.vert_array[i], R.transpose(0, 1))
self.vert = self.vert_array[self.scale_idx]
self.init_upscale()
def to_box(self, shape_y):
scale_idx = self.scale_idx
for i in range(self.scale_idx_len):
self.set_scale_idx(i)
shape_y.set_scale_idx(i)
super().to_box(shape_y)
self.set_scale_idx(scale_idx)
shape_y.set_scale_idx(scale_idx)
self.init_upscale()
def scale(self, factor, shift=True):
scale_idx = self.scale_idx
for i in range(self.scale_idx_len):
self.set_scale_idx(i)
super().scale(factor, shift)
self.set_scale_idx(scale_idx)
self.init_upscale()
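# Editor's sketch (not part of the original repo): propagate an interpolated vertex
# tensor from the current (coarse) resolution of `shape_x` up through the remaining
# scales; both arguments are assumed to exist already.
def _upsample_through_scales(shape_x, vert_coarse):
    vert = vert_coarse
    while shape_x.scale_idx < shape_x.scale_idx_len - 1:
        vert = shape_x.apply_upsampling(vert)
        shape_x.increase_scale_idx()
    return vert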
if __name__ == "__main__":
print("main of multiscale_shape.py")
| 1.992188 | 2 |
tests/testJobQueue.py | hartloff/Tango | 2 | 23428 | import unittest
import redis
from jobQueue import JobQueue
from tangoObjects import TangoIntValue, TangoJob
from config import Config
class TestJobQueue(unittest.TestCase):
def setUp(self):
if Config.USE_REDIS:
__db = redis.StrictRedis(
Config.REDIS_HOSTNAME, Config.REDIS_PORT, db=0)
__db.flushall()
self.job1 = TangoJob(
name="sample_job_1",
vm="ilter.img",
outputFile="sample_job_1_output",
input=[],
timeout=30,
notifyURL="notifyMeUrl",
maxOutputFileSize=4096)
self.job2 = TangoJob(
name="sample_job_2",
vm="ilter.img",
outputFile="sample_job_2_output",
input=[],
timeout=30,
notifyURL="notifyMeUrl",
maxOutputFileSize=4096)
self.jobQueue = JobQueue(None)
self.jobQueue.reset()
self.jobId1 = self.jobQueue.add(self.job1)
self.jobId2 = self.jobQueue.add(self.job2)
def test_sharedInt(self):
if Config.USE_REDIS:
num1 = TangoIntValue("nextID", 1000)
num2 = TangoIntValue("nextID", 3000)
self.assertEqual(num1.get(), 1000)
self.assertEqual(num1.get(), num2.get())
else:
return
def test_job(self):
self.job1.makeUnassigned()
self.assertTrue(self.job1.isNotAssigned())
job = self.jobQueue.get(self.jobId1)
self.assertTrue(job.isNotAssigned())
self.job1.makeAssigned()
print "Checkout:"
self.assertFalse(self.job1.isNotAssigned())
self.assertFalse(job.isNotAssigned())
def test_add(self):
info = self.jobQueue.getInfo()
self.assertEqual(info['size'], 2)
def test_addDead(self):
return self.assertEqual(1, 1)
def test_remove(self):
self.jobQueue.remove(self.jobId1)
info = self.jobQueue.getInfo()
self.assertEqual(info['size'], 1)
self.jobQueue.remove(self.jobId2)
info = self.jobQueue.getInfo()
self.assertEqual(info['size'], 0)
def test_delJob(self):
self.jobQueue.delJob(self.jobId1, 0)
info = self.jobQueue.getInfo()
self.assertEqual(info['size'], 1)
self.assertEqual(info['size_deadjobs'], 1)
self.jobQueue.delJob(self.jobId1, 1)
info = self.jobQueue.getInfo()
self.assertEqual(info['size_deadjobs'], 0)
return False
def test_get(self):
ret_job_1 = self.jobQueue.get(self.jobId1)
self.assertEqual(str(ret_job_1.id), self.jobId1)
ret_job_2 = self.jobQueue.get(self.jobId2)
self.assertEqual(str(ret_job_2.id), self.jobId2)
def test_getNextPendingJob(self):
self.jobQueue.assignJob(self.jobId2)
self.jobQueue.unassignJob(self.jobId1)
exp_id = self.jobQueue.getNextPendingJob()
self.assertMultiLineEqual(exp_id, self.jobId1)
def test_getNextPendingJobReuse(self):
return False
def test_assignJob(self):
self.jobQueue.assignJob(self.jobId1)
job = self.jobQueue.get(self.jobId1)
self.assertFalse(job.isNotAssigned())
def test_unassignJob(self):
self.jobQueue.assignJob(self.jobId1)
job = self.jobQueue.get(self.jobId1)
self.assertTrue(job.assigned)
self.jobQueue.unassignJob(self.jobId1)
job = self.jobQueue.get(self.jobId1)
return self.assertEqual(job.assigned, False)
def test_makeDead(self):
info = self.jobQueue.getInfo()
self.assertEqual(info['size_deadjobs'], 0)
self.jobQueue.makeDead(self.jobId1, "test")
info = self.jobQueue.getInfo()
self.assertEqual(info['size_deadjobs'], 1)
def test__getNextID(self):
init_id = self.jobQueue.nextID
for i in xrange(1, Config.MAX_JOBID + 100):
id = self.jobQueue._getNextID()
self.assertNotEqual(str(id), self.jobId1)
self.jobQueue.nextID = init_id
if __name__ == '__main__':
unittest.main()
| 1.5625 | 2 |
scripts/utils/merge.py | GabrielTavernini/TelegramMap | 3 | 23556
import pandas as pd
import os
import sys
from dotenv import load_dotenv
load_dotenv()
src = pd.read_csv(sys.argv[1])
dst = pd.read_csv(os.getenv('FILE_PATH'))
fdf = pd.concat([dst, src])
fdf = fdf[~((fdf['user'].duplicated(keep='first')) & (fdf['user']!='Point'))]
fdf = fdf[~fdf.duplicated(keep='first')]
fdf.to_csv(os.getenv('FILE_PATH'), index=False) | 1.585938 | 2 |
src/jsonengine/main.py | youhengzhou/json-crud-engine | 2 | 23684
# JSON engine 21 9 16
# database
# eng.json
# engine
# eng.py
import os
import json
path = os.getcwd() + '\\json_engine_database\\'
path_string = ''
def set_path(string):
global path
path = os.getcwd() + string
def dictionary_kv(dictionary, key, value):
dictionary[key] = value
return dictionary
def set_path_string(args,create_flag):
global path_string
if (args):
path_string = str(args[0]) + '\\'
if os.path.exists(path + path_string)==False:
if create_flag == True:
os.makedirs(path + path_string)
else:
return False
return path_string
def create(dictionary, *args):
path_string = set_path_string(args,True)
with open(path + path_string + 'eng.json', 'w') as outfile:
json.dump(dictionary, outfile, indent=4)
def retrieve(*args):
path_string = set_path_string(args,False)
if path_string == False:
return False
with open(path + path_string + 'eng.json', 'r') as f:
return(json.load(f))
def retrieve_k(key, *args):
path_string = set_path_string(args,False)
if path_string == False:
return False
with open(path + path_string + 'eng.json', 'r') as f:
if key in json.load(f):
with open(path + path_string + 'eng.json', 'r') as f:
return(json.load(f)[key])
else:
return False
def update(dictionary, *args):
path_string = set_path_string(args,False)
if path_string == False:
return False
with open(path + path_string + 'eng.json', 'w') as outfile:
json.dump(dictionary, outfile, indent=4)
return True
def update_kv(key, value, *args):
path_string = set_path_string(args,False)
if path_string == False:
return False
with open(path + path_string + 'eng.json', 'w') as outfile:
json.dump({key: value}, outfile, indent=4)
return True
def patch(dictionary, *args):
path_string = set_path_string(args,False)
if path_string == False:
return False
with open(path + path_string + 'eng.json', 'r') as f:
data=(json.load(f))
data.update(dictionary)
with open(path + path_string + 'eng.json', 'w') as outfile:
json.dump(data, outfile, indent=4)
return True
def patch_kv(key, value, *args):
path_string = set_path_string(args,False)
if path_string == False:
return False
with open(path + path_string + 'eng.json', 'r') as f:
data=(json.load(f))
data.update({key: value})
with open(path + path_string + 'eng.json', 'w') as outfile:
json.dump(data, outfile, indent=4)
return True
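# Editor's sketch (not part of the original engine): a small create/update/read
# round trip in a throwaway sub-folder named 'demo'.
def _crud_demo():
    create({'name': 'alice', 'score': 1}, 'demo')
    patch_kv('score', 2, 'demo')
    return retrieve_k('score', 'demo')  # expected to return 2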
def delete(*args):
    path_string = str(args[0]) + '\\' if args else ''  # default to the engine root when no sub-folder is given
if os.path.exists(path + path_string + 'eng.json'):
os.remove(path + path_string + 'eng.json')
os.rmdir(path + path_string)
return True
else:
return False
def delete_k(key, *args):
    path_string = str(args[0]) + '\\' if args else ''
    if os.path.exists(path + path_string + 'eng.json'):
        with open(path + path_string + 'eng.json', 'r') as f:
            data = json.load(f)  # load once; a second json.load on the same handle would fail
        if key in data:
            data.pop(key)
            with open(path + path_string + 'eng.json', 'w') as outfile:
                json.dump(data, outfile, indent=4)
            return True
        else:
            return False
    else:
        return False
def display(*args):
    path_string = str(args[0]) + '\\' if args else ''
if os.path.exists(path + path_string + 'eng.json'):
with open(path + path_string + 'eng.json', 'r') as f:
print(json.load(f))
return True
else:
print('The selected file does not exist')
return False
def display_key(key, *args):
    path_string = str(args[0]) + '\\' if args else ''
    if os.path.exists(path + path_string + 'eng.json'):
        with open(path + path_string + 'eng.json', 'r') as f:
            data = json.load(f)  # load once; re-reading the same handle would fail
        if key in data:
            print(key + ' ' + str(data[key]))
            return True
    else:
        print('The selected file does not exist')
        return False
def display_nkv(key, *args):
    path_string = str(args[0]) + '\\' if args else ''
    if os.path.exists(path + path_string + 'eng.json'):
        with open(path + path_string + 'eng.json', 'r') as f:
            data = json.load(f)  # load once; re-reading the same handle would fail
        if key in data:
            data.pop(key, 'key not found')
            print(data)
            return True
    else:
        print('The selected file does not exist')
        return False
def display_ind(*args):
    path_string = str(args[0]) + '\\' if args else ''
if os.path.exists(path + path_string + 'eng.json'):
with open(path + path_string + 'eng.json', 'r') as f:
print(json.dumps(json.load(f), indent=4))
else:
print('The selected file does not exist')
def display_ind_nkv(key, *args):
    path_string = str(args[0]) + '\\' if args else ''
if os.path.exists(path + path_string + 'eng.json'):
with open(path + path_string + 'eng.json', 'r') as f:
data = json.load(f)
data.pop(key,'key not found')
print(json.dumps(data, indent=4))
else:
print('The selected file does not exist')
| 1.789063 | 2 |
00-basic/04_01_data_structures_lists.py | TranXuanHoang/Python | 0 | 23812
# Lists
# Basics
foods = ['rice', 'Meat', 'vegetables', 'Eggs']
print(foods)
# Same as foods[len(foods):] = ['butter']
foods.append('butter')
print(foods)
# Same as foods[len(foods):] = ['tomatoes', 'chili sauce']
foods.extend(['tomatoes', 'Chili sauce'])
print(foods)
# Reverse order of elements in the list
foods.reverse()
print(foods)
# Copy the list
copy_of_foods = foods.copy()
print(copy_of_foods)
# Sort in ascending order
foods.sort()
print(foods)
# Sort in descending order without considering lower or upper case
copy_of_foods.sort(key=str.lower, reverse=True)
print(copy_of_foods)
# Using Lists as Stacks
stack_normal = ['+', 4, '*', 7, '-', 3, 6]
stack_error = ['+', 4, '?', 7, '-', 3, 6]
def evaluate(stack):
expression = ''
round = 0
while len(stack) >= 3:
first_operand = stack.pop()
second_operand = stack.pop()
operator = stack.pop()
subexpression = str(first_operand) + ' ' + operator + \
' ' + str(second_operand)
if round == 0:
expression = '(' + subexpression + ')'
else:
expression = '(' + expression + ' ' + operator + \
' ' + str(second_operand) + ')'
round += 1
if operator == '+':
stack.append(first_operand + second_operand)
elif operator == '-':
stack.append(first_operand - second_operand)
elif operator == '*':
stack.append(first_operand * second_operand)
elif operator == '/':
stack.append(first_operand / second_operand)
else:
stack.append('Error [Invalid Operator]: ' + subexpression)
break
result = str(stack.pop())
if 'Error' in result:
return result
else:
return expression + ' = ' + result
print(evaluate(stack_normal))
print(evaluate(stack_error))
# Using List as Queues
from collections import deque
queue = deque(["(", "c", "+", "d", ")"])
print(queue)
queue.append('/')
queue.append('d')
print(queue)
queue.appendleft('*')
queue.appendleft('a')
print(queue)
# List Comprehensions
drinks = [' Beer ', ' Tea', 'Coca Cola ', ' Pepsi', 'Water']
trimmed_drinks = [drink.strip()
for drink in drinks] # trim all trailing spaces
print(drinks)
print(trimmed_drinks)
# filter drinks whose name length is longer that or equal to 5
print([drink for drink in trimmed_drinks if len(drink) >= 5])
foods = ['rice', 'Meat', 'vegetables', 'Eggs']
menus = [(food.upper(), drink.lower())
for food in foods for drink in trimmed_drinks]
print(menus)
vector = [
[1, 2, 3],
['Monday', 'Tuesday', 'Wednesday'],
['Morning', 'Afternoon', 'Night']
]
# [1, 2, 3, 'Monday', 'Tuesday', 'Wednesday', 'Morning', 'Afternoon', 'Night']
flatten_vector = [el for row in vector for el in row]
print(flatten_vector)
# [
# [1, 'Monday', 'Morning'],
# [2, 'Tuesday', 'Afternoon'],
# [3, 'Wednesday', 'Night']
# ]
transposed_vector = [[row[i] for row in vector] for i in range(3)]
print(transposed_vector)
| 3.140625 | 3 |
Python/linprog/simplex.py | bashardudin/LinearPrograms | 22 | 23940 | #!/usr/bin/env python
# _*_ encoding: utf-8 _*_
"""simplex.py: Simplex algorithm with rational coefficients"""
import numpy as np
import fractions as frac
__author__ = "<NAME>"
__email__ = "<EMAIL>"
class RestrictedSimplex(object):
def __init__(self, leaving_index=None, entering_index=None):
if not leaving_index:
def func(l):
m = 0
while not l[m] and m < len(l):
m += 1
if m == len(l):
return 0
for i in range(len(l)):
if l[i] and l[m] > l[i]:
m = i
return m
leaving_index = func
if not entering_index:
def func(l):
return l.index(min(l))
entering_index = func
self.leaving_index = leaving_index
self.entering_index = entering_index
def __call__(self, lin_p, recursion_limit=100):
""" Runs a restricted version of the simplex algorithm
Runs simplex algorithm on linear programs having feasible basic
solution. It takes in an integer to limit the number of recursions.
:return: a linear program whose basic solution has maximal objective
value.
"""
a = lin_p.table
        if not lin_p.has_feasible_basic():
            raise TypeError("Linear program doesn't have a feasible basic solution")
n = 0
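        # Pivot until no objective-row coefficient is negative or the iteration
        # limit is reached.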
while any(a[0, :-1] < 0) and n < recursion_limit:
entering_choices = [i for i in map(lambda x: 0 if x > 0 else x,
a[0, :-1])]
e = self.entering_index(entering_choices)
leaving_choices = [None]*lin_p.shape[0]
for i in range(lin_p.shape[0]):
if a[i+1, e] > 0:
leaving_choices[i] = (a[i+1, -1]/a[i+1, e])
if not [i for i in leaving_choices if i]:
raise OverflowError("Linear program unbounded | check model and state.")
else:
l = 1 + self.leaving_index(leaving_choices)
lin_p.pivot(e, l)
n += 1
form = "Basic solution = " + \
"(" + "{}, " * (lin_p.shape[1] - 1) + "{})" + \
" with objective value = {}."
print(form.format(*lin_p.basic_solution(), lin_p.table[0, -1]), end="\n\n")
return lin_p.basic_solution(), lin_p.table[0, -1]
class Simplex(RestrictedSimplex):
def is_feasible(self, lin_p):
""" Checks if linear program is feasible..
Has side effect: transforms linear program if not basic feasible
into an equivalent linear program having basic feasible solution.
:return: boolean.
"""
print(" ### Checking feasibility of linear program", lin_p, sep="\n\n")
if lin_p.has_feasible_basic():
print(" ### Input linear program has feasible basic solution", end="\n\n")
return True
print(" ### Basic solution is not feasible: using auxiliary linear program in next step", end="\n\n")
gain_fun = np.copy(lin_p.table[0])
lin_p.shape = (lin_p.shape[0], lin_p.shape[1] + 1)
lin_p.table = np.insert(lin_p.table, 0, frac.Fraction(-1, 1), axis=1)
lin_p.table[0] = np.hstack((np.ones(1, dtype=frac.Fraction),
np.zeros(lin_p.shape[1], dtype=frac.Fraction)))
lin_p.basic = [i+1 for i in lin_p.basic]
l = 1 + np.argmin(lin_p.table[1:, -1])
lin_p.pivot(0, l) # Now program has feasible basic solution
if RestrictedSimplex.__call__(self, lin_p)[1] == 0:
print(" ### Input linear program is thus feasible", end="\n\n")
if 0 in lin_p.basic:
l = lin_p.basic.index(0)
e = 0
                while e < lin_p.shape[1] and lin_p.table[l, e] == 0:
# There is a at least an e with this property
# Unbounded otherwise
e += 1
lin_p.pivot(e, l) # 0 not basic anymore
lin_p.basic = [i-1 for i in lin_p.basic]
lin_p.table = lin_p.table[:, 1:]
lin_p.shape = (lin_p.shape[0], lin_p.shape[1] - 1)
lin_p.table[0] = gain_fun
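            # Re-express the restored objective row in terms of the current basis by
            # eliminating the coefficients of the basic variables.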
for i in lin_p.basic:
lin_p.table[0, :] = lin_p.table[0, :] - \
lin_p.table[0, i] * \
lin_p.table[1 + lin_p.basic.index(i), :]
lin_p.table[0, -1] = -lin_p.table[0, -1]
return True
else:
return False
def __call__(self, lin_p, recursion_limit=100):
""" Simplex algorithm.
        :return: the optimal basic solution together with its objective value.
"""
if self.is_feasible(lin_p):
simplex = RestrictedSimplex(self.leaving_index,
self.entering_index)
print(" ### Getting back to linear program equivalent to input with feasible basic solution", end="\n\n")
return simplex(lin_p, recursion_limit=recursion_limit)
else:
raise Exception("Linear program is not feasible.")
| 3.21875 | 3 |
tests/unit_tests/test_features.py | constantinpape/mc_luigi | 0 | 24068 | import unittest
import os
from subprocess import call
import z5py
import vigra
from test_class import McLuigiTestCase
class TestDataTasks(McLuigiTestCase):
@classmethod
def setUpClass(cls):
super(TestDataTasks, cls).setUpClass()
@classmethod
def tearDownClass(cls):
super(TestDataTasks, cls).tearDownClass()
def check_features(self, feature_path):
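        # The feature matrix must have one row per edge of the cached region
        # adjacency graph, and no feature column may be identically zero.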
rag_path = './cache/StackedRegionAdjacencyGraph_sampleA_watershed.h5'
self.assertTrue(os.path.exists(rag_path))
n_edges = vigra.readHDF5(rag_path, 'numberOfEdges')
self.assertTrue(os.path.exists(feature_path))
features = z5py.File(feature_path, use_zarr_format=False)['data'][:]
self.assertEqual(n_edges, len(features))
for feat_id in range(features.shape[1]):
self.assertFalse((features[:, feat_id] == 0).all())
def test_region_features(self):
call(['python', './executables/features.py', 'region'])
feat_path = ''
self.check_features(feat_path)
if __name__ == '__main__':
unittest.main()
| 1.171875 | 1 |
text-classify/test.py | ubear/MachineLearn | 0 | 24196 | #coding:utf-8
import re
import math
from docclass import Classifier
def test_infc_func():
c = Classifier(getfeatures=None)
c.infc("python", "good")
c.infc("python", "good")
c.infc("the", "bad")
c.infc("the", "good")
print c.fc
if __name__ == "__main__":
test_infc_func()
| 1.085938 | 1 |
api/views.py | seanpierce/django-itr | 0 | 24324 | # import json
import uuid
from django.apps import apps
from django.core import serializers
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.shortcuts import render
from django.shortcuts import redirect
from django.conf import settings
from .api_helpers import *
Episode = apps.get_model('episodes', 'Episode')
SubscriptionRequest = apps.get_model('subscribers', 'SubscriptionRequest')
Subscriber = apps.get_model('subscribers', 'Subscriber')
def get_episodes(request):
episodes = Episode.objects.all()
res = serializers.serialize("json", episodes)
return HttpResponse(res, content_type='application/json')
@csrf_exempt
def create_new_subscription_request(request):
if not valid_method('POST', request):
return error_response('Error: Method must be POST', 405)
email = request.POST.get('email', False)
if not email:
return error_response('Error: No email provided in request', 422)
subscription_request, created_new = SubscriptionRequest.objects.get_or_create(
email=email)
if not created_new:
subscription_request.token = uuid.uuid4()
subscription_request.save()
if send_confirmation_email(subscription_request):
return response('Email sent to ' + email)
else:
return error_response('Unable to send email to ' + email, 500)
def create_subscriber(request):
email = request.GET.get('email', False)
token = request.GET.get('token', False)
if (not email or not token):
return error_response("Error: Unable to process request. Missing information", 422)
subscription_request = SubscriptionRequest.objects.get(email=email, token=token)
if not subscription_request:
return error_response("Error: Subscription request not found", 404)
subscriber, created_new = Subscriber.objects.get_or_create(email=email)
if created_new:
exists = 'False'
else:
exists = 'True'
return redirect('/thanks/?email=' + email + '&exists=' + exists)
def thanks(request):
root = settings.HOST_URL
email = request.GET.get('email', False)
exists = request.GET.get('exists', False)
return render(request,
'api/thanks.html',
{
'email': email,
'root': root,
'exists': exists
})
| 1.398438 | 1 |
main.py | JakeSichley/Discord-Bot | 1 | 24452 | <filename>main.py
"""
MIT License
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from os import getenv
from sys import version
from dotenv import load_dotenv
from dreambot import DreamBot
import logging
import discord
def main() -> None:
"""
Driver method.
"""
print(f'Current Python Version: {version}')
print(f'Current Discord Version: {discord.__version__}')
logging.basicConfig(level=logging.INFO, format='%(asctime)s: %(levelname)s:%(name)s: %(message)s',
datefmt='%I:%M %p on %A, %B %d, %Y')
load_dotenv()
# required
token = getenv('DISCORD_TOKEN')
owner = int(getenv('OWNER_ID'))
prefix = getenv('PREFIX', '>')
database = getenv('DATABASE')
environment = getenv('ENVIRONMENT', 'DEV')
# optional
options = {
'status_type': discord.ActivityType(int(getenv('STATUS_TYPE', 1))),
'status_text': getenv('STATUS_TEXT')
}
# explicitly disabled cogs
try:
options['disabled_cogs'] = getenv('DISABLED_COGS').split(',')
except AttributeError:
pass
# git optionals
git_options = {
'git_user': getenv('GITHUB_USER'),
'git_repo': getenv('GITHUB_REPO'),
'git_token': getenv('GITHUB_TOKEN')
}
if all(git_options.values()):
options['git'] = git_options
# specify intents (members requires explicit opt-in via dev portal)
intents = discord.Intents(guilds=True, members=True, bans=True, emojis=True, voice_states=True, messages=True,
reactions=True)
dream_bot = DreamBot(intents, database, prefix, owner, environment, options=options)
dream_bot.run(token)
# Run the bot
if __name__ == '__main__':
main()
| 1.398438 | 1 |
pytests/backup/ibr.py | ramalingam-cb/testrunner | 0 | 24580 | __author__ = 'ashvinder'
import re
import os
import gc
import logger
import time
from TestInput import TestInputSingleton
from backup.backup_base import BackupBaseTest
from remote.remote_util import RemoteMachineShellConnection
from couchbase_helper.documentgenerator import BlobGenerator
from couchbase_helper.documentgenerator import DocumentGenerator
from memcached.helper.kvstore import KVStore
from membase.api.rest_client import RestConnection, Bucket
from couchbase_helper.data_analysis_helper import *
from memcached.helper.data_helper import VBucketAwareMemcached
from view.spatialquerytests import SimpleDataSet
from view.spatialquerytests import SpatialQueryTests
from membase.helper.spatial_helper import SpatialHelper
from couchbase_helper.cluster import Cluster
from membase.helper.bucket_helper import BucketOperationHelper
from couchbase_helper.document import DesignDocument, View
import copy
class IBRTests(BackupBaseTest):
def setUp(self):
super(IBRTests, self).setUp()
self.num_mutate_items = self.input.param("mutate_items", 1000)
gen_load = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_load, "create", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a full backup
if not self.command_options:
self.command_options = []
options = self.command_options + [' -m full']
self.total_backups = 1
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
def tearDown(self):
super(IBRTests, self).tearDown()
def restoreAndVerify(self, bucket_names, kvs_before, expected_error=None):
for bucket in self.buckets:
bucket.kvs[1] = kvs_before[bucket.name]
del kvs_before
gc.collect()
errors, outputs = self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)
errors.extend(outputs)
error_found = False
if expected_error:
for line in errors:
if line.find(expected_error) != -1:
error_found = True
break
self.assertTrue(error_found, "Expected error not found: %s" % expected_error)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
if expected_error:
for bucket in self.buckets:
bucket.kvs[1] = KVStore()
self.verify_results(self.master)
self._verify_stats_all_buckets(self.servers[:self.num_servers])
def verify_dir_structure(self, total_backups, buckets, nodes):
cmd = 'find ' + self.backup_location + ' -type f'
if self.shell.info.type.lower() == 'windows':
cmd = 'cmd.exe /C "dir /s /b C:\\tmp\\backup"'
output, error = self.shell.execute_command(cmd)
self.log.info("output = {0} error = {1}".format(output,error))
if error:
raise Exception('Got error {0}',format(error))
expected_design_json = total_backups * buckets
expected_data_cbb = total_backups * buckets * nodes
expected_meta_json = total_backups * buckets * nodes
expected_failover_json = total_backups * buckets * nodes
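        # The backup tree is expected to look like
        # <timestamp>/<timestamp>-<mode>/bucket-<name>/node-<ip>/ with one
        # data-0000.cbb and failover.json per node (plus meta.json except on 4.5.1)
        # and one design.json per bucket.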
timestamp = '\d{4}\-\d{2}\-\d{2}T\d+Z'
pattern_mode = '(full|accu|diff)'
timestamp_backup = timestamp + '\-' + pattern_mode
pattern_bucket = 'bucket-\w+'
pattern_node = 'node\-\d{1,3}\.\d{1,3}\.\d{1,3}.\d{1,3}.+'
pattern_design_json = timestamp + '/|\\\\' + timestamp_backup + \
'/|\\\\' + pattern_bucket
pattern_backup_files = pattern_design_json + '/|\\\\' + pattern_node
data_cbb = 0
failover = 0
meta_json = 0
design_json = 0
for line in output:
if 'data-0000.cbb' in line:
if re.search(pattern_backup_files, line):
data_cbb += 1
if 'failover.json' in line:
if re.search(pattern_backup_files, line):
failover += 1
if self.cb_version[:5] != "4.5.1" and 'meta.json' in line:
if re.search(pattern_backup_files, line):
meta_json += 1
if 'design.json' in line:
if re.search(pattern_design_json, line):
design_json += 1
self.log.info("expected_data_cbb {0} data_cbb {1}"
.format(expected_data_cbb, data_cbb))
self.log.info("expected_failover_json {0} failover {1}"
.format(expected_failover_json, failover))
if self.cb_version[:5] != "4.5.1":
self.log.info("expected_meta_json {0} meta_json {1}"
.format(expected_meta_json, meta_json))
""" add json support later in this test
self.log.info("expected_design_json {0} design_json {1}"
.format(expected_design_json, design_json)) """
if self.cb_version[:5] != "4.5.1":
if data_cbb == expected_data_cbb and failover == expected_failover_json and \
meta_json == expected_meta_json:
# add support later in and design_json == expected_design_json:
return True
else:
if data_cbb == expected_data_cbb and failover == expected_failover_json:
return True
return False
def testFullBackupDirStructure(self):
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Full Backup')
def testMultipleFullBackupDirStructure(self):
for count in range(10):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a incremental backup
options = self.command_options + [' -m full']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.sleep(120)
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Full Backup')
def testIncrBackupDirStructure(self):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Incremental Backup')
def testMultipleIncrBackupDirStructure(self):
for count in range(10):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.log.info("sleeping for 30 secs")
self.sleep(30)
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Incremental Backup')
def testMultipleDiffBackupDirStructure(self):
for count in range(10):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a incremental backup
options = self.command_options + [' -m diff']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.sleep(60)
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Differential Backup')
def testMultipleIncrDiffBackupDirStructure(self):
for count in range(10):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.sleep(60)
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a diff backup
options = self.command_options + [' -m diff']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.sleep(60)
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Combo Incr and Diff Backup')
def testMultipleFullIncrDiffBackupDirStructure(self):
for count in range(10):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.sleep(60)
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a diff backup
options = self.command_options + [' -m diff']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.sleep(60)
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a full backup
options = self.command_options + [' -m full']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options, delete_backup=False)
self.total_backups += 1
self.sleep(60)
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Combo Full,Incr and Diff Backups')
def testDiffBackupDirStructure(self):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=5)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a diff backup
options = self.command_options + [' -m diff']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Differential Backup')
def testIncrementalBackup(self):
gen_extra = BlobGenerator('zoom', 'zoom-', self.value_size, end=self.num_items)
self.log.info("Starting Incremental backup")
extra_items_deleted_flag = 0
if(self.doc_ops is not None):
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("update" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("delete" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
extra_items_deleted_flag = 1
if("expire" in self.doc_ops):
if extra_items_deleted_flag == 1:
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
self._load_all_buckets(self.master, gen_extra, "update", self.expire_time, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
#Take a incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
self._bucket_creation()
self.sleep(20)
self.restoreAndVerify(bucket_names, kvs_before)
def testDifferentialBackup(self):
gen_extra = BlobGenerator('zoom', 'zoom-', self.value_size, end=self.num_items)
self.log.info("Starting Differential backup")
extra_items_deleted_flag = 0
if(self.doc_ops is not None):
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("update" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("delete" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
extra_items_deleted_flag = 1
if("expire" in self.doc_ops):
if extra_items_deleted_flag == 1:
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
self._load_all_buckets(self.master, gen_extra, "update", self.expire_time, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a diff backup
options = self.command_options + [' -m diff']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
self._bucket_creation()
self.sleep(20)
self.restoreAndVerify(bucket_names, kvs_before)
def testFullBackup(self):
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
self._bucket_creation()
self.sleep(20)
self.restoreAndVerify(bucket_names, kvs_before)
def testIncrementalBackupConflict(self):
gen_extra = BlobGenerator('zoom', 'zoom-', self.value_size, end=self.num_items)
self.log.info("Starting Incremental backup")
extra_items_deleted_flag = 0
if(self.doc_ops is not None):
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("update" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("delete" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
extra_items_deleted_flag = 1
if("expire" in self.doc_ops):
if extra_items_deleted_flag == 1:
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
self._load_all_buckets(self.master, gen_extra, "update", self.expire_time, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
#Take a incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
        self.lww = self.input.param("lww_new", False)
self._bucket_creation()
self.sleep(20)
expected_error = self.input.param("expected_error", None)
self.restoreAndVerify(bucket_names, kvs_before, expected_error)
class IBRJsonTests(BackupBaseTest):
def setUp(self):
super(IBRJsonTests, self).setUp()
self.num_mutate_items = self.input.param("mutate_items", 1000)
template = '{{ "mutated" : 0, "age": {0}, "first_name": "{1}" }}'
gen_load = DocumentGenerator('load_by_id_test', template, range(5),\
['james', 'john'], start=0, end=self.num_items)
self._load_all_buckets(self.master, gen_load, "create", 0, 1,\
self.item_flag, True, batch_size=20000,\
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
if self.test_with_view:
view_list = []
bucket = "default"
if self.dev_view:
prefix_ddoc="dev_ddoc"
else:
prefix_ddoc="ddoc"
ddoc_view_map = self.bucket_ddoc_map.pop(bucket, {})
for ddoc_count in xrange(self.num_ddocs):
design_doc_name = prefix_ddoc + str(ddoc_count)
view_list = self.make_default_views("views", self.num_views_per_ddoc)
self.create_views(self.master, design_doc_name, view_list,\
bucket, self.wait_timeout * 2)
ddoc_view_map[design_doc_name] = view_list
self.bucket_ddoc_map[bucket] = ddoc_view_map
#Take a full backup
if not self.command_options:
self.command_options = []
options = self.command_options + [' -m full']
self.total_backups = 1
self.shell.execute_cluster_backup(self.couchbase_login_info,\
self.backup_location, options)
self.sleep(2)
def testFullBackup(self):
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
self._bucket_creation()
self.sleep(20)
self.restoreAndVerify(bucket_names, kvs_before)
def restoreAndVerify(self,bucket_names,kvs_before):
for bucket in self.buckets:
bucket.kvs[1] = kvs_before[bucket.name]
del kvs_before
gc.collect()
self.shell.restore_backupFile(self.couchbase_login_info,\
self.backup_location, bucket_names)
self.sleep(10)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
self.verify_results(self.master)
self._verify_stats_all_buckets(self.servers[:self.num_servers])
""" add design doc and view """
if self.test_with_view:
result = False
query = {"stale" : "false", "full_set" : "true", \
"connection_timeout" : 60000}
for bucket, ddoc_view_map in self.bucket_ddoc_map.items():
for ddoc_name, view_list in ddoc_view_map.items():
for view in view_list:
try:
result = self.cluster.query_view(self.master,\
ddoc_name, view.name, query,\
self.num_items, timeout=10)
except Exception:
pass
if not result:
self.fail("There is no: View: {0} in Design Doc:"\
" {1} in bucket: {2}"\
.format(view.name, ddoc_name, bucket))
self.log.info("DDoc Data Validation Successful")
def tearDown(self):
super(IBRJsonTests, self).tearDown()
def testMultipleBackups(self):
if not self.command_options:
self.command_options = []
options = self.command_options
if self.backup_type is not None:
if "accu" in self.backup_type:
options = self.command_options + [' -m accu']
if "diff" in self.backup_type:
options = self.command_options + [' -m diff']
diff_backup = [" -m diff"]
accu_backup = [" -m accu"]
current_backup = [" -m diff"]
for count in range(self.number_of_backups):
if "mix" in self.backup_type:
if current_backup == diff_backup:
current_backup = accu_backup
options = self.command_options + accu_backup
elif current_backup == accu_backup:
current_backup = diff_backup
options = self.command_options + diff_backup
# Update data
template = '{{ "mutated" : {0}, "age": {0}, "first_name": "{1}" }}'
gen_update = DocumentGenerator('load_by_id_test', template, range(5),\
['james', 'john'], start=0, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1,\
self.item_flag, True, batch_size=20000,\
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a backup
self.shell.execute_cluster_backup(self.couchbase_login_info,\
self.backup_location, options)
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
self._bucket_creation()
self.sleep(20)
self.restoreAndVerify(bucket_names, kvs_before)
class IBRSpatialTests(SpatialQueryTests):
def setUp(self):
self.input = TestInputSingleton.input
self.servers = self.input.servers
self.master = self.servers[0]
self.log = logger.Logger.get_logger()
self.helper = SpatialHelper(self, "default")
self.helper.setup_cluster()
self.cluster = Cluster()
self.default_bucket = self.input.param("default_bucket", True)
self.sasl_buckets = self.input.param("sasl_buckets", 0)
self.standard_buckets = self.input.param("standard_buckets", 0)
self.memcached_buckets = self.input.param("memcached_buckets", 0)
self.servers = self.helper.servers
self.shell = RemoteMachineShellConnection(self.master)
info = self.shell.extract_remote_info()
self.os = info.type.lower()
self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
self.input.membase_settings.rest_password)
self.backup_location = self.input.param("backup_location", "/tmp/backup")
self.command_options = self.input.param("command_options", '')
def tearDown(self):
self.helper.cleanup_cluster()
def test_backup_with_spatial_data(self):
num_docs = self.helper.input.param("num-docs", 5000)
self.log.info("description : Make limit queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_limit_queries()
self._query_test_init(data_set)
if not self.command_options:
self.command_options = []
options = self.command_options + [' -m full']
self.total_backups = 1
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
time.sleep(2)
self.buckets = RestConnection(self.master).get_buckets()
bucket_names = [bucket.name for bucket in self.buckets]
BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
gc.collect()
self.helper._create_default_bucket()
self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)
SimpleDataSet(self.helper, num_docs)._create_views()
self._query_test_init(data_set)
| 1.351563 | 1 |
rdfdatabank/lib/reqclassifier.py | dataflow/RDFDatabank | 4 | 24708 | # -*- coding: utf-8 -*-
"""
Copyright (c) 2012 University of Oxford
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from webob import Request
import zope.interface
from repoze.who.classifiers import default_request_classifier
from repoze.who.interfaces import IRequestClassifier
import ConfigParser
from pylons import config
def custom_request_classifier(environ):
""" Returns one of the classifiers 'app', 'browser' or any
standard classifiers returned by
repoze.who.classifiers:default_request_classifier
"""
classifier = default_request_classifier(environ)
if classifier == 'browser':
login_form_url = '/login'
login_handler = '/login_handler'
logout_handler = '/logout_handler'
logout_url = '/logout'
# Decide if the client is a (user-driven) browser or an application
if config.has_key("who.config_file"):
config_file = config["who.config_file"]
config_who = ConfigParser.ConfigParser()
config_who.readfp(open(config_file))
login_form_url = config_who.get("plugin:friendlyform", "login_form_url")
login_handler = config_who.get("plugin:friendlyform", "login_handler_path")
logout_handler = config_who.get("plugin:friendlyform", "logout_handler_path")
logout_url = config_who.get("plugin:friendlyform", "post_logout_url")
path_info = environ['PATH_INFO']
#request = Request(environ)
#if not request.accept.best_match(['application/xhtml+xml', 'text/html']):
# # In our view, any client who doesn't support HTML/XHTML is an "app",
# # not a (user-driven) "browser".
# classifier = 'app'
if not path_info in [login_form_url, login_handler, logout_handler, logout_url]:
# In our view, any client who hasn't come in from the login url is an app
classifier = 'app'
return classifier
zope.interface.directlyProvides(custom_request_classifier, IRequestClassifier)
| 1.210938 | 1 |
app/routes.py | apigram/HospitalWaiterAuthService | 0 | 24836 | from app import app
from flask_restful import Api
from app.resources.auth import TokenResource
api = Api(app)
# Token resource
api.add_resource(TokenResource, '/authservice/token', endpoint='auth_token')
| 0.878906 | 1 |
src/dal_select2/__init__.py | pandabuilder/django-autocomplete-light | 0 | 24964 | <reponame>pandabuilder/django-autocomplete-light<gh_stars>0
"""Select2 support for DAL."""
# default_app_config = 'dal_select2.apps.DefaultApp'
| 0.492188 | 0 |
documentation/examples/policy_aggregation.py | oscardavidtorres1994/cadCAD | 1 | 25092 | import pandas as pd
from tabulate import tabulate
from cadCAD.configuration import append_configs
from cadCAD.configuration.utils import config_sim
from cadCAD.engine import ExecutionMode, ExecutionContext, Executor
from cadCAD import configs
# Policies per Mechanism
def p1m1(_g, step, sH, s):
return {'policy1': 1}
def p2m1(_g, step, sH, s):
return {'policy2': 2}
def p1m2(_g, step, sH, s):
return {'policy1': 2, 'policy2': 2}
def p2m2(_g, step, sH, s):
return {'policy1': 2, 'policy2': 2}
def p1m3(_g, step, sH, s):
return {'policy1': 1, 'policy2': 2, 'policy3': 3}
def p2m3(_g, step, sH, s):
return {'policy1': 1, 'policy2': 2, 'policy3': 3}
# Internal States per Mechanism
def add(y, x):
return lambda _g, step, sH, s, _input: (y, s[y] + x)
def policies(_g, step, sH, s, _input):
y = 'policies'
x = _input
return (y, x)
# Genesis States
genesis_states = {
'policies': {},
's1': 0
}
variables = {
's1': add('s1', 1),
"policies": policies
}
psubs = {
"m1": {
"policies": {
"p1": p1m1,
"p2": p2m1
},
"variables": variables
},
"m2": {
"policies": {
"p1": p1m2,
"p2": p2m2
},
"variables": variables
},
"m3": {
"policies": {
"p1": p1m3,
"p2": p2m3
},
"variables": variables
}
}
sim_config = config_sim(
{
"N": 1,
"T": range(3),
}
)
append_configs(
sim_configs=sim_config,
initial_state=genesis_states,
partial_state_update_blocks=psubs,
policy_ops=[lambda a, b: a + b, lambda y: y * 2] # Default: lambda a, b: a + b
)
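# policy_ops is applied per substep: the first op folds signals that share a key
# across the policy functions (summing them here); every later op then maps over
# the folded values (doubling them here).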
exec_mode = ExecutionMode()
local_proc_ctx = ExecutionContext(context=exec_mode.local_mode)
run = Executor(exec_context=local_proc_ctx, configs=configs)
raw_result, tensor_field, sessions = run.execute()
result = pd.DataFrame(raw_result)
print()
print("Tensor Field:")
print(tabulate(tensor_field, headers='keys', tablefmt='psql'))
print("Output:")
print(tabulate(result, headers='keys', tablefmt='psql'))
print()
| 1.476563 | 1 |
2_CS_Medium/Leetcode/Interview_Easy/DLC_9_Other.py | andremichalowski/CSN1 | 0 | 25220 | <filename>2_CS_Medium/Leetcode/Interview_Easy/DLC_9_Other.py
# 1. Number of 1 Bits (HammingWeight):
# https://leetcode.com/explore/interview/card/top-interview-questions-easy/99/others/565/
# Easy way
def hammingWeight(self, n: int) -> int:
return bin(n).count('1')
# Harder way - https://stackoverflow.com/questions/21237767/python-a-b-meaning
def hammingWeight(self, n):
c = 0
while n:
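        # n & (n - 1) clears the lowest set bit, so the loop runs once per 1-bit.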
n &= n - 1
c += 1
return c
# 2. Hamming Distance:
# Easy Way
# bin(x ^ y).count('1')
#Right way (Bitwise Operators): https://code.tutsplus.com/articles/understanding-bitwise-operators--active-11301
# Approach 1: Just check every bit in both numbers and increment when they are different
def hammingDistance(self, x: int, y: int) -> int:
hamming_distance = 0
while x != 0 or y != 0:
if x % 2 != y % 2:
hamming_distance += 1
x = x >> 1
y = y >> 1
return hamming_distance
# Approach 2: Just make XOR of x and y and after that count the number of '1' bits.
# because XOR of two different bits is always 1
def hammingDistance(self, x: int, y: int) -> int:
hamming_distance = 0
new = x ^ y
while new > 0:
if new % 2 == 1:
hamming_distance += 1
new = new >> 1
return hamming_distance
# Approach 3: Again make XOR of x and y but when we count the number of '1' bits
# we make the trick n&(n-1) which removes last '1' bit
def hammingDistance(self, x: int, y: int) -> int:
hamming_distance = 0
new = x ^ y
while new > 0:
new = new & (new-1)
hamming_distance += 1
return hamming_distance
# Good explanation of XOR solution: https://www.youtube.com/watch?v=UP4GhCxeC4I
# 3. Reverse Bits (Reverse Bits of a 32 bits unsigned integer):
# https://leetcode.com/explore/featured/card/top-interview-questions-easy/99/others/648/discuss/54932/Three-different-solutions-in-python
def reverseBits(self, n):
bit_str = '{0:032b}'.format(n) # Format n into bit string (length of 32)
reverse_str = bit_str[::-1] # Reverse bit_string with slice fxnality
return int(reverse_str, 2) # Return string as int w/ 2
# 4. Pascals Triangle:
def generate(self, numRows):
lists = []
for i in range(numRows):
lists.append([1]*(i+1))
if i>1 :
for j in range(1,i):
lists[i][j]=lists[i-1][j-1]+lists[i-1][j]
return lists
# 5. Valid Parenthesis:
def isValid(self, s):
"""
:type s: str
:rtype: bool
"""
# The stack to keep track of opening brackets.
stack = []
# Hash map for keeping track of mappings. This keeps the code very clean.
# Also makes adding more types of parenthesis easier
mapping = {")": "(", "}": "{", "]": "["}
# For every bracket in the expression.
for char in s:
# If the character is an closing bracket
if char in mapping:
# Pop the topmost element from the stack, if it is non empty
# Otherwise assign a dummy value of '#' to the top_element variable
top_element = stack.pop() if stack else '#'
# The mapping for the opening bracket in our hash and the top
# element of the stack don't match, return False
if mapping[char] != top_element:
return False
else:
# We have an opening bracket, simply push it onto the stack.
stack.append(char)
# In the end, if the stack is empty, then we have a valid expression.
# The stack won't be empty for cases like ((()
return not stack
def isValid(self, s):
stack = []
mapping = {")": "(", "}": "{", "]": "["}
for char in s:
if char in mapping:
top_element = stack.pop() if stack else '#'
if mapping[char] != top_element:
return False
else:
stack.append(char)
return not stack
# 6. Missing Number (missing number in an array):
# One line
def missingNumber(self, nums):
return sum(range(len(nums)+1)) - sum(nums)
# Two lines
def missingNumber(self, nums):
n = len(nums)
return n * (n+1) / 2 - sum(nums)
| 3.09375 | 3 |
label_studio/ml/examples/object_detection.py | zoumt1633/label-studio | 0 | 25348 | <filename>label_studio/ml/examples/object_detection.py<gh_stars>0
# -*- coding: utf-8 -*-
# @Time : 2020/7/12 14:19
# @Author : zoumaotai
# @Email : <EMAIL>
# @File : object_detection.py
# @Software: PyCharm
import random
import urllib
from gluoncv import model_zoo, data
from label_studio.ml import LabelStudioMLBase
import mxnet as mx
class ObjectDetectionModel(LabelStudioMLBase):
def __init__(self, **kwargs):
super(ObjectDetectionModel, self).__init__(**kwargs)
from_name, schema = list(self.parsed_label_config.items())[0]
self.from_name = from_name
self.to_name = schema['to_name'][0]
self.labels = schema['labels']
self.net = model_zoo.get_model('faster_rcnn_resnet50_v1b_voc', pretrained=True)
def predict(self, tasks, **kwargs):
results = []
for task in tasks:
image_url = task.get('data').get('image')
image_url = f'http://localhost:8080{image_url}' if not image_url.startswith('http') else image_url
urllib.request.urlretrieve(image_url, "test.jpg")
src = mx.img.imread('test.jpg')
org_h, org_w, _ = src.shape
x, orig_img = data.transforms.presets.rcnn.load_test("test.jpg")
h, w, _ = orig_img.shape
ratio_h = org_h/h
ratio_w = org_w/w
            print('height scale ratio', ratio_h)
            print('width scale ratio', ratio_w)
box_ids, scores, bboxes = self.net(x)
result_list = []
for bbox, box_id, score in zip(bboxes[0].asnumpy().tolist(), box_ids[0].asnumpy().tolist(),
scores[0].asnumpy().tolist()):
if bbox[0] == -1:
break
label = self.net.classes[int(box_id[0])]
score = score[0]
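                # Map the box from the resized inference image back to the original
                # image, then express it as percentages of the original size
                # (Label Studio rectangle values are percentage-based).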
x = bbox[0] * ratio_w * 100 / org_w
y = bbox[1] * ratio_h * 100 / org_h
height = (bbox[3] - bbox[1]) * ratio_h * 100 / org_h
width = (bbox[2] - bbox[0]) * ratio_w * 100 / org_w
if score > 0.8:
result_list.append(
{
"from_name": "label",
"id": "t5sp3TyXPo",
"source": "$image",
"to_name": "image",
"type": "rectanglelabels",
"value": {
"height": height, # 高度占比
"rectanglelabels": [
label
],
"rotation": 0,
"width": width, # 宽度占比
"x": x,
"y": y
}
})
results.append({
"result": result_list,
'score': 0.9
})
print(results)
return results
def fit(self, completions, workdir=None, **kwargs):
return {'random': random.randint(1, 10)}
| 1.75 | 2 |
courses/migrations/0009_alter_skills_program_duration_and_more.py | sisekelohub/sisekelo | 1 | 25476 | <reponame>sisekelohub/sisekelo
# Generated by Django 4.0 on 2022-01-02 21:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0008_alter_learnership_duration'),
]
operations = [
migrations.AlterField(
model_name='skills_program',
name='duration',
field=models.CharField(max_length=50, null=True),
),
migrations.AlterField(
model_name='specialized_course',
name='duration',
field=models.CharField(max_length=50, null=True),
),
]
| 0.789063 | 1 |