max_stars_repo_path
stringlengths 4
197
| max_stars_repo_name
stringlengths 6
120
| max_stars_count
int64 0
191k
| id
stringlengths 1
8
| content
stringlengths 6
964k
| score
float64 -0.88
3.95
| int_score
int64 0
4
|
---|---|---|---|---|---|---|
cogs/autoupdate_ko.py | PLM912/Keter | 0 | 51204 | <gh_stars>0
import discord
from discord.ext import commands
from evs import default
from evs import permissions, default, http, dataIO
import requests
import os
class Autoupdate_ko(commands.Cog):
    """Owner-only commands (Korean) to hot-update or delete cog source files.

    Source files are fetched from the upstream GitHub repository, written into
    the local ``./cogs`` directory, and then (re)loaded into the running bot.
    """
    def __init__(self, bot):
        self.bot = bot
        self.config = default.get("config.json")
    # Commands
    @commands.command()
    @commands.check(permissions.is_owner)
    async def 업데이트(self, ctx, filename: str):
        """Download the latest ``cogs/<filename>.py`` from GitHub and (re)load it."""
        await ctx.trigger_typing()
        await ctx.send("소스코드 업데이트 중...")
        link = "https://raw.githubusercontent.com/Shio7/Keter/master/cogs/" + filename + ".py"
        r = requests.get(link, allow_redirects=True)
        if os.path.isfile('./cogs/' + filename + ".py"):
            # An old version exists: unload it before replacing the file.
            try:
                # BUG FIX: interpolate the requested module name into the
                # extension path (the placeholder was never substituted).
                self.bot.unload_extension(f"cogs.{filename}")
            except Exception as e:
                return await ctx.send(default.traceback_maker(e))
            await ctx.send(f"Unloaded extension **{filename}.py**")
            os.remove('./cogs/' + filename + ".py")
        # BUG FIX: both branches wrote the same file via a bare
        # open().write() that leaked the handle; hoist the write out of the
        # conditional and close it deterministically with `with`.
        with open('./cogs/' + filename + ".py", 'wb') as fh:
            fh.write(r.content)
        await ctx.send("업데이트 완료: "+filename+".py")
        # Load (or re-load) the freshly written extension.
        try:
            self.bot.load_extension(f"cogs.{filename}")
        except Exception as e:
            return await ctx.send(default.traceback_maker(e))
        await ctx.send(f"**{filename}.py 로드 완료**")
    @commands.command()
    @commands.check(permissions.is_owner)
    async def 지우기(self, ctx, filename: str):
        """Unload and delete ``cogs/<filename>.py`` if it exists."""
        if os.path.isfile('./cogs/' + filename + ".py"):
            try:
                # BUG FIX: same placeholder interpolation as in 업데이트.
                self.bot.unload_extension(f"cogs.{filename}")
            except Exception as e:
                return await ctx.send(default.traceback_maker(e))
            await ctx.send(f"Unloaded extension **{filename}.py**")
            os.remove('./cogs/' + filename + ".py")
            await ctx.send(f"**{filename}.py** 삭제완료")
        else:
            await ctx.send(f"**{filename}.py 찾을 수 없음**")
def setup(bot):
    """Entry point called by discord.py to register this cog on the bot."""
    bot.add_cog(Autoupdate_ko(bot))
| 1.554688 | 2 |
Medium/621. Task Scheduler/solution (1).py | czs108/LeetCode-Solutions | 3 | 51332 | # 621. Task Scheduler
class Solution:
    # Greedy
    def leastInterval(self, tasks: list[str], n: int) -> int:
        """Return the minimum number of CPU intervals needed to run all
        ``tasks`` ('A'-'Z') with a cooldown of ``n`` intervals between two
        identical tasks.

        BUG FIX: the annotation used ``List[str]`` without importing
        ``typing.List`` (NameError at class creation); the builtin generic
        ``list[str]`` needs no import.
        """
        # Maximum possible number of idle slots is defined by the frequency
        # of the most frequent task: (max_freq - 1) gaps of length n.
        freq = [0] * 26
        for t in tasks:
            freq[ord(t) - ord('A')] += 1
        freq.sort()
        max_freq = freq.pop()
        idle_time = (max_freq - 1) * n
        # Fill idle slots with the remaining tasks, most frequent first; a
        # task can occupy at most (max_freq - 1) of the gaps.
        while freq and idle_time > 0:
            idle_time -= min(max_freq - 1, freq.pop())
        idle_time = max(0, idle_time)
        return idle_time + len(tasks)
test/jpypetest/test_sql_generic.py | pitmanst/jpype | 531 | 51460 | # This file is Public Domain and may be used without restrictions.
import _jpype
import jpype
from jpype.types import *
from jpype import java
import jpype.dbapi2 as dbapi2
import common
import time
try:
import zlib
except ImportError:
zlib = None
class SQLModuleTestCase(common.JPypeTestCase):
    """Tests for the module-level PEP 249 surface of :mod:`jpype.dbapi2`:
    constants, the exception hierarchy, and the type/constructor helpers."""
    def setUp(self):
        common.JPypeTestCase.setUp(self)
    def assertIsSubclass(self, a, b):
        """Assert that ``a`` is a subclass of ``b`` with a readable message."""
        self.assertTrue(issubclass(a, b), "`%s` is not a subclass of `%s`" % (a.__name__, b.__name__))
    def testConstants(self):
        # PEP 249 mandated module globals.
        self.assertEqual(dbapi2.apilevel, "2.0")
        self.assertEqual(dbapi2.threadsafety, 2)
        self.assertEqual(dbapi2.paramstyle, "qmark")
    def testExceptions(self):
        # PEP 249 exception hierarchy.
        # BUG FIX: the InternalError check appeared twice (copy-paste
        # duplicate); the redundant line added no coverage and was removed.
        self.assertIsSubclass(dbapi2.Warning, Exception)
        self.assertIsSubclass(dbapi2.Error, Exception)
        self.assertIsSubclass(dbapi2.InterfaceError, dbapi2.Error)
        self.assertIsSubclass(dbapi2.DatabaseError, dbapi2.Error)
        self.assertIsSubclass(dbapi2._SQLException, dbapi2.Error)
        self.assertIsSubclass(dbapi2.DataError, dbapi2.DatabaseError)
        self.assertIsSubclass(dbapi2.OperationalError, dbapi2.DatabaseError)
        self.assertIsSubclass(dbapi2.IntegrityError, dbapi2.DatabaseError)
        self.assertIsSubclass(dbapi2.InternalError, dbapi2.DatabaseError)
        self.assertIsSubclass(dbapi2.ProgrammingError, dbapi2.DatabaseError)
        self.assertIsSubclass(dbapi2.NotSupportedError, dbapi2.DatabaseError)
    def testConnectionExceptions(self):
        # PEP 249 optional extension: exceptions exposed as Connection attrs.
        # BUG FIX: duplicated InternalError check removed (see testExceptions).
        cx = dbapi2.Connection
        self.assertEqual(cx.Warning, dbapi2.Warning)
        self.assertEqual(cx.Error, dbapi2.Error)
        self.assertEqual(cx.InterfaceError, dbapi2.InterfaceError)
        self.assertEqual(cx.DatabaseError, dbapi2.DatabaseError)
        self.assertEqual(cx.DataError, dbapi2.DataError)
        self.assertEqual(cx.OperationalError, dbapi2.OperationalError)
        self.assertEqual(cx.IntegrityError, dbapi2.IntegrityError)
        self.assertEqual(cx.InternalError, dbapi2.InternalError)
        self.assertEqual(cx.ProgrammingError, dbapi2.ProgrammingError)
        self.assertEqual(cx.NotSupportedError, dbapi2.NotSupportedError)
    def test_Date(self):
        """Date() and DateFromTicks() constructors must exist and be callable."""
        d1 = dbapi2.Date(2002, 12, 25)  # noqa F841
        d2 = dbapi2.DateFromTicks(  # noqa F841
            time.mktime((2002, 12, 25, 0, 0, 0, 0, 0, 0))
        )
        # Can we assume this? API doesn't specify, but it seems implied
        # self.assertEqual(str(d1),str(d2))
    def test_Time(self):
        """Time() and TimeFromTicks() constructors must exist and be callable."""
        t1 = dbapi2.Time(13, 45, 30)  # noqa F841
        t2 = dbapi2.TimeFromTicks(  # noqa F841
            time.mktime((2001, 1, 1, 13, 45, 30, 0, 0, 0))
        )
        # Can we assume this? API doesn't specify, but it seems implied
        # self.assertEqual(str(t1),str(t2))
    def test_Timestamp(self):
        """Timestamp() and TimestampFromTicks() constructors must exist."""
        t1 = dbapi2.Timestamp(2002, 12, 25, 13, 45, 30)  # noqa F841
        t2 = dbapi2.TimestampFromTicks(  # noqa F841
            time.mktime((2002, 12, 25, 13, 45, 30, 0, 0, 0))
        )
        # Can we assume this? API doesn't specify, but it seems implied
        # self.assertEqual(str(t1),str(t2))
    def test_Binary(self):
        """Binary() must accept both empty and non-empty bytes."""
        b = dbapi2.Binary(b"Something")
        b = dbapi2.Binary(b"")  # noqa F841
    def test_STRING(self):
        self.assertTrue(hasattr(dbapi2, "STRING"), "module.STRING must be defined")
    def test_BINARY(self):
        self.assertTrue(
            hasattr(dbapi2, "BINARY"), "module.BINARY must be defined."
        )
    def test_NUMBER(self):
        self.assertTrue(
            hasattr(dbapi2, "NUMBER"), "module.NUMBER must be defined."
        )
    def test_DATETIME(self):
        self.assertTrue(
            hasattr(dbapi2, "DATETIME"), "module.DATETIME must be defined."
        )
    def test_ROWID(self):
        self.assertTrue(hasattr(dbapi2, "ROWID"), "module.ROWID must be defined.")
class SQLTablesTestCase(common.JPypeTestCase):
    """Smoke tests: every dbapi2 type object must stringify cleanly."""
    def setUp(self):
        common.JPypeTestCase.setUp(self)
    def testStr(self):
        for type_obj in dbapi2._types:
            self.assertIsInstance(str(type_obj), str)
    def testRepr(self):
        for type_obj in dbapi2._types:
            self.assertIsInstance(repr(type_obj), str)
| 1.242188 | 1 |
web_app/routes/GET_PUT_API.py | AmyBeisel/BW_Med_Cabinet | 0 | 51588 | <filename>web_app/routes/GET_PUT_API.py
# GET_PUT_API.py
# This is the test version of the API, it can both GET and POST JSON data
# An NLP model is imported as the recommend function, recommends strains and sends results
#Imports
import pandas as pd
import requests
import json
from flask import Blueprint, request, jsonify, render_template
from web_app.Recommend import recommend
# Make Blueprint for __init__.py
GET_PUT_API = Blueprint("GET_PUT_API", __name__)
# GET_PUT_API template
@GET_PUT_API.route('/predict', methods=['GET', 'PUT'])
def template():
    """Render the landing page for the /predict endpoint."""
    return render_template("predict.html", message = "DS Med Cabinet API using natural language processing to recommend the best cannabis strains to Med Cabinet members.")
# GET_PUT_API get_predict_put
def get_predict_put():
    """Handle GET (recommend strains from posted effects) and PUT (echo the
    recommendation payload).

    NOTE(review): this function has no ``@GET_PUT_API.route`` decorator, so
    Flask never registers it; as written it is dead code — confirm intent.
    NOTE(review): the PUT branch references ``user_id``, ``first_name``,
    ``last_name``, ``effects`` and ``results``, which are only bound in the
    GET branch of a *different* request; reaching the PUT branch would raise
    NameError. State must be persisted between requests for this to work.
    """
    # GET JSON User Data
    if request.method=='GET':
        # Request .json from Web
        get_data = request.json
        # Extracting id, first name, last name, and effects from the json get_data
        user_id = get_data["id"]
        first_name = get_data["First Name"]
        last_name = get_data["Last Name"]
        effects = get_data["Effects"]
        # Make recommendation
        results = recommend(effects)
        # Return results
        return results
    # PUT JSON User Data and Recommendation
    elif request.method=='PUT':
        # User Data to be sent to backend API
        post_data = {"id": user_id,
                     "First Name": first_name,
                     "Last Name": last_name,
                     "Desired_Effects": effects,
                     "Reccommendation": results}
        # Recommendation
        reccommendation = json.dumps(post_data) #(post_data, indent=2, separators=(', ', ': '))
        return reccommendation
    else:
        return ("OK, waiting.")
| 1.726563 | 2 |
run.py | nunenuh/crnn.pytorch | 1 | 51716 | import os
import sys
# from lmdb.cffi import version as ver
sys.path.append(os.getcwd())
import torch
from iqra.models.crnn import *
from iqra.modules.feature import *
if __name__ == '__main__':
    # Smoke-test driver: feed random image/text tensors through OCRNet.
    # Batch of 3 single-channel 224x224 images.
    image_data = torch.rand(3,1,224,224)
    # Batch of 3 token sequences of length 512 (random values cast to int64).
    text_data = torch.rand(3,512).long()
    # text_data = torch.LongTensor(text_data)
    # fe = FeatureExtraction(in_channels=1, version=50)
    # hype = fe.feature.last_channels
    # print(fe)
    # print(fe(image_data))
    # print()
    # print(fe(image_data).shape)
    # out = enc(test_data)
    # # print(out)
    num_class = 96
    im_size = (32, 100)  # model's expected (H, W); input above is 224x224 — presumably resized internally, TODO confirm
    model = OCRNet(num_class = num_class, im_size=im_size)
    out = model(image_data, text_data)
    print(out)
    print(out.shape)
| 1.140625 | 1 |
alchemist_lib/database/aum_history.py | Dodo33/alchemist-lib | 5 | 51844 | from sqlalchemy import DateTime, String, ForeignKey, Integer, Column, Float
from sqlalchemy.orm import relationship
from . import Base
class AumHistory(Base):
    """ORM mapping for the ``aum_history`` table.

    Columns:
        - **aum_id**: Integer, primary key.
        - **aum_datetime**: DateTime, not null.
        - **aum**: Float(20, 8), not null.
        - **ts_name**: String(150), not null, foreign key to ``ts.ts_name``.

    Relationships:
        - **ts**: the owning TradingSystem instance. (Many-to-One)
    """

    __tablename__ = "aum_history"

    aum_id = Column(Integer, primary_key=True)
    aum_datetime = Column(DateTime, nullable=False)
    aum = Column(Float(precision=20, scale=8, asdecimal=True), nullable=False)
    ts_name = Column(String(150), ForeignKey("ts.ts_name"), nullable=False)

    ts = relationship("Ts")

    def __repr__(self):
        return "<AumHistory(datetime={}, aum={}, ts={})>".format(
            self.aum_datetime, self.aum, self.ts_name
        )
| 1.875 | 2 |
src/cobald/daemon/runners/trio_runner.py | thoto/cobald | 7 | 51972 | import trio
from functools import partial
from .base_runner import BaseRunner
from .async_tools import raise_return, AsyncExecution
class TrioRunner(BaseRunner):
    """Runner for coroutines with :py:mod:`trio`"""
    flavour = trio
    def __init__(self):
        # Reserved for a nursery handle; currently only _await_all's local
        # nursery is used.
        self._nursery = None
        super().__init__()
    def register_payload(self, payload):
        # Wrap the payload so a non-None return value raises instead of being
        # silently dropped (see async_tools.raise_return).
        super().register_payload(partial(raise_return, payload))
    def run_payload(self, payload):
        """Schedule ``payload`` and block until its result is available."""
        execution = AsyncExecution(payload)
        super().register_payload(execution.coroutine)
        return execution.wait()
    def _run(self):
        # Enter the trio event loop; returns when _await_all finishes.
        return trio.run(self._await_all)
    async def _await_all(self):
        """Async component of _run"""
        delay = 0.0
        # we run a top-level nursery that automatically reaps/cancels for us
        async with trio.open_nursery() as nursery:
            while self.running.is_set():
                await self._start_payloads(nursery=nursery)
                # Poll for new payloads with gentle back-off, capped at 1s.
                await trio.sleep(delay)
                delay = min(delay + 0.1, 1.0)
            # cancel the scope to cancel all payloads
            nursery.cancel_scope.cancel()
    async def _start_payloads(self, nursery):
        """Start all queued payloads"""
        # Drain the shared payload queue under the lock, then yield once so
        # the newly started tasks get a chance to run.
        with self._lock:
            for coroutine in self._payloads:
                nursery.start_soon(coroutine)
            self._payloads.clear()
        await trio.sleep(0)
| 1.804688 | 2 |
xv_leak_tools/network/linux/network_services.py | UAEKondaya1/expressvpn_leak_testing | 219 | 52100 | <reponame>UAEKondaya1/expressvpn_leak_testing
import ctypes
import netifaces
import NetworkManager # pylint: disable=import-error
from xv_leak_tools.exception import XVEx
from xv_leak_tools.log import L
from xv_leak_tools.process import check_subprocess
class _NetworkObject:
def __init__(self, conn):
self._settings = conn.GetSettings()
self._id = self._settings['connection']['id']
self._uuid = self._settings['connection']['uuid']
def __str__(self):
return "{} ({})".format(self.id(), self.uuid())
def __repr__(self):
return str(self)
def __eq__(self, other):
return self.uuid() == other.uuid()
def uuid(self):
return self._uuid
def id(self):
return self._id
def name(self):
# TODO: Decide on this API.
return self._id
class NetworkService(_NetworkObject):
    """A NetworkManager connection that can be enabled/disabled and mapped
    to its network interface."""
    def active(self):
        """Return True if this connection is among NetworkManager's active
        connections (compared by UUID via _NetworkObject.__eq__)."""
        active_conns = NetworkManager.NetworkManager.ActiveConnections
        active_conns = [NetworkService(conn.Connection) for conn in active_conns]
        if self in active_conns:
            return True
        return False
    def enable(self):
        """Bring the connection up via nmcli."""
        L.debug("Enabling connection {}".format(self.name()))
        check_subprocess(['nmcli', 'connection', 'up', self.name()])
    def disable(self):
        """Take the connection down via nmcli."""
        L.debug("Disabling connection {}".format(self.name()))
        check_subprocess(['nmcli', 'connection', 'down', self.name()])
    def interface(self):
        """Return the OS interface name backing this connection.

        Prefers the explicit 'interface-name' setting; otherwise matches the
        connection's MAC address against every local interface.
        """
        # TODO: Reject this idea? Maybe interfaces should be chosen without
        # regard to connection status, if NM can't be trusted.
        # In which case, tests that get a list of interfaces should just use
        # netifaces directly.
        try:
            return self._settings['connection']['interface-name']
        except KeyError:
            # Fall back to MAC lookup; the MAC lives in the section named
            # after the connection type (e.g. '802-3-ethernet').
            connection_type = self._settings['connection']['type']
            # TODO: Test this on different types.
            mac_address = self._settings[connection_type]['mac-address']
            for iface in netifaces.interfaces():
                iface_mac = netifaces.ifaddresses(iface)[netifaces.AF_LINK][0]['addr'].lower()
                if mac_address.lower() == iface_mac:
                    return iface
            raise XVEx("Couldn't find any connection interfaces")
    def enable_interface(self):
        """Bring the underlying interface up (ifconfig)."""
        L.debug("Enabling interface {}".format(self.interface()))
        # TODO: Move to unix tools or use "ip link set dev iface up"?
        check_subprocess(['ifconfig', self.interface(), 'up'])
    def disable_interface(self):
        """Take the underlying interface down (ifconfig)."""
        L.debug("Disabling interface {}".format(self.interface()))
        # TODO: Move to unix tools or use "ip link set dev iface up"?
        check_subprocess(['ifconfig', self.interface(), 'down'])
class LinuxNetwork:
    """Linux-specific network helpers backed by NetworkManager."""
    @staticmethod
    def network_services_in_priority_order():
        """Return NetworkService objects sorted by autoconnect priority,
        highest first; connections without a priority are excluded."""
        conns = NetworkManager.Settings.ListConnections()
        conns = list(
            filter(lambda x: 'autoconnect-priority' in x.GetSettings()['connection'], conns))
        # NetworkManager uses int32s so we need to "cast" the autoconnect-priority value.
        def uint32(signed_integer):
            return int(ctypes.c_uint32(signed_integer).value)
        conns.sort(
            key=lambda x: uint32(x.GetSettings()['connection']['autoconnect-priority']),
            reverse=True)
        return [NetworkService(conn) for conn in conns]
| 1.578125 | 2 |
apidaora/myapp.py | sarincr/Python-Web-Frameworks-and-Template-Engines | 0 | 52228 | <filename>apidaora/myapp.py<gh_stars>0
from apidaora import appdaora, route
@route.get('/')
def hello_controller(name: str) -> str:
    """Return a static greeting.

    BUG FIX: the return value was an f-string with no placeholders; the
    prefix was dropped. ``name`` is accepted (presumably bound from a query
    parameter by apidaora) but intentionally unused — the response is fixed.
    """
    return 'Hello World!'
app = appdaora(hello_controller)
| 1 | 1 |
Day-48/Wiki_Interaction/interaction.py | MihirMore/100daysofcode-Python | 4 | 52356 | <reponame>MihirMore/100daysofcode-Python
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
# Selenium walkthrough: open the demo site, follow links, run a search and
# submit the signup form.
# NOTE(review): backslashes in this Windows path are not escaped; "\P" and
# "\c" happen to pass through literally, but a raw string r"C:\..." would be
# safer — confirm the path still resolves.
chrome_driver_path = "C:\Program Files\chromedriver_win32\chromedriver.exe"
driver = webdriver.Chrome(executable_path=chrome_driver_path)
driver.get("http://secure-retreat-92358.herokuapp.com/")
# Article-count link inside the #articlecount element.
number_of_articles = driver.find_element_by_css_selector("#articlecount a")
print(number_of_articles)
number_of_articles.click()
all_portals = driver.find_element_by_partial_link_text("All portals")
all_portals.click()
# Type a query into the search box and submit with Enter.
search = driver.find_element_by_name("search")
search.send_keys("Python")
search.send_keys(Keys.ENTER)
# Fill in and submit the signup form fields.
f_name = driver.find_element_by_name("fName")
f_name.send_keys("Mihir")
l_name = driver.find_element_by_name("lName")
l_name.send_keys("More")
email = driver.find_element_by_name("email")
email.send_keys("<EMAIL>")
button = driver.find_element_by_css_selector("form button")
button.click()
| 1.148438 | 1 |
src/mapstp/cli/runner.py | MC-kit/map-stp | 0 | 52484 | <gh_stars>0
"""Application to transfer meta information from STP.
For given STP file creates Excel table with a list
of STP paths to STP components, corresponding to cells
in MCNP model, would it be generated from the STP with SuperMC.
The excel also contains material numbers, densities, correction factors,
and RWCL id. The values can be specified in the names of STP
components as special tags. A tag is denoted with bracket enclosed
specification at the end of component name: "Component name [<spec>]".
The spec may contain space separated entries:
- m:<mnemonic> - first column in a special material-index.xlsx file.
- f:<factor> - float number for density correction factor
- r:<rwcl> - any label to categorize the components for RWCL
If MCNP file is also specified as the second `mcnp` argument,
then produces output MCNP file with STP paths inserted
as end of line comments after corresponding cells with prefix
"sep:". The material numbers and densities are set according
to the meta information provided in the STP.
"""
from dataclasses import dataclass
from pathlib import Path
import click
from mapstp import __name__ as package_name
from mapstp import __summary__, __version__
from mapstp.excel import create_excel
from mapstp.materials import get_used_materials, load_materials_map
from mapstp.merge import correct_start_cell_number, join_paths, merge_paths
from mapstp.utils.io import can_override, select_output
# TODO dvp: add customized configuring from a configuration toml-file.
from mapstp.workflow import create_path_info
# from .logging import logger
# from click_loguru import ClickLoguru
# LOG_FILE_RETENTION = 3
# NO_LEVEL_BELOW = 30
#
#
# def stderr_log_format_func(msg_dict):
# """Do level-sensitive formatting.
#
# Just a copy from click-loguru so far."""
#
# if msg_dict["level"].no < NO_LEVEL_BELOW:
# return "<level>{message}</level>\n"
# return "<level>{level}</level>: <level>{message}</level>\n"
#
#
# click_loguru = ClickLoguru(
# NAME,
# VERSION,
# stderr_format_func=stderr_log_format_func,
# retention=LOG_FILE_RETENTION,
# log_dir_parent=".logs",
# timer_log_level="info",
# )
@dataclass
class Config:
    """CLI context object shared between commands via click's ``ctx.obj``."""
    # When True, existing output files may be overwritten.
    override: bool = False
_USAGE = f"""
{__summary__}
For given STP file creates Excel table with a list
of STP paths to STP components, corresponding to cells
in MCNP model, would it be generated from the STP with SuperMC.
If MCNP file is also specified as the second `mcnp-file` argument,
then produces output MCNP file with STP paths inserted
as end of line comments after corresponding cells with prefix
"sep:". The material numbers and densities are set according
to the meta information provided in the STP.
"""
# @click_loguru.logging_options
# @click.group(help=meta.__summary__, name=NAME)
@click.command(help=_USAGE, name=package_name)
# @click_loguru.init_logger()
# @click_loguru.stash_subcommand()
@click.option(
    "--override/--no-override",
    default=False,
    help="Override existing files, (default: no)",
)
@click.option(
    "--output",
    "-o",
    metavar="<output>",
    type=click.Path(dir_okay=False),
    required=False,
    help="File to write the MCNP with marked cells (default: stdout)",
)
@click.option(
    "--excel",
    "-e",
    metavar="<excel-file>",
    type=click.Path(dir_okay=False),
    required=False,
    help="Excel file to write the component paths",
)
@click.option(
    "--materials",
    metavar="<materials-file>",
    type=click.Path(dir_okay=False, exists=True),
    required=False,
    help="Text file containing MCNP materials specifications."
    "If present, the selected materials present in this file are printed"
    "to the `output` MCNP model, so, it becomes complete valid model",
)
@click.option(
    "--materials-index",
    "-m",
    metavar="<materials-index-file>",
    type=click.Path(dir_okay=False, exists=True),
    required=False,
    help="Excel file containing materials mnemonics and corresponding references for MCNP model "
    "(default: file from the package internal data corresponding to ITER C-model)",
)
@click.option(
    "--separator",
    metavar="<separator>",
    type=click.STRING,
    default="/",
    help="String to separate components in the STP path",
)
@click.option(
    "--start-cell-number",
    metavar="<number>",
    type=click.INT,
    required=False,
    help="Number to start cell numbering in the Excel file "
    "(default: the first cell number in `mcnp` file, if specified, otherwise 1)",
)
@click.argument(
    "stp", metavar="<stp-file>", type=click.Path(dir_okay=False, exists=True)
)
@click.argument(
    "mcnp",
    metavar="[mcnp-file]",
    type=click.Path(dir_okay=False, exists=True),
    required=False,
)
@click.version_option(__version__, prog_name=package_name)
# @logger.catch(reraise=True)
@click.pass_context
# ctx, verbose: bool, quiet: bool, logfile: bool, profile_mem: bool, override: bool
def mapstp(
    ctx,
    override: bool,
    output,
    excel,
    materials,
    materials_index,
    separator,
    start_cell_number,
    stp,
    mcnp,
) -> None:
    """Transfer meta information from STP to MCNP model and Excel.

    BUG FIX: this docstring was an f-string, which Python does not treat as
    a docstring (``mapstp.__doc__`` was None and click/help tooling could
    not see it); converted to a plain docstring and filled in.

    Args:
        ctx: click context; carries the shared :class:`Config` object.
        override: allow overwriting existing output files.
        output: path for the MCNP model with marked cells (default stdout).
        excel: path for the Excel file listing component paths.
        materials: text file with MCNP material specifications.
        materials_index: Excel file mapping material mnemonics to references.
        separator: string joining components in an STP path.
        start_cell_number: first cell number used in the Excel output.
        stp: input STP file.
        mcnp: optional input MCNP file to annotate.

    Raises:
        click.UsageError: when neither `excel` nor `mcnp` is given.
    """
    if not (mcnp or excel):
        raise click.UsageError(
            "Nor `excel`, neither `mcnp` parameter is specified - nothing to do"
        )
    # if quiet:
    #     logger.level("WARNING")
    # if verbose:
    #     logger.level("TRACE")
    # logger.info("Running {}", NAME)
    # logger.debug("Working dir {}", Path(".").absolute())
    #
    cfg = ctx.ensure_object(Config)
    # obj["DEBUG"] = debug
    cfg.override = override
    paths, path_info = create_path_info(materials_index, stp)
    materials_map = load_materials_map(materials) if materials else None
    used_materials_text = (
        get_used_materials(materials_map, path_info) if materials_map else None
    )
    if mcnp:
        _mcnp = Path(mcnp)
        with select_output(override, output) as _output:
            joined_paths = join_paths(paths, separator)
            merge_paths(_output, joined_paths, path_info, _mcnp, used_materials_text)
    if excel:
        # Default the numbering to the first cell of the MCNP model, if any.
        start_cell_number = correct_start_cell_number(start_cell_number, mcnp)
        _excel = Path(excel)
        can_override(_excel, override)
        create_excel(_excel, paths, path_info, separator, start_cell_number)
# TODO dvp: add logging
if __name__ == "__main__":
    mapstp()
| 1.828125 | 2 |
Missing_data.py | itskhagendra/Mortality-Rate- | 1 | 52612 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 15 18:47:17 2017
@author: Khagendra
The following fills the missing data
"""
from sklearn.preprocessing import Imputer
from numba import jit
@jit
def MisDat(X):
    """Impute missing values in columns 9..37 of array ``X`` in place.

    Uses sklearn's (deprecated) ``Imputer`` with the most-frequent strategy
    per column and returns the same array with the slice replaced.
    NOTE(review): numba's @jit cannot compile sklearn calls in nopython mode;
    this presumably falls back to object mode — confirm it adds any speedup.
    """
    #Filling the most frequent value in place of empty values
    imputer=Imputer(missing_values="NaN",strategy="most_frequent",axis=0)
    imputer=imputer.fit(X[:,9:38])
    X[:,9:38]=imputer.transform(X[:,9:38])
    return X
noicesoup/noicesoup.py | richeyphu/noicesoup | 0 | 52740 | """
A simple python package for scraping and downloading images from Google
Usage:
$ noicesoup.py [-h] -k KEYWORD [-cd CHROMEDRIVER]
NOTE: Default webdriver is Chrome in relative path "chromedriver"
Images will be saved in "~/Downloads/noicesoup_dl/<keyword>"
This package is currently under development...
"""
import threading
import time
import urllib.request
import os
import argparse
from time import sleep
from selenium import webdriver
from bs4 import BeautifulSoup
from pathlib import Path
def get_driver():
    """Open Google Images for the module-global ``keyword`` and scroll to
    load results; returns the live Chrome WebDriver.

    NOTE(review): the module-global ``driver_path`` from argparse is ignored;
    the hard-coded 'chromedriver' path is used instead — likely a bug.
    """
    path = 'chromedriver'
    driver = webdriver.Chrome(executable_path=path)
    driver.get(f'https://www.google.com/search?q={keyword}&tbm=isch')
    # Scroll repeatedly so Google lazily loads more thumbnails.
    for i in range(0, 7):
        driver.execute_script('window.scrollBy(0,document.body.scrollHeight)')
        try:
            # for clicking show more results button
            # NOTE(review): find_element takes (by, value); passing the XPath
            # as the single positional argument will not locate the button —
            # the except below hides the failure. Confirm intended call.
            driver.find_element(
                '//*[@id="islmp"]/div/div/div/div/div[2]/div[2]/input').click()
        except Exception:
            pass
    time.sleep(3)
    return driver
def download_images(driver):
    """Scrape thumbnail <img> tags from the loaded results page and save each
    as ``<downloads_path>/<keyword>/<index>.jpg``.

    Failures for individual images (missing 'src', network errors) are
    skipped silently so one bad thumbnail does not abort the batch.
    """
    soup = BeautifulSoup(driver.page_source, 'html.parser')
    img_tags = soup.find_all('img', class_='rg_i')
    length = len(img_tags)
    # get pics and download
    for i, v in enumerate(img_tags):
        try:
            loading_bar(i + 1, length)
            urllib.request.urlretrieve(
                v['src'], f"{downloads_path}/{keyword}/{str(i + 1)}.jpg")
        except Exception:
            pass
    print()
def loading_bar(n, l):
    """Redraw an in-place progress bar for item ``n`` of ``l`` (no newline)."""
    percent = n / l * 100
    bar = "█" * round(percent / 2)
    print(f"\rDownloading : {bar} ({percent:.2f}%)", end="")
def loading_spinner():
    """Animate a braille spinner on stdout until the module-global
    ``stop_thread`` flag becomes True (set by main()); run in a thread."""
    msg = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏"
    len_msg = len(msg)
    counter = 0
    while True:
        displayed = ""
        displayed += msg[(counter + 1) % len_msg]
        print(f"\r{displayed} Loading {keyword=}", end="")
        sleep(0.05)
        counter = (counter + 1) % len_msg
        if stop_thread:
            break
def create_dir():
    """Create the per-keyword download directory.

    Relies on the module globals ``downloads_path`` and ``keyword`` set by
    ``main()``. BUG FIX: the original ``except Exception: pass`` swallowed
    every error; ``exist_ok=True`` makes the already-exists case a non-error
    and only OS-level failures are still ignored (best-effort behaviour).
    """
    try:
        os.makedirs(f'{downloads_path}/{keyword}', exist_ok=True)
    except OSError:
        pass
def main():
    """Parse CLI args, show a spinner while Chrome loads the results page,
    then download all scraped thumbnails."""
    # These are read by the other module functions (get_driver,
    # download_images, create_dir, loading_spinner).
    global keyword
    global driver_path
    global downloads_path
    global stop_thread
    downloads_path = os.path.join(
        str(Path.home()), 'Downloads', 'noicesoup_dl')
    parser = argparse.ArgumentParser(
        description='A simple python package for scraping and downloading images from Google')
    parser.add_argument('-k', '--keyword',
                        help='Input search keyword', required=True)
    parser.add_argument('-cd', '--chromedriver',
                        help='Input ChromeDriver path', default="chromedriver")
    args = parser.parse_args()
    keyword = args.keyword
    driver_path = args.chromedriver
    # Spin the loading animation in a background thread while the browser
    # starts and the page loads; stop_thread flips it off.
    stop_thread = False
    thr = threading.Thread(target=loading_spinner)
    thr.start()
    create_dir()
    driver = get_driver()
    stop_thread = True
    print('\r'+'=' * os.get_terminal_size().columns)
    download_images(driver)
    print('=' * os.get_terminal_size().columns)
    print('Done!')
if "__main__" == __name__:
main()
| 2.046875 | 2 |
slurmer/__init__.py | jmigual/slurmer | 0 | 52868 | <reponame>jmigual/slurmer
"""Slurmer package.
Use this package to run tasks in a computing cluster. Supported task schedulers are:
- `SLURM <https://slurm.schedmd.com/documentation.html>`_
"""
from .task_runner import Task, TaskFailedError, TaskParameters, TaskResult
__all__ = ["Task", "TaskFailedError", "TaskParameters", "TaskResult"]
| 1.164063 | 1 |
Web App/sbadmin/core.py | DinhLamPham/PredictiveHRA | 0 | 52996 | import matplotlib as mpl
import matplotlib.pyplot as plt
import networkx as nx
import sbadmin.convertGraph as convert
# Demo: draw a random directed graph with node sizes and a blue edge
# colormap, then export it in Cytoscape JSON format.
G = nx.generators.directed.random_k_out_graph(10, 3, 0.5)
pos = nx.layout.spring_layout(G)
# Node size grows with node index (purely cosmetic).
node_sizes = [3 + 10 * i for i in range(len(G))]
M = G.number_of_edges()
# One colormap value and one alpha per edge.
edge_colors = range(2, M + 2)
edge_alphas = [(5 + i) / (M + 4) for i in range(M)]
nodes = nx.draw_networkx_nodes(G, pos, node_size=node_sizes, node_color='blue')
edges = nx.draw_networkx_edges(G, pos, node_size=node_sizes, arrowstyle='->',
                               arrowsize=10, edge_color=edge_colors,
                               edge_cmap=plt.cm.Blues, width=2)
# set alpha value for each edge
for i in range(M):
    edges[i].set_alpha(edge_alphas[i])
# Colorbar driven by the same edge colormap.
pc = mpl.collections.PatchCollection(edges, cmap=plt.cm.Blues)
pc.set_array(edge_colors)
plt.colorbar(pc)
ax = plt.gca()
ax.set_axis_off()
plt.show()
# Export via the local convert module and compare with networkx's own
# cytoscape exporter.
output = convert.cytoscape_data(G)
print(output)
cy = nx.readwrite.json_graph.cytoscape_data(G)
print(cy)
Utility/draw_stat.py | jeorjebot/kp-anonymity | 1 | 53124 | from pathlib import Path
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
# Read timing results from tmp.txt and render a grouped bar chart comparing
# the Naive and Kapra algorithms.
with open(Path('tmp.txt'), 'r') as f:
    lines_read = f.readlines()
lines = list()
for line in lines_read:
    lines.append(line.split())
labels = list()
naive_time = list()
kapra_time = list()
# Assumes tmp.txt alternates naive/kapra rows with fields
# (<tag> <instance-count> <seconds>) — TODO confirm the file format.
for index, line in enumerate(lines):
    if index % 2 == 0:  # naive
        labels.append(line[1])
        naive_time.append(float(line[2]))
    else:  # kapra
        kapra_time.append(float(line[2]))
x = np.arange(len(labels))  # the label locations
width = 0.35  # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(x - width/2, naive_time, width, label='Naive')
rects2 = ax.bar(x + width/2, kapra_time, width, label='Kapra')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Time (s)')
ax.set_xlabel('Number of instances')
ax.set_title('Time efficiency')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
def autolabel(rects):
    """Attach a text label above each bar in *rects*, displaying its height."""
    for rect in rects:
        height = rect.get_height()
        ax.annotate('{}'.format(height),
                    xy=(rect.get_x() + rect.get_width() / 2, height),
                    xytext=(0, 3),  # 3 points vertical offset
                    textcoords="offset points",
                    ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
fig.tight_layout()
#plt.show()
plt.savefig('stat.png')
| 2.453125 | 2 |
examples/default/app.py | lucuma/authcode | 11 | 53252 | # coding=utf-8
from flask import Flask
from mailshake import ToConsoleMailer, SMTPMailer
from sqlalchemy_wrapper import SQLAlchemy
import settings
# Application wiring: Flask app, SQLAlchemy database, and a mailer whose
# backend depends on the DEBUG setting.
app = Flask(__name__)
app.config.from_object(settings)
db = SQLAlchemy(settings.SQLALCHEMY_URI, app)
if settings.DEBUG:
    # In debug mode print outgoing mail to the console instead of sending.
    mailer = ToConsoleMailer()
else:
    mailer = SMTPMailer(**settings.MAILER_SETTINGS)
| 1.054688 | 1 |
drf_tools/validation/base.py | seebass/drf-toolbox | 5 | 53380 | from abc import ABCMeta, abstractmethod
from django_tooling.exceptions import ValidationError
class FailedValidation():
    """Value object describing one failed validation check.

    When both a message template and a details mapping are supplied, the
    template is rendered via ``str.format`` with the details as keyword
    arguments; otherwise the message is kept verbatim.
    """

    def __init__(self, code, details, msg):
        self.code = code
        self.details = details
        self.msg = msg.format(**details) if (msg and details) else msg
class Validation(metaclass=ABCMeta):
    """
    Base class for all validations.
    The registered key is the app name plus the snake_case version of the class name.
    NameTooLong in secretobject will be available as secretobject_name_too_long

    Subclasses implement ``_validate`` and report problems through
    ``_addFailure``; ``validate`` then raises a single ``ValidationError``
    collecting all failure messages (unless ``raiseError`` is False).
    """
    def __init__(self, fieldName=None):
        # Name-mangled privates: only this base class touches them.
        self.__fieldName = fieldName
        self.__failedValidations = list()
    @abstractmethod
    def _validate(self):
        """Run the concrete checks; call _addFailure for each problem found."""
        pass
    def validate(self, raiseError=True):
        """Execute the validation; raise ValidationError on any failure
        when ``raiseError`` is True."""
        self._validate()
        if self.__failedValidations and raiseError:
            raise ValidationError([failedValidation.msg for failedValidation in self.__failedValidations], self.__fieldName)
    def _addFailure(self, code, details=None, msg=None):
        """Record one failed check (message formatted by FailedValidation)."""
        self.__failedValidations.append(FailedValidation(code, details, msg))
    def getFailedValidations(self):
        """Return the list of FailedValidation objects collected so far."""
        return self.__failedValidations
| 1.953125 | 2 |
tests/test_models.py | nyu-devops-squad/shopcarts | 3 | 53508 | <gh_stars>1-10
"""
Test cases for YourResourceModel Model
"""
import logging
import unittest
import os
from services.models import Shopcart, DataValidationError, db
from tests.factories import ShopcartFactory
from services import app
from werkzeug.exceptions import NotFound
DATABASE_URI = os.getenv(
"DATABASE_URI", "postgres://postgres:postgres@localhost:5432/postgres"
)
######################################################################
# S H O P C A R T M O D E L T E S T C A S E S
######################################################################
class TestShopcart(unittest.TestCase):
""" Test Cases for Shopcart Model """
    @classmethod
    def setUpClass(cls):
        """ This runs once before the entire test suite """
        # Configure the Flask app for testing and bind the model to the DB.
        app.config['TESTING'] = True
        app.config['DEBUG'] = False
        app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URI
        app.logger.setLevel(logging.CRITICAL)
        Shopcart.init_db(app)
    @classmethod
    def tearDownClass(cls):
        """ This runs once after the entire test suite """
        # db.session.close()
        pass
    def setUp(self):
        """ This runs before each test """
        # Start every test from an empty, freshly created schema.
        db.drop_all()  # clean up the last tests
        db.create_all()  # make our sqlalchemy tables
    def tearDown(self):
        """ This runs after each test """
        # Drop the schema so no state leaks into the next test.
        db.session.remove()
        db.drop_all()
######################################################################
# T E S T C A S E S
######################################################################
    def test_create_shopcart(self):
        """ Test create shopcart """
        # Build an (unsaved) Shopcart from factory data and verify every
        # field is copied through the constructor unchanged.
        fake_shopcart = ShopcartFactory()
        shopcart = Shopcart(
            customer_id = fake_shopcart.customer_id,
            product_id = fake_shopcart.product_id,
            product_name = fake_shopcart.product_name,
            product_price = fake_shopcart.product_price,
            quantity = fake_shopcart.quantity
        )
        self.assertTrue(shopcart != None)
        self.assertEqual(shopcart.customer_id, fake_shopcart.customer_id)
        self.assertEqual(shopcart.product_id, fake_shopcart.product_id)
        self.assertEqual(shopcart.product_name, fake_shopcart.product_name)
        self.assertEqual(shopcart.product_price, fake_shopcart.product_price)
        self.assertEqual(shopcart.quantity, fake_shopcart.quantity)
        # self.assertTrue(True)
    def test_update_a_shopcart(self):
        """Update a Shopcart"""
        # shopcart = Shopcart(customer_id=123, product_id=231, product_name="a",product_price=23.1,quantity=1).create()
        # self.assertEqual(shopcart.customer_id, 123)
        # Change it an save it
        # Fetch it back and make sure the customer_id,product_id hasn't changed
        # but the data did change
        shopcart = ShopcartFactory()
        shopcart.create()
        # Re-read the persisted row by its (customer_id, product_id) key.
        product = shopcart.find_by_shopcart_item(shopcart.customer_id,shopcart.product_id)
        self.assertEqual(product.quantity, shopcart.quantity)
        # Mutate one field and persist the update.
        product.quantity = 3
        product.update()
        self.assertEqual(product.customer_id, shopcart.customer_id)
        self.assertEqual(product.product_id, shopcart.product_id)
        self.assertEqual(product.quantity, 3)
    def test_delete_shopcart(self):
        """Delete a Shopcart and verify it is removed from the database."""
        fake_shopcart = ShopcartFactory()
        logging.debug(fake_shopcart)
        fake_shopcart.create()
        logging.debug(fake_shopcart)
        self.assertEqual(len(fake_shopcart.all()), 1)
        # delete the shopcart and make sure it isn't in the database
        fake_shopcart.delete()
        self.assertEqual(len(fake_shopcart.all()), 0)
def test_serialize_shopcart(self):
    """Serializing a Shopcart yields a dict mirroring every attribute."""
    cart = ShopcartFactory()
    payload = cart.serialize()
    self.assertIsNotNone(payload)
    for attr in ("customer_id", "product_id", "product_name",
                 "product_price", "quantity"):
        self.assertIn(attr, payload)
        self.assertEqual(payload[attr], getattr(cart, attr))
def test_deserialize_shopcart(self):
    """A Shopcart can be hydrated from a plain dictionary."""
    payload = {
        "customer_id": 123,
        "product_id": 321,
        "product_name": "abc",
        "product_price": 1234,
        "quantity": 2,
    }
    shopcart = Shopcart()
    shopcart.deserialize(payload)
    self.assertIsNotNone(shopcart)
    for attr, expected in payload.items():
        self.assertEqual(getattr(shopcart, attr), expected)
def test_deserialize_shopcart_with_bad_data(self):
    """deserialize() must raise DataValidationError on an empty payload."""
    shopcart = Shopcart()
    # shopcart.deserialize(data)
    self.assertRaises(DataValidationError, shopcart.deserialize, {})
    # self.assertRaises(DataValidationError, shopcart.deserialize, [])

# def test_find_or_404_not_found(self):
#     """ Find or return 404 NOT found """
#     self.assertRaises(NotFound, Shopcart.find_or_404, 0)
def test_find_by_shopcart_item(self):
    """Look up a single cart row by (customer_id, product_id)."""
    Shopcart(customer_id=123, product_id=231, product_name="a", product_price=23.1, quantity=1).create()
    Shopcart(customer_id=124, product_id=232, product_name="b", product_price=25, quantity=2).create()
    found = Shopcart.find_by_shopcart_item(123, 231)
    self.assertEqual(
        (found.product_name, found.product_price, found.quantity),
        ("a", 23.1, 1),
    )
def test_find_by_customer_id(self):
    """Query all cart rows belonging to one customer."""
    Shopcart(customer_id=123, product_id=231, product_name="a", product_price=23.1, quantity=1).create()
    Shopcart(customer_id=124, product_id=232, product_name="b", product_price=25, quantity=2).create()
    row = Shopcart.find_by_customer_id(123).first()
    self.assertEqual(
        (row.product_id, row.customer_id, row.product_name,
         row.product_price, row.quantity),
        (231, 123, "a", 23.1, 1),
    )
def test_deserialize_bad_data(self):
    """deserialize() rejects every payload that is not a valid dict."""
    invalid_payloads = [
        Shopcart(customer_id="a", product_id="a", product_name="a", product_price="a", quantity="a"),
        Shopcart(customer_id=1, product_id="a", product_name="a", product_price="a", quantity="a"),
        Shopcart(customer_id=1, product_id=1, product_name="a", product_price="a", quantity="a"),
        Shopcart(customer_id=1, product_id=1, product_name="a", product_price=1, quantity="a"),
    ]
    shopcart = Shopcart()
    for payload in invalid_payloads:
        self.assertRaises(DataValidationError, shopcart.deserialize, payload)
def test_find_shopcart_item_by_price_by_customer_id(self):
    """Find Shopcart items above a price for a single customer."""
    Shopcart(customer_id=123, product_id=231, product_name="a", product_price=102.1, quantity=1).create()
    Shopcart(customer_id=123, product_id=232, product_name="b", product_price=25, quantity=2).create()
    match = Shopcart.find_shopcart_items_price_by_customer_id(123, 100)[0]
    self.assertEqual(
        (match.product_id, match.customer_id, match.product_name,
         match.product_price, match.quantity),
        (231, 123, "a", 102.1, 1),
    )
def test_find_shopcart_item_by_price(self):
    """Find Shopcart items above a price across all customers."""
    Shopcart(customer_id=123, product_id=231, product_name="a", product_price=10.1, quantity=1).create()
    Shopcart(customer_id=123, product_id=233, product_name="a", product_price=102.1, quantity=1).create()
    Shopcart(customer_id=121, product_id=232, product_name="b", product_price=106, quantity=2).create()
    Shopcart(customer_id=121, product_id=234, product_name="b", product_price=10, quantity=2).create()
    expectations = [
        (233, 123, "a", 102.1, 1),
        (232, 121, "b", 106, 2),
    ]
    for position, expected in enumerate(expectations):
        item = Shopcart.find_shopcart_items_price(100)[position]
        actual = (item.product_id, item.customer_id, item.product_name,
                  item.product_price, item.quantity)
        self.assertEqual(actual, expected)
| 1.648438 | 2 |
package/scripts/utils.py | xiaoxiaopan118/Ambari-Doris-Service | 5 | 53636 | <filename>package/scripts/utils.py
from resource_management import *
from resource_management.core.resources.system import Execute, Directory, File, Link
import os
import socket
import time
def install():
    """Download the Doris tarball, unpack it, and delete the archive.

    Does nothing when the service directory already exists.
    NOTE(review): the ``{placeholders}`` are resolved by Ambari's
    ``format`` helper from the caller's scope/configuration — confirm
    ``doris_install_dir``/``download_url`` resolve as intended.
    """
    import params
    if not is_service_installed(params):
        # download doris tar.gz
        cmd = format("mkdir -p {doris_install_dir}; cd {doris_install_dir}; wget {download_url} ")
        Execute(cmd, user=params.default_user)
        # install doris (unpack the archive)
        cmd = format("cd {doris_install_dir}; tar -xf {doris_filename}.tar.gz")
        Execute(cmd, user=params.default_user)
        # remove doris installation file
        cmd = format("cd {doris_install_dir}; rm -rf {doris_filename}.tar.gz")
        Execute(cmd, user=params.default_user)
def is_service_installed(params):
    """Return True when the Doris install directory already exists.

    :param params: the service ``params`` module
    :return: True if ``params.doris_dir`` is an existing directory
    """
    installed = os.path.isdir(params.doris_dir)
    if installed:
        Logger.info(format("Service already installed."))
    return installed
def split_be_storage_path(params):
    """Create every BE storage directory listed in ``doris_be_storage_root_path``.

    The configured value is a ``;``-separated list of storage entries.
    NOTE(review): each entry is further split on '.' and the resulting
    *list* is handed to ``Directory`` as-is — confirm this matches the
    intended Doris medium-suffix layout (e.g. ``/data/doris.HDD``).
    """
    split_path = params.doris_be_storage_root_path.split(';')
    Logger.info("Doris BE storage path is {0}.".format(split_path))
    if len(split_path) >= 1:
        for tmp_path in split_path:
            path = tmp_path.split('.')
            # str.split always returns a non-empty list, so the previous
            # `if None != path:` guard was a tautology and was removed.
            Logger.info("Starting mkdir Doris BE storage path, The path is {0}.".format(path))
            Directory(path,
                      create_parents=True,
                      owner=params.default_user,
                      group=params.default_group)
def change_root_passowrd(params):
    """Set the Doris root password through the FE MySQL-protocol port.

    First tries passwordless root (fresh install); if that raises, retries
    authenticating with the configured root password.
    # TODO the function is not appropriate
    NOTE(review): the literal ``{<PASSWORD>}`` placeholders cannot be
    interpolated by ``format`` and look like a redacted secret — confirm
    the real template before use.
    NOTE(review): the misspelled name is kept because callers reference it.
    """
    doris_fe_hostname = params.doris_fe_hostname[0]
    doris_root_password = params.doris_fe_root_password
    doris_fe_query_port = params.doris_fe_query_port
    doris_be_heartbeat_service_port = params.doris_be_heartbeat_service_port
    cmd_no_password = format("mysql -uroot -h {doris_fe_hostname} -P {doris_fe_query_port} "
                             "-e \"SET PASSWORD FOR \'root\' = PASSWORD(\'{<PASSWORD>}\') \" ")
    cmd_has_password = format("mysql -uroot -p{doris_root_password} -h {doris_fe_hostname} -P {doris_fe_query_port} "
                              "-e \"SET PASSWORD FOR \'root\' = PASSWORD(\'{<PASSWORD>}\') \" ")
    try:
        Logger.info("Add Doris Server password, commonds is {0}.".format(cmd_no_password))
        Execute(cmd_no_password, user=params.default_user, logoutput=True, tries=10, try_sleep=3)
    except:
        Logger.info("Changed Doris Server password, commonds is {0}.".format(cmd_has_password))
        Execute(cmd_has_password, user=params.default_user, logoutput=True, ignore_failures=True, tries=10, try_sleep=3)
def add_doris_backend(params):
    """Register every configured BE host with the FE via ``ALTER SYSTEM ADD BACKEND``.

    # TODO the function is not appropriate
    NOTE(review): assumes the root password has already been set by
    ``change_root_passowrd``.
    """
    doris_fe_hostname = params.doris_fe_hostname[0]
    doris_root_password = params.doris_fe_root_password
    doris_fe_query_port = params.doris_fe_query_port
    doris_be_heartbeat_service_port = params.doris_be_heartbeat_service_port
    if None != params.doris_be_hostname:
        for be_host in params.doris_be_hostname:
            cmd = format("mysql -uroot -p{doris_root_password} -h {doris_fe_hostname} -P {doris_fe_query_port} "
                         "-e \"ALTER SYSTEM ADD BACKEND \'{be_host}:{doris_be_heartbeat_service_port}\' \"")
            Logger.info("Starting Doris FE Server, commonds is {0}.".format(cmd))
            Execute(cmd, user=params.default_user, logoutput=True, tries=5, try_sleep=5)
def add_frontend(fe_role, params):
    """Register FOLLOWER/OBSERVER FE nodes with the master FE.

    :param fe_role: either ``'FOLLOWER'`` or ``'OBSERVER'``
    :param params: the service ``params`` module
    """
    # TODO the function is not appropriate
    # Before add backend change root password, This is not appropriate.
    change_root_passowrd(params)
    # add doris fe
    doris_fe_hostname = params.doris_fe_hostname[0]
    doris_fe_observer_hostname = params.doris_fe_observer_hostname
    doris_root_password = params.doris_fe_root_password
    doris_fe_query_port = params.doris_fe_query_port
    doris_fe_edit_log_port = params.doris_fe_edit_log_port
    if (len(params.doris_fe_hostname) >= 1) and (fe_role == 'FOLLOWER'):
        # every configured FE host except the master itself becomes a follower
        for fe_host in params.doris_fe_hostname:
            if fe_host != doris_fe_hostname:
                cmd = format("mysql -uroot -p{doris_root_password} -h {doris_fe_hostname} -P {doris_fe_query_port} "
                             "-e \"ALTER SYSTEM ADD {fe_role} \'{fe_host}:{doris_fe_edit_log_port}\' \"")
                Logger.info("Adding Doris FE Follower Server, commonds is {0}.".format(cmd))
                Execute(cmd, user=params.default_user, logoutput=True, tries=5, try_sleep=5)
    if (len(params.doris_fe_observer_hostname) >= 1) and (fe_role == 'OBSERVER'):
        for fe_observer in params.doris_fe_observer_hostname:
            cmd = format("mysql -uroot -p{doris_root_password} -h {doris_fe_hostname} -P {doris_fe_query_port} "
                         "-e \"ALTER SYSTEM ADD {fe_role} \'{fe_observer}:{doris_fe_edit_log_port}\' \"")
            Logger.info("Adding Doris FE Follower Server, commonds is {0}.".format(cmd))
            Execute(cmd, user=params.default_user, logoutput=True, tries=5, try_sleep=5)
def fe_init_start(fe_role, params):
    """First-time start of an FE node.

    FOLLOWER/OBSERVER nodes must be started with ``--helper`` pointing at
    the master FE's edit-log endpoint; the master starts plain.
    """
    cmd = None
    doris_fe_hostname = params.doris_fe_hostname[0]
    doris_fe_edit_log_port = params.doris_fe_edit_log_port
    # stop it first begin start
    # if os.path.exists(params.doris_fe_pid_file):
    #     self.stop()
    # if doris_fe is follower and observer,add them
    if (fe_role == 'FOLLOWER') or (fe_role == 'OBSERVER'):
        cmd = format("cd {doris_fe_bin_path};"
                     "sh start_fe.sh --helper {doris_fe_hostname}:{doris_fe_edit_log_port} --daemon")
    else:
        cmd = format("cd {doris_fe_bin_path}; "
                     "sh start_fe.sh --daemon")
    Logger.info("Starting Doris FE Server, commonds is {0}.".format(cmd))
    Execute(cmd, user=params.default_user, logoutput=True)
def wait_fe_started(params):
    """Poll the FE log until the master FE reports a successful start.

    Greps the FE system log for the query-port success line, waiting up to
    ``times * interval`` seconds (36 * 5s = 3 minutes).

    :param params: the service ``params`` module
    """
    interval = 5
    times = 36
    cmd = format("grep 'success on {doris_fe_query_port}' {doris_fe_sys_log_dir}/fe.log")
    for go in range(times):
        time.sleep(interval)
        Logger.info("Waiting Doris FE Master Server start, waiting time:{0}.".format(go * 5))
        # Read the grep output exactly once: the original called .read()
        # again on the exhausted pipe when logging success, which always
        # logged an empty string and burned one extra poll cycle.
        output = os.popen(cmd).read()
        if output != '':
            Logger.info("Doris FE Master Server started, {0}.".format(output))
            break
| 1.585938 | 2 |
flow/utils/commons.py | jander99/flow | 36 | 53764 | <filename>flow/utils/commons.py
#!/usr/bin/python
#commons.py
import json
import os
import re
import subprocess
import sys
from enum import Enum
from pydispatch import dispatcher
from flow.logger import Logger
class Commons:
    """Shared constants for the flow utilities."""
    quiet = False  # when True, suppress all non-error console output
    content_json = 'application/json'  # MIME type for JSON payloads
    build_config_file = 'buildConfig.json'  # default build config filename
    forward_slash = '/'
    content_oct_stream = 'application/octet-stream'  # MIME type for binary uploads
def flush_out(string):
    """Log *string* via print_msg and flush stdout immediately."""
    method = 'flush_out'
    print_msg(clazz, method, string)
    sys.stdout.flush()  # force the message out even when stdout is buffered
def byteify(input_str):
    """Recursively encode every str inside *input_str* to UTF-8 bytes.

    Dicts and lists are rebuilt with their contents (and dict keys)
    converted; any non-container, non-str value is returned unchanged.
    """
    if isinstance(input_str, dict):
        return {byteify(key): byteify(value)
                for key, value in input_str.items()}
    if isinstance(input_str, list):
        return [byteify(item) for item in input_str]
    if isinstance(input_str, str):
        return input_str.encode('utf-8')
    return input_str
def print_msg(class_name, method, message, level='DEBUG'):
    """Write a formatted log line to stdout and the file logger.

    Non-error output is suppressed entirely when ``Commons.quiet`` is set.
    ERROR-level messages are additionally broadcast on the
    'publish-error-signal' pydispatch channel.
    """
    if level.lower() != 'error' and Commons.quiet:
        return
    log_level = '[' + level + ']'
    # fixed-width columns: level, class, method, then the message itself
    log_message = '{:7s} {:11s} {:35s} {!s:s}'.format(log_level, class_name, method, message)
    try:
        print(log_message)
        Logger(log_message)
    except:
        # fall back to raw bytes when the console cannot encode the text
        print(log_message.encode('utf-8'))
    if level == 'ERROR':
        SIGNAL = 'publish-error-signal'
        sender = {}
        # message may be any iterable of parts; join their str() forms
        new_message = ''.join(str(v) for v in message)
        dispatcher.send(signal=SIGNAL, sender=sender, message=new_message, class_name=class_name, method_name=method)
def write_to_file(path, text, open_func=open, mode="a"):
    """Write *text* to the file at *path* (appending by default)."""
    with open_func(path, mode) as handle:
        handle.write(text)
def get_files_of_type_from_directory(file_type, directory):
    """Return base names of regular files in *directory* whose name ends
    with *file_type* (case-insensitive suffix match).

    Directories with a matching suffix are excluded.  The original built
    four intermediate lists and ran a redundant second ``os.path.isfile``
    pass; this is a single comprehension.
    """
    return [
        entry
        for entry in os.listdir(directory)
        if entry.lower().endswith(file_type)
        and os.path.isfile(os.path.join(directory, entry))
    ]
# TODO convert all popens that need decoding to call this
def execute_command(cmd):
    """Run *cmd* (an argument list, no shell) and return its combined
    stdout/stderr decoded as UTF-8.

    Uses subprocess.run so the child process is waited on and its pipe is
    closed; the original Popen read the pipe but never reaped the child.
    """
    completed = subprocess.run(cmd, shell=False, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    return completed.stdout.decode("UTF8")
def verify_version(config):
    """Exit with status 1 when the build config carries no version number.

    A missing version usually means the repository has no version tag.
    """
    method = 'verify_version'
    if config.version_number is None:
        print_msg(clazz, method, 'Version not defined. Is your repo tagged with a version number?', 'ERROR')
        exit(1)
class DeploymentState(Enum):
    """Terminal states reported for a deployment."""
    failure = 'fail'
    success = 'success'
class Object:
    """Mixin giving plain objects a pretty-printed JSON representation."""

    def to_JSON(self):
        """Serialize this object's attribute dict as indented JSON."""
        return json.dumps(self, default=lambda obj: obj.__dict__,
                          sort_keys=False, indent=4)
| 1.601563 | 2 |
Utilities/calculate_embeddings.py | noah-hoffmann/CGAT | 0 | 53892 | <gh_stars>0
import pickle
import gzip as gz
from argparse import ArgumentParser
from CGAT.lightning_module import LightningModel, collate_fn
from CGAT.data import CompositionData
from torch.utils.data import DataLoader
import os
from glob import glob
import torch
from tqdm import tqdm
def load(file):
    """Deserialize a pickled object from a gzip-compressed file.

    Uses a context manager so the file handle is closed deterministically
    (the original left it open until garbage collection).
    """
    with gz.open(file) as handle:
        return pickle.load(handle)
def save(data, file):
    """Pickle *data* into a gzip-compressed file.

    Uses a context manager so the output is flushed and the handle closed
    deterministically (the original left it open until garbage collection).
    """
    with gz.open(file, 'wb') as handle:
        pickle.dump(data, handle)
def main():
    """CLI entry point: compute graph embeddings for pickled datasets.

    Every input pickle is run through the trained CGAT model; the
    graph-level embedding replaces the ``'input'`` field and the result is
    written under ``--target-path`` (one sub-directory per data path when
    several are given).
    """
    parser = ArgumentParser()
    parser.add_argument('--data-path', '-d',
                        type=str,
                        required=True,
                        nargs='+')
    parser.add_argument('--target-path', '-t',
                        type=str,
                        required=True)
    parser.add_argument('--model-path', '-m',
                        type=str,
                        required=True)
    parser.add_argument('--fea-path', '-f',
                        type=str,
                        default=None)
    parser.add_argument('--batch-size', '-b',
                        type=int,
                        default=100)
    args = parser.parse_args()

    model = LightningModel.load_from_checkpoint(args.model_path, train=False)
    model.cuda()

    # Validate/prepare the target directory once, up front (the original
    # re-checked it inside the loop). Message fixed: the old text said
    # "must not be a directory and not an existing file" although the
    # check rejects existing *files* only.
    if os.path.isfile(args.target_path):
        raise ValueError("'target-path' must be a directory, not an existing file!")
    if not os.path.isdir(args.target_path):
        os.makedirs(args.target_path)

    for data_path in tqdm(args.data_path):
        if os.path.isdir(data_path):
            files = glob(os.path.join(data_path, '*.pickle.gz'))
        else:
            files = [data_path]
        for file in tqdm(files):
            data = load(file)
            dataset = CompositionData(
                data=data,
                fea_path=args.fea_path if args.fea_path else model.hparams.fea_path,
                max_neighbor_number=model.hparams.max_nbr,
                target=model.hparams.target
            )
            loader = DataLoader(dataset, batch_size=args.batch_size,
                                shuffle=False, collate_fn=collate_fn)
            embedding_list = []
            for batch in loader:
                with torch.no_grad():
                    embedding_list.append(
                        model.evaluate(batch, return_graph_embedding=True).cpu())
            data['input'] = torch.cat(embedding_list).numpy()
            if len(args.data_path) == 1:
                save(data, os.path.join(args.target_path, os.path.basename(file)))
            else:
                # one sub-directory per input data path; create it before
                # saving (the original joined the path without creating it,
                # so save() failed on a missing directory)
                subdir = os.path.join(args.target_path, os.path.basename(data_path))
                os.makedirs(subdir, exist_ok=True)
                save(data, os.path.join(subdir, os.path.basename(file)))
if __name__ == '__main__':
    # script entry point
    main()
| 1.640625 | 2 |
tests/core/test_injection.py | keelerm84/antidote | 0 | 54020 | <reponame>keelerm84/antidote<gh_stars>0
import typing
import pytest
from antidote._internal.argspec import Arguments
from antidote.core import DependencyContainer, inject
from antidote.exceptions import DependencyNotFoundError
class Service:
    """Dummy dependency class used as an injection target in the tests."""
    pass
class AnotherService:
    """Second dummy dependency, registered but never expected to be injected."""
    pass
@pytest.mark.parametrize(
    'expected,kwargs',
    [
        pytest.param((None, None),
                     dict(),
                     id='nothing'),
        pytest.param((Service, None),
                     dict(dependencies=dict(first=Service)),
                     id='dependencies:dict-first'),
        pytest.param((Service, None),
                     dict(dependencies=(Service,)),
                     id='dependencies:tuple-first'),
        pytest.param((None, Service),
                     dict(dependencies=dict(second=Service)),
                     id='dependencies:dict-second'),
        pytest.param((None, Service),
                     dict(dependencies=(None, Service)),
                     id='dependencies:tuple-second'),
        pytest.param(('first', 'second'),
                     dict(dependencies=lambda s: s),
                     id='dependencies:callable'),
        pytest.param((Service, Service),
                     dict(dependencies=lambda s: Service),
                     id='dependencies:callable2'),
        pytest.param((None, None),
                     dict(dependencies=lambda s: None),
                     id='dependencies:callable3'),
        pytest.param(('first', 'second'),
                     dict(dependencies="{arg_name}"),
                     id='dependencies:str'),
        pytest.param(('prefix:first', 'prefix:second'),
                     dict(dependencies="prefix:{arg_name}"),
                     id='dependencies:str2'),
        pytest.param(('first', 'second'),
                     dict(use_names=True),
                     id='use_names:True'),
        pytest.param((None, None),
                     dict(use_names=False),
                     id='use_names:False'),
        pytest.param((None, 'second'),
                     dict(use_names=['second']),
                     id='use_names:list')
    ]
)
def test_without_type_hints(expected, kwargs):
    """@inject must resolve un-annotated arguments identically on plain
    functions, methods, classmethods and staticmethods.

    *expected* names the dependency each of the two arguments should
    receive; None means the default value must be kept.
    """
    container = DependencyContainer()
    container.update_singletons({Service: Service()})
    container.update_singletons({AnotherService: AnotherService()})
    container.update_singletons({'first': object()})
    container.update_singletons({'second': object()})
    container.update_singletons({'prefix:first': object()})
    container.update_singletons({'prefix:second': object()})
    default = object()

    @inject(container=container, **kwargs)
    def f(first=default, second=default):
        return first, second

    class A:
        @inject(container=container, **kwargs)
        def method(self, first=default, second=default):
            return first, second

        @inject(container=container, **kwargs)
        @classmethod
        def class_method(cls, first=default, second=default):
            return first, second

        @inject(container=container, **kwargs)
        @staticmethod
        def static_method(first=default, second=default):
            return first, second

    # resolve the expected dependency keys through the container itself
    expected = tuple((
        container.get(d) if d is not None else default
        for d in expected
    ))
    assert expected == f()
    assert expected == A().method()
    assert expected == A.class_method()
    assert expected == A.static_method()

    # explicitly passed arguments must always win over injection
    a, b = object(), object()
    assert (a, b) == f(a, b)
    assert (a, b) == A().method(a, b)
    assert (a, b) == A.class_method(a, b)
    assert (a, b) == A.static_method(a, b)
@pytest.mark.parametrize(
    'expected, kwargs',
    [
        pytest.param((Service, None),
                     dict(),
                     id='nothing'),
        pytest.param((Service, None),
                     dict(dependencies=dict(first=Service)),
                     id='dependencies:dict-first'),
        pytest.param((Service, None),
                     dict(dependencies=(Service,)),
                     id='dependencies:tuple-first'),
        pytest.param((Service, Service),
                     dict(dependencies=dict(second=Service)),
                     id='dependencies:dict-second'),
        pytest.param((Service, Service),
                     dict(dependencies=(None, Service)),
                     id='dependencies:tuple-second'),
        pytest.param(('first', 'second'),
                     dict(dependencies=lambda s: s),
                     id='dependencies:callable'),
        pytest.param((Service, Service),
                     dict(dependencies=lambda s: Service),
                     id='dependencies:callable2'),
        pytest.param((Service, None),
                     dict(dependencies=lambda s: None),
                     id='dependencies:callable3'),
        pytest.param(('first', 'second'),
                     dict(dependencies="{arg_name}"),
                     id='dependencies:str'),
        pytest.param(('prefix:first', 'prefix:second'),
                     dict(dependencies="prefix:{arg_name}"),
                     id='dependencies:str2'),
        pytest.param((Service, 'second'),
                     dict(use_names=True),
                     id='use_names:True'),
        pytest.param((Service, None),
                     dict(use_names=False),
                     id='use_names:False'),
        pytest.param((Service, None),
                     dict(use_names=['first']),
                     id='use_names:list-first'),
        pytest.param((Service, 'second'),
                     dict(use_names=['second']),
                     id='use_names:list-second'),
        pytest.param((Service, None),
                     dict(use_type_hints=True),
                     id='use_type_hints:True'),
        pytest.param((Service, None),
                     dict(use_type_hints=['first']),
                     id='use_type_hints:list-first'),
        pytest.param((Service, 'second'),
                     dict(use_type_hints=['first'], use_names=True),
                     id='use_type_hints:list-first+use_names=True'),
        pytest.param((None, None),
                     dict(use_type_hints=['second']),
                     id='use_type_hints:list-second'),
        pytest.param(('first', 'second'),
                     dict(use_type_hints=['second'], use_names=True),
                     id='use_type_hints:list-second+use_names=True'),
        pytest.param((None, None),
                     dict(use_type_hints=False),
                     id='use_type_hints:False'),
        pytest.param(('first', 'second'),
                     dict(use_type_hints=False, use_names=True),
                     id='use_type_hints:False+use_names=True'),
    ]
)
def test_with_type_hints(expected, kwargs):
    """Type-hinted arguments must be resolved through the hint by default,
    with use_names/use_type_hints toggling the resolution strategy.

    *expected* names the dependency each argument should receive; None
    means the default value must be kept.
    """
    container = DependencyContainer()
    container.update_singletons({Service: Service(),
                                 AnotherService: AnotherService(),
                                 'first': object(),
                                 'second': object(),
                                 'prefix:first': object(),
                                 'prefix:second': object()})
    default = object()

    @inject(container=container, **kwargs)
    def f(first: Service = default, second: str = default):
        return first, second

    class A:
        @inject(container=container, **kwargs)
        def method(self, first: Service = default, second: str = default):
            return first, second

        @inject(container=container, **kwargs)
        @classmethod
        def class_method(cls, first: Service = default, second: str = default):
            return first, second

        @inject(container=container, **kwargs)
        @staticmethod
        def static_method(first: Service = default, second: str = default):
            return first, second

    # resolve the expected dependency keys through the container itself
    expected = tuple((
        container.get(d) if d is not None else default
        for d in expected
    ))
    assert expected == f()
    assert expected == A().method()
    assert expected == A.class_method()
    assert expected == A.static_method()

    # explicitly passed arguments must always win over injection
    a, b = object(), object()
    assert (a, b) == f(a, b)
    assert (a, b) == A().method(a, b)
    assert (a, b) == A.class_method(a, b)
    assert (a, b) == A.static_method(a, b)
@pytest.mark.parametrize(
    'type_hint',
    [str, int, float, set, list, dict, complex, type, tuple, bytes, bytearray,
     typing.Optional, typing.Sequence]
)
def test_ignored_type_hints(type_hint):
    """Builtin and typing-module hints must never be treated as dependencies,
    even when a matching singleton is registered."""
    container = DependencyContainer()
    container.update_singletons({type_hint: object()})

    @inject(container=container)
    def f(x: type_hint):
        pass

    # the hint is ignored, so the required argument is simply missing
    with pytest.raises(TypeError):
        f()
def test_arguments():
    """An explicit Arguments spec drives injection even through **kwargs."""
    container = DependencyContainer()
    container.update_singletons(dict(a=12, b=24))

    def f(a, b):
        pass

    # take the argument specification from f, apply it to g
    arguments = Arguments.from_callable(f)

    @inject(arguments=arguments, use_names=True, container=container)
    def g(**kwargs):
        return kwargs

    assert dict(a=12, b=24) == g()
@pytest.mark.parametrize(
    'error,kwargs',
    [
        pytest.param(TypeError,
                     dict(),
                     id="unknown-dependency"),
        pytest.param(DependencyNotFoundError,
                     dict(dependencies=(Service,)),
                     id="dependencies:unknown-dependency-tuple"),
        pytest.param(DependencyNotFoundError,
                     dict(dependencies=dict(x=Service)),
                     id="dependencies:unknown-dependency-dict"),
        pytest.param(DependencyNotFoundError,
                     dict(dependencies=lambda s: Service),
                     id="dependencies:unknown-dependency-callable"),
        pytest.param(DependencyNotFoundError,
                     dict(dependencies="unknown:{arg_name}"),
                     id="dependencies:unknown-dependency-str"),
        pytest.param((ValueError, TypeError),
                     dict(dependencies=(None, None)),
                     id="dependencies:too-much-arguments"),
        pytest.param(TypeError,
                     dict(dependencies=object()),
                     id="dependencies:unsupported-type"),
        pytest.param(TypeError,
                     dict(dependencies={1: 'x'}),
                     id="dependencies:invalid-key-type"),
        pytest.param(ValueError,
                     dict(dependencies=dict(unknown=DependencyContainer)),
                     id="dependencies:unknown-argument-dict"),
        pytest.param(TypeError,
                     dict(use_names=False),
                     id="use_names:unknown-dependency-False"),
        pytest.param(DependencyNotFoundError,
                     dict(use_names=True),
                     id="use_names:unknown-dependency-True"),
        pytest.param(DependencyNotFoundError,
                     dict(use_names=['x']),
                     id="use_names:unknown-dependency-list"),
        pytest.param(ValueError,
                     dict(use_names=['y']),
                     id="use_names:unknown-argument-list"),
        pytest.param(ValueError,
                     dict(use_names=['x', 'y']),
                     id="use_names:unknown-argument-list2"),
        pytest.param(TypeError,
                     dict(use_names=[]),
                     id="use_names:empty"),
        pytest.param(TypeError,
                     dict(use_names=object()),
                     id="use_names:unsupported-type"),
        pytest.param(TypeError,
                     dict(use_names=[1]),
                     id="use_names:invalid-name-type"),
        pytest.param(TypeError,
                     dict(use_type_hints=object()),
                     id="use_type_hints:unsupported-type"),
        pytest.param(TypeError,
                     dict(use_type_hints=[1]),
                     id="use_type_hints:invalid-name-type"),
        pytest.param(ValueError,
                     dict(use_type_hints=['y']),
                     id="use_type_hints:unknown-arg"),
    ]
)
def test_invalid(error, kwargs):
    """Invalid @inject configurations or unresolvable dependencies must
    raise the same error for every kind of callable."""
    container = DependencyContainer()

    with pytest.raises(error):
        @inject(container=container, **kwargs)
        def f(x):
            return x

        f()

    with pytest.raises(error):
        class A:
            @inject(container=container, **kwargs)
            def method(self, x):
                return x

        A().method()

    with pytest.raises(error):
        class A:
            @inject(container=container, **kwargs)
            @classmethod
            def classmethod(cls, x):
                return x

        A.classmethod()

    with pytest.raises(error):
        class A:
            @inject(container=container, **kwargs)
            @staticmethod
            def staticmethod(x):
                return x

        A.staticmethod()
@pytest.mark.parametrize(
    'error,kwargs',
    [
        pytest.param(ValueError,
                     dict(dependencies=dict(self='x')),
                     id="dependencies"),
        pytest.param(ValueError,
                     dict(use_names=('self',)),
                     id="use_names"),
        pytest.param(ValueError,
                     dict(use_type_hints=('self',)),
                     id="use_type_hints"),
    ]
)
def test_cannot_inject_self(error, kwargs):
    """Targeting the 'self' argument must be rejected at decoration time."""
    container = DependencyContainer()
    container.update_singletons(dict(x=object(), y=object()))

    with pytest.raises(error):
        class A:
            @inject(container=container, **kwargs)
            def method(self, x=None):
                return x

        A()

    with pytest.raises(error):
        class A:
            @inject(container=container, **kwargs)
            @classmethod
            def classmethod(self, x=None):
                return x

        A()
def test_invalid_type_hint():
    """A type-hint dependency missing from the container raises on call."""
    @inject(container=DependencyContainer())
    def f(x: Service):
        return x

    with pytest.raises(DependencyNotFoundError):
        f()
def test_no_injections():
    """@inject must be a no-op when nothing is injectable."""
    container = DependencyContainer()

    def f(x):
        return x

    injected_f = inject(f, container=container)

    # When nothing can be injected, the same function should be returned
    assert injected_f is f
def test_already_injected():
    """Re-applying @inject to an already-injected function is a no-op."""
    container = DependencyContainer()

    @inject(container=container, use_names=True)
    def f(x):
        return x

    injected_f = inject(f, container=container)

    # When the function has already its arguments injected, the same function
    # should be returned
    assert injected_f is f
def test_class_inject():
    """@inject cannot be applied to a class definition."""
    container = DependencyContainer()
    with pytest.raises(TypeError):
        @inject(container=container)
        class Dummy:
            pass
| 1.351563 | 1 |
future-release/api/migrations/0006_auto_20210606_1218.py | shauray8/we_must_know_website | 0 | 54148 | <gh_stars>0
# Generated by Django 3.1.4 on 2021-06-06 06:48
import api.models
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make Room.code unique with a callable default."""

    dependencies = [
        ('api', '0005_auto_20210528_1632'),
    ]

    operations = [
        migrations.AlterField(
            model_name='room',
            name='code',
            # default is the generate_unique_code callable, evaluated per row
            field=models.CharField(default=api.models.generate_unique_code, max_length=200, unique=True),
        ),
    ]
| 0.863281 | 1 |
GOTE/utils/logger.py | Lenferd/ANSYS-OpenFOAM | 0 | 54276 | from enum import IntEnum
class LogLvl(IntEnum):
    """Logger severity levels; higher values are more verbose."""
    LOG_ERROR = 0
    LOG_INFO = 1
    LOG_DEBUG = 2

    def to_str(self):
        """Return the bracketed log prefix, e.g. ``"[LOG_INFO] "``."""
        return "[{0}] ".format(self.name)
class Logger:
    """Minimal console logger with a configurable verbosity threshold."""

    def __init__(self, log_lvl=LogLvl.LOG_INFO):
        self.log_lvl = log_lvl

    def log(self, msg_log_lvl=LogLvl.LOG_INFO, message=""):
        """Print *message* when *msg_log_lvl* is at or below the threshold."""
        text = self._generate_message(msg_log_lvl, message)
        if text:
            print(text)

    def error(self, message):
        self.log(LogLvl.LOG_ERROR, message)

    def info(self, message):
        self.log(LogLvl.LOG_INFO, message)

    def debug(self, message):
        self.log(LogLvl.LOG_DEBUG, message)

    def _generate_message(self, msg_log_lvl=LogLvl.LOG_INFO, message=""):
        """Prefix every line of *message* with the level tag, or return ""
        when the message is below the current verbosity threshold."""
        if self.log_lvl < msg_log_lvl:
            return ""
        prefix = msg_log_lvl.to_str()
        return prefix + message.replace('\n', '\n' + prefix)

    def set_level(self, log_lvl):
        """Change the verbosity threshold."""
        self.log_lvl = log_lvl
| 2.21875 | 2 |
tests/text/test_pipeline_copy.py | javispp/biome-text | 62 | 54404 | <reponame>javispp/biome-text
import pytest
from numpy.testing import assert_allclose
from biome.text import Dataset
from biome.text import Pipeline
from biome.text import Trainer
from biome.text import TrainerConfiguration
@pytest.fixture
def pipeline():
    """A minimal text-classification pipeline with two labels."""
    return Pipeline.from_config(
        {
            "name": "test_pipeline_copy",
            "head": {
                "type": "TextClassification",
                "labels": ["a", "b"],
            },
        }
    )
@pytest.fixture
def dataset():
    """A two-example labelled dataset matching the pipeline's labels."""
    return Dataset.from_dict(
        {
            "text": ["this is", "a test"],
            "label": ["a", "b"],
        }
    )
def test_copy(pipeline):
    """A copied pipeline must produce the same prediction probabilities."""
    prediction = pipeline.predict("check this")
    pipeline_copy = pipeline.copy()
    prediction_copy = pipeline_copy.predict("check this")
    assert_allclose(prediction["probabilities"], prediction_copy["probabilities"])
def test_train_from_pretrained(pipeline, dataset, tmp_path):
    """A pipeline reloaded from a saved checkpoint predicts identically."""
    output_path = tmp_path / "test_train_from_pretrained_output"
    trainer_config = TrainerConfiguration(max_epochs=1, batch_size=2, gpus=0)
    trainer = Trainer(
        pipeline=pipeline, train_dataset=dataset, trainer_config=trainer_config
    )
    trainer.fit(output_path)

    prediction = pipeline.predict("a test")
    pipeline_loaded = Pipeline.from_pretrained(output_path / "model.tar.gz")
    prediction_loaded = pipeline_loaded.predict("a test")

    assert_allclose(prediction["probabilities"], prediction_loaded["probabilities"])
| 1.453125 | 1 |
Operators/ArithmeticOperators.py | dsabhrawal/python-examples | 1 | 54532 | <filename>Operators/ArithmeticOperators.py
# Demonstrates Python's arithmetic operators:
#   +   addition
#   -   subtraction
#   *   multiplication
#   /   division (always float)
#   %   modulus
#   //  integer (floor) division
#   **  exponentiation
#
# If either operand is a float, the result is a float.
print(3+2)    # prints 5
print(3-2)    # prints 1
print(3*2)    # prints 6
print(2.5+2)  # prints 4.5 (float)
# Division always returns a float, regardless of the operands
print(10/2)   # prints 5.0
# Modulus: int % int -> int; if either operand is a float, the result is a float
print(5%2)      # prints 1
print(14.75%4)  # prints 2.75
# Exponentiation: int ** int -> int; if either operand is a float, the result is a float
print(3.5**2)  # prints 12.25
print(3**3)    # prints 27
# Integer (floor) division: normal division, then floor() of the result
print(10.5//2)  # prints 5.0
print(-5//2)    # prints -3 (floors toward negative infinity)
# "Arithmetic" on strings: + concatenates, * repeats
print('2'+'3')        # prints 23
print('abc'+str(2+3)) # prints abc5
print(3*'Hello')      # prints HelloHelloHello
print(3*True)         # prints 3 (True is converted to 1)
# Arithmetic on complex numbers
e = 2+3j
f = 4-6j
print(e+f)  # prints (6-3j)
print(e*f)  # prints (26+0j)
print(e-f)  # prints (-2+9j)
documents/models.py | acdh-oeaw/thunau-old | 0 | 54660 | from django.db import models
from django.core.urlresolvers import reverse
from vocabs.models import SkosConcept
from places.models import Place
from bib.models import Book
class Institution(models.Model):
    """An organisation, optionally nested under a parent institution."""
    name = models.CharField(max_length=300, blank=True)
    abbreviation = models.CharField(max_length=300, blank=True)
    identifier = models.CharField(max_length=300, blank=True)
    # self-referential FK; no on_delete argument -- NOTE(review): this
    # syntax requires Django < 2.0
    parent_institution = models.ForeignKey('Institution', blank=True, null=True)

    def __str__(self):
        return "{}".format(self.name)
class Person(models.Model):
    """A person, optionally affiliated with an Institution."""
    forename = models.CharField(max_length=300, blank=True)
    name = models.CharField(max_length=300, blank=True)
    # no on_delete argument -- NOTE(review): this syntax requires Django < 2.0
    institution = models.ForeignKey(Institution, blank=True, null=True)
    identifier = models.CharField(max_length=300, blank=True)

    def __str__(self):
        return "{}".format(self.name)
class Document(models.Model):
    """A digitized document with provenance, storage and digitization metadata.

    German ``verbose_name`` values reflect the project's admin UI language.
    """
    # Identifier carried over from the legacy database.
    legacy_id = models.CharField(max_length=300, blank=True, verbose_name='ID')
    filename = models.CharField(max_length=300, blank=True, verbose_name="Dateiname")
    entry_order = models.CharField(
        max_length=300, blank=True, verbose_name="Ordnungskriterium/Eingabe"
    )
    # Controlled-vocabulary concepts (SKOS) for medium and formats.
    medium = models.ForeignKey(
        SkosConcept, blank=True, null=True, related_name='medium', verbose_name="Medium"
    )
    analogue_format = models.ForeignKey(
        SkosConcept, blank=True, null=True, related_name="analogue_format",
        verbose_name="Analoges Format"
    )
    author = models.ManyToManyField(
        Person, blank=True, related_name="author", verbose_name="Autor"
    )
    institution = models.ManyToManyField(
        Institution, blank=True, verbose_name="Institution", related_name="institution_document"
    )
    # Free-text date as recorded on the analogue original.
    date_analogue = models.CharField(max_length=300, blank=True, verbose_name="Analoges Datum")
    date_digitization = models.DateField(
        auto_now=False, blank=True, null=True, verbose_name="Datum der Digitalisierung"
    )
    digital_format = models.ForeignKey(
        SkosConcept, blank=True, null=True, related_name="digital_format",
        verbose_name="Speicherformat"
    )
    note = models.TextField(blank=True, verbose_name="Anmerkung")
    content = models.TextField(blank=True, verbose_name="Inhalt")
    topic_group = models.ForeignKey(
        SkosConcept, blank=True, null=True, related_name="topic_group",
        verbose_name="Gruppe"
    )
    combination = models.CharField(max_length=300, blank=True, verbose_name="Kombination")
    # Find number in the finds database ("Fundnummer in FDB").
    location_id = models.CharField(max_length=300, blank=True, verbose_name="Fundnummer in FDB")
    place = models.ForeignKey(Place, blank=True, null=True, verbose_name="KG/Areal")
    location_digitized_object = models.CharField(
        max_length=300, blank=True, verbose_name="Aufbewahrung Datei"
    )
    location_analogue = models.CharField(max_length=300, blank=True, verbose_name="Standort analog")
    curator = models.ForeignKey(
        Person, blank=True, null=True, verbose_name="Bearbeiter Digitalisierung"
    )
    # File size in kilobytes (per verbose_name).
    filesize = models.FloatField(blank=True, null=True, verbose_name="Dateigröße KB")
    # NOTE(review): field name has a typo ("digizization" for "digitization");
    # renaming would require a schema migration, so it is documented only.
    place_digizization = models.ForeignKey(
        Institution, blank=True, null=True, related_name="place_digizization",
        verbose_name="Ort der Digitalisierung"
    )
    reference = models.ManyToManyField(Book, blank=True, verbose_name="Literaturzitate")
    path = models.CharField(max_length=300, blank=True, verbose_name="Dateipfad")
    amendments = models.TextField(blank=True, verbose_name="Ergänzungen")
    def __str__(self):
        return "{}".format(self.filename)
    def get_absolute_url(self):
        """Canonical detail-view URL for this document."""
        return reverse('documents:document_detail', kwargs={'pk': self.id})
| 1.289063 | 1 |
src/animal_avatar/shapes/patterns.py | sprotg/animal-avatar-generator | 1 | 54788 | <reponame>sprotg/animal-avatar-generator<filename>src/animal_avatar/shapes/patterns.py
from animal_avatar.utils.colors import darken
PATTERNS = (
lambda color:
f'<path fill="{darken(color, -30)}" '
'd="M156 387.1c-57.8-12.3-96.7-42-96.7-107 0-9.4.8-18.6 2.4-27.6 '
'19.1 3.4 39.3 17 53.6 38.1a105 105 0 015 8.2 73.6 73.6 0 0021 '
'23.8c4.9 3.6 9.5 8.3 13.3 14 12.3 18.2 12.6 40 1.3 50.5z"/>',
lambda color:
f'<ellipse cx="323.8" cy="217.4" fill="{darken(color, -30)}" '
'rx="52.3" ry="77.6" transform="rotate(-32.5 323.8 217.4)"/>',
lambda color:
f'<path fill="{darken(color, 30)}" '
'd="M235 161.3c14.4 27.5 0 71-41.1 115.2-31.8 34.1-86.6 16.8-101-10.8s7.5-67.4 48.9-89 78.9-43 93.3-15.4z"/>',
)
| 1.359375 | 1 |
# Copyright (C) 2013-2020, <NAME>
# and ftputil contributors (see `doc/contributors.txt`)
# See the file LICENSE for licensing terms.
"""
tool.py - helper code
"""
import os
__all__ = ["same_string_type_as", "as_str", "as_str_path"]
# Encoding to convert between byte string and unicode string. This is
# a "lossless" encoding: Strings can be encoded/decoded back and forth
# without information loss or causing encoding-related errors. The
# `ftplib` module under Python 3 also uses the "latin1" encoding
# internally. It's important to use the same encoding here, so that users who
# used `ftplib` to create FTP items with non-ASCII characters can access them
# in the same way with ftputil.
LOSSLESS_ENCODING = "latin1"
def same_string_type_as(type_source, string):
"""
Return a string of the same type as `type_source` with the content from
`string`.
If the `type_source` and `string` don't have the same type, use
`LOSSLESS_ENCODING` above to encode or decode, whatever operation is needed.
"""
if isinstance(type_source, bytes) and isinstance(string, str):
return string.encode(LOSSLESS_ENCODING)
elif isinstance(type_source, str) and isinstance(string, bytes):
return string.decode(LOSSLESS_ENCODING)
else:
return string
def as_str(string):
    """
    Return ``string`` as a unicode string.

    ``bytes`` input is decoded with ``LOSSLESS_ENCODING``; ``str`` input is
    returned unchanged. Any other type raises ``TypeError``.
    """
    if isinstance(string, str):
        return string
    if isinstance(string, bytes):
        return string.decode(LOSSLESS_ENCODING)
    raise TypeError("`as_str` argument must be `bytes` or `str`")
def as_str_path(path):
    """
    Return ``path`` as a unicode string.

    ``path`` may be ``bytes``, ``str`` or an ``os.PathLike`` object that
    converts to one of those; ``os.fspath`` raises ``TypeError`` for
    anything else.
    """
    return as_str(os.fspath(path))
| 2.09375 | 2 |
import cherrypy
from gspread import CellNotFound
from python_utility.spreadsheet.simple_spreadsheet import SimpleSpreadsheet
class SpreadsheetService:
    """CherryPy application exposing a small spreadsheet search/replace API.

    Endpoints: ``/`` (greeting), ``/spreadsheet`` (JSON POST), ``/status``
    (build info).
    """
    @staticmethod
    def read_status() -> str:
        """Return version/hash/build-date lines from the generated Build module."""
        try:
            from python_utility.build import Build
        except ImportError:
            # Fallback stub used when the build metadata module was not generated.
            # TODO: Understand the best practice.
            from python_utility.build_undefined import Build # type: ignore
        return 'Version: ' + Build.GIT_TAG + '\n' \
            + 'Git hash: ' + Build.GIT_HASH + '\n' \
            + 'Build date: ' + Build.BUILD_DATE + '\n'
    @cherrypy.expose
    def index(self):
        """Plain-text greeting at the service root."""
        cherrypy.response.headers['Content-Type'] = 'text/plain'
        return 'Hello friend.\n'
    @cherrypy.expose
    @cherrypy.tools.json_out()
    @cherrypy.tools.json_in()
    def spreadsheet(self):
        """Search a cell by value and write ``replace`` x-offset columns to its right.

        Expects a JSON body with keys ``search``, ``replace`` and ``x-offset``;
        returns a plain status string (serialized as JSON by json_out).
        """
        request = cherrypy.request.json
        # Validate required keys one by one; first missing key wins.
        if 'search' not in request:
            response = 'search missing'
        elif 'replace' not in request:
            response = 'replace missing'
        elif 'x-offset' not in request:
            response = 'x-offset missing'
        else:
            search = request['search']
            replace = request['replace']
            x_offset = request['x-offset']
            spreadsheet = SimpleSpreadsheet()
            spreadsheet.connect()
            try:
                cell = spreadsheet.search(search)
                # Write into the cell ``x_offset`` columns right of the match.
                spreadsheet.edit_coordinates(
                    cell.row,
                    cell.col + int(x_offset),
                    replace
                )
                response = 'Success'
            except CellNotFound as e:
                response = 'Not found: ' + str(e)
        return response
    @cherrypy.expose
    def status(self):
        """Plain-text build status endpoint."""
        cherrypy.response.headers['Content-Type'] = 'text/plain'
        return SpreadsheetService.read_status()
| 1.539063 | 2 |
aprepi/settings.py | MikaelSantilio/aprepi-django | 0 | 55172 | <reponame>MikaelSantilio/aprepi-django
from pathlib import Path
from datetime import timedelta
import os
import environ
env = environ.Env()

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# BUG FIX: the committed value was a broken placeholder (`<KEY>'`, a syntax
# error). Read the key from the environment, with an obviously-insecure
# development fallback.
SECRET_KEY = os.getenv('SECRET_KEY', 'insecure-dev-key-change-me')

# SECURITY WARNING: don't run with debug turned on in production!
# SECURITY FIX: `eval(os.getenv('DEBUG', ...))` executed arbitrary code taken
# from an environment variable. Parse the flag by string comparison instead;
# the default remains True (debug on), matching the old behaviour.
DEBUG = os.getenv('DEBUG', 'True').lower() in ('true', '1', 'yes')

ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.humanize',

    # Third-party APPs
    'django_extensions',
    'widget_tweaks',
    "rest_framework",
    'corsheaders',
    # "rest_framework.authtoken",
    "django_filters",
    "drf_yasg",

    # Project APPs
    'users',
    'donations',
    'core',
    'events',
    'member',
]
# NOTE: WhiteNoise and CORS middleware must come early in this list.
MIDDLEWARE = [
    "whitenoise.middleware.WhiteNoiseMiddleware",
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'aprepi.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [str(BASE_DIR / "templates")],
        'OPTIONS': {
            # https://docs.djangoproject.com/en/dev/ref/settings/#template-loaders
            # https://docs.djangoproject.com/en/dev/ref/templates/api/#loader-types
            "loaders": [
                "django.template.loaders.filesystem.Loader",
                "django.template.loaders.app_directories.Loader",
            ],
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'aprepi.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# NOTE(review): SQLite is a development default; production should override.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(BASE_DIR / "staticfiles")
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = "/static/"
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(BASE_DIR / "static")]
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = [
    "django.contrib.staticfiles.finders.FileSystemFinder",
    "django.contrib.staticfiles.finders.AppDirectoriesFinder",
]
# MEDIA
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = str(BASE_DIR / "media")
# https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = "/media/"
# Custom user model and auth redirects.
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "core:dashboard"
LOGOUT_REDIRECT_URL = "core:home"
LOGIN_URL = "users:login"
LOGOUT_URL = "users:logout"
MESSAGE_STORAGE = 'django.contrib.messages.storage.session.SessionStorage'
# MercadoPago API token; the default is a non-functional placeholder.
TOKEN_MERCADO_PAGO = str(os.getenv('TOKEN_MERCADO_PAGO', default='AKJSJ1J2O10332BJ2KBKDA'))
# django-rest-framework
# -------------------------------------------------------------------------------
# django-rest-framework - https://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
    "DEFAULT_AUTHENTICATION_CLASSES": (
        # "rest_framework.authentication.SessionAuthentication",
        # "rest_framework.authentication.TokenAuthentication",
        "rest_framework_simplejwt.authentication.JWTAuthentication",
    ),
    "DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
    "PAGE_SIZE": 5,
    "DEFAULT_SCHEMA_CLASS": "rest_framework.schemas.coreapi.AutoSchema",
    "DEFAULT_PERMISSION_CLASSES": ("rest_framework.permissions.IsAuthenticated",),
    "DEFAULT_FILTER_BACKENDS": ["django_filters.rest_framework.DjangoFilterBackend"],
    "DEFAULT_THROTTLE_CLASSES": [
        "rest_framework.throttling.AnonRateThrottle",
        "rest_framework.throttling.UserRateThrottle"
    ],
    "DEFAULT_THROTTLE_RATES": {
        "anon": "50/day",
        "user": "500/day"
    }
}
# JWT lifetimes: 60-minute access tokens, 1-day refresh tokens.
SIMPLE_JWT = {
    'ACCESS_TOKEN_LIFETIME': timedelta(minutes=60),
    'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
    'AUTH_HEADER_TYPES': ('JWT',),
    'AUTH_TOKEN_CLASSES': ('rest_framework_simplejwt.tokens.AccessToken',),
}
# NOTE(review): allowing all CORS origins is fine for development only.
CORS_ORIGIN_ALLOW_ALL = True
| 1.164063 | 1 |
aospy_user/calcs/mse_from_hypso_budget.py | spencerahill/aospy-obj-lib | 3 | 55300 | <gh_stars>1-10
"""MSE budget functions, with height computed using hypsometric equation."""
from indiff.advec import Upwind
from indiff.deriv import LatCenDeriv, LonCenDeriv
from .. import LAT_STR, LON_STR, PLEVEL_STR
from .advection import (zonal_advec_upwind, merid_advec_upwind,
horiz_advec_upwind)
from .thermo import cpt_lvq, mse_from_hypso
def mse_from_hypso_merid_deriv(ps, temp, sphum):
    """Meridional (latitude) derivative of frozen MSE on pressure coordinates."""
    return LatCenDeriv(mse_from_hypso(ps, temp, sphum), LAT_STR).deriv()
def mse_from_hypso_zonal_deriv(ps, temp, sphum):
    """Zonal (longitude) derivative of frozen MSE on pressure coordinates."""
    return LonCenDeriv(mse_from_hypso(ps, temp, sphum), LON_STR).deriv()
def mse_from_hypso_zonal_advec_upwind(ps, temp, sphum, u, radius, order=2):
    """Zonal advection of moist static energy via an upwind scheme."""
    energy = mse_from_hypso(ps, temp, sphum)
    return zonal_advec_upwind(energy, u, radius, order=order)
def mse_from_hypso_merid_advec_upwind(ps, temp, sphum, v, radius, order=2):
    """Meridional advection of moist static energy via an upwind scheme."""
    energy = mse_from_hypso(ps, temp, sphum)
    return merid_advec_upwind(energy, v, radius, order=order)
def mse_from_hypso_horiz_advec_upwind(ps, temp, sphum, u, v, radius, order=2):
    """Horizontal advection of moist static energy via an upwind scheme."""
    energy = mse_from_hypso(ps, temp, sphum)
    return horiz_advec_upwind(energy, u, v, radius, order=order)
def mse_from_hypso_vert_advec_upwind(ps, temp, sphum, omega, p, order=2):
    """Upwind vertical (pressure) advection of moist static energy."""
    energy = mse_from_hypso(ps, temp, sphum)
    advec_obj = Upwind(omega, energy, PLEVEL_STR, coord=p, order=order,
                       fill_edge=True)
    return advec_obj.advec()
def cpt_lvq_merid_deriv(temp, sphum):
    """Meridional derivative of c_p*T + L_v*q on pressure coordinates."""
    return LatCenDeriv(cpt_lvq(temp, sphum), LAT_STR).deriv()
def cpt_lvq_zonal_deriv(temp, sphum):
    """Zonal derivative of c_p*T + L_v*q on pressure coordinates."""
    return LonCenDeriv(cpt_lvq(temp, sphum), LON_STR).deriv()
def cpt_lvq_zonal_advec_upwind(temp, sphum, u, radius, order=2):
    """Zonal advection of c_p*T + L_v*q via an upwind scheme."""
    energy = cpt_lvq(temp, sphum)
    return zonal_advec_upwind(energy, u, radius, order=order)
def cpt_lvq_merid_advec_upwind(temp, sphum, v, radius, order=2):
    """Meridional advection of c_p*T + L_v*q via an upwind scheme."""
    energy = cpt_lvq(temp, sphum)
    return merid_advec_upwind(energy, v, radius, order=order)
def cpt_lvq_horiz_advec_upwind(temp, sphum, u, v, radius, order=2):
    """Horizontal advection of c_p*T + L_v*q via an upwind scheme."""
    energy = cpt_lvq(temp, sphum)
    return horiz_advec_upwind(energy, u, v, radius, order=order)
| 1.734375 | 2 |
import GradientBasedOptimization as gbopt
import openpyxl as pyxl
import time
from QAnsatz import *
from QSubspaceEigensolver import *
import k_nearest_data as k_data
from QMeasure import HadamardTest_Analytical
# 3-qubit Hamiltonians built from Pauli-string decompositions, with their
# dense matrices taken from k_nearest_data (W, D). Naming suggests a graph
# setting: W = weight/adjacency matrix, D = degree matrix, L = D - W
# (presumably the graph Laplacian) -- confirm against k_nearest_data.
W_Hamiltonian = Hamiltonian_in_Pauli_String(qubits=3,
                                            unitary=['X0', 'X1', 'X1X0', 'X1Z0', 'Z1X0', 'X2', 'X2X0',
                                                     'X2Z0', 'X2X1', 'X2X1X0', 'X2X1Z0', 'X2Y1Y0', 'X2Z1',
                                                     'X2Z1X0', 'X2Z1Z0', 'Y2Y0', 'Y2Y1', 'Y2Y1Z0', 'Y2Z1Y0',
                                                     'Z2X0', 'Z2X1', 'Z2X1Z0', 'Z2Y1Y0', 'Z2Z1X0'],
                                            coefficient=[6 / 8, 6 / 8, 4 / 8, 2 / 8, -2 / 8, 6 / 8, 2 / 8, -2 / 8,
                                                         2 / 8, 4 / 8, 2 / 8, -4 / 8, -2 / 8, 2 / 8, -2 / 8, 2 / 8,
                                                         -2 / 8, -2 / 8, 2 / 8, -2 / 8, -2 / 8, 2 / 8, -4 / 8, -2 / 8],
                                            hamiltonian_mat=k_data.W)
# Diagonal part: identity and Z-only Pauli strings.
D_Hamiltonian = Hamiltonian_in_Pauli_String(qubits=3,
                                            unitary=['I2I1I0', 'Z0', 'Z1', 'Z1Z0', 'Z2', 'Z2Z1Z0'],
                                            coefficient=[30 / 8, 2 / 8, -2 / 8, 2 / 8, -4 / 8, 4 / 8, ],
                                            hamiltonian_mat=k_data.D)
# 8x8 identity, kept for convenience (unused in the main experiment below).
I_Hamiltonian = Hamiltonian_in_Pauli_String(qubits=3,
                                            unitary=['I2I1I0'],
                                            coefficient=[1],
                                            hamiltonian_mat=np.eye(2 ** 3))
L_Hamiltonian = D_Hamiltonian - W_Hamiltonian
if __name__ == '__main__':
    # Experiment configuration: which eigenvalue index to target, number of
    # outer (sigma-update) iterations, qubit count and ansatz depth.
    eig = 0
    iterations = 5
    state_scale = 3
    layer = 5
    # Parameter count of the hardware-efficient half-layer ansatz.
    parameter_num = layer * np.math.floor(state_scale / 2) * 12
    timestamp = time.strftime("_%Y%m%d_%H%M%S", time.localtime())
    filename = 'xlsxdata\\data' + timestamp + '.xlsx'
    # Initial generalized-eigenvalue shift; updated each outer iteration.
    sigma = 1
    # Reference generalized eigenvalues of (L, D), used only to evaluate the
    # residual norm against the analytically known shift.
    sigma_analytical = [0,
                        0.4495,
                        0.9015,
                        1.0000,
                        1.1949,
                        1.3027,
                        1.5000,
                        1.6514]
    # (sigma, residual-norm) pairs recorded over the run.
    tracers = []
    print('----------------------------------------------')
    print('eig = ' + str(eig))
    init_parameter = [0 for i in range(parameter_num)]
    res_parameter = init_parameter
    tracer_parameter = []
    for it in range(iterations):
        # Shifted operator H = L - sigma * D for the current shift estimate.
        H = L_Hamiltonian - sigma * D_Hamiltonian
        print('eig = ' + str(eig) + ' it = ' + str(it))
        Solver = SubspaceEigSolver_ClassicalEfficientSimulator(Hamiltonian=H,
                                                               ansatze=HardwareEfficientAnsatze_halflayer(state_scale,
                                                                                                          layer,
                                                                                                          res_parameter),
                                                               weight_list=[i + 1 for i in range(eig + 1)])
        if it == 0:
            # Record the initial residual norm before any optimization.
            initvec = np.zeros(2 ** state_scale)
            initvec[0] = 1
            check_circuit = QuantumCircuit(state_scale)
            check_circuit.initialize(initvec, [i for i in range(state_scale)])
            check_circuit.compose(Solver.ansatze.circuit(), [i for i in range(state_scale)], inplace=True)
            job = execute(check_circuit, state_backend)
            result = job.result()
            eigvec = result.get_statevector(check_circuit, decimals=3)
            delta_vec = np.dot(H.hamiltonian_mat, eigvec)
            norm = np.real(np.dot(delta_vec, delta_vec.conj()))
            tracers.append((sigma, norm))
        # Minimize the weighted subspace loss by steepest descent.
        res_parameter = gbopt.steepest(Solver.getLossFunctionAnalytical,
                                       Solver.GetJacobianAnalytical,
                                       res_parameter,
                                       alpha=0.5,
                                       iters=100,
                                       direct='-',
                                       tol=1e-7)
        tracer_parameter.append(res_parameter)
        ''' get the ith eigenvector '''
        for j in range(eig + 1):
            # Prepare basis state |j>, apply the optimized ansatz, and print
            # the Rayleigh quotient <psi|H|psi> as the j-th eigenvalue estimate.
            initvec = np.zeros(2 ** state_scale)
            initvec[j] = 1
            check_circuit = QuantumCircuit(state_scale)
            check_circuit.initialize(initvec, [i for i in range(state_scale)])
            check_circuit.compose(Solver.ansatze.circuit(), [i for i in range(state_scale)], inplace=True)
            job = execute(check_circuit, state_backend)
            result = job.result()
            state = result.get_statevector(check_circuit, decimals=3)
            lamb = np.real(np.dot(np.dot(state.conj(), H.hamiltonian_mat), state))
            print('eig_' + str(j) + ' = ' + str(lamb))
        initvec = np.zeros(2 ** state_scale)
        initvec[0] = 1
        check_circuit = QuantumCircuit(state_scale)
        check_circuit.initialize(initvec, [i for i in range(state_scale)])
        check_circuit.compose(Solver.ansatze.circuit(), [i for i in range(state_scale)], inplace=True)
        ''' update sigma '''
        # sigma <- <L>/<D>, a generalized-Rayleigh-quotient style update
        # measured via analytical Hadamard tests.
        AE = L_Hamiltonian.ExpectationMeasurement(MeasurementMethod=HadamardTest_Analytical,
                                                  test_circuit=check_circuit,
                                                  active_qubits=[i for i in range(state_scale)])
        BE = D_Hamiltonian.ExpectationMeasurement(MeasurementMethod=HadamardTest_Analytical,
                                                  test_circuit=check_circuit,
                                                  active_qubits=[i for i in range(state_scale)])
        sigma = AE / BE
        job = execute(check_circuit, state_backend)
        result = job.result()
        eigvec = result.get_statevector(check_circuit, decimals=3)
        # Residual against the analytically shifted operator L - sigma* D.
        H_ana = L_Hamiltonian - sigma_analytical[eig] * D_Hamiltonian
        delta_vec = np.dot(H_ana.hamiltonian_mat, eigvec)
        norm = np.real(np.dot(delta_vec, delta_vec.conj()))
        tracers.append((sigma, norm))
        print('sigma = ' + str(sigma))
        print('----------------------------------------------')
    # Create xlsx file
    wb = pyxl.Workbook()
    wb.save(filename)
    wb = pyxl.load_workbook(filename)
    # Record experiment data per experiment
    trace_sheet = wb.create_sheet(title='Experiment Data')
    trace_sheet.cell(1, 1).value = 'timestep'
    trace_sheet.cell(1, 2).value = 'eigval_' + str(eig)
    trace_sheet.cell(1, 3).value = 'norm_' + str(eig)
    for piece in range(len(tracers)):
        trace_sheet.cell(2 + piece, 2).value = tracers[piece][0]
        trace_sheet.cell(2 + piece, 3).value = tracers[piece][1]
    wb.save(filename)
    wb = pyxl.load_workbook(filename)
    # One row of ansatz parameters per outer iteration.
    sh = wb.create_sheet(title='parameters')
    for i in range(len(tracer_parameter)):
        for j in range(len(tracer_parameter[i])):
            sh.cell(i + 1, 1 + j).value = tracer_parameter[i][j]
    wb.save(filename)
| 1.570313 | 2 |
python/paddle/v2/framework/tests/test_multiplex_op.py | shenchaohua/Paddle | 3 | 55556 | <reponame>shenchaohua/Paddle
import unittest
import numpy as np
from op_test import OpTest
class TestMultiplexOp(OpTest):
    """Forward and gradient checks for the multiplex op.

    Each output row i is copied from input tensor x_{Ids[i]+1} at row i.
    """

    def setUp(self):
        self.op_type = "multiplex"
        rows = 4
        # Random permutation of row indices, shaped (rows, 1) as the op expects.
        index = np.arange(0, rows).astype('int32')
        np.random.shuffle(index)
        index = np.reshape(index, (rows, 1))
        ins1 = np.random.random((rows, 10)).astype("float32")
        ins2 = np.random.random((rows, 10)).astype("float32")
        ins3 = np.random.random((rows, 10)).astype("float32")
        ins4 = np.random.random((rows, 10)).astype("float32")
        self.inputs = {
            'Ids': index,
            'X': [('x1', ins1), ('x2', ins2), ('x3', ins3), ('x4', ins4)]
        }
        # Reference multiplex output: row i comes from the index[i]-th input.
        output = np.zeros_like(ins1)
        for i in range(0, rows):
            k = index[i][0]
            output[i] = self.inputs['X'][k][1][i]
        self.outputs = {'Out': output}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['x1', 'x2', 'x3', 'x4'], 'Out')

    def test_check_grad_ignore_x1(self):
        # BUG FIX: set('x1') iterates the string's characters, yielding
        # {'x', '1'} instead of the intended {'x1'} (compare the
        # set(['x1', 'x2']) form used below). Wrap the name in a list.
        self.check_grad(['x2', 'x3', 'x4'], 'Out', no_grad_set=set(['x1']))

    def test_check_grad_ignore_x1_x2(self):
        self.check_grad(['x3', 'x4'], 'Out', no_grad_set=set(['x1', 'x2']))

    def test_check_grad_ignore_x3(self):
        # BUG FIX: same character-iteration bug as above for 'x3'.
        self.check_grad(['x1', 'x2', 'x4'], 'Out', no_grad_set=set(['x3']))


if __name__ == '__main__':
    unittest.main()
| 1.84375 | 2 |
'''
Given a string S, consider all duplicated substrings: (contiguous) substrings of S that occur 2 or more times. (The occurrences may overlap.)
Return any duplicated substring that has the longest possible length. (If S does not have a duplicated substring, the answer is "".)
Example 1:
Input: "banana"
Output: "ana"
Example 2:
Input: "abcd"
Output: ""
Note:
2 <= S.length <= 10^5
S consists of lowercase English letters.
'''
class Suffix(object):
    """Helper record for suffix-array construction.

    ``first_rank`` is the rank of this suffix's leading block; ``adjacent_rank``
    is the rank of the suffix starting half a block further on (-1 past the end).
    """
    def __init__(self):
        self.index = 0
        self.first_rank = -1
        self.adjacent_rank = -1

    def __lt__(self, other):
        # Order by first rank; break ties with the rank of the look-ahead block.
        if self.first_rank == other.first_rank:
            return self.adjacent_rank < other.adjacent_rank
        return self.first_rank < other.first_rank


def create_suffix_array(s):
    """Return the suffix array of *s* (lowercase a-z) via prefix doubling.

    Indices of all suffixes of *s*, sorted lexicographically. O(n log^2 n).
    """
    N = len(s)
    suffix_array = []
    # Seed ranks from the first one/two characters of each suffix.
    for index, char in enumerate(s):
        suffix_obj = Suffix()
        suffix_obj.index = index
        suffix_obj.first_rank = ord(char)-ord('a')
        suffix_obj.adjacent_rank = ord(s[index+1])-ord('a') if (index+1 < N) else -1
        suffix_array.append(suffix_obj)
    suffix_array.sort()
    no_char = 4
    index_map = {}
    # Each pass sorts by the first no_char characters of every suffix.
    while no_char < 2*N:
        rank = 0
        prev_rank, suffix_array[0].first_rank = suffix_array[0].first_rank, rank
        index_map[suffix_array[0].index] = 0
        for index in range(1, N):
            # Same (rank, adjacent rank) pair as the previous suffix -> same rank.
            if suffix_array[index].first_rank == prev_rank and suffix_array[index].adjacent_rank == suffix_array[index-1].adjacent_rank:
                suffix_array[index].first_rank = rank
            else:
                rank += 1
                prev_rank, suffix_array[index].first_rank = suffix_array[index].first_rank, rank
            index_map[suffix_array[index].index] = index
        for index in range(N):
            # BUG FIX: use integer division (no_char // 2) -- the float offset
            # produced by "/" is never a valid index_map key -- and store the
            # neighbouring suffix's *first_rank*, not the Suffix object itself.
            adjacent_index = suffix_array[index].index + (no_char // 2)
            suffix_array[index].adjacent_rank = (
                suffix_array[index_map[adjacent_index]].first_rank
                if adjacent_index < N else -1)
        suffix_array.sort()
        no_char *= 2
    return [suffix.index for suffix in suffix_array]
def lcp_w_suffix_str(array, s):
    """Kasai's algorithm: LCP array for suffix array *array* of string *s*.

    Entry i is the longest common prefix of the suffixes at positions i and
    i+1 of the suffix array (the last entry stays 0). Runs in O(n).
    """
    n = len(array)
    lcp = [0] * n
    # rank_of[start] = position of the suffix starting at `start` in `array`.
    rank_of = [0] * n
    for pos, suffix_start in enumerate(array):
        rank_of[suffix_start] = pos
    overlap = 0
    for suffix_start in range(n):
        if rank_of[suffix_start] == n - 1:
            # Last suffix in sorted order has no right neighbour.
            overlap = 0
            continue
        neighbour = array[rank_of[suffix_start] + 1]
        while (suffix_start + overlap < n and neighbour + overlap < n
               and s[suffix_start + overlap] == s[neighbour + overlap]):
            overlap += 1
        lcp[rank_of[suffix_start]] = overlap
        # Dropping the first character shrinks the overlap by at most one.
        if overlap > 0:
            overlap -= 1
    return lcp
class Solution(object):
    def longestDupSubstring(self, S):
        """
        Return any longest substring of S that occurs at least twice
        (possibly overlapping), or "" if none exists.
        :type S: str
        :rtype: str
        """
        order = create_suffix_array(S)
        overlaps = lcp_w_suffix_str(order, S)
        # The longest duplicated substring is the maximal LCP between
        # lexicographically adjacent suffixes.
        best_len = 0
        best_start = 0
        for pos in range(len(S)):
            if overlaps[pos] > best_len:
                best_len = overlaps[pos]
                best_start = order[pos]
        if best_len == 0:
            return ""
        return S[best_start:best_start + best_len]
| 3 | 3 |
server.py | tjeason/contactor | 0 | 55812 | <gh_stars>0
#!/usr/bin/env python
# File: server.py
import BaseHTTPServer
import cgi
import SimpleHTTPServer
import sys
import time
from mailgunner import MailGunHandler
from mandriller import MandrillHandler
from log import logColor
ServerClass = BaseHTTPServer.HTTPServer
# HTTP requests handler.
class ContactorRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
if self.path == '/':
print logColor.INFO + "[", time.asctime(), "] INFO: Getting index page." + logColor.END
self.path = '/content/index.html'
if self.path == '/test':
self.path = '/content/test.html'
return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)
def do_POST(self):
ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
print "+++++ ctype:", ctype
if ctype == 'multipart/form-data':
print "+++++ self.rfile:", self.rfile
print "+++++ pdict:", pdict
post_vars = cgi.parse_multipart(self.rfile, pdict)
print "+++++ post_vars:", post_vars
elif ctype == 'application/x-www-form-urlencoded':
length = int(self.headers.getheader('content-length'))
post_vars = cgi.parse_qs(self.rfile.read(length), keep_blank_values = 1)
else:
post_vars = {}
# Parse values from post_vars
if len(post_vars) > 0:
from_name = post_vars.get('fromName', [''])[0]
from_email = post_vars.get('fromEmail', [''])[0]
to_name = post_vars.get('toName', [''])[0]
to_email = post_vars.get('toEmail', [''])[0]
subject = post_vars.get('subject', [''])[0]
message = post_vars.get('msg', [''])[0]
attachment = post_vars.get('attachment', [''])[0]
# Mandrill API is sending the contact information.
if self.path == '/md/send':
print logColor.INFO + "[", time.asctime(), "] INFO: Received Mandrill POST request. Sending message..." + logColor.END
MandrillHandler().send_simple_message(from_name, from_email, to_name, to_email, subject, message)
# Mandrill API is sending the contact information with an attached file.
if self.path == '/md/send/file':
print logColor.INFO + "[", time.asctime(), "] INFO: Received Mandrill POST request. Sending message with file attached..." + logColor.END
MandrillHandler().send_complex_message(from_name, from_email, to_email, to_name, subject, message, attachment)
# Mailgun is sending the contact information.
if self.path == '/mg/send':
print logColor.INFO + "[", time.asctime(), "] INFO: Received Mailgun POST request. Sending message..." + logColor.END
MailGunHandler().send_simple_message(from_name, from_email, to_name, to_email, subject, message)
# Mailgun is sending the contact information with an attached file.
if self.path == '/mg/send/file':
print logColor.INFO + "[", time.asctime(), "] INFO: Received Mailgun POST request. Sending message with file attached..." + logColor.END
MailGunHandler().send_complex_message(from_name, from_email, to_email, subject, message, attachment)
# At least some contact form data is missing.
else:
print logColor.ERROR + "[", time.asctime(), "] ERROR: Could not retrieve contact information." + logColor.END
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
return
def showIntro():
intro = """
_________ __ __
\_ ___ \ ____ _____/ |______ _____/ |_ _____________
/ \ \/ / _ \ / \ __\__ \ _/ ___\ __\/ _ \_ __ /
\ \___( <_> ) | \ | / __ \ \___| | ( <_> ) | \/
\______ /\____/|___| /__| (____ /\___ >__| \____/|__|
\/ \/ \/ \/
"""
print logColor.OKBLUE + intro + logColor.END
if __name__ == "__main__":
    try:
        # Optional first CLI argument overrides the default port (9000).
        if sys.argv[1:]:
            port = int(sys.argv[1])
        else:
            port = 9000
        # Bind on all interfaces.
        server_address = ('0.0.0.0', port)
        Handler = ContactorRequestHandler
        httpd = ServerClass(server_address, Handler)
        serv = httpd.socket.getsockname()
        showIntro()
        print logColor.INFO + "[", time.asctime(), "] INFO: Serving running on", serv[0], "using port", serv[1], ". Use Control-C to shutdown the server..." + logColor.END
        # Blocks until interrupted.
        httpd.serve_forever()
    except KeyboardInterrupt:
        # Ctrl-C: close the listening socket and exit cleanly.
        print logColor.WARN + "[", time.asctime(), "] WARN: Server shutting down." + logColor.END
        httpd.socket.close()
| 1.601563 | 2 |
# encoding: UTF-8
"""
定时服务,可无人值守运行,实现每日自动下载更新历史行情数据到数据库中。
"""
from DataService.tushareData import *
from datetime import datetime
if __name__ == '__main__':
    # Date on which the daily download task last completed (None = never).
    taskCompletedDate = None
    # Daily task time. NOTE(review): the original (Chinese) comment said this
    # time is randomized to avoid all users hitting the data server at once,
    # but the code uses a fixed 17:30 -- confirm which is intended.
    taskTime = datetime.now().replace(hour=17, minute=30, second=0)
    # Enter the main loop.
    while True:
        t = datetime.now()
        # Once past the task time each day, run the data downloads (at most
        # once per calendar day).
        if t.time() > taskTime.time() and (taskCompletedDate is None or t.date() != taskCompletedDate):
            downloadTradeCalendar()
            downloadAllStock()
            # First run backfills 5 days; subsequent runs fetch only 1 day.
            if (taskCompletedDate is None):
                downloadTradeDataDaily(5)
            else:
                downloadTradeDataDaily(1)
            downloadTradeDataTick(2)
            downloadTradeDataRealtimeQuotes()
            # Record the date the task completed.
            taskCompletedDate = t.date()
        sleep(600)
        #break
| 1.421875 | 1 |
# Create a calculator function
# The function should accept three parameters:
# first_number: a numeric value for the math operation
# second_number: a numeric value for the math operation
# operation: the word 'add' or 'subtract'
# the function should return the result of the two numbers added or subtracted
# based on the value passed in for the operator
#
# Test your function with the values 6,4, add
# Should return 10
#
# Test your function with the values 6,4, subtract
# Should return 2
#
# BONUS: Test your function with the values 6, 4 and divide
# Have your function return an error message when invalid values are received
def calculator(first_number, second_number, operation):
    """Perform a basic math operation on two numbers.

    first_number / second_number: numeric operands.
    operation: 'add', 'subtract' or 'divide'.
    Returns the numeric result, or an error-message string for an unknown
    operation (the original code left `answer` unbound in that case, raising
    UnboundLocalError). 'divide' by zero still raises ZeroDivisionError.
    """
    if operation == 'add':
        answer = first_number + second_number
    elif operation == 'subtract':
        answer = first_number - second_number
    elif operation == 'divide':
        answer = first_number / second_number
    else:
        # Bonus requirement: report invalid operations instead of crashing.
        answer = 'Error: unknown operation "' + str(operation) + '"'
    return answer
# Interactive driver: read two numbers and an operation, then compute.
first_number = float(input('Please enter a number: '))
second_number = float(input('Please enter another number: '))
# Normalize case so 'Add', 'ADD', etc. all match.
operation = input('Please enter an operation: ').lower()
answer = calculator(first_number, second_number, operation)
print(answer)
"""Integration tests for the sync CLI command."""
import os.path
import fixture
class SyncTests(fixture.IntegrationFixture):
    """Integration tests for the ``sync`` CLI command.

    Each test builds a manifest string mapping repository sources to
    destination paths, runs the sync, and asserts which files appear in
    the output directory. Tests that loop twice verify idempotency: a
    second sync over an existing destination must not nest copies.
    """

    def _assert_exists(self, output_path, exists=True, i=1):
        """Assert *output_path* does (or does not) exist under the output dir.

        *i* identifies the sync-loop iteration in failure messages.
        """
        if exists:
            self.assertTrue(os.path.exists(os.path.join(self.output_dir,
                output_path)), "%s does not exist on loop %s" % (output_path, i))
        else:
            self.assertFalse(os.path.exists(os.path.join(self.output_dir,
                output_path)), "%s exists on loop %s" % (output_path, i))

    def test_file_from_tag(self):
        manifest = self.build_manifest_str('v0.2', [('playbooks/playbook1.yml', 'playbook1.yml')])
        result = self._run_sync(manifest)
        self.assertEqual(0, result.exit_code)
        self._assert_exists('playbook1.yml')

    def test_file_to_dir(self):
        manifest = self.build_manifest_str('master', [('playbooks/playbook1.yml', 'playbooks/')])
        result = self._run_sync(manifest)
        self.assertEqual(0, result.exit_code)
        self._assert_exists('playbooks/playbook1.yml')

    def test_file_to_top_lvl_dir(self):
        manifest = self.build_manifest_str('master', [('playbooks/playbook1.yml', '')])
        result = self._run_sync(manifest)
        self.assertEqual(0, result.exit_code)
        self._assert_exists('playbook1.yml')

    def test_file_glob_to_dir(self):
        manifest = self.build_manifest_str('v0.2', [('playbooks/*.yml', 'playbooks/')])
        result = self._run_sync(manifest)
        self.assertEqual(0, result.exit_code)
        self._assert_exists('playbooks/playbook1.yml')

    def test_dir_from_tag(self):
        manifest = self.build_manifest_str('v0.2', [('roles/', 'roles')])
        result = self._run_sync(manifest)
        self.assertEqual(0, result.exit_code)
        self._assert_exists('roles/dummyrole1/tasks/main.yml')
        self._assert_exists('roles/dummyrole2/tasks/main.yml')
        # Doesn't exist in v0.2 tag.
        self._assert_exists('roles/dummyrole3/tasks/main.yml', False)

    def test_dir_from_branch(self):
        manifest = self.build_manifest_str('master', [('roles/', 'roles')])
        for i in range(2):
            result = self._run_sync(manifest)
            self.assertEqual(0, result.exit_code)
            self._assert_exists('roles/dummyrole1/tasks/main.yml', i=i)
            self._assert_exists('roles/dummyrole2/tasks/main.yml', i=i)
            self._assert_exists('roles/dummyrole3/tasks/main.yml', i=i)
            self._assert_exists('roles/roles/dummyrole1/tasks/main.yml', False, i=i)

    def test_dir_from_branch_trailing_dst_slash(self):
        manifest = self.build_manifest_str('master', [('roles/', 'roles/')])
        for i in range(2):
            result = self._run_sync(manifest)
            self.assertEqual(0, result.exit_code)
            self._assert_exists('roles/dummyrole1/tasks/main.yml', i=i)
            self._assert_exists('roles/dummyrole2/tasks/main.yml', i=i)

    def test_dir_top_level_dst(self):
        manifest = self.build_manifest_str('master', [('roles', '')])
        result = self._run_sync(manifest)
        self.assertEqual(0, result.exit_code)
        self._assert_exists('dummyrole1/tasks/main.yml')
        self._assert_exists('dummyrole2/tasks/main.yml')
        self._assert_exists('roles/dummyrole1/tasks/main.yml', False)
        self._assert_exists('roles/dummyrole2/tasks/main.yml', False)

    def test_glob_dir(self):
        manifest = self.build_manifest_str('master', [('roles/*', 'roles')])
        for i in range(2):
            result = self._run_sync(manifest)
            self.assertEqual(0, result.exit_code)
            self._assert_exists('roles/dummyrole1/tasks/main.yml', i=i)
            self._assert_exists('roles/dummyrole2/tasks/main.yml', i=i)
            self._assert_exists('roles/roles/dummyrole1/tasks/main.yml', False, i=i)
            self._assert_exists('dummyrole1/tasks/main.yml', False, i=i)

    def test_glob_dir_dst_slash(self):
        manifest = self.build_manifest_str('v0.2', [('roles/*', 'roles/')])
        result = self._run_sync(manifest)
        self.assertEqual(0, result.exit_code)
        self._assert_exists('roles/dummyrole1/tasks/main.yml')
        self._assert_exists('roles/dummyrole2/tasks/main.yml')

    def test_subdir(self):
        manifest = self.build_manifest_str('master', [('roles/dummyrole1', 'roles/dummyrole1')])
        result = self._run_sync(manifest)
        self.assertEqual(0, result.exit_code)
        self._assert_exists('roles/dummyrole1/tasks/main.yml')
        self._assert_exists('roles/dummyrole2/tasks/main.yml', False)

    def test_top_level_dir(self):
        manifest = self.build_manifest_str('master', [('./', 'vendor/output')])
        result = self._run_sync(manifest)
        self.assertEqual(0, result.exit_code)
        self._assert_exists('vendor/output/roles/dummyrole1/tasks/main.yml')
        self._assert_exists('vendor/output/roles/dummyrole2/tasks/main.yml')
        self._assert_exists('vendor/output/.git', False)

    def test_subdir_dst_slash(self):
        manifest = self.build_manifest_str('master', [('roles/dummyrole1', 'roles/dummyrole1/')])
        # Bug fix: the sync previously ran only once, *outside* this loop,
        # so the second iteration re-checked a stale result instead of
        # exercising re-sync idempotency like the sibling tests do.
        for i in range(2):
            result = self._run_sync(manifest)
            self.assertEqual(0, result.exit_code)
            self._assert_exists('roles/dummyrole1/tasks/main.yml', i=i)
            self._assert_exists('roles/dummyrole2/tasks/main.yml', False, i=i)
            self._assert_exists('roles/dummyrole1/dummyrole1/tasks/main.yml', False, i=i)
            self._assert_exists('roles/dummyrole1/roles/dummyrole1/tasks/main.yml', False, i=i)

    def test_dir_rename_dst_exists(self):
        m1 = self.build_manifest_str('master', [('roles', 'roles2')])
        m2 = self.build_manifest_str('master', [('roles', 'roles2/')])
        for manifest in [m1, m2]:
            for i in range(2):
                result = self._run_sync(manifest)
                self.assertEqual(0, result.exit_code)
                self._assert_exists('roles2/dummyrole1/tasks/main.yml', i=i)
                self._assert_exists('roles2/dummyrole2/tasks/main.yml', i=i)
                self._assert_exists('roles2/roles/dummyrole1/tasks/main.yml', False, i=i)
                self._assert_exists('roles2/roles2/dummyrole1/tasks/main.yml', False, i=i)
                self._assert_exists('roles2/roles/dummyrole2/tasks/main.yml', False, i=i)

            # If we run again, make sure we don't nest:
            result = self._run_sync(manifest)
            self.assertEqual(0, result.exit_code)
            self._assert_exists('roles2/dummyrole1/tasks/main.yml', i=i)
            self._assert_exists('roles2/dummyrole2/tasks/main.yml', i=i)
            self._assert_exists('roles2/roles/dummyrole1/tasks/main.yml', False, i=i)
            self._assert_exists('roles2/roles/dummyrole2/tasks/main.yml', False, i=i)

    def test_merge_two_dirs(self):
        manifest = self.build_manifest_str('master', [
            ('roles/', 'merged/'),
            ('playbooks/*', 'merged/'),
        ])
        result = self._run_sync(manifest)
        self.assertEqual(0, result.exit_code)
        self._assert_exists('merged/dummyrole1/tasks/main.yml')
        self._assert_exists('merged/dummyrole2/tasks/main.yml')
        self._assert_exists('merged/playbook1.yml')

    def test_dir_clobber(self):
        # Testing a bug where files in roles get clobbered by later copying
        # everything from a source roles dir in.
        manifest = self.build_manifest_str('master', [('roles/dummyrole2/tasks/main.yml', 'roles/main.yml'),
            ('roles/*', 'roles/')])
        result = self._run_sync(manifest)
        self.assertEqual(0, result.exit_code)
        self._assert_exists('roles/dummyrole1/tasks/main.yml')
        self._assert_exists('roles/dummyrole2/tasks/main.yml')
        self._assert_exists('roles/main.yml')
        # Re-run to trigger cleanup of previous dirs:
        # TODO(review): the re-run and its assertions appear to be missing
        # here — confirm against upstream history.
| 1.671875 | 2 |
producer.py | QualiChain/Mediator | 0 | 56324 | from clients.rabbitmq_client import RabbitMQClient
import json
from settings import APP_QUEUE
if __name__ == "__main__":
    # Publish one sample DOBIE task to the application queue so the
    # mediator pipeline can be exercised end to end.
    # (Removed a large block of commented-out dead code that built an
    # alternative QE/SPARQL payload — recover it from VCS if needed.)
    rabbit_mq = RabbitMQClient()

    payload = {
        "component": "DOBIE",
        "message": {
            "tasks": [
                {
                    "label": "95671c903a5b97a9",
                    "jobDescription": "memcached, win32, pig, rdf, linear programming"
                }
            ]
        }
    }

    rabbit_mq.producer(queue=APP_QUEUE, message=json.dumps(payload))
| 1.179688 | 1 |
hrl_fabric_based_tactile_sensor/src/hrl_fabric_based_tactile_sensor/tactile_sensor_model.py | gt-ros-pkg/hrl-haptic-manip | 1 | 56452 | #!/usr/bin/python
# <NAME>'s initial attempt to model the force -> digital signal
# curves for a single taxel.
#
# + First version written on June 4, 2012.
# + Cleaned up, documented, and made minor edits June 5, 2012
import matplotlib.pylab as pl
def logistic(t):
    # Standard logistic sigmoid: maps the real line onto (0, 1).
    denominator = 1.0 + pl.exp(-t)
    return 1.0 / denominator
def norm_logistic(t):
    # Shift and rescale the logistic so norm_logistic(0) == 0 and the
    # output range is (-1, 1).
    return (logistic(t) - 0.5) * 2.0
def abbot_curve(t):
    # Rough stand-in for the Abbot's curve from the paper: a logistic
    # centered at t = 0.5 and steepened by a factor of 12.
    # Assumes t is in the range [0, 1]; the authors note it has not been
    # visualized/validated yet.
    shifted = (t - 0.5) * 12.0
    return logistic(shifted)
class TaxelModel:
    '''Attempts to model the digital signal that results from a normal
    force applied to a taxel. It assumes that the force is uniformly
    distributed over an area. The contact area is specified as a
    percentage of the taxel area.'''

    # NOTE(review): this module is Python 2 (print statements below);
    # run it under a Python 2 interpreter or port the prints first.

    def __init__(self, contact_area_percent=50.0):
        ######################################
        # begin: parameters to be specified
        self.contact_area_percent = contact_area_percent
        # resistor that is in series with the taxel (Ohms)
        self.r1 = 47.0
        # total voltage across the taxel and r1, which are in series (Volts)
        self.vtot = 5.0
        # the maximum resistance of the taxel when no pressure is applied (Ohms)
        self.rtax_max = 50.0
        # the minimum force that will be applied to the taxel (Newtons)
        self.fz_min = 0.0
        # the maximum force that will be applied to the taxel (Newtons)
        self.fz_max = 45.0
        # the number of bits for the analog to digital conversion
        self.adc_bits = 10
        # the pressure sensitive area of the taxel (meters^2)
        self.taxel_area = 0.04 * 0.04
        # pressure that results in minimum resistance after which
        # further pressure does not result in a reduction in the
        # signal, since the sensor is saturated (Pascals = N/m^2)
        self.pressure_max = self.fz_max/(0.4 * self.taxel_area)
        # hack to specify the minimum resistance of the taxel, which
        # is associated with the maximum pressure. for now, it's
        # specified as a percentage of the maximum resistance, which
        # is associated with 0 applied pressure (no contact)
        self.r_min_percent_of_r_no_contact = 0.001 #
        # end
        ######################################

        # Derived quantities: resistivities, force sweep, ADC scaling.
        self.r_no_contact = self.taxel_area * self.rtax_max
        self.r_min = self.r_no_contact * (self.r_min_percent_of_r_no_contact/100.0)
        self.fz_array = pl.arange(self.fz_min, self.fz_max, 0.001) # N
        self.adc_range = pow(2.0, self.adc_bits)
        self.volts_per_adc_unit = self.vtot/self.adc_range # V
        self.contact_area = self.taxel_area * (self.contact_area_percent/100.0) # m^2
        self.no_contact_area = self.taxel_area - self.contact_area # m^2
        self.pressure_array = pl.array([f/self.contact_area for f in self.fz_array]) # Pascals = N/m^2
        self.rtax_array = pl.array([self.rtax(f) for f in self.pressure_array])
        self.vdigi_array = pl.array([self.output_voltage(r) for r in self.rtax_array])
        self.vdigi_max = self.output_voltage(self.rtax_max)
        self.adc_bias = self.vdigi_max/self.volts_per_adc_unit
        self.adc_array = self.vdigi_array/self.volts_per_adc_unit
        # Plotted signal: deviation of the ADC reading from its no-contact bias.
        self.adc_plot = self.adc_bias - self.adc_array

    def output_voltage(self, r):
        '''given the resistance for the entire taxel, this returns the
        voltage across the taxel, which is what the analog to digital
        converter reads'''
        # Simple voltage divider: r in series with r1 across vtot.
        return( (self.vtot/(self.r1 + r)) * r )

    def pressure2resistance(self, p):
        '''given an applied pressure, returns the resistivity of the
        contacted region of the taxel. this uses a simple linear
        model, where:

        0 Pascals -> r_no_contact
        pressure max -> r_min Ohms
        '''
        r = ((self.r_no_contact-self.r_min) * ((self.pressure_max - p)/self.pressure_max)) + self.r_min
        # Clamp to the physically meaningful range [r_min, r_no_contact].
        if r < self.r_min:
            print "r<r_min = %f<%f" % (r, self.r_min)
            r = self.r_min
        elif r > self.r_no_contact:
            r = self.r_no_contact
            print "r>r_no_contact"
        return(r)

    def pressure2resistance_2(self, p):
        '''given an applied pressure, returns the resistivity of the
        contacted region of the taxel. this uses a logistic model,
        where:

        0 Pascals -> r_no_contact
        pressure max -> r_min Ohms
        '''
        # Warp the pressure through a normalized logistic before the
        # linear map, giving a sigmoidal pressure/resistance curve.
        p = self.pressure_max * norm_logistic(6.0 * (p/self.pressure_max))
        norm_pressure = (self.pressure_max - p)/self.pressure_max
        r = ((self.r_no_contact - self.r_min) * norm_pressure) + self.r_min
        if r < self.r_min:
            print "r<r_min = %f<%f" % (r, self.r_min)
            r = self.r_min
        elif r > self.r_no_contact:
            r = self.r_no_contact
            print "r>r_no_contact"
        return(r)

    def pressure2resistance_3(self, p):
        '''given an applied pressure, returns the resistivity of the
        contacted region of the taxel. this was a quick attempt to use
        a model similar to "The Working Principle of Resistive Tactile
        Sensor Cells" by <NAME> and <NAME>. It doesn't
        work, yet?
        '''
        r_surface_resistance = self.r_min

        # wikipedia:
        # young's modulud (elastic_modulus) of nylon is 2-4 GPa = 2-4 x 10^9 Pa
        #
        # according to
        # "Spandex Fiber Reinforced Shape Memory Polymer Composites and their Mechanical Properties"
        # Journal Advanced Materials Research (Volume 410)
        # Volume Processing and Fabrication of Advanced Materials
        # Online since November, 2011
        # Authors <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
        #
        # the elastic modulus of spandex is 25 MPa = 25 x 10^6 Pa
        #
        # according to the material data sheet for EEONTEX LR-SL-PA-10E5
        # it is 69% nylon + 31% spandex
        e_nylon = 3.0 * 10**9
        e_spandex = 25.0 * 10**6
        # this is a very poor model and should be improved with simple
        # parallel fibers and constant displacement model or something
        elastic_modulus = (0.69 * e_nylon) + (0.31 * e_spandex)
        # uses hacked qualitative Abbot's curve, which I have not even
        # checked by visualizing
        r = (1.0/abbot_curve(1000.0 * (p/elastic_modulus))) * (10.0*r_surface_resistance)

        if r < self.r_min:
            print "r<r_min = %f<%f" % (r, self.r_min)
            r = self.r_min
        elif r > self.r_no_contact:
            print "r>r_no_contact = %f<%f" % (r, self.r_no_contact)
            r = self.r_no_contact
        return(r)

    def rtax(self, p):
        '''given the pressure uniformly applied across the contact
        area this returns the resistance for the entire taxel.

        it essentially models the taxel as parallel resistors, where
        resistors in the contact area have a resistance dependent on
        the applied pressure, and resistors in the non-contact area
        have the maximum resistance. i started with a discrete model,
        and then made a continuous approximation, which appears to
        correspond with using two volumes in parallel with different
        resistivities.

        based on wikipedia, it looks like i've been using something
        called resistivity (rho). so, this model can use the equation
        r = rho*(length/area). if we assume length is constant, then
        this leads to r_contact = rho_contact/area_contact and the
        same for not_contact. assuming that they are in parallel. then
        R_total = 1/((area_contact/rho_contact) +
        (area_non_contact/rho_non_contact)). if we assume length
        changes, then we can make rho_contact = resistivity_contact *
        length_contact.

        this should be more carefully investigated, but
        it seems right...

        the biggest unknown is the function that converts pressure to
        resistivity. there are currently three models of this function
        in this code. fitting a parametric model to the data would be
        a good next step. probably use an optimizer like Nelder-Mead
        that only requires function evaluations and use a cost
        function that compares the fz->adc mapping to empirically
        collected data.
        '''
        # the function that converts pressure to resistivity appears
        # to be the big unknown for this model. based on the data, it
        # seems like it needs to be a non-linear function. so far,
        # i've had the best success with a logistic model
        #r_contact = self.pressure2resistance(p)
        r_contact = self.pressure2resistance_2(p) # best performance, so far
        #r_contact = self.pressure2resistance_3(p)

        # Two resistive volumes in parallel: contact and no-contact regions.
        r = 1.0/((self.contact_area/r_contact) +
                 (self.no_contact_area/self.r_no_contact))
        return(r)

    def plot_fz_adc(self):
        '''plot the curve relating applied normal force to the analog
        to digital converter output. this corresponds with the
        empirically generated scatter plots from a real taxel'''
        pl.plot(self.adc_plot, self.fz_array, label="contact area = {0:.0f}%".format(self.contact_area_percent))
        pl.xlabel("ADC bias - ADC (adc_bias - adc)")
        pl.ylabel("FT_z (Force applied to tactile sensor, fz)")

    def plot_pressure_z_adc(self):
        '''plot the curve relating applied normal force to the analog
        to digital converter output. this corresponds with the
        empirically generated scatter plots from a real taxel'''
        pl.plot(self.adc_plot, self.fz_array / self.contact_area_percent, label="contact area = {0:.0f}%".format(self.contact_area_percent))
        pl.xlabel("ADC bias - ADC (adc_bias - adc)")
        pl.ylabel("pressure_z (Force applied to tactile sensor, pz)")

    def plot_rtax_vout(self):
        '''plot the curve relating the total taxel resistance and the
        voltage across the taxel, which corresponds to the voltage
        converted to a digital signal'''
        pl.plot(self.vdigi_array, self.rtax_array, label="contact area = {0:.0f}%".format(self.contact_area_percent))
        pl.xlabel("Volts at digitizer (vdigi) proportional to ADC")
        pl.ylabel("Resistance of tactile sensor (rtax)")

    def debug(self):
        '''print out many of the key member variables of the TaxelModel object'''
        print "fz_array", self.fz_array
        print "pressure_array =", self.pressure_array
        print "adc_range", self.adc_range
        print "rtax_array =", self.rtax_array
        print "volts_per_adc_unit =", self.volts_per_adc_unit
        print "vdigi_array =", self.vdigi_array
        print "adc_bias =", self.adc_bias
        print "adc_array =", self.adc_array
        print "adc_plot =", self.adc_plot
pl.title("Single Taxel Model")
# Sweep contact areas from full coverage down to a near-point contact and
# overlay the pressure-vs-ADC curve for each on a single figure.
for area_percent in [100.0, 80.0, 60.0, 40.0, 30.0, 20.0, 10.0, 5.0, 2.0, 1.0, 0.1, 0.001]:
    tm = TaxelModel(area_percent)
    #tm.plot_rtax_vout()
    #tm.plot_fz_adc()
    tm.plot_pressure_z_adc()
    #tm.debug()
pl.legend(loc="upper left", prop={'size':8})
pl.show()
| 2.859375 | 3 |
python/estrutura de dados/array/ExemploAula.py | luiscarlosjunior/100-days-of-code | 0 | 56580 | <filename>python/estrutura de dados/array/ExemploAula.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 24 10:04:30 2020
@author: luisc
"""
#x = 'Estou aqui'
# Demonstrates try/except/finally ordering.
try:
    # 'p' was never defined, so this deliberately raises NameError.
    print(p)
except NameError:
    print('Ops, deu um erro de variável não inicializada')
except:
    print('Outro tipo de erro')
finally:
    # The finally block always runs, handled or not.
    print('Estou sempre aqui')

# Raise - throws an exception to the user (example kept disabled below).
"""
valor = -10
if valor < 0:
    raise Exception('O valor é negativo')
"""
torabot/mods/feed/ut.py | Answeror/torabot | 42 | 56708 | def entry_id(entry):
for field in ['id', 'link']:
ret = getattr(entry, field, None)
if ret:
return ret
raise Exception('no id field found in entry: {}'.format(entry))
| 1.226563 | 1 |
backend/prog-hist/backend/src/code/test/playground/chapter02_11.py | halilagin/d3studies | 0 | 56836 | import code.book_plots as bp
import code.gh_internal as gh
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
import numpy as np;
from filterpy.discrete_bayes import normalize
def scaled_update(hall, belief, z, prob):
    """Scale belief where the hallway map shows a door, then renormalize.

    Mutates *belief* in place. ``z`` is accepted for signature
    compatibility but is not used by this simplified update.
    """
    likelihood_ratio = prob / (1 - prob)
    belief[hall == 1] *= likelihood_ratio
    normalize(belief)
belief = np.array([0.1]*10)  # uniform prior over the 10 hallway positions
hallway = np.array([1, 1, 0, 0, 0, 0, 0, 0, 1, 0])  # 1 marks a door position
reading = 1  # sensor reports "door seen"
scaled_update(hallway, belief, reading, prob=0.75)
belief /= sum(belief);
print("belief:", belief)
print ("sum = ", sum(belief))
plt.figure()
bp.bar_plot(belief).show()
| 2.09375 | 2 |
hydromet/models/hist_gr4j_model.py | amacd31/hydromet-toolkit | 1 | 56964 | <filename>hydromet/models/hist_gr4j_model.py
import calendar
import numpy as np
from hydromet.models.gr4j_model import GR4J
class HistGR4J(GR4J):
    """GR4J variant whose forecast rainfall ensemble is drawn from history.

    For each day of the forecast month, ensemble members take the rainfall
    observed on the same day of year in each historical year.
    """

    def forecast(self, fc_date, predictors):
        """Run an ensemble forecast beginning at *fc_date*.

        :param fc_date: datetime marking the forecast start.
        :param predictors: dict with pandas Series 'P' (precipitation) and
            'PE' (potential evapotranspiration), indexed by date.
        :return: list of summed simulated flows, one per ensemble member.
        """
        # Warm the model state up to the forecast date.
        warmup_data = {
            'P': predictors['P'][self.warmup_start_date:fc_date],
            'PE': predictors['PE'][self.warmup_start_date:fc_date],
        }
        self.warmup(warmup_data)

        p = predictors['P']
        grp = p.groupby(p.index.dayofyear)

        # Forecast period spans the remainder of the forecast month.
        start_dayofyear = fc_date.utctimetuple().tm_yday
        end_dayofyear = start_dayofyear + calendar.monthrange(fc_date.year, fc_date.month)[1]

        # Build a (years x days) matrix: one row per historical year.
        # Fixes: .ix was removed from pandas (use .loc); dropped a dead
        # ``p_ens = {}`` that was immediately overwritten and an unused
        # ``nens`` local.
        p_ens = []
        for dayofyear in range(start_dayofyear, end_dayofyear):
            p_ens.append(p.loc[grp.groups[dayofyear]].values)
        p_ens = np.array(p_ens).T

        sims = []
        for ens in p_ens:
            fc_data = {
                'P': ens,
                'PE': predictors['PE']
            }
            sims.append(np.sum(self.run(fc_data)))

        return sims
| 1.382813 | 1 |
Cards/helper/test_helper.py | vabene1111/LearningCards | 1 | 57092 | from Cards.models import Question, TestQuestion, Test, QuestionLog
def create_new_test(user, course):
    """Create a Test for *user* on *course* and attach every course question."""
    test = Test()
    test.user = user
    test.course = course
    test.save()

    # Build all TestQuestion rows first, then insert them in one query.
    batch = []
    for question in Question.objects.filter(course=course).all():
        entry = TestQuestion()
        entry.test = test
        entry.question = question
        batch.append(entry)
    TestQuestion.objects.bulk_create(batch)

    return test
def finish_test_question(user, tq, type):
    """Record the answer *type* for test question *tq* and log it for *user*."""
    entry = QuestionLog()
    entry.question = tq.question
    entry.user = user
    entry.type = type
    entry.save()

    tq.type = type
    tq.save()
| 0.96875 | 1 |
sgmcmcjax/examples/sgmcmc_nuts_demo.py | ColCarroll/SGMCMCJax | 0 | 57220 | # copied from https://github.com/probml/pyprobml/blob/master/scripts/sgmcmc_nuts_demo.py
# Compare NUTS, SGLD and Adam on sampling from a multivariate Gaussian
from collections import namedtuple
from typing import Any, Callable, Dict, List, NamedTuple, Optional, Tuple, Union
import jax.numpy as jnp
import optax
from blackjax import nuts, stan_warmup
from jax import jit, random, vmap
from jax.lax import scan
from jax.random import normal, split
from sgmcmcjax.optimizer import build_optax_optimizer
from sgmcmcjax.samplers import build_sgld_sampler
from .sgmcmc_utils import build_nuts_sampler
# We use the 'quickstart' example from
# https://github.com/jeremiecoullon/SGMCMCJax
def loglikelihood(theta, x):
    """Unnormalised Gaussian log-likelihood of observation x with mean theta."""
    residual = x - theta
    return -0.5 * jnp.dot(residual, residual)
def logprior(theta):
    """Zero-mean Gaussian log-prior on theta with precision 0.01 (unnormalised)."""
    quad = jnp.dot(theta, theta)
    return -0.5 * quad * 0.01
# generate dataset: N standard-normal observations around a random D-dim mean
N, D = 1000, 100
key = random.PRNGKey(0)
mu_true = random.normal(key, (D,))
X_data = random.normal(key, shape=(N, D)) + mu_true

# Adam: optimize the log-posterior on 10% minibatches; the optimum
# should recover mu_true to within the 1e-1 tolerance below.
batch_size = int(0.1 * N)
opt = optax.adam(learning_rate=1e-2)
optimizer = build_optax_optimizer(opt, loglikelihood, logprior, (X_data,), batch_size)
Nsamples = 10_000
params, log_post_list = optimizer(key, Nsamples, jnp.zeros(D))
print(log_post_list.shape)
print(params.shape)
assert jnp.allclose(params, mu_true, atol=1e-1)
print("adam test passed")

# SGLD: stochastic-gradient Langevin sampling; the posterior mean of the
# samples should likewise match mu_true.
batch_size = int(0.1 * N)
dt = 1e-5
sampler = build_sgld_sampler(dt, loglikelihood, logprior, (X_data,), batch_size)
Nsamples = 10_000
samples = sampler(key, Nsamples, jnp.zeros(D))
print(samples.shape)
mu_est = jnp.mean(samples, axis=0)
assert jnp.allclose(mu_est, mu_true, atol=1e-1)
print("sgld test passed")

# NUTS / blackjax: full-data No-U-Turn sampler for comparison.
num_warmup = 500
sampler = build_nuts_sampler(num_warmup, loglikelihood, logprior, (X_data,))
Nsamples = 10_000
samples = sampler(key, Nsamples, jnp.zeros(D))
print(samples.shape)
mu_est = jnp.mean(samples, axis=0)
assert jnp.allclose(mu_est, mu_true, atol=1e-1)
print("nuts test passed")
| 2.03125 | 2 |
ascii_to_wide.py | bmintz/python-snippets | 2 | 57348 | <gh_stars>1-10
#!/usr/bin/env python3
# Translation table: printable ASCII (33..126) maps to the Unicode
# fullwidth-forms block (codepoint + 0xFEE0); the ASCII space maps to
# U+3000 IDEOGRAPHIC SPACE instead.
table = {code: chr(code + 0xFEE0) for code in range(33, 127)}
table[ord(' ')] = '\N{IDEOGRAPHIC SPACE}'


def ascii_to_fullwidth(text):
    """Return *text* with printable ASCII replaced by fullwidth equivalents."""
    return text.translate(table)
def main():
    # Smoke test: the expected literal is the fullwidth 'h' (U+FF48).
    assert ascii_to_fullwidth('h') == 'h'

if __name__ == '__main__':
    main()
| 2.234375 | 2 |
data_extraction/run_scrapers.py | kfilyk/quoracle | 0 | 57476 | <filename>data_extraction/run_scrapers.py
from RedditScraper import RedditScraper
from TwitterScraper import TwitterScraper
import os
from os import path
import json
if __name__ == "__main__":
    # Collect articles from both sources and merge them into one list.
    from_reddit = RedditScraper().scrape()
    from_twitter = TwitterScraper().scrape()
    joined_results = from_reddit + from_twitter
    output_filename = "scrapers_output.txt"
    # Remove any previous run's output before writing fresh results.
    if (path.exists(output_filename)):
        os.remove(output_filename)
    with open(output_filename, "w") as f:
        f.write(json.dumps(joined_results))
    print(f"successfully wrote {len(joined_results)} articles to {output_filename}")
users/migrations/0004_auto_20200901_1858.py | yanfreitas/Django-blog-project | 0 | 57604 | # Generated by Django 3.1 on 2020-09-01 21:58
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``Profiles`` model to ``Profile`` (data is preserved)."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('users', '0003_auto_20200830_2005'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Profiles',
            new_name='Profile',
        ),
    ]
| 0.699219 | 1 |
tests/test_jit/perftarget/arraysum.py | jstnlef/zebu-vm | 2 | 57732 | <gh_stars>1-10
# Copyright 2017 The Australian National University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rlib.jit import JitDriver
d = JitDriver(greens=[], reds='auto')
def arraysum(arr, sz):
    """Sum the first *sz* elements of the raw LONGLONG array *arr*.

    A JIT merge point is emitted every iteration so the RPython JIT can
    trace this loop. (Fix: the accumulator was named ``sum``, shadowing
    the builtin; renamed to ``total``.)
    """
    total = rffi.r_longlong(0)
    for i in range(sz):
        d.jit_merge_point()
        total += arr[i]
    return total
def setup(n):
    """Allocate a raw LONGLONG array of length n filled with random values.

    Returns ``(array, n cast to Unsigned)``; the caller must release the
    array with teardown().
    """
    # Bug fix: rand_list_of() returns a plain list, so the previous
    # ``lst, _ = rand_list_of(n)`` unpacking only worked for n == 2 and
    # raised ValueError for any other n.
    lst = rand_list_of(n)
    arr = lltype.malloc(rffi.CArray(rffi.LONGLONG), n, flavor='raw')
    for i, k in enumerate(lst):
        arr[i] = k
    return arr, rffi.cast(lltype.Unsigned, n)
def teardown(arr, n):
    # flavor='raw' allocations are outside the GC and must be freed manually.
    lltype.free(arr, 'raw')
def rand_list_of(n):
    """Return n pseudo-random 64-bit ints from a fixed Mersenne Twister state.

    The RNG state is restored with setstate() before drawing, so every
    call yields the same deterministic sequence (reproducible benchmarks).
    """
    # 32-bit range values extended to 64-bit integers (to avoid overflow
    # in summation).
    from random import randrange, setstate
    init_state = (3, (
        2147483648L, 3430835514L, 2928424416L, 3147699060L, 2823572732L, 2905216632L, 1887281517L, 14272356L,
        1356039141L,
        2741361235L, 1824725388L, 2228169284L, 2679861265L, 3150239284L, 657657570L, 1407124159L, 517316568L,
        653526369L,
        139268705L, 3784719953L, 2212355490L, 3452491289L, 1232629882L, 1791207424L, 2898278956L, 1147783320L,
        1824413680L,
        1993303973L, 2568444883L, 4228847642L, 4163974668L, 385627078L, 3663560714L, 320542554L, 1565882322L,
        3416481154L,
        4219229298L, 315071254L, 778331393L, 3961037651L, 2951403614L, 3355970261L, 102946340L, 2509883952L, 215897963L,
        3361072826L, 689991350L, 3348092598L, 1763608447L, 2140226443L, 3813151178L, 2619956936L, 51244592L,
        2130725065L,
        3867113849L, 1980820881L, 2600246771L, 3207535572L, 257556968L, 2223367443L, 3706150033L, 1711074250L,
        4252385224L,
        3197142331L, 4139558716L, 748471849L, 2281163369L, 2596250092L, 2804492653L, 484240110L, 3726117536L,
        2483815933L,
        2173995598L, 3765136999L, 3178931194L, 1237068319L, 3427263384L, 3958412830L, 2268556676L, 360704423L,
        4113430429L,
        3758882140L, 3743971788L, 1685454939L, 488386L, 3511218911L, 3020688912L, 2168345327L, 3149651862L, 1472484695L,
        2011779229L, 1112533726L, 1873931730L, 2196153055L, 3806225492L, 1515074892L, 251489714L, 1958141723L,
        2081062631L,
        3703490262L, 3211541213L, 1436109217L, 2664448365L, 2350764370L, 1285829042L, 3496997759L, 2306637687L,
        1571644344L,
        1020052455L, 3114491401L, 2994766034L, 1518527036L, 994512437L, 1732585804L, 2089330296L, 2592371643L,
        2377347339L,
        2617648350L, 1478066246L, 389918052L, 1126787130L, 2728695369L, 2921719205L, 3193658789L, 2101782606L,
        4284039483L,
        2704867468L, 3843423543L, 119359906L, 1882384901L, 832276556L, 1862974878L, 1943541262L, 1823624942L,
        2146680272L,
        333006125L, 929197835L, 639017219L, 1640196300L, 1424826762L, 2119569013L, 4259272802L, 2089277168L,
        2030198981L,
        2950559216L, 621654826L, 3452546704L, 4085446289L, 3038316311L, 527272378L, 1679817853L, 450787204L,
        3525043861L,
        3838351358L, 1558592021L, 3649888848L, 3328370698L, 3247166155L, 3855970537L, 1183088418L, 2778702834L,
        2820277014L,
        1530905121L, 1434023607L, 3942716950L, 41643359L, 310637634L, 1537174663L, 4265200088L, 3126624846L,
        2837665903L,
        446994733L, 85970060L, 643115053L, 1751804182L, 1480207958L, 2977093071L, 544778713L, 738954842L, 3370733859L,
        3242319053L, 2707786138L, 4041098196L, 1671493839L, 3420415077L, 2473516599L, 3949211965L, 3686186772L,
        753757988L,
        220738063L, 772481263L, 974568026L, 3190407677L, 480257177L, 3620733162L, 2616878358L, 665763320L, 2808607644L,
        3851308236L, 3633157256L, 4240746864L, 1261222691L, 268963935L, 1449514350L, 4229662564L, 1342533852L,
        1913674460L,
        1761163533L, 1974260074L, 739184472L, 3811507072L, 2880992381L, 3998389163L, 2673626426L, 2212222504L,
        231447607L,
        2608719702L, 3509764733L, 2403318909L, 635983093L, 4233939991L, 2894463467L, 177171270L, 2962364044L,
        1191007101L,
        882222586L, 1004217833L, 717897978L, 2125381922L, 626199402L, 3694698943L, 1373935523L, 762314613L, 2291077454L,
        2111081024L, 3758576304L, 2812129656L, 4067461097L, 3700761868L, 2281420733L, 197217625L, 460620692L,
        506837624L,
        1532931238L, 3872395078L, 3629107738L, 2273221134L, 2086345980L, 1240615886L, 958420495L, 4059583254L,
        3119201875L,
        3742950862L, 891360845L, 2974235885L, 87814219L, 4067521161L, 615939803L, 1881195074L, 2225917026L, 2775128741L,
        2996201447L, 1590546624L, 3960431955L, 1417477945L, 913935155L, 1610033170L, 3212701447L, 2545374014L,
        2887105562L,
        2991635417L, 3194532260L, 1565555757L, 2142474733L, 621483430L, 2268177481L, 919992760L, 2022043644L,
        2756890220L,
        881105937L, 2621060794L, 4262292201L, 480112895L, 2557060162L, 2367031748L, 2172434102L, 296539623L,
        3043643256L,
        59166373L, 2947638193L, 1312917612L, 1798724013L, 75864164L, 339661149L, 289536004L, 422147716L, 1134944052L,
        1095534216L, 1231984277L, 239787072L, 923053211L, 1015393503L, 2558889580L, 4194512643L, 448088150L, 707905706L,
        2649061310L, 3081089715L, 3432955562L, 2217740069L, 1965789353L, 3320360228L, 3625802364L, 2420747908L,
        3116949010L,
        442654625L, 2157578112L, 3603825090L, 3111995525L, 1124579902L, 101836896L, 3297125816L, 136981134L,
        4253748197L,
        3809600572L, 1668193778L, 4146759785L, 3712590372L, 2998653463L, 3032597504L, 1046471011L, 2843821193L,
        802959497L,
        3307715534L, 3226042258L, 1014478160L, 3105844949L, 3209150965L, 610876993L, 2563947590L, 2482526324L,
        3913970138L,
        2812702315L, 4281779167L, 1026357391L, 2579486306L, 402208L, 3457975059L, 1714004950L, 2543595755L, 2421499458L,
        478932497L, 3117588180L, 1565800974L, 1757724858L, 1483685124L, 2262270397L, 3794544469L, 3986696110L,
        2914756339L,
        1952061826L, 2672480198L, 3793151752L, 309930721L, 1861137379L, 94571340L, 1162935802L, 3681554226L,
        4027302061L,
        21079572L, 446709644L, 1587253187L, 1845056582L, 3080553052L, 3575272255L, 2526224735L, 3569822959L,
        2685900491L,
        918305237L, 1399881227L, 1554912161L, 703181091L, 738501299L, 269937670L, 1078548118L, 2313670525L, 3495159622L,
        2659487842L, 11394628L, 1222454456L, 3392065094L, 3426833642L, 1153231613L, 1234517654L, 3144547626L,
        2148039080L,
        3790136587L, 684648337L, 3956093475L, 1384378197L, 2042781475L, 759764431L, 222267088L, 3187778457L,
        3795259108L,
        2817237549L, 3494781277L, 3762880618L, 892345749L, 2153484401L, 721588894L, 779278769L, 3306398772L,
        4221452913L,
        1981375723L, 379087895L, 1604791625L, 1426046977L, 4231163093L, 1344994557L, 1341041093L, 1072537134L,
        1829925137L,
        3791772627L, 3176876700L, 2553745117L, 664821113L, 473469583L, 1076256869L, 2406012795L, 3141453822L,
        4123012649L,
        3058620143L, 1785080140L, 1181483189L, 3587874749L, 1453504375L, 707249496L, 2022787257L, 2436320047L,
        602521701L,
        483826957L, 821599664L, 3333871672L, 3024431570L, 3814441382L, 416508285L, 1217138244L, 3975201118L,
        3077724941L,
        180118569L, 3754556886L, 4121534265L, 3495283397L, 700504668L, 3113972067L, 719371171L, 910731026L, 619936911L,
        2937105529L, 2039892965L, 3853404454L, 3783801801L, 783321997L, 1135195902L, 326690505L, 1774036419L,
        3476057413L,
        1518029608L, 1248626026L, 427510490L, 3443223611L, 4087014505L, 2858955517L, 1918675812L, 3921514056L,
        3929126528L,
        4048889933L, 1583842117L, 3742539544L, 602292017L, 3393759050L, 3929818519L, 3119818281L, 3472644693L,
        1993924627L,
        4163228821L, 2943877721L, 3143487730L, 4087113198L, 1149082355L, 1713272081L, 1243627655L, 3511633996L,
        3358757220L,
        3812981394L, 650044449L, 2143650644L, 3869591312L, 3719322297L, 386030648L, 2633538573L, 672966554L,
        3498396042L,
        3907556L, 2308686209L, 2878779858L, 1475925955L, 2701537395L, 1448018484L, 2962578755L, 1383479284L,
        3731453464L,
        3659512663L, 1521189121L, 843749206L, 2243090279L, 572717972L, 3400421356L, 3440777300L, 1393518699L,
        1681924551L,
        466257295L, 568413244L, 3288530316L, 2951425105L, 2624424893L, 2410788864L, 2243174464L, 1385949609L,
        2454100663L,
        1113953725L, 2127471443L, 1775715557L, 3874125135L, 1901707926L, 3152599339L, 2277843623L, 1941785089L,
        3171888228L,
        802596998L, 3397391306L, 1743834429L, 395463904L, 2099329462L, 3761809163L, 262702111L, 1868879810L,
        2887406426L,
        1160032302L, 4164116477L, 2287740849L, 3312176050L, 747117003L, 4048006270L, 3955419375L, 2724452926L,
        3141695820L,
        791246424L, 524525849L, 1794277132L, 295485241L, 4125127474L, 825108028L, 1582794137L, 1259992755L, 2938829230L,
        912029932L, 1534496985L, 3075283272L, 4052041116L, 1125808104L, 2032938837L, 4008676545L, 1638361535L,
        1649316497L,
        1302633381L, 4221627277L, 1206130263L, 3114681993L, 3409690900L, 3373263243L, 2922903613L, 349048087L,
        4049532385L,
        3458779287L, 1737687814L, 287275672L, 645786941L, 1492233180L, 3925845678L, 3344829077L, 1669219217L,
        665224162L,
        2679234088L, 1986576411L, 50610077L, 1080114376L, 1881648396L, 3818465156L, 1486861008L, 3824208930L,
        1782008170L,
        4115911912L, 656413265L, 771498619L, 2709443211L, 1919820065L, 451888753L, 1449812173L, 2001941180L,
        2997921765L,
        753032713L, 3011517640L, 2386888602L, 3181040472L, 1280522185L, 1036471598L, 1243809973L, 2985144032L,
        2238294821L,
        557934351L, 347132246L, 1797956016L, 624L), None)
    setstate(init_state)
    return [rffi.r_longlong(randrange(-(1 << 31), (1 << 31) - 1)) for _ in range(n)]
def measure(N):
    # Time one arraysum() call over N elements; allocation (setup) and
    # freeing (teardown) happen outside the timed region.
    args = setup(N)
    from time import time
    t0 = time()
    arraysum(*args)
    t1 = time()
    teardown(*args)
    return t0, t1
def rpy_entry(N):
    # RPython-translatable entry point: return the elapsed wall time.
    t0, t1 = measure(N)
    # from rpython.rlib import rfloat
    # print rfloat.double_to_string(t1 - t0, 'e', %(fprec)d, rfloat.DTSF_ADD_DOT_0)
    return t1 - t0
if __name__ == '__main__':
    # Plain (untranslated) run: take N from the command line, print seconds.
    import sys
    t0, t1 = measure(int(sys.argv[1]))
    print '%.15f' % (t1 - t0)
def target(*args):
    # RPython translation hook: export rpy_entry as a C-level symbol and
    # declare its argument types for the translator.
    from rpython.rlib.entrypoint import export_symbol
    export_symbol(rpy_entry)
    return rpy_entry, [int]
Old_Exams/Books.py | bozhikovstanislav/Python-Fundamentals | 0 | 57860 | <filename>Old_Exams/Books.py
class BookStore:
    """A minimal container holding the store's list of books."""

    def __init__(self, book):
        # Attribute name kept as-is for compatibility with existing callers.
        self.booklst = book
class Book:
    """A book with a title, an author, a list of chapters and an int price."""

    def __init__(self, title, author, chapter, price):
        # Coerce price to int (accepts numeric strings) and copy the
        # chapters into a fresh list so the caller's sequence is not shared.
        self.price = int(price)
        self.chapter = list(chapter)
        self.author = author
        self.title = title
# Demo fixtures: three sample books and a store holding all of them.
book1 = Book("<NAME>", "Tangra", ['1', '3', '4', '2'], 1)
book2 = Book("<NAME>", "Vitosha", ['12', '123', '55', '22'], 2)
book3 = Book("<NAME>", "StaraPlanina", ['155', '33', '41', '245'], 3)
lst_book = [book1, book2, book3]
bookstor = BookStore(lst_book)
| 2 | 2 |
LakshmiErrors.py | jakenjarvis/Lakshmi | 1 | 57988 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from discord.ext.commands.errors import CommandError
class PermissionNotFoundException(Exception):
    """Raised when a required permission definition is missing.

    NOTE(review): subclasses Exception, not CommandError, unlike the rest —
    presumably intentional (not a command-flow error); confirm.
    """
    pass


class ArgumentOutOfRangeException(CommandError):
    """Raised when a command argument falls outside its allowed range."""
    pass


class SubcommandNotFoundException(CommandError):
    """Raised when a named subcommand does not exist."""
    pass


class UnsupportedSitesException(CommandError):
    """Raised when a URL points to a site the bot does not support."""
    pass


class NotCallOfCthulhuInvestigatorException(CommandError):
    """Raised when a character sheet is not a Call of Cthulhu investigator."""
    pass


class CharacterNotFoundException(CommandError):
    """Raised when the requested character cannot be found."""
    pass


class ImageNotFoundException(CommandError):
    """Raised when a character image cannot be found."""
    pass
| 1.09375 | 1 |
Match.py | gregorscholz/Pyke | 0 | 58116 | <reponame>gregorscholz/Pyke
import requests
import Pyke
class Match_History:
    """Fetches a summoner's match-id history from the Riot match-v5 endpoint."""

    def __init__(self, puuid: str, region: str = 'europe', start_time: int = None, end_time: int = None, queue: int = None, type: str = None, start: int = 0, count: int = 20) -> None:
        # Helpers are called through the class (not self), so no implicit
        # `self` is bound even though they omit a self parameter.
        __json = Match_History.__match_history_data(puuid, region, start_time, end_time, queue, type, start, count)
        self.match_history = Match_History.__match_history(__json)

    def __match_history_data(puuid: str, region: str, start_time: int, end_time: int, queue: int, type: str, start: int, count: int):
        # NOTE(review): None-valued filters are interpolated literally as the
        # string "None" into the query — presumably the API tolerates this;
        # confirm against the Riot match-v5 spec.
        response = requests.get(f'https://{region}.api.riotgames.com/lol/match/v5/matches/by-puuid/{puuid}/ids?startTime={start_time}&endTime={end_time}&queue={queue}&type={type}&start={start}&count={count}&api_key={Pyke.Pyke.token}')
        response.encoding = 'ISO-8859-1'
        return response.json()

    def __match_history(__json):
        # Returns only the FIRST match id of the response list — TODO confirm
        # that exposing a single id (not the whole list) is intended.
        return __json[0]
class Match:
    """A single League match fetched from the Riot match-v5 endpoint.

    Fetching happens once in __init__; the metadata/info fields are exposed
    as plain instance attributes (same attribute names as before).

    Fixes over the previous revision:
      * ``Match.__match_data()`` was called without its required ``id`` and
        ``region`` arguments.
      * The per-field helpers were defined without ``self`` but invoked as
        ``self.__helper(...)``, which binds the instance as the first
        argument and raised TypeError; the fields are now read directly
        from the response dicts.
    """

    def __init__(self, id: str, region: str = 'europe') -> None:
        __json = Match.__match_data(id, region)
        meta = __json['metadata']
        info = __json['info']
        # metadata
        self.data_version = meta['dataVersion']          # match data version
        self.match_id = id                               # match id
        self.participants_puuids = meta['participants']  # list of participant PUUIDs
        # info
        self.game_creation = info['gameCreation']        # Unix ts: game created on the game server
        self.game_duration = info['gameDuration']        # seconds post-patch 11.20, ms before (see Riot docs)
        self.game_end_timestamp = info['gameEndTimestamp']  # Unix ts for match end (added in 11.20)
        self.game_id = info['gameId']
        self.game_mode = info['gameMode']                # refer to the Game Constants documentation
        self.game_name = info['gameName']
        self.game_start_timestamp = info['gameStartTimestamp']  # Unix ts for match start
        self.game_type = info['gameType']
        self.game_version = info['gameVersion']          # first two parts identify the patch
        self.map_id = info['mapId']                      # refer to the Game Constants documentation
        self.participants = info['participants']
        self.platform_id = info['platformId']            # platform where the match was played
        self.queue_id = info['queueId']                  # refer to the Game Constants documentation
        self.teams = info['teams']
        self.tournament_code = info['tournamentCode']    # tournament code (added in 11.13)

    @staticmethod
    def __match_data(id: str, region: str):
        """GET the raw match JSON for ``id`` from the given routing region."""
        response = requests.get(f'https://{region}.api.riotgames.com/lol/match/v5/matches/{id}?api_key={Pyke.Pyke.token}')
        response.encoding = 'ISO-8859-1'
        return response.json()
Missions_to_Mars/BCMmarsscrape.py | ITDork/Web-Scraping-Challenge | 0 | 58244 | <gh_stars>0
from bs4 import BeautifulSoup as bs
from splinter import Browser
import os
import pandas as pd
import time
import requests
from sqlalchemy import create_engine
from urllib.parse import urlsplit
from selenium import webdriver
def init_browser():
    """Return a visible Chrome splinter browser using the local chromedriver."""
    executable_path = {"executable_path": "/usr/local/bin/chromedriver"}
    return Browser("chrome", **executable_path, headless=False)
def scrape ():
    """Scrape Mars news, featured image, weather, facts and hemisphere photos.

    Returns a dict ready for storage/rendering by the Flask app. Drives a
    real browser and performs live web requests, so it is slow and fragile
    against page-layout changes.
    """
    browser = init_browser()
    marsinfo = {}

    # --- Latest news headline and teaser ---
    # Visit the web page
    mars_url = 'https://mars.nasa.gov/news/'
    browser.visit(mars_url)
    # give page time to load
    time.sleep(3)
    #using bs to write it into html
    html = browser.html
    soup = bs(html,'html.parser')
    news_title = soup.find('div',class_= 'content_title').text
    news_paragraph = soup.find('div', class_= 'article_teaser_body').text

    # --- Featured JPL image ---
    image_url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
    browser.visit(image_url)
    time.sleep(2)
    #Base url
    base_url = 'https://www.jpl.nasa.gov'
    #get image url using BeautifulSoup
    image_url = browser.html
    soup = bs(image_url, 'html.parser')
    img_url = soup.find(id='full_image').get('data-fancybox-href')
    fullimgurl = base_url + img_url

    #get mars weather's latest tweet from the website
    weather_url = 'https://twitter.com/marswxreport?lang=en'
    browser.visit(weather_url)
    time.sleep(2)
    weather_html = browser.html
    soup = bs(weather_html, 'html.parser')
    marsweather = soup.find('p', class_= 'TweetTextSize TweetTextSize--normal js-tweet-text tweet-text').text

    # --- Mars fact table, rendered to a single-line HTML string ---
    marsfacts_url = 'https://space-facts.com/mars/'
    table = pd.read_html(marsfacts_url)
    table[0]  # NOTE(review): no-op expression; table[1] below is the one used
    marsfacts_df = table[1]
    marsfacts_df.columns = ['Parameter', 'Values']
    marsfacts_df.set_index(['Parameter'])  # NOTE(review): result discarded (not inplace)
    marsfacts_html = marsfacts_df.to_html(index=False)
    marsfacts_html = marsfacts_html.replace("\n", "")
    marsfacts_html  # NOTE(review): no-op expression

    # --- Hemisphere titles and full-resolution image URLs ---
    hemisphere_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(hemisphere_url)
    time.sleep(2)
    #Getting the base url
    # hemisphere_base_url = 'https://astrogeology.usgs.gov'
    # scrape images of Mars' hemispheres from the USGS site
    mars_hemisphere_url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    hemisphere_results = []
    for i in range(1,9,2):
        hemisphere_dict = {}
        browser.visit(mars_hemisphere_url)
        time.sleep(1)
        hemispheres_html = browser.html
        hemispheres_soup = bs(hemispheres_html, 'html.parser')
        hemispherenamelinks = hemispheres_soup.find_all('a', class_='product-item')
        # NOTE(review): str.strip('Enhanced') strips those *characters* from
        # both ends, not the word "Enhanced" — fragile if titles change.
        hemispherename = hemispherenamelinks[i].text.strip('Enhanced')
        linkdetail = browser.find_by_css('a.product-item')
        linkdetail[i].click()
        time.sleep(1)
        browser.find_link_by_text('Sample').first.click()
        time.sleep(1)
        # The sample image opens in a new window: grab its HTML, then close it.
        browser.windows.current = browser.windows[-1]
        hemisphereimgage_html = browser.html
        browser.windows.current = browser.windows[0]
        browser.windows[-1].close()
        hemisphereimgage_soup = bs(hemisphereimgage_html, 'html.parser')
        hemisphereimage_path = hemisphereimgage_soup.find('img')['src']
        hemisphere_dict['title'] = hemispherename.strip()
        hemisphere_dict['img_url'] = hemisphereimage_path
        hemisphere_results.append(hemisphere_dict)

    # create a dictionary containing the collected data for later use in flask app
    marsinfo={"news_title":news_title,
              "news_paragraph":news_paragraph,
              "fullimgurl":fullimgurl,
              "marsweather":marsweather,
              "marsfacts_html":marsfacts_html,
              "hemisphere_results":hemisphere_results
             }
    #Close the browser after scraping
    browser.quit()
    #Return data
    return marsinfo
algebra/fundamentals.py | gaosanyong/cp_algorithms | 0 | 58372 | """
NAME - fundamentals
DESCRIPTION
This module implements below algorithms:
* binary exponentiation
* Euclidean algorithms for computing the greatest common divisor
* extended Euclidean algorithm
* linear Diophantine equations
* Fibonacci Numbers
o closed-form solution
o fast doubling method
FUNCTIONS
binpow(x, n, m)
Return x**n % m.
gcd(x, y)
Return greatest common divisor via Euclidean algo.
lcm(x, y)
Return least common multiple.
choose(n, k)
Return binomial coefficient of n choose k.
Catalan(n)
Return nth Catalan number.
"""
"""
BINARY EXPONENTIATION
Binary exponentiation calculates x**n % m in O(logN).
"""
from typing import List, Tuple
def binpow(x: int, n: int, m: int) -> int:
    """Return x**n % m using binary (square-and-multiply) exponentiation.

    Runs in O(log n) multiplications; n is expected to be non-negative.
    """
    result = 1
    base = x
    while n:
        n, bit = divmod(n, 2)
        if bit:
            result = result * base % m
        base = base * base % m
    return result
"""
EUCLIDEAN ALGORITHM FOR COMPUTING THE GREATEST COMMON DIVISOR
Euclidean algorithm calculates greatest common divisor in O(log(min(M, N))).
"""
def gcd(x: int, y: int) -> int:
    """Return the greatest common divisor of x and y (Euclidean algorithm).

    The result is non-negative; gcd(0, 0) == 0.
    """
    return abs(x) if y == 0 else gcd(y, x % y)
def lcm(x: int, y: int) -> int:
    """Return the least common multiple of x and y (0 if either is 0)."""
    if x == 0 or y == 0:
        return 0
    # Inline Euclidean gcd on the magnitudes.
    a, b = abs(x), abs(y)
    while b:
        a, b = b, a % b
    return abs(x * y) // a
"""
EXTENDED EUCLIDEAN ALGORITHM
Extended Euclidean algorithm calculates greatest common divisor
and returns coefficients to arrive at it in O(log(min(M, N))).
"""
def euclidean(x: int, y: int) -> List[int]:
    """Return (g, a, b) such that a * x + b * y == g == gcd(x, y).

    Extended Euclidean algorithm. The previous revision computed the
    quotient as int(x / y), which goes through a float and silently loses
    precision once the operands exceed ~2**53; divmod keeps everything in
    exact integer arithmetic.
    """
    a, aa = 1, 0
    b, bb = 0, 1
    while y:
        q, r = divmod(x, y)
        # Update the Bezout coefficients alongside the remainders.
        a, aa = aa, a - q * aa
        b, bb = bb, b - q * bb
        x, y = y, r
    return x, a, b
"""
LINEAR DIOPHANTINE EQUATIONS (a * x + b * y == c)
1) finding one solution
2) finding all solutions
3) finding the number of solutions and the solutions themselves in a given interval
4) finding a solution with minimum value of x+y
"""
"""
FIBONACCI NUMBERS
Cassini's identity: F(n-1)*F(n+1) - F(n)*F(n) = (-1)**n
the "addition" rule: F(n+k) = F(k)*F(n+1) + F(k-1)*F(n) or F(2*n) = F(n)*(F(n-1) + F(n+1))
GCD identity: gcd(F(m), F(n)) = F(gcd(m, n))
Zeckendorf's theorem:
Any natural number n can be uniquely represented as a sum of Fibonacci numbers
n = F(k1) + F(k2) + … + F(kr)
such that k1 ≥ k2+2, k2 ≥ k3+2, …, kr ≥ 2 (i.e.: the representation cannot use two consecutive Fibonacci numbers).
"""
def fibonacci(n):
    """Return the nth Fibonacci number via Binet's closed form (as a float).

    Float rounding makes this inexact for large n. The previous revision
    referenced ``sqrt`` without importing it (NameError at call time); it is
    now bound locally so the function works standalone.
    """
    from math import sqrt
    return (((1+sqrt(5))/2)**n - ((1-sqrt(5))/2)**n) / sqrt(5)
def fibonacci(n):
    """Return the nth Fibonacci number (rounded Binet formula).

    Exact while the float formula stays within rounding tolerance. The
    previous revision used ``sqrt`` without importing it; bind it locally.
    """
    from math import sqrt
    return round(((1+sqrt(5))/2)**n/sqrt(5))
def fibonacci(n: int) -> Tuple[int, int]:
    """
    Return (F(n), F(n+1)) as a pair via the "fast doubling method".

    F(2*k)     = F(k) * (2*F(k+1) - F(k))
    F(2*k + 1) = F(k)*F(k) + F(k+1)*F(k+1)

    Exact for arbitrarily large n (pure integer arithmetic, O(log n)
    recursion depth). The previous revision annotated the return as
    Tuple[int] without importing Tuple from typing.
    """
    if n == 0:
        return (0, 1)
    x, y = fibonacci(n // 2)
    xx = x * (2 * y - x)
    yy = x * x + y * y
    return (yy, xx + yy) if n & 1 else (xx, yy)
spacy_legacy/architectures/textcat.py | rspeer/spacy-legacy | 0 | 58500 | from typing import Optional, List
from thinc.types import Floats2d
from thinc.api import Model, with_cpu
from spacy.attrs import ID, ORTH, PREFIX, SUFFIX, SHAPE, LOWER
from spacy.util import registry
from spacy.tokens import Doc
# TODO: replace with registered layer after spacy v3.0.7
from spacy.ml import extract_ngrams
def TextCatCNN_v1(
    tok2vec: Model, exclusive_classes: bool, nO: Optional[int] = None
) -> Model[List[Doc], Floats2d]:
    """
    Build a simple CNN text classifier, given a token-to-vector model as inputs.
    If exclusive_classes=True, a softmax non-linearity is applied, so that the
    outputs sum to 1. If exclusive_classes=False, a logistic non-linearity
    is applied instead, so that outputs are in the range [0, 1].
    """
    # Layers are resolved through the registry so this legacy architecture
    # keeps working across thinc/spaCy versions.
    chain = registry.get("layers", "chain.v1")
    reduce_mean = registry.get("layers", "reduce_mean.v1")
    Logistic = registry.get("layers", "Logistic.v1")
    Softmax = registry.get("layers", "Softmax.v1")
    Linear = registry.get("layers", "Linear.v1")
    list2ragged = registry.get("layers", "list2ragged.v1")
    # extract_ngrams = registry.get("layers", "spacy.extract_ngrams.v1")
    with Model.define_operators({">>": chain}):
        # Mean-pool token vectors into one vector per document.
        cnn = tok2vec >> list2ragged() >> reduce_mean()
        if exclusive_classes:
            output_layer = Softmax(nO=nO, nI=tok2vec.maybe_get_dim("nO"))
            model = cnn >> output_layer
            model.set_ref("output_layer", output_layer)
        else:
            linear_layer = Linear(nO=nO, nI=tok2vec.maybe_get_dim("nO"))
            model = cnn >> linear_layer >> Logistic()
            model.set_ref("output_layer", linear_layer)
        model.set_ref("tok2vec", tok2vec)
        # NOTE(review): unguarded set_dim — presumably nO is always concrete
        # here (unlike the has_dim guard used in TextCatEnsemble_v1); confirm.
        model.set_dim("nO", nO)
        model.attrs["multi_label"] = not exclusive_classes
    return model
def TextCatBOW_v1(
    exclusive_classes: bool,
    ngram_size: int,
    no_output_layer: bool,
    nO: Optional[int] = None,
) -> Model[List[Doc], Floats2d]:
    """Build a bag-of-words text classifier over hashed n-gram features.

    The sparse linear layer (and optional output activation) is forced onto
    the CPU regardless of the active backend.
    """
    chain = registry.get("layers", "chain.v1")
    Logistic = registry.get("layers", "Logistic.v1")
    SparseLinear = registry.get("layers", "SparseLinear.v1")
    softmax_activation = registry.get("layers", "softmax_activation.v1")
    with Model.define_operators({">>": chain}):
        sparse_linear = SparseLinear(nO)
        model = extract_ngrams(ngram_size, attr=ORTH) >> sparse_linear
        model = with_cpu(model, model.ops)
        if not no_output_layer:
            # Softmax for mutually exclusive labels, logistic otherwise.
            output_layer = softmax_activation() if exclusive_classes else Logistic()
            model = model >> with_cpu(output_layer, output_layer.ops)
        model.set_ref("output_layer", sparse_linear)
        model.attrs["multi_label"] = not exclusive_classes
    return model
def TextCatEnsemble_v1(
    width: int,
    embed_size: int,
    pretrained_vectors: Optional[bool],
    exclusive_classes: bool,
    ngram_size: int,
    window_size: int,
    conv_depth: int,
    dropout: Optional[float],
    nO: Optional[int] = None,
) -> Model:
    """Legacy ensemble text classifier: a CNN-over-tok2vec model combined
    (concatenated) with a bag-of-words linear model, followed by a shared
    output layer.
    """
    # Don't document this yet, I'm not sure it's right.
    HashEmbed = registry.get("layers", "HashEmbed.v1")
    FeatureExtractor = registry.get("layers", "spacy.FeatureExtractor.v1")
    Maxout = registry.get("layers", "Maxout.v1")
    StaticVectors = registry.get("layers", "spacy.StaticVectors.v1")
    Softmax = registry.get("layers", "Softmax.v1")
    Linear = registry.get("layers", "Linear.v1")
    ParametricAttention = registry.get("layers", "ParametricAttention.v1")
    Dropout = registry.get("layers", "Dropout.v1")
    Logistic = registry.get("layers", "Logistic.v1")
    build_bow_text_classifier = registry.get("architectures", "spacy.TextCatBOW.v1")
    list2ragged = registry.get("layers", "list2ragged.v1")
    chain = registry.get("layers", "chain.v1")
    concatenate = registry.get("layers", "concatenate.v1")
    clone = registry.get("layers", "clone.v1")
    reduce_sum = registry.get("layers", "reduce_sum.v1")
    with_array = registry.get("layers", "with_array.v1")
    uniqued = registry.get("layers", "uniqued.v1")
    residual = registry.get("layers", "residual.v1")
    expand_window = registry.get("layers", "expand_window.v1")
    cols = [ORTH, LOWER, PREFIX, SUFFIX, SHAPE, ID]
    with Model.define_operators({">>": chain, "|": concatenate, "**": clone}):
        # Hash embeddings for several lexical attributes; fixed seeds keep
        # the embedding tables reproducible.
        lower = HashEmbed(
            nO=width, nV=embed_size, column=cols.index(LOWER), dropout=dropout, seed=10
        )
        prefix = HashEmbed(
            nO=width // 2,
            nV=embed_size,
            column=cols.index(PREFIX),
            dropout=dropout,
            seed=11,
        )
        suffix = HashEmbed(
            nO=width // 2,
            nV=embed_size,
            column=cols.index(SUFFIX),
            dropout=dropout,
            seed=12,
        )
        shape = HashEmbed(
            nO=width // 2,
            nV=embed_size,
            column=cols.index(SHAPE),
            dropout=dropout,
            seed=13,
        )
        width_nI = sum(layer.get_dim("nO") for layer in [lower, prefix, suffix, shape])
        trained_vectors = FeatureExtractor(cols) >> with_array(
            uniqued(
                (lower | prefix | suffix | shape)
                >> Maxout(nO=width, nI=width_nI, normalize=True),
                column=cols.index(ORTH),
            )
        )
        if pretrained_vectors:
            # Concatenate static (pretrained) vectors onto the trained ones.
            static_vectors = StaticVectors(width)
            vector_layer = trained_vectors | static_vectors
            vectors_width = width * 2
        else:
            vector_layer = trained_vectors
            vectors_width = width
        # CNN encoder: conv_depth residual blocks over an expanding window.
        tok2vec = vector_layer >> with_array(
            Maxout(width, vectors_width, normalize=True)
            >> residual(
                (
                    expand_window(window_size=window_size)
                    >> Maxout(
                        nO=width, nI=width * ((window_size * 2) + 1), normalize=True
                    )
                )
            )
            ** conv_depth,
            pad=conv_depth,
        )
        # Attention-pooled CNN branch producing one score vector per doc.
        cnn_model = (
            tok2vec
            >> list2ragged()
            >> ParametricAttention(width)
            >> reduce_sum()
            >> residual(Maxout(nO=width, nI=width))
            >> Linear(nO=nO, nI=width)
            >> Dropout(0.0)
        )
        # Bag-of-words branch (always with its own output layer).
        linear_model = build_bow_text_classifier(
            nO=nO,
            ngram_size=ngram_size,
            exclusive_classes=exclusive_classes,
            no_output_layer=False,
        )
        nO_double = nO * 2 if nO else None
        if exclusive_classes:
            output_layer = Softmax(nO=nO, nI=nO_double)
        else:
            output_layer = Linear(nO=nO, nI=nO_double) >> Dropout(0.0) >> Logistic()
        model = (linear_model | cnn_model) >> output_layer
        model.set_ref("tok2vec", tok2vec)
        if model.has_dim("nO") is not False:
            model.set_dim("nO", nO)
        model.set_ref("output_layer", linear_model.get_ref("output_layer"))
        model.attrs["multi_label"] = not exclusive_classes
    return model
| 1.9375 | 2 |
test/unittest/test_get_fence_array.py | quincy-125/DigiPath_MLTK | 1 | 58628 | <filename>test/unittest/test_get_fence_array.py<gh_stars>1-10
import unittest
from unittest import TestCase
import numpy as np
import sys
try:
from digipath_mltk.toolkit import get_fence_array
print('using package installation of digipath_mltk ')
except:
sys.path.insert(0, '../digipath_mltk')
from toolkit import get_fence_array
pass
class Test_fence_array_output(TestCase):
    """Checks get_fence_array against a hand-computed reference fencing."""

    def setUp(self):
        # 100 units split into 10 patches of length 10.
        self.overall_length = 100
        self.patch_length = 10
        # Expected inclusive [start, end] index pairs for each patch.
        self.fence_array = np.array([ [ 0, 9], [10, 19], [20, 29], [30, 39], [40, 49],
                                      [50, 59], [60, 69], [70, 79], [80, 89], [90, 99] ] )

    def tearDown(self):
        del self.overall_length
        del self.patch_length
        del self.fence_array

    def test_get_fence_array(self):
        fence_array = get_fence_array(self.patch_length, self.overall_length)
        # Element-wise comparison: every pair must match the reference.
        self.assertTrue((fence_array == self.fence_array).all())
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 1.304688 | 1 |
i2c-hats_video_1/test3.py | raspihats/test_scripts | 0 | 58756 | <filename>i2c-hats_video_1/test3.py
from raspihats.i2c_hats import Rly10, Di16
from time import sleep
# Two relay boards and one digital-input board on the I2C bus.
board1 = Rly10(0x50)
board2 = Rly10(0x51)
board3 = Di16(0x40)

# Forever: mirror digital input Di1.1 onto one relay of each board,
# polling roughly every 30 ms.
while True:
    state = board3.di_get_state(0) # can use label 'Di1.1' instead of index 0
    board1.do_set_state(0, state) # can use label 'Rly1' instead of index 0
    board2.do_set_state(1, state) # can use label 'Rly2' instead of index 1
    sleep(0.03)
| 1.234375 | 1 |
wmark/wmark.py | stur86/watermarker | 0 | 58884 | <gh_stars>0
# Python 2-to-3 compatibility code
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from PIL import Image
def watermark_photo(photo, wmark, targ_rect=(0.1, 0.1, 0.9, 0.9),
                    fill_method=0, rescaling=Image.BICUBIC):
    """
    Apply a watermark to a photo

    Args:
    |   photo (PIL.Image.Image): the photo to watermark
    |   wmark (PIL.Image.Image): the watermark image
    |   targ_rect ([float]*4): the rectangle (in fractional coordinates) over
    |                          which to apply the watermark
    |   fill_method (int): the method with which to fill the target rectangle:
    |                      0 (default) - always resize the one that is too big
    |                      1 - resize photo to fit watermark
    |                      2 - resize watermark to fit photo
    |   rescaling (PIL resize method): method with which to resize the image.
    |                                  Default is BICUBIC

    Returns:
    |   photo_wmarked (PIL.Image.Image): the watermarked photo

    Raises:
    |   TypeError: if either input is not a PIL Image
    |   RuntimeError: if the watermark does not fit inside the photo
    """
    # Both have to be PIL Images
    if (not isinstance(photo, Image.Image) or
            not isinstance(wmark, Image.Image)):
        raise TypeError('Photos passed are not PIL images')

    # Sizes of photo and watermark
    sp = photo.size
    sw = wmark.size

    # Target rectangle in pixels, and the factor by which the watermark
    # over/undershoots it (scaling > 1 means the watermark is too big)
    tr = [int(sp[i % 2]*t) for i, t in enumerate(targ_rect)]
    tr_size = [tr[2]-tr[0], tr[3]-tr[1]]
    ratio = [(sw[i]/t) for i, t in enumerate(tr_size)]
    scaling = max(ratio)

    # Resize photo and watermark as needed
    if fill_method == 0:
        if (scaling > 1):
            wmark = wmark.resize([int(s/scaling) for s in sw], rescaling)
            sw = wmark.size
        else:
            photo = photo.resize([int(s*scaling) for s in sp], rescaling)
            sp = photo.size
    elif fill_method == 1:
        photo = photo.resize([int(s*scaling) for s in sp], rescaling)
        sp = photo.size
    elif fill_method == 2:
        wmark = wmark.resize([int(s/scaling) for s in sw], rescaling)
        sw = wmark.size

    # alpha_composite requires BOTH images in RGBA mode; previously only the
    # photo was converted, so an RGB watermark raised ValueError (which was
    # then misreported as "Watermark is too big").
    photo = photo.convert('RGBA')
    wmark = wmark.convert('RGBA')

    # Now apply watermark
    try:
        photo.alpha_composite(wmark, dest=(int(targ_rect[0]*sp[0]),
                                           int(targ_rect[1]*sp[1])))
    except ValueError:
        # alpha_composite raises ValueError when the source extends past the
        # destination bounds
        raise RuntimeError('Watermark is too big')
    return photo
| 2.9375 | 3 |
made_with_twd_project/showcase/forms.py | xerlivex/tango-with-django | 1 | 59012 | <gh_stars>1-10
__author__ = 'leif'
from django.contrib.auth.models import User
from django import forms
from models import Team, Rating, Demo, RATING_CHOICES
class UserForm(forms.ModelForm):
    """Registration form exposing username, email and password for User."""
    password = forms.CharField(widget=forms.PasswordInput)  # masked input
    username = forms.CharField()
    email = forms.EmailField()

    class Meta:
        model = User
        fields = ('username','email','password')
class TeamForm(forms.ModelForm):
    """Form for creating/editing a Team; the owning user is set in the view."""
    name = forms.CharField(label='Team Name', widget=forms.TextInput(attrs={'size':'64','maxlength':'512'}))
    logo = forms.ImageField(label='Team Logo')
    members = forms.CharField(label='Team Members',widget=forms.TextInput(attrs={'size':'128','maxlength':'512'}))
    photo = forms.ImageField(label='Photo of Team Members')

    class Meta:
        model = Team
        # `user` is excluded because it is assigned from request.user.
        exclude = ('user',)
class RatingForm(forms.ModelForm):
    """Form collecting a comment and a score for a demo."""
    comment = forms.CharField(label='What do you think of this app?', widget=forms.Textarea(attrs={'rows':'5','maxlength':'512'}))
    score = forms.ChoiceField(label='How do you rate this app?', choices = RATING_CHOICES)

    class Meta:
        model = Rating
        # Rater and demo are filled in by the view, not the user.
        exclude = ('rater','demo',)
class DemoForm(forms.ModelForm):
    """Form for submitting a demo; team and rating tallies are set server-side."""
    class Meta:
        model = Demo
        exclude = ('team','rating_count','rating_sum',)
| 1.492188 | 1 |
dtect_connector/UUIDTools.py | tetradsensors/gcp-functions | 0 | 59140 | <reponame>tetradsensors/gcp-functions<filename>dtect_connector/UUIDTools.py<gh_stars>0
import uuid
from enum import Enum
class UUIDInstanceTypes(Enum):
    """Kinds of entities a UUID can be generated for."""
    DEVICE = 0
    SENSOR = 1
    COMPONENT = 2
    MESSAGE = 3


def UUIDGenerator(
        instance_type,
        make=None,
        model=None,
        serial=None,
        sensor=None,
        component_name=None,
        component_type=None
):
    """Generate a UUID for the given instance type.

    DEVICE/SENSOR/COMPONENT produce deterministic UUIDv5 values derived
    from fixed namespaces and the identifying fields; MESSAGE produces a
    random UUIDv4. Returns None when the required fields for the chosen
    type are missing/empty.
    """
    sensor_namespace = uuid.UUID(hex='5a4af76d-acb4-4f04-9dbf-76f8e071a4bc')
    device_namespace = uuid.UUID(hex='55e83bb7-1066-4319-9a75-c344164067dd')
    algorithm_namespace = uuid.UUID(hex='dd6654f6-b08c-409c-b440-949ae96c1b08')  # currently unused
    component_namespace = sensor_namespace  # components share the sensor namespace

    def derive(namespace, first, second, third):
        # Deterministic name-based UUID over the underscore-joined fields.
        return uuid.uuid5(namespace, f'{first}_{second}_{third}')

    assert instance_type in UUIDInstanceTypes

    if instance_type == UUIDInstanceTypes.DEVICE:
        if not (make and model and serial):
            return None
        return derive(device_namespace, make, model, serial)
    elif instance_type == UUIDInstanceTypes.SENSOR:
        if not (make and model and serial):
            return None
        return derive(sensor_namespace, make, model, serial)
    elif instance_type == UUIDInstanceTypes.COMPONENT:
        if not (sensor and component_name and component_type):
            return None
        # Note the field order: sensor, then type, then name.
        return derive(component_namespace, sensor, component_type, component_name)
    elif instance_type == UUIDInstanceTypes.MESSAGE:
        return uuid.uuid4()
    else:
        raise ValueError("bad instance_type")
| 2.015625 | 2 |
srt_modules/light_sources.py | sjkcarnahan/simple_ray_trace | 0 | 59268 | '''
<NAME>
simple ray trace - tools and classes to specify and instantiate rays
'''
import numpy as np
from srt_modules.useful_math import euler1232C
class Ray:
    """A bundle of rays: 3 x N start positions plus matching directions."""

    def __init__(self, pos=None, dirs=None):
        self.X = pos   # 3 x N position vectors of rays
        self.d = dirs  # 3 x N direction vectors, in the same frame

    def set_pos(self, ray_starts):
        """Overwrite the ray start positions."""
        self.X = ray_starts

    def set_dir(self, ray_dirs):
        """Overwrite the ray direction vectors."""
        self.d = ray_dirs
class AngledCircleRayDef:
    """Inputs describing a light source made of concentric circles of rays.

    For a less naive generation of concentric circles of rays, vary the
    number of rays with sqrt(radius) of each ring.
    """

    def __init__(self):
        self.rad = 0.5      # [m] radius of the largest circle of rays
        # [arc sec] ray angles w.r.t. the instrument primary axis; a list of
        # angles yields one ray set per angle for separate experiment runs.
        self.angles = [0.]
        self.num_circ = 15  # number of concentric circles
        self.per_circ = 150 # number of rays per circle
def make_angled_circle_rays(inputs):
    """Generate one Ray set per requested angle from an AngledCircleRayDef.

    Each set is a pattern of concentric circles of rays starting in the lab
    y-z plane, with directions tilted from the x-axis by the given angle
    [arc sec] via the 1-2-3 Euler rotation helper.
    """
    rad_inc = inputs.rad / inputs.num_circ  # radius increment
    theta_inc = np.pi * 2 / inputs.per_circ  # angle increment
    ray_set_list = []  # set of sets of start points
    for angle in inputs.angles:
        rays = []
        angle = angle / 3600. * np.pi / 180.  # convert from arc sec to radians
        for i in range(inputs.num_circ):
            r = rad_inc * i
            for j in range(inputs.per_circ):
                # note x = 0 always. We assume the rays start at the y-z plane in the lab frame.
                x, y, z = 0., r * np.cos(theta_inc * j), r * np.sin(theta_inc * j)
                rays.append(np.array([x, y, z]))
        rays = np.array(rays).transpose()
        ray_dirs = np.array([np.array([1, 0, 0])] * np.shape(rays)[1]).transpose()  # rays initialize down x-axis
        DCM = euler1232C([0., 0., angle]).transpose()
        ray_dirs = np.dot(DCM, ray_dirs)  # rays rotated by given angle
        ray_set_list.append(Ray(rays, ray_dirs))
    return ray_set_list  # here we have a list of ray sets. one set per angle given. many rays per set
def make_one_edge_ray(rad, angle):
    """Build a single Ray starting at the edge of the generating circle.

    rad: radius of the primary [m]; angle: desired ray angle [arc sec]
    relative to the primary centerline. Useful for checking the secondary
    diameter.
    """
    start = np.array([0., rad, 0.]).reshape([3, 1])
    theta = angle / 3600. * np.pi / 180.  # arc sec -> radians
    direction = np.array([np.cos(theta), -np.sin(theta), 0]).reshape([3, 1])
    return Ray(start, direction)
Data Preprocessing/Data Denoising/TechIndicator/__init__.py | xuyuanjian/Dynamic-Portfolio-Optimization | 1 | 59396 | from TechIndicator.BasicIndicator import *
from TechIndicator.TAIndicator import *
from TechIndicator.Denoising import * | 0.201172 | 0 |
setup.py | cedwards036/JHUHandshakeDataTools | 0 | 59524 | <reponame>cedwards036/JHUHandshakeDataTools
import pathlib
from setuptools import setup, find_packages
from jhu_handshake_data_tools import __version__, __author__, __email__
# The long description shown on PyPI is taken from the package README.
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()

setup(
    name="jhu_handshake_data_tools",
    version=__version__,  # version/author metadata sourced from the package
    description="A library for cleaning and working with majors from JHU's Handshake environment",
    long_description=README,
    url="https://github.com/cedwards036/JHUHandshakeDataTools",
    author=__author__,
    author_email=__email__,
    license="MIT",
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
    ],
    packages=find_packages(),
    include_package_data=True
)
| 1.09375 | 1 |
Discord/Controller/Economy.py | Maxim-Beglyanov/MorjWPG | 0 | 59652 | <reponame>Maxim-Beglyanov/MorjWPG<filename>Discord/Controller/Economy.py
import sys; sys.path.append('..'); sys.path.append('.')
from nextcord import Interaction, Member
from Service.Economy import Economy
from Service.Country import OneCountry
from Discord.Cogs.Cog import MyCog
from Discord.Controller.defaults import CountryParameters
async def get_balance(
        inter: Interaction, cog: MyCog,
        user: Member
):
    """Show a player's money and income in a bot reply."""
    # Resolve the target player (defaulting per cog.get_player semantics).
    user = await cog.get_player(inter, user)
    country = OneCountry(cog.get_country_name(user))
    economy = Economy(country)
    money = economy.money
    income = economy.income
    await cog.send(inter, 'Balance', f'Деньги: {money}, Доход: {income}', user)
async def edit_money(
        inter: Interaction, cog: MyCog,
        country_parameters: CountryParameters,
        money: float|int
):
    """Adjust the selected country's money by the given (possibly negative) amount."""
    economy = Economy(country_parameters.as_country(inter, cog))
    economy.edit_money(money)
    await cog.send(inter, 'Edit Money', 'Деньги были изменены')
async def delete_money(
        inter: Interaction, cog: MyCog,
        country_parameters: CountryParameters
):
    """Wipe the money of the selected country/countries."""
    economy = Economy(country_parameters.as_country(inter, cog))
    economy.delete_money()
    await cog.send(inter, 'Delete Money', 'Деньги были у всех удалены')
async def pay(
        inter: Interaction, cog: MyCog,
        payee: Member, money: float
):
    """Transfer money from the invoking player's country to the payee's country."""
    cog.check_player(payee)  # raises if the payee is not a registered player
    country_payer = OneCountry(cog.get_country_name(inter.user))
    country_payee = OneCountry(cog.get_country_name(payee))
    economy = Economy(country_payer)
    economy.pay(country_payee, money)
    await cog.send(inter, 'Pay', 'Деньги переведены', inter.user)
| 1.382813 | 1 |
community_detection/nclusters.py | pelperscience/arctic-connectivity | 0 | 59780 | <gh_stars>0
"""Group communities in different solutions in clusters based on the Jaccard distance between communities."""
import numpy as np
from scipy import sparse
import xarray as xr
import pandas as pd
from itertools import combinations
import networkx as nx
import sys
import pickle
import copy
from glob import glob
from importlib import reload
sys.path.append('/science/users/4302001/arctic-connectivity/tools')
sys.path.append('/Users/daanreijnders/surfdrive/Thesis/repository/tools')
import plot
import community
import fieldsetter_cmems
import advectParticles
# Input/output locations on the cluster filesystem.
readDir = "/data/oceanparcels/input_data/CMEMS/GLOBAL_REANALYSIS_PHY_001_030/"
meanDir = "/data/oceanparcels/input_data/CMEMS/GLOBAL_REANALYSIS_PHY_001_030_monthly/"
fieldFiles = sorted(glob(readDir + "mercatorglorys12v1_gl12_mean_*.nc"))
writedir = '/scratch/DaanR/psets/'
psetdir = "/data/oceanparcels/output_data/data_Daan/psets/"
matdir = "/data/oceanparcels/output_data/data_Daan/matrices/"
netdir = "/data/oceanparcels/output_data/data_Daan/networks/"
comdir = "/data/oceanparcels/output_data/data_Daan/communities/"

# Build the hexagonal binning from the precomputed icosahedral mesh.
with open('/scratch/DaanR/meshes/ico_mesh_hex_r7.pickle', 'rb') as meshPick:
    meshDict = pickle.load(meshPick)
myBins = community.hexCountBins(meshDict['points'],
                                np.degrees(meshDict['lons']),
                                np.degrees(meshDict['lats']),
                                meshDict['permutation'],
                                meshDict['simplices'])
myMask = community.hexMask(myBins, -180, 180, 60, 90)  # Arctic band: 60N-90N
myMask.growToLevel(4)
myBins.calculate_voronoi(myMask, innerMaskLevel=2, outerMaskLevel=3)
#myBins.add_regular_rim()
myBins.calculate_neighbors()
del meshDict

with open("oceanMask_no_rim.pickle", 'rb') as pickFile:
    myBins.oceanMask = pickle.load(pickFile)
myParts = community.particles.from_pickle('/scratch/DaanR/meshes/ico_mesh_parts_deg_arctic_r11_delland.pickle')
print("Number of particles:", myParts.n)

# Load the 100 Infomap ensemble solutions and their codelengths.
ensembleCommunityID = {}
codelengths = []
for run in range(1, 101):
    myBins.load_communities(comdir + f"infomap_ensemble/masked_network_Rcmems_Pico11_S2018-3-1_D90_DT20_ODT24_Cico7_mt2_multirunN{run}.clu")
    ensembleCommunityID[run-1] = myBins.communityID
    codelengths.append(myBins.codelength)

# Precomputed pairwise Jaccard distances between solutions, per bin.
with open("/data/oceanparcels/output_data/data_Daan/misc/jaccard_masked_network_Rcmems_Pico11_S2018-3-1_D90_DT20_ODT24_Cico7_mt2_ensemble100.pickle", "rb") as pickFile:
    jaccardDistances = pickle.load(pickFile)
nSolutionClusters = np.ma.masked_array(np.zeros(myBins.bindex.shape), ~myBins.oceanMask)
combos = list(combinations(list(range(0,100)), 2))  # index pairs matching the distance vectors
clSorter = np.argsort(codelengths)  # solution indices, best (shortest) codelength first
tolerance = 0.25  # Jaccard similarity threshold for joining a cluster
for idx in myBins.bindex[myBins.oceanMask]:
localJaccardDistances = jaccardDistances[idx].copy()
sortedIdx = clSorter.tolist()
clustered = []
nClusters = 0
while len(sortedIdx) > 0 and len(clustered) <= 95:
clusterCenter = sortedIdx.pop(0)
clustered.append(clusterCenter)
nClusters += 1
for unclustered in sortedIdx:
if unclustered < clusterCenter:
pair = (unclustered, clusterCenter)
else:
pair = (clusterCenter, unclustered)
pairIdx = combos.index(pair)
if jaccardDistances[idx][pairIdx] > 1 - tolerance:
clustered.append(sortedIdx.pop(sortedIdx.index(unclustered)))
nSolutionClusters[idx] = nClusters
with open("/data/oceanparcels/output_data/data_Daan/misc/nclusters_masked_network_Rcmems_Pico11_S2018-3-1_D90_DT20_ODT24_Cico7_mt2_ensemble100_threshold_0.25_95percent.pickle", "wb") as pickFile:
pickle.dump(nSolutionClusters, pickFile) | 2.46875 | 2 |
tests/settings.py | danihodovic/django-allauth-ui | 15 | 59908 | from pathlib import Path
# Django settings used by the test suite / demo project.
DEBUG = True
USE_TZ = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "very-secret"
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "db.sqlite3"}}
ROOT_URLCONF = "tests.urls"
DJANGO_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.messages",
    "django.contrib.sessions",
    "django.contrib.sites",
    "django.contrib.staticfiles",
]
THIRD_PARTY_APPS = [
    "django_extensions",
    "allauth_ui",
    "allauth",
    "allauth.account",
    "allauth.socialaccount",
    "allauth.socialaccount.providers.github",
    "allauth.socialaccount.providers.facebook",
    "allauth.socialaccount.providers.linkedin",
    "allauth.socialaccount.providers.digitalocean",
    "widget_tweaks",
    "django_browser_reload",
    "debug_toolbar",
]
LOCAL_APPS = ["tests"]
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
    "django_browser_reload.middleware.BrowserReloadMiddleware",
    "debug_toolbar.middleware.DebugToolbarMiddleware",
]
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ]
        },
    }
]
AUTHENTICATION_BACKENDS = [
    "django.contrib.auth.backends.ModelBackend",
    "allauth.account.auth_backends.AuthenticationBackend",
]
# Print outgoing mail (e.g. verification emails) to the console during tests.
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
INTERNAL_IPS = ["127.0.0.1"]
ALLOWED_HOSTS = ["*"]
SITE_ID = 1
STATIC_URL = "/static/"
MEDIA_URL = "/media/"
MEDIA_ROOT = Path(__file__).parent / "media"
# django-allauth: email-only sign-in with mandatory verification.
# (A duplicate ACCOUNT_AUTHENTICATION_METHOD assignment was removed.)
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_LOGIN_ATTEMPTS_LIMIT = 1000
actions/menus.py | matteobarbieri/libtcod-tutorial | 1 | 60036 | from .action import Action
from .exceptions import ShowMenuException
from game_state import GamePhase
from loader_functions.data_loaders import save_game
class ShowMenuAction(Action):
    """Save the game and signal the main loop to open the main menu."""

    def _execute(self):
        # Persist the current game state before leaving the main loop.
        # TODO When going back to main game it's always player's turn, maybe
        # consider removing it from the required arguments of `save_game`?
        save_game(
            self.player,
            self.game_map,
            self.message_log,
            GamePhase.PLAYERS_TURN,
        )
        # Unwind out of the game loop; the caller catches this and shows the menu.
        raise ShowMenuException()
class ShowCharacterScreenAction(Action):
    """Switch the game phase to the character screen."""

    def _execute(self):
        # Nothing to mutate; just request the phase change.
        return {'next_state': GamePhase.CHARACTER_SCREEN}
class SelectInventoryItemAction(Action):
    """Resolve an inventory letter to an item and open that item's menu.

    If the letter does not correspond to a valid item, no item is selected
    and the game stays in the inventory menu.
    """

    def __init__(self, item_letter):
        # Letter the player pressed to pick an inventory slot.
        self.item_letter = item_letter

    def _execute(self):
        try:
            item_index = self.player.inventory.item_letters.index(
                self.item_letter)
            item = self.player.inventory.items[item_index]
            next_phase = GamePhase.INVENTORY_ITEM_MENU
        except (ValueError, IndexError):
            # Unknown letter (ValueError) or a letter without a matching item
            # (IndexError): keep showing the inventory menu. The two cases
            # previously had duplicated, identical handlers; any other
            # exception now propagates with its original traceback.
            item = None
            next_phase = GamePhase.INVENTORY_MENU

        # Return outcome
        return {
            'selected_inventory_item': item,
            'next_state': next_phase,
        }
class ShowInventoryAction(Action):
    """Open the inventory menu with no item selected."""

    def _execute(self):
        outcome = {
            'selected_inventory_item': None,
            'next_state': GamePhase.INVENTORY_MENU,
        }
        return outcome
class BackToInventoryMenuAction(Action):
    """Return to the inventory menu and request a terrain redraw."""

    def _execute(self):
        return {
            'next_state': GamePhase.INVENTORY_MENU,
            'redraw_terrain': True,
        }
class BackToGameAction(Action):
    """Leave the current menu and hand control back to the player."""

    def _execute(self):
        # Redraw so the menu overlay disappears from the screen.
        return {
            'next_state': GamePhase.PLAYERS_TURN,
            'redraw_terrain': True,
        }
| 1.84375 | 2 |
engram/tests/utils_test.py | rgrannell1/engram.py | 0 | 60164 | #!/usr/bin/env python3
import unittest
import os
import sys
import requests
import utils_test
from multiprocessing import Process
import time
sys.path.append(os.path.abspath('engram'))
import engram
class EngramTestCase(unittest.TestCase):
    """Test case that runs the engram app in a background process."""

    def setUp(self):
        # Start the app in a separate process so tests can hit it over HTTP.
        # NOTE(review): sqlite's in-memory database is usually spelled
        # ':memory:' — confirm what engram.create actually expects here.
        self.process = Process(target = engram.create, args = (':memory', True))
        self.process.start()
        # Give the server time to come up before any test runs.
        print('running tests in four seconds...')
        time.sleep(4)

    def tearDown(self):
        try:
            self.process.terminate()
            self.process.join()
        except Exception as err:
            # Best-effort cleanup: report the failure but don't fail the run.
            print('failed to terminate process.')
            print(err)
| 1.359375 | 1 |
src/mridata/migrations/0013_remove_tempdata_thumbnail_fftshift_along_z.py | Abesachs01/mridata | 8 | 60292 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-08-01 03:24
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: removes the
    # `thumbnail_fftshift_along_z` boolean from the TempData model.

    dependencies = [
        ('mridata', '0012_auto_20180731_1714'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='tempdata',
            name='thumbnail_fftshift_along_z',
        ),
    ]
| 0.714844 | 1 |
robot/main.py | EnyoYaTing/DevOps-Vue-GraphQL | 4 | 60420 | import src.library as s_lib
# s_lib.update_team_table()
# s_lib.test_func()
# s_lib.update_result()

# Date window to refresh, in YYYY-MM-DD format.
st = '2018-10-29'
end = '2019-05-23'
# Recompute results for the whole period.
# NOTE(review): semantics of update_period_result live in src.library — confirm side effects.
s_lib.update_period_result(st, end)
| 0.648438 | 1 |
electron apps/old/general_neural_network/python/test.py | quasarbright/quasarbright.github.io | 1 | 60548 | <gh_stars>1-10
import sys, time

# Print the command-line arguments a fixed number of times.
for _ in range(5):
    print(sys.argv)
    # time.sleep(2)
| 1.15625 | 1 |
math_study/numpy_basics/statistics/statistic_min_max.py | PitPietro/pascal-triangle | 1 | 60676 | import numpy as np
from math_study.numpy_basics.statistics.statistics import random_data

if __name__ == '__main__':
    # Demo of numpy min/max reductions on a random multidimensional array.
    print('Numpy - Statistic - min & max')
    print('\nrandom multidimensional array:')
    print(random_data)
    print('\nget the min value from the array:')
    print(random_data.min())
    print('\nget the min value from the array along the specified axis:')
    print(random_data.min(axis=0))  # array filled with vertical min values: min of the values in the same column
    print(random_data.min(axis=1))  # array filled with horizontal min values: min of the values in the same rows
    # if you exceed the number of dimensions, you'll get a 'numpy.AxisError'
    try:
        print(random_data.min(axis=3))
    except np.AxisError as axis_error:
        print(axis_error)
    print(str('- ' * 20))
    print('\nget the max value from the array:')
    print(random_data.max())
    # Fixed copy-paste bug: this heading previously said "min" in the max section.
    print('\nget the max value from the array along the specified axis:')
    print(random_data.max(axis=0))  # array filled with vertical max values: max of the values in the same column
    print(random_data.max(axis=1))  # array filled with horizontal max values: max of the values in the same rows
    # if you exceed the number of dimensions, you'll get a 'numpy.AxisError'
    try:
        print(random_data.max(axis=3))
    except np.AxisError as axis_error:
        print(axis_error)
| 2.8125 | 3 |
piconly.py | robinwyss/raspberry-scripts | 0 | 60804 | #!/usr/bin/env python
import camera

# Capture a single still image and report where it was written.
print("taking a picture")
# camera.capture() appears to return the saved image's path (used below) —
# confirm against the camera module.
imagePath = camera.capture()
print("captured %s" % imagePath)
| 1.023438 | 1 |
NetCatKS/Dispatcher/api/interfaces/validator/__init__.py | dimddev/NetCatKS-CP | 0 | 60932 | <gh_stars>0
__author__ = 'dimd'
from zope.interface import Interface
class IDispatchAPIValidator(Interface):
    """Interface for dispatcher API validators."""

    def validate(check_one, check_two):
        """
        Validate whether *check_one* matches *check_two*.

        (Original note said it compares sorted key sets of an input dict
        against ``self.to_dict()`` — the parameter names did not match the
        signature; confirm the exact contract against implementations.)

        :param check_one: first structure to compare
        :param check_two: second structure to compare
        :return: bool
        """
| 1.390625 | 1 |
deploy/alfresco.py | Redpill-Linpro/Alfresco-Tooling | 1 | 61060 | #!/usr/bin/env python
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import os.path as path
import shutil
from subprocess import call
class Alfresco(object):
    """Helper that hot-deploys Alfresco repo/share sources into tomcat trees
    (via rsync) and reloads webscripts over HTTP (via curl).

    Note: this is Python 2 code (print statements); `args` is an argparse
    namespace produced by the CLI at the bottom of this file.
    """

    def __init__(self,args):
        self.tomcat = path.abspath(args.tomcat)
        self.tomcat_share = args.tomcat_share and path.abspath(args.tomcat_share) or path.abspath(args.tomcat) #if not set defualt to repo tomcat
        self.share = path.abspath(args.share)
        self.repo = path.abspath(args.repo)
        # --no-notification wins over any configured notification command.
        self.notification = args.no_notification and None or args.notification
        self.quiet = args.quiet
        self.host = args.tomcat_host
        self.share_host = args.tomcat_share_host

    def notify(self,msg):
        """Send out a notification message (shell command with %s placeholder)."""
        if self.notification is not None:
            call(self.notification % msg,shell=True)

    def call(self,*args,**kwargs):
        # Thin wrapper around subprocess.call that echoes the command unless quiet.
        if not self.quiet:
            print "Executing: ",args[0]
        call(*args,**kwargs)

    def sync(self,scope):
        # Dispatch to the relevant rsync helpers; 'share' implies webapp too.
        if scope == "repo":
            self.sync_repo()
        elif scope == "share":
            self.sync_share()
            self.sync_webapp()
        elif scope == "webapp":
            self.sync_webapp()
        elif scope == "all":
            self.sync_webapp()
            self.sync_repo()
            self.sync_share()

    def reload(self,scope):
        # Reload webscripts on one or both webapps.
        if scope == "repo":
            self.reload_repo()
        elif scope == "share":
            self.reload_share()
        elif scope == "all":
            self.reload_repo()
            self.reload_share()

    def reload_repo(self):
        """ Reload repo webscripts using it's webservice (with curl)"""
        self.call('curl -o /tmp/curl.html --user admin:admin --data-urlencode "reset=on" %s/alfresco/service/index %s' % (self.host,self.quiet and '> /dev/null 2>&1' or ''),shell=True)

    def reload_share(self):
        """ Reload share webscripts using it's webservice (with curl)"""
        self.call('curl -o /tmp/curl.html --user admin:admin --data-urlencode "reset=all" %s/share/page/console %s' % (self.share_host,self.quiet and '> /dev/null 2>&1' or ''),shell=True)

    def sync_repo(self):
        """ rsync repo webscripts"""
        self.call("rsync -avh %s %s %s" % (path.join(self.repo,'config/'),path.join(self.tomcat,'shared/classes/'),self.quiet and '> /dev/null 2>&1' or ''),shell=True)

    def sync_webapp(self):
        """ rsync webbapp, i.e. client side js,css and images """
        self.call("rsync -avh %s %s %s" % (path.join(self.share,'webapp/'),path.join(self.tomcat_share,'webapps/share/'),self.quiet and '> /dev/null 2>&1' or ''),shell=True)

    def sync_share(self):
        """ rsync share webscripts"""
        self.call("rsync -avh %s %s %s" % (path.join(self.share,'config/'),path.join(self.tomcat_share,'shared/classes/'),self.quiet and '> /dev/null 2>&1' or ''),shell=True)

    def sync_all(self):
        """Syncs both repo and share """
        # Equivalent to sync('all'); kept for direct callers.
        self.sync_webapp()
        self.sync_repo()
        self.sync_share()

    def reload_all(self):
        """reloads share and repo """
        # Equivalent to reload('all'); kept for direct callers.
        self.reload_repo()
        self.reload_share()

    def override(self,pth):
        """Override a file in alfresco, i.e copy it to our source tree in a proper place"""
        pth = path.abspath(pth)
        if not path.isfile(pth):
            print "Can't find file: %s" % pth
            return
        def copy(src_pth):
            #there might not be a proper folder for it
            folder = path.dirname(src_pth)
            if not path.exists(folder):
                os.makedirs(folder)
            print "Copying file"
            print "From:",pth
            print "To:",src_pth
            shutil.copy(pth,src_pth)
        #check if it's share or repo
        if '/share/' in pth:
            #is it in config or in
            if '/WEB-INF/classes/alfresco/' in pth:
                copy(path.join(self.share,'config/alfresco/web-extension' ,pth.split('/WEB-INF/classes/alfresco/')[1]))
            elif '/WEB-INF/classes' in pth:
                print "I don't know how to override that, sorry"
            else:
                #must be web
                copy(path.join(self.share,'webapp' ,pth.split('/share/')[1]))
        else: #hence repo
            print "Overriding repo webscript, you might need to manually copy these to webapps alfresco since overriding doesn't always work here"
            copy(path.join(self.repo,'config',pth.split('/WEB-INF/classes/')[1]))
if __name__ == "__main__":
import sys
import argparse
def using(tomcat,tomcat_share,repo,share):
print """Using:
tomcat: %s
tomcat share: %s
repo: %s
share: %s
""" % (tomcat,tomcat_share or 'Not set', repo,share)
#some mac os x love
notification_cmd = sys.platform.startswith('darwin') and 'growlnotify -m "%s"' or 'notify-send --hint=int:transient:1 "%s"'
parser = argparse.ArgumentParser(description="Hotdeployment script for alfresco")
parser.add_argument('-q','--quiet',action="store_true", default=False,help="Disable output")
parser.add_argument('-N','--no-notification',action="store_true", default=False,help="Disable notfications")
parser.add_argument('-n','--notification',default=notification_cmd,help="Notification binary")
parser.add_argument('-t','--tomcat',default="/opt/alfresco/tomcat" ,help="Path to tomcat")
parser.add_argument('-ts','--tomcat-share',default=None ,help="Path to tomcat used for share if it's different than repo. Only required when using two tomcats and 'reload' or 'deploy' 'all'")
parser.add_argument('-th','--tomcat-host',default="http://localhost:8080",help="Host used for tomcat, i.e. http://localhost:8080")
parser.add_argument('-tsh','--tomcat-share-host',default="http://localhost:8080",help="Host used for alfresco share tomcat, i.e. http://localhost:8080")
parser.add_argument('-s','--share' ,default="trunk/share/src/main/",help="Path to share src")
parser.add_argument('-r','--repo' ,default="trunk/repo/src/main/" ,help="Path to repo src")
sub = parser.add_subparsers(help="Action subparser")
#sync
sync = sub.add_parser('sync',help="Sync files with rsync")
sync.add_argument('scope',choices=['repo','share','webapp','all'],help="What to sync, 'share' will also sync webapp and 'all' syncs them all")
def sync_func(args):
if not args.quiet:
using(args.tomcat,args.tomcat_share,args.repo,args.share)
a = Alfresco(args )
a.sync(args.scope)
a.notify("Synced %s" % args.scope)
sync.set_defaults(func=sync_func)
#reload
rel = sub.add_parser('reload',help="Reload wbescript with curl")
rel.add_argument('scope',choices=['repo','share','all'],help="What to reload, 'repo','share' or 'all'")
def rel_func(args):
if not args.quiet:
using(args.tomcat,args.tomcat_share,args.repo,args.share)
a = Alfresco(args)
a.reload(args.scope)
a.notify("Reloaded %s" % args.scope)
rel.set_defaults(func=rel_func)
#deploy
deploy = sub.add_parser('deploy',help="Sync and reload")
deploy.add_argument('scope',choices=['repo','share','all'],help="What to sync and reload: 'repo','share' or 'all'")
def deploy_func(args):
if not args.quiet:
using(args.tomcat,args.tomcat_share,args.repo,args.share)
a = Alfresco(args)
a.sync(args.scope)
a.reload(args.scope)
a.notify("Synced and reloaded %s" % args.scope)
deploy.set_defaults(func=deploy_func)
#override
override = sub.add_parser('override',help="Override a file in Alfresco")
override.add_argument('file',help="Path to file to overide")
def override_func(args):
if not args.quiet:
using(args.tomcat,args.tomcat_share,args.repo,args.share)
a = Alfresco(args)
a.override(args.file)
override.set_defaults(func=override_func)
args = parser.parse_args()
args.func(args)
| 1.429688 | 1 |
numbaExercise/numba_begin.py | terasakisatoshi/pythonCodes | 0 | 61188 | """
http://yutori-datascience.hatenablog.com/entry/2014/12/10/123157
"""
from numba import cuda
import numpy as np
from numba import double
from numba.decorators import jit
from numba import guvectorize
import time
import math
@jit
def pairwise_numba(X,D):
    # Fill D[i, j] with the Euclidean distance between rows i and j of X.
    # Lazily compiled by numba with inferred types.
    M,N=X.shape[0],X.shape[1]
    for i in range(M):
        for j in range(M):
            d=0.0
            for k in range(N):
                tmp=X[i,k]-X[j,k]
                d+=tmp *tmp
            D[i,j]=np.sqrt(d)
@jit('void(f8[:,:],f8[:,:])')
def pairwise_numba_with_type(X,D):
    # Same as pairwise_numba but with an explicit float64 signature, so
    # compilation happens eagerly at import time instead of on first call.
    M,N=X.shape[0],X.shape[1]
    for i in range(M):
        for j in range(M):
            d=0.0
            for k in range(N):
                tmp=X[i,k]-X[j,k]
                d+=tmp *tmp
            D[i,j]=np.sqrt(d)
# Generalized ufunc: maps an (x, y) input array to an (x, x) distance matrix.
@guvectorize(['void(f8[:, :], f8[:, :])'], '(x, y)->(x, x)')
def pairwise_vectorize(X, D):
    M = X.shape[0]
    N = X.shape[1]
    for i in range(M):
        for j in range(M):
            d = 0.0
            for k in range(N):
                tmp = X[i, k] - X[j, k]
                d += tmp * tmp
            D[i, j] = np.sqrt(d)
def pairwise_python(X, D):
    """Pure-Python baseline: fill D with pairwise Euclidean distances of X's rows."""
    n_rows, n_cols = X.shape[0], X.shape[1]
    for a in range(n_rows):
        for b in range(n_rows):
            acc = 0.0
            for c in range(n_cols):
                diff = X[a, c] - X[b, c]
                acc += diff * diff
            D[a, b] = np.sqrt(acc)
@cuda.jit('void(f8[:, :], f8[:, :])')
def pairwise_numba_cuda1(X, D):
    # CUDA kernel: each thread computes one D[i, j] entry.
    M = X.shape[0]
    N = X.shape[1]
    # (i, j) is this thread's absolute position in the 2-D launch grid.
    i, j = cuda.grid(2)
    # Guard: the grid may be larger than the output matrix.
    if i < M and j < M:
        d = 0.0
        for k in range(N):
            tmp = X[i, k] - X[j, k]
            d += tmp * tmp
        D[i, j] = math.sqrt(d)
def measure_time(func, X, D):
    """Run ``func(X, D)`` once and print its wall-clock duration.

    Uses time.perf_counter, the clock intended for interval measurement;
    time.time has coarser resolution and can jump with system clock changes.
    """
    start = time.perf_counter()
    func(X, D)
    end = time.perf_counter()
    print("elapsed time", end - start)
def main():
    """Benchmark the pairwise-distance implementations against each other."""
    SIZE=5000
    blockdim =(16, 16)
    # Derive the grid from SIZE so every (i, j) of the SIZE x SIZE output is
    # covered. The previous fixed (100, 100) grid spanned only 100*16 = 1600
    # threads per axis, so the CUDA kernel left most of D uncomputed.
    griddim = ((SIZE + blockdim[0] - 1) // blockdim[0],
               (SIZE + blockdim[1] - 1) // blockdim[1])
    X=np.random.random((SIZE,3))
    D=np.empty((SIZE,SIZE))
    measure_time(pairwise_python,X,D)
    measure_time(pairwise_numba,X,D)
    measure_time(pairwise_numba_with_type,X,D)
    measure_time(pairwise_vectorize,X, D)
    # Time the GPU kernel separately (launch + implicit transfers).
    start=time.time()
    pairwise_numba_cuda1[griddim, blockdim](X, D)
    end=time.time()
    print("elapsed gpu=",end-start)
if __name__ == '__main__':
main() | 2.375 | 2 |
agents/alphazero.py | kxvrh/GomokuAI | 4 | 61316 | <reponame>kxvrh/GomokuAI<filename>agents/alphazero.py
from core import Policy
from .mcts import MCTSAgent
def PyConvNetAgent(network, c_puct, **constraint):
    """Build an MCTS agent whose policy is evaluated by the given conv network."""
    eval_policy = Policy(eval_state=network.eval_state, c_puct=c_puct)
    return MCTSAgent(policy=eval_policy, **constraint)
def AlphaZeroAgent(**constraint):
    # TODO: unimplemented placeholder for a full AlphaZero-style agent.
    pass
def main():
    """Run the conv-net MCTS agent through the Botzone interface."""
    from .utils import botzone_interface
    from config import MCTS_CONFIG
    from network import PolicyValueNetwork
    # Placeholder path — point this at a real checkpoint before running.
    model_file = "path/to/model/file"
    network = PolicyValueNetwork(model_file)
    botzone_interface(PyConvNetAgent(network, **MCTS_CONFIG))
main()
| 1.046875 | 1 |
polar/polar_common.py | Akorsvang/WIRT-implementation | 0 | 61444 | <gh_stars>0
"""
Shared functions used for polar encoding and decoding
"""
from functools import lru_cache
import numpy as np
def idx(phi, beta, lamb):
    """Flatten (phi, beta) into one index: phi + beta * 2**lamb."""
    return (beta << lamb) + phi
def polar_calculate_weights(N):
    """
    Calculate the polarization-weight ordering of the N polar channels.

    Returns the channel indices sorted by ascending weight (np.argsort).
    Right now only supports N <= 2**8, for more look at
    np.unpackbits(np.array([3]).byteswap().view(np.uint8))
    """
    if np.log2(N) > 8:
        raise ValueError("Ordering calculation does not support above 2**8")
    # beta = 2^(1/4) is the base used by the polarization-weight construction.
    beta = 2**(1 / 4)
    I = np.arange(N, dtype=np.uint8)
    beta_power = (beta**np.arange(7, -1, -1))
    W = np.empty(I.shape)
    for i in I:
        # Weight = sum of beta^bit_position over the set bits of the index.
        W[i] = (np.unpackbits(i) * beta_power).sum()
    W_index = np.argsort(W)
    return W_index
@lru_cache()
def polar_hpw(N):
    """
    Calculate polar weights using the higher order method (HPW).

    Returns the channel indices sorted by ascending weight; cached per N.
    """
    beta = 2**(1 / 4)
    I = np.arange(N, dtype='>u4') # Creating this as a big-endian array, so we don't have to byteswap
    beta_power = (beta**np.arange(31, -1, -1))
    # Second-order correction term uses quarter powers of beta.
    beta_power_quad = (beta**((1 / 4) * np.arange(31, -1, -1)))
    # One row of 32 bits per index, MSB first (big-endian view).
    elem_bits = np.unpackbits(I.view(np.uint8)).reshape(-1, 32)
    W = (elem_bits * (beta_power + (1/4) * beta_power_quad)).sum(axis=1)
    W_index = np.argsort(W)
    return W_index
@lru_cache()
def polar_find_ordering(N):
    """Return the bit-reversal permutation of range(N); cached per N."""
    o = np.arange(N, dtype='>u4')
    n_bits = np.log2(N).astype(int)
    # We use some view tricks here to find the bits that correspond to the entries in o
    elem_bits = np.unpackbits(o.view(np.uint8)).reshape(-1, 32)
    # Flip the bit order, roll the bits down to the correct order and revert the view from before
    return np.packbits(np.roll(np.fliplr(elem_bits), 32 - n_bits)).view('>u4')
@lru_cache()
def polar_find_G(N, reorder=True):
    """Build the N x N polar generator matrix G = F^{kron log2(N)}.

    With reorder=True the rows are permuted into bit-reversed order via
    polar_find_ordering. Cached per (N, reorder).
    """
    n = np.log2(N).astype(int)
    # F is the 2x2 polarization kernel.
    F = np.array([[1, 0], [1, 1]], dtype=np.uint8)
    G = F.copy()
    for _ in range(1, n):
        G = np.kron(G, F)
    if reorder:
        G_shuffled = G[polar_find_ordering(N)]
    else:
        G_shuffled = G
    return G_shuffled
def polar_transform_pipelined(u, reorder=True):
    """Apply the polar transform to ``u`` in log2(N) in-place-style stages.

    Each stage XORs the even-indexed bits with their odd neighbours into the
    lower half and moves the odd bits into the upper half. When ``reorder``
    is true the result is permuted via ``polar_find_ordering``.
    """
    num_bits = len(u)
    half = num_bits // 2
    stages = np.log2(num_bits).astype(int)

    state = u.copy()
    for _ in range(stages):
        odd = state[1::2].copy()
        # RHS is evaluated fully before assignment, so the overlap is safe.
        state[:half] = state[::2] ^ odd
        state[half:] = odd

    if reorder:
        state = state[polar_find_ordering(num_bits)]
    return state
def polar_transform(u):
    """Recursive polar transform using the bit-reversal structure.

    Based on http://pfister.ee.duke.edu/courses/ecen655/polar.pdf
    """
    if len(u) == 1:
        return u
    # XOR (mod-2 sum) of even/odd pairs feeds the upper branch,
    # the odd bits alone feed the lower branch.
    combined = np.mod(u[::2] + u[1::2], 2)
    odd_bits = u[1::2]
    return np.concatenate((polar_transform(combined), polar_transform(odd_bits)))
return x | 2.59375 | 3 |
pipeline.py | ooominds/ndltenses | 0 | 61572 | from ndl_tense.data_preparation import create_sentence_file,annotate_tenses, prepare_data, prepare_ndl_events, extract_infinitive, extract_ngrams, prepare_ngrams, prepare_cues
#below import commented out for now, uncomment if you want to run step 6
from ndl_tense.simulations import ndl_model
#from ndl_tense.post_processing import top_cues_for_sen, sample_sentences
from ndl_tense import file_tools
from param_file import *
from os import chdir
import logging
logging.basicConfig(level=logging.INFO)
def step_1():
    """Extract sentences from the corpus into sentence-per-line files."""
    # create folders specified by the list stored in EXTRACT_SNETNECES_DIRS
    file_tools.manage_directories(EXTRACT_SENTENCES_DIRS, False)
    chdir(WD_EXTRACT) # change working directory to the path in WD_EXTRACT
    # create folders specified by the list stored in EXTRACT_SNETNECES_FILES
    # the "True" means that the paths in the list are for files and not directories
    file_tools.manage_directories(EXTRACT_SENTENCES_FILES, True)
    # (The default paramenters are the ones set here)
    # create_sentence_file: (list of file paths),
    # {dictionary of token:tag pairs to remove from corpus} - # we remove colloquial spelling tokens like "gon", "wan" and "innit" here,
    # "True" = create a .tsv of the output,
    # True = keep the original sentence | False = "clean" it to be used for training an ndl model
    # the final parameter is for verbosity True = print the output of the process as we go along
    # The default paramenters are the ones set here
    # so this can be run with a call like create_sentence_file.run(EXTRACT_SENTENCES_FILES, {"gon":"VVG", "wan":"VVB", "innit":"VBB"}) and have the same result
    create_sentence_file.run(EXTRACT_SENTENCES_FILES, {"gon":"VVG", "wan":"VVB", "innit":"VBB"}, False, False, True)
def step_2():
    """Annotate the extracted sentences with tense labels."""
    # create folders specified by the list stored in ANNOTATE_DIRS
    file_tools.manage_directories(ANNOTATE_DIRS, False)
    chdir(WD_ANNOTATE) # change working directory to the path in WD_EXTRACT
    # create folders specified by the list stored in ANNOTATE_FILES
    # the "True" means that the paths in the list are for files and not directories
    file_tools.manage_directories(ANNOTATE_FILES, True)
    # the final parameter is for verbosity (True = print the output of the process as we go along)
    annotate_tenses.run(ANNOTATE_FILES, True)
def step_3():
    """Prepare the annotated data and split it into NDL train/valid/test events."""
    file_tools.manage_directories(PREPDAT_DIRS, False)
    chdir(WD_PREPDAT)
    file_tools.manage_directories(PREPDAT_FILES, True)
    file_tools.manage_directories(PREPARE_TRAIN_VALID_TEST_FILES, True)
    #optional
    #sample_sentences.run(TENSES_ONE_SENT_PER_VERB_WITH_MODALS, kets, ratios, 500, False)
    # the final parameter is for verbosity (True = print the output of the process as we go along)
    file_tools.manage_directories(CREATE_TRAIN_VALID_TEST_FILES, True)
    prepare_data.run(PREPDAT_FILES, True)
    chdir(WD_PREPDAT)
    # Split by the configured test/validation proportions.
    prepare_ndl_events.prepare_files(CREATE_TRAIN_VALID_TEST_FILES, PROP_TEST, PROP_VALID, True)
    # the final parameter is for verbosity (True = print the output of the process as we go along)
    prepare_ndl_events.run(PREPARE_TRAIN_VALID_TEST_FILES, 'NgramCuesWithInfinitive', True)
def step_4():
    """Extract the infinitive form for each annotated verb."""
    file_tools.manage_directories(EXTRACT_SENTENCES_FOLDERS, False)
    file_tools.manage_directories(EXTRACT_INFINITIVE_FILES, True)
    chdir(WD_EXTRACT_INF)
    # the final parameter is for verbosity (True = print the output of the process as we go along)
    extract_infinitive.run(EXTRACT_INFINITIVE_FILES, True)
def step_5():
    """Extract n-grams from the corpus and keep the top-K as targets."""
    file_tools.manage_directories(NGRAM_FOLDERS, False)
    chdir(WD_EXTRACT_NGRAM)
    file_tools.manage_directories(NGRAM_FILES, True)
    file_tools.manage_directories(TARGETS_FILES, True)
    # extracting ngrams by frequency is optional
    extract_ngrams.run(TENSES_GZ, NGRAM_FILES, TEMP_DIR_EXT, NUM_THREADS)
    # the final parameter is for verbosity (whether to print the output of the process as we go along)
    prepare_ngrams.run(NGRAM_FILES, K_NGRAMS, TARGETS_FILES, False)
def step_6():
    """Combine n-gram and infinitive cues into the final cue files."""
    file_tools.manage_directories([WD_CUES], False)
    chdir(WD_CUES)
    # the final parameter is for verbosity (whether to print the output of the process as we go along)
    prepare_cues.run(NGRAMS, INFINITIVES, ALL_CUES, True)
def step_7():
    """Train/evaluate the NDL model on the prepared events (see param_file)."""
    file_tools.manage_directories(SIM_DIR, False)
    chdir(WD_SIM)
    ndl_model.run(SIM_FILES, SIM_PARAMS)
def main():
    """Run the selected pipeline stages; stages are opt-in via uncommenting."""
    # uncomment by deleting hashtag for each step you wish to complete
    #step_1()
    #step_2()
    #step_3()
    #step_4()
    #step_5()
    #step_6()
    step_7() #requires you to uncomment an import line at the top
if __name__ == "__main__":
main() | 2.078125 | 2 |
ax/runners/botorch_test_problem.py | mpolson64/Ax-1 | 0 | 61700 | <filename>ax/runners/botorch_test_problem.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Set, Iterable, Any, Dict
import torch
from ax.core.base_trial import TrialStatus, BaseTrial
from ax.core.runner import Runner
from botorch.test_functions.base import BaseTestProblem
class BotorchTestProblemRunner(Runner):
    """A Runner that evaluates BoTorch ``BaseTestProblem`` instances.

    For every arm in a trial the runner calls ``test_problem.forward`` on the
    arm's parameter vector and stores the raw results in the run metadata.
    Evaluating on the Runner (rather than per Metric, as is typical for
    synthetic problems) avoids recomputing the stacked objective tensor for
    each metric in the multi-objective case.
    """

    def __init__(self, test_problem: BaseTestProblem) -> None:
        self.test_problem = test_problem

    def run(self, trial: BaseTrial) -> Dict[str, Any]:
        evaluations = {}
        for arm in trial.arms:
            # The parameters' insertion order defines the input vector layout.
            inputs = torch.tensor(list(arm.parameters.values()))
            evaluations[arm.name] = self.test_problem.forward(inputs).tolist()
        return {"Ys": evaluations}

    def poll_trial_status(
        self, trials: Iterable[BaseTrial]
    ) -> Dict[TrialStatus, Set[int]]:
        # Evaluation happens synchronously in `run`, so everything is done.
        return {TrialStatus.COMPLETED: {trial.index for trial in trials}}
| 1.4375 | 1 |
back.py | julioalvesMS/barbie-web | 1 | 61828 | from barbie.barbie import *
import barbie.susy_interface as susy
import subprocess
import os.path
import sys
from shutil import rmtree
from threading import Timer
constant_DirectoryDuration = 36
# Pode ser removido depois dos testes
import time
def barbiefy(dir, codes, disc, turma, lab):
    """Compile the submitted C sources and run them against the Susy tests.

    dir: working directory for the temp build and downloaded tests;
    codes: source files to compile; disc/turma/lab identify the Susy
    discipline, class and lab. Returns a list of test results, or None
    when compilation fails (the compiler output is printed to stderr).
    """
    # Try to compile the source code
    try:
        exec_file, gcc_f, sucess = compile_c(codes, temp=True, dir=dir)
        assert sucess, "Falha na compilação"

    # If there was a compilation problem
    except AssertionError as e:
        eprint("Falha na compilação!\n")

        # Show the compilation output and end the program
        with open(gcc_f, 'r') as gcc:
            eprint(gcc.read())
        return None

    """
    # Code for testing the submit page while susy has no open class
    time.sleep(3)
    # Temporary while susy is offile
    results = list()
    for i in range(1, 11):
        results.append(BarbieTest(0, None, None, i, None, 'None', None))
    for i in range(11, 21):
        results.append(BarbieTest(i*3, 'Susy: Vamos dizer que isso fudeu', 'Barbie: Vamos dizer que isso fudeu', i, 'None', 'None', 'None'))
    return results
    """

    tests_dir_name = os.path.join(dir, 'testes/')

    in_files = None
    res_files = None

    # Connect to susy system and discover the test page url
    url = susy.discover_susy_url(disc, turma, lab)

    # List all susy files of open tests
    in_files, res_files = susy.get_susy_files(url)

    # Download all the open tests
    susy.download_tests(url, in_files, res_files, tests_dir_name)

    results = list()

    # If we sucessufuly got all needed files,
    # we may run all tests and compare our output with the expected
    if in_files and res_files:
        results = run_and_compare(exec_file, in_files, res_files, tests_dir_name)

    return results
def cleanUp(folder):
    """Delete *folder* recursively; missing paths and errors are ignored."""
    rmtree(folder, ignore_errors=True)
def timedCleanUp(folder):
    """Schedule ``cleanUp(folder)`` to run after the configured delay."""
    timer = Timer(constant_DirectoryDuration, cleanUp, args=[folder])
    timer.start()
| 1.554688 | 2 |
rooms/roomballot/models.py | dowjcr/rooms | 5 | 61956 | <reponame>dowjcr/rooms
"""
MODELS
Defines database models to be used in the room ballot application.
Author <NAME>
"""
from django.db import models
# =================== BAND =======================
# Represents a discrete pricing band.
class Band(models.Model):
    """A discrete room pricing band."""
    band_id = models.AutoField(primary_key=True)
    band_name = models.CharField(max_length=10)
    # Current and previous weekly prices; presumably the old price is kept
    # for year-on-year comparison — confirm with the ballot admins.
    weekly_price = models.IntegerField(null=True, blank=True)
    weekly_price_old = models.IntegerField(null=True, blank=True)

    def __str__(self):
        return "Band " + self.band_name
# ================ SYNDICATE =====================
# Represents a syndicate between Students.
class Syndicate(models.Model):
    """A group of students balloting together."""
    YEAR_CHOICES = (
        (1, 'First Ballot Year'),
        (2, 'Second Ballot Year'),
    )
    syndicate_id = models.AutoField(primary_key=True)
    # CRSid of the student who created/owns the syndicate.
    owner_id = models.CharField(max_length=10)
    # `complete` and `rank` are managed by ballot code (editable=False keeps
    # them out of forms/admin).
    complete = models.BooleanField(default=False, editable=False)
    year = models.IntegerField(choices=YEAR_CHOICES)
    rank = models.IntegerField(null=True, editable=False)

    def __str__(self):
        return str(self.syndicate_id)
# ================ STAIRCASE =====================
# Represents a staircase.
class Staircase(models.Model):
    """A staircase (group of rooms) with its contract length and description."""
    staircase_id = models.AutoField(primary_key=True)
    # Short display code for the staircase (nullable).
    identifier = models.CharField(max_length=10, default=None, null=True)
    name = models.CharField(max_length=30)
    contract_length = models.IntegerField('Number of contract weeks?')
    description = models.CharField(max_length=1000, default=None, null=True, blank=True)

    def __str__(self):
        return self.name
# ================== STUDENT =====================
# Stores user information, and implements many-to-one
# relationship to Syndicate. Note that user_id
# corresponds to CRSid.
class Student(models.Model):
    """Stores user information, and implements a many-to-one relationship to
    Syndicate. Note that user_id corresponds to the CRSid."""
    YEAR_CHOICES = (
        (1, 'First Ballot Year'),
        (2, 'Second Ballot Year'),
    )
    user_id = models.CharField('CRSid', primary_key=True, max_length=10)
    first_name = models.CharField('First Name', max_length=50)
    surname = models.CharField('Surname', max_length=50)
    year = models.IntegerField(choices=YEAR_CHOICES)
    # Ballot state flags below are managed programmatically (not admin-editable).
    in_ballot = models.BooleanField(default=True, editable=False)
    has_allocated = models.BooleanField(default=False, editable=False)
    rank = models.IntegerField(null=True, blank=True, editable=False)
    syndicate = models.ForeignKey(Syndicate, on_delete=models.SET_DEFAULT, default=None, null=True, editable=False)
    accepted_syndicate = models.BooleanField(default=False, editable=False)
    # When this student is scheduled to pick a room.
    picks_at = models.DateTimeField(null=True, blank=True, editable=False)
    name_set = models.BooleanField(default=True, editable=False)

    def __str__(self):
        return self.first_name + " " + self.surname
# ================== ROOM ========================
# Represents a room. Implements one-to-one
# relationship to Student.
class Room(models.Model):
    """Represents a room, its physical attributes, and the pricing scores used
    by the ballot. `taken_by` links to the Student who picked it."""
    FLOOR_CHOICES = (
        (1, 'Ground'),
        (2, 'First'),
        (3, 'Second'),
        (4, 'Third')
    )
    BATHROOM_CHOICES = (
        (1, '1'),
        (2, '2'),
        (3, '3'),
        (4, '4'),
        (5, '5+')
    )
    TYPE_CHOICES = (
        (1, 'JCR Freshers'),
        (2, 'JCR Ballot'),
        (3, 'JCR Outside Ballot'),
        (4, 'MCR')
    )
    # Identifiers.
    room_id = models.AutoField(primary_key=True)
    identifier = models.CharField(max_length=10, default=None, null=True)
    room_number = models.CharField(max_length=10)
    # Room attributes.
    floor = models.IntegerField(choices=FLOOR_CHOICES)
    is_ensuite = models.BooleanField('Has ensuite?', default=False)
    is_double_bed = models.BooleanField('Has double bed?', default=False)
    has_disabled_facilities = models.BooleanField('Has disabled facilities?', default=False)
    room_last_renovated = models.IntegerField('Year Room Last Renovated (YYYY)', null=True)
    bathroom_last_renovated = models.IntegerField('Year Bathroom Last Renovated (YYYY)', null=True)
    kitchen_last_renovated = models.IntegerField('Year Kitchen Last Renovated (YYYY)', null=True)
    faces_lensfield = models.BooleanField('Faces Lensfield Road?', default=False)
    faces_court = models.BooleanField('Faces court/garden?', default=False)
    # How many people share the bathroom.
    bathroom_sharing = models.IntegerField(choices=BATHROOM_CHOICES, null=True)
    is_flat = models.BooleanField(default=False)
    size = models.FloatField()
    # NOTE(review): SET_DEFAULT with default=None but no null=True — deleting a
    # Staircase would violate the NOT NULL constraint; confirm intent.
    staircase = models.ForeignKey(Staircase, on_delete=models.SET_DEFAULT, default=None)
    band = models.ForeignKey(Band, on_delete=models.SET_DEFAULT, default=None, null=True, related_name='band')
    new_band = models.ForeignKey(Band, on_delete=models.SET_DEFAULT, default=None, null=True, related_name='new_band')
    type = models.IntegerField(choices=TYPE_CHOICES)
    taken_by = models.ForeignKey(Student, on_delete=models.SET_DEFAULT, editable=False, null=True, default=None)
    price = models.IntegerField(editable=False, default=0)
    new_price = models.FloatField(default=0)
    contract_length = models.IntegerField(null=True)
    # Scores for pricing (computed, not admin-editable).
    score_ensuite = models.FloatField(editable=False, default=0)
    score_double_bed = models.FloatField(editable=False, default=0)
    score_renovated = models.FloatField(editable=False, default=0)
    score_renovated_facilities = models.FloatField(editable=False, default=0)
    score_bathroom = models.FloatField(editable=False, default=2)
    score_flat = models.FloatField(editable=False, default=0)
    score_facing_lensfield = models.FloatField(editable=False, default=0)
    score_size = models.FloatField(editable=False, default=0)
    score_facing_court = models.FloatField(editable=False, default=0)
    score_ground_floor = models.FloatField(editable=False, default=0)
    score_total = models.FloatField(editable=False, default=0)
    feature_price = models.FloatField(editable=False, default=0)
    pricing_notes = models.CharField(max_length=1000, default=None, null=True, blank=True)
    sort_number = models.IntegerField(default=0)

    def __str__(self):
        return self.staircase.__str__() + ", Room " + str(self.room_number)
# ================== REVIEW ======================
# Stores reviews which users have left for a particular
# room. Implements many-to-one relationship with Room.
class Review(models.Model):
    """A user's review of a room: category ratings plus free text."""
    review_id = models.AutoField(primary_key=True)
    room = models.ForeignKey(Room, on_delete=models.SET_DEFAULT, default=None)
    # Author stored denormalized (name + CRSid), not as a FK to Student.
    author_name = models.CharField(max_length=40)
    author_id = models.CharField(max_length=10)
    title = models.CharField(max_length=255)
    layout_rating = models.IntegerField()
    facilities_rating = models.IntegerField()
    noise_rating = models.IntegerField()
    overall_rating = models.IntegerField()
    text = models.CharField(max_length=5000)

    def __str__(self):
        return self.room.__str__() + " (Review " + str(self.review_id) + ")"
# =================== ADMIN ======================
# Stores information about users who are authorised
# to access the backend management system.
class AdminUser(models.Model):
    """A user (by CRSid) authorised to access the backend management system."""
    entry_id = models.AutoField(primary_key=True)
    user_id = models.CharField('CRSid', max_length=10)
    role = models.CharField(max_length=30)

    def __str__(self):
        return self.user_id
# =================== IMAGE ======================
# Implements relationship allowing storage of image
# files for room.
class Image(models.Model):
    """An image (full-size file plus thumbnail) attached to a room."""
    image_id = models.AutoField(primary_key=True)
    room = models.ForeignKey(Room, on_delete=models.SET_DEFAULT, default=None)
    thumbnail = models.ImageField(upload_to='room_images')
    file = models.ImageField(upload_to='room_images')

    def __str__(self):
        return self.room.__str__() + " (Image " + str(self.image_id) + ")"
# ================ FLOORPLAN =====================
# Implements relationship allowing storage of floorplan
# files for staircase.
class Floorplan(models.Model):
    """A floorplan file attached to a staircase."""
    floorplan_id = models.AutoField(primary_key=True)
    staircase = models.ForeignKey(Staircase, on_delete=models.SET_DEFAULT, default=None)
    file = models.FileField(upload_to='floorplans')

    def __str__(self):
        return self.staircase.__str__() + " (Floorplan " + str(self.floorplan_id) + ")"
# ================== SETTING =====================
# Stores settings required for app's function.
class Setting(models.Model):
    """A key/value pair holding app configuration required at runtime."""
    key = models.CharField(max_length=32)
    value = models.CharField(max_length=200, null=True, blank=True)

    def __str__(self):
        return str(self.key)
# =============== PROXY USER =====================
# Represents a user, not registered as a student, who
# can pick on a student's behalf as their proxy.
class ProxyUser(models.Model):
    """A user, not registered as a student, who can pick on a student's
    behalf as their proxy."""
    user_id = models.CharField('CRSid', primary_key=True, max_length=10)
    first_name = models.CharField('First Name', max_length=50)
    surname = models.CharField('Surname', max_length=50)

    def __str__(self):
        return self.first_name + " " + self.surname
# ============= PROXY INSTANCE ===================
# Represents an instance of someone being able to
# pick as a student's proxy.
class ProxyInstance(models.Model):
    """One student→proxy assignment: the proxy may pick for that student."""
    # One proxy assignment per student (student CRSid is the primary key).
    user_id = models.CharField('CRSid of Student', primary_key=True, max_length=10)
    proxy_user_id = models.CharField('CRSid of Proxy', max_length=10)

    def __str__(self):
        return self.proxy_user_id + " picking for " + self.user_id
# ================= ROOM PLAN ====================
# Implements relationship allowing storage of pdf
# floor plan per room.
class RoomPlan(models.Model):
    """A per-room floor-plan file (e.g. PDF) attached to a room."""
    roomplan_id = models.AutoField(primary_key=True)
    room = models.ForeignKey(Room, on_delete=models.SET_DEFAULT, default=None)
    file = models.FileField(upload_to='room_plans')

    def __str__(self):
        return self.room.__str__() + " (Plan " + str(self.roomplan_id) + ")"
| 1.695313 | 2 |
AcdUtilLib.py | fermi-lat/AcdUtil | 0 | 62084 | # $Header: /nfs/slac/g/glast/ground/cvs/GlastRelease-scons/AcdUtil/AcdUtilLib.py,v 1.4 2009/08/08 01:08:04 jrb Exp $
def generate(env, **kw):
    """SCons tool hook: wire the AcdUtil library and its dependencies into *env*.

    kw flags:
        depsOnly -- truthy: register dependencies only, not AcdUtil itself.
        incsOnly -- 1: also register include paths needed when building the
                    static lib / compiling TestAcdUtil.cxx.
    """
    if not kw.get('depsOnly', 0):
        env.Tool('addLibrary', library = ['AcdUtil'])
        if env['PLATFORM'] == 'win32':
            env.Tool('findPkgPath', package = 'AcdUtil')
        env.Tool('addLibrary', library = ['AcdUtilCommon'])
    # Dependent package tools.
    env.Tool('CalibDataLib')
    env.Tool('geometryLib')
    env.Tool('xmlBaseLib')
    env.Tool('rdbModelLib')
    env.Tool('calibUtilLib')
    env.Tool('EventLib')
    env.Tool('GlastSvcLib')
    env.Tool('mootCoreLib')
    env.Tool('facilitiesLib')
    # External library sets supplied by the build environment.
    env.Tool('addLibrary', library = env['gaudiLibs'])
    env.Tool('addLibrary', library = env['mysqlLibs'])
    env.Tool('addLibrary', library = env['clhepLibs'])
    env.Tool('addLibrary', library = env['cppunitLibs'])
    env.Tool('addLibrary', library = env['xercesLibs'])
    if env['PLATFORM']=='win32' and env.get('CONTAINERNAME','')=='GlastRelease':
        env.Tool('findPkgPath', package='CalibSvc')
    # only needed for building static lib and compiling TestAcdUtil.cxx
    if kw.get('incsOnly', 0) == 1:
        env.Tool('findPkgPath', package = 'GlastSvc')
        env.Tool('findPkgPath', package = 'idents')
        env.Tool('findPkgPath', package = 'geometry')
        env.Tool('findPkgPath', package = 'Event')
        env.Tool('findPkgPath', package = 'enums')
def exists(env):
    """SCons tool availability hook: this tool is always available."""
    return 1
| 1.09375 | 1 |
2021/aoc.py | dorianignee/advent_of_code | 0 | 62212 | import re
def read_raw(path):
    """Return the entire contents of the text file at *path* as one string.

    Fix: use a context manager so the file handle is closed deterministically;
    the previous version (``"".join(open(path).readlines())``) leaked the
    handle until garbage collection.
    """
    with open(path) as source:
        return source.read()
def ints(input):
    """Return every maximal run of digits in *input* as an int.

    If *input* names a .txt file, its contents are read and parsed instead.
    """
    if input.endswith(".txt"):
        return ints(read_raw(input))
    tokens = re.split(r"\D+", input)
    return [int(token) for token in tokens if token]
def lines(input):
    """Split *input* on newlines; if it names a .txt file, split its contents."""
    if input.endswith(".txt"):
        return lines(read_raw(input))
    return input.split("\n")
def blocks(input):
    """Split *input* on blank lines; if it names a .txt file, split its contents."""
    if input.endswith(".txt"):
        return blocks(read_raw(input))
    return input.split("\n\n")
def neighbors(grid, with_diagonals=True):
    """Attach a `.neighbors` list to every cell of the 2-D *grid*.

    Neighbors are listed in row-major order. With `with_diagonals=False` only
    the four orthogonal neighbors are included. Works for ragged grids: out-of-
    range positions are simply skipped.
    """
    for row_idx, row in enumerate(grid):
        for col_idx in range(len(row)):
            adjacent = []
            for dy in (-1, 0, 1):
                for dx in (-1, 0, 1):
                    if dy == 0 and dx == 0:
                        continue  # the cell itself is not its own neighbor
                    if not with_diagonals and dy != 0 and dx != 0:
                        continue  # skip diagonals when disabled
                    ny, nx = row_idx + dy, col_idx + dx
                    if ny < 0 or nx < 0:
                        continue  # avoid Python negative-index wraparound
                    try:
                        adjacent.append(grid[ny][nx])
                    except IndexError:
                        continue  # beyond the bottom/right edge
            grid[row_idx][col_idx].neighbors = adjacent
def flat_grid(grid):
    """Flatten a 2-D grid into a single list, row by row."""
    flattened = []
    for row in grid:
        flattened.extend(row)
    return flattened
| 2.546875 | 3 |
djue/utils.py | brmc/django-djue | 0 | 62340 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import sys
import jsbeautifier
from bs4 import BeautifulSoup
from django.apps import apps
from django.conf import settings
from django.template.loader import render_to_string
from editorconfig import PathError
def flatten(lst: []) -> []:
    """Return a flat list of all non-list/tuple leaves of *lst*, in order.

    A non-sequence argument is wrapped in a single-element list.
    """
    if not isinstance(lst, (list, tuple)):
        return [lst]
    result = []
    pending = list(lst)
    # Iterative depth-first traversal (replaces the recursive head/tail split).
    while pending:
        item = pending.pop(0)
        if isinstance(item, (list, tuple)):
            pending[0:0] = list(item)
        else:
            result.append(item)
    return result
def convert_to_pascalcase(string: str) -> str:
    """Convert *string* to PascalCase: capitalize each alphanumeric run."""
    words = re.findall(r"[a-zA-Z0-9]+", string)
    return "".join(word.capitalize() for word in words)
def convert_to_camelcase(string: str) -> str:
    """Convert *string* to camelCase (PascalCase with a lower-cased first letter)."""
    pascal = convert_to_pascalcase(string)
    return pascal[0].lower() + pascal[1:]
def convert_to_kebab_case(string: str) -> str:
    """Convert CamelCase *string* to kebab-case (dash before each inner capital)."""
    dashed = re.sub('(?!^)([A-Z])', r'-\1', string)
    return dashed.lower()
def replace(match: "re.Match") -> str:
    """Return ':' followed by the last captured group of *match*.

    Fix: the parameter was annotated ``str``, but ``match.groups()`` requires
    a regex match object; the annotation now reflects the real contract.
    """
    return ':' + match.groups()[-1]
def render_to_js_string(template: str, context: {}):
    """Render *template* with *context* and beautify the result as JavaScript.

    Beautifier options are taken from the project's editorconfig (setting
    ``EDITOR_CONFIG``, default ``.editorconfig``); defaults are used when the
    file is missing.
    """
    output = render_to_string(template, context)

    options = jsbeautifier.default_options()
    opts_file = getattr(settings, 'EDITOR_CONFIG', '.editorconfig')
    options.brace_style = 'collapse,preserve-inline'
    try:
        jsbeautifier.set_file_editorconfig_opts(opts_file, options)
    except PathError:
        # BUG FIX: message was missing its f-prefix, so it printed the literal
        # text "{opts_file}" instead of the path.
        log(f"No editor config found at: {opts_file}")
        log("Using defaults.")

    return jsbeautifier.beautify(output, opts=options)
def render_to_html_string(template, context):
    """Render *template* with *context*, putting each closing </label> on its
    own line for readability.

    NOTE(review): a BeautifulSoup-based pretty-printer used to live here but
    was disabled behind ``if True:`` (dead code, now removed). Reinstate a
    proper pretty-printer if nicer HTML output is needed.
    """
    output = render_to_string(template, context)
    return output.replace('</label>', '</label>\n')
def as_vue(self):
    # Render a Django form with Vue-friendly widget templates: each widget's
    # template path is redirected from 'django/forms' to the 'djue' directory.
    # NOTE(review): defined at module level but expects a Form instance as
    # `self` (uses self.fields / self._html_output) — presumably attached to
    # form classes elsewhere; confirm against callers.
    for name, field in self.fields.items():
        template: str = field.widget.template_name
        field.widget.template_name = template.replace('django/forms', 'djue')

    return self._html_output(
        normal_row='<div%(html_class_attr)s> %(field)s%('
                   'help_text)s</div>',
        error_row='%s',
        row_ender='</div>',
        help_text_html=' <span class="helptext">%s</span>',
        errors_on_separate_row=True)
def get_app_name(obj):
    """Return the name of the Django app containing *obj*'s module."""
    try:
        return apps.get_containing_app_config(obj.__module__).name
    except AttributeError:
        # get_containing_app_config returned None (module not inside an app);
        # fall back to the module's top-level package name.
        log("Object is not part of an app. About to do stupid shit")
        return obj.__module__.split('.')[0]
def convert_file_to_component_name(path):
    """Derive a PascalCase component name from the last path segment of *path*,
    dropping the file extension."""
    file_name = path.split(os.path.sep)[-1]
    stem = file_name.split('.')[0]
    return convert_to_pascalcase(stem.capitalize())
def log(msg):
    """Write *msg* to standard output, followed by a newline."""
    sys.stdout.write(msg + '\n')
def get_output_path():
    """Return (creating it if necessary) the '<DJUE_OUTPUT_DIR or CWD>/src'
    output directory."""
    root = getattr(settings, 'DJUE_OUTPUT_DIR', os.getcwd())
    path = os.path.join(root, 'src')
    os.makedirs(path, exist_ok=True)
    return path
| 1.523438 | 2 |
instagram/models.py | samwel-chege/Instagram | 0 | 62468 | <reponame>samwel-chege/Instagram<filename>instagram/models.py
from django.db import models
from django.db.models.deletion import CASCADE
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import post_save
# Create your models here.
class Profile(models.Model):
    """Per-user data (photo, bio, follower/following sets) attached 1:1 to
    Django's built-in User."""
    user = models.OneToOneField(User,on_delete=models.CASCADE,null=True,related_name='user')
    photo = models.ImageField(upload_to='photos',default="img2.jpg")
    bio = models.TextField(max_length=1000)
    followers = models.ManyToManyField(User,related_name='followers',blank=True)
    following = models.ManyToManyField(User,related_name='following',blank=True)

    # Signal handler: auto-create a Profile whenever a new User is saved.
    @receiver(post_save,sender = User)
    def update_user_profile(sender,instance,created, **kwargs):
        if created:
            Profile.objects.create(user=instance)
        try:
            instance.profile.save()
        except AttributeError:
            # NOTE(review): the OneToOneField's related_name is 'user', so
            # `instance.profile` does not exist and this save is always
            # skipped silently — confirm intended reverse accessor.
            pass

    @classmethod
    def search_user(cls,username):
        # Exact-match lookup of User rows by username.
        return User.objects.filter(username = username)
class Image(models.Model):
    """An uploaded photo post with caption, owner profile and likes."""
    image = models.ImageField(upload_to='photos')
    name= models.CharField(max_length=30)
    caption = models.CharField(max_length=30)
    profile = models.ForeignKey(Profile, on_delete=CASCADE,null=True)
    # Profiles that liked this post.
    likes = models.ManyToManyField(Profile,related_name='posts')
    posted_date = models.DateTimeField(auto_now_add=True,null=True)

    def __str__(self):
        return self.name

    def save_image(self):
        # Thin wrapper around Model.save() used by the views.
        self.save()

    def delete_image(self):
        self.delete()

    def like_count(self):
        # Number of profiles that liked this post.
        return self.likes.count()
class Comments(models.Model):
    """A user's comment on an Image post."""
    content = models.TextField()
    posted_date = models.DateTimeField(auto_now_add=True)
    user = models.ForeignKey(User,on_delete=models.CASCADE)
    image = models.ForeignKey(Image,on_delete=models.CASCADE,default =1, related_name= "comments")

    def __str__(self):
        return self.content

    def save_comment(self):
        # Thin wrapper around Model.save() used by the views.
        self.save()

    def delete_comment(self):
        self.delete()

    @classmethod
    def get_image_comments(cls,image):
        # All comments attached to the given image.
        return cls.objects.filter(image=image)

    class Meta:
        # Newest comments first.
        ordering = ['-posted_date']
| 1.4375 | 1 |
main.py | p-sodmann/Scan-Forget | 0 | 62596 | <gh_stars>0
import datetime
from win32com.client import Dispatch
from rich import print
import typer
import pytesseract
from PIL import Image
import json
import os
debug_mode = False
class TesseractWrapper():
    """Callable wrapper around pytesseract OCR using the German language pack."""
    def __init__(self) -> None:
        # Hard-coded Windows install path of the Tesseract binary.
        pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract'

    def __call__(self, image) -> str:
        # OCR the given PIL image with the German ('deu') model.
        return pytesseract.image_to_string(image, lang="deu")
class ScanService():
    """Wraps a Windows WIA scanner: a device is selected at construction and
    each call transfers one scanned page as PNG."""
    def __init__(self) -> None:
        wia = Dispatch("WIA.CommonDialog")
        # Let the user pick the scanner device interactively.
        dev = wia.ShowSelectDevice()
        self.scanner = dev.Items[0]
        self.scanner.ItemID  # NOTE(review): no-op attribute access; remove?
        # WIA format GUID for PNG output.
        self.WIA_IMG_FORMAT_PNG = "{B96B3CAF-0728-11D3-9D7B-0000F81EF32E}"
        # switch to feeder
        if not debug_mode:
            dev.Properties("Document Handling Select").Value = 1

    def __call__(self):
        # Acquire one page from the scanner and return the WIA image object.
        return self.scanner.Transfer(self.WIA_IMG_FORMAT_PNG)
class Scan():
    """One scanned page plus its metadata (title, date, OCR text, owner, page)."""

    # Characters considered unsafe in file names; each is mapped to '_'.
    _UNSAFE_CHARS = [" ", ":", ";", ",", ".", "!", "?", "\"", "'", "\\", "/", "|",
                     "*", "&", "^", "%", "$", "#", "@", "~", "`", "(", ")", "[",
                     "]", "{", "}", "=", "+", "-", "_", ">", "<"]

    def __init__(self, id=-1, title="") -> None:
        self.id = id
        self.title = title
        # Creation date defaults to today.
        self.date = datetime.date.today()
        self.keywords = []
        self.text = ""
        self.owner = ""
        self.page = 0
        # Image file path derived from the zero-padded id and sanitized title.
        self.image_path = f"images/{self.id:06d}_{self.sanitize_title()}.png"

    def sanitize_title(self):
        """Return the title with every unsafe character replaced by '_'."""
        return "".join("_" if ch in self._UNSAFE_CHARS else ch for ch in self.title)

    def toJSON(self):
        """Return a JSON-serializable dict of this scan's metadata."""
        return {
            "id": self.id,
            "title": self.title,
            "date": self.date.isoformat(),
            "keywords": self.keywords,
            "text": self.text,
            "owner": self.owner,
            "image_path": self.image_path,
            "page": self.page
        }

    def from_json(self, dictionary):
        """Populate this instance from a dict produced by toJSON(); returns self."""
        self.id = dictionary["id"]
        self.title = dictionary["title"]
        self.date = datetime.datetime.strptime(dictionary["date"], "%Y-%m-%d").date()
        self.keywords = dictionary["keywords"]
        self.text = dictionary["text"]
        self.owner = dictionary["owner"]
        self.image_path = dictionary["image_path"]
        self.page = dictionary["page"]
        return self
class DataBase():
    """Minimal JSON-file store for Scan records ('database.json' in the CWD)."""
    def __init__(self):
        self.data = []

    def get_next_id(self):
        # IDs are dense indices: next id equals the current record count.
        return len(self.data)

    def save(self):
        # Serialize every Scan via toJSON() and overwrite database.json.
        serialization_copy = [d.toJSON() for d in self.data]
        with open("database.json", "w", encoding='utf-8') as f:
            json.dump(serialization_copy, f, indent=4, ensure_ascii=False)

    def load(self):
        # Restore previously saved scans; silently no-op if no database yet.
        if not os.path.exists("database.json"):
            return None
        with open("database.json", "r", encoding='utf-8') as f:
            unserialization_copy = json.load(f)
        for d in unserialization_copy:
            self.data.append(Scan().from_json(d))
def main(user: str):
    """Interactive scan loop: prompt for a title, scan a page, OCR it, and
    persist it via the module-level `db`; entering 'q' quits.

    At the prompt, an empty input continues the current document (page += 1),
    a new title starts a new document at page 0.
    """
    previous = False
    title = input("Title: ")
    page = 0
    while title != "q":
        if previous:
            if next_title == "q":
                return None
            elif next_title != "":
                title = next_title
                page = 0
            else:
                page += 1
        scanner = ScanService()
        # scan
        image = scanner()
        scan = Scan(db.get_next_id(), title)
        # NOTE(review): sets scan.user, but Scan serializes `owner` — the
        # scanning user is never persisted; confirm intent.
        scan.user = user
        scan.page = page
        image.SaveFile(scan.image_path)
        # reload with pillow
        image = Image.open(scan.image_path)
        # ocr
        tesseract = TesseractWrapper()
        scan.text = tesseract(image)
        db.data.append(scan)
        db.save()
        next_title = input("Title: ")
        previous = True
if __name__ == "__main__":
    # `db` is the module-level database handle read by main() above.
    db = DataBase()
    db.load()
    typer.run(main)
| 1.695313 | 2 |
20200505_alignment_week4/structural_alignment_by_gradient_descent_DR/pymol_plugin.py | DTRademaker/DL_tutorials_2020 | 0 | 62724 | <gh_stars>0
from pymol.cgo import COLOR, SPHERE, BEGIN, LINES, VERTEX, END
from pymol import cmd
"""
Pymol plugin
instalation: https://pymolwiki.org/index.php/Plugins
"""
def __init_plugin__(app):
    """PyMOL plugin entry point: register the 'showAlignment' command."""
    cmd.extend('showAlignment', showAlignment)
# This class is used to generate pdb backbones as connected spheres
class Mesh():
    """ Mesh class, each feature should be own Mesh object"""
    def __init__(self, name):
        self.name = name
        # Flat CGO instruction list consumed by cmd.load_cgo() in push().
        self.list = []

    def addSphere(self, vertex, size=0.1, color=[1.0,0.0,0.0]):
        # Append a colored sphere at *vertex*.
        # NOTE(review): mutable default for `color` is shared between calls;
        # harmless here because it is only read, never mutated.
        self.list += [COLOR] + color
        self.list += [SPHERE] + vertex + [size]

    def addLine(self, v1, v2, c1=[1,1,1]):
        # Append a line segment from v1 to v2 in color c1.
        self.list += [BEGIN, LINES]
        self.list += [COLOR] + c1
        self.list += [VERTEX] + v1
        self.list += [VERTEX] + v2
        self.list += [END]

    def push(self):
        """When done defining mesh, send to pymol"""
        cmd.load_cgo(self.list, self.name, 1)
def showAlignment(fileName):
    """Draw two aligned protein backbones in PyMOL.

    *fileName* holds one space-separated coordinate triple per line (trailing
    blank line expected). The first half of the points is drawn red, the
    second half green; consecutive points are joined by lines, except across
    the boundary between the two halves.
    """
    radius = 0.5
    # Drop the final empty entry produced by the trailing newline.
    coordinates = open(fileName).read().split('\n')[:-1]
    coordinates = [[float(i)
                    for i in line.split(' ')]
                   for line in coordinates]
    mesh = Mesh(fileName)
    prev = 0  # NOTE(review): unused variable.
    for i, coordinate in enumerate(coordinates):
        if i < len(coordinates)/2.:
            # Draws 1st protein
            mesh.addSphere(coordinate, size=radius, color=[1,0,0])
        else:
            # Draws 2nd protein
            mesh.addSphere(coordinate, size=radius, color=[0,1,0])
        # Connect C-alphas backbone with lines; the i != mid-1 test prevents
        # joining the last point of protein 1 to the first point of protein 2.
        if i<(len(coordinates)-1) and i != int(len(coordinates)/2.)-1:
            mesh.addLine(coordinates[i], coordinates[i+1])
    mesh.push()
| 2.484375 | 2 |
daisy/tools/check_task_library.py | tschoonj/cgat-daisy | 1 | 62852 | <reponame>tschoonj/cgat-daisy
"""check-task-library
==========================
Perform a quick check of tools and metrics in the task library.
The script outputs a tab-separated table with three columns:
.. csv-table::
:header: "column", "description"
"category", "task category (tool|metric|...)"
"name", "task name"
"version", "version string. Set to 'unavailable' if tools in to found"
"""
import sys
import cgatcore.experiment as E
# import tasks to apply in this pipeline
from daisy.tasks import map_tool_to_runner, \
map_metric_to_runner, \
map_collate_to_runner, \
map_split_to_runner
def main(argv=None):
    """Check every tool/metric/split/collate task in the task library and
    write a tab-separated report (section, task, version, comments).

    Tasks whose version cannot be obtained are marked 'unavailable'.
    """
    parser = E.OptionParser(version="%prog version: $Id$",
                            usage=globals()["__doc__"])

    (options, args) = E.start(parser,
                              argv=argv,
                              add_output_options=True)

    total_counter = E.Counter()
    table = []

    for section, map_task2runner in [
            ("tool", map_tool_to_runner),
            ("metric", map_metric_to_runner),
            ("split", map_split_to_runner),
            ("collate", map_collate_to_runner)]:
        E.debug("processing section: {}".format(section))
        counter = E.Counter()
        for task, taskf in sorted(map_task2runner.items()):
            counter.ntasks += 1
            comments = []
            try:
                version = taskf().get_version()
                counter.version_ok += 1
            except Exception:
                # Tool binary missing or broken: record it, keep going.
                version = ""
                comments.append("unavailable")
                counter.version_fail += 1
            comments = "; ".join(comments)
            table.append(
                (section, task, version,
                 comments))
        E.info("{}: {}".format(section, counter))
        total_counter += counter

    options.stdout.write("section\ttask\tversion\tcomments\n")
    for row in table:
        options.stdout.write("\t".join(map(
            str, row)) + "\n")

    # BUG FIX: the summary previously reported the last section's `counter`
    # instead of the accumulated `total_counter`.
    E.info("{}: {}".format("total", total_counter))
    E.stop()
if __name__ == "__main__":
    # Propagate main()'s return value as the process exit code.
    sys.exit(main())
| 1.9375 | 2 |
src/collage_renderer.py | yozw/metapixel | 0 | 62980 | """Collage renderer."""
import itertools
import logger
import numpy
from PIL import Image, ImageEnhance
from distance_matrix import imageMSE
ENABLE_POST_OPTIMIZATION = True
def adjustImage(image, parameters):
    """Adjusts the brightness, contrast, and saturation of the given image."""
    # *parameters* is a (brightness, contrast, saturation) triple of PIL
    # enhancement factors (1.0 == unchanged).
    (brightness, contrast, saturation) = parameters
    newImage = ImageEnhance.Brightness(image).enhance(brightness)
    newImage = ImageEnhance.Contrast(newImage).enhance(contrast)
    newImage = ImageEnhance.Color(newImage).enhance(saturation)
    return newImage
def postOptimize(image, goalImage):
    """Adjusts the brightness, contrast, and saturation of the given image in such
    a way that the MSE between the adjusted image and the goal image is minimized.

    Returns the best (brightness, contrast, saturation) triple found by a
    grid search; returns (1, 1, 1) when post-optimization is disabled.
    """
    if not ENABLE_POST_OPTIMIZATION:
        return (1, 1, 1)

    # Vary brightness, saturation, contrast to better match the goal image
    brightnessSet = numpy.arange(0.6, 1.3, 0.05)
    contrastSet = numpy.arange(0.9, 1.2, 0.05)
    saturationSet = numpy.arange(1.0, 1.3, 0.05)
    settings = itertools.product(brightnessSet, contrastSet, saturationSet)

    bestMSE = None
    bestParameters = None
    for parameters in settings:
        newImage = adjustImage(image, parameters)
        MSE = imageMSE(newImage, goalImage)
        # BUG FIX: compare against None explicitly. An MSE of 0 is a valid
        # (perfect) score, but the old truthiness test `not bestMSE` treated
        # it as "no best yet" and kept overwriting the winner.
        if bestMSE is None or MSE < bestMSE:
            bestMSE = MSE
            bestParameters = parameters

    if bestParameters is None:
        raise Exception("Post-optimization failed")

    return bestParameters
def renderCollage(solution, grid, sampleGrid, imageLibrary, outputFile, cheatFactor=0):
    """Post-optimizes the solution and renders the output."""
    logging_unused = None  # (no-op placeholder removed? keep original flow below)
    logger.info("Post-optimizing ...")
    # For every grid cell, find the brightness/contrast/saturation triple that
    # best matches the library image to the goal tile (done on small samples).
    optimalParameters = {}
    for i in range(grid.imageCountX):
        logger.progress(i, grid.imageCountX)
        for j in range(grid.imageCountY):
            imageIndex = solution[i, j]
            image = imageLibrary.images[imageIndex]
            sampleImage = image.get(sampleGrid.imageWidth, sampleGrid.imageHeight).get()
            optimalParameters[i, j] = postOptimize(sampleImage, sampleGrid[i, j].get())

    logger.info("Rendering collage ...")
    # `background` holds the goal tiles; `collage` holds the adjusted library
    # images; they are blended below according to cheatFactor.
    background = Image.new("RGB", grid.size, "white")
    collage = Image.new("RGB", grid.size, "white")
    for i in range(grid.imageCountX):
        logger.progress(i, grid.imageCountX)
        for j in range(grid.imageCountY):
            offset = (i * grid.imageWidth, j * grid.imageHeight)
            imageIndex = solution[i, j]
            image = imageLibrary.images[imageIndex]
            subImage = image.get(grid.imageWidth, grid.imageHeight).get()
            image = adjustImage(subImage, optimalParameters[i, j])
            background.paste(grid[i, j].get(), offset)
            collage.paste(image, offset)

    logger.info("Saving ...")
    # cheatFactor=0 -> pure collage; cheatFactor=1 -> pure goal image.
    output = Image.blend(collage, background, cheatFactor)
    output.save(outputFile)
| 2.1875 | 2 |
pycore/extras/cors.py | ardihikaru/eaglestitch | 0 | 63108 | <filename>pycore/extras/cors.py
import aiohttp_cors
# CORS configuration passed to aiohttp_cors.setup(): allow requests from any
# origin, with credentials, and expose/accept all headers.
CORS_OPTIONS = {
    "*": aiohttp_cors.ResourceOptions(
        allow_credentials=True,
        expose_headers="*",
        allow_headers="*"
    ),
}
| 0.824219 | 1 |
src/lib/dStat.py | eehello/Dshield | 49 | 63236 | <reponame>eehello/Dshield
#!/usr/bin/env python
# encoding=utf-8
"""
* @ Dshield for Python
##############################################################################
# Author: YWJT / <NAME> #
# Modify: 2016-12-08 #
##############################################################################
# This program is distributed under the "Artistic License" Agreement #
# The LICENSE file is located in the same directory as this program. Please #
# read the LICENSE file before you make copies or distribute this program #
##############################################################################
"""
import sys
sys.path.append("..")
import os, re, time
from time import sleep
from lib import Dshield
class Dstat(Dshield):
    """Collects simple host statistics on Linux: network throughput for the
    monitored interface (/proc/net/dev) and load averages (/proc/loadavg)."""
    def __init__(self):
        Dshield.__init__(self)

    def _read(self):
        # Parse /proc/net/dev for the configured interface and return the
        # cumulative (received, sent) byte counters as floats.
        # NOTE(review): if the interface named in self.avr['montInterface'] is
        # not present, `recv`/`send` are never bound and this raises
        # UnboundLocalError at the return statement.
        fd = open("/proc/net/dev", "r")
        for line in fd.readlines():
            if line.find(self.avr['montInterface']) > 0:
                field = line.split(":")
                recv = field[1].split()[0]
                send = field[1].split()[8]
                continue
        fd.close()
        return (float(recv), float(send))

    def net(self):
        # Sample the counters twice, one second apart, and return the deltas
        # (MB over that second) formatted with 3 decimals.
        net = {}
        (recv, send) = self._read()
        while True:
            # NOTE(review): the loop body returns on its first pass, so this
            # `while True` never actually repeats.
            time.sleep(1)
            (new_recv, new_send) = self._read()
            net['recv'] = "%.3f" %((new_recv - recv)/1024/1024)
            net['send'] = "%.3f" %((new_send - send)/1024/1024)
            return net

    def loadavg(self):
        # Return the 1/5/15 minute load averages from /proc/loadavg as strings.
        loadavg = {}
        f = open("/proc/loadavg","r")
        con = f.read().split()
        f.close()
        loadavg['1m'] = con[0]
        loadavg['5m'] = con[1]
        loadavg['15m'] = con[2]
        return loadavg
| 1.429688 | 1 |
shared/util.py | demelin/IDGAN | 0 | 63364 | """ Defines a number of diverse, system-wide helper functions.
Contents:
1. Pickling
2. Graph saving and loading
3. Reporting
4. Corpus processing
5. Math functions
"""
import os
import sys
import time
import pickle
import codecs
import random
import logging
import numpy as np
import pandas as pd
import tensorflow as tf
# =========================================== Pickling ===========================================
def make_pickle(opt, data_processor, corpus_name, source_path, data_path, vocab_path=None, is_train=False,
                is_valid=False, is_test=False):
    """ Pickles corpus information for fast re-use; tracks the duration of the performed operations for
    rudimentary estimation of processing efficiency. """
    # Vocabulary objects are created for training sets only
    if not is_train:
        # Display the appropriate feedback to user
        if is_valid:
            print('Processing the validation data ...')
        elif is_test:
            print('Processing the test data ...')
        else:
            print('Processing full corpus data ...')
        g_st = time.time()
        sentences = data_processor(opt, source_path, corpus_name)
        g_diff = time.time() - g_st
        print('Data generation took {:d} minutes and {:.4f} seconds!'.format(int(g_diff // 60), g_diff % 60))
        p_st = time.time()
        # NOTE(review): the handle opened for *writing* is named `in_file`
        # throughout this function; the naming is inverted but behavior is fine.
        with open(data_path, 'wb') as in_file:
            pickle.dump(sentences, in_file)
        p_diff = time.time() - p_st
        print('Pickling took {:d} minutes and {:.4f} seconds!'.format(int(p_diff // 60), p_diff % 60))
    else:
        print('Processing training vocab and data ...')
        g_st = time.time()
        # Training runs additionally build a (Zipf-sorted) vocabulary.
        vocab, sentences = data_processor(opt, source_path, corpus_name, zipf_sort=True, generate_vocab=True)
        g_diff = time.time() - g_st
        print('Data generation took {:d} minutes and {:.4f} seconds!'.format(int(g_diff // 60), g_diff % 60))
        if vocab_path is not None:
            pv_st = time.time()
            with open(vocab_path, 'wb') as in_file:
                pickle.dump(vocab, in_file)
            pv_diff = time.time() - pv_st
            print('Vocab pickling took {:d} minutes and {:.4f} seconds!'.format(int(pv_diff // 60), pv_diff % 60))
        pd_st = time.time()
        with open(data_path, 'wb') as in_file:
            pickle.dump(sentences, in_file)
        pd_diff = time.time() - pd_st
        print('Data pickling took {:d} minutes and {:.4f} seconds!'.format(int(pd_diff // 60), pd_diff % 60))
def load_pickle(pickle_path):
    """Deserialize and return the object stored at *pickle_path*."""
    with open(pickle_path, 'rb') as handle:
        return pickle.load(handle)
# =========================================== Graph saving and loading ===========================================
def save_model(session, model, model_saver, save_dir, source_epoch):
    """ Saves the model to the specified save directory. """
    # Epoch designations are limited to 'best', 'final', and time-stamps
    unique = ['best', 'final']
    # Generate the appropriate checkpoint name
    if source_epoch in unique:
        file_name = '{:s}_{:s}.ckpt'.format(str(source_epoch), model.name)
    else:
        # Time-stamped checkpoint: <epoch>_<d.m.y_h:m:s>_<model>.ckpt
        time_tuple = time.localtime(time.time())
        time_stamp = '{:d}.{:d}.{:d}_{:d}:{:d}:{:d}' \
            .format(time_tuple[2], time_tuple[1], time_tuple[0], time_tuple[3], time_tuple[4], time_tuple[5])
        file_name = '{:s}_{:s}_{:s}.ckpt'.format(str(source_epoch), time_stamp, model.name)
    # Save
    save_path = model_saver.save(session, os.path.join(save_dir, file_name))
    # Report
    logging.info('{:s} model {:s} has been saved in file {:s}'.format(model.name, file_name, save_path))
def load_model(session, model_saver, save_dir, target_epoch):
    """ Loads the specified checkpoint from the designated save directory. """
    # Retrieve the correct checkpoint file (checkpoints are identified by
    # their '.meta' companion files).
    checkpoints = [
        ckpt for ckpt in os.listdir(save_dir) if os.path.isfile(os.path.join(save_dir, ckpt)) and 'meta' in ckpt]
    if target_epoch is None:
        # Default to the checkpoint flagged as best-performing.
        load_from = [ckpt for ckpt in checkpoints if ckpt.startswith('best')]
    else:
        load_from = [ckpt for ckpt in checkpoints if ckpt.startswith(str(target_epoch))]
    # Strip the trailing '.meta' extension to obtain the checkpoint prefix.
    file_name = '.'.join(load_from[0].split('.')[:-1])
    file_path = os.path.join(save_dir, file_name)
    # Load
    model_saver.restore(session, file_path)
    # Report
    logging.info('Model restored from {:s}'.format(file_name))
# =========================================== Reporting ===========================================
def print_off():
    """ Suppresses print output; see: stackoverflow.com/questions/8391411/suppress-calls-to-print-python"""
    # NOTE(review): the devnull handle is never closed; it leaks until GC.
    sys.stdout = open(os.devnull, 'w')
def print_on():
    """ Re-enables print output; same source as above."""
    # Restore the interpreter's original stdout.
    sys.stdout = sys.__stdout__
# =========================================== Corpus processing ===========================================
def clean_europarl(source_path, clean_path, keep_above=2):
    """Copy *source_path* to *clean_path*, dropping every line with
    *keep_above* or fewer whitespace-separated tokens, and report the result."""
    kept_lines = 0
    kept_words = 0
    with codecs.open(source_path, 'r', encoding='utf8') as src:
        with open(clean_path, 'w') as dst:
            for line in src:
                num_words = len(line.split())
                if num_words > keep_above:
                    dst.write(line)
                    kept_lines += 1
                    kept_words += num_words
    # Report the outcome of the cleaning process
    print('Corpus cleaned. Cleaned corpus contains {:d} lines, totaling up to {:d} words.'.
          format(kept_lines, kept_words))
def truncate_europarl(source_path, truncated_path, truncated_length=100000):
    """Write the first *truncated_length* lines of *source_path* to
    *truncated_path* and report the size of the truncated corpus."""
    word_count = 0
    with codecs.open(source_path, 'r', encoding='utf8') as src:
        with open(truncated_path, 'w') as dst:
            for line_idx, line in enumerate(src):
                if line_idx >= truncated_length:
                    break
                dst.write(line)
                word_count += len(line.split())
    # Report the scope of the truncated corpus
    print('Corpus truncated to {:d} lines, totaling up to {:d} words.'.format(truncated_length, word_count))
def train_valid_test_split(source_path, train_path, valid_path, test_path, split_fractions):
    """Shuffle the corpus at *source_path* and split it into train/valid/test
    files according to the proportions given in *split_fractions*."""
    with open(source_path, 'r') as src:
        corpus = src.readlines()
    # Shuffle so each split samples the whole corpus.
    random.shuffle(corpus)
    total = len(corpus)
    # Cut-off points for each split.
    train_end = int(total * split_fractions[0])
    valid_end = int(total * (split_fractions[0] + split_fractions[1]))
    split_plan = [(train_path, corpus[: train_end]),
                  (valid_path, corpus[train_end: valid_end]),
                  (test_path, corpus[valid_end:])]
    for target_path, split_lines in split_plan:
        with open(target_path, 'w') as dst:
            for line in split_lines:
                dst.write(line.strip() + '\n')
    print('Train-valid-test-split successfully completed.')
def shrink_domain(scored_path, reduced_path, keep_fraction=0.9):
    """ Prunes 1.0 - keep_faction of the total source corpus size in outliers, as determined by model perplexity scores
    assigned to each of the corpus sentences by a trained language model. """
    # Load the perplexity-annotated corpus (tab-separated: sentence, score)
    scored_df = pd.read_table(scored_path, header=None, names=['Sentence', 'Sentence_Perplexity'],
                              skip_blank_lines=True)
    # Rank sentences from least to most surprising under the language model
    scored_df = scored_df.sort_values('Sentence_Perplexity', ascending=True)
    # Retain only the best-scoring keep_fraction of sentences, i.e. drop the
    # high-perplexity outliers at the bottom of the ranking
    retained = scored_df.iloc[0: int(len(scored_df) * keep_fraction), 0]
    # Randomize sentence order before writing the pruned corpus to disk
    retained = retained.iloc[np.random.permutation(len(retained))]
    with open(reduced_path, 'w') as reduced_file:
        reduced_file.writelines(retained.iloc[idx].strip() + '\n' for idx in range(len(retained)))
    print('Corpus domain successfully restricted.')
def id_split(annotated_path, low_path, high_path):
    """ Splits the annotated corpus into low-ID and a high-ID sub-corpora, each containing an identical
    number of samples; the so obtained corpora are used in both stages of IDGAN training. """
    # Load the surprisal/ UID-annotated corpus produced by the scoring step
    annotated_df = pd.read_table(annotated_path, header=None,
                                 names=['Sentence', 'Total_surprisal', 'Per_word_surprisal', 'Normalized_surprisal',
                                        'Total_UID_divergence', 'Per_word_UID_divergence', 'Normalized_UID_divergence'],
                                 skip_blank_lines=True)
    # Only the sentence and its normalized surprisal matter for this split
    annotated_df = annotated_df.loc[:, ['Sentence', 'Normalized_surprisal']]
    annotated_df = annotated_df.sort_values('Normalized_surprisal', ascending=True)
    # Cut the sorted corpus in two at the median sentence, yielding halves of
    # (near-)identical size: low-ID below the median, high-ID at and above it
    midpoint = len(annotated_df) // 2
    split_jobs = [(annotated_df.iloc[: midpoint, :], low_path),
                  (annotated_df.iloc[midpoint:, :], high_path)]
    for half_df, target_path in split_jobs:
        corpus_name = target_path.split('/')[-1]
        # Aggregate normalized-surprisal statistics for the report below
        # (computed before shuffling; shuffling does not affect aggregates)
        ns_series = half_df['Normalized_surprisal']
        ns_mean = ns_series.mean()
        ns_median = ns_series.median()
        ns_min = ns_series.min()
        ns_max = ns_series.max()
        # Shuffle the half so the written corpus is not ordered by ID
        shuffled = half_df.iloc[np.random.permutation(len(half_df))]
        total_words = 0
        with open(target_path, 'w') as target_file:
            for row_id in range(len(shuffled)):
                sentence = shuffled.iloc[row_id][0].strip()
                target_file.write(sentence + '\n')
                total_words += len(sentence.split())
        mean_sent_len = total_words / len(shuffled)
        # Report ID-relevant statistics
        print('{:s} corpus: Mean NS: {:.4f} | Median NS: {:.4f} | Min NS: {:.4f} | Max NS: {:.4f} | '
              'Mean sentence length: {:.4f}'
              .format(corpus_name, ns_mean, ns_median, ns_min, ns_max, mean_sent_len))
    print('Corpus successfully subdivided according to the chosen ID criterion.')
# =========================================== Math functions ===========================================
def padded_log(input_tensor):
    """ Prevents NaNs during log computations, see github.com/AYLIEN/IDGAN-intro/blob/master/IDGAN.py. """
    # Clamp the input to a small positive floor before taking the log, so values
    # at or near zero cannot produce -inf/ NaN (uses the TF1-style tf.log API)
    return tf.log(tf.maximum(input_tensor, 1e-5))
| 2.75 | 3 |
classType.py | Andreby42/learn_python_hardway | 0 | 63492 | ##创建自定义类
class Person:
    """Minimal person record demonstrating instance attributes and methods."""

    def set_name(self, name):
        """Store the person's name on the instance."""
        self.name = name

    def get_name(self):
        """Return the previously stored name."""
        return self.name

    def say_hello(self):
        """Print a greeting that includes the stored name."""
        print("hello" + self.name)

    def to_str(self):
        """Print the stored name.

        Bug fix: the original called self.name() -- but self.name is a plain
        string, not a callable, so that raised TypeError at runtime.
        """
        print(self.name)
# Instantiate Person and exercise the setter; str(person) falls back to the
# default object repr since Person defines no __str__
person=Person();
person.set_name("lily")
print(str(person));
##Attributes, functions and methods
##Difference between functions and methods: a method lives inside a class and takes a
##self parameter; a function sits at the same level as the class and has no self parameter
class Mobile:
    """Demonstrates instance methods: simple setters, getters, and a printer."""

    def set_os(self, os):
        """Remember the operating system name."""
        self.os = os

    def get_os(self):
        """Return the stored operating system name."""
        return self.os

    def set_brand(self, brand):
        """Remember the brand name."""
        self.brand = brand

    def get_brand(self):
        """Return the stored brand name."""
        return self.brand

    def method(self):
        """Print a message including the stored OS (a method: defined on the class, takes self)."""
        print("I have a mobile" + self.os)
def fuction():
    # A plain module-level function (contrast with Mobile.method above: no self
    # parameter). The typo in the name is kept because the script calls it below.
    print("I dont have a Mobile")
# Exercise the Mobile class; method() and fuction() print internally and return
# None, so the outer print() calls additionally emit "None"
sumsung=Mobile();
sumsung.set_os("Android")
sumsung.set_brand("Sumsung")
print(sumsung.method())
print(fuction())
##Hiding: to make a method or attribute private, prefix its name with two underscores;
##it can then only be called from inside the class --
##it is not reachable from scopes outside the class
class Pc:
    """Shows name mangling: a double-underscore method is private to the class."""

    def __brand(self):
        """Private helper; from outside it is only reachable via the mangled name."""
        print("this is my pc")

    def print(self):
        """Public entry point that delegates to the private __brand helper."""
        print("the pc is")
        self.__brand()
mypc=Pc();
mypc.print()
# mypc._brand()  # would fail: the private method is name-mangled to _Pc__brand
##Specifying a superclass
class Human:
    """Base class for the inheritance demo below."""

    def body(self, body):
        """Record the body description (the attribute then shadows this method)."""
        self.body = body

    def get_body(self):
        """Print a generic statement about bodies."""
        print("humanbeing all have body")

    def face(self, face):
        """Record the face description (the attribute shadows this method too)."""
        self.face = face

    def get_face(self):
        """Print a generic statement about faces."""
        print("humanbeing all have face")
class Man(Human):  # Human is declared as Man's superclass
    """Subclass of Human adding one attribute setter and a conventionally private getter."""

    def dick(self, dick):
        """Record the attribute (again shadowing the method of the same name)."""
        self.dick = dick

    def _get_dick(self):
        """Single-underscore prefix: private by convention only."""
        print("man all have a dick or its a monster")
# Build a Man instance and set attributes inherited from Human; the get_* calls
# print internally and return None, so print() shows three None values as well
jackChan=Man();
jackChan.dick("big")
jackChan.body("strong")
jackChan.face("handsome")
print(jackChan.get_face(),jackChan.get_body(),jackChan._get_dick())
##Use issubclass to check whether one class is a subclass of another
print(issubclass(Man,Human))
##If you have a class and want to know its base classes, use the __bases__ attribute
print(Man.__bases__)
##Use isinstance to determine whether an object is an instance of a given class
print(isinstance(jackChan,Man))
##Get the class the current object belongs to
print(jackChan.__class__)
##Multiple inheritance is skipped for now -- too much hassle
class boy(Man):
    """Subclass of Man with a class-level attribute and an age setter/printer."""

    # Class attribute shared by all instances until shadowed per instance
    ages = 14

    def set_age(self, age):
        """Store the age; expected as a string, since get_age concatenates it."""
        self.age = age

    def get_age(self):
        """Print the age sentence (requires self.age to be a string)."""
        print("im" + self.age + "old boy")
# Exercise the boy subclass and the built-in reflection helpers
jim=boy();
jim.ages=14;  # shadows the class attribute with an instance attribute of the same name
jim.set_age("14")
print(boy.__bases__);
attrs=getattr(jackChan,"dick");
print(attrs)
body_attr=hasattr(jackChan,"body")
print(body_attr)
##setattr assigns the given value to the named attribute of an object
setattr(jackChan,"dick","tiny")
print(getattr(jackChan,"dick"))
##type returns the object's type
print(type(jackChan))
###Interfaces and introspection
##getattr(object,name[,default]) fetches an attribute's value; a default can also be supplied
| 2.109375 | 2 |
python/testdbconnector.py | AungWinnHtut/POL | 2 | 63620 | #!/usr/bin/python
import mysql.connector
# Smoke-test the MySQL connector: open a connection to the local 'test'
# database and immediately close it -- no queries are issued.
# NOTE(review): credentials are root with an empty password, presumably a
# local development setup; confirm before reusing elsewhere.
cnx = mysql.connector.connect(user='root', password='',
                              host='127.0.0.1',
                              database='test')
cnx.close()
| 0.84375 | 1 |
Lab_64/challenge01-musters.py | MandoGuardado/2022-01-04-Python | 1 | 63748 | #!/usr/bin/python3
"""Alta3 Research | RZFeeser
Review of Lists and Dictionaries"""
# define a short data set (in real world, we want to read this from a file or API)
# Define a short data set (in the real world, read this from a file or API)
munsters = {'endDate': 1966, 'startDate': 1964,
            'names': ['Lily', 'Herman', 'Grandpa', 'Eddie', 'Marilyn']}  # {} creates dict

# Your solution goes below this line
# ----------------------------------

# Display the value mapped to names
names = munsters["names"]
# Idiom fix: iterate the list directly instead of indexing via range(len(...));
# output is identical, one name per line
for name in names:
    print(name)

# Display the value mapped to endDate
print(munsters["endDate"])

# Display the value mapped to startDate
print(munsters["startDate"])

# Add a new key, episodes, mapped to the value of 70
munsters["episodes"] = 70
print(munsters)
| 2.796875 | 3 |
example/dataset_load.py | rebeccadavidsson/covid19-sir | 0 | 63876 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
import warnings
import covsirphy as cs
def main():
    """Download each supported COVID-19 dataset, save cleaned CSV copies, and
    write example subset/figure outputs under example/output/<script name>."""
    # Promote warnings to errors so data problems surface immediately
    warnings.simplefilter("error")
    # Raw data lives in <repo>/input; results go to example/output/<stem>
    script_path = Path(__file__)
    raw_dir = script_path.parent.with_name("input")
    out_dir = script_path.with_name("output").joinpath(script_path.stem)
    out_dir.mkdir(exist_ok=True, parents=True)
    # A single loader instance serves all dataset downloads
    loader = cs.DataLoader(raw_dir)
    # --- JHU dataset: case counts ---
    print("<The number of cases>")
    jhu_data = loader.jhu()
    print(jhu_data.citation)
    jhu_data.cleaned().to_csv(out_dir.joinpath("covid19_cleaned_jhu.csv"), index=False)
    # Records restricted to Japan
    japan_records, _ = jhu_data.records("Japan")
    japan_records.to_csv(
        out_dir.joinpath("jhu_cleaned_japan.csv"), index=False)
    # --- Population dataset ---
    print("<Population values>")
    population_data = loader.population()
    print(population_data.citation)
    population_data.cleaned().to_csv(
        out_dir.joinpath("population_cleaned.csv"), index=False)
    # --- OxCGRT dataset: government responses ---
    print("<Government response tracker>")
    oxcgrt_data = loader.oxcgrt()
    print(oxcgrt_data.citation)
    oxcgrt_data.cleaned().to_csv(
        out_dir.joinpath("oxcgrt_cleaned.csv"), index=False)
    # --- PCR test dataset ---
    print("<The number of PCR tests>")
    pcr_data = loader.pcr()
    print(pcr_data.citation)
    pcr_data.cleaned().to_csv(
        out_dir.joinpath("pcr_cleaned.csv"), index=False)
    pcr_data.positive_rate(
        country="Greece",
        filename=out_dir.joinpath("pcr_positive_rate_Greece.jpg"))
    # --- Vaccination dataset ---
    print("<The number of vaccinations>")
    vaccine_data = loader.vaccine()
    print(vaccine_data.citation)
    vaccine_data.cleaned().to_csv(
        out_dir.joinpath("vaccine_cleaned.csv"), index=False)
    vaccine_data.subset(country="Canada").to_csv(
        out_dir.joinpath("vaccine_subset_canada.csv"), index=False)
    # --- Population pyramid dataset ---
    print("<Population pyramid>")
    pyramid_data = loader.pyramid()
    print(pyramid_data.citation)
    pyramid_data.subset(country="Japan").to_csv(
        out_dir.joinpath("pyramid_subset_japan.csv"), index=False)
# Run the demo only when executed as a script, not on import
if __name__ == "__main__":
    main()
| 1.5 | 2 |