Dataset schema (column, dtype, value range):

hexsha                                      stringlengths    40 .. 40
size                                        int64            5 .. 2.06M
ext                                         stringclasses    10 values
lang                                        stringclasses    1 value
max_stars_repo_path                         stringlengths    3 .. 248
max_stars_repo_name                         stringlengths    5 .. 125
max_stars_repo_head_hexsha                  stringlengths    40 .. 78
max_stars_repo_licenses                     sequencelengths  1 .. 10
max_stars_count                             int64            1 .. 191k
max_stars_repo_stars_event_min_datetime     stringlengths    24 .. 24
max_stars_repo_stars_event_max_datetime     stringlengths    24 .. 24
max_issues_repo_path                        stringlengths    3 .. 248
max_issues_repo_name                        stringlengths    5 .. 125
max_issues_repo_head_hexsha                 stringlengths    40 .. 78
max_issues_repo_licenses                    sequencelengths  1 .. 10
max_issues_count                            int64            1 .. 67k
max_issues_repo_issues_event_min_datetime   stringlengths    24 .. 24
max_issues_repo_issues_event_max_datetime   stringlengths    24 .. 24
max_forks_repo_path                         stringlengths    3 .. 248
max_forks_repo_name                         stringlengths    5 .. 125
max_forks_repo_head_hexsha                  stringlengths    40 .. 78
max_forks_repo_licenses                     sequencelengths  1 .. 10
max_forks_count                             int64            1 .. 105k
max_forks_repo_forks_event_min_datetime     stringlengths    24 .. 24
max_forks_repo_forks_event_max_datetime     stringlengths    24 .. 24
content                                     stringlengths    5 .. 2.06M
avg_line_length                             float64          1 .. 1.02M
max_line_length                             int64            3 .. 1.03M
alphanum_fraction                           float64          0 .. 1
count_classes                               int64            0 .. 1.6M
score_classes                               float64          0 .. 1
count_generators                            int64            0 .. 651k
score_generators                            float64          0 .. 1
count_decorators                            int64            0 .. 990k
score_decorators                            float64          0 .. 1
count_async_functions                       int64            0 .. 235k
score_async_functions                       float64          0 .. 1
count_documentation                         int64            0 .. 1.04M
score_documentation                         float64          0 .. 1

Records follow in schema order, one field value per line.
4bc441d80eb90948270b0c67a69acd09e054bf96
793
py
Python
packages/jobs/statuses.py
NHSDigital/list-reconciliation
37b1ebe99a64275e23b0e7fb6a89415b92d14306
[ "MIT" ]
4
2021-06-25T08:28:54.000Z
2021-12-16T11:03:42.000Z
packages/jobs/statuses.py
NHSDigital/list-reconciliation
37b1ebe99a64275e23b0e7fb6a89415b92d14306
[ "MIT" ]
184
2021-06-24T15:27:08.000Z
2022-03-17T12:44:28.000Z
packages/jobs/statuses.py
NHSDigital/list-reconciliation
37b1ebe99a64275e23b0e7fb6a89415b92d14306
[ "MIT" ]
3
2021-11-05T10:21:44.000Z
2022-03-04T14:29:24.000Z
from enum import Enum


class JobStatus(Enum):
    PENDING = "PENDING"
    REJECTED = "REJECTED"
    SENT_TO_DPS = "SENT_TO_DPS"
    PROCESSED_BY_DPS = "PROCESSED_BY_DPS"
    COMPLETE = "COMPLETE"
    NOTIFIED_VALIDATION_FAILED = "NOTIFIED_VALIDATION_FAILED"
    CLEANED_UP = "CLEANED_UP"


class ParseStatus(Enum):
    NOT_PARSED = "NOT_PARSED"
    PARSE_FAILED = "PARSE_FAILED"


class InvalidErrorType(Enum):
    RECORDS = "INVALID_RECORDS"
    STRUCTURE = "INVALID_STRUCTURE"
    FILENAME = "INVALID_FILENAME"


class InputFolderType(Enum):
    IN = "inbound/"
    PASS = "pass/"
    FAIL = "fail/"
    RETRY = "retry/"
    REJECTED = "rejected/"


class RegistrationType(Enum):
    GP = "OnlyOnGP"
    PDS = "OnlyOnPDS"


class JobNotFound(Exception):
    """Job Not Found Exception"""
18.880952
61
0.675914
753
0.949559
0
0
0
0
0
0
273
0.344262
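A minimal sketch of how the enums in statuses.py above might be consumed; the import path and the job dict are assumptions, not part of the source:

# Hypothetical consumer of JobStatus above; module path and data are illustrative.
from statuses import JobStatus

job = {"status": "PENDING"}            # e.g. a row loaded from a job table
status = JobStatus(job["status"])      # raises ValueError on unknown strings
if status is JobStatus.PENDING:
    job["status"] = JobStatus.COMPLETE.value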
4bc544909f30548d56d19ceee6f586966f0cd714
843
py
Python
DevOps/Data_Science_in_Production/cap02/15_scikit-learn.py
unimauro/Courses
81e5b9c4cbc9b875eff82f96bda7d21ec4f258b2
[ "Apache-2.0" ]
1
2020-07-25T04:56:55.000Z
2020-07-25T04:56:55.000Z
DevOps/Data_Science_in_Production/cap02/15_scikit-learn.py
unimauro/Courses
81e5b9c4cbc9b875eff82f96bda7d21ec4f258b2
[ "Apache-2.0" ]
2
2020-06-15T04:42:00.000Z
2021-08-29T03:48:28.000Z
DevOps/Data_Science_in_Production/cap02/15_scikit-learn.py
unimauro/Courses
81e5b9c4cbc9b875eff82f96bda7d21ec4f258b2
[ "Apache-2.0" ]
null
null
null
import pandas as pd
from sklearn.linear_model import LogisticRegression
import mlflow
import mlflow.sklearn
import flask

model_path = "models/logit_games_v1"
model = mlflow.sklearn.load_model(model_path)
app = flask.Flask(__name__)


@app.route("/", methods=["GET", "POST"])
def predict():
    data = {"success": False}
    params = flask.request.args
    if "G1" in params.keys():
        new_row = {
            "G1": params.get("G1"), "G2": params.get("G2"),
            "G3": params.get("G3"), "G4": params.get("G4"),
            "G5": params.get("G5"), "G6": params.get("G6"),
            "G7": params.get("G7"), "G8": params.get("G8"),
            "G9": params.get("G9"), "G10": params.get("G10")}
        new_x = pd.DataFrame.from_dict(new_row, orient="index").transpose()
        data["response"] = str(model.predict_proba(new_x)[0][1])
        data["success"] = True
    return flask.jsonify(data)


if __name__ == '__main__':
    app.run(host='0.0.0.0')
31.222222
58
0.688019
0
0
0
0
54
0.064057
0
0
177
0.209964
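The Flask handler above reads features G1..G10 from the query string and returns a JSON body with "success" and "response" keys. A sketch of a client call, assuming the app is running locally on Flask's default port 5000; the feature values are made up:

import requests

# Hypothetical client for the / endpoint above; dummy feature values.
params = {"G%d" % i: 1 for i in range(1, 11)}   # G1..G10
resp = requests.get("http://localhost:5000/", params=params)
print(resp.json())   # e.g. {"success": true, "response": "0.42..."}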
298a78605da6ac7b5a8526389d33bd97829a9e95
12,607
py
Python
tornado_sockets/views/timeseries.py
willjschmitt/joulia-webserver
712decb749c2d1bda71af49ecab245378bf30078
[ "FTL" ]
null
null
null
tornado_sockets/views/timeseries.py
willjschmitt/joulia-webserver
712decb749c2d1bda71af49ecab245378bf30078
[ "FTL" ]
95
2016-08-04T01:59:37.000Z
2021-06-10T18:41:46.000Z
tornado_sockets/views/timeseries.py
willjschmitt/joulia-webserver
712decb749c2d1bda71af49ecab245378bf30078
[ "FTL" ]
null
null
null
"""Handles websockets and asynchronous endpoints provided by Tornado instead of Django, but use the Django model framework for a database ORM. """ import datetime import functools import json import logging import tornado.escape from tornado.ioloop import IOLoop import tornado.web import tornado.websocket from django.db.models.signals import post_save from django.dispatch import receiver from django.utils import timezone from rest_framework.authtoken.models import Token from rest_framework.utils import model_meta from brewery.models import AssetSensor from brewery.models import RecipeInstance from brewery.models import TimeSeriesDataPoint from brewery.serializers import TimeSeriesDataPointSerializer from joulia.random import random_string from tornado_sockets.views.django import DjangoAuthenticatedWebSocketHandler LOGGER = logging.getLogger(__name__) class TimeSeriesSocketHandler(DjangoAuthenticatedWebSocketHandler): """A websocket request handler/connection used for a two-way connection for streaming sensor data between a client and the webserver. Allows for real-time streaming of sensor data as soon as it is posted. Client posts a subscription request, which then triggers the handler to send any updates to that sensor immediately to the client. Attributes: waiters: (class-level) - A set containing all of the current connection handlers that are active. subscriptions: (class-level) - A dictionary mapping to connection handlers. Key is specified as a tuple of (recipe_instance_pk, sensor_pk). controller_requestmap: (class-level) - A dictionary mapping a websocket connection to a brewhouse object. Used for indicating if a connection exists with a brewhouse. controller_controllermap: (class-level) A dictionary mapping a brewhouse to the websocket connection to it. Used for indicating if a connection exists with a brewhouse. source_id: Identifies a unique connection with a short hash, which we can use to compare new data points to, and see if the socket was the one that originated it, and thusly should not """ waiters = set() subscriptions = {} controller_requestmap = {} controller_controllermap = {} def __init__(self, *args, **kwargs): super(TimeSeriesSocketHandler, self).__init__(*args, **kwargs) self.auth = None self.recipe_instance_pk = None self.source_id = random_string(4) def get_compression_options(self): # Non-None enables compression with default options. return {} def _authenticate(self): """If the connection comes from authentication associating it with a particular Brewhouse, make sure we store the connection in a mapping between the websocket and the brewhouse. Stores this request in a class-level map to indicate we have an established connection with a Brewhouse controller. """ if self.auth is not None and isinstance(self.auth, Token): if self.auth.brewhouse_pk: self.controller_controllermap[self.auth.brewhouse_pk] = self self.controller_requestmap[self] = self.auth.brewhouse_pk def _unauthenticate(self): """Remove this request from the class-level maps to indicate we have lost connection with the Brewhouse. """ if self.auth is not None and isinstance(self.auth, Token): if self.auth.brewhouse_pk: del self.controller_controllermap[self.auth.brewhouse_pk] del self.controller_requestmap[self] def open(self): """Handles the opening of a new websocket connection for streaming data. 
""" LOGGER.info("New websocket connection incoming from %s.", self.get_current_user()) self.waiters.add(self) self._authenticate() def on_close(self): """Handles the closing of the websocket connection, removing any subscriptions. """ LOGGER.info("Websocket connection from %s ended.", self.get_current_user()) self.waiters.remove(self) self.unsubscribe() self._unauthenticate() def on_message(self, message): """Handles an incoming message in a websocket. Determines what subaction to route it to, and calls that sub action. Args: message: the incoming raw message from the websocket. """ parsed_message = tornado.escape.json_decode(message) self.recipe_instance_pk = parsed_message['recipe_instance'] if not self.check_permission(): return # Subscription to a signal. if 'subscribe' in parsed_message: self.subscribe(parsed_message) # Submission of a new datapoint. else: self.new_data(parsed_message) def check_permission(self): """Checks if the user has access to the ``recipe_instance``.""" permitted = True recipe_instance = RecipeInstance.objects.get(pk=self.recipe_instance_pk) if not permitted: LOGGER.error("Forbidden request from %s for %d.", self.get_current_user(), recipe_instance) return permitted # TODO(willjschmitt): Get this working again. # user = get_current_user(self) # brewhouse = recipe_instance.brewhouse # brewery = brewhouse.brewery # company = brewery.company # has_permission = is_member_of_brewing_company(user,company) # # if not has_permission: # LOGGER.error("User %s attempted to access brewhouse they do not" # " have access to (%s)", # user, recipe_instance.brewhouse) # # return has_permission def subscribe(self, parsed_message): """Handles a subscription request. Args: parsed_message: Data received from websocket. """ LOGGER.info('New subscription received from %s: %s.', self.get_current_user(), parsed_message) recipe_instance_pk = parsed_message['recipe_instance'] sensor_pk = parsed_message['sensor'] history_time = parsed_message.get('history_time', None) self._add_subscription(recipe_instance_pk, sensor_pk) historical_timedelta = \ datetime.timedelta(seconds=history_time) if history_time else None self._write_historical_data(sensor_pk, recipe_instance_pk, timedelta=historical_timedelta) def unsubscribe(self): for subscription in self.subscriptions.values(): if self in subscription: subscription.remove(self) def _add_subscription(self, recipe_instance_pk, sensor_pk): key = (recipe_instance_pk, sensor_pk) if key not in self.subscriptions: self.subscriptions[key] = set() self.subscriptions[key].add(self) @classmethod def _write_data_response_chunked(cls, websocket, data_points, chunk_size=1000): """Writes serialized datas in chunks asynchronously. Args: websocket: The websocket to write messages on. data_points: The data to write. chunk_size: The number of data points to write as a maximum. """ assert chunk_size > 0 lower_bound = 0 total_points = len(data_points) while lower_bound < total_points: upper_bound = min(lower_bound + chunk_size, total_points) chunk = data_points[lower_bound:upper_bound] IOLoop.current().add_callback( functools.partial(cls._write_data_response, websocket, chunk)) lower_bound += chunk_size @staticmethod def _write_data_response(websocket, data_points): """Generates a serialized data message with headers for deserialization. Writes output to websocket. 
""" assert data_points model_info = model_meta.get_field_info(TimeSeriesDataPoint) field_names = TimeSeriesDataPointSerializer(data_points[0])\ .get_field_names({}, model_info) response = { 'headers': list(field_names), 'data': [], } LOGGER.debug("Writing %d datapoints out.", len(data_points)) for data_point in data_points: serializer = TimeSeriesDataPointSerializer(data_point) data = serializer.data data_entry = [] for field_name in field_names: data_entry.append(data[field_name]) response['data'].append(data_entry) websocket.write_message(json.dumps(response)) def _write_historical_data(self, sensor_pk, recipe_instance_pk, timedelta=None): """Sends all the data that already exists, limited to now + timedelta. If data exists, but is older than the timedelta, returns the last point observed. Args: sensor_pk: The primary key for the sensor to send data. recipe_instance_pk: The primary key for the recipe instance to send data. timedelta: The amount of time + now to filter against for sending to client. Negative indicates data in the past. Positive indicates data in the future, which will be none. If unset (set to None), no time filter will be applied and all historical data will be written. """ data_points = TimeSeriesDataPoint.objects.filter( sensor=sensor_pk, recipe_instance=recipe_instance_pk) if timedelta is not None: now = timezone.now() filter_start_time = now + timedelta data_points = data_points.filter(time__gt=filter_start_time) data_points = data_points.order_by("time") if data_points.exists(): self._write_data_response_chunked(self, data_points) else: try: latest_point = TimeSeriesDataPoint.objects.filter( sensor=sensor_pk, recipe_instance=recipe_instance_pk)\ .latest() self._write_data_response_chunked(self, [latest_point]) except TimeSeriesDataPoint.DoesNotExist: pass def new_data(self, parsed_message): """Handles a new data point request. Args: parsed_message: Data received from websocket. """ LOGGER.debug('New data received from %s: %s.', self.get_current_user(), parsed_message) data = parsed_message data["source"] = self.source_id serializer = TimeSeriesDataPointSerializer(data=data) serializer.is_valid(raise_exception=True) serializer.save() @classmethod def send_updates(cls, new_data_point): """Sends a new data point to all of the waiters watching the sensor it is associated with. Args: new_data_point: An instance of a TimeSeriesDataPoint to be streamed to any subscribers. """ key = (new_data_point.recipe_instance.pk, new_data_point.sensor.pk) if key not in cls.subscriptions: LOGGER.debug("No subscribers for %s.", new_data_point.sensor.name) return subscriptions = cls.subscriptions[key] LOGGER.info("Sending value %s for sensor %s to %d waiters.", new_data_point.value, new_data_point.sensor, len(subscriptions)) for waiter in subscriptions: # Skip sending data points to the subscriber that sent it. source = new_data_point.source if source is not None and source == waiter.source_id: continue LOGGER.debug("Writing value %s for sensor %s for %s.", new_data_point.value, new_data_point.sensor, waiter.get_current_user()) cls._write_data_response_chunked(waiter, [new_data_point]) @receiver(post_save, sender=TimeSeriesDataPoint) def time_series_watcher(sender, instance, **kwargs): """A django receiver watching for any saves on a datapoint to send to waiters """ LOGGER.debug("Observed newly saved datapoint: %s.", instance) TimeSeriesSocketHandler.send_updates(instance)
39.396875
80
0.649005
11,422
0.906005
0
0
3,316
0.263028
0
0
5,091
0.403823
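on_message() above routes on the presence of a "subscribe" key: with it, the payload is handled as a subscription; without it, it is handed to the TimeSeriesDataPointSerializer as a new datapoint. A sketch of the two message shapes; the pk values are invented, and the exact serializer field names for the data message are assumptions:

import json

# Hypothetical subscribe payload; "history_time" is the optional number of
# seconds of history to replay, per subscribe() above.
subscribe_msg = json.dumps({
    "recipe_instance": 1,
    "sensor": 7,
    "subscribe": True,
    "history_time": 3600,
})

# Hypothetical data payload; no "subscribe" key, so it is routed to new_data().
new_data_msg = json.dumps({
    "recipe_instance": 1,
    "sensor": 7,
    "value": 20.5,                     # field names here are assumptions
    "time": "2016-08-04T01:59:37Z",
})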
298cc1131b4fa0f1be5f4becb01286a44e8b0a9d
66
py
Python
report/create_db.py
Kellel/reports
975e99396301e87176a38dd440a273c9319b3e22
[ "BSD-3-Clause" ]
null
null
null
report/create_db.py
Kellel/reports
975e99396301e87176a38dd440a273c9319b3e22
[ "BSD-3-Clause" ]
null
null
null
report/create_db.py
Kellel/reports
975e99396301e87176a38dd440a273c9319b3e22
[ "BSD-3-Clause" ]
null
null
null
from models import Base, engine

Base.metadata.create_all(engine)
16.5
32
0.818182
0
0
0
0
0
0
0
0
0
0
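create_db.py above assumes a sibling models module exporting Base and engine. A minimal sketch of what such a module could look like; the table, columns, and database URL are illustrative, not the project's actual models:

# models.py -- hypothetical counterpart to create_db.py above.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base

engine = create_engine("sqlite:///reports.db")   # illustrative URL
Base = declarative_base()


class Report(Base):
    """Example table; the real project defines its own models."""
    __tablename__ = "reports"
    id = Column(Integer, primary_key=True)
    title = Column(String(255))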
298ede4e030cbedbbcf9ef9a22b8209288395ba1
1,751
py
Python
plugins/train/model/dfaker.py
aaman123/faceswap
a5825c3457b062c1824ef3f8b02e4f3fa4c2217f
[ "MIT" ]
2
2021-11-11T08:29:01.000Z
2021-11-11T08:34:50.000Z
plugins/train/model/dfaker.py
aaman123/faceswap
a5825c3457b062c1824ef3f8b02e4f3fa4c2217f
[ "MIT" ]
null
null
null
plugins/train/model/dfaker.py
aaman123/faceswap
a5825c3457b062c1824ef3f8b02e4f3fa4c2217f
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
""" DFaker Model
    Based on the dfaker model: https://github.com/dfaker """

from keras.initializers import RandomNormal
from keras.layers import Input

from lib.model.nn_blocks import Conv2DOutput, UpscaleBlock, ResidualBlock

from .original import Model as OriginalModel, KerasModel


class Model(OriginalModel):
    """ Dfaker Model """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.input_shape = (64, 64, 3)
        self.encoder_dim = 1024
        self.kernel_initializer = RandomNormal(0, 0.02)

    def decoder(self, side):
        """ Decoder Network """
        input_ = Input(shape=(8, 8, 512))
        var_x = input_
        var_x = UpscaleBlock(512, res_block_follows=True)(var_x)
        var_x = ResidualBlock(512, kernel_initializer=self.kernel_initializer)(var_x)
        var_x = UpscaleBlock(256, res_block_follows=True)(var_x)
        var_x = ResidualBlock(256, kernel_initializer=self.kernel_initializer)(var_x)
        var_x = UpscaleBlock(128, res_block_follows=True)(var_x)
        var_x = ResidualBlock(128, kernel_initializer=self.kernel_initializer)(var_x)
        var_x = UpscaleBlock(64)(var_x)
        var_x = Conv2DOutput(3, 5, name="face_out_{}".format(side))(var_x)
        outputs = [var_x]

        if self.config.get("learn_mask", False):
            var_y = input_
            var_y = UpscaleBlock(512)(var_y)
            var_y = UpscaleBlock(256)(var_y)
            var_y = UpscaleBlock(128)(var_y)
            var_y = UpscaleBlock(64)(var_y)
            var_y = Conv2DOutput(1, 5, name="mask_out_{}".format(side))(var_y)
            outputs.append(var_y)
        return KerasModel([input_], outputs=outputs, name="decoder_{}".format(side))
38.911111
85
0.656768
1,438
0.821245
0
0
0
0
0
0
192
0.109652
29902382e677a01b98fcb79346e95e0a9cada7e6
2,459
py
Python
classify/train.py
gallupliu/QA
0e284dd17e27ea9384a1e4d7a4c206eb95e4bf7f
[ "Apache-2.0" ]
3
2017-09-06T07:10:05.000Z
2019-08-01T03:27:39.000Z
classify/train.py
gallupliu/QA
0e284dd17e27ea9384a1e4d7a4c206eb95e4bf7f
[ "Apache-2.0" ]
2
2018-01-25T14:46:40.000Z
2018-01-25T14:53:13.000Z
classify/train.py
gallupliu/QA
0e284dd17e27ea9384a1e4d7a4c206eb95e4bf7f
[ "Apache-2.0" ]
null
null
null
# encoding: utf-8

"""
@author: gallupliu
@contact: [email protected]

@version: 1.0
@license: Apache Licence
@file: train.py
@time: 2018/3/5 22:58
"""

import tensorflow as tf
from classify.dataset import data_utils
from sklearn.model_selection import train_test_split
from classify.model import TextCNN


def dataset_input_fn(ids, labels, batch_size):
    dataset = tf.data.Dataset.from_tensor_slices((ids, labels))
    dataset = dataset.shuffle(buffer_size=10000)
    dataset = dataset.batch(batch_size)
    return dataset


if __name__ == "__main__":
    text, labels = data_utils.loadfile('./data_with_label.csv')
    word2idx, vocab = data_utils.load_embedding('./dataset/test_cut.txt', './dataset/wiki_50.model')
    ids = data_utils.get_sentence_ids(text, word2idx)
    train_ids, test_ids, train_labels, test_labels = train_test_split(ids, labels, test_size=0.1)

    # print(len(text),type(text))
    # max_length = count_length(text)
    # print(max_length)
    # train_word2vec()
    # print(type(text))
    # print(list(word2idx.keys()))
    # dataset = tf.data.Dataset.from_tensor_slices((ids, train_labels))
    # iterator = dataset.make_initializable_iterator()
    # next_element = iterator.get_next()
    train_dataset = dataset_input_fn(train_ids, train_labels, 100)
    val_dataset = dataset_input_fn(train_ids, train_labels, 100)
    iterator = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes)
    next_element, labels = iterator.get_next()
    train_iterator_init_op = iterator.make_initializer(train_dataset)
    val_iterator_init_op = iterator.make_initializer(val_dataset)

    with tf.Session() as sess:
        # sess.run(iterator.initializer)
        # print(sess.run(next_element))
        model = TextCNN(next_element, labels, vocab, 120, 3, [1, 2, 3, 5], 512)
        sess.run(tf.global_variables_initializer())
        # _,acc,loss = sess.run([model.train_op,model.accuracy,model.loss])
        # print(acc,loss)
        for _ in range(10):
            # train
            sess.run(train_iterator_init_op)
            feed_dict = {model.dropout_keep_prob: 1.0}
            while True:
                try:
                    _, acc, loss = sess.run([model.train_op, model.accuracy, model.loss], feed_dict=feed_dict)
                    print(acc, loss)
                    # print(sess.run(next_element),sess.run(labels))
                except tf.errors.OutOfRangeError:
                    break
33.684932
109
0.684831
0
0
0
0
0
0
0
0
742
0.301259
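train.py above switches one reinitializable iterator between training and validation datasets. A stripped-down, standalone sketch of just that TF 1.x pattern, with toy data in place of the project's inputs:

import tensorflow as tf   # TF 1.x API, as in the script above

train_ds = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4]).batch(2)
val_ds = tf.data.Dataset.from_tensor_slices([10, 20]).batch(2)

# One iterator, re-pointed at either dataset via its initializer ops.
iterator = tf.data.Iterator.from_structure(train_ds.output_types,
                                           train_ds.output_shapes)
next_batch = iterator.get_next()
train_init = iterator.make_initializer(train_ds)
val_init = iterator.make_initializer(val_ds)

with tf.Session() as sess:
    for init_op in (train_init, val_init):
        sess.run(init_op)
        while True:                      # drain the selected dataset
            try:
                print(sess.run(next_batch))
            except tf.errors.OutOfRangeError:
                break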
2991579a0641f47ea260ec96e0a53c12f4df3dbf
342
py
Python
authors/apps/author_follows/urls.py
andela/ah-backend-dojo
f2b14f15c4af906da846cafe722f13868d58371f
[ "BSD-3-Clause" ]
3
2019-05-01T10:41:09.000Z
2021-04-25T22:17:20.000Z
authors/apps/author_follows/urls.py
andela/ah-backend-dojo
f2b14f15c4af906da846cafe722f13868d58371f
[ "BSD-3-Clause" ]
24
2019-04-23T14:56:21.000Z
2021-12-13T19:58:37.000Z
authors/apps/author_follows/urls.py
andela/ah-backend-dojo
f2b14f15c4af906da846cafe722f13868d58371f
[ "BSD-3-Clause" ]
4
2019-06-29T10:40:32.000Z
2022-01-04T11:44:53.000Z
from django.urls import path
from .views import FollowStatsViews, AuthorFollowViews

urlpatterns = [
    # /authors/followers/ or ../following/
    path("<str:follow_state>/", FollowStatsViews.as_view(), name="follows"),
    # /authors/<author_username>/follow
    path("<str:username>/follow/", AuthorFollowViews.as_view(), name="follow")
]
34.2
78
0.719298
0
0
0
0
0
0
0
0
135
0.394737
2992bbf3c6e1e4c6fcb24c568c080fff0f59e86b
2,299
py
Python
src/cone/app/tests/test_browser_login.py
lenadax/cone.app
b25c55aedb85e45a962003d2767a22a927cc61c0
[ "BSD-3-Clause" ]
1
2022-03-13T17:51:09.000Z
2022-03-13T17:51:09.000Z
src/cone/app/tests/test_browser_login.py
lenadax/cone.app
b25c55aedb85e45a962003d2767a22a927cc61c0
[ "BSD-3-Clause" ]
1
2021-08-06T08:12:00.000Z
2021-08-06T08:12:00.000Z
src/cone/app/tests/test_browser_login.py
lenadax/cone.app
b25c55aedb85e45a962003d2767a22a927cc61c0
[ "BSD-3-Clause" ]
null
null
null
from cone.app import get_root
from cone.app import security
from cone.app import testing
from cone.app.browser.login import login_view
from cone.app.browser.login import logout_view
from cone.tile import render_tile
from cone.tile.tests import TileTestCase
from webob.response import Response
from webob.exc import HTTPFound


class TestBrowserLogin(TileTestCase):
    layer = testing.security

    def test_login_view(self):
        root = get_root()
        request = self.layer.new_request()
        response = login_view(root, request)
        self.assertTrue(isinstance(response, Response))

    def test_logout_view(self):
        root = get_root()
        request = self.layer.new_request()
        response = logout_view(root, request)
        self.assertTrue(isinstance(response, HTTPFound))

    def test_logout_tile(self):
        root = get_root()
        request = self.layer.new_request()
        with self.layer.authenticated('admin'):
            render_tile(root, request, 'logout')
        self.checkOutput("""
        ResponseHeaders([('Set-Cookie',
        'auth_tkt=; Max-Age=0; Path=/; expires=...'),
        ('Set-Cookie',
        'auth_tkt=; Domain=example.com; Max-Age=0; Path=/; expires=...'),
        ('Set-Cookie',
        'auth_tkt=; Domain=.example.com; Max-Age=0; Path=/; expires=...')])
        """, str(request.response.headers))

    def test_login_form(self):
        root = get_root()
        request = self.layer.new_request()
        res = render_tile(root, request, 'loginform')
        self.assertTrue(res.find('<form action="http://example.com/login"') > -1)

        # Authenticate with wrong credentials
        request.params['loginform.user'] = 'foo'
        request.params['loginform.password'] = 'bar'
        request.params['action.loginform.login'] = '1'
        res = render_tile(root, request, 'loginform')
        self.assertTrue(res.find('class="errormessage">Invalid Credentials') > -1)

        # Authenticate with correct credentials
        request.params['loginform.user'] = security.ADMIN_USER
        request.params['loginform.password'] = security.ADMIN_PASSWORD
        request.params['action.loginform.login'] = '1'
        render_tile(root, request, 'loginform')
        self.assertTrue(isinstance(request.environ['redirect'], HTTPFound))
40.333333
94
0.660722
1,971
0.857329
0
0
0
0
0
0
650
0.282732
29934fcb2bb4b9dd5b0dcf07accd0d89e7187b95
752
py
Python
View/telaEditarControle.py
IuriBritoDev/TKINO
3c689788324bd5badc84c7969f331b076046c211
[ "MIT" ]
null
null
null
View/telaEditarControle.py
IuriBritoDev/TKINO
3c689788324bd5badc84c7969f331b076046c211
[ "MIT" ]
null
null
null
View/telaEditarControle.py
IuriBritoDev/TKINO
3c689788324bd5badc84c7969f331b076046c211
[ "MIT" ]
null
null
null
from tkinter import *


def TelaEditarControle(tela, controle):
    # Create the configuration window
    telaEditar = Toplevel(tela)
    telaEditar.title('EDITA CONTROLE')
    telaEditar.geometry('280x180+620+120')
    telaEditar['bg'] = 'gray'
    telaEditar.resizable(False, False)
    telaEditar.focus_force()
    telaEditar.grab_set()

    # Port and Arduino labels
    lblPorta = Label(telaEditar, text='VALOR CONTROLADOR', foreground='black', bg='gray', anchor=W,)
    lblPorta.place(x=50, y=20)

    slide = Scale(telaEditar, from_=10, to=90, orient=HORIZONTAL)
    slide.place(x=95, y=70, width=100, height=50)

    # Connect button
    btnCnt = Button(telaEditar, text='SALVAR', command='', foreground='white', bg='black')
    btnCnt.place(x=210, y=140)
32.695652
96
0.694149
0
0
0
0
0
0
0
0
177
0.234127
29955858830022ac8b0ab1ecf8622aef64dde5f8
395
py
Python
yao_framework/__init__.py
QuantumBFS/yao-python
c877b3c4f920e76858021b6af8728f839d88fc1d
[ "Apache-2.0" ]
3
2019-12-04T16:40:55.000Z
2021-12-16T04:28:59.000Z
yao_framework/__init__.py
QuantumBFS/yao-python
c877b3c4f920e76858021b6af8728f839d88fc1d
[ "Apache-2.0" ]
null
null
null
yao_framework/__init__.py
QuantumBFS/yao-python
c877b3c4f920e76858021b6af8728f839d88fc1d
[ "Apache-2.0" ]
2
2021-05-07T01:17:50.000Z
2021-12-16T04:32:31.000Z
# workaround static linked python
from julia.api import Julia
__julia__ = Julia(compiled_modules=False)

import os
import sys
import subprocess

from .wrappers import apply

script_dir = os.path.dirname(os.path.realpath(__file__))


def install():
    """
    Install Julia packages required for yao-framework.
    """
    subprocess.check_call(['julia', os.path.join(script_dir, 'install.jl')])
21.944444
76
0.744304
0
0
0
0
0
0
0
0
118
0.298734
29964b779c4f66694fdf10686261f2a4a69976ee
4,531
py
Python
src/multiuserpad/twitchutil.py
codingwithsomeguy/multiuserpad
caca02bb3f98e855a0980b8ac9947c05d5b89463
[ "MIT" ]
4
2020-04-14T03:25:06.000Z
2020-11-03T14:30:20.000Z
src/multiuserpad/twitchutil.py
codingwithsomeguy/multiuserpad
caca02bb3f98e855a0980b8ac9947c05d5b89463
[ "MIT" ]
null
null
null
src/multiuserpad/twitchutil.py
codingwithsomeguy/multiuserpad
caca02bb3f98e855a0980b8ac9947c05d5b89463
[ "MIT" ]
null
null
null
# TODO: Generalize this with the discordutil module, factor out oauth
import logging
from urllib.parse import urlencode
import requests
import json

from flask import request, redirect, session

from creds import get_creds
from config import config
from sessionutil import invalidate_session


def twitch_login():
    ss = get_creds()
    params = {
        "client_id": ss["twitch_client_id"],
        "redirect_uri": "%s/api/ident/twitchcb" % config.MY_URL,
        "state": True,
        "response_type": "code",
        "scope": "openid",  # not user_read, id_token, or user_subscriptions
        # need to request objects for the later userinfo request
        "claims": json.dumps({
            "id_token": {},
            "userinfo": {
                "picture": "null",
                "preferred_username": "null",
            }
        })
    }
    redirect_url = "%s/oauth2/authorize?%s" % (
        config.TWITCH_API_URL, urlencode(params))
    return redirect(redirect_url)


def twitch_login_cb():
    user_authenticated = False
    result = "Missing code"

    code = request.args.get("code")
    scope = request.args.get("scope")
    if code is not None and scope == "openid":
        # fetch a token
        user_authenticated = fetch_twitch_token(code)

    if user_authenticated == True:
        user_fetch_worked = fetch_twitch_user()
        if not user_fetch_worked:
            invalidate_session()
            return redirect("/")
        return redirect("/user")

    return result


# TODO: this should be cached until expiration
# The server can reuse this for API requests
# TODO: factor this out with discord auth to oauth..auth
def fetch_twitch_token(code):
    # on success, session has the token to use
    ss = get_creds()
    result = False

    body_payload = {
        "client_id": ss["twitch_client_id"],
        "client_secret": ss["twitch_client_secret"],
        "grant_type": "authorization_code",
        "code": code,
        "redirect_uri": "%s/api/ident/twitchcb" % config.MY_URL,
    }
    # redirect_uri may need to match original cb URI (twitch_login_cb)
    extra_headers = {
        "Content-Type": "application/x-www-form-urlencoded",
    }
    response = requests.post(
        "%s/oauth2/token" % config.TWITCH_API_URL,
        data=urlencode(body_payload),
        headers=extra_headers)
    logging.debug("fetch_twitch_token: headers: %s\n\traw response: %s" % (
        response.headers, response.text))

    try:
        token_response = json.loads(response.text)
        if "access_token" in token_response and "refresh_token" in token_response:
            session["token_response"] = token_response
            result = True
        else:
            logging.warn("NO refresh_token AVAILABLE, BAD AUTH!")
    except ValueError as e:
        # NOTE: was 'logging.error("ValueError: " + e)', which itself raises
        # a TypeError; pass the exception as a format argument instead.
        logging.error("ValueError: %s", e)
        result = False

    return result


def fetch_twitch_user():
    if "token_response" not in session:
        return False
    token = session["token_response"]
    if "token_type" not in token or "access_token" not in token:
        return False

    auth_header_token_type = ""
    # token request returns "bearer", not "Bearer" sometimes
    if token["token_type"] in ["bearer", "Bearer"]:
        auth_header_token_type = "Bearer"

    # these are attached to session in fetch_twitch_token
    extra_headers = {
        "Authorization": "%s %s" % (
            auth_header_token_type,
            token["access_token"]
        ),
    }
    response = requests.get(
        "%s/oauth2/userinfo" % config.TWITCH_API_URL,
        headers=extra_headers)
    logging.debug("fetch_twitch_user: headers: %s\n\traw response: %s" % (
        response.headers, response.text))

    twitch_avatar_url = None
    twitch_username = None
    twitch_id = None
    try:
        logging.debug("fetch_twitch_user response: %s" % response.text)
        parsed_response = json.loads(response.text)
        twitch_id = parsed_response["sub"]
        twitch_username = parsed_response["preferred_username"]
        twitch_avatar_url = parsed_response["picture"]
    except ValueError as e:
        logging.error("ValueError: %s", e)
        return False

    ss = get_creds()
    # TODO: move away from "discord" to a generic auth provider
    session["discord"] = {
        "full_username": twitch_username,
        # TODO: get the right avatar from picture
        "avatar_url": twitch_avatar_url,
        "id": twitch_id,
        "authorized": twitch_id in ss["authorized_twitch_ids"]
    }

    return True
31.685315
82
0.63827
0
0
0
0
0
0
0
0
1,614
0.356213
2996df27209f1d350199a6a54bcf14fae9ad1a1a
6,173
py
Python
src/pixel_sorting.py
in3rtial/imgsrt
2dec237b7d797d9964ed874c4e4d72f7eb23eaf0
[ "CC0-1.0" ]
2
2015-11-08T09:22:30.000Z
2020-10-15T03:42:24.000Z
src/pixel_sorting.py
in3rtial/imgsrt
2dec237b7d797d9964ed874c4e4d72f7eb23eaf0
[ "CC0-1.0" ]
null
null
null
src/pixel_sorting.py
in3rtial/imgsrt
2dec237b7d797d9964ed874c4e4d72f7eb23eaf0
[ "CC0-1.0" ]
null
null
null
#!/usr/bin/python3
"""transliteration of Kim Asendorf's pixel sorting script"""

from copy import copy
from random import random, gauss
from PIL import Image
from numpy import int32
from argparse import ArgumentParser


# PROGRAM CONSTANTS

# rgb(103, 105, 128)
BLACK_VALUE = int32(-10000000)

# rgb(164, 114, 128)
WHITE_VALUE = int32((255 << 24) + (230 << 16) + (230 << 8) + 230)

BRIGHTNESS_VALUE = int32(30)


# PIXEL CONVERSION FUNCTIONS

def get_pixel_value(pixel):
    """rgb pixel to int32 processing representation"""
    return(int32((((255 << 8) | pixel[0]) << 8 | pixel[1]) << 8 | pixel[2]))


def get_pixel_brightness(pixel):
    """rgb pixel to brightness value"""
    return(max((pixel[0], pixel[1], pixel[2])) / 255 * 100)


# PIXEL FINDING FUNCTIONS

def get_next_satisfying(vector, starting_position, condition_fun):
    """find next pixel in the vector after starting position
    that satisfies the condition (boolean)
    return -1 if not found"""
    position = starting_position
    while(position < len(vector) and not(condition_fun(vector[position]))):
        position += 1
    if(position == (len(vector) - 1) and not(condition_fun(vector[position]))):
        position = - 1
    return(position)


# black mode
def get_next_black(vector, starting_position):
    """next black pixel"""
    condition = lambda x: int32(get_pixel_value(x)) > BLACK_VALUE
    return get_next_satisfying(vector, starting_position, condition)


def get_next_not_black(vector, starting_position):
    """next non black pixel"""
    condition = lambda x: int32(get_pixel_value(x)) < BLACK_VALUE
    return get_next_satisfying(vector, starting_position, condition)


# bright mode
def get_next_bright(vector, starting_position):
    """next bright pixel"""
    condition = lambda x: int32(get_pixel_brightness(x)) < BRIGHTNESS_VALUE
    return get_next_satisfying(vector, starting_position, condition)


def get_next_dark(vector, starting_position):
    """next dark pixel"""
    condition = lambda x: int32(get_pixel_brightness(x)) > BRIGHTNESS_VALUE
    return get_next_satisfying(vector, starting_position, condition)


# white mode
def get_next_white(vector, starting_position):
    """next white pixel"""
    condition = lambda x: int32(get_pixel_value(x)) < WHITE_VALUE
    return get_next_satisfying(vector, starting_position, condition)


def get_next_not_white(vector, starting_position):
    """next not white pixel"""
    condition = lambda x: int32(get_pixel_value(x)) > WHITE_VALUE
    return get_next_satisfying(vector, starting_position, condition)


FIND_FUNCTIONS = ((get_next_black, get_next_not_black),   # black
                  (get_next_bright, get_next_dark),       # bright
                  (get_next_white, get_next_not_white))   # white


# PIXEL SORTING FUNCTIONS

def sort_pixels(vector, mode=0, find=FIND_FUNCTIONS):
    """sort pixel in the given vector"""
    assert(mode in (0, 1, 2)), "invalid use case"
    vector = copy(vector)
    position = 0
    pos_end = None
    while(position < len(vector)):
        if((position == -1) or (pos_end == -1)):
            break
        position = find[mode][0](vector, position)
        pos_end = find[mode][1](vector, position)
        vector[position:pos_end] = sorted(vector[position:pos_end],
                                          key=lambda x: get_pixel_value(x))
        position = pos_end + 1
    return(vector)


# IMAGE TRANSFORMATIONS

def to_vectors(rgb_image, row_or_col):
    """rgb image -> list of lists of RGB tuples"""
    assert(rgb_image.mode == "RGB"), "must be a RGB image"
    assert(row_or_col in (0, 1)), "row = 0, col = 1"
    vectors = []
    x_size, y_size = rgb_image.size
    if(row_or_col == 0):
        for y_coord in range(0, y_size):
            row = []
            for x_coord in range(0, x_size):
                row.append(rgb_image.getpixel((x_coord, y_coord)))
            vectors.append(row)
    else:
        for x_coord in range(0, x_size):
            col = []
            for y_coord in range(0, y_size):
                col.append(rgb_image.getpixel((x_coord, y_coord)))
            vectors.append(col)
    return(vectors)


# COMPLETE FUNCTIONS

def sort_image(image, row_or_col, mode=0, prob=1, avg_band_size=1):
    """input: (rgb image, row or column, sort mode,
    probability of sorting, average band size for sorting)
    output: sorted out image)"""
    x_size, y_size = image.size
    sigma = avg_band_size / 4
    vectors = to_vectors(image, row_or_col)
    new_vectors = []
    position = 0
    while(position < len(vectors)):
        if(random() < prob):
            # calculate the indices of the rows to sort
            to_sort = []
            coarseness = int(gauss(avg_band_size, sigma))
            for index in range(position, position + coarseness):
                if(index >= len(vectors)):
                    break
                else:
                    to_sort.append(index)
            for index in to_sort:
                new_vectors.append(sort_pixels(vectors[index], mode))
            position += coarseness
        else:
            new_vectors.append(vectors[position])
            position += 1

    new_image = []
    if(row_or_col == 0):
        for vector in new_vectors:
            for (red, green, blue) in vector:
                new_image.append(int(red))
                new_image.append(int(green))
                new_image.append(int(blue))
    else:
        for i in range(0, y_size):
            for vector in new_vectors:
                (red, green, blue) = vector[i]
                new_image.append(int(red))
                new_image.append(int(green))
                new_image.append(int(blue))

    # NOTE: was Image.fromstring, which has been removed from Pillow;
    # frombytes is the equivalent call.
    return(Image.frombytes('RGB', (x_size, y_size), bytes(new_image)))


__all__ = ["sort_image"]

if __name__ == "__main__":
    parser = ArgumentParser()
    parser.add_argument("-i", dest="input_image_file", required=True,
                        type=str, help="input")
    parser.add_argument("-o", dest="output_image_file", required=True,
                        type=str, help="output")
    args = parser.parse_args()
    image = Image.open(args.input_image_file)
    sort_image(image, 0).save(args.output_image_file)
32.661376
95
0.638749
0
0
0
0
0
0
0
0
1,086
0.175927
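A minimal driver for the sort_image() entry point above; the module import path and filenames are assumptions:

# Hypothetical usage: sort roughly half of all rows in ~20-row bands,
# using the black-value mode (mode=0).
from PIL import Image
from pixel_sorting import sort_image   # assumed module path

img = Image.open("input.png").convert("RGB")   # to_vectors() requires RGB
sorted_img = sort_image(img, 0, mode=0, prob=0.5, avg_band_size=20)
sorted_img.save("sorted.png")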
29978909888062a7973e1bdbe5b82311fd8d9b27
6,173
py
Python
main.py
ml4design/text-processing-module
f1bfe1a49d58156e9e48e5ef69b980f89a5981ea
[ "MIT" ]
null
null
null
main.py
ml4design/text-processing-module
f1bfe1a49d58156e9e48e5ef69b980f89a5981ea
[ "MIT" ]
null
null
null
main.py
ml4design/text-processing-module
f1bfe1a49d58156e9e48e5ef69b980f89a5981ea
[ "MIT" ]
null
null
null
import pandas as pd
from preprocessing import preprocess
from wordclouds import wordcloud, find_by_word
from sentiment_analysis import calculate_sentiment, find_by_sentiment
import nltk
import os
import tempfile
from topic_modelling import lda_topic_model, show_topics, show_example_sentences_by_topic

os.environ["MPLCONFIGDIR"] = tempfile.gettempdir()
nltk.download('punkt')
nltk.download('stopwords')
pd.set_option('display.max_columns', None)

#####################################################
#                 READING THE DATA                  #
#####################################################

# In this tutorial we will mostly deal with comma-separated values (CSV)
# files (similar in structure to Excel files). Each line of the file is a
# data record. Each record consists of one or more fields, separated by
# commas. Check here for more information:
# https://en.wikipedia.org/wiki/Comma-separated_values

# Reads the file named "students_eng.csv".
# If you want to read a different file you need to (1) upload it in replit
# and (2) change "students_eng.csv" to the name of the newly uploaded file.
# Here we use the Pandas library ("pd") to read our file and in return we
# get a Pandas DataFrame. For faster processing and experimentation you can
# also select different subsets of the file's content through the nrows
# parameter -> the number of lines to read.
students_data = pd.read_csv("data/students_eng.csv")

# With the next line you can print the data you just read and see what a
# Pandas DataFrame looks like (quite similar to Excel).
print(students_data.head(3))

# As you can see, the data is separated into columns. Let's see how we can
# get the data from a specific column. The following line gives us only the
# data inside the column named "student_needs". Other options are:
# study_programme, degree_programme, planned_grad_year, today_feeling,
# physical_health, student_needs, students_sugg_to_improve_wellbeing.
students_data = students_data['student_needs']

#################################################
#              TEXT PREPROCESSING               #
#################################################

# Here we will pre-process our entire text collection.
# First, we need to merge all the different lines of the "comments" into one
# big corpus, so that we can later analyze it.
corpus = students_data.to_list()
print(corpus[0:5])

# Then we need to "preprocess" our text. To do so we use the following line
# of code (more details on what happens under the hood can be found in the
# "preprocessing.py" file - feel free to take a look at it).
# The following code makes all words lowercase, creates word tokens, removes
# stopwords, punctuation, and digits, and reduces inflected words to their
# word stem (stemming). Feel free to experiment by turning any of the
# following values from True to False. In addition, you can exclude extra
# words from your analysis by adding them within the extra_stopwords
# brackets, e.g. extra_stopwords=["people"] would remove the word "people"
# from everywhere in the document. Hint: don't forget to use the quotes!
# tokens = [preprocess(sentence, lower=True, rem_punc=True,
#                      word_tokenization=True, rem_numb=True,
#                      rem_stopwords=True, stem=True, extra_stopwords=[])
#           for sentence in students_data.to_list()]
# print(tokens)

#############################################
#             WORD FREQUENCIES              #
#############################################

# Word frequency calculation is the most basic tool in text processing, yet
# it gives a comprehensive picture of the content in your text collection.
# One of the most common ways to visualize word frequencies is a WordCloud
# (which you've already seen if you opened Voyant).
# This function needs three things from you:
# 1. tokens -- the result of our preprocessing step
# 2. the name of the picture it will generate and save to your directory
# 3. the number of words to show
# wordcloud(words = tokens, name_of_output = 'wordcloud', num = 10)

# Text processing often requires working with examples, because words are
# often contextual and it is difficult to understand what is happening in
# your text collection. For this purpose, you can find documents by pieces
# of text.
# This function needs two things from you:
# 1. tokens -- the result of our preprocessing step (it will look for
#    examples in this collection)
# 2. a word or a phrase the text should include
# test = find_by_word(tokens, 'studi')
# print(test)

#############################################
#             Sentiment analysis            #
#############################################

# The aim of sentiment analysis is to calculate how emotional your texts are
# and what the valence of these texts is. In our example we use VADER
# (Valence Aware Dictionary and sEntiment Reasoner), but you can find other
# sentiment analysis tools on the internet.
# VADER calculates how positive, neutral, and negative a text is. It also
# calculates a compound score which considers all three metrics to give you
# a precise measurement of the sentiment.
# This function requires only the preprocessed collection of texts.
# sent_result = calculate_sentiment(tokens)
# print(sent_result)

# Now, when the sentiment scores are calculated, you can find the most
# interesting texts by looking at the documents with the highest scores (in
# this example, we look at the 5 most positive documents).
# This function requires three things:
# 1. the result of the sentiment calculation
# 2. which score you're interested in
# 3. the number of examples you want to get
# res = find_by_sentiment(df_with_scores = sent_result, score_type = 'pos', num_of_examples = 5)
# print(res)

#############################################
#              TOPIC MODELING               #
#############################################

# num_of_topics = 4
# word_num_per_topic = 5
# lda_model = lda_topic_model(tokens, topic_num=num_of_topics)
# show_topics(lda_model, word_num_per_topic)

# Check examples assigned to a particular topic ####
# num_of_examples = 5
# show_example_sentences_by_topic(corpus, tokens, lda_model,
#                                 word_num_per_topic, topic_to_check=1,
#                                 num_of_examp_to_show = num_of_examples)
55.116071
524
0.706302
0
0
0
0
0
0
0
0
5,542
0.897781
299875f6900cd7a8b095fbe70057acd505857f31
4,796
py
Python
finetune/target_models/multifield.py
IndicoDataSolutions/finetune-transformer-lm
3534658e5de281e5634c8481b0fb37635b0cb3af
[ "MIT" ]
null
null
null
finetune/target_models/multifield.py
IndicoDataSolutions/finetune-transformer-lm
3534658e5de281e5634c8481b0fb37635b0cb3af
[ "MIT" ]
null
null
null
finetune/target_models/multifield.py
IndicoDataSolutions/finetune-transformer-lm
3534658e5de281e5634c8481b0fb37635b0cb3af
[ "MIT" ]
null
null
null
import copy

from finetune.errors import FinetuneError
from finetune.target_models.classifier import Classifier, ClassificationPipeline
from finetune.target_models.regressor import Regressor, RegressionPipeline
from finetune.base import BaseModel


class MultiFieldClassificationPipeline(ClassificationPipeline):
    def _format_for_encoding(self, X):
        return X


class MultiFieldRegressionPipeline(RegressionPipeline):
    def _format_for_encoding(self, X):
        return X


class MultiFieldClassifier(Classifier):
    """
    Classifies a set of documents into 1 of N classes.

    :param config: A :py:class:`finetune.config.Settings` object or None (for default config).
    :param \**kwargs: key-value pairs of config items to override.
    """

    defaults = {"chunk_long_sequences": False}

    def __init__(self, **kwargs):
        d = copy.deepcopy(MultiFieldClassifier.defaults)
        d.update(kwargs)
        super().__init__(**d)
        if self.config.chunk_long_sequences:
            raise FinetuneError(
                "Multifield model is incompatible with chunk_long_sequences = True in config."
            )

    def _get_input_pipeline(self):
        return MultiFieldClassificationPipeline(self.config)

    def finetune(self, Xs, Y=None, context=None, **kwargs):
        """
        :param \*Xs: lists of text inputs, shape [batch, n_fields]
        :param Y: integer or string-valued class labels. It is necessary for the items of Y to be sortable.
        :param batch_size: integer number of examples per batch. When N_GPUS > 1, this number
                           corresponds to the number of training examples provided to each GPU.
        """
        return BaseModel.finetune(self, Xs, Y=Y, context=context, **kwargs)

    def predict(self, Xs, context=None, **kwargs):
        """
        Produces list of most likely class labels as determined by the fine-tuned model.

        :param \*Xs: lists of text inputs, shape [batch, n_fields]
        :returns: list of class labels.
        """
        return BaseModel.predict(self, Xs, context=context, **kwargs)

    def predict_proba(self, Xs, context=None, **kwargs):
        """
        Produces probability distribution over classes for each example in X.

        :param \*Xs: lists of text inputs, shape [batch, n_fields]
        :returns: list of dictionaries. Each dictionary maps from X2 class label to its assigned class probability.
        """
        return BaseModel.predict_proba(self, Xs, context=context, **kwargs)

    def featurize(self, Xs, **kwargs):
        """
        Embeds inputs in learned feature space. Can be called before or after calling :meth:`finetune`.

        :param \*Xs: lists of text inputs, shape [batch, n_fields]
        :returns: np.array of features of shape (n_examples, embedding_size).
        """
        return BaseModel.featurize(self, Xs, **kwargs)


class MultiFieldRegressor(Regressor):
    """
    Regresses one or more floating point values given a set of documents per example.

    :param config: A :py:class:`finetune.config.Settings` object or None (for default config).
    :param \**kwargs: key-value pairs of config items to override.
    """

    def _get_input_pipeline(self):
        return MultiFieldRegressionPipeline(self.config)

    def finetune(self, Xs, Y=None, **kwargs):
        """
        :param \*Xs: lists of text inputs, shape [batch, n_fields]
        :param Y: floating point targets
        :param batch_size: integer number of examples per batch. When N_GPUS > 1, this number
                           corresponds to the number of training examples provided to each GPU.
        """
        return BaseModel.finetune(self, Xs, Y=Y, **kwargs)

    def predict(self, Xs, **kwargs):
        """
        Produces list of most likely class labels as determined by the fine-tuned model.

        :param \*Xs: lists of text inputs, shape [batch, n_fields]
        :returns: list of class labels.
        """
        return BaseModel.predict(self, Xs, **kwargs)

    def predict_proba(self, Xs, **kwargs):
        """
        Produces probability distribution over classes for each example in X.

        :param \*Xs: lists of text inputs, shape [batch, n_fields]
        :returns: list of dictionaries. Each dictionary maps from X2 class label to its assigned class probability.
        """
        return BaseModel.predict_proba(self, Xs, **kwargs)

    def featurize(self, Xs, **kwargs):
        """
        Embeds inputs in learned feature space. Can be called before or after calling :meth:`finetune`.

        :param \*Xs: lists of text inputs, shape [batch, n_fields]
        :returns: np.array of features of shape (n_examples, embedding_size).
        """
        return BaseModel.featurize(self, Xs, **kwargs)
38.677419
116
0.66347
4,537
0.945997
0
0
0
0
0
0
2,794
0.582569
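A sketch of how MultiFieldClassifier above might be called; each example is a list of text fields, so Xs has shape [batch, n_fields]. The import path, labels, and data are invented, and running this would require the finetune library plus its base model weights:

from finetune import MultiFieldClassifier   # assumed import path

model = MultiFieldClassifier()
Xs = [
    ["subject: invoice overdue", "body: please pay within 7 days"],
    ["subject: team lunch", "body: pizza on friday at noon"],
]
Y = ["billing", "social"]
model.finetune(Xs, Y=Y)        # lists of text fields per example
print(model.predict(Xs))       # e.g. ["billing", "social"]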
2998b411809973174ac82478a06ef6fa40c371df
157
py
Python
db_s3_backup/db_interface/dump_protocol.py
saurabhariyan/db-s3-backup
5b67737f43814f0841d47033c92825206a24e1a1
[ "MIT" ]
9
2015-08-04T00:54:46.000Z
2021-08-29T04:21:13.000Z
db_s3_backup/db_interface/dump_protocol.py
saurabhariyan/db-s3-backup
5b67737f43814f0841d47033c92825206a24e1a1
[ "MIT" ]
7
2015-05-28T15:57:15.000Z
2017-01-25T11:29:28.000Z
db_s3_backup/db_interface/dump_protocol.py
saurabhariyan/db-s3-backup
5b67737f43814f0841d47033c92825206a24e1a1
[ "MIT" ]
9
2015-05-28T13:09:25.000Z
2021-02-12T04:57:04.000Z
# NOTE: the original 'from exceptions import ValueError' import was Python 2
# only and unnecessary; ValueError is a builtin.
class DumpProtocol:
    def dump(self, config=None, verbose=False):
        raise ValueError('DumpProtocol not followed')
26.166667
53
0.751592
122
0.77707
0
0
0
0
0
0
27
0.171975
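A sketch of a subclass satisfying the DumpProtocol interface above; the pg_dump invocation, import path, and config keys are illustrative, not part of the source:

import subprocess

from dump_protocol import DumpProtocol   # assumed import path


class PostgresDump(DumpProtocol):
    def dump(self, config=None, verbose=False):
        # Hypothetical: shell out to pg_dump using a config dict.
        args = ["pg_dump", config["db_name"]]
        if verbose:
            args.append("--verbose")
        subprocess.check_call(args)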
299a6f26561daff35ded494f622e2b673df00a7d
24,679
py
Python
LPES-video/01.01-podstawy-terminala/01.01.04-pagery.py
opcode-eu-org-website/LPES-wyklady-src
dd4d413f2bb5560790b6365fa7e68e8f1a8a65b0
[ "MIT" ]
null
null
null
LPES-video/01.01-podstawy-terminala/01.01.04-pagery.py
opcode-eu-org-website/LPES-wyklady-src
dd4d413f2bb5560790b6365fa7e68e8f1a8a65b0
[ "MIT" ]
null
null
null
LPES-video/01.01-podstawy-terminala/01.01.04-pagery.py
opcode-eu-org-website/LPES-wyklady-src
dd4d413f2bb5560790b6365fa7e68e8f1a8a65b0
[ "MIT" ]
1
2021-07-03T16:43:47.000Z
2021-07-03T16:43:47.000Z
# Copyright (c) 2020-2021 Matematyka dla Ciekawych Świata (http://ciekawi.icm.edu.pl/) # Copyright (c) 2020-2021 Robert Ryszard Paciorek <[email protected]> # # MIT License # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. try: clipData except NameError: clipData = [] clipData += [ { 'section': 'wyświetlanie plików\n z podziałem na strony' }, { 'console': [ [0.070374, "o", eduMovie.clear + eduMovie.prompt()], [1.067533, "o", "m"], [1.259512, "o", "a"], [1.41949, "o", "n"], [1.611479, "o", " "], [1.92349, "o", "m"], [2.115325, "o", "a"], [2.243465, "o", "n"], [2.435497, "o", " "], [3.475488, "o", "\r\n"], [3.54519, "o", "\u001b[?1049h\u001b[22;0;0t\u001b[?1h\u001b=\r"], [3.585094, "o", "MAN(1) Manual pager utils MAN(1)\u001b[m\r\n\u001b[m\r\n\u001b[1mNAME\u001b[0m\u001b[m\r\n man - an interface to the on-line reference manuals\u001b[m\r\n\u001b[m\r\n\u001b[1mSYNOPSIS\u001b[0m\u001b[m\r\n \u001b[1mman\u001b[0m [\u001b[1m-C\u001b[0m \u001b[4mfile\u001b[24m] [\u001b[1m-d\u001b[0m] [\u001b[1m-D\u001b[0m] [\u001b[1m--warnings\u001b[0m[=\u001b[4mwarnings\u001b[24m]] [\u001b[1m-R\u001b[0m \u001b[4mencoding\u001b[24m] [\u001b[1m-L\u001b[0m \u001b[4mlo‐\u001b[24m\u001b[m\r\n \u001b[4mcale\u001b[24m] [\u001b[1m-m\u001b[0m \u001b[4msystem\u001b[24m[,...]] [\u001b[1m-M\u001b[0m \u001b[4mpath\u001b[24m] [\u001b[1m-S\u001b[0m \u001b[4mlist\u001b[24m] [\u001b[1m-e\u001b[0m \u001b[4mextension\u001b[24m] [\u001b[1m-i\u001b[0m|\u001b[1m-I\u001b[0m]\u001b[m\r\n [\u001b[1m--regex\u001b[0m|\u001b[1m--wildcard\u001b[0m] [\u001b[1m--names-only\u001b[0m] [\u001b[1m-a\u001b[0m] [\u001b[1m-u\u001b[0m] [\u001b[1m--no-subpages\u001b[0m] [\u001b[1m-P\u001b[0m\u001b[m\r\n \u001b[4mpager\u001b[24m] [\u001b[1m-r\u001b[0m \u001b[4mprompt\u001b[24m] [\u001b[1m-7\u001b[0m] [\u001b[1m-E\u001b[0m \u001b[4mencoding\u001b[24m] [\u001b[1m--no-hyphenation\u001b[0m] [\u001b[1m--no-justifi‐\u001b[0m\u001b[m\r\n \u001b[1mcation\u001b[0m] [\u001b[1m-p\u001b[0m \u001b[4mstring\u001b[24m] [\u001b[1m-t\u001b[0m] [\u001b[1m-T\u001b[0m[\u001b[4mdevice\u001b[24m]] [\u001b[1m-H\u001b[0m[\u001b[4mbrowser\u001b[24m]] [\u001b[1m-X\u001b[0m[\u001b[4mdpi\u001b[24m]] [\u001b[1m-Z\u001b[0m]\u001b[m\r\n [[\u001b["], [3.585479, "o", "4msection\u001b"], [3.585788, "o", "[24m] \u001b[4mpage\u001b[24m[.\u001b[4msection\u001b[24m] ...] 
...\u001b[m\r\n \u001b[1mman\u001b[0m \u001b[1m-k\u001b[0m [\u001b[4mapropos\u001b[24m \u001b[4moptions\u001b[24m] \u001b[4mregexp\u001b[24m ...\u001b[m\r\n \u001b[1mman\u001b[0m \u001b[1m-K\u001b[0m [\u001b[1m-w\u001b[0m|\u001b[1m-W\u001b[0m] [\u001b[1m-S\u001b[0m \u001b[4mlist\u001b[24m] [\u001b[1m-i\u001b[0m|\u001b[1m-I\u001b[0m] [\u001b[1m--regex\u001b[0m] [\u001b[4msection\u001b[24m] \u001b[4mterm\u001b[24m ...\u001b[m\r\n \u001b[1mman\u001b[0m \u001b[1m-f\u001b[0m [\u001b[4mwhatis\u001b[24m \u001b[4moptions\u001b[24m] \u001b[4mpage\u001b[24m ...\u001b[m\r\n \u001b[1mman\u001b[0m \u001b[1m-l\u001b[0m [\u001b[1m-C\u001b[0m \u001b[4mfile\u001b[24m] [\u001b[1m-d\u001b[0m] [\u001b[1m-D\u001b[0m] [\u001b[1m--warnings\u001b[0m[=\u001b[4mwarnings\u001b[24m]] [\u001b[1m-R\u001b[0m \u001b[4mencoding\u001b[24m] [\u001b[1m-L\u001b[0m\u001b[m\r\n \u001b[4mlocale\u001b[24m] [\u001b[1m-P\u001b[0m \u001b[4mpager\u001b[24m] [\u001b[1m-r\u001b[0m \u001b[4mprompt\u001b[24m] [\u001b[1m-7\u001b[0m] [\u001b[1m-E\u001b[0m \u001b[4mencoding\u001b[24m] [\u001b[1m-p\u001b[0m \u001b[4mstring\u001b[24m] [\u001b[1m-t\u001b[0m]\u001b[m\r\n [\u001b[1m-T\u001b[0m[\u001b[4mdevice\u001b[24m]] [\u001b[1m-H\u001b[0m[\u001b[4mbrowser\u001b[24m]] [\u001b[1m-X\u001b[0m[\u001b[4mdpi\u001b[24m]] [\u001b[1m-Z\u001b[0m] \u001b[4mfile\u001b[24m ...\u001b[m\r\n \u001b[1mman\u001b[0m \u001b[1m-w\u001b[0m|\u001b[1m-W\u001b[0m [\u001b[1m-C\u001b[0m \u001b[4mfile\u001b[24m] [\u001b[1m-d\u001b[0m] [\u001b[1m-D\u001b[0m] \u001b[4mpage\u001b[24m ...\u001b[m\r\n \u001b[1mman\u001b[0m \u001b[1m-c\u001b[0m [\u001b[1m"], [3.586087, "o", "-C\u001b[0m \u001b[4mfile\u001b[24m] [\u001b[1m-d\u001b[0m] [\u001b[1m-D\u001b[0m] \u001b[4mpage\u001b[24m ...\u001b[m\r\n \u001b[1mman\u001b[0m [\u001b[1m-?V\u001b[0m]\u001b[m\r\n\u001b[m\r\n\u001b[1mDESCRIPTION\u001b[0m\u001b[m\r\n\u001b[7m Manual page man(1) line 1 (press h for help or q to quit)\u001b[27m\u001b[K"], [5.099652, "o", "\r\u001b[K"], [5.100412, "o", " \u001b[1mman\u001b[0m is the system's manual pager. Each \u001b[4mpage\u001b[24m argument given to \u001b[1mman\u001b[0m is\u001b[m\r\n normally the name of a program, utility or function. The \u001b[4mmanual\u001b[24m \u001b[4mpage\u001b[24m\u001b[m\r\n associated with each of these arguments is then found and displayed. A\u001b[m\r\n \u001b[4msection\u001b[24m, if provided, will direct \u001b[1mman\u001b[0m to look only in that \u001b[4msection\u001b[24m of\u001b[m\r\n the manual. 
The default action is to search in all of the available\u001b[m\r\n \u001b[4msections\u001b[24m following a pre-defined order (\"1 n l 8 3 2 3posix 3pm 3perl\u001b[m\r\n 3am 5 4 9 6 7\" by default, unless overridden by the \u001b[1mSECTION\u001b[0m directive\u001b[m\r\n in \u001b[4m/etc/manpath.config\u001b[24m), and to show only the first \u001b[4mpage\u001b[24m found, even if\u001b[m\r\n \u001b[4mpage\u001b[24m exists in several \u001b[4msections\u001b[24m.\u001b[m\r\n\u001b[m\r\n The table below shows the \u001b[4msection\u001b[24m numbers of the manual followed by the\u001b[m\r\n types of pages they contain.\u001b[m\r\n\u001b[m\r\n 1 Executable programs or shell commands\u001b[m\r"], [5.10077, "o", "\n 2 System calls (functions provided by the kernel)\u001b[m\r\n 3 Library calls (functions within program libraries)\u001b[m\r\n 4 Special files (usually found in \u001b[4m/dev\u001b[24m)\u001b[m\r\n 5 File formats and conventions eg \u001b[4m/etc/passwd\u001b[24m\u001b[m\r\n 6 Games\u001b[m\r\n 7 Miscellaneous (including macro packages and conventions), e.g.\u001b[m\r\n \u001b[1mman\u001b[0m(7), \u001b[1mgroff\u001b[0m(7)\u001b[m\r\n 8 System administration commands (usually only for root)\u001b[m\r\n 9 Kernel routines [Non standard]\u001b[m\r\n\u001b[7m Manual page man(1) line 24 (press h for help or q to quit)\u001b[27m\u001b[K"], [10.259702, "o", "\r\u001b[K"], [10.260051, "o", "\u001b[m\r\n A manual \u001b[4mpage\u001b[24m consists of several sections.\u001b[m\r\n\u001b[m\r\n Conventional section names include \u001b[1mNAME\u001b[0m, \u001b[1mSYNOPSIS\u001b[0m, \u001b[1mCONFIGURATION\u001b[0m, \u001b[1mDE‐\u001b[0m\u001b[m\r\n \u001b[1mSCRIPTION\u001b[0m, \u001b[1mOPTIONS\u001b[0m, \u001b[1mEXIT\u001b[0m \u001b[1mSTATUS\u001b[0m, \u001b[1mRETURN\u001b[0m \u001b[1mVALUE\u001b[0m, \u001b[1mERRORS\u001b[0m, \u001b[1mENVIRONMENT\u001b[0m,\u001b[m\r\n"], [10.260674, "o", " \u001b[1mFILES\u001b[0m, \u001b[1mVERSIONS\u001b[0m, \u001b[1mCONFORMING\u001b[0m \u001b[1mTO\u001b[0m, \u001b[1mNOTES\u001b[0m, \u001b[1mBUGS\u001b[0m, \u001b[1mEXAMPLE\u001b[0m, \u001b[1mAUTHORS\u001b[0m, and\u001b[m\r\n \u001b[1mSEE\u001b[0m \u001b[1mALSO\u001b[0m.\u001b[m\r\n\u001b[m\r\n The following conventions apply to the \u001b[1mSYNOPSIS\u001b[0m section and can be used\u001b[m\r\n as a guide in other sections.\u001b[m\r\n\u001b[m\r\n \u001b[1mbold\u001b[0m \u001b[1mtext\u001b[0m type exactly as shown.\u001b[m\r\n \u001b[4mitalic\u001b[24m \u001b[4mtext\u001b[24m replace with appropriate argument.\u001b[m\r\n [\u001b[1m-abc\u001b[0m] any or all arguments within [ ] are optional.\u001b[m\r\n \u001b[1m-a\u001b[0m|\u001b[1m-b\u001b[0m options delimited by | cannot be used together.\u001b[m\r\n\u001b[m\r\n"], [10.261539, "o", " \u001b[4margument\u001b[24m ... \u001b[4margument\u001b[24m is repeatable.\u001b[m\r\n [\u001b[4mexpression\u001b[24m] ... entire \u001b[4mexpression\u001b[24m within [ ] is repeatable.\u001b[m\r\n\u001b[m\r\n Exact rendering may vary depending on the output device. 
For instance,\u001b[m\r\n man will usually not be able to render italics when running in a termi‐\u001b[m\r\n nal, and will typically use underlined or coloured text instead.\u001b[m\r\n\u001b[m\r\n\u001b[7m Manual page man(1) line 47 (press h for help or q to quit)\u001b[27m\u001b[K"], ["search + 0.467738", "o", "\r\u001b[K/"], ["search + 2.427696", "o", "\u001b[Km\bm"], ["search + 2.811736", "o", "\u001b[Ka\ba"], ["search + 3.187628", "o", "\u001b[Kn\bn"], ["search + 4.835791", "o", "\r\u001b[K"], ["search + 4.838372", "o", "\u001b[1;1H\u001b[m\r\n\u001b[2;1H A manual \u001b[4mpage\u001b[24m consists of several sections.\u001b[m\r\n\u001b[3;1H\u001b[m\r\n\u001b[4;1H Conventional section names include \u001b[1mNAME\u001b[0m, \u001b[1mSYNOPSIS\u001b[0m, \u001b[1mCONFIGURATION\u001b[0m, \u001b[1mDE‐\u001b[0m\u001b[m\r\n\u001b[5;1H \u001b[1mSCRIPTION\u001b[0m, \u001b[1mOPTIONS\u001b[0m, \u001b[1mEXIT\u001b[0m \u001b[1mSTATUS\u001b[0m, \u001b[1mRETURN\u001b[0m \u001b[1mVALUE\u001b[0m, \u001b[1mERRORS\u001b[0m, \u001b[1mENVIRONMENT\u001b[0m,\u001b[m\r\n\u001b[6;1H \u001b[1mFILES\u001b[0m, \u001b[1mVERSIONS\u001b[0m, \u001b[1mCONFORMING\u001b[0m \u001b[1mTO\u001b[0m, \u001b[1mNOTES\u001b[0m, \u001b[1mBUGS\u001b[0m, \u001b[1mEXAMPLE\u001b[0m, \u001b[1mAUTHORS\u001b[0m, and\u001b[m\r\n\u001b[7;1H \u001b[1mSEE\u001b[0m \u001b[1mALSO\u001b[0m.\u001b[m\r\n\u001b[8;1H\u001b[m\r\n\u001b[9;1H The following conventions apply to the \u001b[1mSYNOPSIS\u001b[0m section and can be used\u001b[m\r\n\u001b[10;1H as a guide in other sections.\u001b[m\r\n\u001b[11;1H\u001b[m\r\n\u001b[12;1H \u001b[1mbold\u001b[0m \u001b[1mtext\u001b[0m type exactly as shown.\u001b[m\r\n\u001b[13;1H \u001b[4mitalic\u001b[24m \u001b[4mtext\u001b[24m replace with appropriate argument.\u001b[m\r\n\u001b[14;1H [\u001b[1m-abc\u001b[0m] any or all arguments within [ ] are optional.\u001b[m\r\n\u001b[15;1H \u001b[1m-a\u001b[0m|\u001b[1m-b\u001b[0m"], ["search + 4.838671", "o", " "], ["search + 4.840168", "o", " options delimited by | cannot be used together.\u001b[m\r\n\u001b[16;1H\u001b[m\r\n\u001b[17;1H \u001b[4margument\u001b[24m ... \u001b[4margument\u001b[24m is repeatable.\u001b[m\r\n\u001b[18;1H [\u001b[4mexpression\u001b[24m] ... entire \u001b[4mexpression\u001b[24m within [ ] is repeatable.\u001b[m\r\n\u001b[19;1H\u001b[m\r\n\u001b[20;1H Exact rendering may vary depending on the output device. 
For instance,\u001b[m\r\n\u001b[21;1H man will usually not be able to render italics when running in a termi‐\u001b[m\r\n\u001b[22;1H nal, and will typically use underlined or coloured text instead.\u001b[m\r\n\u001b[23;1H\u001b[m\r\n\u001b[24;1H\u001b[1;1H\u001b[m\r\n\u001b[2;1H A \u001b[7mman\u001b[27mual \u001b[4mpage\u001b[24m consists of several sections.\u001b[m\r\n\u001b[3;1H\u001b[m\r\n\u001b[4;1H Conventional section names include \u001b[1mNAME\u001b[0m, \u001b[1mSYNOPSIS\u001b[0m, \u001b[1mCONFIGURATION\u001b[0m, \u001b[1mDE‐\u001b[0m\u001b[m\r\n\u001b[5;1H \u001b[1mSCRIPTION\u001b[0m, \u001b[1mOPTIONS\u001b[0m, \u001b[1mEXIT\u001b[0m \u001b[1mSTATUS\u001b[0m, \u001b[1mRETURN\u001b[0m \u001b[1mVALUE\u001b[0m, \u001b[1mERRORS\u001b[0m, \u001b[1mENVIRONMENT\u001b[0m,\u001b[m\r\n\u001b[6;1H \u001b[1mFILES\u001b[0m, \u001b[1mVERSIONS\u001b[0m, \u001b[1mCONFORMING\u001b[0m \u001b[1mTO\u001b[0m, \u001b[1mNOTES\u001b[0m, \u001b[1mBUGS\u001b[0m, "], ["search + 4.840432", "o", " \u001b[1mEXAMPLE\u001b"], ["search + 4.841096", "o", "[0m, \u001b[1mAUTHORS\u001b[0m, and\u001b[m\r\n\u001b[7;1H \u001b[1mSEE\u001b[0m \u001b[1mALSO\u001b[0m.\u001b[m\r\n\u001b[8;1H\u001b[m\r\n\u001b[9;1H The following conventions apply to the \u001b[1mSYNOPSIS\u001b[0m section and can be used\u001b[m\r\n\u001b[10;1H as a guide in other sections.\u001b[m\r\n\u001b[11;1H\u001b[m\r\n\u001b[12;1H \u001b[1mbold\u001b[0m \u001b[1mtext\u001b[0m type exactly as shown.\u001b[m\r\n\u001b[13;1H \u001b[4mitalic\u001b[24m \u001b[4mtext\u001b[24m replace with appropriate argument.\u001b[m\r\n\u001b[14;1H [\u001b[1m-abc\u001b[0m] any or all arguments within [ ] are optional.\u001b[m\r\n\u001b[15;1H \u001b[1m-a\u001b[0m|\u001b[1m-b\u001b[0m options delimited by | cannot be used together.\u001b[m\r\n\u001b[16;1H\u001b[m\r\n\u001b[17;1H \u001b[4margument\u001b[24m ... \u001b[4margument\u001b[24m is repeatable.\u001b[m\r\n\u001b[18;1H [\u001b[4mexpression\u001b[24m] ... entire \u001b[4mexpression\u001b[24m within [ ] is repeatable.\u001b[m\r\n\u001b[19;1H\u001b[m\r\n\u001b[20;1H Exact rendering may vary depending on the output device. For instance,\u001b[m\r\n\u001b[21;1H \u001b[7mman\u001b[27m will usually not be able to render italics when running in a termi‐\u001b[m\r\n\u001b[22;1H nal, and will ty"], ["search + 4.841183", "o", "pically use und"], ["search + 4.842076", "o", "erlined or coloured text instead.\u001b[m\r\n\u001b[23;1H\u001b[m\r\n\u001b[24;1H The com\u001b[7mman\u001b[27md or function illustration is a pattern that should match all\u001b[m\r\n\u001b[7m Manual page man(1) line 48 (press h for help or q to quit)\u001b[27m\u001b[K"], ["search + 9.491816", "o", "\r\u001b[K/\r\u001b[K"], ["search + 9.492785", "o", " possible invocations. 
In some cases it is advisable to illustrate sev‐\u001b[m\r\n eral exclusive invocations as is shown in the \u001b[1mSYNOPSIS\u001b[0m section of this\u001b[m\r\n \u001b[7mman\u001b[27mual page.\u001b[m\r\n\u001b[m\r\n\u001b[1mEXAMPLES\u001b[0m\u001b[m\r\n \u001b[1m\u001b[7mman\u001b[27m\u001b[0m \u001b[4mls\u001b[24m\u001b[m\r\n Display the \u001b[7mman\u001b[27mual page for the \u001b[4mitem\u001b[24m (program) \u001b[4mls\u001b[24m.\u001b[m\r\n\u001b[m\r\n \u001b[1m\u001b[7mman\u001b[27m\u001b[0m \u001b[4m\u001b[7mman\u001b[27m\u001b[24m.\u001b[4m7\u001b[24m\u001b[m\r\n Display the \u001b[7mman\u001b[27mual page for macro package \u001b[4m\u001b[7mman\u001b[27m\u001b[24m from section \u001b[4m7\u001b[24m.\u001b[m\r\n\u001b[m\r\n \u001b[1m\u001b[7mman\u001b[27m\u001b[0m \u001b[1m-a\u001b[0m \u001b[4mintro\u001b[24m\u001b[m\r\n Display, in succession, all of the available \u001b[4mintro\u001b[24m \u001b[7mman\u001b[27mual pages\u001b[m\r\n contained within the \u001b[7mman\u001b[27mual. It is possible to quit between suc‐\u001b[m\r\n cessive displays or skip any of them.\u001b[m\r\n\u001b[m\r\n \u001b[1m\u001b[7mman\u001b[27m\u001b[0m \u001b[1m-t\u001b[0m \u001b[4malias\u001b[24m | \u001b[4mlpr\u001b[24m \u001b[4m-Pps\u001b[24m\u001b[m\r\n Format the \u001b[7mman\u001b[27mual page referenced by `\u001b[4mali"], ["search + 9.493205", "o", "as\u001b[24m', usually a shell \u001b[7mman\u001b[27m‐\u001b[m\r\n ual page, into the default \u001b[1mtroff\u001b[0m or \u001b[1mgroff\u001b[0m format and pipe it to the\u001b[m\r\n\u001b[7m Manual page man(1) line 67 (press h for help or q to quit)\u001b[27m\u001b[K"], ["search + 10.755837", "o", "\r\u001b[K/\r\u001b[K"], ["search + 10.75625", "o", " printer named \u001b[4mps\u001b[24m. The default output for \u001b[1mgroff\u001b[0m is usually Post‐\u001b[m\r\n Script. \u001b[1m\u001b[7mman\u001b[27m\u001b[0m \u001b[1m--help\u001b[0m should advise as to which processor is bound to\u001b[m\r\n the \u001b[1m-t\u001b[0m option.\u001b[m\r\n\u001b[7m Manual page man(1) line 70 (press h for help or q to quit)\u001b[27m\u001b[K"], ["search + 11.707833", "o", "\r\u001b[K/\r\u001b[K"], ["search + 11.708289", "o", "\u001b[m\r\n \u001b[1m\u001b[7mman\u001b[27m\u001b[0m \u001b[1m-l\u001b[0m \u001b[1m-T\u001b[0m\u001b[4mdvi\u001b[24m \u001b[4m./foo.1x.gz\u001b[24m \u001b[1m>\u001b[0m \u001b[4m./foo.1x.dvi\u001b[24m\u001b[m\r\n This com\u001b[7mman\u001b[27md will decompress and format the nroff source \u001b[7mman\u001b[27mual\u001b[m\r\n\u001b[7m Manual page man(1) line 73 (press h for help or q to quit)\u001b[27m\u001b[K"], ["search + 12.75573", "o", "\r\u001b[K/\r\u001b[K"], ["search + 12.75613", "o", " page \u001b[4m./foo.1x.gz\u001b[24m into a \u001b[1mdevice\u001b[0m \u001b[1mindependent\u001b[0m \u001b[1m(dvi)\u001b[0m file. The redi‐\u001b[m\r\n rection is necessary as the \u001b[1m-T\u001b[0m flag causes output to be directed to\u001b[m\r\n \u001b[1mstdout\u001b[0m with no pager. 
The output could be viewed with a program\u001b[m\r\n\u001b[7m Manual page man(1) line 76 (press h for help or q to quit)\u001b[27m\u001b[K"], ["up + 0.547791", "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"], ["up + 0.54827", "o", "\u001b[KA\bA\r\u001b[K"], ["up + 0.548773", "o", "\u001b[H\u001bM\u001b[1mEXAMPLES\u001b[0m\u001b[m\r\n\u001b[24;1H\r\u001b[K\u001b[7m Manual page man(1) line 75 (press h for help or q to quit)\u001b[27m\u001b[K"], ["up + 1.339862", "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"], ["up + 1.340289", "o", "\u001b[KA\bA\r\u001b[K\u001b[H\u001bM\u001b[m\r\n\u001b[24;1H\r\u001b[K\u001b[7m Manual page man(1) line 74 (press h for help or q to quit)\u001b[27m\u001b[K"], ["up + 2.035805", "o", "\r\u001b[K \u001b[KESC\b\b\bESC"], ["up + 2.036168", "o", "\u001b[KO\bO\u001b[KA\bA\r\u001b[K\u001b[H\u001bM \u001b[7mman\u001b[27mual page.\u001b[m\r\n\u001b[24;1H\r\u001b[K\u001b[7m Manual page man(1) line 73 (press h for help or q to quit)\u001b[27m\u001b[K"], ["up + 2.827918", "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"], ["up + 2.82833", "o", "\u001b[KA\bA\r\u001b[K\u001b[H\u001bM eral exclusive invocations as is shown in the \u001b[1mSYNOPSIS\u001b[0m section of this\u001b[m\r\n\u001b[24;1H\r\u001b[K\u001b[7m Manual page man(1) line 72 (press h for help or q to quit)\u001b[27m\u001b[K"], ["up + 3.459969", "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"], ["up + 3.46038", "o", "\u001b[KA\bA\r\u001b[K\u001b[H\u001bM possible invocations. In some cases it is advisable to illustrate sev‐\u001b[m\r\n\u001b[24;1H\r\u001b[K\u001b[7m Manual page man(1) line 71 (press h for help or q to quit)\u001b[27m\u001b[K"], ["up + 4.059878", "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"], ["up + 4.060296", "o", "\u001b[KA\bA\r\u001b[K\u001b[H\u001bM The com\u001b[7mman\u001b[27md or function illustration is a pattern that should match all\u001b[m\r\n\u001b[24;1H\r\u001b[K\u001b[7m Manual page man(1) line 70 (press h for help or q to quit)\u001b[27m\u001b[K"], ["up + 4.731672", "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"], ["up + 4.73192", "o", "\u001b[KA\bA\r\u001b[K\u001b[H\u001bM\u001b[m\r\n\u001b[24;1H\r\u001b[K\u001b[7m Manual page man(1) line 69 (press h for help or q to quit)\u001b[27m\u001b[K"], ["up + 5.331857", "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO\u001b[KA\bA\r\u001b[K"], ["up + 5.332293", "o", "\u001b[H\u001bM nal, and will typically use underlined or coloured text instead.\u001b[m\r\n\u001b[24;1H\r\u001b[K\u001b[7m Manual page man(1) line 68 (press h for help or q to quit)\u001b[27m\u001b[K"], ], 'text' : [ # EKRAN: man - przewijnaie stron 'Zapewne zauważyliśmy że dokumentację man przeglądaliśmy ekran po ekranie, <m> natomiast informacje wypisywane przez wywołanie komendy <m> z opcją help wypisywały się hurtem na ekran. <m>' 'Dzieje się tak dlatego że polecenie man korzysta z programu <m> nazywanego pagerem do podziału tekstu na strony. <m>' 'Rolę tą zazwyczaj pełni more lub less a kolejne strony <m> można wyświetlać z użyciem spacji. <mark name="search" />' # EKRAN: man - wyszukiwanie 'Oba te programy pozwalają także na wyszukiwanie z użyciem ukośnika, <m> rozpoczynającego wprowadzenie szukanego tekstu <m> i klawisza n do wyszukania kolejnego wystąpienia. <m>' 'Natomiast zakończenie działania odbywa się za pomocą klawisza q. <m>' 'Zarówno taki sposób wyszukiwania jak też zamykania programu <m> jest wart zapamiętania, gdyż jest często spotykaną konwencją. 
<mark name="up" />' # EKRAN: man - przewijnaie w góre linia po linii 'Less jest bardziej rozbudowany niż more i pozwala także na <m> przewijanie linia po linii, przewijanie w obu kierunkach <m> z użyciem strzałek oraz page up i page down, <m> a także wyszukiwanie wstecz z użyciem shift n <m>' ] }, { 'console': [ [0.058688, "o", eduMovie.clear + eduMovie.prompt()], [0.69478, "o", "l"], [0.886743, "o", "e"], [1.142775, "o", "s"], [1.294708, "o", "s"], [1.454712, "o", " "], [1.678724, "o", "/"], [1.838742, "o", "e"], [2.086702, "o", "t"], [2.452806, "o", "c/"], [2.94277, "o", "p"], [3.478765, "o", "a"], [3.702748, "o", "s"], [3.940034, "o", "\u0007swd"], [4.270745, "o", "\r\n"], [4.279067, "o", "\u001b[?1049h\u001b[22;0;0t\u001b[?1h\u001b=\r"], [4.279456, "o", "root:x:0:0:root:/root:/bin/bash\r\ndaemon:x:1:1:daemon:/usr/sbin:/usr/sbin/nologin\r\nbin:x:2:2:bin:/bin:/usr/sbin/nologin\r\nsys:x:3:3:sys:/dev:/usr/sbin/nologin\r\nsync:x:4:65534:sync:/bin:/bin/sync\r\ngames:x:5:60:games:/usr/games:/usr/sbin/nologin\r\nman:x:6:12:man:/var/cache/man:/usr/sbin/nologin\r\nlp:x:7:7:lp:/var/spool/lpd:/usr/sbin/nologin\r\nmail:x:8:8:mail:/var/mail:/usr/sbin/nologin\r\nnews:x:9:9:news:/var/spool/news:/usr/sbin/nologin\r\nuucp:x:10:10:uucp:/var/spool/uucp:/usr/sbin/nologin\r\nproxy:x:13:13:proxy:/bin:/usr/sbin/nologin\r\nwww-data:x:33:33:www-data:/var/www:/usr/sbin/nologin\r\nbackup:x:34:34:backup:/var/backups:/usr/sbin/nologin\r\nlist:x:38:38:Mailing List Manager:/var/list:/usr/sbin/nologin\r\nirc:x:39:39:ircd:/var/run/ircd:/usr/sbin/nologin\r\ngnats:x:41:41:Gnats Bug-Reporting System (admin):/var/lib/gnats:/usr/sbin/nologi \bn\r\nnobody:x:65534:65534:nobody:/nonexistent:/usr/sbin/nologin\r\nrrp:x:1000:1000:Robert Paciorek,,,:/rrp:/bin/bash"], [4.27982, "o", "\r\nmessagebus:x:101:104::/var/run/dbus:/bin/false\r\nsshd:x:102:65534::/var/run/sshd:/usr/sbin/nologin\r\nsystemd-timesync:x:103:111:systemd Time Synchronization,,,:/run/systemd:/bin/fal \b\u001b[7m/etc/passwd\u001b[27m\u001b[K"], [5.254821, "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"], [5.255221, "o", "\u001b[KB\bB\r\u001b[Kse\r\n:\u001b[K"], [5.638786, "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"], [5.639159, "o", "\u001b[KB\bB\r\u001b[Ksystemd-network:x:105:113:systemd Network Management,,,:/run/systemd/netif:/bin/ \b:\u001b[K"], [6.83884, "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"], [6.839274, "o", "\u001b[KA\bA\r\u001b[K\u001b[H\u001bMdaemon:x:1:1:daemon:/usr/sbin:/usr/sbin/nologin\r\n\u001b[24;1H\r\u001b[K:\u001b[K"], [7.126802, "o", "\r\u001b[K \u001b[KESC\b\b\bESC\u001b[KO\bO"], [7.127176, "o", "\u001b[KA\bA\r\u001b[K\u001b[H\u001bMroot:x:0:0:root:/root:/bin/bash\r\n\u001b[24;1H\r\u001b[K:\u001b[K"], ], 'text' : [ 'Polecenia te mogą być także użyte do wyświetlania plików, <m> a także, dzięki przekierowaniom strumieni, <m> do podziału na strony outputu innych poleceń. <m>' ] }, ]
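The closing narration above notes that pagers can also paginate the output of other programs. The same effect can be driven from Python; a minimal sketch, assuming a POSIX system with less on PATH (the function name page_text is made up):

import subprocess

def page_text(text: str, pager: str = "less") -> None:
    # Feed the text to the pager over stdin, the same way a shell
    # pipeline such as `dmesg | less` would.
    subprocess.run([pager], input=text.encode("utf-8"))

if __name__ == "__main__":
    sample = "\n".join(f"line {i}" for i in range(200))
    page_text(sample)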
173.795775
1,738
0.678066
0
0
0
0
0
0
0
0
23,172
0.935638
299c02cae606323659e0dd5bb1c799eaccfa8b0a
2,109
py
Python
setup.py
tilakpatidar/pytest-snowflake_bdd
db49f0a6d844828b607a2717b96bba517995cf72
[ "MIT" ]
null
null
null
setup.py
tilakpatidar/pytest-snowflake_bdd
db49f0a6d844828b607a2717b96bba517995cf72
[ "MIT" ]
null
null
null
setup.py
tilakpatidar/pytest-snowflake_bdd
db49f0a6d844828b607a2717b96bba517995cf72
[ "MIT" ]
1
2022-01-24T08:26:08.000Z
2022-01-24T08:26:08.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- import codecs import os from setuptools import setup def read(fname): file_path = os.path.join(os.path.dirname(__file__), fname) return codecs.open(file_path, encoding='utf-8').read() gh_run_number = os.environ.get("BUILD_NUMBER", None) build_number = None if gh_run_number is None or gh_run_number == "" else gh_run_number version = '0.2.2' setup( name='pytest-snowflake_bdd', version=f"{version}-{build_number}" if build_number else version, author='Tilak Patidar', author_email='[email protected]', maintainer='Tilak Patidar', maintainer_email='[email protected]', license='MIT', url='https://github.com/tilakpatidar/pytest-snowflake_bdd', description='Setup test data and run tests on snowflake in BDD style!', long_description=read('README.rst'), py_modules=['pytest_snowflake_bdd'], python_requires='>=3.6.7', install_requires=['pytest>=6.2.0', 'pytest-bdd>=3.2.1', 'snowflake-sqlalchemy>=1.3.2', 'SQLAlchemy>=1.4.27', 'pandas>=0.25.3', 'python-dateutil>=2.8.2'], tests_require=[ 'tox', ], classifiers=[ 'Development Status :: 4 - Beta', 'Framework :: Pytest', 'Intended Audience :: Developers', 'Topic :: Software Development :: Testing', 'Topic :: Database', 'Topic :: Software Development :: Testing :: BDD', 'Programming Language :: Python', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Operating System :: OS Independent', 'License :: OSI Approved :: MIT License', ], packages=["pytest_snowflake_bdd"], entry_points={ 'pytest11': [ 'pytest-snowflake-bdd = pytest_snowflake_bdd.plugin', ], }, )
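The entry_points block in the setup.py above is what makes pytest discover the plugin: the pytest11 group maps a plugin name to an importable module. As a rough illustration of what such a plugin module can contain, here is a sketch; the fixture below is hypothetical and not taken from pytest-snowflake_bdd:

# Sketch of a pytest11-registered plugin module (hypothetical contents)
import pytest

@pytest.fixture
def snowflake_connection_string():
    # A module registered under the "pytest11" entry point is imported
    # automatically by pytest; any fixtures defined here become
    # available to every test session with the plugin installed.
    return "snowflake://user:password@account/database"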
33.47619
114
0.625889
0
0
0
0
0
0
0
0
1,173
0.556188
299c9b32319909c8f36fc5af498db57a782db34f
437
py
Python
integration_tests/test_12_dmaap.py
krasm/python-onapsdk
87cd3017fc542a8afd3be51fbd89934ed87ed3a7
[ "Apache-2.0" ]
4
2020-06-13T04:51:27.000Z
2021-01-06T15:00:51.000Z
integration_tests/test_12_dmaap.py
krasm/python-onapsdk
87cd3017fc542a8afd3be51fbd89934ed87ed3a7
[ "Apache-2.0" ]
10
2021-09-20T15:42:47.000Z
2021-09-23T12:49:51.000Z
integration_tests/test_12_dmaap.py
krasm/python-onapsdk
87cd3017fc542a8afd3be51fbd89934ed87ed3a7
[ "Apache-2.0" ]
8
2020-08-28T10:56:02.000Z
2022-02-11T17:06:03.000Z
# SPDX-License-Identifier: Apache-2.0 # Copyright 2020 Nokia import pytest import logging import os from onapsdk.dmaap.dmaap import Dmaap logging.basicConfig(level=os.environ.get("LOGLEVEL", "DEBUG")) @pytest.mark.integration def test_should_get_all_topics_from_dmaap(): # given # when response = Dmaap.get_all_topics(basic_auth={'username': 'demo', 'password': 'demo123456!'}) # then assert len(response) == 9
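The credentials in the test above are hard-coded, which is common in throwaway integration tests but easy to lift into the environment. A sketch of one way to do that with the standard library; DMAAP_USER and DMAAP_PASSWORD are made-up variable names:

import os

def dmaap_basic_auth():
    # Fall back to the demo credentials when the variables are unset,
    # so the test keeps working in the default integration environment.
    return {
        "username": os.environ.get("DMAAP_USER", "demo"),
        "password": os.environ.get("DMAAP_PASSWORD", "demo123456!"),
    }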
20.809524
95
0.729977
0
0
0
0
231
0.528604
0
0
134
0.306636
299d93368abb2922353eb3246c80b4d0b6d61d48
390
py
Python
awx/main/migrations/0112_deployhistory_date.py
Pavloid21/awx
224827f6060013b996eb8210597bca68cda65d40
[ "Apache-2.0" ]
null
null
null
awx/main/migrations/0112_deployhistory_date.py
Pavloid21/awx
224827f6060013b996eb8210597bca68cda65d40
[ "Apache-2.0" ]
null
null
null
awx/main/migrations/0112_deployhistory_date.py
Pavloid21/awx
224827f6060013b996eb8210597bca68cda65d40
[ "Apache-2.0" ]
null
null
null
# Generated by Django 2.2.8 on 2020-03-25 13:50 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('main', '0111_auto_20200325_1311'), ] operations = [ migrations.AddField( model_name='deployhistory', name='date', field=models.DateTimeField(auto_now=True), ), ]
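For context, a migration like the one above is what makemigrations emits when a field is added to an existing model. A sketch of the model change that would generate the AddField operation; the surrounding model class is reconstructed from the migration, not taken from the repository:

from django.db import models

class DeployHistory(models.Model):
    # ... pre-existing fields ...
    # auto_now=True makes Django overwrite the value on every save(),
    # which suits a "last touched" timestamp.
    date = models.DateTimeField(auto_now=True)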
20.526316
54
0.602564
297
0.761538
0
0
0
0
0
0
99
0.253846
299f9a135fb0ac674c3200f9214021f3cf9fd561
920
py
Python
policy.py
shantanusingh16/Pytorch-DQN
b7d3270e9e345e85e5c5a5216109529879ab77bd
[ "MIT" ]
4
2018-09-23T19:58:24.000Z
2022-03-22T20:32:36.000Z
policy.py
shantanusingh16/Pytorch-DQN
b7d3270e9e345e85e5c5a5216109529879ab77bd
[ "MIT" ]
null
null
null
policy.py
shantanusingh16/Pytorch-DQN
b7d3270e9e345e85e5c5a5216109529879ab77bd
[ "MIT" ]
2
2019-05-22T06:02:38.000Z
2019-10-18T17:08:24.000Z
import numpy as np import torch from utils.helpers import process_state, device def make_epsilon_greedy_policy(estimator, nA): """ :param estimator: model that returns q values for a given state/action pair :param nA: number of actions in the environment :return: A function that takes in a state and an epsilon and returns probs for each action in the form of a tensor of length nA """ def policy_fn(state, epsilon): """ :param state: numpy array of shape 1 x 84 x 84 :param epsilon: exploration rate :return: action probabilities, of size nA """ A = torch.ones(nA) * epsilon / nA state = torch.from_numpy(state).float().to(device).unsqueeze(0) / 255.0 q_vals = estimator.forward(state) best_action = torch.argmax(q_vals, dim=1) # argmax over the action axis, not the batch axis A[best_action] += (1.0 - epsilon) return A return policy_fn
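A quick usage sketch of the factory above with a stand-in estimator, to make the shapes concrete. The dummy model and the sampling step are illustrative only, and it assumes the module above is importable as policy and that utils.helpers resolves device to the CPU:

import numpy as np
import torch

from policy import make_epsilon_greedy_policy  # the module above

class DummyEstimator:
    def forward(self, state):
        # Stand-in network: one row of q-values per state in the batch.
        return torch.tensor([[0.1, 0.9, 0.0, 0.0]])

policy_fn = make_epsilon_greedy_policy(DummyEstimator(), nA=4)
state = np.zeros((1, 84, 84), dtype=np.uint8)
probs = policy_fn(state, epsilon=0.1)
# probs is tensor([0.0250, 0.9250, 0.0250, 0.0250]); sample an action:
action = torch.multinomial(probs, num_samples=1).item()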
36.8
87
0.646739
0
0
0
0
0
0
0
0
434
0.471739
29a1b5f087c1d14e9f6ed91d094e1aa061d5a041
2,798
py
Python
phonotactics/onsets/onsets.py
shlomo-Kallner/coventreiya
aa0773693220025f8d2c23644a2c5d9d884773e9
[ "Apache-2.0" ]
null
null
null
phonotactics/onsets/onsets.py
shlomo-Kallner/coventreiya
aa0773693220025f8d2c23644a2c5d9d884773e9
[ "Apache-2.0" ]
null
null
null
phonotactics/onsets/onsets.py
shlomo-Kallner/coventreiya
aa0773693220025f8d2c23644a2c5d9d884773e9
[ "Apache-2.0" ]
null
null
null
__name__ = 'onsets' __version__ = '1.5.1' __package__ = 'phonotactics' # imports #some import machinery checking and manipulations... #import sys #import os #from os import path #if '__file__' in dir(): # __mod_path = path.dirname(__file__) # if __mod_path not in sys.path: # sys.path.append(__mod_path) # __pack_path = path.dirname(__mod_path) # if __pack_path not in sys.path: # sys.path.append(__pack_path) from coventreiya.utils.ver import ver from coventreiya.utils.ver import gen_ver from coventreiya.utils.ver import Version_Registry from coventreiya.phonotactics.abc import abc ######################################################## # # # Generating the Onsets # # class Onsets(abc): def __init__(self, min_length=0, max_length=0, major=0, minor=0, patch=0, version=None): super().__init__(min_length, max_length, major, minor, patch, version) pass ################################################################################ # # Version Information Control & UnExported [but Versioned] Object Instantiation # # __versions = Version_Registry( Onsets() ) def register( version, functor ): if isinstance( version, Onsets ): return __versions.register( version, functor ) else: raise TypeError() def get_version(major=0, minor=0, patch=0, version=None): return __versions.get( major, minor, patch, version ) def gen_version( major=0, minor=0, patch=0, version=None ): return __versions.gen( major, minor, patch, version ) def get_all_versions(): return list(__versions) ################################################################################### # # Getting/Setting the default/current version... # def get_current(): return __versions.current() def get_current_version(): return __versions.current().version() def reset_current_version( major=0, minor=0, patch=0, version=None ): v = gen_ver(major, minor, patch, version) return __versions.current(v) ################################################################################### # # The original default version -- used for the (now obsolete and removed) # "default" gen_*_ functions and the pre-generated lists... # Note: the *COMPATIBILITY_ONLY* default gen_*_ functions will self-update to # accommodate resets (they call into *THE_CURRENT_VERSION_OBJECT*!!) # the PRE-GENERATED LISTS will not be updated at all... # Note: VERSION 2_0: the *OLD* gen_*_ functions no longer self-update as # they are now directly linked to version 1.5.1 only. # # from ver_1_5_1 import * # __versions.current(gen_ver(1,5,1))
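A short usage sketch of the registry interface defined above, assuming the module is importable under the path suggested by its own imports and that version 1.5.1 has been registered as the commented-out lines at the bottom hint:

from coventreiya.phonotactics.onsets import onsets

onsets.reset_current_version(1, 5, 1)   # make 1.5.1 the default
current = onsets.get_current()          # the registered Onsets instance
print(current.version(), list(onsets.get_all_versions()))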
30.086022
84
0.592566
248
0.088635
0
0
0
0
0
0
1,447
0.517155
29a35e6f75f695c4d26d13d7a9c5d6dff08f119d
6,228
py
Python
aptitudetech_private/aptitudetech_private/doctype/simplified_time_reporting/simplified_time_reporting.py
CloudGround/aptitudetech_private
d4d150226bd33ea0c76086264286ae7cae52457f
[ "MIT" ]
null
null
null
aptitudetech_private/aptitudetech_private/doctype/simplified_time_reporting/simplified_time_reporting.py
CloudGround/aptitudetech_private
d4d150226bd33ea0c76086264286ae7cae52457f
[ "MIT" ]
null
null
null
aptitudetech_private/aptitudetech_private/doctype/simplified_time_reporting/simplified_time_reporting.py
CloudGround/aptitudetech_private
d4d150226bd33ea0c76086264286ae7cae52457f
[ "MIT" ]
1
2019-05-17T00:04:05.000Z
2019-05-17T00:04:05.000Z
# -*- coding: utf-8 -*- # Copyright (c) 2019, Aptitudetech and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe import _ from frappe.model.document import Document class SimplifiedTimeReporting(Document): def onload_post_render(self): import json #https://stackoverflow.com/a/610923/8291000 issues = self.load_closed_issues() remaining_issues = [] try: if self.timesheet_detail: td_issues=[td.issue for td in self.get('timesheet_detail')] for issue in issues: #Load newest issues - not in the timesheet table yet if issue['name'] not in td_issues: remaining_issues.append(issue) self.add_timesheet_rows(remaining_issues) except AttributeError: self.employee = frappe.db.get_value("Employee", {"user_id" : frappe.session.user}, "name") self.add_timesheet_rows(issues) def add_timesheet_rows(self, issues = []): import datetime from datetime import datetime if issues: for issue in issues: end_time_obj = datetime.strptime(issue['reported_work_end_time'].split('.')[0], '%Y-%m-%d %H:%M:%S') start_time_obj = datetime.strptime(issue['reported_work_start_time'].split('.')[0], '%Y-%m-%d %H:%M:%S') diff_time = self.get_diff_time(start_time_obj, end_time_obj) detail = { 'issue': issue['name'], 'from_time': issue['reported_work_start_time'], 'to_time': issue['reported_work_end_time'], 'note' : issue['description'], 'project' : issue['project'] if issue['project'] else None, 'hours' : diff_time } self.append("timesheet_detail", detail) def before_save(self): import json, datetime from frappe.utils import now_datetime from datetime import datetime _now = now_datetime() self.posting_date = datetime.strptime(str(_now).split('.')[:-1][0], '%Y-%m-%d %H:%M:%S') self.total_reported_time = self.get_total_reported_time() self.total_captured_time = self.get_total_captured_time() def on_submit(self): import json import datetime from frappe.utils import now_datetime, getdate if self.workflow_state == 'Approved' or self.workflow_state == 'To Approve': _now = now_datetime() expenses_list = [] if self.expenses: data = json.loads(str(frappe.as_json(self.expenses))) #need to be as_json, otherwise the json won't load because of the datetime attribute for expense in data: try: description = expense["description"] except KeyError: description = "" exp = { 'expense_date' : expense['date'], 'expense_type' : expense['reason'], 'description' : description, 'claim_amount' : expense['claim_amount'], 'sanctioned_amount' : expense['claim_amount'] } expenses_list.append(exp) frappe.new_doc('Expense Claim').update({ "employee": self.employee, "approval_status" : "Draft", "posting_date" : datetime.datetime.now().date(), "expenses" : expenses_list, "company" : "Aptitude Technologies" }).save() for detail in self.get('timesheet_detail'): if not detail.billable: continue service = frappe.db.get_value('Dynamic Link', {'parenttype': 'Issue', 'parent': detail.issue, 'link_doctype': 'Service'}, 'link_name') if not service: continue service_plan = frappe.db.get_value('Service', service, 'service_plan') metered_feature = frappe.db.get_value('Metered Feature', {'service_plan': service_plan}) if not metered_feature: continue args = { 'service': service, 'customer': frappe.db.get_value('Service', service, 'customer'), 'metered_feature': metered_feature, 'consumed_units': detail.hours, 'start_date': getdate(detail.from_time), 'end_date': getdate(detail.to_time), 'item_group': frappe.db.get_value('Employee', self.employee, 'employee_name'), 'item_code': detail.name, 'item_type': 
detail.activity_type, 'unit': 'Hours' } frappe.new_doc('Metered Feature Units Log').update(args).insert() def get_total_reported_time(self): import json total_reported_time = 0 issues = json.loads(str(frappe.as_json(self.timesheet_detail))) for issue in issues: total_reported_time = total_reported_time + issue['hours'] return total_reported_time def get_total_captured_time(self): import datetime from datetime import datetime total_captured_time = 0 issues = self.load_closed_issues() for issue in issues: end_time_obj = datetime.strptime(issue['captured_end_working_time'].split('.')[0], '%Y-%m-%d %H:%M:%S') start_time_obj = datetime.strptime(issue['captured_start_working_time'].split('.')[0], '%Y-%m-%d %H:%M:%S') diff_time = self.get_diff_time(start_time_obj, end_time_obj) # diff_time = self.get_diff_time(issue['captured_start_working_time'], issue['captured_end_working_time']) total_captured_time = total_captured_time + diff_time return total_captured_time def get_diff_time(self, start_time, end_time): import datetime return round(self.round_number_quarter((end_time - start_time).total_seconds()/3600), 2) def round_number_quarter(self, number): import math return math.ceil(number*4)/4 def load_closed_issues(self): import datetime, json cur_month = datetime.datetime.now().strftime("%m") cur_year = datetime.datetime.now().strftime("%Y") next_month = int(cur_month) + 1 next_year = cur_year if next_month == 13: next_month = 1 next_year = int(next_year) + 1 start_date = "{0}-{1}-01".format(cur_year, cur_month) end_date = "{0}-{1}-01".format(next_year, next_month) closed_issues = frappe.db.get_all("Issue", {"kanban_status" : "Completed", "reported_work_start_time" : [ ">=", start_date ], "reported_work_end_time" : [ "<=", end_date ]},['_assign, name, reported_work_start_time, reported_work_end_time, description, captured_start_working_time, captured_end_working_time']) self_issues = [] for issue in closed_issues: if issue['_assign'] and frappe.session.user in issue['_assign']: issue.project = frappe.db.get_value('Task', {'issue' : issue['name']}, 'project') self_issues.append(issue) return json.loads(str(frappe.as_json(self_issues)))
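The round_number_quarter helper in the file above implements the usual "round up to the next quarter hour" billing rule. A standalone illustration of the same arithmetic, in plain Python with no Frappe required:

import math

def round_up_to_quarter(hours: float) -> float:
    # ceil(x * 4) / 4 snaps any duration up to the next 0.25 boundary.
    return math.ceil(hours * 4) / 4

assert round_up_to_quarter(0.25) == 0.25   # already on a boundary
assert round_up_to_quarter(1.01) == 1.25   # 61 minutes bills as 1.25 h
assert round_up_to_quarter(1.26) == 1.5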
34.793296
312
0.701028
5,975
0.959377
0
0
0
0
0
0
1,737
0.278902
29a405435385e49ddae23458da015f3ba0c567e1
442
py
Python
6 - Python/Collections/7 - Piling Up!.py
Terence-Guan/Python.HackerRank
165a5f0e739c7678dfac7eae95443018e2167c3d
[ "MIT" ]
88
2016-10-23T16:41:14.000Z
2019-12-30T23:51:47.000Z
HackerRank/6 - Python/Collections/7 - Piling Up!.py
natalie-o-perret/coding-challenges
9a242e0ec54488f59be82592822b31ff51af1633
[ "MIT" ]
1
2018-10-13T14:31:54.000Z
2018-10-13T14:31:54.000Z
HackerRank/6 - Python/Collections/7 - Piling Up!.py
natalie-o-perret/coding-challenges
9a242e0ec54488f59be82592822b31ff51af1633
[ "MIT" ]
82
2017-02-01T17:02:56.000Z
2020-02-01T11:45:58.000Z
from collections import deque T = int(input()) for t in range(T): n = int(input()) lengths = deque(map(int, input().split())) top = max(lengths) while len(lengths) > 0: left = lengths[0] right = lengths[-1] if (right >= left) and (right <= top): top = right lengths.pop() elif (left >= right) and (left <= top): top = left lengths.popleft() else: break if len(lengths) == 0: print("YES") else: print("NO")
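The same greedy idea as the loop above, repackaged as a function for clarity: always take the larger of the two ends while it still fits under the cube most recently placed on top. The sample values below are made up:

from collections import deque

def can_stack(blocks):
    # Take the larger end each time; if it exceeds the current top,
    # it can never be placed later, so the pile is impossible.
    d = deque(blocks)
    top = float("inf")
    while d:
        pick = d.popleft() if d[0] >= d[-1] else d.pop()
        if pick > top:
            return False
        top = pick
    return True

assert can_stack([4, 3, 2, 1, 3, 4]) is True
assert can_stack([1, 3, 2]) is False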
19.217391
43
0.595023
0
0
0
0
0
0
0
0
9
0.020362
29a40a64c821d23b2e28418293629df23986810c
6,343
bzl
Python
tools/bzl/deps.bzl
xshaun/compiler-programl
f90bcd84700d0f245c80440a3d5fd29370d2f973
[ "Apache-2.0" ]
null
null
null
tools/bzl/deps.bzl
xshaun/compiler-programl
f90bcd84700d0f245c80440a3d5fd29370d2f973
[ "Apache-2.0" ]
null
null
null
tools/bzl/deps.bzl
xshaun/compiler-programl
f90bcd84700d0f245c80440a3d5fd29370d2f973
[ "Apache-2.0" ]
null
null
null
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") def programl_deps(): http_archive( name="labm8", sha256="e4bc669322533e7615f689e5e8a810446d0c803be2e3b21e99a4e0135709755e", strip_prefix="labm8-b98301dec615465a6567bed4ec4131753d1f8b32", urls=[ "https://github.com/ChrisCummins/labm8/archive/b98301dec615465a6567bed4ec4131753d1f8b32.tar.gz" ], ) http_archive( name="com_github_nelhage_rules_boost", sha256="4031539fe0af832c6b6ed6974d820d350299a291ba7337d6c599d4854e47ed88", strip_prefix="rules_boost-4ee400beca08f524e7ea3be3ca41cce34454272f", urls=[ "https://github.com/nelhage/rules_boost/archive/4ee400beca08f524e7ea3be3ca41cce34454272f.tar.gz" ], ) http_archive( name="llvm", sha256="ea0dbab56d11e78006c68c39bc99da672bb6adc7ca03237ba4eb88887bf91a93", strip_prefix="bazel_llvm-ae9ef2a711c5744fe52c5666d76976a3c6a3128b", urls=[ "https://github.com/ChrisCummins/bazel_llvm/archive/ae9ef2a711c5744fe52c5666d76976a3c6a3128b.tar.gz" ], ) http_archive( name="rules_python", sha256="64a3c26f95db470c32ad86c924b23a821cd16c3879eed732a7841779a32a60f8", strip_prefix="rules_python-748aa53d7701e71101dfd15d800e100f6ff8e5d1", urls=[ "https://github.com/bazelbuild/rules_python/archive/748aa53d7701e71101dfd15d800e100f6ff8e5d1.tar.gz" ], ) http_archive( name="com_github_chriscummins_rules_bats", strip_prefix="rules_bats-6600627545380d2b32485371bed36cef49e9ff68", sha256="bfaa7a5818e7d6b142ac6e564f383f69f72ea593eb7de360e9aa15db69f67505", urls=[ "https://github.com/ChrisCummins/rules_bats/archive/6600627545380d2b32485371bed36cef49e9ff68.tar.gz" ], ) http_archive( name="subprocess", build_file="@programl//:third_party/subprocess.BUILD", sha256="886df0a814a7bb7a3fdeead22f75400abd8d3235b81d05817bc8c1125eeebb8f", strip_prefix="cpp-subprocess-2.0", urls=["https://github.com/arun11299/cpp-subprocess/archive/v2.0.tar.gz",], ) http_archive( name="ctpl", build_file="@programl//:third_party/ctpl.BUILD", sha256="8c1cec7c570d6d84be1d29283af5039ea27c3e69703bd446d396424bf619816e", strip_prefix="CTPL-ctpl_v.0.0.2", urls=["https://github.com/vit-vit/CTPL/archive/ctpl_v.0.0.2.tar.gz"], ) http_archive( name="fmt", build_file="@programl//:third_party/fmt.BUILD", sha256="1cafc80701b746085dddf41bd9193e6d35089e1c6ec1940e037fcb9c98f62365", strip_prefix="fmt-6.1.2", urls=["https://github.com/fmtlib/fmt/archive/6.1.2.tar.gz"], ) http_archive( name="pybind11_json", build_file="@programl//:third_party/pybind11_json.BUILD", sha256="45957f8564e921a412a6de49c578ef1faf3b04e531e859464853e26e1c734ea5", strip_prefix="pybind11_json-0.2.4/include", urls=["https://github.com/pybind/pybind11_json/archive/0.2.4.tar.gz"], ) http_archive( name="nlohmann_json", build_file="@programl//:third_party/nlohmann_json.BUILD", sha256="87b5884741427220d3a33df1363ae0e8b898099fbc59f1c451113f6732891014", strip_prefix="single_include", urls=[ "https://github.com/nlohmann/json/releases/download/v3.7.3/include.zip" ], ) http_archive( name="build_stack_rules_proto", sha256="85ccc69a964a9fe3859b1190a7c8246af2a4ead037ee82247378464276d4262a", strip_prefix="rules_proto-d9a123032f8436dbc34069cfc3207f2810a494ee", urls=[ "https://github.com/stackb/rules_proto/archive/d9a123032f8436dbc34069cfc3207f2810a494ee.tar.gz" ], ) http_archive( name="tbb_mac", build_file="@programl//:third_party/tbb_mac.BUILD", sha256="6ff553ec31c33b8340ce2113853be1c42e12b1a4571f711c529f8d4fa762a1bf", strip_prefix="tbb2017_20170226oss", urls=[ "https://github.com/01org/tbb/releases/download/2017_U5/tbb2017_20170226oss_mac.tgz" ], ) http_archive( name="tbb_lin", 
build_file="@programl//:third_party/tbb_lin.BUILD", sha256="c4cd712f8d58d77f7b47286c867eb6fd70a8e8aef097a5c40f6c6b53d9dd83e1", strip_prefix="tbb2017_20170226oss", urls=[ "https://github.com/01org/tbb/releases/download/2017_U5/tbb2017_20170226oss_lin.tgz" ], ) http_archive( name="pybind11", build_file="@programl//:third_party/pybind11_bazel/pybind11.BUILD", sha256="1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d", strip_prefix="pybind11-2.4.3", urls=["https://github.com/pybind/pybind11/archive/v2.4.3.tar.gz"], ) http_archive( name="com_google_absl", sha256="d10f684f170eb36f3ce752d2819a0be8cc703b429247d7d662ba5b4b48dd7f65", strip_prefix="abseil-cpp-3088e76c597e068479e82508b1770a7ad0c806b6", url="https://github.com/abseil/abseil-cpp/archive/3088e76c597e068479e82508b1770a7ad0c806b6.tar.gz", ) http_archive( name="com_github_gflags_gflags", sha256="34af2f15cf7367513b352bdcd2493ab14ce43692d2dcd9dfc499492966c64dcf", strip_prefix="gflags-2.2.2", urls=["https://github.com/gflags/gflags/archive/v2.2.2.tar.gz"], ) http_archive( name="gtest", sha256="9dc9157a9a1551ec7a7e43daea9a694a0bb5fb8bec81235d8a1e6ef64c716dcb", strip_prefix="googletest-release-1.10.0", urls=[ "https://github.com/google/googletest/archive/release-1.10.0.tar.gz", ], ) http_archive( name="com_github_google_benchmark", sha256="616f252f37d61b15037e3c2ef956905baf9c9eecfeab400cb3ad25bae714e214", strip_prefix="benchmark-1.4.0", url="https://github.com/google/benchmark/archive/v1.4.0.tar.gz", ) http_archive( name="org_tensorflow", sha256="92116bfea188963a0e215e21e67c3494f6e1e6959f44dfbcc315f66eb70b5f83", strip_prefix="tensorflow-f13f807c83c0d8d4d1ef290a17f26fe884ccfe2f", urls=[ "https://github.com/ChrisCummins/tensorflow/archive/f13f807c83c0d8d4d1ef290a17f26fe884ccfe2f.tar.gz" ], ) http_archive( name="io_bazel_rules_closure", sha256="5b00383d08dd71f28503736db0500b6fb4dda47489ff5fc6bed42557c07c6ba9", strip_prefix="rules_closure-308b05b2419edb5c8ee0471b67a40403df940149", urls=[ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_closure/archive/308b05b2419edb5c8ee0471b67a40403df940149.tar.gz", "https://github.com/bazelbuild/rules_closure/archive/308b05b2419edb5c8ee0471b67a40403df940149.tar.gz", # 2019-06-13 ], )
35.836158
153
0.760366
0
0
0
0
0
0
0
0
4,485
0.707079
29a7a6484cb6277e0cdd34fa9a54d64187a477f7
1,082
py
Python
matrix_multiplication_evolution_example.py
bobbywlindsey/stokepy
e7f4ad0c27d8fc22129558db6ae0dbbab0627ace
[ "MIT" ]
9
2017-05-09T20:00:10.000Z
2020-07-02T18:00:22.000Z
matrix_multiplication_evolution_example.py
bobbywlindsey/stokepy
e7f4ad0c27d8fc22129558db6ae0dbbab0627ace
[ "MIT" ]
null
null
null
matrix_multiplication_evolution_example.py
bobbywlindsey/stokepy
e7f4ad0c27d8fc22129558db6ae0dbbab0627ace
[ "MIT" ]
2
2017-08-10T14:47:07.000Z
2019-01-25T02:37:34.000Z
import stokepy as sp import numpy as np # instantiate class fmc = sp.FiniteMarkovChain() # create initial distribution vector phi = np.array([0, 0, 1, 0, 0]) # generate Markov chain with no boundary conditions fmc.gen_from_params(phi, p = 0.6, num_states = 5, dim = 1) # apply boundary condition: absorbing, reflecting, semi-reflecting # only works for 1-dimensional Markov chains at the moment fmc.apply_boundary_condition(condition='absorbing') # choose solution method like Matrix Multiplication Evolution matrx_mult_evo = sp.MatrixMultiplicationEvolution(fmc, phi, steps = 2000,\ rec_class_states = []) # run the solution matrx_mult_evo.run() # get data from the run average_distribution = matrx_mult_evo.pi tpdf = matrx_mult_evo.tpdf absorption_proportions = matrx_mult_evo.absorption_proportions apbrc = matrx_mult_evo.recurrent_class_absorbed_proportions mean_absorption_time = matrx_mult_evo.mean_absorption_time # plot absorption times for recurrent classes matrx_mult_evo.plot_absorption()
33.8125
76
0.750462
0
0
0
0
0
0
0
0
386
0.356747
29a7cf6d7a2997edf3ae4f28829f450e4f401145
1,225
py
Python
tests/__main__.py
nickswebsite/nickswebsite-serializer
2c131a04a4105afae439670f96b5b72bdfe65854
[ "Unlicense" ]
2
2017-09-26T16:38:36.000Z
2018-08-09T15:09:51.000Z
tests/__main__.py
nickswebsite/nickswebsite-serializer
2c131a04a4105afae439670f96b5b72bdfe65854
[ "Unlicense" ]
8
2015-02-20T13:16:11.000Z
2016-12-20T14:55:43.000Z
tests/__main__.py
nickswebsite/nickswebsite-serializer
2c131a04a4105afae439670f96b5b72bdfe65854
[ "Unlicense" ]
6
2015-05-20T21:26:40.000Z
2018-08-08T10:33:04.000Z
import doctest import sys import unittest import r2dto from tests.test_acceptance import AcceptanceTests from tests.test_base_serializer import BaseSerializerTests __all__ = ["doctest", "sys", "unittest", "r2dto", "AcceptanceTests", "BaseSerializerTests"] try: import pep8 except ImportError: print("WARNING: pep8 not installed. Style will not be checked and therefore your build may fail when integrated" " with the main branch.") pep8 = None PEP8_SOURCES = [ "r2dto/__init__.py", "r2dto/base.py", "r2dto/fields.py", "r2dto/validators.py", "tests/__init__.py", "tests/__main__.py", "tests/test_acceptance.py", "tests/test_base_serializer.py", ] if __name__ == "__main__": if pep8 is not None: sg = pep8.StyleGuide(max_line_length=120) res = sg.check_files(PEP8_SOURCES) if res.total_errors != 0: print("pep8 failed") sys.exit(1) doctest_ctx = { "Serializer": r2dto.Serializer, "fields": r2dto.fields, "ValidationError": r2dto.ValidationError, } results = doctest.testfile("../README.md", globs=doctest_ctx) if results.failed != 0: sys.exit(1) unittest.main()
26.06383
117
0.660408
0
0
0
0
0
0
0
0
440
0.359184
29a7fecfec58a37e5770387c0619949240d50800
10,697
py
Python
manager/jobs/jobs.py
jlbrewe/hub
c737669e6493ad17536eaa240bed3394b20c6b7d
[ "Apache-2.0" ]
30
2016-03-26T12:08:04.000Z
2021-12-24T14:48:32.000Z
manager/jobs/jobs.py
jlbrewe/hub
c737669e6493ad17536eaa240bed3394b20c6b7d
[ "Apache-2.0" ]
1,250
2016-03-23T04:56:50.000Z
2022-03-28T02:27:58.000Z
manager/jobs/jobs.py
jlbrewe/hub
c737669e6493ad17536eaa240bed3394b20c6b7d
[ "Apache-2.0" ]
11
2016-07-14T17:04:20.000Z
2021-07-01T16:19:09.000Z
""" Module that defines the interface between the `manager` (i.e Django) and the `broker` (i.e. RabbitMQ). Defines three functions involved in a job's lifecycle: - `dispatch_job` - send a job to a queue - `update_job` - update the status of a job by checking it's (intermediate) result - `check_job` - for a parent job, trigger any child jobs, and / or update it's status - `cancel_job` - remove job from the queue, or terminate it if already started """ import datetime import logging import time from celery import Celery, signature from celery.result import AsyncResult from django.conf import settings from django.core.exceptions import PermissionDenied from django.utils import timezone from jobs.models import Job, JobMethod, JobStatus, Queue, Worker logger = logging.getLogger(__name__) # Setup the Celery app app = Celery("manager", broker=settings.BROKER_URL, backend=settings.CACHE_URL) app.conf.update( # By default Celery will keep on trying to connect to the broker forever # This overrides that. Initially try again immediately, then add 0.5 seconds for each # subsequent try (with a maximum of 3 seconds). # See https://github.com/celery/celery/issues/4296 broker_transport_options={ "max_retries": 10, "interval_start": 0, "interval_step": 0.5, "interval_max": 3, }, # Needed to ensure STARTED state is emitted task_track_started=True, ) def dispatch_job(job: Job) -> Job: """ Send a job to a queue. Decides which queue a job should be sent to and sends it. The queue can depend upon both the project and the account (either the account that the project is linked to, or the default account of the job creator). """ if not JobMethod.is_member(job.method): raise ValueError("Unknown job method '{}'".format(job.method)) if job.method in settings.JOB_METHODS_STAFF_ONLY and ( not job.creator or not job.creator.is_staff ): raise PermissionDenied if JobMethod.is_compound(job.method): children = job.children.all().order_by("id") if len(children) == 0: # If there are no children (e.g. a pull job for a project with no sources) # then job is immediately finished job.runtime = 0 job.is_active = False job.status = JobStatus.SUCCESS.value else: if job.method == JobMethod.parallel.value: # Dispatch all child jobs simultaneously for child in children: dispatch_job(child) else: # Dispatch the first child; subsequent children # will be status WAITING and will get dispatched later # on update of the parent. 
for index, child in enumerate(children): if index == 0: dispatch_job(child) else: child.is_active = True child.status = JobStatus.WAITING.value child.save() job.is_active = True job.status = JobStatus.DISPATCHED.value else: # Find queues that have active workers on them # order by descending priority queues = list( Queue.objects.filter( workers__in=Worker.objects.filter( # Has not finished finished__isnull=True, # Has been updated in the last x minutes updated__gte=timezone.now() - datetime.timedelta(minutes=15), ), ).order_by("priority") ) # Fallback to the default Stencila queue # Apart from anything else having this fallback is useful in development # because it means that the `overseer` service does not need to be running # in order to keep track of the number of workers listening on each queue # (during development `worker`s listen to the default queue) if len(queues) == 0: logger.warning("No queues found with active workers") queue, _ = Queue.get_or_create( account_name="stencila", queue_name="default" ) else: if job.creator is None or job.project is None: # Jobs created by anonymous users go on the lowest # priority queue priority = 1 else: # The priority of other jobs is determined by the # account tier of the project priority = job.project.account.tier.id queue = queues[min(len(queues), priority) - 1] # Add the job's project id, key and secrets to its kwargs. # Doing this here ensures it is done for all jobs # and avoids putting the secrets in the job's `params` field. kwargs = dict(**job.params) if job.params else {} kwargs["project"] = job.project.id if job.project else None kwargs["key"] = job.key kwargs["secrets"] = job.secrets # Send the job to the queue task = signature( job.method, kwargs=kwargs, queue=queue.name, task_id=str(job.id), app=app, ) task.apply_async() job.queue = queue job.is_active = True job.status = JobStatus.DISPATCHED.value job.save() return job def update_job(job: Job, data={}, force: bool = False) -> Job: """ Update a job. This method is triggered by a PATCH request from the `overseer` service. It updates the status, and other fields of the job, and if the job has a parent, updates its status too. See https://stackoverflow.com/a/38267978 for important considerations in using AsyncResult. """ # Avoid unnecessary update if not job.is_active and not force: return job was_active = job.is_active if JobMethod.is_compound(job.method): # Update the status of compound jobs based on children status = job.status is_active = False all_previous_succeeded = True any_previous_failed = False for child in job.get_children(): # If the child has a 'higher' status then update the # status of the compound job status = JobStatus.highest([status, child.status]) # If the child is still waiting then... if child.status == JobStatus.WAITING.value: # If all previous have succeeded, dispatch it if all_previous_succeeded: dispatch_job(child) # If any previous have failed, cancel it elif any_previous_failed: cancel_job(child) if child.status != JobStatus.SUCCESS.value: all_previous_succeeded = False if child.status == JobStatus.FAILURE.value: any_previous_failed = True # If the child is still active then the compound job is active if child.is_active: is_active = True job.is_active = is_active job.status = JobStatus.RUNNING.value if is_active else status else: status = data.get("status") assert status # Do not do anything if the new status is lower rank than the # existing status. 
This can happen, for example, when a job is # terminated (the SUCCESS state is sent after TERMINATED) if JobStatus.rank(status) < JobStatus.rank(job.status): return job # Update fields sent by `overseer` service, including `status` for key, value in data.items(): setattr(job, key, value) def async_result(): return AsyncResult(str(job.id), app=app) # If job succeeded then get the result if we haven't already if status == JobStatus.SUCCESS.value and job.result is None: response = None attempts = 0 while not response and attempts < 5: try: response = async_result().get(timeout=30) except Exception: # Catch all errors, but log them. Occasional # errors encountered in prod include ResponseError and TimeoutError logger.warning( "Error getting async result", exc_info=True, extra=dict(id=job.id, method=job.method, attempts=attempts), ) time.sleep(1) attempts += 1 if response: job.result = response.get("result") job.log = response.get("log") else: logger.error( "Unable to get async result", extra=dict(id=job.id, method=job.method, attempts=attempts), ) job.status = JobStatus.FAILURE.value job.error = dict( type="RuntimeError", message="Unable to get result of job" ) # If job failed then get the error # For FAILURE, `info` is the raised Exception elif status == JobStatus.FAILURE.value: info = async_result().info if info: job.error = dict(type=type(info).__name__, message=str(info)) # If the job has just ended then mark it as inactive if JobStatus.has_ended(status): job.is_active = False # If the job is no longer active clear its secrets and run its callback if was_active and not job.is_active: job.secrets = None job.run_callback() # Save before updating parent (and then this again) job.save() # If the job has a parent then update it too if job.parent: update_job(job.parent) return job def cancel_job(job: Job) -> Job: """ Cancel a job. This uses Celery's terminate options which will kill the worker child process. This is not normally recommended but in this case is OK because there is only one task per process. See `worker/worker.py` for the reasoning for using `SIGUSR1`. See https://docs.celeryproject.org/en/stable/userguide/workers.html#revoke-revoking-tasks """ if job.is_active: if JobMethod.is_compound(job.method): for child in job.children.all(): cancel_job(child) else: app.control.revoke(str(job.id), terminate=True, signal="SIGUSR1") job.status = JobStatus.CANCELLED.value job.is_active = False job.secrets = None job.save() return job
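The result-fetching loop in update_job above is a bounded-retry pattern worth noting on its own: try a flaky call a fixed number of times, log each failure, and back off briefly between attempts. A generic sketch of the pattern; the fetch callable here is a placeholder, not Celery:

import logging
import time

logger = logging.getLogger(__name__)

def get_with_retries(fetch, attempts=5, delay=1.0):
    # `fetch` is any zero-argument callable that may raise transiently,
    # e.g. a wrapper around AsyncResult(...).get(timeout=30).
    for attempt in range(attempts):
        try:
            return fetch()
        except Exception:
            logger.warning("fetch failed", exc_info=True,
                           extra=dict(attempt=attempt))
            time.sleep(delay)
    return None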
37.013841
102
0.598299
0
0
0
0
0
0
0
0
4,179
0.39067
29a916eb7d2d8321665bd4ae8b4fed316f3bc30f
217
py
Python
sklearn-nlp/utils/data_utils.py
fmailhot/sklearn-nlp
3de76cb71fc85bc1231bdfa9cd78b5f98a0f14f7
[ "BSD-3-Clause" ]
null
null
null
sklearn-nlp/utils/data_utils.py
fmailhot/sklearn-nlp
3de76cb71fc85bc1231bdfa9cd78b5f98a0f14f7
[ "BSD-3-Clause" ]
null
null
null
sklearn-nlp/utils/data_utils.py
fmailhot/sklearn-nlp
3de76cb71fc85bc1231bdfa9cd78b5f98a0f14f7
[ "BSD-3-Clause" ]
null
null
null
"""Data loading/munging utilities. This will need to leverage a lot of existing stuff (e.g. numpy.genfromtxt)... """ import logging class DataLoader(object): def __init__(self, data_files=None): pass
15.5
50
0.700461
79
0.364055
0
0
0
0
0
0
117
0.539171
29aa089f836846e2e53f80e15d88b7aa8aa740d4
12,785
py
Python
assignment2/ptb-lm-loss-compute.py
adijo/ift6135-rnn
88ebcd621cea4042f5ada688f2452ce25d02b761
[ "Apache-2.0" ]
null
null
null
assignment2/ptb-lm-loss-compute.py
adijo/ift6135-rnn
88ebcd621cea4042f5ada688f2452ce25d02b761
[ "Apache-2.0" ]
null
null
null
assignment2/ptb-lm-loss-compute.py
adijo/ift6135-rnn
88ebcd621cea4042f5ada688f2452ce25d02b761
[ "Apache-2.0" ]
null
null
null
#!/bin/python # coding: utf-8 import argparse import time import collections import os import sys import torch import torch.nn from torch.autograd import Variable import torch.nn as nn import numpy as np from models_grad import RNN, GRU from models_grad import make_model as TRANSFORMER parser = argparse.ArgumentParser(description='PyTorch Penn Treebank Language Modeling') # Arguments you may need to set to run different experiments in 4.1 & 4.2. parser.add_argument('--data', type=str, default='data', help='location of the data corpus') parser.add_argument('--model', type=str, default='TRANSFORMER', help='type of recurrent net (RNN, GRU, TRANSFORMER)') parser.add_argument('--optimizer', type=str, default='SGD_LR_SCHEDULE', help='optimization algo to use; SGD, SGD_LR_SCHEDULE, ADAM') parser.add_argument('--seq_len', type=int, default=35, help='number of timesteps over which BPTT is performed') parser.add_argument('--batch_size', type=int, default=20, help='size of one minibatch') parser.add_argument('--initial_lr', type=float, default=20.0, help='initial learning rate') parser.add_argument('--hidden_size', type=int, default=512, help='size of hidden layers. IMPORTANT: for the transformer\ this must be a multiple of 16.') parser.add_argument('--save_best', action='store_true', help='save the model for the best validation performance') parser.add_argument('--num_layers', type=int, default=2, help='number of hidden layers in RNN/GRU, or number of transformer blocks in TRANSFORMER') # Other hyperparameters you may want to tune in your exploration parser.add_argument('--emb_size', type=int, default=200, help='size of word embeddings') parser.add_argument('--num_epochs', type=int, default=40, help='number of epochs to stop after') parser.add_argument('--dp_keep_prob', type=float, default=0.35, help='dropout *keep* probability. drop_prob = 1-dp_keep_prob \ (dp_keep_prob=1 means no dropout)') # Arguments that you may want to make use of / implement more code for parser.add_argument('--debug', action='store_true') parser.add_argument('--save_dir', type=str, default='', help='path to save the experimental config, logs, model \ This is automatically generated based on the command line \ arguments you pass and only needs to be set if you want a \ custom dir name') parser.add_argument('--evaluate', action='store_true', help="use this flag to run on the test set. Only do this \ ONCE for each model setting, and only after you've \ completed ALL hyperparameter tuning on the validation set.\ Note we are not requiring you to do this.") # DO NOT CHANGE THIS (setting the random seed makes experiments deterministic, # which helps for reproducibility) parser.add_argument('--seed', type=int, default=1111, help='random seed') args = parser.parse_args() argsdict = args.__dict__ argsdict['code_file'] = sys.argv[0] # Use the model, optimizer, and the flags passed to the script to make the # name for the experimental dir print("\n########## Setting Up Experiment ######################") flags = [flag.lstrip('--') for flag in sys.argv[1:]] current_script_path = os.path.dirname(os.path.realpath(__file__)) experiment_path = os.path.join(os.path.sep, current_script_path, args.save_dir, '_'.join([argsdict['model'], argsdict['optimizer']] + flags)) # Increment a counter so that previous results with the same args will not # be overwritten. Comment out the next four lines if you only want to keep # the most recent results. 
i = 0 while os.path.exists(experiment_path + "_" + str(i)): i += 1 experiment_path = experiment_path + "_" + str(i) # Creates an experimental directory and dumps all the args to a text file os.makedirs(experiment_path) print("\nPutting log in %s" % experiment_path) argsdict['save_dir'] = experiment_path with open(os.path.join(experiment_path, 'exp_config.txt'), 'w') as f: for key in sorted(argsdict): f.write(key+' '+str(argsdict[key])+'\n') # Set the random seed manually for reproducibility. torch.manual_seed(args.seed) # Use the GPU if you have one if torch.cuda.is_available(): print("Using the GPU") device = torch.device("cuda") else: print("WARNING: You are about to run on cpu, and this will likely run out \ of memory. \n You can try setting batch_size=1 to reduce memory usage") device = torch.device("cpu") ############################################################################### # # DATA LOADING & PROCESSING # ############################################################################### # HELPER FUNCTIONS def _read_words(filename): with open(filename, "r") as f: return f.read().replace("\n", "<eos>").split() def _build_vocab(filename): data = _read_words(filename) counter = collections.Counter(data) count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0])) words, _ = list(zip(*count_pairs)) word_to_id = dict(zip(words, range(len(words)))) id_to_word = dict((v, k) for k, v in word_to_id.items()) return word_to_id, id_to_word def _file_to_word_ids(filename, word_to_id): data = _read_words(filename) return [word_to_id[word] for word in data if word in word_to_id] # Processes the raw data from text files def ptb_raw_data(data_path=None, prefix="ptb"): train_path = os.path.join(data_path, prefix + ".train.txt") valid_path = os.path.join(data_path, prefix + ".valid.txt") test_path = os.path.join(data_path, prefix + ".test.txt") word_to_id, id_2_word = _build_vocab(train_path) train_data = _file_to_word_ids(train_path, word_to_id) valid_data = _file_to_word_ids(valid_path, word_to_id) test_data = _file_to_word_ids(test_path, word_to_id) return train_data, valid_data, test_data, word_to_id, id_2_word # Yields minibatches of data def ptb_iterator(raw_data, batch_size, num_steps): raw_data = np.array(raw_data, dtype=np.int32) data_len = len(raw_data) batch_len = data_len // batch_size data = np.zeros([batch_size, batch_len], dtype=np.int32) for i in range(batch_size): data[i] = raw_data[batch_len * i:batch_len * (i + 1)] epoch_size = (batch_len - 1) // num_steps if epoch_size == 0: raise ValueError("epoch_size == 0, decrease batch_size or num_steps") for i in range(epoch_size): x = data[:, i*num_steps:(i+1)*num_steps] y = data[:, i*num_steps+1:(i+1)*num_steps+1] yield (x, y) class Batch: """ Data processing for the transformer. This class adds a mask to the data. """ def __init__(self, x, pad=-1): self.data = x self.mask = self.make_mask(self.data, pad) @staticmethod def make_mask(data, pad): """ Create a mask to hide future words. """ def subsequent_mask(size): """ helper function for creating the masks. 
""" attn_shape = (1, size, size) subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8') return torch.from_numpy(subsequent_mask) == 0 mask = (data != pad).unsqueeze(-2) mask = mask & Variable( subsequent_mask(data.size(-1)).type_as(mask.data)) return mask # LOAD DATA print('Loading data from '+args.data) raw_data = ptb_raw_data(data_path=args.data) train_data, valid_data, test_data, word_to_id, id_2_word = raw_data vocab_size = len(word_to_id) print(' vocabulary size: {}'.format(vocab_size)) ############################################################################### # # MODEL SETUP # ############################################################################### # NOTE ============================================== # This is where your model code will be called. You may modify this code # if required for your implementation, but it should not typically be necessary, # and you must let the TAs know if you do so. if args.model == 'RNN': print("seq_length", args.seq_len) print("batch_size", args.batch_size) model = RNN(emb_size=args.emb_size, hidden_size=args.hidden_size, seq_len=args.seq_len, batch_size=args.batch_size, vocab_size=vocab_size, num_layers=args.num_layers, dp_keep_prob=args.dp_keep_prob) elif args.model == 'GRU': model = GRU(emb_size=args.emb_size, hidden_size=args.hidden_size, seq_len=args.seq_len, batch_size=args.batch_size, vocab_size=vocab_size, num_layers=args.num_layers, dp_keep_prob=args.dp_keep_prob) elif args.model == 'TRANSFORMER': if args.debug: # use a very small model model = TRANSFORMER(vocab_size=vocab_size, n_units=16, n_blocks=2) else: # Note that we're using num_layers and hidden_size to mean slightly # different things here than in the RNNs. # Also, the Transformer also has other hyper-parameters # (such as the number of attention heads) which can change it's behavior. model = TRANSFORMER(vocab_size=vocab_size, n_units=args.hidden_size, n_blocks=args.num_layers, dropout=1.-args.dp_keep_prob) # these 3 attributes don't affect the Transformer's computations; # they are only used in run_epoch model.batch_size = args.batch_size model.seq_len = args.seq_len model.vocab_size = vocab_size else: print("Model type not recognized.") model = model.to(device) # LOSS FUNCTION loss_fn = nn.CrossEntropyLoss() if args.optimizer == 'ADAM': optimizer = torch.optim.Adam(model.parameters(), lr=args.initial_lr) # LEARNING RATE SCHEDULE lr = args.initial_lr lr_decay_base = 1 / 1.15 m_flat_lr = 14.0 # we will not touch lr for the first m_flat_lr epochs ############################################################################### # # DEFINE COMPUTATIONS FOR PROCESSING ONE EPOCH # ############################################################################### def repackage_hidden(h): """ Wraps hidden states in new Tensors, to detach them from their history. This prevents Pytorch from trying to backpropagate into previous input sequences when we use the final hidden states from one mini-batch as the initial hidden states for the next mini-batch. Using the final hidden states in this way makes sense when the elements of the mini-batches are actually successive subsequences in a set of longer sequences. This is the case with the way we've processed the Penn Treebank dataset. """ if isinstance(h, Variable): return h.detach_() else: return tuple(repackage_hidden(v) for v in h) def run_epoch(model, data): """ One epoch of training/validation (depending on flag is_train). 
""" model.eval() state_dict = torch.load('saved_model.pt', map_location="cpu") model.load_state_dict(state_dict) total_loss = np.zeros(model.seq_len) steps = 0 # LOOP THROUGH MINI BATCHES for step, (x, y) in enumerate(ptb_iterator(data, model.batch_size, model.seq_len)): steps += 1 if args.model != 'TRANSFORMER': hidden = model.init_hidden() hidden = hidden.to(device) if args.model == 'TRANSFORMER': batch = Batch(torch.from_numpy(x).long().to(device)) model.zero_grad() outputs = model.forward(batch.data, batch.mask).transpose(1, 0) # print ("outputs.shape", outputs.shape) else: inputs = torch.from_numpy(x.astype(np.int64)).transpose(0, 1).contiguous().to(device)#.cuda() model.zero_grad() hidden = repackage_hidden(hidden) outputs, hidden = model(inputs, hidden) targets = torch.from_numpy(y.astype(np.int64)).transpose(0, 1).contiguous().to(device)#.cuda() total_loss += np.array([loss_fn(outputs[i], targets[i]).item() for i in range(len(outputs))]) total_loss /= float(steps) print(total_loss) ############################################################################### # # RUN MAIN LOOP (TRAIN AND VAL) # ############################################################################### print("\n########## Running Main Loop ##########################") # Gradient compute num_epochs = 1 # MAIN LOOP for epoch in range(num_epochs): # RUN MODEL ON VALID DATA run_epoch(model, valid_data)
38.509036
141
0.630504
767
0.059992
630
0.049276
544
0.04255
0
0
5,452
0.426437
29aa6576959454006572496dfd5c5ae886a2c7c2
78
py
Python
Configuration/Eras/python/Modifier_run3_nanoAOD_devel_cff.py
malbouis/cmssw
16173a30d3f0c9ecc5419c474bb4d272c58b65c8
[ "Apache-2.0" ]
852
2015-01-11T21:03:51.000Z
2022-03-25T21:14:00.000Z
Configuration/Eras/python/Modifier_run3_nanoAOD_devel_cff.py
gartung/cmssw
3072dde3ce94dcd1791d778988198a44cde02162
[ "Apache-2.0" ]
30,371
2015-01-02T00:14:40.000Z
2022-03-31T23:26:05.000Z
Configuration/Eras/python/Modifier_run3_nanoAOD_devel_cff.py
gartung/cmssw
3072dde3ce94dcd1791d778988198a44cde02162
[ "Apache-2.0" ]
3,240
2015-01-02T05:53:18.000Z
2022-03-31T17:24:21.000Z
import FWCore.ParameterSet.Config as cms run3_nanoAOD_devel = cms.Modifier()
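For context, era modifiers like this one are consumed elsewhere via toModify. A hedged sketch, assuming a CMSSW environment; DemoProducer and threshold are made-up illustration names, not real CMSSW objects:

import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Modifier_run3_nanoAOD_devel_cff import run3_nanoAOD_devel

# Hypothetical producer; toModify rewrites it only when the modifier is
# active in the process being built.
demoProducer = cms.EDProducer("DemoProducer", threshold=cms.double(1.0))
run3_nanoAOD_devel.toModify(demoProducer, threshold=2.0)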
19.5
40
0.820513
0
0
0
0
0
0
0
0
0
0
29aa65c529d8ece9233ccff13d236d4bc2a7ac6d
4,892
py
Python
python-3.4.4.amd64/Lib/site-packages/idlexlib/extensions/ClearWindow.py
CSnap/photogate
208272ef39f4e86f40d431da2ca523e21701f789
[ "CC0-1.0" ]
2
2018-12-29T13:47:40.000Z
2018-12-29T13:47:49.000Z
Build/External/WPy3710/python-3.7.1/Lib/site-packages/idlexlib/extensions/ClearWindow.py
Heono/Turtle-IDE
aa42dd8f658284601b1a8d3ffb92f157de5022e2
[ "MIT" ]
1
2022-03-17T16:46:04.000Z
2022-03-17T16:46:04.000Z
Lib/site-packages/idlexlib/extensions/ClearWindow.py
JWerbrouck/RWTH_M1_Projekt
7ae63a2277361fa3273cf0677b297379482b8240
[ "bzip2-1.0.6" ]
null
null
null
# IDLEX EXTENSION ## """ ## Copyright(C) 2011-2012 The Board of Trustees of the University of Illinois. ## All rights reserved. ## ## Developed by: Roger D. Serwy ## University of Illinois ## ## Permission is hereby granted, free of charge, to any person obtaining ## a copy of this software and associated documentation files (the ## "Software"), to deal with the Software without restriction, including ## without limitation the rights to use, copy, modify, merge, publish, ## distribute, sublicense, and/or sell copies of the Software, and to ## permit persons to whom the Software is furnished to do so, subject to ## the following conditions: ## ## + Redistributions of source code must retain the above copyright ## notice, this list of conditions and the following disclaimers. ## + Redistributions in binary form must reproduce the above copyright ## notice, this list of conditions and the following disclaimers in the ## documentation and/or other materials provided with the distribution. ## + Neither the names of Roger D. Serwy, the University of Illinois, nor ## the names of its contributors may be used to endorse or promote ## products derived from this Software without specific prior written ## permission. ## ## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ## OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ## IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ## ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF ## CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH ## THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE. ## ## ## ## ## Clear Window Extension ## ## About: ## ## It provides "Clear Shell Window" under "Options" ## with ability to undo. 
## ## Part of Issue 6143 ## ## """ config_extension_def = """ [ClearWindow] enable=1 enable_editor=0 enable_shell=1 [ClearWindow_cfgBindings] clear-window=<Control-Key-l> """ jn = lambda x,y: '%i.%i' % (x,y) # join integers to text coordinates sp = lambda x: tuple(map(int, x.split('.'))) # convert tkinter Text coordinate to a line and column tuple import sys import re from idlelib.UndoDelegator import DeleteCommand ansi_re = re.compile(r'\x01?\x1b\[(.*?)m\x02?') def strip_ansi(s): return ansi_re.sub("", s) class ClearWindow: menudefs = [ ('options', [ ('Clear Shell Window', '<<clear-window>>'), ]),] def __init__(self, editwin): self.editwin = editwin self.text = self.editwin.text self.text.bind("<<clear-window>>", self.clear_window) def clear_window_event(self, ev=None): self.clear_window(ev) return "break" def clear_window(self, event): per = self.editwin.per text = per.bottom iomark_orig = text.index('iomark') line_io, col_io = sp(iomark_orig) # if cursor is at the prompt, preserve the prompt (multiline) prompt = strip_ansi(sys.ps1) backlines = prompt.count('\n') prompt_start = jn(line_io-backlines, 0) maybe_prompt = text.get(prompt_start, prompt_start + '+%ic' % len(prompt)) at_prompt = maybe_prompt == prompt if at_prompt: endpos = text.index(prompt_start) else: endpos = text.index('iomark linestart') dump = text.dump('1.0', endpos, all=True) # Add a command to the undo delegator undo = self.editwin.undo if undo: dc = ClearWindowDeleteCommand('1.0', endpos, dump) undo.addcmd(dc) text.edit_reset() # clear out Tkinter's undo history class ClearWindowDeleteCommand(DeleteCommand): def __init__(self, index1, index2, dump): DeleteCommand.__init__(self, index1, index2) self.dump = dump def do(self, text): text.delete(self.index1, self.index2) text.see('insert') def redo(self, text): text.delete(self.index1, self.index2) text.see('insert') def undo(self, text): # inspired by "Serializing a text widget" at http://wiki.tcl.tk/9167 dump = self.dump tag = {} # remember the index where a tag was activated for key, value, index in dump: if key == 'text': text.insert(index, value, '') elif key == 'tagon': tag[value] = index elif key == 'tagoff': text.tag_add(value, tag[value], index) del tag[value] # extend existing tags to the end position for value in tag: text.tag_add(value, tag[value], self.index2) text.see('insert')
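The undo() method above rebuilds the cleared region from text.dump() output, the Tcl/Tk serialization trick it credits. A minimal standalone round-trip of that idea in plain tkinter (assuming a display is available; tag restoration is elided for brevity):

import tkinter as tk

root = tk.Tk()
text = tk.Text(root)
text.insert("1.0", "hello world")
text.tag_add("hi", "1.0", "1.5")
dump = text.dump("1.0", "end-1c", all=True)   # serialize text + tags + marks
text.delete("1.0", "end")
for key, value, index in dump:
    if key == "text":                          # replay only the text segments
        text.insert(index, value, "")
print(text.get("1.0", "end-1c"))               # -> "hello world"
root.destroy()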
33.737931
107
0.634914
2,360
0.48242
0
0
0
0
0
0
2,678
0.547424
29aa9a45456c6db0c06ce0852d48191f56cbe430
104
py
Python
src/hardware/GPIO_Map.py
lbowes/ascii-pong
00e5a5f4b33a360f14299e6fc33f862880e5fb8f
[ "MIT" ]
null
null
null
src/hardware/GPIO_Map.py
lbowes/ascii-pong
00e5a5f4b33a360f14299e6fc33f862880e5fb8f
[ "MIT" ]
null
null
null
src/hardware/GPIO_Map.py
lbowes/ascii-pong
00e5a5f4b33a360f14299e6fc33f862880e5fb8f
[ "MIT" ]
1
2021-03-10T17:22:00.000Z
2021-03-10T17:22:00.000Z
GPIO_CON_1_BUT_1 = 10 GPIO_CON_1_BUT_2 = 9 GPIO_CON_2_BUT_1 = 11 GPIO_CON_2_BUT_2 = 14 GPIO_BUZZER = 15
17.333333
21
0.807692
0
0
0
0
0
0
0
0
0
0
29ac9c03bbaa51b34d7d739bc8607fc9dd0af610
309
py
Python
main.py
yaojenkuo/stockflow
946609c2fcc1d602032672b57ae7119b4cadae8d
[ "MIT" ]
33
2015-03-08T00:43:37.000Z
2021-02-18T23:40:05.000Z
main.py
Asoul/stockflow
946609c2fcc1d602032672b57ae7119b4cadae8d
[ "MIT" ]
null
null
null
main.py
Asoul/stockflow
946609c2fcc1d602032672b57ae7119b4cadae8d
[ "MIT" ]
25
2015-03-07T15:57:23.000Z
2021-07-05T01:32:32.000Z
#!/bin/python
# -*- coding: utf-8 -*-
'''Basic example format'''
import sys
from ctrls.Tester import Tester
from models.exampleModel import exampleModel

def main():
    numbers = ['1314']  # stock ticker numbers
    tester = Tester(numbers, exampleModel)  # use the tester component
    tester.run()  # run the simulation

if __name__ == '__main__':
    sys.exit(main())
17.166667
50
0.647249
0
0
0
0
0
0
0
0
118
0.342029
29ad5c4ad4e9d3f8e84eb705d16ecf7d414f2aac
4,025
py
Python
tests/test_preprocessing_evaluation_pipelines.py
CLARIN-PL/embeddings
49fb59b796475ca92bc262ec2bc6def1d89a10e0
[ "MIT" ]
33
2021-06-15T12:09:29.000Z
2022-03-26T14:34:16.000Z
tests/test_preprocessing_evaluation_pipelines.py
CLARIN-PL/embeddings
49fb59b796475ca92bc262ec2bc6def1d89a10e0
[ "MIT" ]
201
2021-03-23T05:50:23.000Z
2022-03-31T22:56:04.000Z
tests/test_preprocessing_evaluation_pipelines.py
CLARIN-PL/embeddings
49fb59b796475ca92bc262ec2bc6def1d89a10e0
[ "MIT" ]
null
null
null
from tempfile import TemporaryDirectory from typing import Any, Dict, Tuple import datasets import flair import numpy as np import pytest import torch from flair.data import Corpus from numpy import typing as nptyping from embeddings.data.data_loader import HuggingFaceDataLoader from embeddings.data.dataset import HuggingFaceDataset from embeddings.pipeline.evaluation_pipeline import ( FlairSequenceLabelingEvaluationPipeline, ModelEvaluationPipeline, ) from embeddings.pipeline.preprocessing_pipeline import PreprocessingPipeline from embeddings.transformation.flair_transformation.column_corpus_transformation import ( ColumnCorpusTransformation, ) from embeddings.transformation.flair_transformation.downsample_corpus_transformation import ( DownsampleFlairCorpusTransformation, ) from embeddings.transformation.flair_transformation.split_sample_corpus_transformation import ( SampleSplitsFlairCorpusTransformation, ) from embeddings.utils.flair_corpus_persister import FlairConllPersister @pytest.fixture def result_path() -> "TemporaryDirectory[str]": return TemporaryDirectory() @pytest.fixture def embedding_name() -> str: return "allegro/herbert-base-cased" @pytest.fixture def ner_dataset_name() -> str: return "clarin-pl/kpwr-ner" @pytest.fixture def hidden_size() -> int: return 256 @pytest.fixture def task_train_kwargs() -> Dict[str, int]: return {"max_epochs": 1, "mini_batch_size": 256} @pytest.fixture def sequence_labeling_preprocessing_pipeline( result_path: "TemporaryDirectory[str]", embedding_name: str, ner_dataset_name: str, ) -> Tuple[PreprocessingPipeline[str, datasets.DatasetDict, Corpus], "TemporaryDirectory[str]"]: dataset = HuggingFaceDataset(ner_dataset_name) data_loader = HuggingFaceDataLoader() transformation = ( ColumnCorpusTransformation("tokens", "ner") .then(SampleSplitsFlairCorpusTransformation(dev_fraction=0.1, test_fraction=0.1, seed=441)) .then(DownsampleFlairCorpusTransformation(percentage=0.005)) .persisting(FlairConllPersister(result_path.name)) ) pipeline = PreprocessingPipeline( dataset=dataset, data_loader=data_loader, transformation=transformation ) return pipeline, result_path @pytest.fixture def sequence_labeling_evaluation_pipeline( result_path: "TemporaryDirectory[str]", embedding_name: str, ner_dataset_name: str, hidden_size: int, task_train_kwargs: Dict[str, int], ) -> Tuple[ ModelEvaluationPipeline[str, Corpus, Dict[str, nptyping.NDArray[Any]], Dict[str, Any]], "TemporaryDirectory[str]", ]: pipeline = FlairSequenceLabelingEvaluationPipeline( dataset_path=result_path.name, embedding_name=embedding_name, output_path=result_path.name, hidden_size=hidden_size, persist_path=None, task_train_kwargs=task_train_kwargs, ) return pipeline, result_path def test_sequence_labeling_preprocessing_pipeline( result_path: "TemporaryDirectory[str]", embedding_name: str, ner_dataset_name: str, hidden_size: int, task_train_kwargs: Dict[str, int], sequence_labeling_preprocessing_pipeline: Tuple[ PreprocessingPipeline[str, datasets.DatasetDict, Corpus], "TemporaryDirectory[str]" ], sequence_labeling_evaluation_pipeline: Tuple[ ModelEvaluationPipeline[str, Corpus, Dict[str, nptyping.NDArray[Any]], Dict[str, Any]], "TemporaryDirectory[str]", ], ) -> None: flair.set_seed(441) flair.device = torch.device("cpu") preprocessing_pipeline, path = sequence_labeling_preprocessing_pipeline preprocessing_pipeline.run() evaluation_pipeline, _ = sequence_labeling_evaluation_pipeline result = evaluation_pipeline.run() np.testing.assert_almost_equal( 
result["seqeval__mode_None__scheme_None"]["overall_accuracy"], 0.7881773 ) np.testing.assert_almost_equal(result["seqeval__mode_None__scheme_None"]["overall_f1"], 0) path.cleanup()
31.692913
99
0.766957
0
0
0
0
1,907
0.473789
0
0
391
0.097143
29adb65f2ba3f76e7586b891107a612d5e21f5e3
672
py
Python
Exercises/Exercises_01/06_exercise.py
Szymon-Budziak/ASD_exercises_solutions
36ccbdae03a6c7e4ad141a2b7b01bef9353574ee
[ "MIT" ]
7
2021-12-28T23:38:42.000Z
2022-03-29T16:36:16.000Z
Exercises/Exercises_01/06_exercise.py
Szymon-Budziak/ASD_exercises_solutions
36ccbdae03a6c7e4ad141a2b7b01bef9353574ee
[ "MIT" ]
null
null
null
Exercises/Exercises_01/06_exercise.py
Szymon-Budziak/ASD_exercises_solutions
36ccbdae03a6c7e4ad141a2b7b01bef9353574ee
[ "MIT" ]
4
2021-06-29T20:21:52.000Z
2022-03-12T10:04:17.000Z
# Implement a function that takes a non-decreasingly sorted array A of size n
# and a number x, and checks whether x occurs in A. If it does, return the
# smallest index at which x occurs.


def binary_search(T, i, j, x):
    if i > j:
        return None
    c = (i + j) // 2
    if T[c] == x:
        value = binary_search(T, i, c - 1, x)
        if value is None:
            return c
        return value
    if T[c] > x:
        return binary_search(T, i, c - 1, x)
    else:
        return binary_search(T, c + 1, j, x)


T = [0, 1, 2, 3, 4, 5, 5, 5, 6]
for i in range(len(T)):
    print(i, binary_search(T, 0, len(T) - 1, T[i]))
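For comparison, the standard library reaches the same leftmost occurrence with bisect_left; a short sketch:

from bisect import bisect_left

def first_index(T, x):
    # Leftmost position where x could be inserted; it is an occurrence of x
    # only if the element sitting there actually equals x.
    i = bisect_left(T, x)
    return i if i < len(T) and T[i] == x else None

T = [0, 1, 2, 3, 4, 5, 5, 5, 6]
print(first_index(T, 5))  # -> 5, matching binary_search above

Both run in O(log n); bisect_left just avoids Python-level recursion.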
28
103
0.574405
0
0
0
0
0
0
0
0
235
0.343066
29ae59f7491eb508b08d30811e2ad409b6a63558
4,508
py
Python
lib/sentencers/RuleBasedSentencer.py
gucorpling/GumDrop
06e705adc5b78b048f199a3d6f50d911fed398e2
[ "Apache-2.0" ]
null
null
null
lib/sentencers/RuleBasedSentencer.py
gucorpling/GumDrop
06e705adc5b78b048f199a3d6f50d911fed398e2
[ "Apache-2.0" ]
null
null
null
lib/sentencers/RuleBasedSentencer.py
gucorpling/GumDrop
06e705adc5b78b048f199a3d6f50d911fed398e2
[ "Apache-2.0" ]
null
null
null
import re, io, os, sys from nltk import word_tokenize from argparse import ArgumentParser # Allow package level imports in module script_dir = os.path.dirname(os.path.realpath(__file__)) lib = os.path.abspath(script_dir + os.sep + "..") sys.path.append(lib) from conll_reader import space_join, text2conllu class RuleBasedSplitter: def __init__(self,lang="eng"): lang_map = {"deu":"german","eng":"english","spa":"spanish","fra":"french","nld":"dutch","rus":"russian", "eus":"basque","por":"portuguese", "zho": "chinese", "tur":"turkish"} self.lang = lang self.name = "RuleBasedSplitter" self.long_lang = lang_map[lang] if lang in lang_map else lang def predict(self,conllu): if "\t" not in conllu: # this is a token list, not conllu string conllu = text2conllu(" ".join(conllu)) tokens = space_join(conllu) tokens = tokens.split() # Run RuleBased sentence tokenize with open(script_dir + os.sep + "frequency", 'r', encoding='utf-8') as f: data = [line.strip().split() for line in f.readlines()] sent_inital = {d[0]: d[1:] for d in data} ratios ={} for word in sent_inital[self.lang]: if word.count("|") == 2: w, r, f = word.split("|") r = float(r) f = int(f) ratios[w] = r processed = [] for token in tokens: if token in ratios: token = "//<->//" + token processed.append(token) # Reconstruct text with heuristics text = " ".join(processed) text = re.sub(r" ([.,,!?;;::!?。)\]}%])", r'\1', text) text = re.sub(r"([$([{]) ", r'\1', text) endpunct = "[!?。.!?]" text = re.sub("(" + endpunct + ")", r'\1//<->//', text) sents = re.split('(?://<->// ?)+', text) sents = [s for s in sents if len(s.strip()) > 0] # Realign to input tokens tabbed = "\t".join(sents) tabbed = "\t" + tabbed.replace(" ","") output = [] for tok in tokens: ratio = ratios[tok] if tok in ratios else -1.0 if tabbed.startswith("\t"): # This is a split point output.append((1,ratio)) # Prediction is 1 (='segment') probability is 1.0 tabbed = tabbed[1:] else: output.append((0,0.0)) # Prediction is 0 (='non segment') probability is 0.0 if tabbed.startswith(tok): tabbed = tabbed[len(tok):] # Verify we are returning as many predictions as we received input tokens assert len(tokens) == len(output) return output if __name__ == "__main__": p = ArgumentParser() p.add_argument("-f", "--file", default=None, help="file to tokenize") p.add_argument("-l", "--lang", default="eng", help="language 3 letter code", choices=["eng", "spa", "fra", "deu", "eus", "nld", "rus", "por", "zho", "tur"]) opts = p.parse_args() infile = opts.file lang = opts.lang # Run test sentencer = RuleBasedSplitter(lang=lang) if infile is None: # Some default test tokens if no file provided if lang == "zho": tokens = ['闽', '台', '经贸', '合作', '的', '深入', '发展', '为', '福建', '汽车', '工业', '注入', '了', '生机', '。', '去年', '初', '以来', ',', '台湾', '最', '具', '实力', '的', '汽车', '公司', '——', '裕隆', '集团', '中华', '汽车', '公司', '多', '次', '组', '团', '访', '闽', ',', '就', '合作', '发展', '汽车', '工业', '进行', '了', '积极', '的', '蹉商', ';', "新华社", '福建', '方面', '则', '成立', '了', '由', '省委', '书记', '贾庆林', '、', '省长', '陈明义', '任', '正', '、', '副', '组长', '的', '省', '汽车', '工业', '领导', '小组', ',', '将', '发展', '本', '省', '汽车', '工业', '摆上', '重要', '议事', '日程', '。'] elif lang == "nld": tokens = ['Een', 'ieder', 'heeft', 'recht', 'op', 'onderwijs', ';', 'het', 'onderwijs', 'zal', 'kosteloos', 'zijn,', 'althans', 'wat', 'het', 'lager', 'en', 'basisonderwijs', 'betreft', '.', 'Het', 'lager', 'onderwijs', 'zal', 'verplicht', 'zijn', '.', 'Ambachtsonderwijs', 'en', 'beroepsopleiding', 'zullen', 'algemeen', 'beschikbaar', 'worden', 'gesteld', '.', 'Hoger', 'onderwijs', 'zal', 
'openstaan', 'voor', 'een', 'ieder,', 'die', 'daartoe', 'de', 'begaafdheid', 'bezit', '.', 'Het', 'onderwijs', 'zal', 'gericht', 'zijn', 'op', 'de', 'volle', 'ontwikkeling', 'van', 'de', 'menselijke', 'persoonlijkheid', 'en', 'op', 'de', 'versterking', 'van', 'de', 'eerbied', 'voor', 'de', 'rechten', 'van', 'de', 'mens', 'en', 'de', 'fundamentele', 'vrijheden', '.'] else: tokens = ['Introduction', 'Research', 'has', 'shown', 'examples', '.', 'But', 'we', 'need', 'more', '.'] else: text = io.open(infile, encoding="utf8").read() tokens = word_tokenize(text) sent_starts = sentencer.predict(tokens) print([(tok, boundary) for tok, boundary in (zip(tokens, sent_starts))])
37.882353
110
0.562999
2,036
0.42399
0
0
0
0
0
0
2,161
0.450021
29b0e35636d971fec8136ffc141e0dd2c3c239b5
2,878
py
Python
pyogp/lib/client/tests/test_appearance.py
grobertson/PyOGP.lib.Client
681492d95b9a901a79071b70c77bfdd55cdb02db
[ "Apache-2.0" ]
null
null
null
pyogp/lib/client/tests/test_appearance.py
grobertson/PyOGP.lib.Client
681492d95b9a901a79071b70c77bfdd55cdb02db
[ "Apache-2.0" ]
null
null
null
pyogp/lib/client/tests/test_appearance.py
grobertson/PyOGP.lib.Client
681492d95b9a901a79071b70c77bfdd55cdb02db
[ "Apache-2.0" ]
null
null
null
""" Contributors can be viewed at: http://svn.secondlife.com/svn/linden/projects/2008/pyogp/lib/base/trunk/CONTRIBUTORS.txt $LicenseInfo:firstyear=2008&license=apachev2$ Copyright 2009, Linden Research, Inc. Licensed under the Apache License, Version 2.0. You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0 or in http://svn.secondlife.com/svn/linden/projects/2008/pyogp/lib/base/LICENSE.txt $/LicenseInfo$ """ # standard python libs import unittest from binascii import unhexlify #related # pyogp from pyogp.lib.client.appearance import * from pyogp.lib.client.settings import Settings from pyogp.lib.client.agent import Agent from pyogp.lib.client.region import Region from pyogp.lib.base.datatypes import * # pyogp messaging from pyogp.lib.base.message.udpdeserializer import UDPMessageDeserializer # pyogp tests import pyogp.lib.base.tests.config class TestAppearance(unittest.TestCase): def setUp(self): self.settings = Settings() self.agent = Agent() self.appearance = AppearanceManager(self.agent, settings = self.settings) self.agent.agent_id = UUID("01234567-89ab-cdef-0123-456789abcdef") self.agent.session_id = UUID("fedcba98-7654-3210-fedc-ba9876543210") self.agent.region = DummyRegion() def tearDown(self): pass def test_request_agent_wearables(self): self.agent.appearance.request_agent_wearables() packet_list = self.agent.region.dummy_packet_holder self.assertEquals(len(packet_list), 1) packet = packet_list.pop() self.assertEquals(self.agent.agent_id, packet["AgentData"][0]['AgentID']) self.assertEquals(self.agent.session_id, packet["AgentData"][0]['SessionID']) def test_request_agent_noAgentIDorSessionID(self): packet_list = self.agent.region.dummy_packet_holder self.agent.agent_id = None self.agent.appearance.request_agent_wearables() self.assertEquals(len(packet_list), 0) self.agent.agent_id = UUID() self.agent.appearance.request_agent_wearables() self.assertEquals(len(packet_list), 0) self.agent.agent_id = UUID("01234567-89ab-cdef-0123-456789abcdef") self.agent.session_id = None self.agent.appearance.request_agent_wearables() self.assertEquals(len(packet_list), 0) self.agent.session_id = UUID() self.agent.appearance.request_agent_wearables() self.assertEquals(len(packet_list), 0) def test_send_AgentIsNowWearing(self): pass class DummyRegion(Region): dummy_packet_holder = [] def enqueue_message(self, packet, reliable = False): self.dummy_packet_holder.append(packet) def test_suite(): from unittest import TestSuite, makeSuite suite = TestSuite() suite.addTest(makeSuite(TestAppearance)) return suite
32.704545
89
0.723767
1,820
0.632384
0
0
0
0
0
0
679
0.235928
29b119e99bde0832d57541650801a62ec77c42f6
1,017
py
Python
jisho_api/word/cfg.py
finia2NA/jisho-api
c80beb44a7b70f24e799cd2a7d579356c58f8625
[ "Apache-2.0" ]
26
2021-10-05T03:54:33.000Z
2022-03-26T10:46:31.000Z
jisho_api/word/cfg.py
finia2NA/jisho-api
c80beb44a7b70f24e799cd2a7d579356c58f8625
[ "Apache-2.0" ]
7
2021-11-22T00:43:30.000Z
2022-01-12T00:34:22.000Z
jisho_api/word/cfg.py
finia2NA/jisho-api
c80beb44a7b70f24e799cd2a7d579356c58f8625
[ "Apache-2.0" ]
4
2021-12-08T13:41:07.000Z
2022-03-25T20:54:07.000Z
from enum import Enum
from typing import List, Optional

from pydantic import BaseModel, HttpUrl


class Sense(BaseModel):
    class Link(BaseModel):
        text: str
        url: HttpUrl

    class Source(BaseModel):
        language: str

    english_definitions: List[str]
    parts_of_speech: List[Optional[str]]
    links: List[Link]
    tags: List[str]
    restrictions: List[str]
    see_also: List[str]
    antonyms: List[str]
    source: List[Source]
    info: List[str]


class Japanese(BaseModel):
    # Japanese word - a full-fledged kanji form.
    # Optional because some words are written in kana only.
    word: Optional[str]
    # Kana reading
    reading: Optional[str]

    @property
    def name(self):  # fixed: the property was missing its `self` parameter
        if self.word:
            return self.word
        return self.reading


class WordConfig(BaseModel):
    slug: str
    is_common: Optional[bool]
    tags: List[str]
    jlpt: List[str]
    japanese: List[Japanese]
    senses: List[Sense]

    def __iter__(self):
        yield from self.senses
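A minimal usage sketch, assuming the models above are in scope; the payload is hand-made for illustration, not a real Jisho API response:

payload = {
    "slug": "犬",
    "is_common": True,
    "tags": [],
    "jlpt": ["jlpt-n5"],
    "japanese": [{"word": "犬", "reading": "いぬ"}],
    "senses": [{
        "english_definitions": ["dog"],
        "parts_of_speech": ["Noun"],
        "links": [], "tags": [], "restrictions": [],
        "see_also": [], "antonyms": [], "source": [], "info": [],
    }],
}
word = WordConfig(**payload)
print(word.japanese[0].name)      # -> 犬 (falls back to the reading if kana-only)
for sense in word:                # __iter__ yields the senses
    print(sense.english_definitions)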
19.941176
60
0.647984
911
0.895772
50
0.049164
104
0.102262
0
0
106
0.104228
29b134fd22e0ec5acfe0ea6bb8fddd3eb700cbd7
1,018
py
Python
tests/validators/test_symbol_required.py
Ennkua/wtforms
c08ec7840c5a78ae8784139f7ee70f9627cf1ab8
[ "BSD-3-Clause" ]
null
null
null
tests/validators/test_symbol_required.py
Ennkua/wtforms
c08ec7840c5a78ae8784139f7ee70f9627cf1ab8
[ "BSD-3-Clause" ]
null
null
null
tests/validators/test_symbol_required.py
Ennkua/wtforms
c08ec7840c5a78ae8784139f7ee70f9627cf1ab8
[ "BSD-3-Clause" ]
null
null
null
import pytest

from wtforms.validators import symbol_required
from wtforms.validators import ValidationError


@pytest.mark.parametrize("min_v", [2, 3, 4, 5, 6])
def test_correct_symbol_required(min_v, dummy_form, dummy_field):
    """
    It should pass for a string containing the required number of symbols.
    """
    dummy_field.data = "-A%s^D*f(G87KJ@hg8J.&"
    validator = symbol_required(min_v)
    validator(dummy_form, dummy_field)


@pytest.mark.parametrize(
    ("validator", "message"),
    (
        (
            symbol_required(2, "at least 2 symbol"),
            "at least 2 symbol letter",
        ),
        (symbol_required(2), "at least 2 symbol"),
    ),
)
def test_symbol_required_messages(dummy_form, dummy_field, validator, message):
    """
    It should raise ValidationError for a string with too few symbols.
    """
    dummy_field.data = "foo123Bar"
    with pytest.raises(ValidationError) as e:
        validator(dummy_form, dummy_field)
    assert str(e.value) == message
28.277778
79
0.674853
0
0
0
0
903
0.887033
0
0
298
0.292731
29b245fab6ed28cf6c359207c9c4af61c43d22d1
102
py
Python
ch7/exercises/parrot.py
hewittaj/python_crash_course
52a3341eec79c2eb6c7f9f1cb7f0806c3b2d61aa
[ "MIT" ]
null
null
null
ch7/exercises/parrot.py
hewittaj/python_crash_course
52a3341eec79c2eb6c7f9f1cb7f0806c3b2d61aa
[ "MIT" ]
null
null
null
ch7/exercises/parrot.py
hewittaj/python_crash_course
52a3341eec79c2eb6c7f9f1cb7f0806c3b2d61aa
[ "MIT" ]
null
null
null
# using the input() function message = input("Tell me something, and I'll repeat it!") print(message)
25.5
57
0.72549
0
0
0
0
0
0
0
0
68
0.666667
29b2e2e2b5e0b11ab0a21e7a356d8c2fabd4abe1
1,028
py
Python
src/Nodes/WhileOp.py
gabrielzezze/z-lang
89be471fd5618a9d1c9e3eb955608cdc888511c2
[ "MIT" ]
null
null
null
src/Nodes/WhileOp.py
gabrielzezze/z-lang
89be471fd5618a9d1c9e3eb955608cdc888511c2
[ "MIT" ]
null
null
null
src/Nodes/WhileOp.py
gabrielzezze/z-lang
89be471fd5618a9d1c9e3eb955608cdc888511c2
[ "MIT" ]
null
null
null
from src.Node import Node from src.Nodes import Block from src.SymbolTable import SymbolTable class WhileOp(Node): def __init__(self, child: Block, condition: Node): self.condition = condition self.child = child super().__init__( value=condition, children=[child, condition], node_type='WhileOp' ) def Evaluate(self, symbol_table: SymbolTable): while_entry = self.builder.append_basic_block(name=f'while_{self.id}') while_exit = self.builder.append_basic_block(name=f'exit_while_{self.id}') condition_i = self.condition.Evaluate(symbol_table=symbol_table) self.builder.cbranch(condition_i, while_entry, while_exit) self.builder.position_at_start(while_entry) self.child.Evaluate(symbol_table=symbol_table) condition_i = self.condition.Evaluate(symbol_table) self.builder.cbranch(condition_i, while_entry, while_exit) self.builder.position_at_start(while_exit) return
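Evaluate() above emits the classic pre-test loop shape: branch on the condition, then re-test at the end of the body. A standalone sketch of the same block layout in llvmlite, on the assumption that the project's builder wraps a similar API; the i1 constant stands in for condition.Evaluate:

from llvmlite import ir

module = ir.Module()
func = ir.Function(module, ir.FunctionType(ir.VoidType(), []), name="loop")
builder = ir.IRBuilder(func.append_basic_block("entry"))
body = func.append_basic_block("while_0")
exit_ = func.append_basic_block("exit_while_0")

cond = ir.Constant(ir.IntType(1), 1)   # stand-in for condition.Evaluate(...)
builder.cbranch(cond, body, exit_)     # initial test
builder.position_at_start(body)
builder.cbranch(cond, body, exit_)     # re-test after the loop body
builder.position_at_start(exit_)
builder.ret_void()
print(module)                          # dump the generated IR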
35.448276
82
0.694553
932
0.906615
0
0
0
0
0
0
50
0.048638
29b5b93fcc93149c869189a925d3bab4277eed76
748
py
Python
googledevices/cli/commands/info.py
vlebourl/googledevices
5d8604ad48d94170127d1da9f01106a4d3bc4845
[ "MIT" ]
19
2018-11-24T03:09:59.000Z
2021-02-11T09:20:11.000Z
googledevices/cli/commands/info.py
vlebourl/googledevices
5d8604ad48d94170127d1da9f01106a4d3bc4845
[ "MIT" ]
13
2018-11-24T13:16:38.000Z
2022-02-22T17:27:08.000Z
googledevices/cli/commands/info.py
vlebourl/googledevices
5d8604ad48d94170127d1da9f01106a4d3bc4845
[ "MIT" ]
4
2018-11-26T16:14:42.000Z
2021-10-20T14:20:40.000Z
"""Get information about this package.""" def info(system): """Get information about this package.""" import googledevices.utils.const as package print("Projectname: ", package.NAME) print("Version: ", package.VERSION) print("GitHub link: ", package.URLS.get("github")) print("PyPi link: ", package.URLS.get("pypi")) print("Maintainers:") for maintainer in package.MAINTAINERS: print(" ", maintainer.get("name"), "(", maintainer.get("github"), ")") print("") if system: import platform print("") print("System: ", platform.system()) print("Version: ", platform.version()) print("Python version: ", platform.python_version())
32.521739
81
0.593583
0
0
0
0
0
0
0
0
261
0.34893
29b61776c27c79d1d7092a2b9bd2ee11a295186e
251
py
Python
Check_if_subarray_with_0_sum_is_exists_or_not.py
KiranPesarlanka9/data-structures-and-algorithms-Problems
557e3ca7f04b37fa5a709295f455b6338815486e
[ "MIT" ]
1
2019-11-28T12:21:51.000Z
2019-11-28T12:21:51.000Z
Check_if_subarray_with_0_sum_is_exists_or_not.py
KiranPesarlanka9/data-structures-and-algorithms-Problems
557e3ca7f04b37fa5a709295f455b6338815486e
[ "MIT" ]
null
null
null
Check_if_subarray_with_0_sum_is_exists_or_not.py
KiranPesarlanka9/data-structures-and-algorithms-Problems
557e3ca7f04b37fa5a709295f455b6338815486e
[ "MIT" ]
1
2019-12-06T09:18:41.000Z
2019-12-06T09:18:41.000Z
def check(arr):
    # Classic prefix-sum trick: a zero-sum subarray exists iff some prefix
    # sum repeats; seeding the set with 0 covers prefixes that sum to zero.
    sum_log = {0}
    _sum = 0
    for x in arr:
        _sum += x          # fixed: the original added 1 instead of the element
        if _sum in sum_log:
            return True
        sum_log.add(_sum)  # fixed: originally added before checking, so the
                           # membership test fired on every second iteration
    return False


arr = [1, 0, -2, 5, -4, 1, 9, -2]
print(check(arr))
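The same prefix-sum bookkeeping with a dict instead of a set also recovers where the zero-sum subarray lies; a sketch:

def find_zero_sum_span(arr):
    # first index at which each prefix sum was seen; prefix 0 occurs "before" index 0
    seen = {0: -1}
    s = 0
    for i, x in enumerate(arr):
        s += x
        if s in seen:
            return (seen[s] + 1, i)  # inclusive bounds of a zero-sum subarray
        seen[s] = i
    return None

print(find_zero_sum_span([1, 0, -2, 5, -4, 1, 9, -2]))  # -> (1, 1), the lone 0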
15.6875
34
0.49004
0
0
0
0
0
0
0
0
0
0
29b90065070b5025868557255475b9c600fb78b4
1,588
py
Python
scripts/join_completed.py
shannonfenn/data-tools
c730c2f88b8443f3c84a41467a40b2cc59dd8e87
[ "MIT" ]
null
null
null
scripts/join_completed.py
shannonfenn/data-tools
c730c2f88b8443f3c84a41467a40b2cc59dd8e87
[ "MIT" ]
null
null
null
scripts/join_completed.py
shannonfenn/data-tools
c730c2f88b8443f3c84a41467a40b2cc59dd8e87
[ "MIT" ]
null
null
null
#! /usr/bin/env python

import argparse

import pandas as pd
import numpy as np


def check_dataframe(filename, data_frame, key_columns):
    # guard added: key_columns may legitimately be None
    if key_columns and any(col not in data_frame for col in key_columns):
        raise ValueError('Key columns not in {}.'.format(filename))
    nonzero = np.count_nonzero(data_frame['trg_error'])
    if nonzero:
        print('Warning, some failed runs in {}.'.format(filename))


def join_completed(filenames, key_columns=None):
    completed = None
    # build single dataframe of successful runs
    for filename in filenames:
        df = pd.read_json(filename)
        # fixed: the filename argument was missing from this call
        check_dataframe(filename, df, key_columns)
        if completed is None:
            completed = df[df['trg_error'] == 0]
        else:
            completed = pd.concat([completed, df[df['trg_error'] == 0]],
                                  ignore_index=True)
    # check if rows are unique on given columns
    if key_columns:
        # fixed: DataFrame.sort() was removed from pandas, and it returned a
        # copy anyway, so the original call was a no-op
        completed = completed.sort_values(key_columns)
        duplicated = completed.duplicated(key_columns).sum()
        if duplicated > 0:
            raise ValueError('Duplicate rows: {}'.format(duplicated))
    return completed


def main():
    parser = argparse.ArgumentParser(
        description='Join results with zero training error.')
    parser.add_argument('-i', type=str, nargs='+', required=True,
                        help='list of input files')
    parser.add_argument('-o', type=str, required=True,
                        help='file to store result')
    args = parser.parse_args()

    join_completed(args.i).to_json(args.o)


if __name__ == '__main__':
    main()
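As a side note, growing a DataFrame with repeated pd.concat copies the accumulated rows on every iteration; collecting the filtered frames and concatenating once is the usual idiom. A sketch with stand-in frames in place of the per-file DataFrames:

import pandas as pd

frames = [pd.DataFrame({"trg_error": [0, 1]}), pd.DataFrame({"trg_error": [0]})]
completed = pd.concat([f[f["trg_error"] == 0] for f in frames],
                      ignore_index=True)
print(len(completed))  # -> 2 successful runs kept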
31.137255
72
0.632872
0
0
0
0
0
0
0
0
323
0.203401
29b95a7e7b6ab6d04a7196faa187fadcabb8c0e4
9,859
py
Python
pet/preprocessor.py
YerongLi/pet
8323080e9033c38c234431aecacad154ed477472
[ "Apache-2.0" ]
null
null
null
pet/preprocessor.py
YerongLi/pet
8323080e9033c38c234431aecacad154ed477472
[ "Apache-2.0" ]
null
null
null
pet/preprocessor.py
YerongLi/pet
8323080e9033c38c234431aecacad154ed477472
[ "Apache-2.0" ]
null
null
null
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC, abstractmethod from typing import List, Optional import numpy as np from pet.utils import InputFeatures, InputExample, PLMInputFeatures, GenerativeInputFeatures, GenerativeInputExample from pet.pvp import PVPS, PVP class Preprocessor(ABC): """ A preprocessor that transforms an :class:`InputExample` into a :class:`InputFeatures` object so that it can be processed by the model being used. """ def __init__(self, wrapper, task_name: str, pattern_ids: Optional[List[int]] = None, verbalizer_file: str = None): """ Create a new preprocessor. :param wrapper: the wrapper for the language model to use :param task_name: the name of the task :param pattern_ids: the ids of the PVPs to be used :param verbalizer_file: path to a file containing a verbalizer that overrides the default verbalizer """ self.wrapper = wrapper if pattern_ids is not None: self.pvps = {pid: PVPS[task_name](self.wrapper, pid, verbalizer_file) for pid in pattern_ids} self.label_map = {label: i for i, label in enumerate(self.wrapper.config.label_list)} @abstractmethod def get_input_features(self, example: InputExample, pattern_id: int, labelled: bool, priming: bool = False, **kwargs) -> InputFeatures: """Convert the given example into a set of input features""" pass class MLMPreprocessor(Preprocessor): """Preprocessor for models pretrained using a masked language modeling objective (e.g., BERT).""" def get_input_features(self, example: InputExample, pattern_id: int, labelled: bool, priming: bool = False, **kwargs) -> InputFeatures: pvp = self.pvps[pattern_id] # type: PVP if priming: input_ids, token_type_ids = pvp.encode(example, priming=True) priming_data = example.meta['priming_data'] # type: List[InputExample] priming_input_ids = [] for priming_example in priming_data: pe_input_ids, _ = pvp.encode(priming_example, priming=True, labeled=True) priming_input_ids += pe_input_ids input_ids = priming_input_ids + input_ids token_type_ids = self.wrapper.tokenizer.create_token_type_ids_from_sequences(input_ids) input_ids = self.wrapper.tokenizer.build_inputs_with_special_tokens(input_ids) else: input_ids, token_type_ids = pvp.encode(example) if self.wrapper.config.model_type == 'pegasus': # bugfix: Transformers' create_token_type_ids_from_sequences seems to ignore the final </s> token in Pegasus token_type_ids += [0] attention_mask = [1] * len(input_ids) padding_length = self.wrapper.config.max_seq_length - len(input_ids) if padding_length < 0: raise ValueError(f"Maximum sequence length is too small, got {len(input_ids)} input ids") input_ids = input_ids + ([self.wrapper.tokenizer.pad_token_id] * padding_length) attention_mask = attention_mask + ([0] * padding_length) token_type_ids = token_type_ids + ([0] * padding_length) assert len(input_ids) == self.wrapper.config.max_seq_length assert len(attention_mask) == self.wrapper.config.max_seq_length assert len(token_type_ids) == self.wrapper.config.max_seq_length label = self.label_map[example.label] if example.label is not None else -100 
logits = example.logits if example.logits else [-1] if labelled: mlm_labels = pvp.get_mask_positions(input_ids) if self.wrapper.config.model_type == 'gpt2': # shift labels to the left by one mlm_labels.append(mlm_labels.pop(0)) else: mlm_labels = [-1] * self.wrapper.config.max_seq_length return InputFeatures(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label, mlm_labels=mlm_labels, logits=logits, idx=example.idx, pattern_id=pattern_id) class PLMPreprocessor(MLMPreprocessor): """Preprocessor for models pretrained using a permuted language modeling objective (e.g., XLNet).""" def get_input_features(self, example: InputExample, pattern_id: int, labelled: bool, priming: bool = False, **kwargs) -> PLMInputFeatures: input_features = super().get_input_features(example, pattern_id, labelled=labelled, priming=priming, **kwargs) input_ids = input_features.input_ids pvp = self.pvps[pattern_id] # type: PVP num_masks = 1 # currently, PLMPreprocessor supports only replacements that require exactly one mask perm_mask = np.zeros((len(input_ids), len(input_ids)), dtype=np.float) label_idx = input_ids.index(pvp.mask_id) perm_mask[:, label_idx] = 1 # the masked token is not seen by any other token target_mapping = np.zeros((num_masks, len(input_ids)), dtype=np.float) target_mapping[0, label_idx] = 1.0 return PLMInputFeatures(perm_mask=perm_mask, target_mapping=target_mapping, **input_features.__dict__) class GenerativePreprocessor(MLMPreprocessor): """Preprocessor for a generative language model and generative task.""" def get_input_features(self, example: InputExample, pattern_id: int, labelled: bool, priming: bool = False, **kwargs) -> GenerativeInputFeatures: input_features = super().get_input_features(example, pattern_id, labelled=False, priming=False, **kwargs) assert isinstance(example, GenerativeInputExample) if example.output_text is not None: generative_prefix = self.pvps[pattern_id].generative_prefix_ids() max_length = self.wrapper.config.output_max_seq_length - len(generative_prefix) output_ids = self.wrapper.tokenizer.encode(example.output_text, add_special_tokens=True, max_length=max_length, padding='max_length', truncation='only_first') pad_token = self.wrapper.tokenizer.pad_token_id output_loss_mask = [0] * len(generative_prefix) + [0 if tok_id == pad_token else 1 for tok_id in output_ids] output_ids = generative_prefix + output_ids else: output_ids = [self.wrapper.tokenizer.pad_token_id] output_loss_mask = [0] if 'token_ids' in example.meta: token_ids = example.meta['token_ids'] token_probabilities = example.meta['token_probabilities'] len_output_ids = sum(1 for x in output_ids if x != self.wrapper.tokenizer.pad_token_id) assert len(token_ids) == len_output_ids, \ f"If given, there should be as many token ids as there are output ids. Got {len(token_ids)} token " \ f"ids and {len_output_ids} output ids." 
padding_entry = [0] * len(token_ids[0]) padding = [padding_entry] * (self.wrapper.config.output_max_seq_length - len(token_ids)) input_features.meta['token_ids'] = token_ids + padding input_features.meta['token_probabilities'] = token_probabilities + padding return GenerativeInputFeatures(output_ids=output_ids, output_loss_mask=output_loss_mask, **input_features.__dict__) class SequenceClassifierPreprocessor(Preprocessor): """Preprocessor for a regular sequence classification model.""" def get_input_features(self, example: InputExample, **kwargs) -> InputFeatures: inputs = self.wrapper.task_helper.get_sequence_classifier_inputs(example) if self.wrapper.task_helper else None if inputs is None: inputs = self.wrapper.tokenizer.encode_plus( example.text_a if example.text_a else None, example.text_b if example.text_b else None, add_special_tokens=True, max_length=self.wrapper.config.max_seq_length, ) input_ids, token_type_ids = inputs["input_ids"], inputs.get("token_type_ids") attention_mask = [1] * len(input_ids) padding_length = self.wrapper.config.max_seq_length - len(input_ids) input_ids = input_ids + ([self.wrapper.tokenizer.pad_token_id] * padding_length) attention_mask = attention_mask + ([0] * padding_length) if not token_type_ids: token_type_ids = [0] * self.wrapper.config.max_seq_length else: token_type_ids = token_type_ids + ([0] * padding_length) mlm_labels = [-1] * len(input_ids) assert len(input_ids) == self.wrapper.config.max_seq_length assert len(attention_mask) == self.wrapper.config.max_seq_length assert len(token_type_ids) == self.wrapper.config.max_seq_length label = self.label_map[example.label] if example.label is not None else -100 logits = example.logits if example.logits else [-1] return InputFeatures(input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label, mlm_labels=mlm_labels, logits=logits, idx=example.idx)
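All of these preprocessors share the same pad-to-max_seq_length scheme; a standalone sketch of just that step (toy token ids, pad id 0 assumed):

# Pad ids to a fixed length and mask out the padding positions.
max_seq_length, pad_id = 8, 0
input_ids = [101, 7, 7, 102]
attention_mask = [1] * len(input_ids)
pad = max_seq_length - len(input_ids)
input_ids += [pad_id] * pad
attention_mask += [0] * pad
assert len(input_ids) == len(attention_mask) == max_seq_length
print(input_ids)       # [101, 7, 7, 102, 0, 0, 0, 0]
print(attention_mask)  # [1, 1, 1, 1, 0, 0, 0, 0]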
48.566502
120
0.674713
9,061
0.919059
0
0
264
0.026778
0
0
2,108
0.213815
29ba20d9bd3f8cb7d67a41fe698ce4a315481ebd
21
py
Python
test/__init__.py
mzappitello/http_monitor
6a20e41bdbab480090de3c8d760bc7c425b9c899
[ "MIT" ]
null
null
null
test/__init__.py
mzappitello/http_monitor
6a20e41bdbab480090de3c8d760bc7c425b9c899
[ "MIT" ]
null
null
null
test/__init__.py
mzappitello/http_monitor
6a20e41bdbab480090de3c8d760bc7c425b9c899
[ "MIT" ]
null
null
null
# test __init__ file
10.5
20
0.761905
0
0
0
0
0
0
0
0
20
0.952381
29ba94b0967bd5341e441dd394da5100f547c093
3,542
py
Python
xbrl/const.py
blinkace/pxp
9155103dc166674137bd0e2fddb609ca44875761
[ "MIT" ]
1
2022-01-27T14:53:23.000Z
2022-01-27T14:53:23.000Z
xbrl/const.py
blinkace/pxp
9155103dc166674137bd0e2fddb609ca44875761
[ "MIT" ]
null
null
null
xbrl/const.py
blinkace/pxp
9155103dc166674137bd0e2fddb609ca44875761
[ "MIT" ]
null
null
null
import re class NS: xs = 'http://www.w3.org/2001/XMLSchema' link = 'http://www.xbrl.org/2003/linkbase' xlink = "http://www.w3.org/1999/xlink" xbrli = "http://www.xbrl.org/2003/instance" xbrldi = "http://xbrl.org/2006/xbrldi" xbrldie = "http://xbrl.org/2005/xbrldi/errors" xbrldt = "http://xbrl.org/2005/xbrldt" xbrldte = "http://xbrl.org/2005/xbrldt/errors" catalog = "urn:oasis:names:tc:entity:xmlns:xml:catalog" tp = "http://xbrl.org/2016/taxonomy-package" oime = "https://xbrl.org/((~status_date_uri~))/oim/error" oimce = "https://xbrl.org/((~status_date_uri~))/oim-common/error" xbrlxe = "http://www.xbrl.org/WGWD/YYYY-MM-DD/xbrl-xml/error" xbrl21e = "http://www.blinkace.com/python-xbrl-oim/xbrl-2.1/error" xbrl = "https://xbrl.org/2021" iso4217 = "http://www.xbrl.org/2003/iso4217" utr = "http://www.xbrl.org/2009/utr" ix = "http://www.xbrl.org/2013/inlineXBRL" ix10 = "http://www.xbrl.org/2008/inlineXBRL" ixe = "http://www.xbrl.org/2013/inlineXBRL/error" pyxbrle = "https://blinkace.com/pyxbrl/error" tpe = 'http://xbrl.org/2016/taxonomy-package/errors' xhtml = 'http://www.w3.org/1999/xhtml' xbrlce = 'https://xbrl.org/((~status_date_uri~))/xbrl-csv/error' xbrlje = 'https://xbrl.org/((~status_date_uri~))/xbrl-json/error' enum2 = 'http://xbrl.org/2020/extensible-enumerations-2.0' xsi = 'http://www.w3.org/2001/XMLSchema-instance' entities = "https://xbrl.org/((~status_date_uri~))/entities" entities_cr7 = "https://xbrl.org/2021-02-03/entities" PREFIX = {} NSMAP = {} OIM_COMMON_RESERVED_PREFIXES = {} OIM_COMMON_RESERVED_PREFIX_MAP = {} def buildPrefixMaps(): global PREFIX PREFIX.clear() for k, v in NS.__dict__.items(): if not k.startswith("_"): PREFIX[v] = k global NSMAP NSMAP.clear() for k, v in NS.__dict__.items(): if not k.startswith("_"): NSMAP[k] = v global OIM_COMMON_RESERVED_PREFIXES OIM_COMMON_RESERVED_PREFIXES = { "iso4217", "utr", "xbrl", "xbrli", "xs" } global OIM_COMMON_RESERVED_PREFIX_MAP OIM_COMMON_RESERVED_PREFIX_MAP.clear() for k in OIM_COMMON_RESERVED_PREFIXES: OIM_COMMON_RESERVED_PREFIX_MAP[k] = getattr(NS, k) buildPrefixMaps() def setOIMVersion(version): for k, v in NS.__dict__.items(): if not k.startswith("_"): setattr(NS, k, re.sub(r'\(\(~status_date_uri~\)\)', version, v)) buildPrefixMaps() class LinkType: footnote = 'http://www.xbrl.org/2003/arcrole/fact-footnote' explanatoryFact = 'http://www.xbrl.org/2009/arcrole/fact-explanatoryFact' class LinkGroup: default = 'http://www.xbrl.org/2003/role/link' LINK_RESERVED_URI_MAP = { "_": LinkGroup.default, "footnote": LinkType.footnote, "explanatoryFact": LinkType.explanatoryFact, } class DocumentType: xbrlcsv_git = 'https://xbrl.org/((~status_date_uri~))/xbrl-csv' xbrlcsv_cr7 = 'https://xbrl.org/CR/2021-02-03/xbrl-csv' xbrlcsv_cr9 = 'https://xbrl.org/CR/2021-07-07/xbrl-csv' xbrlcsv_pr1 = 'https://xbrl.org/PR/2021-08-04/xbrl-csv' xbrlcsv = 'https://xbrl.org/2021/xbrl-csv' xbrljson_git = 'https://xbrl.org/((~status_date_uri~))/xbrl-json' xbrljson_wgwd = 'https://xbrl.org/WGWD/YYYY-MM-DD/xbrl-json' xbrljson_cr7 = 'https://xbrl.org/CR/2021-02-02/xbrl-json' xbrljson_cr9 = 'https://xbrl.org/CR/2021-07-07/xbrl-json' xbrljson_pr1 = 'https://xbrl.org/PR/2021-08-04/xbrl-json' xbrljson = 'https://xbrl.org/2021/xbrl-json'
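A usage sketch of the version substitution above, assuming this module's globals are in scope; the date is illustrative, not necessarily a real spec status date:

# Substitute a concrete status date into every templated URI and rebuild
# the prefix maps.
setOIMVersion("2021-10-13")
print(NS.oime)          # -> https://xbrl.org/2021-10-13/oim/error
print(PREFIX[NS.oime])  # -> 'oime'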
37.680851
78
0.660926
2,469
0.697064
0
0
0
0
0
0
1,863
0.525974
29bcfd631b01019c349e3bbedaeeb2cbda9283d5
2,832
py
Python
src/cogs/xpevent.py
nsde/lhxp
ef6d1004c704c1156b9b01172e4748634b31b541
[ "MIT" ]
2
2021-12-18T11:44:31.000Z
2022-01-07T23:27:00.000Z
src/cogs/xpevent.py
nsde/lhxp
ef6d1004c704c1156b9b01172e4748634b31b541
[ "MIT" ]
null
null
null
src/cogs/xpevent.py
nsde/lhxp
ef6d1004c704c1156b9b01172e4748634b31b541
[ "MIT" ]
null
null
null
try:
    from .helpers import config, management, xp, spam
except ImportError:
    import helpers.config, helpers.management, helpers.xp, helpers.spam

import time

import discord
from discord.ext import commands
from discord.commands import slash_command


class XPEvent(commands.Cog):
    def __init__(self, client):
        self.client = client

    async def antispam(self, message):
        message_is_spam = False
        message_content = message.content

        if spam.is_spam(message_content):
            message_is_spam = True
            await message.delete()

        last_message = await message.channel.history().get(author__name=message.author.name)
        if spam.is_spam(message_content + last_message.content):
            message_is_spam = True

        messages = []
        async for msg in message.channel.history(limit=2):
            messages.append(msg)

        # guard added: the channel may hold fewer than two messages
        if message_is_spam or (len(messages) > 1 and messages[0].content == messages[1].content):
            try:
                await message.delete()
                await last_message.delete()
            except:
                pass

        return message_is_spam

    async def give_xp(self, message):
        # fixed: the original stripped all spaces from the content and then
        # counted spaces in the stripped string, so the word-based reward
        # always came out as zero
        word_count = len(message.content.split())
        xp_gain = word_count * config.load()['word-reward-xp']
        if xp_gain < 2:  # don't go into negative XP numbers!
            xp_gain = config.load()['word-reward-xp']
        xp.add(message.author, xp_gain)

    async def daily_check(self, message):
        is_empty = message.author.id not in list(config.load('dailystep').keys())
        if is_empty:
            config.set('dailystep', message.author.id, 0)
        if config.load('dailystep')[message.author.id] > 31:
            config.set('dailystep', message.author.id, 0)

        if not config.load('lastmessage').get(message.author.id):
            config.set('lastmessage', message.author.id, -1)
        else:
            config.set('lastmessage', message.author.id, time.time())

        penultimate_message_time = config.load('lastmessage')[message.author.id]
        today_begin = (time.time()//86400)*86400

        if today_begin > penultimate_message_time:
            config.change('dailystep', message.author.id, 1)
            daily_reward = config.load()['daily-rewards'][int(time.strftime('%d'))-1]
            xp.add(message.author, daily_reward*config.load()['daily-reward-multiplier'])

    @commands.Cog.listener()
    async def on_message(self, message):
        if message.author.bot:
            return
        was_spam = await self.antispam(message)
        if was_spam:
            return
        await self.give_xp(message)
        await self.daily_check(message)
        await self.client.process_commands(message)


def setup(client):
    client.add_cog(XPEvent(client))
32.551724
92
0.628884
2,519
0.889477
0
0
342
0.120763
2,377
0.839336
252
0.088983
29be043b68e9b14821af31619772ea7a817c2a7b
2,199
py
Python
utilities/utils.py
jluech/PGAcloud_Manager
9008fac26f9d762b2ab527034e46d467b5b0c26f
[ "MIT" ]
null
null
null
utilities/utils.py
jluech/PGAcloud_Manager
9008fac26f9d762b2ab527034e46d467b5b0c26f
[ "MIT" ]
null
null
null
utilities/utils.py
jluech/PGAcloud_Manager
9008fac26f9d762b2ab527034e46d467b5b0c26f
[ "MIT" ]
null
null
null
import logging import os import subprocess import sys import yaml files_dir = "" # --- General util commands --- def execute_command( command, working_directory, environment_variables, executor, logger=logging, livestream=False ): logger_prefix = "" if executor: logger_prefix = executor + ": " process = subprocess.Popen( command, cwd=working_directory, env=environment_variables, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, ) logger.debug(logger_prefix + "command: " + command) stdout = "" for line in iter(process.stdout.readline, b''): line = str(line, "utf-8") stdout += line if livestream: sys.stdout.write(line) else: logger.debug(logger_prefix + "command output: " + line.rstrip()) return_code = process.wait() stdout = stdout.rstrip() return stdout, return_code def merge_dict(dict1, dict2): res = {**dict1, **dict2} return res def parse_yaml(yaml_file_path): with open(yaml_file_path, mode="r", encoding="utf-8") as yaml_file: content = yaml.safe_load(yaml_file) or {} return content # --- File and path handling commands --- def get_uploaded_files_path(pga_id): return os.path.join(files_dir, str(pga_id)) def get_uploaded_files_dict(pga_id): files_dict = {} directory = get_uploaded_files_path(pga_id) files = os.listdir(directory) for filename in files: name = filename.split(".")[0] yaml_dict = parse_yaml(os.path.join(directory, filename)) yaml_dict["_filename"] = filename files_dict[name] = yaml_dict return files_dict def get_filename_from_path(file_path): if file_path.__contains__("\\"): filename = file_path.split("\\")[-1].split(".")[0] else: filename = file_path.split("/")[-1].split(".")[0] return filename def create_pga_subdir(pga_id): os.makedirs(os.path.join(files_dir, str(pga_id))) def __set_files_dir(path): global files_dir files_dir = os.path.join(path, 'files') os.makedirs(files_dir, exist_ok=True)
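A minimal usage sketch of execute_command as defined above; it assumes a POSIX shell is available for the echo command:

import os

out, rc = execute_command(
    command="echo hello",
    working_directory=".",
    environment_variables=os.environ.copy(),
    executor="demo",          # only used as a log prefix
)
print(rc, out)  # -> 0 hello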
23.393617
76
0.637108
0
0
0
0
0
0
0
0
169
0.076853
29bfe3374dd25a06358a1da66a585cb725eee7be
578
py
Python
pyscf/nao/m_color.py
KMCzajkowski/pyscf
e8af41d910cc0d3963655120c0b689590ad978e7
[ "BSD-2-Clause" ]
null
null
null
pyscf/nao/m_color.py
KMCzajkowski/pyscf
e8af41d910cc0d3963655120c0b689590ad978e7
[ "BSD-2-Clause" ]
null
null
null
pyscf/nao/m_color.py
KMCzajkowski/pyscf
e8af41d910cc0d3963655120c0b689590ad978e7
[ "BSD-2-Clause" ]
null
null
null
class color:
    import os
    T = os.getenv('TERM')
    if (T == 'cygwin' or T == 'mingw'):
        HEADER = '\033[01;35m'
        BLUE = '\033[01;34m'
        GREEN = '\033[01;32m'
        WARNING = '\033[01;33m'
        FAIL = '\033[01;31m'
        RED = FAIL
        ENDC = '\033[0m'
    else:
        HEADER = '\033[95m'
        BLUE = '\033[94m'
        GREEN = '\033[92m'
        WARNING = '\033[93m'
        FAIL = '\033[91m'
        RED = FAIL
        ENDC = '\033[0m'

    def disable(self):
        # fixed: these previously assigned OKBLUE/OKGREEN, names the class
        # never defines, so disable() left BLUE and GREEN active
        self.HEADER = ''
        self.BLUE = ''
        self.GREEN = ''
        self.WARNING = ''
        self.FAIL = ''
        self.RED = ''
        self.ENDC = ''
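Usage sketch (the message text is illustrative):

# Wrap output in the escape codes chosen for the current terminal, then reset.
print(color.WARNING + "SCF has not converged" + color.ENDC)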
19.931034
36
0.49308
577
0.99827
0
0
0
0
0
0
168
0.290657
29c079a0baef167378f06f75800a84013625dfce
7,958
py
Python
Scripts Daily/재무정보수집.py
oms1226/msbot
4c141502ef6899f9e4bb3fe8e03c7eb866487d5e
[ "MIT" ]
1
2020-05-01T07:50:49.000Z
2020-05-01T07:50:49.000Z
Scripts Daily/재무정보수집.py
oms1226/msbot
4c141502ef6899f9e4bb3fe8e03c7eb866487d5e
[ "MIT" ]
1
2021-06-01T22:36:14.000Z
2021-06-01T22:36:14.000Z
Scripts Daily/재무정보수집.py
oms1226/msbot
4c141502ef6899f9e4bb3fe8e03c7eb866487d5e
[ "MIT" ]
8
2019-10-26T03:30:53.000Z
2022-03-26T08:06:25.000Z
# -*- coding: utf-8 -*- import re import calendar import datetime, time from datetime import timedelta import urllib.request import requests, json from http.cookiejar import CookieJar from bs4 import BeautifulSoup import numpy as np import pandas as pd from pandas import DataFrame import pandas.io.sql as pdsql from matplotlib import dates import sqlite3 DATABASE = '..\\DATA\\mymoneybot.sqlite' def sqliteconn(): conn = sqlite3.connect(DATABASE) return conn def get_webpage(url, encoding=""): cj = CookieJar() opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj)) opener.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36')] respstr = "" try: op = opener.open(url) sourcecode = op.read() except Exception as e: time.sleep(1) op = opener.open(url) sourcecode = op.read() encodingmethod = op.info().get_param('charset') if encodingmethod == None: if encoding != "": encodingmethod = encoding if encoding != "": encodingmethod = encoding try: respstr = sourcecode.decode(encoding=encodingmethod, errors='ignore') except Exception as e: respstr = sourcecode.decode(encoding="cp949", errors='ignore') opener.close() return respstr def get_company_fundamental_fnguide(code): def g(x): if type(x) == str: return datetime.datetime.strptime(x, '%Y-%m-%d') else: return x # url = "http://comp.fnguide.com/SVO2/ASP/SVD_main.asp?pGB=1&gicode=A%s&cID=&MenuYn=Y&ReportGB=&NewMenuID=11&stkGb=&strResearchYN=" % (code) url = "http://asp01.fnguide.com/SVO2/ASP/SVD_Main.asp?pGB=1&gicode=A%s&NewMenuID=11&cID=50&MenuYn=N" % (code) respstr = get_webpage(url, encoding="utf8") # soup = BeautifulSoup(respstr) soup = BeautifulSoup(respstr, "lxml") # <!--IFRS 별도/연간 --> target_table = soup.find("div", class_="um_table", id="highlight_B_Y") # print(target_table) result = [] try: target_table.find_all('tr') except Exception as e: return (DataFrame(), DataFrame()) for tr in target_table.find_all('tr'): # print("[%s]" % tr) for th in tr.find_all('th'): value = "%s" % th.text.replace('(P) : Provisional','').replace('(E) : Estimate','').replace('잠정실적','').replace('컨센서스, 추정치','').replace('(E)','').replace('(P)','').replace('/','-').strip() if ('-02' in value): value = value + '-28' elif ('-04' in value) or ('-06' in value) or ('-09' in value) or ('-11' in value): value = value + '-30' elif ('-01' in value) or ('-03' in value) or ('-05' in value) or ('-07' in value) or ('-08' in value) or ('-10' in value) or ('-12' in value): value = value + '-31' result.append(value) # print("[%s]" % th.text.replace('(E) : Estimate','').replace('컨센서스, 추정치','').strip()) for td in tr.find_all('td'): value = td.text.strip().replace(',','') try: value = float(value) except Exception as e: value = 0 result.append(value) # print(td.text.strip()) # print(result[1:]) result = result[1:] dfdata = [] for x in range(0, len(result), 9): dfdata.append(result[x:x+9]) df = DataFrame(data=dfdata, columns = [str(x) for x in range(1,10)]).T df.columns = ['날짜', '매출액', '영업이익', '당기순이익', '자산총계', '부채총계', '자본총계', '자본금', '부채비율', '유보율', '영업이익률', '순이익률', 'ROA', 'ROE', 'EPS', 'BPS', 'DPS', 'PER', 'PBR', '발행주식수', '배당수익률'] df.drop(df.index[[0]], inplace=True) # df['날짜'] = df['date'].apply(g) # df.drop(['date'], axis=1, inplace=True) df = df.convert_objects(convert_numeric=True) # df.set_index('날짜', inplace=True) df_year = df # <!--IFRS 별도/분기 --> target_table = soup.find("div", class_="um_table", id="highlight_B_Q") # print(target_table) result = [] for tr in 
target_table.find_all('tr'): # print("[%s]" % tr) for th in tr.find_all('th'): value = "%s" % th.text.replace('(P) : Provisional','').replace('(E) : Estimate','').replace('잠정실적','').replace('컨센서스, 추정치','').replace('(E)','').replace('(P)','').replace('/','-').strip() if ('-02' in value): value = value + '-28' elif ('-04' in value) or ('-06' in value) or ('-09' in value) or ('-11' in value): value = value + '-30' elif ('-01' in value) or ('-03' in value) or ('-05' in value) or ('-07' in value) or ('-08' in value) or ('-10' in value) or ('-12' in value): value = value + '-31' result.append(value) # print("[%s]" % th.text.replace('(E) : Estimate','').replace('컨센서스, 추정치','').strip()) for td in tr.find_all('td'): value = td.text.strip().replace(',','') try: value = float(value) except Exception as e: value = 0 result.append(value) # print(td.text.strip()) # print(result[1:]) result = result[1:] dfdata = [] for x in range(0, len(result), 9): dfdata.append(result[x:x+9]) df = DataFrame(data=dfdata, columns = [str(x) for x in range(1,10)]).T df.columns = ['날짜', '매출액', '영업이익', '당기순이익', '자산총계', '부채총계', '자본총계', '자본금', '부채비율', '유보율', '영업이익률', '순이익률', 'ROA', 'ROE', 'EPS', 'BPS', 'DPS', 'PER', 'PBR', '발행주식수', '배당수익률'] df.drop(df.index[[0]], inplace=True) # df['날짜'] = df['date'].apply(g) # df.drop(['date'], axis=1, inplace=True) df = df.convert_objects(convert_numeric=True) # df.set_index('날짜', inplace=True) df_qtr = df return (df_year, df_qtr) def build_fundamental_data(): with sqlite3.connect(DATABASE) as conn: cursor = conn.cursor() replace_sqlite = ( "replace into 재무정보( 날짜,종목코드,기간구분,매출액,영업이익,당기순이익,자산총계,부채총계,자본총계,자본금,부채비율,유보율,영업이익률,순이익률,ROA,ROE,EPS,BPS,DPS,PER,PBR,발행주식수,배당수익률 ) " "values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) " ) df = pdsql.read_sql_query('select 단축코드, 종목명 from 종목코드 ', con=conn) CODES = list(df.values) for code, name in CODES: print('FnGuide - %s %s' % (code, name)) try: (df_year, df_qtr) = get_company_fundamental_fnguide(code) except Exception as e: continue if len(df_year.index) > 0 or len(df_qtr.index) > 0: if len(df_year.index) > 0: 기간구분 = '년간' for idx, row in df_year.iterrows(): 날짜, 매출액, 영업이익, 당기순이익, 자산총계, 부채총계, 자본총계, 자본금, 부채비율, 유보율, 영업이익률, 순이익률, ROA, ROE, EPS, BPS, DPS, PER, PBR, 발행주식수, 배당수익률 = row 종목코드 = code d = (날짜,종목코드,기간구분,매출액,영업이익,당기순이익,자산총계,부채총계,자본총계,자본금,부채비율,유보율,영업이익률,순이익률,ROA,ROE,EPS,BPS,DPS,PER,PBR,발행주식수,배당수익률) cursor.execute(replace_sqlite, d) conn.commit() if len(df_qtr.index) > 0: 기간구분 = '분기' for idx, row in df_qtr.iterrows(): 날짜, 매출액, 영업이익, 당기순이익, 자산총계, 부채총계, 자본총계, 자본금, 부채비율, 유보율, 영업이익률, 순이익률, ROA, ROE, EPS, BPS, DPS, PER, PBR, 발행주식수, 배당수익률 = row 종목코드 = code d = (날짜,종목코드,기간구분,매출액,영업이익,당기순이익,자산총계,부채총계,자본총계,자본금,부채비율,유보율,영업이익률,순이익률,ROA,ROE,EPS,BPS,DPS,PER,PBR,발행주식수,배당수익률) cursor.execute(replace_sqlite, d) conn.commit() # time.sleep(2) # except Exception as e: # print(code, name, str(e)) if __name__ == "__main__": # 재무정보가져오기 - 분기에 한번 실행하면 됨 build_fundamental_data()
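One portability note on the scraper above: DataFrame.convert_objects was removed from pandas years ago. A hedged replacement for the numeric coercion it performed, where errors="ignore" leaves non-numeric columns such as 날짜 untouched:

import pandas as pd

def convert_numeric_columns(df):
    # rough stand-in for the removed df.convert_objects(convert_numeric=True)
    return df.apply(lambda col: pd.to_numeric(col, errors="ignore"))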
37.895238
199
0.539834
0
0
0
0
0
0
0
0
2,680
0.298242
29c2e1b7e5523be19b17e937a85dde93fdb45fab
24,237
py
Python
apps/recurring_donations/management/commands/process_monthly_donations.py
gannetson/sportschooldeopenlucht
0c78e5a95b22a963244112e478119ba60c572141
[ "BSD-3-Clause" ]
1
2019-01-19T06:58:39.000Z
2019-01-19T06:58:39.000Z
apps/recurring_donations/management/commands/process_monthly_donations.py
gannetson/sportschooldeopenlucht
0c78e5a95b22a963244112e478119ba60c572141
[ "BSD-3-Clause" ]
null
null
null
apps/recurring_donations/management/commands/process_monthly_donations.py
gannetson/sportschooldeopenlucht
0c78e5a95b22a963244112e478119ba60c572141
[ "BSD-3-Clause" ]
null
null
null
import csv
import os
import math
import logging
import traceback
import requests
import sys

from collections import namedtuple
from optparse import make_option

from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
from django.db import connection
from django.db import transaction
from django.utils import timezone

from apps.cowry_docdata.adapters import WebDirectDocDataDirectDebitPaymentAdapter
from apps.cowry_docdata.exceptions import DocDataPaymentException
from apps.cowry_docdata.models import DocDataPaymentOrder
from apps.fund.models import RecurringDirectDebitPayment, Order, OrderStatuses, Donation, OrderItem
from apps.projects.models import Project, ProjectPhases

from ...mails import mail_monthly_donation_processed_notification

logger = logging.getLogger(__name__)

#
# Run with:
# ./manage.py process_monthly_donations -v 2 --settings=bluebottle.settings.local (or .production etc.)
#


class Command(BaseCommand):
    help = 'Process monthly donations.'
    requires_model_validation = True

    verbosity_loglevel = {
        '0': logging.ERROR,    # 0 means no output.
        '1': logging.WARNING,  # 1 means normal output (default).
        '2': logging.INFO,     # 2 means verbose output.
        '3': logging.DEBUG     # 3 means very verbose output.
    }

    option_list = BaseCommand.option_list + (
        make_option('--dry-run', action='store_true', dest='dry_run', default=False,
                    help="Process the monthly donations without creating any db records or payments."),
        make_option('--no-email', action='store_true', dest='no_email', default=False,
                    help="Don't send the monthly donation email to users."),
        make_option('--csv-export', action='store_true', dest='csv_export', default=False,
                    help="Generate CSV export of monthly donors with donations amounts."),
        make_option('--process-one-recurring-payment', action='store', dest='process_payment_id', type='int',
                    metavar='RECURRING-PAYMENT-ID',
                    help="Process only the RecurringDirectDebitPayment specified by its primary key."),
    )

    def handle(self, *args, **options):
        # Setup the log level for root logger.
        loglevel = self.verbosity_loglevel.get(options['verbosity'])
        logger.setLevel(loglevel)

        if options['dry_run'] and options['csv_export']:
            logger.error("You cannot set both '--dry-run' and '--csv-export'.")
            sys.exit(1)

        send_email = not options['no_email']
        if options['dry_run']:
            # TODO Implement --dry-run.
            logger.warn("Config: --dry-run not fully implemented yet. Database records and payments will be created.")
            logger.info("Config: Not sending emails.")
            send_email = False

        recurring_payments_queryset = RecurringDirectDebitPayment.objects.filter(active=True)
        if options['csv_export']:
            generate_monthly_donations_csv(recurring_payments_queryset)
        else:
            if options['process_payment_id']:
                recurring_payments_queryset = recurring_payments_queryset.filter(id=options['process_payment_id'])
            try:
                process_monthly_donations(recurring_payments_queryset, send_email)
            except:
                print traceback.format_exc()


def generate_monthly_donations_csv(recurring_payments_queryset):
    csv_path = os.path.expanduser('~/monthly-donors-{0}.csv'.format(timezone.now().date()))
    logger.info("Saving monthly donations CSV file to:")
    logger.info(" {0}".format(csv_path))
    with open(csv_path, 'wb') as csv_file:
        csvwriter = csv.writer(csv_file, dialect='excel')
        csvwriter.writerow(['Member', 'Active', 'Amount'])
        for rp in recurring_payments_queryset:
            csvwriter.writerow([rp.user.email, rp.active, rp.amount])


def update_last_donation(donation, remaining_amount, popular_projects):
    """
    Updates the last donation with the remaining amount of the payment. If the donation is more than the project
    needs, the project will be filled and the balance will be used to fill the popular projects recursively.
    """
    project = Project.objects.get(id=donation.project_id)

    # Base case.
    if project.projectcampaign.money_donated + remaining_amount <= project.projectcampaign.money_asked or \
            len(popular_projects) == 0:
        # The remaining amount won't fill up the project or we have no more projects to try. We're done.
        logger.debug(u"Donation is less than project '{0}' needs. No further adjustments are needed.".format(project.title))
        donation.amount = remaining_amount
        donation.donation_type = Donation.DonationTypes.recurring
        donation.save()
        return

    # Recursive case.
    else:
        # Fill up the project.
        logger.debug(u"Donation is more than project '{0}' needs. Filling up project and creating new donation.".format(project.title))
        donation.amount = project.projectcampaign.money_asked - project.projectcampaign.money_donated
        donation.donation_type = Donation.DonationTypes.recurring
        donation.save()

        # Create a new Donation and recursively update it with the remaining amount.
        ct = ContentType.objects.get_for_model(donation)
        order = OrderItem.objects.get(content_type=ct, content_object=donation)
        new_project = popular_projects.pop(0)
        new_donation = Donation.objects.create(user=donation.user, project=new_project, amount=0, currency='EUR',
                                               donation_type=Donation.DonationTypes.recurring)
        OrderItem.objects.create(content_object=new_donation, order=order)
        update_last_donation(new_donation, remaining_amount - donation.amount, popular_projects)


def create_recurring_order(user, projects, order=None):
    """
    Creates a recurring Order with donations to the supplied projects.
    """
    if not order:
        order = Order.objects.create(status=OrderStatuses.recurring, user=user, recurring=True)
    for p in projects:
        project = Project.objects.get(id=p.id)
        if project.phase == ProjectPhases.campaign:
            donation = Donation.objects.create(user=user, project=project, amount=0, currency='EUR',
                                               donation_type=Donation.DonationTypes.recurring)
            OrderItem.objects.create(content_object=donation, order=order)
    return order


def correct_donation_amounts(popular_projects, recurring_order, recurring_payment):
    """
    Divides the total amount for the monthly donation across all projects. This method deals with the case of a
    donation filling up a project.
    """
    remaining_amount = recurring_payment.amount
    num_donations = recurring_order.donations.count()
    amount_per_project = math.floor(recurring_payment.amount / num_donations)
    donations = recurring_order.donations
    for i in range(0, num_donations - 1):
        donation = donations[i]
        project = Project.objects.get(id=donation.project_id)
        if project.projectcampaign.money_donated + amount_per_project > project.projectcampaign.money_asked:
            donation.amount = project.projectcampaign.money_asked - project.projectcampaign.money_donated
        else:
            donation.amount = amount_per_project
        donation.donation_type = Donation.DonationTypes.recurring
        donation.save()
        remaining_amount -= donation.amount

    # Update the last donation with the remaining amount.
    update_last_donation(donations[num_donations - 1], remaining_amount, popular_projects)


def set_order_created_datetime(recurring_order, order_created_datetime):
    """
    Uses custom SQL to set the created time of Order to a consistent value.
    """
    db_table = recurring_order._meta.db_table
    pk_name = recurring_order._meta.pk.name
    logger.debug("Setting created and updated to {0} on Order {1}.".format(order_created_datetime, recurring_order.id))
    cursor = connection.cursor()
    sql_statement = "UPDATE {0} SET created = '{1}' WHERE {2} = {3}".format(db_table, order_created_datetime, pk_name, recurring_order.pk)
    cursor.execute(sql_statement)
    sql_statement = "UPDATE {0} SET updated = '{1}' WHERE {2} = {3}".format(db_table, order_created_datetime, pk_name, recurring_order.pk)
    cursor.execute(sql_statement)
    transaction.commit_unless_managed()


def process_monthly_donations(recurring_payments_queryset, send_email):
    """
    The starting point for creating DocData payments for the monthly donations.
    """
    recurring_donation_errors = []
    RecurringDonationError = namedtuple('RecurringDonationError', 'recurring_payment error_message')
    skipped_recurring_payments = []
    SkippedRecurringPayment = namedtuple('SkippedRecurringPayment', 'recurring_payment orders')
    donation_count = 0

    # The adapter is used after the recurring Order and donations have been adjusted. It's created here so that we can
    # reuse it to process all recurring donations.
    webdirect_payment_adapter = WebDirectDocDataDirectDebitPaymentAdapter()

    # A consistent created time to use for the created recurring Orders.
    order_created_datetime = timezone.now()

    # Fixed lists of the popular projects.
    popular_projects_all = list(Project.objects.filter(phase=ProjectPhases.campaign).order_by('-popularity'))
    top_three_projects = popular_projects_all[:3]
    popular_projects_rest = popular_projects_all[3:]

    logger.info("Config: Using these projects as 'Top Three':")
    for project in top_three_projects:
        logger.info(" {0}".format(project.title))

    # The main loop that processes each monthly donation.
    for recurring_payment in recurring_payments_queryset:
        top_three_donation = False
        user_selected_projects = []

        # Skip payment if there has been a recurring Order recently.
        ten_days_ago = timezone.now() + timezone.timedelta(days=-10)
        recent_closed_recurring_orders = Order.objects.filter(user=recurring_payment.user, status=OrderStatuses.closed,
                                                              recurring=True, updated__gt=ten_days_ago)
        if recent_closed_recurring_orders.count() > 0:
            skipped_recurring_payments.append(SkippedRecurringPayment(recurring_payment, list(recent_closed_recurring_orders)))
            logger.warn(
                "Skipping '{0}' because it looks like it has been processed recently with one of these Orders:".format(
                    recurring_payment))
            for closed_order in recent_closed_recurring_orders:
                logger.warn(" Order Number: {0}".format(closed_order.order_number))
            continue

        # Check if there is a monthly shopping cart (Order status is 'recurring') for this recurring_payment user.
        try:
            recurring_order = Order.objects.get(user=recurring_payment.user, status=OrderStatuses.recurring)
            logger.debug("Using existing recurring Order for user: {0}.".format(recurring_payment.user))
        except Order.DoesNotExist:
            # There is no monthly shopping cart. The user is supporting the top three projects so we need to create an
            # Order with Donations for the top three projects.
            logger.debug("Creating new 'Top Three' recurring Order for user {0}.".format(recurring_payment.user))
            recurring_order = create_recurring_order(recurring_payment.user, top_three_projects)
            top_three_donation = True
        except Order.MultipleObjectsReturned:
            error_message = "Multiple Orders with status 'recurring' returned for '{0}'. Not processing this recurring donation.".format(
                recurring_payment)
            logger.error(error_message)
            recurring_donation_errors.append(RecurringDonationError(recurring_payment, error_message))
            continue

        # Check if we're above the DocData minimum for direct debit.
        if recurring_payment.amount < 113:
            # Cleanup the Order if there's an error.
            if top_three_donation:
                recurring_order.delete()

            error_message = "Payment amount for '{0}' is less than the DocData minimum for direct debit (113). Skipping.".format(
                recurring_payment)
            logger.error(error_message)
            recurring_donation_errors.append(RecurringDonationError(recurring_payment, error_message))
            continue

        # Remove donations to projects that are no longer in the campaign phase.
        for donation in recurring_order.donations:
            project = Project.objects.get(id=donation.project.id)
            if project.phase != ProjectPhases.campaign:
                ctype = ContentType.objects.get_for_model(donation)
                order_item = OrderItem.objects.get(object_id=donation.id, content_type=ctype)
                order_item.delete()
                donation.delete()

        if recurring_order.donations.count() > 0:
            # There are donations in the recurring Order and we need to redistribute / correct the donation amounts.
            # Save a copy of the projects that have been selected by the user so that the monthly shopping cart can be
            # recreated after the payment has been successfully started.
            for donation in recurring_order.donations:
                user_selected_projects.append(donation.project)
            correct_donation_amounts(popular_projects_all, recurring_order, recurring_payment)
        else:
            # There are no donations in the recurring Order so we need to create a monthly shopping cart to support the
            # top three projects and redistribute / correct the donation amounts.
            create_recurring_order(recurring_payment.user, top_three_projects, recurring_order)
            if recurring_order.donations.count() == 0:
                logger.debug("The top three donations are full. Using next three projects for top three.")
                top_three_projects = popular_projects_rest[:3]
                popular_projects_rest = popular_projects_rest[3:]
                create_recurring_order(recurring_payment.user, top_three_projects, recurring_order)
            correct_donation_amounts(popular_projects_rest, recurring_order, recurring_payment)
            top_three_donation = True

        # At this point the order should be correctly set up and ready for the DocData payment.
        if top_three_donation:
            donation_type_message = "supporting the 'Top Three' projects"
        else:
            donation_type_message = "with {0} donations".format(recurring_order.donations.count())
        logger.info("Starting payment for '{0}' {1}.".format(recurring_payment, donation_type_message))

        # Safety check to ensure the modifications to the donations in the recurring result in an Order total that
        # matches the RecurringDirectDebitPayment.
        if recurring_payment.amount != recurring_order.total:
            # Cleanup the Order if there's an error.
            if top_three_donation:
                recurring_order.delete()

            error_message = "RecurringDirectDebitPayment amount: {0} does not equal recurring Order amount: {1} for '{2}'. Not processing this recurring donation.".format(
                recurring_payment.amount, recurring_order.total, recurring_payment)
            logger.error(error_message)
            recurring_donation_errors.append(RecurringDonationError(recurring_payment, error_message))
            continue

        # Check if the IBAN / BIC is stored correctly on the RecurringDirectDebitPayment.
        if recurring_payment.iban == '' or recurring_payment.bic == '' or \
                not recurring_payment.iban.endswith(recurring_payment.account) or \
                recurring_payment.bic[:4] != recurring_payment.iban[4:8]:
            # Cleanup the Order if there's an error.
            if top_three_donation:
                recurring_order.delete()

            error_message = "Cannot create payment because the IBAN and/or BIC are not available."
            logger.error(error_message)
            recurring_donation_errors.append(RecurringDonationError(recurring_payment, error_message))
            continue

        # Create and fill in the DocDataPaymentOrder.
        payment = DocDataPaymentOrder()
        payment.order = recurring_order
        payment.payment_method_id = 'dd-webdirect'
        payment.amount = recurring_payment.amount
        payment.currency = recurring_payment.currency
        payment.customer_id = recurring_payment.user.id
        payment.email = recurring_payment.user.email

        # Use the recurring payment name (bank account name) to set the first and last name if they're not set.
        if not recurring_payment.user.first_name:
            if ' ' in recurring_payment.name:
                payment.first_name = recurring_payment.name.split(' ')[0]
            else:
                payment.first_name = recurring_payment.name
        else:
            payment.first_name = recurring_payment.user.first_name
        if not recurring_payment.user.last_name:
            if ' ' in recurring_payment.name:
                payment.last_name = recurring_payment.name[recurring_payment.name.index(' ') + 1:]
            else:
                payment.last_name = recurring_payment.name
        else:
            payment.last_name = recurring_payment.user.last_name

        # Try to use the address from the profile if it's set.
        address = recurring_payment.user.address
        if not address:
            # Cleanup the Order if there's an error.
            if top_three_donation:
                recurring_order.delete()

            error_message = "Cannot create a payment for '{0}' because user does not have an address set.".format(recurring_payment)
            logger.error(error_message)
            recurring_donation_errors.append(RecurringDonationError(recurring_payment, error_message))
            continue

        # Set a default value for the pieces of the address that we don't have.
        unknown_value = u'Unknown'
        if not address.line1:
            logger.warn("User '{0}' does not have their street and street number set. Using '{1}'.".format(recurring_payment.user, unknown_value))
            payment.address = unknown_value
        else:
            payment.address = address.line1
        if not address.city:
            logger.warn("User '{0}' does not have their city set. Using '{1}'.".format(recurring_payment.user, unknown_value))
            payment.city = unknown_value
        else:
            payment.city = address.city
        if not address.postal_code:
            logger.warn("User '{0}' does not have their postal code set. Using '{1}'.".format(recurring_payment.user, unknown_value))
            payment.postal_code = unknown_value
        else:
            payment.postal_code = address.postal_code

        # Assume the Netherlands when country not set.
        if address.country:
            payment.country = address.country.alpha2_code
        else:
            payment.country = 'NL'

        # Try to use the language from the User settings if it's set.
        if recurring_payment.user.primary_language:
            payment.language = recurring_payment.user.primary_language[:2]  # Cut off locale.
        else:
            payment.language = 'nl'
        payment.save()

        # Start the WebDirect payment.
        try:
            webdirect_payment_adapter.create_remote_payment_order(payment)
        except DocDataPaymentException as e:
            # Cleanup the Order if there's an error.
            if top_three_donation:
                recurring_order.delete()

            error_message = "Problem creating remote payment order."
            logger.error(error_message)
            recurring_donation_errors.append(
                RecurringDonationError(recurring_payment, "{0} {1}".format(error_message, e.message)))
            continue
        else:
            recurring_order.status = OrderStatuses.closed
            recurring_order.save()

        try:
            webdirect_payment_adapter.start_payment(payment, recurring_payment)
        except DocDataPaymentException as e:
            # Cleanup the Order if there's an error.
            if top_three_donation:
                recurring_order.delete()
            else:
                recurring_order.status = OrderStatuses.recurring
                recurring_order.save()

            error_message = "Problem starting payment."
            logger.error(error_message)
            recurring_donation_errors.append(
                RecurringDonationError(recurring_payment, "{0} {1}".format(error_message, e.message)))
            continue

        logger.debug("Payment for '{0}' started.".format(recurring_payment))
        donation_count += 1

        # Send an email to the user.
        if send_email:
            mail_monthly_donation_processed_notification(recurring_payment, recurring_order)

        # Create a new recurring Order (monthly shopping cart) for donations that are not to the 'Top Three'.
        if not top_three_donation and len(user_selected_projects) > 0:
            new_recurring_order = create_recurring_order(recurring_payment.user, user_selected_projects)

            # Adjust donation amounts in a simple way for the recurring Order (the monthly donations shopping cart).
            num_donations = new_recurring_order.donations.count()
            amount_per_project = math.floor(recurring_payment.amount / num_donations)
            donations = new_recurring_order.donations
            for i in range(0, num_donations - 1):
                donation = donations[i]
                donation.amount = amount_per_project
                donation.donation_type = Donation.DonationTypes.recurring
                donation.save()

            # Update the last donation with the remaining amount.
            donation = donations[num_donations - 1]
            donation.amount = recurring_payment.amount - (amount_per_project * (num_donations - 1))
            donation.donation_type = Donation.DonationTypes.recurring
            donation.save()

        set_order_created_datetime(recurring_order, order_created_datetime)

    logger.info("")
    logger.info("Recurring Donation Processing Summary")
    logger.info("=====================================")
    logger.info("")
    logger.info("Total number of recurring donations: {0}".format(recurring_payments_queryset.count()))
    logger.info("Number of recurring Orders successfully processed: {0}".format(donation_count))
    logger.info("Number of errors: {0}".format(len(recurring_donation_errors)))
    logger.info("Number of skipped payments: {0}".format(len(skipped_recurring_payments)))

    if len(recurring_donation_errors) > 0:
        logger.info("")
        logger.info("")
        logger.info("Detailed Error List")
        logger.info("===================")
        logger.info("")
        for error in recurring_donation_errors:
            logger.info("RecurringDirectDebitPayment: {0} {1}".format(error.recurring_payment.id, error.recurring_payment))
            logger.info("Error: {0}".format(error.error_message))
            logger.info("--")

    if len(skipped_recurring_payments) > 0:
        logger.info("")
        logger.info("")
        logger.info("Skipped Recurring Payments")
        logger.info("==========================")
        logger.info("")
        for skipped_payment in skipped_recurring_payments:
            logger.info("RecurringDirectDebitPayment: {0} {1}".format(skipped_payment.recurring_payment.id, skipped_payment.recurring_payment))
            for closed_order in skipped_payment.orders:
                logger.info("Order Number: {0}".format(closed_order.order_number))
            logger.info("--")
49.564417
171
0.67562
2,449
0.101044
0
0
0
0
0
0
6,742
0.27817
29c3750914f24305e5c021af40b18b30bd0ff4d0
5,387
py
Python
information_extraction/Preprocessor.py
shatha2014/Fashion_Rec
5f4dd4f1c7c2d18a9364b02f1798125c259e6598
[ "BSD-2-Clause" ]
11
2018-08-30T10:52:35.000Z
2021-11-08T06:04:22.000Z
information_extraction/Preprocessor.py
shatha2014/Fashion_Rec
5f4dd4f1c7c2d18a9364b02f1798125c259e6598
[ "BSD-2-Clause" ]
1
2020-09-08T19:53:48.000Z
2021-11-08T13:29:42.000Z
information_extraction/Preprocessor.py
shatha2014/Fashion_Rec
5f4dd4f1c7c2d18a9364b02f1798125c259e6598
[ "BSD-2-Clause" ]
8
2018-08-30T10:52:37.000Z
2022-02-20T09:13:40.000Z
# Author: Kim Hammar <[email protected]> KTH 2018
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import TweetTokenizer
from nltk.tag.perceptron import PerceptronTagger
import nltk
import emoji

nltk.download('averaged_perceptron_tagger')
nltk.download('stopwords')
nltk.download('wordnet')


class PreProcessor(object):
    """
    Preprocessor module in the Information Extraction Process of Fashion Related Properties of Instagram posts.
    Performs text normalization and parsing.
    """

    # Class variables shared by all instances
    tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
    wordnet_lemmatizer = WordNetLemmatizer()
    stop_words = set(stopwords.words('english'))
    stop_words.update(['.', ',', '"', "'", '?', '!', ':', ';', '(', ')', '[', ']', '{', '}'])
    tagger = PerceptronTagger()

    def __init__(self, ids, comments, captions, tags):
        """ Class Constructor"""
        # Raw text
        self.raw_id = ids
        self.raw_comments = comments
        self.raw_captions = captions
        self.raw_tags = tags
        print("Read in Raw Text")

        # Preprocess raw text
        self.remove_non_unicode()
        self.lower_case()
        self.to_unicode()
        print("Normalized Raw Text")

        # Tokenize and preprocess tokens
        self.tokenize()
        print("Tokenized the text")
        self.remove_stopwords()
        #self.remove_urls()
        print("Normalized tokens")

        # Extract specific tokens
        self.lemmatize()
        print("Extracted lemmas")
        self.extract_emojis()
        print("Extracted emojis")
        self.extract_hashtags()
        print("Extracted hashtags")
        #self.pos_tag()
        #print("Extracted POS")

    def remove_non_unicode(self):
        """ Remove non-unicode tokens"""
        self.raw_comments = map(lambda x: x.decode('utf-8','ignore').encode("utf-8"), self.raw_comments)
        self.raw_captions = map(lambda x: x.decode('utf-8', 'ignore').encode("utf-8"), self.raw_captions)
        self.raw_tags = map(lambda x: x.decode('utf-8','ignore').encode("utf-8"), self.raw_tags)

    def to_unicode(self):
        """ Convert text to unicode """
        self.raw_comments = map(lambda x: x.decode('utf-8'), self.raw_comments)
        self.raw_captions = map(lambda x: x.decode('utf-8'), self.raw_captions)
        self.raw_tags = map(lambda x: x.decode('utf-8'), self.raw_tags)

    def tokenize(self):
        """ Tokenize text with TweetTokenizer, preserve emojis, hashtags etc """
        self.tokens_captions = [self.tknzr.tokenize(caption) for caption in self.raw_captions]
        self.tokens_comments = [self.tknzr.tokenize(comment) for comment in self.raw_comments]
        self.tokens_tags = [self.tknzr.tokenize(tag) for tag in self.raw_tags]
        self.tokens_all = []
        for i in range(len(self.raw_id)):
            self.tokens_all.append(self.tokens_captions[i] + self.tokens_comments[i] + self.tokens_tags[i])

    def lower_case(self):
        """ Convert raw text into lowercase"""
        self.raw_captions = [caption.lower() for caption in self.raw_captions]
        self.raw_comments = [comments.lower() for comments in self.raw_comments]
        self.raw_tags = [tags.lower() for tags in self.raw_tags]

    def lemmatize(self):
        """ Lemmatize tokens"""
        self.lemma_caption = [map(lambda x: self.wordnet_lemmatizer.lemmatize(x), caption) for caption in self.tokens_captions]
        self.lemma_comments = [map(lambda x: self.wordnet_lemmatizer.lemmatize(x), comments) for comments in self.tokens_comments]
        self.lemma_tags = [map(lambda x: self.wordnet_lemmatizer.lemmatize(x), tags) for tags in self.tokens_tags]
        self.lemma_all = [map(lambda x: self.wordnet_lemmatizer.lemmatize(x), tokens) for tokens in self.tokens_all]

    def remove_urls(self):
        """ Remove urls from tokens """
        self.tokens_captions = [filter(lambda x: "http" not in x, caption) for caption in self.tokens_captions]
        self.tokens_comments = [filter(lambda x: "http" not in x, comments) for comments in self.tokens_comments]
        self.tokens_tags = [filter(lambda x: "http" not in x, tags) for tags in self.tokens_tags]
        self.tokens_all = [filter(lambda x: "http" not in x, tokens) for tokens in self.tokens_all]

    def remove_stopwords(self):
        """ Remove stopwords from tokens """
        self.tokens_captions = [[token for token in caption if token not in self.stop_words] for caption in self.tokens_captions]
        self.tokens_comments = [[token for token in comments if token not in self.stop_words] for comments in self.tokens_comments]
        self.tokens_tags = [[token for token in tags if token not in self.stop_words] for tags in self.tokens_tags]
        self.tokens_all = [[token for token in tokens if token not in self.stop_words] for tokens in self.tokens_all]

    def extract_emojis(self):
        """ Extract emojis """
        self.emojis = [[c for c in tokens if c in emoji.UNICODE_EMOJI] for tokens in self.tokens_all]

    def extract_hashtags(self):
        """ Extract hashtags """
        self.hashtags = [[x for x in tokens if x.startswith("#")] for tokens in self.tokens_all]

    def pos_tag(self):
        """ Extract POS tags """
        self.pos_tokens = [self.tagger.tag(tokens) for tokens in self.tokens_all]
45.652542
131
0.667347
5,053
0.937999
0
0
0
0
0
0
1,103
0.204752
29c3fd69e8b7142e1bb7b65ea92363c60fad4735
47
py
Python
searl/__init__.py
automl/SEARL
bac75d8c9540ff4f0b5b340c612ec384b189bd84
[ "Apache-2.0" ]
25
2021-03-10T09:10:53.000Z
2022-03-28T09:11:16.000Z
searl/__init__.py
automl/SEARL
bac75d8c9540ff4f0b5b340c612ec384b189bd84
[ "Apache-2.0" ]
null
null
null
searl/__init__.py
automl/SEARL
bac75d8c9540ff4f0b5b340c612ec384b189bd84
[ "Apache-2.0" ]
4
2021-03-17T15:00:02.000Z
2021-07-24T13:35:39.000Z
from .utils.handler.config import ConfigHandler
47
47
0.87234
0
0
0
0
0
0
0
0
0
0
29c4a45e5143815cb47c3724fcaecb30960fac72
475
py
Python
src/kotify/fabric/procfile.py
kotify/kotify.fabric
5ce50a38210217f643c81438b53466b60fc16cb1
[ "MIT" ]
null
null
null
src/kotify/fabric/procfile.py
kotify/kotify.fabric
5ce50a38210217f643c81438b53466b60fc16cb1
[ "MIT" ]
null
null
null
src/kotify/fabric/procfile.py
kotify/kotify.fabric
5ce50a38210217f643c81438b53466b60fc16cb1
[ "MIT" ]
null
null
null
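# Fabric tasks that start Procfile-managed processes through overmind.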
from ._core import Collection, local, task


@task(name="main", default=True)
def start_main(c):
    local(f"overmind start -l {','.join(c.start.main + c.start.minimal)}", pty=True)


@task(name="minimal")
def start_minimal(c):
    local(f"overmind start -l {','.join(c.start.minimal)}", pty=True)


@task(name="all")
def start_all(c):
    local("overmind start", pty=True)


ns = Collection("start")
ns.add_task(start_all)
ns.add_task(start_main)
ns.add_task(start_minimal)
20.652174
84
0.692632
0
0
0
0
322
0.677895
0
0
154
0.324211
29c698fcf98da3c177cd1347dd70acef351370fb
888
py
Python
backend/src/feature_extraction/rolloff.py
AnXi-TieGuanYin-Tea/MusicGenreClassifiaction
a0b9f621b0a5d2451180b12af7681756c5abd138
[ "MIT" ]
7
2018-05-01T19:39:17.000Z
2020-01-02T17:11:05.000Z
backend/src/feature_extraction/rolloff.py
AnXi-TieGuanYin-Tea/MusicGenreClassifiaction
a0b9f621b0a5d2451180b12af7681756c5abd138
[ "MIT" ]
10
2018-12-10T22:16:43.000Z
2020-08-27T18:23:45.000Z
backend/src/feature_extraction/rolloff.py
AnXi-TieGuanYin-Tea/MusicGenreClassifiaction
a0b9f621b0a5d2451180b12af7681756c5abd138
[ "MIT" ]
2
2021-04-16T08:20:17.000Z
2022-01-06T14:06:44.000Z
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 17 23:14:28 2018
@author: Akihiro Inui
"""


def rolloff(input_power_spectrum: list, param: float = 0.85) -> float:
    """
    Spectral Rolloff
    :param  input_power_spectrum: power spectrum in list
    :param  param: threshold for roll off
    :return Spectral Rolloff
    """
    assert (param <= 0 or param >= 1) is False, "parameter must be between 0 and 1"

    # Initialize energy and FFT number
    energy = 0
    count = 0

    # Calculate total energy
    total_energy = sum(input_power_spectrum[:]**2)

    # Find Count which has energy below param*total_energy
    while energy <= param*total_energy and count < len(input_power_spectrum):
        energy = pow(input_power_spectrum[count], 2) + energy
        count += 1

    # Normalise Spectral Rolloff
    return count/len(input_power_spectrum)
28.645161
83
0.667793
0
0
0
0
0
0
0
0
448
0.504505
29c6aa30b18efa3ef99f8685007919d2bfcf3019
112
py
Python
webStorm-APICloud/python_tools/Tools/Scripts/2to3.py
zzr925028429/androidyianyan
8967fdba92473e8e65ee222515dfc54cdae5bb0b
[ "MIT" ]
null
null
null
webStorm-APICloud/python_tools/Tools/Scripts/2to3.py
zzr925028429/androidyianyan
8967fdba92473e8e65ee222515dfc54cdae5bb0b
[ "MIT" ]
null
null
null
webStorm-APICloud/python_tools/Tools/Scripts/2to3.py
zzr925028429/androidyianyan
8967fdba92473e8e65ee222515dfc54cdae5bb0b
[ "MIT" ]
null
null
null
#!/usr/bin/env python
from lib2to3.main import main
import sys
import os

sys.exit(main("lib2to3.fixes"))
16
32
0.714286
0
0
0
0
0
0
0
0
37
0.330357
29c79f364e1d41c68e19d472b3c1d55bd0b5b9e5
1,070
py
Python
afterglow_core/schemas/api/v1/jobs/field_cal_job.py
SkynetRTN/afterglow-access-server
3d8d62f622577fdd1ae7b0076cb536251f7bf0cd
[ "Apache-2.0" ]
2
2021-05-24T15:12:07.000Z
2022-02-17T19:58:16.000Z
afterglow_core/schemas/api/v1/jobs/field_cal_job.py
SkynetRTN/afterglow-access-server
3d8d62f622577fdd1ae7b0076cb536251f7bf0cd
[ "Apache-2.0" ]
1
2022-02-27T03:01:06.000Z
2022-02-27T03:01:06.000Z
afterglow_core/schemas/api/v1/jobs/field_cal_job.py
SkynetRTN/afterglow-access-server
3d8d62f622577fdd1ae7b0076cb536251f7bf0cd
[ "Apache-2.0" ]
2
2021-06-08T18:16:40.000Z
2021-07-09T14:19:49.000Z
""" Afterglow Core: photometric calibration job schemas """ from typing import List as TList from marshmallow.fields import Integer, List, Nested from ..job import JobSchema, JobResultSchema from ..field_cal import FieldCalSchema, FieldCalResultSchema from ..photometry import PhotSettingsSchema from .source_extraction_job import SourceExtractionSettingsSchema __all__ = ['FieldCalJobResultSchema', 'FieldCalJobSchema'] class FieldCalJobResultSchema(JobResultSchema): data: TList[FieldCalResultSchema] = List( Nested(FieldCalResultSchema), default=[]) class FieldCalJobSchema(JobSchema): type = 'field_cal' result: FieldCalJobResultSchema = Nested( FieldCalJobResultSchema, default={}) file_ids: TList[int] = List(Integer(), default=[]) field_cal: FieldCalSchema = Nested(FieldCalSchema, default={}) source_extraction_settings: SourceExtractionSettingsSchema = Nested( SourceExtractionSettingsSchema, default=None) photometry_settings: PhotSettingsSchema = Nested( PhotSettingsSchema, default=None)
31.470588
72
0.774766
638
0.596262
0
0
0
0
0
0
114
0.106542
29c7ff7b0f45d2d5b8a537d89fbcc9e55ee8907c
2,692
py
Python
Python/addRow.py
alexwu2021/practice
ff786d4d16afdef3e031002d22b58a976c8ed16b
[ "MIT" ]
null
null
null
Python/addRow.py
alexwu2021/practice
ff786d4d16afdef3e031002d22b58a976c8ed16b
[ "MIT" ]
1
2021-11-22T05:54:33.000Z
2021-11-22T05:54:33.000Z
Python/addRow.py
alexwu2021/practice
ff786d4d16afdef3e031002d22b58a976c8ed16b
[ "MIT" ]
null
null
null
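# Exercise: insert a row of nodes with value v at depth d of a binary tree, with assorted tree helpers and a small test at module level.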
#import unittest

def addRow(r, d, v):
    dmo = []
    getHeightAndMore(r, 0, dmo, d)
    if len(dmo) <= 0:
        print ('no way to add row for no d-1 nodes found')
        return
    print('dmo has %d' % len(dmo))
    print('dmo: %s' % ','.join([str(x.val) for x in dmo]))
    for n in dmo:
        left, right = Node(v), Node(v)
        left.left = n.left
        n.left = left
        right.right = n.right
        n.right = right

def getHeightAndMore(r, h, dmo, d):
    h += 1
    if d == h:
        dmo.append(r)
    if r.left != None:
        getHeightAndMore(r.left, h, dmo, d)
    if r.right != None:
        getHeightAndMore(r.right, h, dmo, d)

class Node:
    def __init__(self, val):
        self.left = None
        self.right = None
        self.val = val

    def __repr__(self):
        msg = 'Node({self.val})'.format(self=self)
        return msg

def insertIntoBinaryTreeWithAGivenIntArray(root, intArray):
    n = len(intArray)
    if n <= 0:
        return
    root = Node(intArray[0])
    if n == 1:
        return
    nodeArray = [root]
    i = 1
    while i < n:
        temp = Node(intArray[i])
        if i % 2 == 0:
            parentIndex = (i - 2) // 2
            nodeArray[parentIndex].right = temp
        else:
            parentIndex = (i - 1) // 2
            nodeArray[parentIndex].left = temp
        nodeArray.append(temp)
        i += 1
    for n in nodeArray:
        print('content: %s' % (n))

def binary_insert(root, node):
    if root is None:
        root = node
    else:
        if root.val > node.val:
            if root.left is None:
                root.left = node
            else:
                binary_insert(root.left, node)
        else:
            if root.right is None:
                root.right = node
            else:
                binary_insert(root.right, node)

def in_order_print(root):
    if not root:
        return
    in_order_print(root.left)
    print (root.val)
    in_order_print(root.right)

def pre_order_print(root):
    if not root:
        return
    print ('%s left:%s right:%s' % (str(root.val), str(root.left.val if root.left != None else ''), str(root.right.val if root.right != None else '')))
    pre_order_print(root.left)
    pre_order_print(root.right)

#case 1
t = Node(4)
#binary_insert(t, Node(2))
#binary_insert(t, Node(7))
#binary_insert(t, Node(3))
#binary_insert(t, Node(6))
#binary_insert(t, Node(2))
#binary_insert(t, Node(5))
#insertIntoBinaryTreeWithAGivenIntArray(t, [4, 2, 6, 3, 1, 5])
t.left = Node(2)
t.right = Node(6)
t.left.left = Node(3)
t.left.right = Node(1)
t.right.left = Node(5)

pre_order_print(t)
d = 2
v = 99
addRow(t, d, v)
pre_order_print(t)
#in_order_print(t)
#case 2
23.206897
154
0.556092
208
0.077266
0
0
0
0
0
0
389
0.144502
29c8162014a517194fd9f41815841a6c8677d84e
4,458
py
Python
src/genie/libs/parser/iosxe/tests/ShowIpInterfaceBrief/cli/equal/golden_expected.py
balmasea/genieparser
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
[ "Apache-2.0" ]
204
2018-06-27T00:55:27.000Z
2022-03-06T21:12:18.000Z
src/genie/libs/parser/iosxe/tests/ShowIpInterfaceBrief/cli/equal/golden_expected.py
balmasea/genieparser
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
[ "Apache-2.0" ]
468
2018-06-19T00:33:18.000Z
2022-03-31T23:23:35.000Z
src/genie/libs/parser/iosxe/tests/ShowIpInterfaceBrief/cli/equal/golden_expected.py
balmasea/genieparser
d1e71a96dfb081e0a8591707b9d4872decd5d9d3
[ "Apache-2.0" ]
309
2019-01-16T20:21:07.000Z
2022-03-30T12:56:41.000Z
expected_output = {
    "interface": {
        "GigabitEthernet0/0/0": {
            "interface_is_ok": "YES",
            "ip_address": "10.105.44.23",
            "method": "other",
            "protocol": "up",
            "status": "up"
        },
        "GigabitEthernet0/0/1": {
            "interface_is_ok": "YES",
            "ip_address": "10.174.10.1",
            "method": "other",
            "protocol": "up",
            "status": "up"
        },
        "GigabitEthernet0/0/2": {
            "interface_is_ok": "YES",
            "ip_address": "10.64.10.1",
            "method": "other",
            "protocol": "up",
            "status": "up"
        },
        "GigabitEthernet0/0/3": {
            "interface_is_ok": "YES",
            "ip_address": "10.186.10.1",
            "method": "other",
            "protocol": "up",
            "status": "up"
        },
        "Loopback65528": {
            "interface_is_ok": "YES",
            "ip_address": "192.168.1.1",
            "method": "other",
            "protocol": "up",
            "status": "up"
        },
        "NVI0": {
            "interface_is_ok": "YES",
            "ip_address": "unassigned",
            "method": "unset",
            "protocol": "up",
            "status": "up"
        },
        "Sdwan-system-intf": {
            "interface_is_ok": "YES",
            "ip_address": "192.168.10.64",
            "method": "unset",
            "protocol": "up",
            "status": "up"
        },
        "TenGigabitEthernet0/0/4": {
            "interface_is_ok": "YES",
            "ip_address": "10.121.10.1",
            "method": "other",
            "protocol": "up",
            "status": "up"
        },
        "TenGigabitEthernet0/0/5": {
            "interface_is_ok": "YES",
            "ip_address": "unassigned",
            "method": "unset",
            "protocol": "down",
            "status": "down"
        },
        "Tunnel1": {
            "interface_is_ok": "YES",
            "ip_address": "10.174.10.1",
            "method": "TFTP",
            "protocol": "up",
            "status": "up"
        },
        "Tunnel2000000001": {
            "interface_is_ok": "YES",
            "ip_address": "192.168.2.1",
            "method": "unset",
            "protocol": "up",
            "status": "up"
        },
        "Tunnel2000000002": {
            "interface_is_ok": "YES",
            "ip_address": "192.168.166.1",
            "method": "unset",
            "protocol": "up",
            "status": "up"
        },
        "Tunnel3": {
            "interface_is_ok": "YES",
            "ip_address": "10.186.10.1",
            "method": "TFTP",
            "protocol": "up",
            "status": "up"
        },
        "TwoGigabitEthernet0/1/0": {
            "interface_is_ok": "YES",
            "ip_address": "unassigned",
            "method": "unset",
            "protocol": "up",
            "status": "up"
        },
        "TwoGigabitEthernet0/2/0": {
            "interface_is_ok": "YES",
            "ip_address": "unassigned",
            "method": "unset",
            "protocol": "up",
            "status": "up"
        },
        "Ucse1/0/0": {
            "interface_is_ok": "YES",
            "ip_address": "10.19.14.1",
            "method": "other",
            "protocol": "down",
            "status": "administratively down"
        },
        "Ucse1/0/1": {
            "interface_is_ok": "YES",
            "ip_address": "unassigned",
            "method": "unset",
            "protocol": "down",
            "status": "administratively down"
        },
        "VirtualPortGroup0": {
            "interface_is_ok": "YES",
            "ip_address": "192.0.2.1",
            "method": "other",
            "protocol": "up",
            "status": "up"
        },
        "VirtualPortGroup1": {
            "interface_is_ok": "YES",
            "ip_address": "192.168.2.1",
            "method": "other",
            "protocol": "up",
            "status": "up"
        },
        "VirtualPortGroup3": {
            "interface_is_ok": "YES",
            "ip_address": "192.168.3.1",
            "method": "other",
            "protocol": "up",
            "status": "up"
        },
        "VirtualPortGroup4": {
            "interface_is_ok": "YES",
            "ip_address": "192.168.166.1",
            "method": "other",
            "protocol": "up",
            "status": "up"
        }
    }
}
29.328947
45
0.394796
0
0
0
0
0
0
0
0
2,281
0.511664
29c8dfdb3c65c5e9847d8ee2d3b8fe9a5f54498a
1,000
py
Python
ssh.py
telkomdev/keris
8451f3d69df174e33003e90e4fd70f602412412a
[ "MIT" ]
1
2020-02-11T16:10:11.000Z
2020-02-11T16:10:11.000Z
ssh.py
telkomdev/keris
8451f3d69df174e33003e90e4fd70f602412412a
[ "MIT" ]
null
null
null
ssh.py
telkomdev/keris
8451f3d69df174e33003e90e4fd70f602412412a
[ "MIT" ]
null
null
null
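# Attempt an SSH login with the given credentials, run a command, and return a status string.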
from common import is_connection_ok
import paramiko

"""
execute_ssh(host, port, username, password, cmd)
"""


def execute_ssh(host, username, password, cmd, port='22'):
    if is_connection_ok():
        try:
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(hostname=host, port=int(port), username=username, password=password)

            _, stdout, stderr = ssh.exec_command(cmd, timeout=5)
            res = stdout.read().decode()
            error = stderr.read().decode('utf-8')

            if error:
                print(error)
                return 'SSH_CONNECTION_FAIL'
            else:
                ssh.close()
                return 'SSH_CONNECTION_SUCCESS with username : {username} and password {password}'.format(username=username, password=password)
        except Exception:
            print('*')
            return 'SSH_CONNECTION_FAIL'
    else:
        return 'CONNECTION_NOT_FOUND'
35.714286
143
0.601
0
0
0
0
0
0
0
0
209
0.209
29ca0af350d167975f57568f8d8d244098802dd2
376
py
Python
novel/spider/config.py
rrcgat/novel-info
fcda24f9f6da5a4755e942a520045b7b5a53bef4
[ "MIT" ]
4
2019-04-02T09:44:18.000Z
2020-04-15T11:47:49.000Z
novel/spider/config.py
rrcgat/novel-info
fcda24f9f6da5a4755e942a520045b7b5a53bef4
[ "MIT" ]
1
2019-03-04T17:20:39.000Z
2019-03-04T17:48:18.000Z
novel/spider/config.py
rrcgat/novel-info
fcda24f9f6da5a4755e942a520045b7b5a53bef4
[ "MIT" ]
1
2020-04-15T11:47:50.000Z
2020-04-15T11:47:50.000Z
'''
Request headers
'''

HEADERS_IPHONE = {'user-agent': (
    'Mozilla/5.0 '
    '(iPhone; CPU iPhone OS 6_0 like Mac OS X) '
    'AppleWebKit/536.26 (KHTML, like Gecko) '
    'Version/6.0 Mobile/10A5376e Safari/8536.25'
)}

HEADERS_CHROME = {'user-agent': (
    'Mozilla/5.0 (X11; Linux x86_64) '
    'AppleWebKit/537.36 (KHTML, like Gecko) '
    'Chrome/67.0.3396.99 Safari/537.36'
)}
22.117647
48
0.617021
0
0
0
0
0
0
0
0
294
0.769634
29cb4ed39bb073f7561e68074f27a72bbc5b7c7c
7,167
py
Python
tests/test_editor_common.py
jpfxgood/ped
f753ca27e4462c321ed28f00e1ef47fbde62990e
[ "MIT" ]
null
null
null
tests/test_editor_common.py
jpfxgood/ped
f753ca27e4462c321ed28f00e1ef47fbde62990e
[ "MIT" ]
21
2020-07-03T13:14:15.000Z
2020-07-14T14:27:43.000Z
tests/test_editor_common.py
jpfxgood/ped
f753ca27e4462c321ed28f00e1ef47fbde62990e
[ "MIT" ]
null
null
null
from ped_core import editor_common
import io
import pprint
import os
import curses
import curses.ascii
import time
import re
from ped_core import keymap
from ped_core import keytab
from ped_core import clipboard
from ped_test_util import read_str, match_attr, undo_all, window_pos, play_macro, validate_mark, validate_screen, editor_test_suite
import subprocess

def test_memline():
    m = editor_common.MemLine( "01234567890123456789" )
    assert( m.length() == 20 )
    assert( m.getContent() == "01234567890123456789" )

def test_EditFile(testdir):
    lines_to_test = ["This is the first line","This is the second line","This is the third line","This is the last line"]
    testfile = testdir.makefile(".txt",lines_to_test[0],lines_to_test[1],lines_to_test[2],lines_to_test[3])
    fn = str(testfile)
    ef = editor_common.EditFile( fn )
    assert(ef.get_tabs() == [ 4, 8 ] )
    ef.set_tabs( [ 8, 16] )
    assert(ef.get_tabs() == [ 8, 16 ] )
    w = ef.getWorking()
    assert( not w.closed )
    assert( ef.getModref() == 0)
    assert( isinstance(ef.getUndoMgr(), editor_common.undo.UndoManager ))
    assert( not ef.isChanged() )
    assert( not ef.isReadOnly() )
    ef.setReadOnly( True )
    assert( ef.isReadOnly() )
    ef.setReadOnly( False )
    assert( not ef.isReadOnly() )
    assert( ef.getFilename() == fn )
    ef.setFilename( "bogus.txt" )
    assert( ef.getFilename() == "bogus.txt" )
    ef.setFilename( fn )
    assert( ef.getFilename() == fn )
    fls = ef.getLines()
    assert( ef.numLines() == 4 )
    ef.close()
    assert( ef.getWorking() == None )
    ef.load()
    w = ef.getWorking()
    assert( not w.closed )
    for line in range(0,len(lines_to_test)):
        assert(ef.length(line) == len(lines_to_test[line]))
        fl = ef.getLine(line)
        assert(fl.rstrip() == lines_to_test[line])
        assert(fls[line].rstrip() == lines_to_test[line])
    fls = ef.getLines(1,3)
    assert(len(fls) == 2 )
    assert(fls[0].rstrip() == lines_to_test[1] and fls[1].rstrip() == lines_to_test[2])
    ef.deleteLine(1)
    fls = ef.getLines()
    assert(fls[0].rstrip() == lines_to_test[0] and fls[1].rstrip() == lines_to_test[2] and fls[2].rstrip() == lines_to_test[3] )
    assert(ef.numLines() == 3 )
    assert(ef.getModref() == 1 )
    assert(ef.isChanged() )
    um = ef.getUndoMgr()
    um.undo_transaction()
    fls = ef.getLines()
    for line in range(0,len(lines_to_test)):
        assert(fls[line].rstrip() == lines_to_test[line])
    assert(ef.numLines() == 4)
    assert(ef.getModref() == 2)
    assert(not ef.isChanged() )
    new_test_line = "This is the line for insert"
    ef.insertLine(2,new_test_line)
    fls = ef.getLines()
    assert(fls[0].rstrip() == lines_to_test[0] and fls[1].rstrip() == lines_to_test[1] and fls[2].rstrip() == new_test_line and fls[3].rstrip() == lines_to_test[2] and fls[4].rstrip() == lines_to_test[3] )
    assert(ef.numLines() == 5 )
    assert(ef.getModref() == 3 )
    assert(ef.isChanged() )
    um = ef.getUndoMgr()
    um.undo_transaction()
    fls = ef.getLines()
    for line in range(0,len(lines_to_test)):
        assert(fls[line].rstrip() == lines_to_test[line])
    assert(ef.numLines() == 4)
    assert(ef.getModref() == 4)
    assert(not ef.isChanged() )
    ef.replaceLine(3,new_test_line)
    fls = ef.getLines()
    assert(fls[0].rstrip() == lines_to_test[0] and fls[1].rstrip() == lines_to_test[1] and fls[2].rstrip() == lines_to_test[2] and fls[3].rstrip() == new_test_line )
    assert(ef.numLines() == 4 )
    assert(ef.getModref() == 5 )
    assert(ef.isChanged() )
    um = ef.getUndoMgr()
    um.undo_transaction()
    fls = ef.getLines()
    for line in range(0,len(lines_to_test)):
        assert(fls[line].rstrip() == lines_to_test[line])
    assert(ef.numLines() == 4)
    assert(ef.getModref() == 6)
    assert(not ef.isChanged() )
    fd = str(testdir.tmpdir)
    backup_filepath = ef.make_backup_dir( fn, fd )
    assert(os.path.exists(os.path.dirname(backup_filepath)))
    ef.insertLine(10,new_test_line)
    ef.backuproot = fd
    ef.save()
    assert(os.path.exists(backup_filepath))
    fls = ef.getLines()
    for line in range(0,len(lines_to_test)):
        assert(fls[line].rstrip() == lines_to_test[line])
    assert(fls[10].rstrip() == new_test_line)
    newname = os.path.join(fd,"1_"+os.path.basename(fn))
    ef.save(newname)
    assert(os.path.exists(newname))
    ef.close()
    ef.load()
    assert(ef.getFilename() == newname)
    fls = ef.getLines()
    for line in range(0,len(lines_to_test)):
        assert(fls[line].rstrip() == lines_to_test[line])
    assert(fls[10].rstrip() == new_test_line)
    assert(ef.get_tab_stop(4) == 8)
    assert(ef.get_tab_stop(10) == 16 )
    assert(ef.get_tab_stop(10,True) == 8)
    tabby_string = "01234\t56789012\t3456789"
    expanded_string = "01234   56789012        3456789"
    assert(ef.expand_tabs(tabby_string) == expanded_string)

def test_Editor_unwrapped(testdir,capsys):
    with capsys.disabled():
        curses.wrapper(editor_test_suite,testdir,False,None)

def test_Editor_wrapped(testdir,capsys):
    with capsys.disabled():
        curses.wrapper(editor_test_suite,testdir,True,None)

def test_StreamEditor(testdir,capsys):
    with capsys.disabled():
        def main(stdscr,testdir):
            max_y,max_x = stdscr.getmaxyx()
            generator_lines = [
                "for i in range(0,1000000):",
                "    print('Line %d of test file'%i)",
                ]
            generator_script = testdir.makepyfile(**{ "generator": "\n".join(generator_lines)})
            cmd = 'python3 %s'%str(generator_script)
            se = editor_common.StreamEditor(stdscr,stdscr.subwin(max_y,max_x,0,0),"Test Stream",subprocess.Popen(cmd, shell=True, bufsize=1024, encoding="utf-8", stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout)
            starting_num_lines = se.numLines()
            time.sleep(1)
            for i in range(0,100):
                se.main(False)
                assert(se.getContent(i) == 'Line %d of test file'%i)
            current_line = se.getLine()
            se.main(False,6) # ctrl-f
            for i in range(0,200):
                se.main(False)
            assert(se.follow == True and se.getLine() > current_line)
            se.main(False,6) # ctrl-f
            current_line = se.getLine()
            for i in range(0,200):
                se.main(False)
            assert(se.follow == False and se.getLine() == current_line)
            play_macro(se, [keytab.KEYTAB_ALTO,keytab.KEYTAB_TAB,keytab.KEYTAB_DOWN]+list("testout.out")+[keytab.KEYTAB_CR,keytab.KEYTAB_CR])
            assert(se.getFilename().endswith("testout.out") and os.path.exists(se.getFilename()))
            se.close()

        curses.wrapper(main,testdir)
40.954286
205
0.599972
0
0
0
0
0
0
0
0
435
0.060695
29cc2793a730a906cbbcc655e8b03fef329faada
227
py
Python
rsmtpd/response/smtp_501.py
alfmel/rsmtpd
10900876b1f83d6c141070a413f81edf3c98ac51
[ "Apache-2.0" ]
1
2017-06-12T04:10:07.000Z
2017-06-12T04:10:07.000Z
rsmtpd/response/smtp_501.py
alfmel/rsmtpd
10900876b1f83d6c141070a413f81edf3c98ac51
[ "Apache-2.0" ]
null
null
null
rsmtpd/response/smtp_501.py
alfmel/rsmtpd
10900876b1f83d6c141070a413f81edf3c98ac51
[ "Apache-2.0" ]
null
null
null
from rsmtpd.response.action import OK
from rsmtpd.response.base_response import BaseResponse


class SmtpResponse501(BaseResponse):
    _smtp_code = 501
    _message = "Syntax error in parameters or arguments"
    _action = OK
25.222222
56
0.779736
131
0.577093
0
0
0
0
0
0
41
0.180617
29cd522f460b996800fe0d9f2739255f875ef960
14,116
py
Python
qatrack/qatrack_core/tests/test_core.py
crcrewso/qatrackplus
b9da3bc542d9e3eca8b7291bb631d1c7255d528e
[ "MIT" ]
20
2021-03-11T18:37:32.000Z
2022-03-23T19:38:07.000Z
qatrack/qatrack_core/tests/test_core.py
crcrewso/qatrackplus
b9da3bc542d9e3eca8b7291bb631d1c7255d528e
[ "MIT" ]
75
2021-02-12T02:37:33.000Z
2022-03-29T20:56:16.000Z
qatrack/qatrack_core/tests/test_core.py
crcrewso/qatrackplus
b9da3bc542d9e3eca8b7291bb631d1c7255d528e
[ "MIT" ]
5
2021-04-07T15:46:53.000Z
2021-09-18T16:55:00.000Z
import datetime
import re

from django.contrib.sites.models import Site
from django.core import mail
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone

import numpy as np
import pandas as pd
import pytz

from qatrack.qa.tests import utils
from qatrack.qatrack_core.serializers import QATrackJSONEncoder
from qatrack.qatrack_core.utils import end_of_day, relative_dates, start_of_day


class TestLoginViews(TestCase):

    def test_password_reset(self):
        """Test full cycle of password reset process"""
        Site.objects.all().update(domain="")
        u = utils.create_user()
        self.client.post(reverse("password_reset"), {'email': u.email})
        assert "Password reset" in mail.outbox[0].subject
        url = re.search(r"(?P<url>https?://[^\s]+)", mail.outbox[0].body).group("url")
        resp = self.client.get(url)
        resp = self.client.post(
            resp.url, {
                'new_password1': '8P0Cut!v6XUr',
                'new_password2': '8P0Cut!v6XUr',
            },
            follow=True
        )
        assert "/accounts/reset/done/" in resp.redirect_chain[0]


class TestJSONEncoder:

    def test_np_int(self):
        enc = QATrackJSONEncoder()
        assert enc.default(np.int8(1)) == 1

    def test_np_array(self):
        enc = QATrackJSONEncoder()
        assert enc.default(np.array(range(3))) == [0, 1, 2]

    def test_range(self):
        enc = QATrackJSONEncoder()
        assert enc.default(range(3)) == [0, 1, 2]

    def test_zip(self):
        enc = QATrackJSONEncoder()
        assert enc.default(zip(range(3), range(3))) == [(0, 0), (1, 1), (2, 2)]

    def test_set(self):
        enc = QATrackJSONEncoder()
        assert set(enc.default(set(range(3)))) == set(range(3))

    def test_pd_df(self):
        enc = QATrackJSONEncoder()
        d = {'col1': [1, 2], 'col2': [3, 4]}
        df = pd.DataFrame(data=d)
        expected = {'col1': {0: 1, 1: 2}, 'col2': {0: 3, 1: 4}}
        assert enc.default(df) == expected

    def test_datetime(self):
        enc = QATrackJSONEncoder()
        tz = pytz.timezone("America/Toronto")
        dt = timezone.datetime(2020, 2, 29, 12, 34, 56, tzinfo=tz)
        assert enc.default(dt) == "29 Feb 2020 12:34:56"

    def test_date(self):
        enc = QATrackJSONEncoder()
        dt = datetime.date(2020, 2, 29)
        assert enc.default(dt) == "29 Feb 2020"


class TestRelativeDates:

    def setup_class(self):
        self.tz = pytz.timezone("America/Toronto")
        self.now = timezone.datetime(2020, 1, 2, 11, 38, tzinfo=self.tz)
        self.day_start = start_of_day(self.now)

    def test_next_7_days(self):
        r = relative_dates("next 7 days", self.now)
        end = end_of_day(timezone.datetime(2020, 1, 9, tzinfo=self.tz))
        assert r.start() == self.day_start
        assert r.end() == end

    def test_next_30_days(self):
        end = end_of_day(timezone.datetime(2020, 2, 1, tzinfo=self.tz))
        r = relative_dates("next 30 days", self.now)
        assert r.start() == self.day_start
        assert r.end() == end

    def test_next_365_days(self):
        end = end_of_day(timezone.datetime(2021, 1, 1, tzinfo=self.tz))
        r = relative_dates("next 365 days", self.now)
        assert r.start() == self.day_start
        assert r.end() == end

    def test_next_week(self):
        start = start_of_day(timezone.datetime(2020, 1, 5, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2020, 1, 11, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("next week", self.now)
        assert r.start() == start
        assert r.end() == end

    def test_next_week_sat(self):
        pivot = timezone.datetime(2020, 1, 11, 11, 38, tzinfo=pytz.timezone("America/Toronto"))
        start = start_of_day(timezone.datetime(2020, 1, 12, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2020, 1, 18, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("next week", pivot)
        assert r.start() == start
        assert r.end() == end

    def test_next_week_sun(self):
        pivot = timezone.datetime(2020, 1, 12, 11, 38, tzinfo=pytz.timezone("America/Toronto"))
        start = start_of_day(timezone.datetime(2020, 1, 19, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2020, 1, 25, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("next week", pivot)
        assert r.start() == start
        assert r.end() == end

    def test_next_month(self):
        start = start_of_day(timezone.datetime(2020, 2, 1, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2020, 2, 29, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("next month", self.now)
        assert r.start() == start
        assert r.end() == end

    def test_next_month_first_day(self):
        pivot = timezone.datetime(2020, 1, 1, 11, 38, tzinfo=pytz.timezone("America/Toronto"))
        start = start_of_day(timezone.datetime(2020, 2, 1, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2020, 2, 29, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("next month", pivot)
        assert r.start() == start
        assert r.end() == end

    def test_next_month_last_day(self):
        pivot = timezone.datetime(2020, 1, 31, 11, 38, tzinfo=pytz.timezone("America/Toronto"))
        start = start_of_day(timezone.datetime(2020, 2, 1, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2020, 2, 29, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("next month", pivot)
        assert r.start() == start
        assert r.end() == end

    def test_next_year(self):
        start = start_of_day(timezone.datetime(2021, 1, 1, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2021, 12, 31, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("next year", self.now)
        assert r.start() == start
        assert r.end() == end

    def test_this_week(self):
        start = start_of_day(timezone.datetime(2019, 12, 29, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2020, 1, 4, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("this week", self.now)
        assert r.start() == start
        assert r.end() == end

    def test_this_week_sat(self):
        pivot = timezone.datetime(2020, 1, 11, 11, 38, tzinfo=pytz.timezone("America/Toronto"))
        start = start_of_day(timezone.datetime(2020, 1, 5, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2020, 1, 11, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("this week", pivot)
        assert r.start() == start
        assert r.end() == end

    def test_this_week_sun(self):
        pivot = timezone.datetime(2020, 1, 5, 11, 38, tzinfo=pytz.timezone("America/Toronto"))
        start = start_of_day(timezone.datetime(2020, 1, 5, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2020, 1, 11, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("this week", pivot)
        assert r.start() == start
        assert r.end() == end

    def test_this_year(self):
        start = start_of_day(timezone.datetime(2020, 1, 1, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2020, 12, 31, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("this year", self.now)
        assert r.start() == start
        assert r.end() == end

    def test_this_year_jan_1(self):
        pivot = timezone.datetime(2020, 1, 1, 11, 38, tzinfo=pytz.timezone("America/Toronto"))
        start = start_of_day(timezone.datetime(2020, 1, 1, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2020, 12, 31, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("this year", pivot)
        assert r.start() == start
        assert r.end() == end

    def test_this_year_dec_31(self):
        pivot = timezone.datetime(2020, 12, 31, 11, 38, tzinfo=pytz.timezone("America/Toronto"))
        start = start_of_day(timezone.datetime(2020, 1, 1, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2020, 12, 31, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("this year", pivot)
        assert r.start() == start
        assert r.end() == end

    def test_last_7_days(self):
        start = start_of_day(timezone.datetime(2019, 12, 26, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2020, 1, 2, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("last 7 days", self.now)
        assert r.start() == start
        assert r.end() == end

    def test_last_30_days(self):
        start = start_of_day(timezone.datetime(2019, 12, 3, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2020, 1, 2, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("last 30 days", self.now)
        assert r.start() == start
        assert r.end() == end

    def test_last_365_days(self):
        start = start_of_day(timezone.datetime(2019, 1, 2, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2020, 1, 2, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("last 365 days", self.now)
        assert r.start() == start
        assert r.end() == end

    def test_this_month(self):
        start = start_of_day(timezone.datetime(2020, 1, 1, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2020, 1, 31, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("this month", self.now)
        assert r.start() == start
        assert r.end() == end

    def test_last_week(self):
        start = start_of_day(timezone.datetime(2019, 12, 22, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2019, 12, 28, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("last week", self.now)
        assert r.start() == start
        assert r.end() == end

    def test_last_week_sat(self):
        pivot = timezone.datetime(2020, 1, 4, 11, 38, tzinfo=pytz.timezone("America/Toronto"))
        start = start_of_day(timezone.datetime(2019, 12, 22, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2019, 12, 28, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("last week", pivot)
        assert r.start() == start
        assert r.end() == end

    def test_last_week_sun(self):
        pivot = timezone.datetime(2020, 1, 5, 11, 38, tzinfo=pytz.timezone("America/Toronto"))
        start = start_of_day(timezone.datetime(2019, 12, 29, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2020, 1, 4, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("last week", pivot)
        assert r.start() == start
        assert r.end() == end

    def test_last_month(self):
        start = start_of_day(timezone.datetime(2019, 12, 1, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2019, 12, 31, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("last month", self.now)
        assert r.start() == start
        assert r.end() == end

    def test_last_month_jan1(self):
        pivot = timezone.datetime(2020, 1, 1, 11, 38, tzinfo=pytz.timezone("America/Toronto"))
        start = start_of_day(timezone.datetime(2019, 12, 1, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2019, 12, 31, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("last month", pivot)
        assert r.start() == start
        assert r.end() == end

    def test_last_month_jan31(self):
        pivot = timezone.datetime(2020, 1, 31, 11, 38, tzinfo=pytz.timezone("America/Toronto"))
        start = start_of_day(timezone.datetime(2019, 12, 1, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2019, 12, 31, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("last month", pivot)
        assert r.start() == start
        assert r.end() == end

    def test_last_year(self):
        start = start_of_day(timezone.datetime(2019, 1, 1, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2019, 12, 31, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("last year", self.now)
        assert r.start() == start
        assert r.end() == end

    def test_last_year_jan1(self):
        pivot = timezone.datetime(2020, 1, 1, 11, 38, tzinfo=pytz.timezone("America/Toronto"))
        start = start_of_day(timezone.datetime(2019, 1, 1, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2019, 12, 31, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("last year", pivot)
        assert r.start() == start
        assert r.end() == end

    def test_last_year_dec31(self):
        pivot = timezone.datetime(2020, 12, 31, 11, 38, tzinfo=pytz.timezone("America/Toronto"))
        start = start_of_day(timezone.datetime(2019, 1, 1, 11, 38, tzinfo=pytz.timezone("America/Toronto")))
        end = end_of_day(timezone.datetime(2019, 12, 31, tzinfo=pytz.timezone("America/Toronto")))
        r = relative_dates("last year", pivot)
        assert r.start() == start
        assert r.end() == end

    def test_today(self):
        start = self.day_start
        end = end_of_day(start)
        r = relative_dates("today", self.now)
        assert r.start() == start
        assert r.end() == end
45.980456
110
0.63637
13,676
0.96883
0
0
0
0
0
0
1,767
0.125177
29cdd1a0441cda0528b31705900a6564e1af5682
179
py
Python
app/blog/urls.py
AjayHao/AtThirty
96b2ce00be8f3ed07ee5e6e2b1ca13ab25e9521d
[ "MIT" ]
null
null
null
app/blog/urls.py
AjayHao/AtThirty
96b2ce00be8f3ed07ee5e6e2b1ca13ab25e9521d
[ "MIT" ]
null
null
null
app/blog/urls.py
AjayHao/AtThirty
96b2ce00be8f3ed07ee5e6e2b1ca13ab25e9521d
[ "MIT" ]
null
null
null
from django.conf.urls import patterns, url

from app.blog import views as blog_views


urlpatterns = [
    #django url
    url(r'^$', blog_views.index, name='blog_index'),
]
16.272727
52
0.681564
0
0
0
0
0
0
0
0
28
0.156425
29cde250e9d497ca3e7e9d2169fa12a67aa2c621
752
py
Python
core/recc/system/environ.py
bogonets/answer
57f892a9841980bcbc35fa1e27521b34cd94bc25
[ "MIT" ]
3
2021-06-20T02:24:10.000Z
2022-01-26T23:55:33.000Z
core/recc/system/environ.py
bogonets/answer
57f892a9841980bcbc35fa1e27521b34cd94bc25
[ "MIT" ]
null
null
null
core/recc/system/environ.py
bogonets/answer
57f892a9841980bcbc35fa1e27521b34cd94bc25
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-

from os import environ
from typing import Optional, Dict, Any, Type


def get_os_envs_dict() -> Dict[str, str]:
    return {k: str(environ.get(k)) for k in environ if environ}


def exchange_env(key: str, exchange: Optional[str]) -> Optional[str]:
    result = environ.get(key)
    if result is not None:
        environ.pop(key)
    if exchange is not None:
        environ[key] = exchange
    return result


def get_env(key: str) -> Optional[str]:
    return environ.get(key)


def opt_env(key: str, default_value: Any, result_class: Type) -> Any:
    value = environ.get(key)
    if value is None:
        return default_value
    try:
        return result_class(value)
    except ValueError:
        return default_value
23.5
69
0.655585
0
0
0
0
0
0
0
0
23
0.030585
29cf16e7358b9161ab9d90ae6bb97701b983418a
436
py
Python
InteractiveProgramming/assignment3.3.py
mr-ice/pipython
ea27af520946cb710cb717815be625489fc8a1a3
[ "MIT" ]
null
null
null
InteractiveProgramming/assignment3.3.py
mr-ice/pipython
ea27af520946cb710cb717815be625489fc8a1a3
[ "MIT" ]
null
null
null
InteractiveProgramming/assignment3.3.py
mr-ice/pipython
ea27af520946cb710cb717815be625489fc8a1a3
[ "MIT" ]
null
null
null
try:
    s = raw_input("Enter score between 0.0 and 1.0: ")
    score = float(s)
    if score < 0 or score > 1:
        raise Exception
except ValueError:
    print "You didn't even enter a number"
except:
    print "Not a possible score."
else:
    if score >= 0.9:
        print "A"
    elif score >= 0.8:
        print "B"
    elif score >= 0.7:
        print "C"
    elif score >= 0.6:
        print "D"
    else:
        print "F"
20.761905
54
0.529817
0
0
0
0
0
0
0
0
105
0.240826
29cf80f6c6965927720d1b295a0c8b626681599d
254
py
Python
Store/robot-test/say.py
Quanta-Robotics/Robot-Blueberry
7b7e77e09ac5e9ec5afd947e0db1ecc8773e56da
[ "MIT" ]
25
2021-06-08T07:09:30.000Z
2021-12-30T06:28:35.000Z
Store/robot-test/say.py
ICT-CoU/Robot-Blueberry
d19fd1be037df9d67de64df57a87006d74cd6c43
[ "MIT" ]
2
2021-05-23T12:54:51.000Z
2021-06-07T17:47:56.000Z
Store/robot-test/say.py
ICT-CoU/Robot-Blueberry
d19fd1be037df9d67de64df57a87006d74cd6c43
[ "MIT" ]
14
2021-06-08T13:02:28.000Z
2021-12-30T20:07:18.000Z
import pyttsx3

engine = pyttsx3.init()
engine.setProperty('rate', 150)
voices = engine.getProperty('voices')
engine.setProperty("voice", 'english_rp+f4')

def talk(text):
    engine.say(text)
    engine.runAndWait()

talk("My name is robot leena")
15.875
44
0.704724
0
0
0
0
0
0
0
0
60
0.23622
29d39c3e482269db7c4ce7b3f24a9b213141989d
141
py
Python
api/server/utils/logger.py
ktolstikhin/vision-service
b87f10f5ec3d22b76c06a0e6c0105fd823e60c39
[ "MIT" ]
null
null
null
api/server/utils/logger.py
ktolstikhin/vision-service
b87f10f5ec3d22b76c06a0e6c0105fd823e60c39
[ "MIT" ]
null
null
null
api/server/utils/logger.py
ktolstikhin/vision-service
b87f10f5ec3d22b76c06a0e6c0105fd823e60c39
[ "MIT" ]
null
null
null
import logging


def initialize(app):
    level = logging.DEBUG if app.config.get('DEBUG') else logging.INFO
    app.logger.setLevel(level)
17.625
70
0.723404
0
0
0
0
0
0
0
0
7
0.049645
29d3aa7dc92ae861ca049b62d573cabdb669506d
1,579
py
Python
tests/integration/test_dug_utils.py
helxplatform/roger
60c1c1198c41949804692217c74848e2aa8b9ea2
[ "MIT" ]
null
null
null
tests/integration/test_dug_utils.py
helxplatform/roger
60c1c1198c41949804692217c74848e2aa8b9ea2
[ "MIT" ]
7
2021-04-08T12:17:27.000Z
2022-02-08T23:12:32.000Z
tests/integration/test_dug_utils.py
helxplatform/roger
60c1c1198c41949804692217c74848e2aa8b9ea2
[ "MIT" ]
3
2020-12-07T20:49:43.000Z
2021-06-12T19:49:43.000Z
import tempfile
from pathlib import Path

import pytest

from dug_helpers.dug_utils import FileFetcher, get_topmed_files, get_dbgap_files
from roger.Config import config


def test_fetch_network_file():
    filename = "README.md"

    with tempfile.TemporaryDirectory() as tmp_dir:
        fetch1 = FileFetcher(
            "https://github.com",
            "/helxplatform/roger/blob/main/",
            tmp_dir,
        )
        expected_path = Path(tmp_dir) / filename
        assert not expected_path.exists()
        fetch1(filename)
        assert expected_path.exists()

    with tempfile.TemporaryDirectory() as tmp_dir:
        fetch2 = FileFetcher(
            "https://github.com",
            Path("/helxplatform/roger/blob/main/"),
            Path(tmp_dir),
        )
        expected_path = Path(tmp_dir) / filename
        assert not expected_path.exists()
        fetch2(filename)
        assert expected_path.exists()


def test_fetcher_errors():
    filename = "DOES NOT EXIST.md"
    with tempfile.TemporaryDirectory() as tmp_dir:
        fetch = FileFetcher(
            "https://github.com",
            Path("/helxplatform/roger/blob/main/"),
            Path(tmp_dir),
        )
        with pytest.raises(RuntimeError):
            fetch(filename)


def test_get_topmed_files():
    file_names = get_topmed_files(config=config)
    for file_name in file_names:
        assert Path(file_name).exists()


def test_get_dbgap_files():
    file_names = get_dbgap_files(config=config)
    for file_name in file_names:
        assert Path(file_name).exists()
26.316667
80
0.640912
0
0
0
0
0
0
0
0
186
0.117796
29d584e58250f68d3fe99344f92ca1d026fcfaa6
6915
py
Python
tests/bean_test.py
samuelchen/truepy
f1fd86ffccf7c3b2eee4cd4ced9436ff832d257e
[ "OpenSSL" ]
40
2015-08-04T11:01:33.000Z
2022-01-17T10:45:18.000Z
tests/bean_test.py
samuelchen/truepy
f1fd86ffccf7c3b2eee4cd4ced9436ff832d257e
[ "OpenSSL" ]
9
2016-09-14T04:40:58.000Z
2021-07-22T09:07:51.000Z
tests/bean_test.py
samuelchen/truepy
f1fd86ffccf7c3b2eee4cd4ced9436ff832d257e
[ "OpenSSL" ]
13
2015-02-24T05:39:10.000Z
2022-02-03T00:41:53.000Z
# coding: utf-8
# truepy
# Copyright (C) 2014-2015 Moses Palmér
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.

import unittest

from datetime import datetime

from truepy import fromstring, tostring
from truepy._bean import snake_to_camel, camel_to_snake
from truepy._bean import value_to_xml
from truepy._bean import deserialize, serialize, to_document
from truepy._bean_serializers import _DESERIALIZER_CLASSES, bean_class


class BeanTest(unittest.TestCase):
    def test_snake_to_camel(self):
        """Tests that snake_to_camel works as expected"""
        self.assertEqual(
            'camelCase',
            snake_to_camel('camel_case'))
        self.assertEqual(
            'camelCase',
            snake_to_camel('camel__case'))
        self.assertEqual(
            'camelCase',
            snake_to_camel('camel_case_'))
        self.assertEqual(
            'CamelCase',
            snake_to_camel('_camel_case'))

    def test_camel_to_snake(self):
        """Tests that camel_to_snake works as expected"""
        self.assertEqual('snake_case', camel_to_snake('snakeCase'))
        self.assertEqual('_snake_case', camel_to_snake('SnakeCase'))
        self.assertEqual('_s_n_a_k_e', camel_to_snake('SNAKE'))

    def test_value_to_xml_no_class(self):
        """Tests value_to_xml for no class name"""
        self.assertEqual(
            '<test>value</test>',
            tostring(value_to_xml('value', 'test')))

    def test_value_to_xml_with_class(self):
        """Tests value_to_xml for a class name"""
        self.assertEqual(
            '<object class="test">'
            '<tag>value</tag>'
            '</object>',
            tostring(value_to_xml('value', 'tag', 'test')))

    def test_serialize_unknown(self):
        """Serialises an unknown value"""
        class unknown(object):
            pass

        with self.assertRaises(ValueError):
            serialize(unknown())

    def test_serialize_empty_class(self):
        """Serialises an empty class"""
        class empty(object):
            bean_class = 'test.class'

        self.assertEqual(
            '<object class="test.class" />',
            tostring(serialize(empty())))

    def test_serialize_unknown_property(self):
        """Serialises a class with an unknown property"""
        class unknown(object):
            pass

        class has_unknown(object):
            bean_class = 'test.class'

            @property
            def test_a(self):
                return unknown()

        with self.assertRaises(ValueError):
            serialize(has_unknown())

    def test_serialize_string(self):
        """Serialises a string"""
        self.assertEqual(
            '<string>hello world</string>',
            tostring(serialize('hello world')))

    def test_serialize_object(self):
        """Serialises an object"""
        class test(object):
            bean_class = 'test.class'

            @property
            def test_property(self):
                return True

        self.assertEqual(
            '<object class="test.class">'
            '<void property="testProperty">'
            '<boolean>true</boolean>'
            '</void>'
            '</object>',
            tostring(serialize(test())))

    def test_serialize_datetime(self):
        """Serialises datetime instances"""
        self.assertEqual(
            '<object class="java.util.Date">'
            '<long>0</long>'
            '</object>',
            tostring(serialize(
                datetime.strptime('1970-01-01 UTC', '%Y-%m-%d %Z'))))
        self.assertEqual(
            '<object class="java.util.Date">'
            '<long>86400000</long>'
            '</object>',
            tostring(serialize(
                datetime.strptime('1970-01-02 UTC', '%Y-%m-%d %Z'))))

    def test_deserialize_unknown_fragment(self):
        """Deserialises an unknown fragment"""
        with self.assertRaises(ValueError):
            deserialize(fromstring(
                '<object class="unknown">'
                '<void property="a">'
                '<int>42</int>'
                '</void>'
                '</object>'))

    def test_deserialize(self):
        """Deserialises invalid fragments"""
        with self.assertRaises(ValueError):
            deserialize(fromstring(
                '<boolean>invalid</boolean>'))
        with self.assertRaises(ValueError):
            deserialize(fromstring(
                '<int>invalid</int>'))

    def test_deserialize_known_fragment(self):
        """Deserialises known fragments"""
        self.assertEqual(
            True,
            deserialize(fromstring(
                '<boolean>true</boolean>')))
        self.assertEqual(
            42,
            deserialize(fromstring(
                '<int>42</int>')))
        self.assertEqual(
            'hello world',
            deserialize(fromstring(
                '<string>hello world</string>')))

    def test_deserialize_with_constructor(self):
        """Deserialises an object using constructor"""
        global _DESERIALIZER_CLASSES
        class_name = 'test.class'
        try:
            @bean_class(class_name)
            class test(object):
                @property
                def a(self):
                    return self._a

                def __init__(self, a):
                    self._a = a

            o = deserialize(fromstring(
                '<object class="test.class">'
                '<void property="a">'
                '<string>hello world</string>'
                '</void>'
                '</object>'))
            self.assertEqual('hello world', o.a)
            self.assertEqual(test, o.__class__)
        finally:
            del _DESERIALIZER_CLASSES[class_name]

    def test_deserialize_datetime(self):
        """Deserialises datetime objects"""
        expected = datetime.strptime('2014-01-01 UTC', '%Y-%m-%d %Z')
        self.assertEqual(
            expected,
            deserialize(serialize(expected)))

    def test_to_document(self):
        """Tests that to_document creates a valid XML document"""
        expected = 'hello world'
        self.assertEqual(
            expected,
            deserialize(
                fromstring(
                    to_document(
                        serialize(expected)))
                [0]))
32.013889
79
0.572523
5898
0.852805
0
0
377
0.054511
0
0
2353
0.340226
29d6295eb61db2d065b900e834740080a6c5d3ff
3679
py
Python
normal_version/export_es_data.py
Logistic98/es-data-transfer
6ed916201e8ab701e258e156e2c71468a3c509e5
[ "Apache-2.0" ]
1
2022-03-23T05:22:41.000Z
2022-03-23T05:22:41.000Z
normal_version/export_es_data.py
Logistic98/es-data-transfer
6ed916201e8ab701e258e156e2c71468a3c509e5
[ "Apache-2.0" ]
null
null
null
normal_version/export_es_data.py
Logistic98/es-data-transfer
6ed916201e8ab701e258e156e2c71468a3c509e5
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-

from elasticsearch import Elasticsearch
from datetime import timedelta
import datetime
import os
import json
import logging
from configparser import ConfigParser

# Set up the log file
logging.basicConfig(filename='logging_es.log', level=logging.INFO,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def read_config():
    cfg = ConfigParser()
    cfg.read('./config.ini', encoding='utf-8')
    host = cfg.get('SOURCE_ES', 'host')
    port = cfg.get('SOURCE_ES', 'port')
    user = cfg.get('SOURCE_ES', 'user')
    password = cfg.get('SOURCE_ES', 'password')
    timeout = cfg.get('SOURCE_ES', 'timeout')
    index_list = cfg.get('SOURCE_ES', 'index_list')

    es_dict = {}
    es_dict['host'] = host
    es_dict['port'] = port
    es_dict['user'] = user
    es_dict['password'] = password
    es_dict['timeout'] = timeout
    es_dict['index_list'] = index_list
    return es_dict


def write_list_to_json(list, json_file_name, json_file_save_path):
    """
    Write a list to a json file.
    :param list:
    :param json_file_name: name of the json file to write
    :param json_file_save_path: directory in which the json file is stored
    :return:
    """
    if not os.path.exists(json_file_save_path):
        os.makedirs(json_file_save_path)
    os.chdir(json_file_save_path)
    with open(json_file_name, 'w', encoding='utf-8') as f:
        json.dump(list, f, ensure_ascii=False)


def es_json(es_dict, start_time, end_time):
    str_separate = "==============================================================="
    try:
        BASE_DIR = os.getcwd()
        Es = Elasticsearch(
            hosts=[str(es_dict['host']) + ":" + str(es_dict['port'])],
            http_auth=(str(es_dict['user']), str(es_dict['password'])),
            timeout=int(es_dict['timeout'])
        )
    except Exception as e:
        logging.error(e)

    index_list = ''.join(es_dict['index_list'].split()).split(",")
    for i in index_list:
        print(f"Saving data for index {i}\r")
        print_info1 = "Saving data for index " + i
        logging.info(print_info1)
        query = {
            "range": {
                "@timestamp": {
                    # greater than the end time of the previous read,
                    # less than or equal to the start time of this read
                    "gt": start_time,
                    "lte": end_time
                }
            }
        }
        try:
            data = Es.search(index=i, query=query, size=10000)
            source_list = []
            for hit in data['hits']['hits']:
                source_data = hit['_source']
                source_data['_id'] = hit['_id']
                source_list.append(source_data)
            print(f"Saving the time range {start_time} to {end_time}\r")
            print_info2 = "Saving the time range " + start_time + " to " + end_time + ""
            logging.info(print_info2)
            file_path = BASE_DIR + "/json_file"
            file_name = str(i) + ".json"
            if len(source_list) != 0:
                write_list_to_json(source_list, file_name, file_path)
            else:
                print('No updates')
                logging.info(str(i) + ' has no updates')
            print(str_separate)
            logging.info(str_separate)
        except Exception as e:
            print(e)
            logging.info("Error reading from the ES database / writing the json file: %s" % e)
            logging.info(str_separate)


if __name__ == '__main__':
    start_date_time = datetime.datetime.now() + timedelta(days=-1)
    end_date_time = datetime.datetime.now()
    start_time = start_date_time.strftime("%Y-%m-%dT%H:00:00.000Z")
    end_time = end_date_time.strftime("%Y-%m-%dT%H:00:00.000Z")

    # Read the configuration
    es_dict = read_config()

    # Get the current working directory
    BASE_DIR = os.getcwd()

    # Read the data from the ES database and write it out as json files
    es_json(es_dict, start_time, end_time)
32.27193
84
0.57271
0
0
0
0
0
0
0
0
1110
0.283091
29d8926e28c855d35087d877a48866f5e57129f6
25
py
Python
env/lib/python2.7/site-packages/grpc/_grpcio_metadata.py
husky-parul/SheHacks
19383029947f50ebaf07232c9b2ee76c75d8ada6
[ "Apache-2.0" ]
2
2018-02-01T06:30:24.000Z
2018-04-12T15:39:56.000Z
env/lib/python2.7/site-packages/grpc/_grpcio_metadata.py
husky-parul/SheHacks
19383029947f50ebaf07232c9b2ee76c75d8ada6
[ "Apache-2.0" ]
null
null
null
env/lib/python2.7/site-packages/grpc/_grpcio_metadata.py
husky-parul/SheHacks
19383029947f50ebaf07232c9b2ee76c75d8ada6
[ "Apache-2.0" ]
null
null
null
__version__ = """1.8.4"""
25
25
0.56
0
0
0
0
0
0
0
0
11
0.44
29d92f0ff0b006c6d957ac126ab63d45f2f46f8c
146
py
Python
danesfield/core/admin/__init__.py
girder/Danesfield
04b0e991cae52bda758de4ee3f7e04dab45f3ff9
[ "Apache-2.0" ]
null
null
null
danesfield/core/admin/__init__.py
girder/Danesfield
04b0e991cae52bda758de4ee3f7e04dab45f3ff9
[ "Apache-2.0" ]
24
2021-10-29T21:03:34.000Z
2022-03-18T02:07:57.000Z
danesfield/core/admin/__init__.py
girder/Danesfield
04b0e991cae52bda758de4ee3f7e04dab45f3ff9
[ "Apache-2.0" ]
1
2022-01-26T09:31:48.000Z
2022-01-26T09:31:48.000Z
from django.contrib import admin

# general admin settings
admin.site.site_header = 'Danesfield Admin'
admin.site.site_title = 'Danesfield Admin'
24.333333
43
0.80137
0
0
0
0
0
0
0
0
60
0.410959
29dae29f89683a7db968db7356c874c048160ba7
2645
py
Python
cnmodel/util/expfitting.py
pbmanis/cnmodel
eee593c673752c19137658d5b9a381ea9ad4580f
[ "BSD-3-Clause" ]
5
2017-07-26T21:46:14.000Z
2020-11-27T07:53:14.000Z
cnmodel/util/expfitting.py
pbmanis/cnmodel
eee593c673752c19137658d5b9a381ea9ad4580f
[ "BSD-3-Clause" ]
12
2017-07-26T07:16:16.000Z
2021-07-14T13:41:37.000Z
cnmodel/util/expfitting.py
pbmanis/cnmodel
eee593c673752c19137658d5b9a381ea9ad4580f
[ "BSD-3-Clause" ]
10
2017-07-26T07:03:29.000Z
2021-06-23T15:52:37.000Z
#!/usr/bin/env python
# encoding: utf-8
"""
expfitting.py

Provide single or double exponential fits to data.

"""

import lmfit
import numpy as np
import scipy.optimize


class ExpFitting:
    """
    Parameters
    ----------
    nexp : int
        1 or 2 for single or double exponential fit

    initpars : dict
        dict of initial parameters. For example: {'dc': 0., 'a1': 1., 't1': 3,
        'a2' : 0.5, 'delta': 3.}, where delta determines the ratio between the
        time constants.

    bounds : dict
        dictionary of bounds for each parameter, with a list of lower and
        upper values.

    """
    def __init__(self, nexp=1, initpars=None, bounds=None):
        self.fitpars = lmfit.Parameters()
        if nexp == 1:
            # (Name, Value, Vary, Min, Max, Expr)
            self.fitpars.add_many(('dc', 0, True, -100., 0., None),
                                  ('a1', 1., True, -25., 25., None),
                                  ('t1', 10., True, 0.1, 50, None))
            self.efunc = self.exp1_err
        elif nexp == 2:
            self.fitpars.add_many(('dc', 0, True, -100., 0., None),
                                  ('a1', 1., True, 0., 25., None),
                                  ('t1', 10., True, 0.1, 50, None),
                                  ('a2', 1., True, 0., 25., None),
                                  ('delta', 3., True, 3., 100., None))
            if initpars is not None:
                assert len(initpars) == 5
                for k, v in initpars.iteritems():
                    self.fitpars[k].value = v
            if bounds is not None:
                assert len(bounds) == 5
                for k, v in bounds.iteritems():
                    self.fitpars[k].min = v[0]
                    self.fitpars[k].max = v[1]
            self.efunc = self.exp2_err
        else:
            raise ValueError

    def fit(self, x, y, p, verbose=False):
        kws = {'maxfev': 5000}
        mim = lmfit.minimize(self.efunc, p, method='least_squares', args=(x, y))  # , kws=kws)
        if verbose:
            lmfit.printfuncs.report_fit(mim.params)
        fitpars = mim.params
        return fitpars

    @staticmethod
    def exp1(x, dc, t1, a1):
        return dc + a1*np.exp(-x/t1)

    def exp1_err(self, p, x, y):
        return np.fabs(y-self.exp1(x, **dict([(k,p.value) for k,p in p.items()])))

    @staticmethod
    def exp2(x, dc, t1, a1, a2, delta):
        return dc + a1 * np.exp(-x/t1) + a2 * np.exp(-x/(t1*delta))

    def exp2_err(self, p, x, y):
        return np.fabs(y-self.exp2(x, **dict([(k,p.value) for k,p in p.items()])))
33.910256
92
0.485444
2473
0.934972
0
0
200
0.075614
0
0
644
0.243478
29db5cdc597125eaa323b36fcd83763a78a5f8f9
4338
py
Python
django_cloud_tasks/models.py
joaodaher/django-cloud-tasks
bc8ff94a281bda8b49ee73229d5ed5cacdd7a388
[ "Apache-2.0" ]
null
null
null
django_cloud_tasks/models.py
joaodaher/django-cloud-tasks
bc8ff94a281bda8b49ee73229d5ed5cacdd7a388
[ "Apache-2.0" ]
1
2020-07-09T17:48:19.000Z
2020-07-09T17:53:33.000Z
django_cloud_tasks/models.py
joaodaher/django-cloud-tasks
bc8ff94a281bda8b49ee73229d5ed5cacdd7a388
[ "Apache-2.0" ]
null
null
null
# pylint: disable=no-member
from datetime import datetime
from typing import Optional, Dict

from django.db import transaction, models
from django.apps import apps

from django_cloud_tasks import tasks, serializers


class Pipeline(models.Model):
    name = models.CharField(max_length=100)

    def start(self):
        routines = self.routines.filter(
            models.Q(dependent_routines__id__isnull=True) & models.Q(status=Routine.Statuses.PENDING)
        )
        for routine in routines:
            routine.enqueue()

    def revert(self):
        # TODO: Actually we don't know what to do when a routine with RUNNING status is triggered
        # to revert. We trust that it will not be a big deal for now. But would be great to support that soon
        routines = self.routines.filter(
            models.Q(next_routines__id__isnull=True) & ~models.Q(status=Routine.Statuses.REVERTED)
        )
        for routine in routines:
            routine.revert()

    def add_routine(self, routine: Dict) -> "Routine":
        return self.routines.create(**routine)


class Routine(models.Model):
    class Statuses(models.TextChoices):
        PENDING = ("pending", "Pending")
        SCHEDULED = ("scheduled", "Scheduled")
        RUNNING = ("running", "Running")
        COMPLETED = ("completed", "Completed")
        FAILED = ("failed", "Failed")
        REVERTING = ("reverting", "Reverting")
        REVERTED = ("reverted", "Reverted")

    # TODO: We have a signal to check if the defined task_name exists.
    # We can do it with Django Field Validators
    task_name = models.CharField(max_length=100)
    pipeline = models.ForeignKey(
        to="django_cloud_tasks.Pipeline",
        related_name="routines",
        on_delete=models.PROTECT,
    )
    body = models.JSONField(
        default=dict,
        encoder=serializers.JSONEncoder,
    )
    attempt_count = models.PositiveIntegerField(default=0)
    max_retries = models.PositiveIntegerField(null=True)
    output = models.JSONField(
        null=True,
        blank=True,
        encoder=serializers.JSONEncoder,
    )
    starts_at = models.DateTimeField(null=True, blank=True)
    ends_at = models.DateTimeField(null=True, blank=True)
    status = models.CharField(
        max_length=20,
        choices=Statuses.choices,
        default=Statuses.PENDING,
    )
    created_at = models.DateTimeField(
        auto_now_add=True,
    )
    updated_at = models.DateTimeField(
        auto_now=True,
    )
    next_routines = models.ManyToManyField(
        to="Routine",
        through="RoutineVertex",
        through_fields=("routine", "next_routine"),
        related_name="dependent_routines",
    )

    def fail(self, output: Dict):
        self.output = output
        self.status = self.Statuses.FAILED
        self.ends_at = datetime.now()
        self.save()

    def complete(self, output: Dict):
        self.output = output
        self.status = self.Statuses.COMPLETED
        self.ends_at = datetime.now()
        self.save()

    def enqueue(self):
        with transaction.atomic():
            self.status = self.Statuses.SCHEDULED
            self.starts_at = datetime.now()
            self.save()

    def revert(self):
        with transaction.atomic():
            if self.status not in [self.Statuses.REVERTED, self.Statuses.REVERTING]:
                self.status = self.Statuses.REVERTING
                self.save()

    def add_next(self, routine: Dict) -> "Routine":
        routine["pipeline_id"] = self.pipeline_id
        return self.next_routines.create(**routine)

    @property
    def task(self) -> Optional[tasks.Task]:
        app = apps.get_app_config("django_cloud_tasks")
        return app.get_task(name=self.task_name)


class RoutineVertex(models.Model):
    next_routine = models.ForeignKey(
        to="django_cloud_tasks.Routine",
        on_delete=models.PROTECT,
        related_name="required_routine_vertices",
    )
    routine = models.ForeignKey(
        to="django_cloud_tasks.Routine",
        related_name="next_routine_vertices",
        on_delete=models.PROTECT,
    )

    class Meta:
        constraints = [
            models.UniqueConstraint(name="unique_routine_next_routine", fields=("next_routine", "routine")),
        ]


__all__ = (
    "Routine",
    "RoutineVertex",
    "Pipeline",
)
31.434783
109
0.642692
4048
0.933149
0
0
158
0.036422
0
0
815
0.187875
29db5eb5db45a035903363004257142de128c253
2913
py
Python
src/ifood/model/order/event.py
micael95/sdk-ifood-python
27462d8127b62a29b5c89624e79accbea9563a80
[ "MIT" ]
2
2021-05-06T18:50:43.000Z
2021-06-05T21:54:04.000Z
src/ifood/model/order/event.py
micael95/sdk-ifood-python
27462d8127b62a29b5c89624e79accbea9563a80
[ "MIT" ]
null
null
null
src/ifood/model/order/event.py
micael95/sdk-ifood-python
27462d8127b62a29b5c89624e79accbea9563a80
[ "MIT" ]
1
2021-05-06T18:50:54.000Z
2021-05-06T18:50:54.000Z
from datetime import datetime
from uuid import UUID

from ...serializer import IfoodSerializable
from ...utils import auto_str
from uuid import UUID


@auto_str
class Consumer(IfoodSerializable):
    financial_occurrence: str
    payment_type: str

    @staticmethod
    def unserialize(dict=None):
        if dict is None:
            dict = {}
        instance = Consumer()
        for k, v in dict.items():
            setattr(instance, IfoodSerializable.camel_to_snake(k), v)
        return instance


@auto_str
class CancellationOccurrence(IfoodSerializable):
    restaurant: Consumer
    consumer: Consumer
    logistic: Consumer

    @staticmethod
    def unserialize(dict=None):
        if dict is None:
            dict = {}
        instance = CancellationOccurrence()
        for k, v in dict.items():
            if k == "RESTAURANT":
                instance.restaurant = Consumer.unserialize(v)
                continue
            if k == "CONSUMER":
                instance.consumer = Consumer.unserialize(v)
                continue
            if k == "LOGISTIC":
                instance.logistic = Consumer.unserialize(v)
                continue
            setattr(instance, IfoodSerializable.camel_to_snake(k), v)
        return instance


@auto_str
class Metadata(IfoodSerializable):
    cancel_stage: str
    cancel_code: int
    cancellation_occurrence: CancellationOccurrence
    timeout_event: bool
    cancel_origin: str
    cancel_user: str
    cancel_reason: str
    cancellation_requested_event_id: UUID

    def __init__(self) -> None:
        pass

    @staticmethod
    def unserialize(dict=None):
        if dict is None:
            dict = {}
        instance = Metadata()
        for k, v in dict.items():
            if k == "CANCELLATION_OCCURRENCE":
                instance.cancellation_occurrence = CancellationOccurrence.unserialize(v)
                continue
            setattr(instance, IfoodSerializable.camel_to_snake(k), v)
        return instance


@auto_str
class OrderEvent(IfoodSerializable):
    created_at: datetime
    full_code: str
    metadata: Metadata
    code: str
    order_id: UUID
    id: UUID

    def __init__(self, created_at: datetime = None, full_code: str = None, metadata: Metadata = None,
                 code: str = None, order_id: UUID = None, id: UUID = None) -> None:
        self.created_at = created_at
        self.full_code = full_code
        self.metadata = metadata
        self.code = code
        self.order_id = order_id
        self.id = id

    @staticmethod
    def unserialize(dict=None):
        if dict is None:
            dict = {}
        instance = OrderEvent()
        for k, v in dict.items():
            if k == "metadata":
                instance.metadata = Metadata.unserialize(v)
                continue
            setattr(instance, IfoodSerializable.camel_to_snake(k), v)
        return instance
26.243243
119
0.610711
2714
0.931686
0
0
2754
0.945417
0
0
67
0.023
29dc92373ea8f436e4e33eb083ad67d7e28abdae
2599
py
Python
scripts/pm/set_sla_kpis.py
supsi-dacd-isaac/parity-sidechain-interface
b64a5fb724955332afb4998344081d1b93ac216a
[ "MIT" ]
null
null
null
scripts/pm/set_sla_kpis.py
supsi-dacd-isaac/parity-sidechain-interface
b64a5fb724955332afb4998344081d1b93ac216a
[ "MIT" ]
null
null
null
scripts/pm/set_sla_kpis.py
supsi-dacd-isaac/parity-sidechain-interface
b64a5fb724955332afb4998344081d1b93ac216a
[ "MIT" ]
null
null
null
# Importing section
import json
import requests
import argparse
import logging
import time
import datetime

from classes.time_utils import TimeUtils

import utilities as u

# Main
if __name__ == "__main__":
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-c', help='config file')
    arg_parser.add_argument('-l', help='log file')
    args = arg_parser.parse_args()

    cfg = json.loads(open(args.c).read())

    # Get configuration about connections to InfluxDB and remote service related to data retrieving
    tmp_config = json.loads(open(cfg['connectionsFile']).read())
    cfg.update(tmp_config)

    # set logging object
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    if not args.l:
        log_file = None
    else:
        log_file = args.l
    logger = logging.getLogger()
    logging.basicConfig(format='%(asctime)-15s::%(threadName)s::%(levelname)s::%(funcName)s::%(message)s',
                        level=logging.INFO, filename=log_file)

    url_prefix = cfg['sidechainRestApi']

    logger.info('Starting program')

    # Get the aggregator
    res = requests.get('%s/aggregator' % cfg['sidechainRestApi'])
    aggregator_id = json.loads(res.text)['Aggregator']['idx']

    # Cycle over the configured SLAs
    for sla in cfg['slas']:
        dt_start, dt_end, _ = TimeUtils.get_start_end(sla['duration'], cfg['utils']['timeZone'])
        dt_start = dt_start - datetime.timedelta(minutes=cfg['shiftBackMinutes']['kpiSetting'])
        dt_end = dt_end - datetime.timedelta(minutes=cfg['shiftBackMinutes']['kpiSetting'])

        sla_idx = '%s_%i-%i' % (sla['idPrefix'], int(dt_start.timestamp()), int(dt_end.timestamp()))
        params = {
            'idx': sla_idx,
            'start': int(dt_start.timestamp()),
            'end': int(dt_end.timestamp()),
        }
        u.send_post('%s/createSla' % url_prefix, params, logger)
        time.sleep(cfg['utils']['sleepBetweenTransactions'])

        # Cycle over the configured KPIs
        for kpi in sla['kpis']:
            params = {
                'idx': '%s_%i-%i' % (kpi['idPrefix'], int(dt_start.timestamp()), int(dt_end.timestamp())),
                'idxSla': sla_idx,
                'rule': kpi['rule'],
                'limit': kpi['limit'],
                'measureUnit': kpi['mu'],
                'penalty': kpi['penalty'],
                'players': kpi['players'],
            }
            u.send_post('%s/createKpiFeatures' % url_prefix, params, logger)
            time.sleep(cfg['utils']['sleepBetweenTransactions'])

    logger.info('Ending program')
33.320513
106
0.614082
0
0
0
0
0
0
0
0
808
0.310889
29dd6423703e7bd3d65394220ac73d337651b108
1603
py
Python
src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinnman/messages/scp/impl/scp_version_request.py
Roboy/LSM_SpiNNaker_MyoArm
04fa1eaf78778edea3ba3afa4c527d20c491718e
[ "BSD-3-Clause" ]
2
2020-11-01T13:22:11.000Z
2020-11-01T13:22:20.000Z
src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinnman/messages/scp/impl/scp_version_request.py
Roboy/LSM_SpiNNaker_MyoArm
04fa1eaf78778edea3ba3afa4c527d20c491718e
[ "BSD-3-Clause" ]
null
null
null
src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/spinnman/messages/scp/impl/scp_version_request.py
Roboy/LSM_SpiNNaker_MyoArm
04fa1eaf78778edea3ba3afa4c527d20c491718e
[ "BSD-3-Clause" ]
null
null
null
from spinnman.messages.scp.abstract_messages.abstract_scp_request\
    import AbstractSCPRequest
from spinnman.messages.sdp.sdp_flag import SDPFlag
from spinnman.messages.sdp.sdp_header import SDPHeader
from spinnman.messages.scp.scp_request_header import SCPRequestHeader
from spinnman.messages.scp.scp_command import SCPCommand
from spinnman.messages.scp.impl.scp_version_response import SCPVersionResponse


class SCPVersionRequest(AbstractSCPRequest):
    """ An SCP request to read the version of software running on a core
    """

    def __init__(self, x, y, p):
        """
        :param x: The x-coordinate of the chip to read from, between 0 and 255
        :type x: int
        :param y: The y-coordinate of the chip to read from, between 0 and 255
        :type y: int
        :param p: The id of the processor to read the version from,\
                    between 0 and 31
        :type p: int
        :raise spinnman.exceptions.SpinnmanInvalidParameterException:
                    * If the chip coordinates are out of range
                    * If the processor is out of range
        """
        super(SCPVersionRequest, self).__init__(
            SDPHeader(
                flags=SDPFlag.REPLY_EXPECTED, destination_port=0,
                destination_cpu=p, destination_chip_x=x,
                destination_chip_y=y),
            SCPRequestHeader(command=SCPCommand.CMD_VER))

    def get_scp_response(self):
        """ See\
            :py:meth:`spinnman.messages.scp.abstract_scp_request.AbstractSCPRequest.get_scp_response`
        """
        return SCPVersionResponse()
40.075
101
0.674984
1191
0.742982
0
0
0
0
0
0
729
0.454772
29dd6adf13db2f5c89c5474bb138c114f67d7138
4506
py
Python
mac/pyobjc-framework-Quartz/Examples/Core Image/CIBevelSample/CIBevelView.py
albertz/music-player
d23586f5bf657cbaea8147223be7814d117ae73d
[ "BSD-2-Clause" ]
132
2015-01-01T10:02:42.000Z
2022-03-09T12:51:01.000Z
mac/pyobjc-framework-Quartz/Examples/Core Image/CIBevelSample/CIBevelView.py
mba811/music-player
7998986b34cfda2244ef622adefb839331b81a81
[ "BSD-2-Clause" ]
6
2015-01-06T08:23:19.000Z
2019-03-14T12:22:06.000Z
mac/pyobjc-framework-Quartz/Examples/Core Image/CIBevelSample/CIBevelView.py
mba811/music-player
7998986b34cfda2244ef622adefb839331b81a81
[ "BSD-2-Clause" ]
27
2015-02-23T11:51:43.000Z
2022-03-07T02:34:18.000Z
from Cocoa import *
from Quartz import *
from SampleCIView import SampleCIView
from math import sin
import objc

NUM_POINTS=4

class CIBevelView (SampleCIView):
    currentPoint = objc.ivar(type=objc._C_INT)
    points = objc.ivar()
    angleTime = objc.ivar(type=objc._C_FLT)
    lineImage = objc.ivar()
    twirlFilter = objc.ivar()
    heightFieldFilter = objc.ivar()
    shadedFilter = objc.ivar()

    def initWithFrame_(self, frameRect):
        self = super(CIBevelView, self).initWithFrame_(frameRect)
        if self is None:
            return None

        self.points = [ None ] * NUM_POINTS
        self.points[0] = CGPointMake(0.5 * frameRect.size.width, frameRect.size.height - 100.0)
        self.points[1] = CGPointMake(150.0, 100.0)
        self.points[2] = CGPointMake(frameRect.size.width - 150.0, 100.0)
        self.points[3] = CGPointMake(0.7*self.points[0].x + 0.3*self.points[2].x,
                                     0.7*self.points[0].y + 0.3*self.points[2].y)

        url = NSURL.fileURLWithPath_(
            NSBundle.mainBundle().pathForResource_ofType_("lightball", "tiff"))
        self.lightball = CIImage.imageWithContentsOfURL_(url)

        self.heightFieldFilter = CIFilter.filterWithName_("CIHeightFieldFromMask")
        self.heightFieldFilter.setDefaults()
        self.heightFieldFilter.setValue_forKey_(15.0, "inputRadius")

        self.twirlFilter = CIFilter.filterWithName_("CITwirlDistortion")
        self.twirlFilter.setDefaults()
        self.twirlFilter.setValue_forKey_(
            CIVector.vectorWithX_Y_(
                0.5*frameRect.size.width,
                0.5*frameRect.size.height),
            "inputCenter")
        self.twirlFilter.setValue_forKey_(300.0, "inputRadius")
        self.twirlFilter.setValue_forKey_(0.0, "inputAngle")

        self.shadedFilter = CIFilter.filterWithName_("CIShadedMaterial")
        self.shadedFilter.setDefaults()
        self.shadedFilter.setValue_forKey_(self.lightball, "inputShadingImage")
        self.shadedFilter.setValue_forKey_(20.0, "inputScale")

        # 1/30 second should give us decent animation
        NSTimer.scheduledTimerWithTimeInterval_target_selector_userInfo_repeats_(
            1.0/30.0, self, 'changeTwirlAngle:', None, True)

        return self

    def changeTwirlAngle_(self, timer):
        self.angleTime += timer.timeInterval()
        self.twirlFilter.setValue_forKey_(
            -0.2 * sin(self.angleTime*5.0), 'inputAngle')
        self.updateImage()

    def mouseDragged_(self, event):
        loc = self.convertPoint_fromView_(event.locationInWindow(), None)
        self.points[self.currentPoint].x = loc.x
        self.points[self.currentPoint].y = loc.y
        self.lineImage = None

        # normally we'd want this, but the timer will cause us to
        # redisplay anyway
        #self.setNeedsDisplay_(True)

    def mouseDown_(self, event):
        d = 1e4
        loc = self.convertPoint_fromView_(event.locationInWindow(), None)
        for i in range(NUM_POINTS):
            x = self.points[i].x - loc.x
            y = self.points[i].y - loc.y
            t = x*x + y*y
            if t < d:
                self.currentPoint = i
                d = t

        self.mouseDragged_(event)

    def updateImage(self):
        context = NSGraphicsContext.currentContext().CIContext()

        if self.lineImage is None:
            bounds = self.bounds()
            layer = context.createCGLayerWithSize_info_(
                CGSizeMake(NSWidth(bounds), NSHeight(bounds)), None)

            cg = CGLayerGetContext(layer)

            CGContextSetRGBStrokeColor(cg, 1,1,1,1)
            CGContextSetLineCap(cg, kCGLineCapRound)
            CGContextSetLineWidth(cg, 60.0)
            CGContextMoveToPoint(cg, self.points[0].x, self.points[0].y)
            for i in range(1, NUM_POINTS):
                CGContextAddLineToPoint(cg, self.points[i].x, self.points[i].y)
            CGContextStrokePath(cg)

            self.lineImage = CIImage.alloc().initWithCGLayer_(layer)

        self.heightFieldFilter.setValue_forKey_(self.lineImage, "inputImage")
        self.twirlFilter.setValue_forKey_(
            self.heightFieldFilter.valueForKey_("outputImage"), "inputImage")
        self.shadedFilter.setValue_forKey_(
            self.twirlFilter.valueForKey_("outputImage"), "inputImage")
        self.setImage_(self.shadedFilter.valueForKey_("outputImage"))
36.634146
126
0.630715
4375
0.970928
0
0
0
0
0
0
413
0.091656
29e24314b4b43a27db5d5e7fb35c4c927a75f669
4226
py
Python
oops_fhir/r4/code_system/request_intent.py
Mikuana/oops_fhir
77963315d123756b7d21ae881f433778096a1d25
[ "MIT" ]
null
null
null
oops_fhir/r4/code_system/request_intent.py
Mikuana/oops_fhir
77963315d123756b7d21ae881f433778096a1d25
[ "MIT" ]
null
null
null
oops_fhir/r4/code_system/request_intent.py
Mikuana/oops_fhir
77963315d123756b7d21ae881f433778096a1d25
[ "MIT" ]
null
null
null
from pathlib import Path

from fhir.resources.codesystem import CodeSystem

from oops_fhir.utils import CodeSystemConcept


__all__ = ["RequestIntent"]

_resource = CodeSystem.parse_file(Path(__file__).with_suffix(".json"))


class RequestIntent:
    """
    RequestIntent

    Codes indicating the degree of authority/intentionality associated with a request.

    Status: draft - Version: 4.0.1

    Copyright None

    http://hl7.org/fhir/request-intent
    """

    proposal = CodeSystemConcept(
        {
            "code": "proposal",
            "definition": "The request is a suggestion made by someone/something that does not have an intention to ensure it occurs and without providing an authorization to act.",
            "display": "Proposal",
        }
    )
    """
    Proposal

    The request is a suggestion made by someone/something that does not have an intention to ensure it occurs and without providing an authorization to act.
    """

    plan = CodeSystemConcept(
        {
            "code": "plan",
            "definition": "The request represents an intention to ensure something occurs without providing an authorization for others to act.",
            "display": "Plan",
        }
    )
    """
    Plan

    The request represents an intention to ensure something occurs without providing an authorization for others to act.
    """

    directive = CodeSystemConcept(
        {
            "code": "directive",
            "definition": "The request represents a legally binding instruction authored by a Patient or RelatedPerson.",
            "display": "Directive",
        }
    )
    """
    Directive

    The request represents a legally binding instruction authored by a Patient or RelatedPerson.
    """

    order = CodeSystemConcept(
        {
            "code": "order",
            "concept": [
                {
                    "code": "original-order",
                    "definition": "The request represents an original authorization for action.",
                    "display": "Original Order",
                },
                {
                    "code": "reflex-order",
                    "definition": "The request represents an automatically generated supplemental authorization for action based on a parent authorization together with initial results of the action taken against that parent authorization.",
                    "display": "Reflex Order",
                },
                {
                    "code": "filler-order",
                    "concept": [
                        {
                            "code": "instance-order",
                            "definition": "An order created in fulfillment of a broader order that represents the authorization for a single activity occurrence. E.g. The administration of a single dose of a drug.",
                            "display": "Instance Order",
                        }
                    ],
                    "definition": "The request represents the view of an authorization instantiated by a fulfilling system representing the details of the fulfiller's intention to act upon a submitted order.",
                    "display": "Filler Order",
                },
            ],
            "definition": "The request represents a request/demand and authorization for action by a Practitioner.",
            "display": "Order",
        }
    )
    """
    Order

    The request represents a request/demand and authorization for action by a Practitioner.
    """

    option = CodeSystemConcept(
        {
            "code": "option",
            "definition": "The request represents a component or option for a RequestGroup that establishes timing, conditionality and/or other constraints among a set of requests. Refer to [[[RequestGroup]]] for additional information on how this status is used.",
            "display": "Option",
        }
    )
    """
    Option

    The request represents a component or option for a RequestGroup that establishes timing, conditionality and/or other constraints among a set of requests. Refer to [[[RequestGroup]]] for additional information on how this status is used.
    """

    class Meta:
        resource = _resource
35.813559
266
0.600331
3999
0.946285
0
0
0
0
0
0
2835
0.670847
29e35c162bb13bbac4bbfa70c3c033b9eb162d1c
266
py
Python
ABC/202/b.py
fumiyanll23/AtCoder
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
[ "MIT" ]
null
null
null
ABC/202/b.py
fumiyanll23/AtCoder
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
[ "MIT" ]
null
null
null
ABC/202/b.py
fumiyanll23/AtCoder
362ca9fcacb5415c1458bc8dee5326ba2cc70b65
[ "MIT" ]
null
null
null
def main():
    # input
    S = list(input())

    # compute
    for i,s in enumerate(S):
        if s == '6':
            S[i] = '9'
        elif s == '9':
            S[i] = '6'

    # output
    print(''.join(reversed(S)))


if __name__ == '__main__':
    main()
14.777778
31
0.406015
0
0
0
0
0
0
0
0
48
0.180451
29e3af095a46b5abdfb783f45e3fb0c6a6c5b81f
652
py
Python
LC/201.py
szhu3210/LeetCode_Solutions
64747eb172c2ecb3c889830246f3282669516e10
[ "MIT" ]
2
2018-02-24T17:20:02.000Z
2018-02-24T17:25:43.000Z
LC/201.py
szhu3210/LeetCode_Solutions
64747eb172c2ecb3c889830246f3282669516e10
[ "MIT" ]
null
null
null
LC/201.py
szhu3210/LeetCode_Solutions
64747eb172c2ecb3c889830246f3282669516e10
[ "MIT" ]
null
null
null
class Solution(object):
    def rangeBitwiseAnd(self, m, n):
        """
        :type m: int
        :type n: int
        :rtype: int
        """

        ## my solution

        # res=''
        # for i in xrange(len(bin(m))-2):
        #     if m>>i & 1 == 0:
        #         res='0'+res
        #     elif (((m>>i) + 1) << i) <= n:
        #         res='0'+res
        #     else:
        #         res='1'+res
        # return int(res,2)

        ## quick solution

        c=0
        for i in xrange(len(bin(m))-2):
            if m>>i != n>>i:
                c+=1
            else:
                break
        return m>>c<<c
24.148148
44
0.328221
652
1
0
0
0
0
0
0
301
0.461656
29e4187ad7c1dcf2ceaec9e4c64c93ba30148a08
88
py
Python
twitoff/__init__.py
boscolio/twitoff-ds19
46449f9a55619a74dafa32ebee733daca8d1602f
[ "MIT" ]
null
null
null
twitoff/__init__.py
boscolio/twitoff-ds19
46449f9a55619a74dafa32ebee733daca8d1602f
[ "MIT" ]
null
null
null
twitoff/__init__.py
boscolio/twitoff-ds19
46449f9a55619a74dafa32ebee733daca8d1602f
[ "MIT" ]
null
null
null
from .app import create_app

# creates the app by calling the package
APP = create_app()
22
40
0.772727
0
0
0
0
0
0
0
0
40
0.454545
29e53d00d3dfdf9edbf744f3dfa7a95332d492b5
170
py
Python
books/init_api.py
nabekabebe/BookFInder
aaa7eb3028cb2ef5552f865107ddb13a5dc3fde7
[ "MIT" ]
null
null
null
books/init_api.py
nabekabebe/BookFInder
aaa7eb3028cb2ef5552f865107ddb13a5dc3fde7
[ "MIT" ]
null
null
null
books/init_api.py
nabekabebe/BookFInder
aaa7eb3028cb2ef5552f865107ddb13a5dc3fde7
[ "MIT" ]
null
null
null
from flask_restplus import Api

API = Api(
    title="Book API",
    version='1.0',
    description="This Api provides endpoint for accessing books and their reviews."
)
21.25
83
0.705882
0
0
0
0
0
0
0
0
82
0.482353
29e60f021e4805e18f02c579cb9365d85a32c49b
371
py
Python
test_game.py
thom1555/euchre
f2fa54fcecb5deeaad2e750e8cda04c94eb1e1e9
[ "Apache-2.0" ]
null
null
null
test_game.py
thom1555/euchre
f2fa54fcecb5deeaad2e750e8cda04c94eb1e1e9
[ "Apache-2.0" ]
null
null
null
test_game.py
thom1555/euchre
f2fa54fcecb5deeaad2e750e8cda04c94eb1e1e9
[ "Apache-2.0" ]
null
null
null
import unittest

from game import Game
from suit import Suit


class TestGame(unittest.TestCase):
    def test_setup(self):
        g = Game('tim', 'rick', 'bob', 'james', 'ballers', 'scrubs')
        self.assertEqual(len(g.players), 4)
        self.assertEqual(g.dealer, 0)
        self.assertEqual(g.trump, Suit.spades)


if __name__ == '__main__':
    unittest.main()
21.823529
68
0.6469
258
0.695418
0
0
0
0
0
0
50
0.134771
29e805265bd23dadb56a588aaeba28a86de79226
4250
py
Python
src/test/resources/scripts/Authentication.py
tomjbarry/Penstro
d9179852158bebf48aaba7a198de5246acb1b064
[ "MIT" ]
1
2019-02-25T05:55:34.000Z
2019-02-25T05:55:34.000Z
src/test/resources/scripts/Authentication.py
tomjbarry/penstro
d9179852158bebf48aaba7a198de5246acb1b064
[ "MIT" ]
null
null
null
src/test/resources/scripts/Authentication.py
tomjbarry/penstro
d9179852158bebf48aaba7a198de5246acb1b064
[ "MIT" ]
null
null
null
from PyConstants import Paths
from PyConstants import Codes
from PyConstants import CacheTimes
from PyBaseTest import BaseTest
from PyRequest import PyRequest

import time

class Authentication(BaseTest):

    password = "testPassword123"
    invalidPassword = "incorrectincorrect"

    def runTests(self):
        print("Running authentication tests")
        self.testRegister(self.username, self.email)
        token = self.testLogin(self.username)
        self.testRegister(self.target, self.targetEmail)
        self.testLogout(token)
        time.sleep(CacheTimes.USER_USERNAME)
        token = self.testLogin(self.username)
        targetToken = self.testLogin(self.target)
        time.sleep(CacheTimes.USER_USERNAME)
        return targetToken, token

    def testRegister(self, username, email):
        invalidBody = {"username":username, "email":email}
        body = {"username":username, "email":email, "password":self.password, "confirmNewPassword":self.password, "ageMinimum":True, "recaptchaResponse":"test"}
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, invalidBody, self.expectedInvalid)
        invalidBody = {"email":email, "password":self.password}
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, invalidBody, self.expectedInvalid)
        invalidBody = {"username":username, "password":self.password}
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, invalidBody, self.expectedInvalid)
        invalidBody = {"username":username, "email":email, "password":self.password}
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, invalidBody, self.expectedInvalid)
        invalidBody = {"username":username, "email":email, "password":self.password, "confirmNewPassword":self.password + "s", "recaptchaResponse":"test"}
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, invalidBody, self.expectedInvalid)
        invalidBody = {"username":username, "email":email, "password":self.password, "confirmNewPassword":self.password, "ageMinimum":False, "recaptchaResponse":"test"}
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, invalidBody, self.expectedInvalid)
        restrictedBody = {"username":username, "password":"password1234567", "email":email, "confirmNewPassword":"password1234567", "ageMinimum":True, "recaptchaResponse":"test"}
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, restrictedBody, self.expectedRestrictedPassword)
        restrictedBody = {"username":"penstro", "password":self.password, "email":email, "confirmNewPassword":self.password, "ageMinimum":True, "recaptchaResponse":"test"}
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, restrictedBody, self.expectedRestrictedUsername)
        restrictedBody = {"username":username, "password":self.password, "email":"[email protected]", "confirmNewPassword":self.password, "ageMinimum":True, "recaptchaResponse":"test"}
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, restrictedBody, self.expectedRestrictedEmail)
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, body, self.expectedResultCreated)
        PyRequest().expectResponse(Paths.REGISTER, PyRequest.POST, body, self.expectedExistsUsernameEmail)

    def testLogin(self, username):
        body = {"username":username, "password":self.invalidPassword}
        PyRequest().expectResponse(Paths.LOGIN, PyRequest.POST, None, self.expectedInvalid)
        PyRequest().expectResponse(Paths.LOGIN, PyRequest.POST, body, self.expectedDenied)
        body = {"username":username, "password":self.password}
        data = PyRequest().expectResponse(Paths.LOGIN, PyRequest.POST, body, self.expectedResultSuccess)
        if 'dto' in data:
            if 'result' in data['dto']:
                print("TOKEN: " + str(data['dto']['result']))
                return str(data['dto']['result'])
        return None

    def testLogout(self, token):
        PyRequest().expectResponse(Paths.LOGOUT, PyRequest.POST, None, self.expectedDenied)
        PyRequest(token).expectResponse(Paths.LOGOUT, PyRequest.POST, None, self.expectedSuccess)
57.432432
183
0.704941
4068
0.957176
0
0
0
0
0
0
797
0.187529
29e8e499563826ecb59fe97bb20177891dc4f78e
235
py
Python
my_env/lib/python3.8/site-packages/tests/strategies.py
David5627/AWWARD
a22a2b2f7d7d6377435bfd475e82268e4e907141
[ "MIT" ]
296
2015-09-07T16:04:01.000Z
2022-03-27T06:31:43.000Z
my_env/lib/python3.8/site-packages/tests/strategies.py
David5627/AWWARD
a22a2b2f7d7d6377435bfd475e82268e4e907141
[ "MIT" ]
189
2015-09-07T14:56:32.000Z
2022-01-31T09:17:22.000Z
my_env/lib/python3.8/site-packages/tests/strategies.py
David5627/AWWARD
a22a2b2f7d7d6377435bfd475e82268e4e907141
[ "MIT" ]
115
2015-09-17T08:36:36.000Z
2022-03-09T12:36:14.000Z
from __future__ import unicode_literals

from hypothesis.strategies import integers

from star_ratings import app_settings


def scores(max_rating=app_settings.STAR_RATINGS_RANGE):
    return integers(min_value=0, max_value=max_rating)
26.111111
55
0.851064
0
0
0
0
0
0
0
0
0
0
29e96f72799e651c5585b837791c85848195cb09
55
py
Python
Game6/modules/sprites/__init__.py
ttkaixin1998/pikachupythongames
609a3a5a2be3f5a187c332c7980bb5bb14548f02
[ "MIT" ]
4013
2018-06-16T08:00:02.000Z
2022-03-30T11:48:14.000Z
Game6/modules/sprites/__init__.py
pigbearcat/Games
b8c47ef1bcce9a9db3f3730c162e6e8e08b508a2
[ "MIT" ]
22
2018-10-18T00:15:50.000Z
2022-01-13T08:16:15.000Z
Game6/modules/sprites/__init__.py
pigbearcat/Games
b8c47ef1bcce9a9db3f3730c162e6e8e08b508a2
[ "MIT" ]
2172
2018-07-20T04:03:14.000Z
2022-03-31T14:18:29.000Z
'''Initialization'''
from .Bird import Bird
from .Pipe import Pipe
18.333333
22
0.709091
0
0
0
0
0
0
0
0
15
0.245902
29ea1aef1c82bd772907c42e68df319791525947
6,824
py
Python
Render2018/lib/create_config_bodyflow.py
BigOto2/BlenderRenderDNS
a8ff239ecffef5217f0db35d579227a0a444c32d
[ "MIT" ]
1
2021-07-28T00:42:39.000Z
2021-07-28T00:42:39.000Z
Render2018/lib/create_config_bodyflow.py
BigOto2/BlenderRenderDNS
a8ff239ecffef5217f0db35d579227a0a444c32d
[ "MIT" ]
null
null
null
Render2018/lib/create_config_bodyflow.py
BigOto2/BlenderRenderDNS
a8ff239ecffef5217f0db35d579227a0a444c32d
[ "MIT" ]
1
2019-05-13T17:38:05.000Z
2019-05-13T17:38:05.000Z
import os.path
import configparser
from dircheck import get_yesno_input
import create_jobscripts
from create_dirname_config import config_dirname_cfg
from create_all_dirs import create_all
import socket
import cgns_load_data

# Script that creates the two configuration files (case and render files) necessary to run the scripts, with a data file from Abhiram's body flow simulation as input.

# Check whether scripts being run on Mox
if socket.gethostname()[0:3] == "mox":
    mox = True
    blender_dir = "/gscratch/ferrante/blender/blender-2.78c-linux-glibc219-x86_64/./"
else:
    mox = False
    blender_dir = ""

# Check if dirname.cfg, which contains directory paths used throughout the scripts, exists - otherwise, create it
if not os.path.exists("dirname.cfg"):
    config_dirname_cfg()

# Load important directories
dirname_config = configparser.ConfigParser()
dirname_config.read("dirname.cfg")

# Get case name. This corresponds to a specific .h5dns file and is specified by the user. A case config file will be created with its name.
case_name = input("Enter case name. This can be any string that refers to a particular VIZ.cgns file. ")
create_all(case_name)
case_config_path = dirname_config["DIRECTORIES"]["RenderConfig"] + case_name + "-case.cfg"

# If existing case config file exists, the user is specifying a particular .h5dns file that is already associated with
# this case name, so move on to render settings config. Otherwise, create case config file from user input.
if os.path.exists(case_config_path):
    print("Found existing case configuration: " + case_config_path)
    existing_case_config = configparser.ConfigParser()
    existing_case_config.read(case_config_path)
    print("data file: " + existing_case_config["STRING"]["h5dns_path"])
else:
    # Create new case config file
    new_case_config = configparser.ConfigParser()

    # There are different sections for each datatype (this is how the scripts know what data types to load, when they are all saved as strings)
    new_case_config["STRING"] = {}
    new_case_config["FLOAT"] = {}
    new_case_config["INT"] = {}

    # Save important strings
    new_case_config["STRING"]["case_name"] = case_name
    new_case_config["STRING"]["data_file_type"] = "bodyflow"
    h5dns_path = input("Enter absolute path to data file: ")
    new_case_config["STRING"]["h5dns_path"] = h5dns_path

    # Load data file and save important params
    params = cgns_load_data.get_important_data(h5dns_path)
    new_case_config["INT"]["tres"] = str(params["tres"])
    new_case_config["INT"]["ires"] = str(params["ires"])
    new_case_config["INT"]["jres"] = str(params["jres"])
    new_case_config["INT"]["kres"] = str(params["kres"])

    # Write case config file
    with open(case_config_path, "w") as case_config_file:
        new_case_config.write(case_config_file)

# Get render-specific config settings from user. This specifies what type of render to perform (photorealistic, surface
# temperature, ...), and other render settings (scale of droplet to render, etc.)
render_type = int(input("Select type of render to perform (enter number).\n 1 Streamline render\n 2 Vortex line render\n"))
render_name = input("Enter render profile name. This can be any string that refers to specific rendering settings for a data case. ")

# Initialize categories based on data types
new_render_config = configparser.ConfigParser()
new_render_config["STRING"] = {}
new_render_config["INT"] = {}
new_render_config["FLOAT"] = {}
new_render_config["BOOL"] = {}
new_render_config["STRING"]["render_name"] = render_name

# Determine settings from user that are specific to each type.
if (render_type == 1):  # Streamline
    # Name render config file based on the type of render being performed
    render_config_path = dirname_config["DIRECTORIES"]["RenderConfig"] + render_name + "-render-streamline.cfg"
    # Get some other settings
elif (render_type == 2):  # Vortex line
    render_config_path = dirname_config["DIRECTORIES"]["RenderConfig"] + render_name + "-render-vortexline.cfg"

# General inputs
new_render_config["INT"]["num_streamlines"] = input("Specify number of streamlines: ")
new_render_config["INT"]["streamline_seed"] = "777"  # input("Specify random seed number to determine streamline start positions from: ")
new_render_config["FLOAT"]["view_fraction"] = input("Specify desired render frame width as multiple of domain length: ")
new_render_config["FLOAT"]["camera_azimuth_angle"] = input("Specify camera azimuth angle from the x-axis (deg): ")
new_render_config["FLOAT"]["camera_elevation_angle"] = input("Specify camera elevation angle from the horizontal (deg): ")
bg_image_enabled = get_yesno_input("Use custom background image? ")
if bg_image_enabled:
    new_render_config["STRING"]["bg_image_filepath"] = dirname_config["DIRECTORIES"]["background_images"] + input("Specify background image name (in \"Render2018/BackgroundImages\"): ")
    new_render_config["STRING"]["bg_color_1"] = ""
    new_render_config["STRING"]["bg_color_2"] = ""
else:
    new_render_config["STRING"]["bg_image_filepath"] = ""
    new_render_config["STRING"]["bg_color_1"] = input("Specify R,G,B value of lower background color (separate floats by commas, values range from 0 to 1): ")
    new_render_config["STRING"]["bg_color_2"] = input("Specify R,G,B value of upper background color (separate floats by commas, values range from 0 to 1): ")
new_render_config["FLOAT"]["resolution_percentage"] = input("Specify resolution percentage out of 100, as a percentage of 4K: ")

# Write render config file
with open(render_config_path, "w") as render_config_file:
    new_render_config.write(render_config_file)

# Create slurm jobscript to run on Mox
slurm_name = case_name + "_" + render_name + ".slurm"
create_jobscripts.create_mox_slurm(slurm_dir=dirname_config["DIRECTORIES"]["RenderJobscripts"], slurm_name=slurm_name,
                                   job_name=case_name+"_"+render_name, lib_dir=os.getcwd(),
                                   python_file_to_run="render_init.py", case_config_path=case_config_path,
                                   render_config_path=render_config_path)
local_py_name = case_name + "_" + render_name + ".py"
create_jobscripts.create_local_py(python_dir=dirname_config["DIRECTORIES"]["RenderJobscripts"], python_filename=local_py_name,
                                  lib_dir=dirname_config["DIRECTORIES"]["lib"], python_file_to_run="render_init.py",
                                  case_config_path=case_config_path, render_config_path=render_config_path)

# Run jobscript if user desires
if mox:
    if get_yesno_input("Run " + slurm_name + " to launch this rendering job?"):
        os.system("sbatch -p ferrante -A ferrante " + dirname_config["DIRECTORIES"]["RenderJobscripts"] + "/" + slurm_name)
else:
    if get_yesno_input("Run " + local_py_name + " to launch this rendering job?"):
        os.system("python3 " + dirname_config["DIRECTORIES"]["RenderJobscripts"] + local_py_name)
56.396694
286
0.75381
0
0
0
0
0
0
0
0
3682
0.539566
29eb3307185eaf4daecd050d3551f86ee4f012bf
1,004
py
Python
util/replicate.py
ZvonimirSun/janusgraph-utils
c10e7b3ccb7c56c7662053d9d8b1d0bcb0a20bb8
[ "Apache-2.0" ]
204
2017-08-10T02:36:53.000Z
2022-03-11T12:21:18.000Z
util/replicate.py
HsbcJone/Jaunsgraph-LoadBulkData-Utils-
9c4e3b0c0b9f9966ab43422929ae5ea4993b3bb8
[ "Apache-2.0" ]
37
2017-08-16T01:06:02.000Z
2020-08-05T02:30:18.000Z
util/replicate.py
HsbcJone/Jaunsgraph-LoadBulkData-Utils-
9c4e3b0c0b9f9966ab43422929ae5ea4993b3bb8
[ "Apache-2.0" ]
103
2017-08-29T14:17:32.000Z
2022-03-07T14:30:48.000Z
#!/usr/bin/python

import sys
import simplejson as json

def replicate_vertex(conf,pos, i):
    p = conf["VertexTypes"][pos]["columns"]["T{}-P1".format(pos+1)]
    for x in range(2, i+1):
        new_key = "T{}-P{}".format(pos+1, str(x))
        conf["VertexTypes"][pos]["columns"][new_key] = p
    return conf

def replicate_edge(conf, pos, i):
    p = conf["EdgeTypes"][pos]["columns"]["E{}-P1".format(pos+1)]
    for x in range(2, i+1):
        new_key ='E{}-P{}'.format(pos+1,str(x))
        conf["EdgeTypes"][pos]["columns"][new_key] = p
    return conf

def main():
    f = open(sys.argv[1], "r")
    j = json.load(f)
    json.dump(replicate_vertex(j,0, int(sys.argv[3])), open(sys.argv[2], "w"))
    json.dump(replicate_vertex(j,1, 2*int(sys.argv[3])), open(sys.argv[2], "w"))
    json.dump(replicate_vertex(j,2, 2*int(sys.argv[3])), open(sys.argv[2], "w"))
    json.dump(replicate_edge(j, 0, int(sys.argv[3])), open(sys.argv[2], "w"))
    json.dump(replicate_edge(j, 1, int(sys.argv[3])), open(sys.argv[2], "w"))

if __name__ == "__main__":
    main()
32.387097
77
0.62749
0
0
0
0
0
0
0
0
163
0.162351
29eb4a3f8932c013c8f2635314e11c22d12e4148
1602
py
Python
commands/fight.py
AlexMog/IRCPokemonBot
0a735f262ce06ecd4c3b702094cf4b78e3cd7c45
[ "MIT" ]
2
2015-06-10T12:16:53.000Z
2016-03-09T22:43:43.000Z
commands/fight.py
AlexMog/IRCPokemonBot
0a735f262ce06ecd4c3b702094cf4b78e3cd7c45
[ "MIT" ]
null
null
null
commands/fight.py
AlexMog/IRCPokemonBot
0a735f262ce06ecd4c3b702094cf4b78e3cd7c45
[ "MIT" ]
null
null
null
#!/usr/bin/env python2
import copy
import random

from classes.Pokemons import *
from classes.Battle import *


def fight(connection, canal, auteur, cmds, canalobj, mogbot):
    user = mogbot.getUserManager().findUser(auteur)
    if user == False:
        connection.privmsg(canal, auteur + " I cannot find you in the database :( - error 500")
        return
    if user.battle is not None:
        connection.privmsg(canal, auteur + " you are already in a battle with someone!")
        return
    if len(cmds) > 2:
        if cmds[2] == "nature":
            u = pokemonsManager.getRandom()
            mini = user.getActivePokemon().level - 5
            if mini <= 0:
                mini = 1
            maxi = mini + 5
            u.level = random.randint(mini, maxi)
            battle = Battle(user, u)
            battle.auto = True
            user.battle = u.battle = battle
            user.battle.accepted = True
            connection.privmsg(canal, user.username + " you ran into a wild " + u.username + " (lvl: " + str(u.level) + " )! Watch out!")
        else:
            u = mogbot.getUserManager().findUser(cmds[2])
            if u == False:
                connection.privmsg(canal, user.username + " opponent not found.")
                return
            user.battle = u.battle = Battle(user, u)
            connection.privmsg(canal, user.username + " has challenged " + u.username + " to a duel! Will they accept? (use accept to accept the duel and refuse to decline)")
    else:
        connection.privmsg(canal, auteur + " usage: fight <nature or nickname>")
40.05
173
0.585518
0
0
0
0
0
0
0
0
348
0.217228
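A dispatcher would need to hand fight() a token list where cmds[2] holds the argument. A hypothetical hookup, assuming the bot tokenizes an IRC line with the python irc library's event objects (the real IRCPokemonBot wiring may differ):

from commands.fight import fight

def on_pubmsg(connection, event, mogbot, channels):
    tokens = event.arguments[0].split()  # e.g. ["!cmd", "fight", "nature"]
    if len(tokens) >= 2 and tokens[1] == "fight":
        canal = event.target
        fight(connection, canal, event.source.nick, tokens, channels.get(canal), mogbot)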
29ebeb3c0f6aa2c670636976b54b4c234e9cc858
3,356
py
Python
horizon_hpe_storage/storage_panel/config/software_tests/forms.py
hp-storage/horizon-ssmc-link
f419ecf2a545a79f1ff6628dc26f31dfb7c84996
[ "Apache-2.0" ]
1
2017-01-07T13:45:57.000Z
2017-01-07T13:45:57.000Z
horizon_hpe_storage/storage_panel/config/software_tests/forms.py
hp-storage/horizon-ssmc-link
f419ecf2a545a79f1ff6628dc26f31dfb7c84996
[ "Apache-2.0" ]
null
null
null
horizon_hpe_storage/storage_panel/config/software_tests/forms.py
hp-storage/horizon-ssmc-link
f419ecf2a545a79f1ff6628dc26f31dfb7c84996
[ "Apache-2.0" ]
null
null
null
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _

import horizon_hpe_storage.api.keystone_api as keystone
import horizon_hpe_storage.api.barbican_api as barbican

from horizon import exceptions
from horizon import forms
from horizon import messages


class AddSoftwareTest(forms.SelfHandlingForm):
    sw_package = forms.CharField(max_length=255, label=_("Software Package"))
    min_version = forms.CharField(max_length=255, label=_("Minimum Version"))
    description = forms.CharField(max_length=255, required=False, label=_("Description"))

    keystone_api = keystone.KeystoneAPI()
    barbican_api = barbican.BarbicanAPI()

    def handle(self, request, data):
        node_type = self.initial['node_type']
        try:
            self.keystone_api.do_setup(self.request)
            self.barbican_api.do_setup(self.keystone_api.get_session())
            self.barbican_api.add_software_test(node_type,
                                                data['sw_package'],
                                                data['min_version'],
                                                data['description'])
            msg = _('Added software package "%s".') % data['sw_package']
            messages.success(request, msg)
            return True
        except Exception:
            redirect = reverse("horizon:admin:hpe_storage:index")
            exceptions.handle(request,
                              _("Unable to add software package."),
                              redirect=redirect)


class EditSoftwareTest(forms.SelfHandlingForm):
    min_version = forms.CharField(max_length=255, label=_("Minimum Version"))
    description = forms.CharField(max_length=255, required=False, label=_("Description"))

    keystone_api = keystone.KeystoneAPI()
    barbican_api = barbican.BarbicanAPI()

    def handle(self, request, data):
        sw_package = self.initial['sw_package']
        node_type = self.initial['node_type']
        try:
            self.keystone_api.do_setup(self.request)
            self.barbican_api.do_setup(self.keystone_api.get_session())
            self.barbican_api.update_software_test(node_type,
                                                   sw_package,
                                                   data['min_version'],
                                                   data['description'])
            msg = _('Saved software package "%s".') % sw_package
            messages.success(request, msg)
            return True
        except Exception:
            redirect = reverse("horizon:admin:hpe_storage:index")
            exceptions.handle(request,
                              _("Unable to save software package."),
                              redirect=redirect)
44.157895
78
0.616508
2,503
0.745828
0
0
0
0
0
0
911
0.271454
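A SelfHandlingForm like AddSoftwareTest is normally exposed through a Horizon modal form view. A sketch under assumed names (the panel's real views.py defines the canonical wiring; the template path here is hypothetical):

from django.core.urlresolvers import reverse_lazy
from horizon import forms as horizon_forms
from horizon_hpe_storage.storage_panel.config.software_tests import forms as project_forms

class AddSoftwareTestView(horizon_forms.ModalFormView):
    form_class = project_forms.AddSoftwareTest
    template_name = "config/software_tests/add.html"  # assumed template path
    success_url = reverse_lazy("horizon:admin:hpe_storage:index")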
29ecd056c8357be81181e47ac71a968400c85cc9
1,396
py
Python
tests/test_liquidity_provider_factory.py
diem/liquidity-emulator
255cccd06c0949750e42e93906b083e915ddf505
[ "Apache-2.0" ]
2
2021-11-29T06:00:59.000Z
2022-01-27T18:42:29.000Z
tests/test_liquidity_provider_factory.py
hengkyherdianto/liquidity-emulator
255cccd06c0949750e42e93906b083e915ddf505
[ "Apache-2.0" ]
1
2021-01-31T09:14:05.000Z
2021-02-01T07:43:41.000Z
tests/test_liquidity_provider_factory.py
hengkyherdianto/liquidity-emulator
255cccd06c0949750e42e93906b083e915ddf505
[ "Apache-2.0" ]
4
2021-02-15T14:45:04.000Z
2022-03-03T02:32:45.000Z
from diem import chain_ids
from liquidity import create_liquidity_provider, init_liquidity_provider
from liquidity.liquidity import FaucetLiquidityProvider, DDLiquidityProvider

CUSTODY_PRIVATE_KEYS = (
    '{"liquidity":"c6537e56d844fa4a15f3bf5eacd41c9123a19ef19a1026f2325a6b2dd33a13f1"}'
)


def test_faucet_liquidity_provider_factory_for_testnet_without_custody_private_keys(
    patch_liquidity, monkeypatch
) -> None:
    monkeypatch.setenv("CHAIN_ID", str(chain_ids.TESTNET.value))
    monkeypatch.delenv("CUSTODY_PRIVATE_KEYS", raising=False)

    init_liquidity_provider()
    lp = create_liquidity_provider()

    assert isinstance(lp, FaucetLiquidityProvider)


def test_dd_liquidity_provider_factory_for_testnet_with_custody_private_keys(
    patch_liquidity, monkeypatch
) -> None:
    monkeypatch.setenv("CHAIN_ID", str(chain_ids.TESTNET.value))
    monkeypatch.setenv("CUSTODY_PRIVATE_KEYS", CUSTODY_PRIVATE_KEYS)

    init_liquidity_provider()
    lp = create_liquidity_provider()

    assert isinstance(lp, DDLiquidityProvider)


def test_dd_liquidity_provider_factory_for_premainnet(
    patch_liquidity, monkeypatch
) -> None:
    monkeypatch.setenv("CHAIN_ID", str(chain_ids.PREMAINNET.value))
    monkeypatch.setenv("CUSTODY_PRIVATE_KEYS", CUSTODY_PRIVATE_KEYS)

    init_liquidity_provider()
    lp = create_liquidity_provider()

    assert isinstance(lp, DDLiquidityProvider)
33.238095
86
0.809456
0
0
0
0
0
0
0
0
178
0.127507
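The factory choice above is driven purely by environment variables. A sketch of exercising it outside pytest, assuming the same module API; the chain-id value mirrors what the tests set via chain_ids.TESTNET:

import os
from liquidity import create_liquidity_provider, init_liquidity_provider

os.environ["CHAIN_ID"] = "2"                  # diem's TESTNET chain id, as in the tests
os.environ.pop("CUSTODY_PRIVATE_KEYS", None)  # no keys -> faucet-backed provider expected

init_liquidity_provider()
provider = create_liquidity_provider()        # expected: FaucetLiquidityProvider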
29ecd75f594b19acd9901238ad242a2ae33df3f6
112
py
Python
layers/modules/__init__.py
Eralaf/ssd.pytorch
acad53fd801f32120ecb3ff57950556e35db3d1c
[ "MIT" ]
null
null
null
layers/modules/__init__.py
Eralaf/ssd.pytorch
acad53fd801f32120ecb3ff57950556e35db3d1c
[ "MIT" ]
null
null
null
layers/modules/__init__.py
Eralaf/ssd.pytorch
acad53fd801f32120ecb3ff57950556e35db3d1c
[ "MIT" ]
null
null
null
from .l2norm import L2Norm
from .multibox_loss import MultiBoxLoss

__all__ = ['L2Norm', 'MultiBoxLoss']
22.4
39
0.732143
0
0
0
0
0
0
0
0
22
0.196429
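Given the __all__ re-export above, a downstream import would look like this (a usage sketch only; the constructor arguments for MultiBoxLoss are defined in multibox_loss.py and not shown here):

from layers.modules import L2Norm, MultiBoxLoss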
29edb04d6019c45efcdb61aac97edab310263a90
59
py
Python
test/octagon.py
Jahongir2007/pymetry
02c8e82a188700b4213fd4a70aa66a3b5e9843b8
[ "MIT" ]
1
2021-04-04T11:38:42.000Z
2021-04-04T11:38:42.000Z
test/octagon.py
Jahongir2007/pymetry
02c8e82a188700b4213fd4a70aa66a3b5e9843b8
[ "MIT" ]
null
null
null
test/octagon.py
Jahongir2007/pymetry
02c8e82a188700b4213fd4a70aa66a3b5e9843b8
[ "MIT" ]
null
null
null
import pymetry

pym = pymetry
pym.octagon(150, "yellow", 8)
14.75
29
0.728814
0
0
0
0
0
0
0
0
8
0.135593
29f10336d5ea889a3a24c9c3648237cbdaee7b65
5,586
py
Python
tools/remote_debugger.py
budelius/openstreetmap-heatmap
f7376671eecda68955b8edc016c63218c5ebc6a2
[ "Apache-2.0" ]
null
null
null
tools/remote_debugger.py
budelius/openstreetmap-heatmap
f7376671eecda68955b8edc016c63218c5ebc6a2
[ "Apache-2.0" ]
null
null
null
tools/remote_debugger.py
budelius/openstreetmap-heatmap
f7376671eecda68955b8edc016c63218c5ebc6a2
[ "Apache-2.0" ]
null
null
null
""" Remote debugging support. This addon allows you to use a remote Python debugger with PyCharm, PyDev and possibly other IDEs. As it is, without modification, it only supports PyCharm, but it may work by pointing it at a similar egg file shipped with PyDev. Before using, point the addon to your pycharm-debug-py3k.egg file in the addon preferences screen. For more information on how to use this addon, please read my article at http://code.blender.org/2015/10/debugging-python-code-with-pycharm/ """ bl_info = { 'name': 'Remote debugger', 'author': 'Sybren A. Stüvel', 'version': (0, 4), 'blender': (2, 80, 0), 'location': 'Press [Space], search for "debugger"', 'category': 'Development', } import bpy import os.path from bpy.types import AddonPreferences from bpy.props import StringProperty # Get references to all property definition functions in bpy.props, # so that they can be used to replace 'x = IntProperty()' to 'x: IntProperty()' # dynamically when working on Blender 2.80+ __all_prop_funcs = { getattr(bpy.props, propname) for propname in dir(bpy.props) if propname.endswith('Property') } def convert_properties(class_): """Class decorator to avoid warnings in Blender 2.80+ This decorator replaces property definitions like this: someprop = bpy.props.IntProperty() to annotations, as introduced in Blender 2.80: someprop: bpy.props.IntProperty() No-op if running on Blender 2.79 or older. """ if bpy.app.version < (2, 80): return class_ if not hasattr(class_, '__annotations__'): class_.__annotations__ = {} attrs_to_delete = [] for name, value in class_.__dict__.items(): if not isinstance(value, tuple) or len(value) != 2: continue prop_func, kwargs = value if prop_func not in __all_prop_funcs: continue # This is a property definition, replace it with annotation. attrs_to_delete.append(name) class_.__annotations__[name] = value for attr_name in attrs_to_delete: delattr(class_, attr_name) return class_ def addon_preferences(context): try: preferences = context.preferences except AttributeError: # Old (<2.80) location of user preferences preferences = context.user_preferences return preferences.addons[__name__].preferences @convert_properties class DebuggerAddonPreferences(AddonPreferences): # this must match the addon name, use '__package__' # when defining this in a submodule of a python package. bl_idname = __name__ eggpath = StringProperty( name='Path of the PyCharm egg file', description='Make sure you select the py3k egg', subtype='FILE_PATH', default='pycharm-debug-py3k.egg' ) pydevpath = StringProperty( name='Path of the PyDev pydevd.py file', subtype='FILE_PATH', default='pydevd.py' ) def draw(self, context): layout = self.layout layout.prop(self, 'pydevpath') layout.prop(self, 'eggpath') layout.label(text='Make sure you select the egg for Python 3.x: pycharm-debug-py3k.egg ') class DEBUG_OT_connect_debugger_pycharm(bpy.types.Operator): bl_idname = 'debug.connect_debugger_pycharm' bl_label = 'Connect to remote PyCharm debugger' bl_description = 'Connects to a PyCharm debugger on localhost:1090' def execute(self, context): import sys addon_prefs = addon_preferences(context) eggpath = os.path.abspath(addon_prefs.eggpath) if not os.path.exists(eggpath): self.report({'ERROR'}, 'Unable to find debug egg at %r. Configure the addon properties ' 'in the User Preferences menu.' 
% eggpath) return {'CANCELLED'} if not any('pycharm-debug' in p for p in sys.path): sys.path.append(eggpath) import pydevd pydevd.settrace('localhost', port=1090, stdoutToServer=True, stderrToServer=True, suspend=False) return {'FINISHED'} class DEBUG_OT_connect_debugger_pydev(bpy.types.Operator): bl_idname = 'debug.connect_debugger_pydev' bl_label = 'Connect to remote PyDev debugger' bl_description = 'Connects to a PyDev debugger on localhost:5678' def execute(self, context): import sys addon_prefs = addon_preferences(context) pydevpath = os.path.abspath(addon_prefs.pydevpath) if not os.path.exists(pydevpath): self.report({'ERROR'}, 'Unable to find pydevd.py at %r. Configure the addon properties ' 'in the User Preferences menu.' % pydevpath) return {'CANCELLED'} dirname = os.path.dirname(pydevpath) basename = os.path.basename(dirname) if not any(basename in p for p in sys.path): sys.path.append(dirname) import pydevd pydevd.settrace('localhost', port=5678, stdoutToServer=True, stderrToServer=True, suspend=False) return {'FINISHED'} def register(): bpy.utils.register_class(DEBUG_OT_connect_debugger_pycharm) bpy.utils.register_class(DEBUG_OT_connect_debugger_pydev) bpy.utils.register_class(DebuggerAddonPreferences) def unregister(): bpy.utils.unregister_class(DEBUG_OT_connect_debugger_pycharm) bpy.utils.unregister_class(DEBUG_OT_connect_debugger_pydev) bpy.utils.unregister_class(DebuggerAddonPreferences) if __name__ == '__main__': register()
30.692308
100
0.674544
2,705
0.48416
0
0
798
0.142832
0
0
2,150
0.384822
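Once the addon is enabled, the operators registered above can also be invoked from Blender's Python console via their bl_idname values (standard bpy.ops dispatch; ports are the ones hard-coded in execute()):

import bpy
bpy.ops.debug.connect_debugger_pycharm()  # attaches to PyCharm on localhost:1090
bpy.ops.debug.connect_debugger_pydev()    # attaches to PyDev on localhost:5678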
29f1f4d867171d615c82f43e02801e5ac479dcd4
2,247
py
Python
todo/views/accepted_petitions.py
josalhor/WebModels
6b9cde3141c53562f40b129e6e1c87448ce9853a
[ "BSD-3-Clause" ]
null
null
null
todo/views/accepted_petitions.py
josalhor/WebModels
6b9cde3141c53562f40b129e6e1c87448ce9853a
[ "BSD-3-Clause" ]
41
2021-03-23T12:58:25.000Z
2021-05-25T11:38:42.000Z
todo/views/accepted_petitions.py
josalhor/WebModels
6b9cde3141c53562f40b129e6e1c87448ce9853a
[ "BSD-3-Clause" ]
null
null
null
import datetime

from django.contrib import messages
from django.contrib.auth.decorators import login_required, user_passes_test
from django.http import HttpResponse
from django.shortcuts import render

from .book_assign import send_email_reject_book
from todo.forms import SearchForm
from todo.models import Task, Book, Editor, Writer


@login_required
def accepted_petitions(request) -> HttpResponse:
    deleted, editor_view = False, False
    thedate = datetime.datetime.now()
    searchform = SearchForm(auto_id=False)
    editor = Editor.objects.filter(user=request.user).first()
    is_chief = False
    all_lists = None
    if editor:
        lists = Book.objects.filter(completed=False)
        all_lists = Book.objects.filter(completed=False)
        if editor.chief:
            is_chief = True
            lists = lists.filter(editor=editor)
            all_lists = all_lists.exclude(editor=None)
        else:
            lists = lists.filter(editor=editor)
        lists = lists.exclude(rejected=True).order_by("name")
        editor_view = True
    else:
        author = Writer.objects.filter(user=request.user)
        lists = Book.objects.filter(completed=False, rejected=False, author__in=author).exclude(editor=None).order_by("name")

    list_count = lists.count()

    task_count = 0
    for book in lists:
        tasks = Task.objects.filter(book=book, completed=False).count()
        task_count += tasks

    if request.method == "POST":
        book = Book.objects.filter(name=request.POST['delete-book']).first()
        deleted = True
        book.editor = None
        book.rejected = True
        book.save()
        send_email_reject_book(book, reasons=request.POST['reasons'])
        messages.success(request, "The petition for the book '{}' has been removed from your list of accepted petitions.".format(book.name))

    context = {
        "editor_view": editor_view,
        "deleted": deleted,
        "lists": lists,
        "thedate": thedate,
        "searchform": searchform,
        "list_count": list_count,
        "task_count": task_count,
        "all_lists": all_lists,
        "is_chief": is_chief
    }

    return render(request, "todo/accepted_petitions.html", context)
31.208333
151
0.668002
0
0
0
0
1,909
0.849199
0
0
264
0.117438
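To be reachable, the view needs a URL route. A hypothetical wiring, assuming a Django 2+ style urls module; the actual path and route name in WebModels may differ:

from django.urls import path
from todo.views.accepted_petitions import accepted_petitions

urlpatterns = [
    path("petitions/accepted/", accepted_petitions, name="accepted_petitions"),
]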
29f31b2343f07216325a81bd944dfce29b98de66
610
py
Python
2_sheet/2-sheet-hundt-robin/plot-data.py
robinhundt/practical-course-parallel-computing
08f1fc76324d5c6338b32b2f14c2a11fef3ad619
[ "MIT" ]
null
null
null
2_sheet/2-sheet-hundt-robin/plot-data.py
robinhundt/practical-course-parallel-computing
08f1fc76324d5c6338b32b2f14c2a11fef3ad619
[ "MIT" ]
null
null
null
2_sheet/2-sheet-hundt-robin/plot-data.py
robinhundt/practical-course-parallel-computing
08f1fc76324d5c6338b32b2f14c2a11fef3ad619
[ "MIT" ]
null
null
null
import matplotlib.pyplot as plt

# number of threads used to compute product of 2 matrices of dim. 1024
data_x = [1, 2, 3, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096]
# execution time in seconds
data_y = [3.300059, 1.664494, 2.294884, 3.200235, 2.915945, 3.082389, 3.023162,
          3.012096, 2.958028, 2.939918, 2.847527, 2.898556, 2.876036, 2.963720]

plt.figure()
plt.plot(data_x, data_y)
plt.xlabel('# of threads')
plt.xscale('log')
plt.ylabel('execution time in seconds')
plt.title('Execution times of 1024x1024 matrix multiplication with different thread counts')
plt.show()
33.888889
82
0.672131
0
0
0
0
0
0
0
0
214
0.35082
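On a headless machine (for example a cluster node without a display), plt.show() will not open a window; the figure can be written to disk instead. This variant uses only standard matplotlib API, with abbreviated data for illustration:

import matplotlib
matplotlib.use("Agg")  # non-interactive backend; must be selected before pyplot is used
import matplotlib.pyplot as plt

plt.figure()
plt.plot([1, 2, 4, 8], [3.30, 1.66, 3.20, 2.92])  # abbreviated thread counts / times
plt.xscale('log')
plt.savefig("matmul-times.png", dpi=150)  # write to disk instead of plt.show()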