code (string, lengths 3 to 1.05M) | repo_name (string, lengths 5 to 104) | path (string, lengths 4 to 251) | language (1 class) | license (15 classes) | size (int64, 3 to 1.05M) |
---|---|---|---|---|---|
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Chicago taxi example using TFX on local orchestrator."""
import os
from absl import logging
from tfx.components.example_gen.csv_example_gen.component import CsvExampleGen
from tfx.orchestration import metadata
from tfx.orchestration import pipeline
from tfx.orchestration.local import local_dag_runner
_pipeline_name = 'chicago_taxi_local'
_taxi_root = os.path.join(os.environ['HOME'], 'taxi')
_data_root = os.path.join(_taxi_root, 'data', 'simple')
_tfx_root = os.path.join(os.environ['HOME'], 'tfx')
_pipeline_root = os.path.join(_tfx_root, 'pipelines', _pipeline_name)
# Sqlite ML-metadata db path.
_metadata_path = os.path.join(_tfx_root, 'metadata', _pipeline_name,
'metadata.db')
def _create_pipeline(pipeline_name: str, pipeline_root: str, data_root: str,
metadata_path: str) -> pipeline.Pipeline:
"""Implements the chicago taxi pipeline with TFX."""
# Brings data into the pipeline or otherwise joins/converts training data.
example_gen = CsvExampleGen(input_base=data_root)
return pipeline.Pipeline(
pipeline_name=pipeline_name,
pipeline_root=pipeline_root,
components=[example_gen],
enable_cache=True,
metadata_connection_config=metadata.sqlite_metadata_connection_config(
metadata_path),
additional_pipeline_args={},
)
if __name__ == '__main__':
logging.set_verbosity(logging.INFO)
local_dag_runner.LocalDagRunner().run(
_create_pipeline(
pipeline_name=_pipeline_name,
pipeline_root=_pipeline_root,
data_root=_data_root,
metadata_path=_metadata_path))
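# Hedged extension sketch (assumption: the standard StatisticsGen component is available
# under tfx.components in this TFX release). A second component would be created inside
# _create_pipeline right after CsvExampleGen and appended to the `components` list:
#
#     from tfx.components import StatisticsGen
#     statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])
#     ...
#     components=[example_gen, statistics_gen],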
| tensorflow/tfx | tfx/tools/cli/testdata/test_pipeline_local_2.py | Python | apache-2.0 | 2,230 |
"""
Django settings for firstdjango project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'j8s(6fw61+cx_o=g!9a(vs!wbj0&f!7u_lw$(eap5d4li@!b4('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'inventory',
'mathfilters',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'firstdjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['firstdjango/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'firstdjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Chicago'  # 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
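# Hedged deployment sketch (the DJANGO_SECRET_KEY / DJANGO_DEBUG variable names are this
# sketch's own convention, not part of the original project): let the environment
# override the hard-coded development values above on production hosts.
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
if 'DJANGO_DEBUG' in os.environ:
    DEBUG = os.environ['DJANGO_DEBUG'] == '1'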
| carolinehardin/pagewise | firstdjango/firstdjango/settings.py | Python | gpl-2.0 | 2,728 |
"""
==========================================
IsolationForest benchmark
==========================================
A test of IsolationForest on classical anomaly detection datasets.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_kddcup99, fetch_covtype, fetch_mldata
from sklearn.ensemble import IsolationForest
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle as sh
np.random.seed(1)
datasets = ['http'] # , 'smtp', 'SA', 'SF', 'shuttle', 'forestcover']
for dat in datasets:
# loading and vectorization
print('loading data')
if dat in ['http', 'smtp', 'SA', 'SF']:
dataset = fetch_kddcup99(subset=dat, shuffle=True, percent10=True)
X = dataset.data
y = dataset.target
if dat == 'shuttle':
dataset = fetch_mldata('shuttle')
X = dataset.data
y = dataset.target
sh(X, y)
# we remove data with label 4
# normal data are then those of class 1
s = (y != 4)
X = X[s, :]
y = y[s]
y = (y != 1).astype(int)
if dat == 'forestcover':
dataset = fetch_covtype(shuffle=True)
X = dataset.data
y = dataset.target
# normal data are those with attribute 2
# abnormal those with attribute 4
s = (y == 2) + (y == 4)
X = X[s, :]
y = y[s]
y = (y != 2).astype(int)
print('vectorizing data')
if dat == 'SF':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
X = np.c_[X[:, :1], x1, X[:, 2:]]
y = (y != 'normal.').astype(int)
if dat == 'SA':
lb = LabelBinarizer()
lb.fit(X[:, 1])
x1 = lb.transform(X[:, 1])
lb.fit(X[:, 2])
x2 = lb.transform(X[:, 2])
lb.fit(X[:, 3])
x3 = lb.transform(X[:, 3])
X = np.c_[X[:, :1], x1, x2, x3, X[:, 4:]]
y = (y != 'normal.').astype(int)
if dat == 'http' or dat == 'smtp':
y = (y != 'normal.').astype(int)
n_samples, n_features = np.shape(X)
n_samples_train = n_samples // 2
n_samples_test = n_samples - n_samples_train
X = X.astype(float)
X_train = X[:n_samples_train, :]
X_test = X[n_samples_train:, :]
y_train = y[:n_samples_train]
y_test = y[n_samples_train:]
print('IsolationForest processing...')
model = IsolationForest(bootstrap=True, n_jobs=-1)
tstart = time()
model.fit(X_train)
fit_time = time() - tstart
tstart = time()
    scoring = -model.decision_function(X_test)  # the lower, the more normal
predict_time = time() - tstart
fpr, tpr, thresholds = roc_curve(y_test, scoring)
AUC = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=1, label='ROC for %s (area = %0.3f, train-time: %0.2fs, test-time: %0.2fs)' % (
dat, AUC, fit_time, predict_time))
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
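# Hedged follow-up sketch (assumption: executed right after the loop above, so `model`
# and `X_test` still refer to the last dataset processed). decision_function returns a
# continuous anomaly score; lower (more negative) values mean more anomalous samples.
sample_scores = model.decision_function(X_test[:5])
print('decision_function for the first 5 test samples:', sample_scores)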
| DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/benchmarks/bench_isolation_forest.py | Python | mit | 3,144 |
"""Tests for log_check script."""
try:
from mock import patch
except ImportError:
from unittest.mock import patch
import subprocess
import log_check as lc
from tests import TestCase
class TestCheckFile(TestCase):
regex = 'test123'
file_path = '/tmp/file.log'
@patch('sys.stdout')
def test_calls_check_call(self, _):
with patch.object(lc.subprocess, 'check_call') as m_checkcall:
lc.check_file(self.regex, self.file_path)
m_checkcall.assert_called_once_with(
['sudo', 'egrep', self.regex, self.file_path])
@patch('sys.stdout')
def test_fails_after_attempting_multiple_times(self, _):
with patch.object(lc.subprocess, 'check_call') as m_checkcall:
m_checkcall.side_effect = subprocess.CalledProcessError(
1, ['sudo', 'egrep', self.regex, self.file_path])
with patch.object(lc.time, 'sleep') as m_sleep:
self.assertEqual(
lc.check_file(self.regex, self.file_path),
lc.check_result.failure)
self.assertEqual(m_sleep.call_count, 10)
@patch('sys.stdout')
def test_fails_when_meeting_unexpected_outcome(self, _):
with patch.object(lc.subprocess, 'check_call') as m_checkcall:
m_checkcall.side_effect = subprocess.CalledProcessError(
-1, ['sudo', 'egrep', self.regex, self.file_path])
self.assertEqual(
lc.check_file(self.regex, self.file_path),
lc.check_result.exception)
@patch('sys.stdout')
def test_succeeds_when_regex_found(self, _):
with patch.object(lc.subprocess, 'check_call'):
self.assertEqual(
lc.check_file(self.regex, self.file_path),
lc.check_result.success)
class TestRaiseIfFileNotFound(TestCase):
def test_raises_when_file_not_found(self):
with self.assertRaises(ValueError):
lc.raise_if_file_not_found('/thisfilewontexists')
def test_does_not_raise_when_file_not_found(self):
lc.raise_if_file_not_found('/')
class TestParseArgs(TestCase):
def test_basic_args(self):
args = ['test .*', '/tmp/log.file']
parsed = lc.parse_args(args)
self.assertEqual(parsed.regex, 'test .*')
self.assertEqual(parsed.file_path, '/tmp/log.file')
| freyes/juju | acceptancetests/tests/test_log_check.py | Python | agpl-3.0 | 2,372 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import json
import os
import os.path
from socorro.lib.datetimeutil import JsonDTEncoder
from socorro.lib.requestslib import session_with_retries
from socorro.scripts import FallbackToPipeAction, FlagAction, WrappedTextHelpFormatter
DESCRIPTION = """
Fetches crash data from crash-stats.mozilla.org system
"""
EPILOG = """
Given one or more crash ids via command line or stdin (one per line), fetches crash data and puts it
in the specified directory.
This requires an auth-token to be in the environment in order to download dumps and personally
identifiable information::
SOCORRO_API_TOKEN=xyz
Make sure the auth-token matches the host you're fetching data from.
To create an API token for Socorro in -prod, visit:
https://crash-stats.mozilla.org/api/tokens/
"""
class CrashDoesNotExist(Exception):
pass
class BadAPIToken(Exception):
pass
def create_dir_if_needed(d):
if not os.path.exists(d):
os.makedirs(d)
def fetch_crash(
host, fetchraw, fetchdumps, fetchprocessed, outputdir, api_token, crash_id
):
"""Fetch crash data and save to correct place on the file system
http://antenna.readthedocs.io/en/latest/architecture.html#aws-s3-file-hierarchy
"""
if api_token:
headers = {"Auth-Token": api_token}
else:
headers = {}
session = session_with_retries()
if fetchraw:
# Fetch raw crash metadata
print("Fetching raw %s" % crash_id)
resp = session.get(
host + "/api/RawCrash/",
params={"crash_id": crash_id, "format": "meta"},
headers=headers,
)
# Handle 404 and 403 so we can provide the user more context
if resp.status_code == 404:
raise CrashDoesNotExist(crash_id)
if api_token and resp.status_code == 403:
raise BadAPIToken(resp.json().get("error", "No error provided"))
# Raise an error for any other non-200 response
resp.raise_for_status()
# Save raw crash to file system
raw_crash = resp.json()
fn = os.path.join(
outputdir, "v2", "raw_crash", crash_id[0:3], "20" + crash_id[-6:], crash_id
)
create_dir_if_needed(os.path.dirname(fn))
with open(fn, "w") as fp:
json.dump(raw_crash, fp, cls=JsonDTEncoder, indent=2, sort_keys=True)
if fetchdumps:
# Fetch dumps
dumps = {}
dump_names = raw_crash.get("dump_checksums", {}).keys()
for dump_name in dump_names:
print("Fetching dump %s/%s" % (crash_id, dump_name))
# We store "upload_file_minidump" as "dump", so we need to use that
# name when requesting from the RawCrash api
file_name = dump_name
if file_name == "upload_file_minidump":
file_name = "dump"
resp = session.get(
host + "/api/RawCrash/",
params={"crash_id": crash_id, "format": "raw", "name": file_name},
headers=headers,
)
if resp.status_code != 200:
raise Exception(
"Something unexpected happened. status_code %s, content %s"
% (resp.status_code, resp.content)
)
dumps[dump_name] = resp.content
# Save dump_names to file system
fn = os.path.join(outputdir, "v1", "dump_names", crash_id)
create_dir_if_needed(os.path.dirname(fn))
with open(fn, "w") as fp:
json.dump(list(dumps.keys()), fp)
# Save dumps to file system
for dump_name, data in dumps.items():
if dump_name == "upload_file_minidump":
dump_name = "dump"
fn = os.path.join(outputdir, "v1", dump_name, crash_id)
create_dir_if_needed(os.path.dirname(fn))
with open(fn, "wb") as fp:
fp.write(data)
if fetchprocessed:
# Fetch processed crash data
print("Fetching processed %s" % crash_id)
resp = session.get(
host + "/api/ProcessedCrash/",
params={"crash_id": crash_id, "format": "meta"},
headers=headers,
)
# Handle 404 and 403 so we can provide the user more context
if resp.status_code == 404:
raise CrashDoesNotExist(crash_id)
if api_token and resp.status_code == 403:
raise BadAPIToken(resp.json().get("error", "No error provided"))
# Raise an error for any other non-200 response
resp.raise_for_status()
# Save processed crash to file system
processed_crash = resp.json()
fn = os.path.join(outputdir, "v1", "processed_crash", crash_id)
create_dir_if_needed(os.path.dirname(fn))
with open(fn, "w") as fp:
json.dump(processed_crash, fp, cls=JsonDTEncoder, indent=2, sort_keys=True)
def main(argv=None):
parser = argparse.ArgumentParser(
formatter_class=WrappedTextHelpFormatter,
description=DESCRIPTION.strip(),
epilog=EPILOG.strip(),
)
parser.add_argument(
"--host",
default="https://crash-stats.mozilla.org",
help="host to pull crash data from; this needs to match SOCORRO_API_TOKEN value",
)
parser.add_argument(
"--raw",
"--no-raw",
dest="fetchraw",
action=FlagAction,
default=True,
help="whether or not to save raw crash data",
)
parser.add_argument(
"--dumps",
"--no-dumps",
dest="fetchdumps",
action=FlagAction,
default=True,
help="whether or not to save dumps",
)
parser.add_argument(
"--processed",
"--no-processed",
dest="fetchprocessed",
action=FlagAction,
default=False,
help="whether or not to save processed crash data",
)
parser.add_argument("outputdir", help="directory to place crash data in")
parser.add_argument(
"crashid",
help="one or more crash ids to fetch data for",
nargs="*",
action=FallbackToPipeAction,
)
if argv is None:
args = parser.parse_args()
else:
args = parser.parse_args(argv)
if args.fetchdumps and not args.fetchraw:
print("You cannot fetch dumps without also fetching the raw crash. Exiting.")
return 1
# Validate outputdir and exit if it doesn't exist or isn't a directory
outputdir = args.outputdir
if os.path.exists(outputdir) and not os.path.isdir(outputdir):
print("%s is not a directory. Please fix. Exiting." % outputdir)
return 1
# Sort out API token existence
api_token = os.environ.get("SOCORRO_API_TOKEN")
if api_token:
print("Using api token: %s%s" % (api_token[:4], "x" * (len(api_token) - 4)))
else:
print(
"No api token provided. Skipping dumps and personally identifiable information."
)
for crash_id in args.crashid:
crash_id = crash_id.strip()
print("Working on %s..." % crash_id)
fetch_crash(
host=args.host,
fetchraw=args.fetchraw,
fetchdumps=args.fetchdumps,
fetchprocessed=args.fetchprocessed,
outputdir=outputdir,
api_token=api_token,
crash_id=crash_id,
)
return 0
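# Hedged entry-point sketch (assumption: upstream normally exposes this module as a
# console script, so this guard is illustrative rather than taken from the original).
# Example invocation fetching only processed crash data for one crash id:
#
#     python fetch_crash_data.py --no-dumps --processed ./crashdata <crash-id>
if __name__ == "__main__":
    import sys
    sys.exit(main())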
| lonnen/socorro | socorro/scripts/fetch_crash_data.py | Python | mpl-2.0 | 7,578 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, Marcelo Jorge Vieira <[email protected]>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License
# for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import asyncio
from ujson import loads
from unittest.mock import patch
from tests.politicos_api.base import BaseTestCase, get_json_mock
class PoliticalPartiesHandlerTestCase(BaseTestCase):
@patch('elasticsearch_async.AsyncElasticsearch.search')
def test_political_parties_handler(self, search_mock):
f = asyncio.Future()
es_political_parties = get_json_mock(
'es_political_parties_response.json'
)
f.set_result(es_political_parties)
search_mock.return_value = f
response = self.fetch('/api/v1/political-parties/')
self.assertEqual(response.code, 200)
api_political_parties = get_json_mock(
'api_political_parties_response.json'
)
self.assertEqual(loads(response.body), api_political_parties)
| olhoneles/politicos | tests/politicos_api/handlers/test_political_parties.py | Python | agpl-3.0 | 1,568 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import telegram
import airflow
from airflow.models import Connection
from airflow.providers.telegram.hooks.telegram import TelegramHook
from airflow.utils import db
TELEGRAM_TOKEN = "dummy token"
class TestTelegramHook(unittest.TestCase):
def setUp(self):
db.merge_conn(
Connection(
conn_id='telegram-webhook-without-token',
conn_type='http',
)
)
db.merge_conn(
Connection(
conn_id='telegram_default',
conn_type='http',
password=TELEGRAM_TOKEN,
)
)
db.merge_conn(
Connection(
conn_id='telegram-webhook-with-chat_id',
conn_type='http',
password=TELEGRAM_TOKEN,
host="-420913222",
)
)
def test_should_raise_exception_if_both_connection_or_token_is_not_provided(self):
with self.assertRaises(airflow.exceptions.AirflowException) as e:
TelegramHook()
self.assertEqual("Cannot get token: No valid Telegram connection supplied.", str(e.exception))
def test_should_raise_exception_if_conn_id_doesnt_exist(self):
with self.assertRaises(airflow.exceptions.AirflowNotFoundException) as e:
TelegramHook(telegram_conn_id='telegram-webhook-non-existent')
self.assertEqual("The conn_id `telegram-webhook-non-existent` isn't defined", str(e.exception))
def test_should_raise_exception_if_conn_id_doesnt_contain_token(self):
with self.assertRaises(airflow.exceptions.AirflowException) as e:
TelegramHook(telegram_conn_id='telegram-webhook-without-token')
self.assertEqual("Missing token(password) in Telegram connection", str(e.exception))
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_raise_exception_if_chat_id_is_not_provided_anywhere(self, mock_get_conn):
with self.assertRaises(airflow.exceptions.AirflowException) as e:
hook = TelegramHook(telegram_conn_id='telegram_default')
hook.send_message({"text": "test telegram message"})
self.assertEqual("'chat_id' must be provided for telegram message", str(e.exception))
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_raise_exception_if_message_text_is_not_provided(self, mock_get_conn):
with self.assertRaises(airflow.exceptions.AirflowException) as e:
hook = TelegramHook(telegram_conn_id='telegram_default')
hook.send_message({"chat_id": -420913222})
self.assertEqual("'text' must be provided for telegram message", str(e.exception))
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_send_message_if_all_parameters_are_correctly_provided(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="some_token")
hook = TelegramHook(telegram_conn_id='telegram_default')
hook.send_message({"chat_id": -420913222, "text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
'chat_id': -420913222,
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_send_message_if_chat_id_is_provided_through_constructor(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="some_token")
hook = TelegramHook(telegram_conn_id='telegram_default', chat_id=-420913222)
hook.send_message({"text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
'chat_id': -420913222,
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_send_message_if_chat_id_is_provided_in_connection(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="some_token")
hook = TelegramHook(telegram_conn_id='telegram-webhook-with-chat_id')
hook.send_message({"text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
'chat_id': "-420913222",
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_retry_when_any_telegram_error_is_encountered(self, mock_get_conn):
        expected_retry_count = 5
mock_get_conn.return_value = mock.Mock(password="some_token")
def side_effect(*args, **kwargs):
raise telegram.error.TelegramError("cosmic rays caused bit flips")
mock_get_conn.return_value.send_message.side_effect = side_effect
with self.assertRaises(Exception) as e:
hook = TelegramHook(telegram_conn_id='telegram-webhook-with-chat_id')
hook.send_message({"text": "test telegram message"})
self.assertTrue("RetryError" in str(e.exception))
self.assertTrue("state=finished raised TelegramError" in str(e.exception))
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_with(
**{
'chat_id': "-420913222",
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
        self.assertEqual(expected_retry_count, mock_get_conn.return_value.send_message.call_count)
@mock.patch('airflow.providers.telegram.hooks.telegram.TelegramHook.get_conn')
def test_should_send_message_if_token_is_provided(self, mock_get_conn):
mock_get_conn.return_value = mock.Mock(password="some_token")
hook = TelegramHook(token=TELEGRAM_TOKEN, chat_id=-420913222)
hook.send_message({"text": "test telegram message"})
mock_get_conn.return_value.send_message.return_value = "OK."
mock_get_conn.assert_called_once()
mock_get_conn.return_value.send_message.assert_called_once_with(
**{
'chat_id': -420913222,
'parse_mode': 'HTML',
'disable_web_page_preview': True,
'text': 'test telegram message',
}
)
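# Hedged usage sketch (assumptions: a real bot token and chat id plus the
# python-telegram-bot dependency are available; the values below are placeholders,
# mirroring the mocked calls above):
#
#     hook = TelegramHook(token="<bot token>", chat_id=-420913222)
#     hook.send_message({"text": "test telegram message"})
if __name__ == '__main__':
    unittest.main()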
| airbnb/airflow | tests/providers/telegram/hooks/test_telegram.py | Python | apache-2.0 | 7,973 |
###############################################################################
# -*- coding: utf-8 -*-
# Order: A tool to characterize the local structure of liquid water
# by geometric order parameters
#
# Authors: Pu Du
#
# Released under the MIT License
###############################################################################
| ipudu/order | tests/test_order.py | Python | mit | 347 |
from modules.user.model.session import UserSession
from google.appengine.ext import webapp
from lib.registry import Registry
#decorator
def logged(f):
def _inner(*args, **kwargs):
if not UserSession().isLogged():
raise BaseException('You are not authorized')
return f(*args, **kwargs)
return _inner
#decorator
def guest_only(f):
def _inner(*args, **kwargs):
if UserSession().isLogged():
raise BaseException('You are not authorized')
return f(*args, **kwargs)
return _inner
#decorator
def is_admin(f):
def _inner(*args, **kwargs):
if not UserSession().isLogged() or not UserSession().getUser().is_admin:
raise BaseException('You are not authorized')
return f(*args, **kwargs)
return _inner
class RequestHandler(webapp.RequestHandler):
def initialize(self, *args, **kwargs):
super(RequestHandler, self).initialize(*args, **kwargs)
Registry().set('request', self.request)
Registry().set('response', self.response)
Registry().set('session', self.request.environ['beaker.session'])
self.response.headers['Content-Type'] = 'text/html; charset=utf-8'
| retif/stealthpage | modules/core/model/app.py | Python | gpl-3.0 | 1,218 |
# coding: utf-8
# Set default logging handler to avoid "No handler found" warnings.
import logging
from logging import StreamHandler, Formatter
logger = logging.getLogger('sol')
_been_initialized = False
__all__ = ['logger', 'init_logger']
def init_logger(level=logging.DEBUG):
"""
Default logger initializer. Uses console output and sets level to DEBUG
    :param level: custom logging level
:return: the logger instance
"""
# global _been_initialized
# if not _been_initialized:
# s = StreamHandler()
# s.setFormatter(Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
# logger.addHandler(s)
# logger.setLevel(level)
# _been_initialized = True
# return logger
# else:
return logger
| progwriter/SOL | src/sol/utils/logger.py | Python | mit | 784 |
from front import front, direct_front
from xlog import getLogger
xlog = getLogger("gae_proxy")
def set_proxy(args):
front.set_proxy(args)
direct_front.set_proxy(args)
def is_workable():
if front.http_dispatcher.is_idle():
return True
num = len(front.connect_manager.new_conn_pool.pool) +\
len(front.connect_manager.gae_conn_pool.pool) + \
front.http_dispatcher.h1_num + \
front.http_dispatcher.h2_num
if num > 0:
return True
else:
return False
def set_bind_ip(args):
xlog.info("set_bind_ip:%s", args)
front.config.listen_ip = args["ip"]
front.config.save()
| zlsun/XX-Net | code/default/gae_proxy/local/apis.py | Python | bsd-2-clause | 657 |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo.serialization import jsonutils
import cinder
from cinder.api.openstack import wsgi
from cinder import test
from cinder.tests.api import fakes
from cinder.tests.api.v2 import stubs
UUID = fakes.FAKE_UUID
class SchedulerHintsTestCase(test.TestCase):
def setUp(self):
super(SchedulerHintsTestCase, self).setUp()
self.fake_instance = stubs.stub_volume(1, uuid=UUID)
self.fake_instance['created_at'] =\
datetime.datetime(2013, 1, 1, 1, 1, 1)
self.fake_instance['launched_at'] =\
datetime.datetime(2013, 1, 1, 1, 1, 1)
self.flags(
osapi_volume_extension=[
'cinder.api.contrib.select_extensions'],
osapi_volume_ext_list=['Scheduler_hints'])
self.app = fakes.wsgi_app()
def test_create_server_without_hints(self):
@wsgi.response(202)
def fake_create(*args, **kwargs):
self.assertNotIn('scheduler_hints', kwargs['body'])
return self.fake_instance
self.stubs.Set(cinder.api.v2.volumes.VolumeController, 'create',
fake_create)
req = fakes.HTTPRequest.blank('/v2/fake/volumes')
req.method = 'POST'
req.content_type = 'application/json'
body = {'id': id,
'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'volume_id': '1', }
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(202, res.status_int)
def test_create_server_with_hints(self):
@wsgi.response(202)
def fake_create(*args, **kwargs):
self.assertIn('scheduler_hints', kwargs['body'])
self.assertEqual(kwargs['body']['scheduler_hints'], {"a": "b"})
return self.fake_instance
self.stubs.Set(cinder.api.v2.volumes.VolumeController, 'create',
fake_create)
req = fakes.HTTPRequest.blank('/v2/fake/volumes')
req.method = 'POST'
req.content_type = 'application/json'
body = {'id': id,
'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'volume_id': '1',
'scheduler_hints': {'a': 'b'}, }
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(202, res.status_int)
def test_create_server_bad_hints(self):
req = fakes.HTTPRequest.blank('/v2/fake/volumes')
req.method = 'POST'
req.content_type = 'application/json'
body = {'volume': {
'id': id,
'volume_type_id': 'cedef40a-ed67-4d10-800e-17455edce175',
'volume_id': '1',
'scheduler_hints': 'a', }}
req.body = jsonutils.dumps(body)
res = req.get_response(self.app)
self.assertEqual(400, res.status_int)
| hguemar/cinder | cinder/tests/api/contrib/test_scheduler_hints.py | Python | apache-2.0 | 3,517 |
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from pynuts import Pynuts
CONFIG = {'SQLALCHEMY_DATABASE_URI': 'sqlite:////tmp/test.db',
'PYNUTS_DOCUMENT_REPOSITORY': '/tmp/employees.git'}
app = Flask(__name__)
app.config.update(CONFIG)
app.db = SQLAlchemy(app)
nuts = Pynuts(app)
| Kozea/Pynuts | docs/example/advanced/application.py | Python | bsd-3-clause | 310 |
def label_amdpar_from(instruction_xml):
label_parts = instruction_xml.get('label', '').split('-')
# <AMDPAR><EREGS_INSTRUCTIONS><INSTRUCTION>...
amdpar = instruction_xml.getparent().getparent()
return label_parts, amdpar
| tadhg-ohiggins/regulations-parser | regparser/notice/amendments/utils.py | Python | cc0-1.0 | 237 |
### Author: Dag Wieers <dag$wieers,com>
class dstat_plugin(dstat):
"""
Number of operations performed on a GPFS filesystem.
"""
def __init__(self):
self.name = 'gpfs file operations'
self.nick = ('open', 'clos', 'read', 'writ', 'rdir', 'inod')
self.vars = ('_oc_', '_cc_', '_rdc_', '_wc_', '_dir_', '_iu_')
self.type = 'd'
self.width = 5
self.scale = 1000
def check(self):
if os.access('/usr/lpp/mmfs/bin/mmpmon', os.X_OK):
try:
self.stdin, self.stdout, self.stderr = dpopen('/usr/lpp/mmfs/bin/mmpmon -p -s')
self.stdin.write('reset\n')
readpipe(self.stdout)
except IOError:
raise Exception('Cannot interface with gpfs mmpmon binary')
return True
raise Exception('Needs GPFS mmpmon binary')
def extract(self):
try:
self.stdin.write('io_s\n')
# readpipe(self.stderr)
for line in readpipe(self.stdout):
if not line: continue
l = line.split()
for name in self.vars:
self.set2[name] = int(l[l.index(name)+1])
for name in self.vars:
self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
except IOError as e:
if op.debug > 1: print('%s: lost pipe to mmpmon, %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
except Exception as e:
if op.debug > 1: print('%s: exception %s' % (self.filename, e))
for name in self.vars: self.val[name] = -1
if step == op.delay:
self.set1.update(self.set2)
# vim:ts=4:sw=4:et
| dagwieers/dstat | plugins/dstat_gpfs_ops.py | Python | gpl-2.0 | 1,751 |
import os
import sys
#PYTHON_DIR = join(dirname(__file__), '/opt/ofelia/vt_manager_kvm/src/python/')
PYTHON_DIR = os.path.join(os.path.dirname(__file__), "../..")
# This is needed because wsgi disallows using stdout
sys.stdout = sys.stderr
os.environ['DJANGO_SETTINGS_MODULE'] = 'vt_manager_kvm.settings.settingsLoader'
sys.path.insert(0,PYTHON_DIR)
from vt_manager_kvm.models.VirtualMachine import VirtualMachine
vmid = str(sys.argv[1])
vm = VirtualMachine.objects.get(id = vmid).getChildObject()
server = vm.Server.get()
server.deleteVM(vm)
| ict-felix/stack | vt_manager_kvm/src/python/vt_manager_kvm/tests/deleteVM.py | Python | apache-2.0 | 550 |
from collections.abc import Sequence
from functools import wraps
from itertools import repeat
class DeltaPenality(object):
"""This decorator returns penalized fitness for invalid individuals and the
original fitness value for valid individuals. The penalized fitness is made
of a constant factor *delta* added with an (optional) *distance* penality. The
distance function, if provided, shall return a value growing as the
individual moves away the valid zone.
:param feasibility: A function returning the validity status of any
individual.
:param delta: Constant or array of constants returned for an invalid individual.
:param distance: A function returning the distance between the individual
and a given valid point. The distance function can also return a sequence
of length equal to the number of objectives to affect multi-objective
fitnesses differently (optional, defaults to 0).
:returns: A decorator for evaluation function.
This function relies on the fitness weights to add correctly the distance.
The fitness value of the ith objective is defined as
.. math::
f^\mathrm{penality}_i(\mathbf{x}) = \Delta_i - w_i d_i(\mathbf{x})
where :math:`\mathbf{x}` is the individual, :math:`\Delta_i` is a user defined
constant and :math:`w_i` is the weight of the ith objective. :math:`\Delta`
should be worst than the fitness of any possible individual, this means
higher than any fitness for minimization and lower than any fitness for
maximization.
See the :doc:`/tutorials/advanced/constraints` for an example.
"""
def __init__(self, feasibility, delta, distance=None):
self.fbty_fct = feasibility
if not isinstance(delta, Sequence):
self.delta = repeat(delta)
else:
self.delta = delta
self.dist_fct = distance
def __call__(self, func):
@wraps(func)
def wrapper(individual, *args, **kwargs):
if self.fbty_fct(individual):
return func(individual, *args, **kwargs)
weights = tuple(1 if w >= 0 else -1 for w in individual.fitness.weights)
dists = tuple(0 for w in individual.fitness.weights)
if self.dist_fct is not None:
dists = self.dist_fct(individual)
if not isinstance(dists, Sequence):
dists = repeat(dists)
return tuple(d - w * dist for d, w, dist in zip(self.delta, weights, dists))
return wrapper
class ClosestValidPenality(object):
"""This decorator returns penalized fitness for invalid individuals and the
original fitness value for valid individuals. The penalized fitness is made
of the fitness of the closest valid individual added with a weighted
(optional) *distance* penality. The distance function, if provided, shall
return a value growing as the individual moves away the valid zone.
:param feasibility: A function returning the validity status of any
individual.
:param feasible: A function returning the closest feasible individual
from the current invalid individual.
:param alpha: Multiplication factor on the distance between the valid and
invalid individual.
:param distance: A function returning the distance between the individual
and a given valid point. The distance function can also return a sequence
of length equal to the number of objectives to affect multi-objective
fitnesses differently (optional, defaults to 0).
:returns: A decorator for evaluation function.
This function relies on the fitness weights to add correctly the distance.
The fitness value of the ith objective is defined as
.. math::
f^\mathrm{penality}_i(\mathbf{x}) = f_i(\operatorname{valid}(\mathbf{x})) - \\alpha w_i d_i(\operatorname{valid}(\mathbf{x}), \mathbf{x})
where :math:`\mathbf{x}` is the individual,
:math:`\operatorname{valid}(\mathbf{x})` is a function returning the closest
valid individual to :math:`\mathbf{x}`, :math:`\\alpha` is the distance
multiplicative factor and :math:`w_i` is the weight of the ith objective.
"""
def __init__(self, feasibility, feasible, alpha, distance=None):
self.fbty_fct = feasibility
self.fbl_fct = feasible
self.alpha = alpha
self.dist_fct = distance
def __call__(self, func):
@wraps(func)
def wrapper(individual, *args, **kwargs):
if self.fbty_fct(individual):
return func(individual, *args, **kwargs)
f_ind = self.fbl_fct(individual)
# print("individual", f_ind)
f_fbl = func(f_ind, *args, **kwargs)
# print("feasible", f_fbl)
weights = tuple(1.0 if w >= 0 else -1.0 for w in individual.fitness.weights)
if len(weights) != len(f_fbl):
raise IndexError("Fitness weights and computed fitness are of different size.")
dists = tuple(0 for w in individual.fitness.weights)
if self.dist_fct is not None:
dist = self.dist_fct(f_ind, individual)
if not isinstance(dists, Sequence):
dists = repeat(dists)
# print("returned", tuple(f - w * self.alpha * dist for f, w in zip(f_fbl, weights)))
return tuple(f - w * self.alpha * d for f, w, d in zip(f_fbl, weights, dists))
return wrapper
# List of exported function names.
__all__ = ['DeltaPenality', 'ClosestValidPenality']
if __name__ == "__main__":
from deap import base
from deap import benchmarks
from deap import creator
import numpy
MIN_BOUND = numpy.array([0] * 30)
MAX_BOUND = numpy.array([1] * 30)
creator.create("FitnessMin", base.Fitness, weights=(-1.0, -1.0))
creator.create("Individual", list, fitness=creator.FitnessMin)
def distance(feasible_ind, original_ind):
"""A distance function to the feasibility region."""
return sum((f - o) ** 2 for f, o in zip(feasible_ind, original_ind))
def closest_feasible(individual):
"""A function returning a valid individual from an invalid one."""
feasible_ind = numpy.array(individual)
feasible_ind = numpy.maximum(MIN_BOUND, feasible_ind)
feasible_ind = numpy.minimum(MAX_BOUND, feasible_ind)
return feasible_ind
def valid(individual):
"""Determines if the individual is valid or not."""
if any(individual < MIN_BOUND) or any(individual > MAX_BOUND):
return False
return True
toolbox = base.Toolbox()
toolbox.register("evaluate", benchmarks.zdt2)
toolbox.decorate("evaluate", ClosestValidPenality(valid, closest_feasible, 1.0e-6, distance))
ind1 = creator.Individual((-5.6468535666e-01, 2.2483050478e+00, -1.1087909644e+00, -1.2710112861e-01,
1.1682438733e+00, -1.3642007438e+00, -2.1916417835e-01, -5.9137308999e-01,
-1.0870160336e+00, 6.0515070232e-01, 2.1532075914e+00, -2.6164718271e-01,
1.5244071578e+00, -1.0324305612e+00, 1.2858152343e+00, -1.2584683962e+00,
1.2054392372e+00, -1.7429571973e+00, -1.3517256013e-01, -2.6493429355e+00,
-1.3051320798e-01, 2.2641961090e+00, -2.5027232340e+00, -1.2844874148e+00,
1.9955852925e+00, -1.2942218834e+00, 3.1340109155e+00, 1.6440111097e+00,
-1.7750105857e+00, 7.7610242710e-01))
print(toolbox.evaluate(ind1))
print("Individuals is valid: %s" % ("True" if valid(ind1) else "False"))
| DailyActie/Surrogate-Model | 01-codes/deap-master/deap/tools/constraint.py | Python | mit | 7,871 |
# Copyright (c) 2014 - 2016 townhallpinball.org
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from ... import p, util
from .effect import Effect
class Pulse(Effect):
def __init__(self, target, duration=0.03):
durations = [duration] * 0x8
super(Pulse, self).__init__("pulse", target, durations, repeat=True,
once=False)
self.target = target
self.to_black = True
def action(self, *args, **kwargs):
color = 0xf - (self.index * 2) if self.to_black else self.index
self.target.update(color=color)
if self.index == 0x07:
self.to_black = not self.to_black
p.effects["pulse"] = Pulse
| town-hall-pinball/project-omega | pin/lib/ui/effects/pulse.py | Python | mit | 1,687 |
from vertebra.util import symbol
from logging import info
class incall(object):
def setup(self,frm,request):
pass
def dispatch(self,request):
pass
class outcall(object):
def setup(self,to,request):
pass
class invocation(object):
pass
class invo_local(invocation):
pass
class invo_net(invocation):
pass
class evocation(object):
pass
class evo_local(evocation):
pass
class evo_net(evocation):
pass
def register_initcall(op,args,scope):
info("initcall registered %r",op)
# These should probably become classes
once = symbol.once
once_then_exit = symbol.once_then_exit
restart = symbol.restart
restart_throttled = symbol.restart_throttled
# Mocks for Testing
class mock_incall(object):
pass
| jvantuyl/vertebra-py | lib/vertebra/calls.py | Python | lgpl-3.0 | 733 |
"""
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Tests for the file adversarial_hinge.py"""
from absl import flags
from absl.testing import absltest
import tensorflow as tf
from discretezoo.loss import adversarial_hinge
class UntargetedLossTest(absltest.TestCase):
"""Tests for the untargeted_loss adversarial loss function."""
def test_untargeted_loss(self):
"""Tests that the function returns expected values on a non-edge case."""
true_probability = 0.8
true_label = 0
max_other_label = 2
probability_list = [0.0] * 10
probability_list[true_label] = true_probability
probability_list[max_other_label] = 1.0 - true_probability
probability_vector = tf.constant([probability_list])
true_label_vector = tf.constant([[true_label]], dtype=tf.int32)
# This should return log(0.8) - log(0.2) = log(0.8/0.2)
test_loss = adversarial_hinge.untargeted_loss(probability_vector,
true_label_vector, 0.0)
expected_loss = tf.math.log(
tf.constant(true_probability / (1 - true_probability)))
tf.debugging.assert_near(test_loss, expected_loss)
def test_untargeted_loss_uniform_distribution(self):
"""Test the edge case where the model predicts the uniform distribution."""
true_probability = 0.1
true_label = 4
max_other_label = 5
probability_list = [0.1] * 10
probability_vector = tf.constant([probability_list])
true_label_vector = tf.constant([[true_label]], dtype=tf.int32)
# This should return log(0.1) - log(0.1) = log(0.1/0.1) = log(1) = 0
test_loss = adversarial_hinge.untargeted_loss(probability_vector,
true_label_vector, 0.0)
expected_loss = tf.constant([0.0])
tf.debugging.assert_near(test_loss, expected_loss)
def test_untargeted_loss_nonzero_kappa(self):
"""Test edge case where model output is uniform and kappa is nonzero."""
true_probability = 0.1
true_label = 4
max_other_label = 5
kappa = 0.1
probability_list = [0.1] * 10
probability_vector = tf.constant([probability_list])
true_label_vector = tf.constant([[true_label]], dtype=tf.int32)
# This should return log(0.1) - log(0.1) = log(0.1/0.1) = log(1) = 0
test_loss = adversarial_hinge.untargeted_loss(probability_vector,
true_label_vector, kappa)
tf.debugging.assert_near(test_loss, tf.constant([kappa]))
def test_untargeted_loss_overconfident_model(self):
"""Test the edge case where the model predicts 1.0 for the true class."""
true_label = 8
max_other_label = 0
true_probability = 1.0
probability_list = [0.0] * 10
probability_list[true_label] = true_probability
probability_vector = tf.constant([probability_list])
true_label_vector = tf.constant([[true_label]], dtype=tf.int32)
# This should return log(1.0) - log(0.0) = 0.0 - (-inf) = +inf
test_loss = adversarial_hinge.untargeted_loss(probability_vector,
true_label_vector, 0.0)
tf.debugging.assert_equal(test_loss, tf.constant([float('inf')]))
class TargetedLossTest(absltest.TestCase):
"""Tests for the targeted_loss adversarial loss function."""
def test_targeted_loss(self):
"""Test for the standard case where the attack was not yet successful."""
target_probability = 0.3
target_label = 0
max_other_label = 2
probability_list = [0.0] * 10
probability_list[target_label] = target_probability
probability_list[max_other_label] = 1.0 - target_probability
probability_vector = tf.constant([probability_list])
target_label_vector = tf.constant([target_label], dtype=tf.int32)
# This should return log(0.7) - log(0.3) = log(0.7/0.3).
test_loss = adversarial_hinge.targeted_loss(probability_vector,
target_label_vector, 0.0)
expected_loss = tf.math.log(
tf.constant((1 - target_probability) / target_probability))
tf.debugging.assert_near(test_loss, expected_loss)
def test_targeted_loss_uniform_distribution(self):
"""Test edge case where the model predicts the uniform distribution."""
target_probability = 0.1
target_label = 0
max_other_label = 2
probability_list = [0.1] * 10
probability_vector = tf.constant([probability_list])
target_label_vector = tf.constant([target_label], dtype=tf.int32)
# It should return log(0.1) - log(0.1) = log(0.1/0.1) = log(1) = 0.0.
test_loss = adversarial_hinge.targeted_loss(probability_vector,
target_label_vector, 0.0)
tf.debugging.assert_near(test_loss, 0.0)
def test_targeted_loss_nonzero_kappa(self):
"""Test edge case where it's the uniform distribution and kappa > 0.0."""
target_probability = 0.3
target_label = 0
max_other_label = 2
kappa = 0.1
probability_list = [0.1] * 10
probability_vector = tf.constant([probability_list])
target_label_vector = tf.constant([target_label], dtype=tf.int32)
# It should return log(0.1) - log(0.1) + kappa = kappa
test_loss = adversarial_hinge.targeted_loss(probability_vector,
target_label_vector, kappa)
tf.debugging.assert_near(test_loss, tf.constant([kappa]))
def test_targeted_loss_overconfident_model(self):
"""Test the case where the model is overconfident about its prediction."""
target_label = 8
original_label = 0
probability_list = [0.0] * 10
probability_list[original_label] = 1.0
probability_vector = tf.constant([probability_list])
target_label_vector = tf.constant([target_label], dtype=tf.int32)
# This should return log(1.0) - log(0.0) = 0.0 - (-inf) = +inf.
test_loss = adversarial_hinge.targeted_loss(probability_vector,
target_label_vector, 0.0)
tf.debugging.assert_equal(test_loss, tf.constant([float('inf')]))
if __name__ == '__main__':
absltest.main()
| googleinterns/adversarial-0th-order-optimization | discretezoo/loss/adversarial_hinge_test.py | Python | apache-2.0 | 6,604 |
"""
Copyright (c) 2011, 2012, Regents of the University of California
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
@author Stephen Dawson-Haggerty <[email protected]>
"""
import sys
import os
import threading
import logging
import time
import socket
from twisted.internet.protocol import DatagramProtocol
# twisted doesn't support ipv6... this patches the reactor to add
# listenUDP6 and listenTCP6 methods. It's not great, but it's a
# workaround that we can use and is easy to deploy (doesn't involve
# patching the twisted installation directory)
from tx.ipv6.internet import reactor
from smap.driver import SmapDriver
from smap.drivers.acmex2 import ACmeX2Report
class ACmeX2Driver(SmapDriver, DatagramProtocol):
def datagramReceived(self, data, addr):
rpt = ACmeX2Report.AcReport(data=data, data_length=len(data))
moteid = addr[0].split('::')[1]
if not moteid in self.ids:
self.ids[moteid] = True
self.add_timeseries('/' + moteid + '/true_power', 'mW', buffersz=2)
self.add_timeseries('/' + moteid + '/apparent_power', 'mVA', buffersz=2)
self.add_timeseries('/' + moteid + '/true_energy', 'mWh', buffersz=2)
self.set_metadata('/' + moteid, {
'Instrument/PartNumber' : moteid,
'Instrument/SerialNumber' : ':'.join(['%02x' % x for x in rpt.get_eui64()]),
'Instrument/SamplingPeriod' : str(rpt.get_period ()),
})
for idx in range(0,2):
readingTime = rpt.get_globalTime() - (rpt.get_period() * (1 - idx))
self._add('/' + moteid + '/true_energy', readingTime,
rpt.get_readings_cumulativeRealEnergy()[idx],
rpt.get_seq())
self._add('/' + moteid + '/true_power', readingTime,
rpt.get_readings_averageRealPower()[idx],
rpt.get_seq())
self._add('/' + moteid + '/apparent_power', readingTime,
rpt.get_readings_averageApparentPower()[idx],
rpt.get_seq())
def setup(self, opts):
self.port = int(opts.get('Port', 7002))
self.ids = {}
self.set_metadata('/', {
'Extra/Driver' : 'smap.driver.acmex2.acmex2.ACmeX2Driver',
'Instrument/Manufacturer': 'UC Berkeley',
'Instrument/Model' : 'ACme X2',
})
def start(self):
reactor.listenUDP6(self.port, self)
| tectronics/smap-data | python/smap/drivers/acmex2/acmex2.py | Python | bsd-2-clause | 3,726 |
# matplotlib without any blocking GUI
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from smst.utils import audio, peaks
from smst.models import dft
(fs, x) = audio.read_wav('../../../sounds/oboe-A4.wav')
N = 512 * 2
M = 511
t = -60
w = np.hamming(M)
start = .8 * fs
hN = N / 2
hM = (M + 1) / 2
x1 = x[start:start + M]
mX, pX = dft.from_audio(x1, w, N)
ploc = peaks.find_peaks(mX, t)
iploc, ipmag, ipphase = peaks.interpolate_peaks(mX, pX, ploc)
pmag = mX[ploc]
freqaxis = fs * np.arange(mX.size) / float(N)
plt.figure(1, figsize=(9, 5))
plt.plot(freqaxis, mX, 'r', lw=1.5)
plt.axis([0, 7000, -80, max(mX) + 1])
plt.plot(fs * iploc / N, ipmag, marker='x', color='b', linestyle='', markeredgewidth=1.5)
harms = np.arange(1, 20) * 440.0
plt.vlines(harms, -80, max(mX) + 1, color='g', lw=1.5)
plt.title('mX + peaks + f0 multiples (oboe-A4.wav)')
plt.tight_layout()
plt.savefig('spectral-peaks-and-f0.png')
| bzamecnik/sms-tools | lectures/06-Harmonic-model/plots-code/spectral-peaks-and-f0.py | Python | agpl-3.0 | 953 |
import numpy as np
import bpy
from bpy.props import FloatProperty, EnumProperty, IntProperty, BoolProperty
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, zip_long_repeat, ensure_nesting_level
from sverchok.utils.curve import SvCurveLengthSolver, SvCurve
from sverchok.utils.nodes_mixins.draft_mode import DraftMode
class SvCurveLengthParameterNode(DraftMode, bpy.types.Node, SverchCustomTreeNode):
"""
Triggers: Curve Length Parameter
Tooltip: Solve curve length (natural) parameter
"""
bl_idname = 'SvExCurveLengthParameterNode'
bl_label = 'Curve Length Parameter'
bl_icon = 'OUTLINER_OB_EMPTY'
sv_icon = 'SV_CURVE_LENGTH_P'
resolution : IntProperty(
name = 'Resolution',
min = 1,
default = 50,
update = updateNode)
length : FloatProperty(
name = "Length",
min = 0.0,
default = 0.5,
update = updateNode)
length_draft : FloatProperty(
name = "[D] Length",
min = 0.0,
default = 0.5,
update = updateNode)
modes = [('SPL', 'Cubic', "Cubic Spline", 0),
('LIN', 'Linear', "Linear Interpolation", 1)]
mode: EnumProperty(name='Interpolation mode', default="SPL", items=modes, update=updateNode)
def update_sockets(self, context):
self.inputs['Length'].hide_safe = self.eval_mode != 'MANUAL'
self.inputs['Samples'].hide_safe = self.eval_mode != 'AUTO'
updateNode(self, context)
eval_modes = [
('AUTO', "Automatic", "Evaluate the curve at evenly spaced points", 0),
('MANUAL', "Manual", "Evaluate the curve at specified points", 1)
]
eval_mode : EnumProperty(
name = "Mode",
items = eval_modes,
default = 'AUTO',
update = update_sockets)
sample_size : IntProperty(
name = "Samples",
default = 50,
min = 4,
update = updateNode)
specify_accuracy : BoolProperty(
name = "Specify accuracy",
default = False,
update = updateNode)
accuracy : IntProperty(
name = "Accuracy",
default = 3,
min = 0,
update = updateNode)
accuracy_draft : IntProperty(
name = "[D] Accuracy",
default = 1,
min = 0,
update = updateNode)
draft_properties_mapping = dict(length = 'length_draft', accuracy = 'accuracy_draft')
def sv_init(self, context):
self.inputs.new('SvCurveSocket', "Curve")
self.inputs.new('SvStringsSocket', "Resolution").prop_name = 'resolution'
self.inputs.new('SvStringsSocket', "Length").prop_name = 'length'
self.inputs.new('SvStringsSocket', "Samples").prop_name = 'sample_size'
self.outputs.new('SvStringsSocket', "T")
self.outputs.new('SvVerticesSocket', "Vertices")
self.update_sockets(context)
def draw_buttons(self, context, layout):
layout.prop(self, 'eval_mode', expand=True)
layout.prop(self, 'specify_accuracy')
if self.specify_accuracy:
if self.id_data.sv_draft:
layout.prop(self, 'accuracy_draft')
else:
layout.prop(self, 'accuracy')
def draw_buttons_ext(self, context, layout):
self.draw_buttons(context, layout)
layout.prop(self, 'mode', expand=True)
def does_support_draft_mode(self):
return True
def draw_label(self):
label = self.label or self.name
if self.id_data.sv_draft:
label = "[D] " + label
return label
def process(self):
if not any((s.is_linked for s in self.outputs)):
return
need_eval = self.outputs['Vertices'].is_linked
curves_s = self.inputs['Curve'].sv_get()
resolution_s = self.inputs['Resolution'].sv_get()
length_s = self.inputs['Length'].sv_get()
samples_s = self.inputs['Samples'].sv_get(default=[[]])
length_s = ensure_nesting_level(length_s, 3)
resolution_s = ensure_nesting_level(resolution_s, 2)
samples_s = ensure_nesting_level(samples_s, 2)
curves_s = ensure_nesting_level(curves_s, 2, data_types=(SvCurve,))
ts_out = []
verts_out = []
for curves, resolutions, input_lengths_i, samples_i in zip_long_repeat(curves_s, resolution_s, length_s, samples_s):
for curve, resolution, input_lengths, samples in zip_long_repeat(curves, resolutions, input_lengths_i, samples_i):
mode = self.mode
accuracy = self.accuracy
if self.id_data.sv_draft:
mode = 'LIN'
accuracy = self.accuracy_draft
if self.specify_accuracy:
tolerance = 10 ** (-accuracy)
else:
tolerance = None
solver = SvCurveLengthSolver(curve)
solver.prepare(mode, resolution, tolerance=tolerance)
if self.eval_mode == 'AUTO':
total_length = solver.get_total_length()
input_lengths = np.linspace(0.0, total_length, num = samples)
else:
input_lengths = np.array(input_lengths)
ts = solver.solve(input_lengths)
ts_out.append(ts.tolist())
if need_eval:
verts = curve.evaluate_array(ts).tolist()
verts_out.append(verts)
self.outputs['T'].sv_set(ts_out)
self.outputs['Vertices'].sv_set(verts_out)
def register():
bpy.utils.register_class(SvCurveLengthParameterNode)
def unregister():
bpy.utils.unregister_class(SvCurveLengthParameterNode)
| nortikin/sverchok | nodes/curve/length_parameter.py | Python | gpl-3.0 | 5,754 |
'''
/*******************************************************************************
*
* Copyright (c) 2015 Fraunhofer FOKUS, All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 3.0 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library. If not, see <http://www.gnu.org/licenses/>.
*
* AUTHORS: Louay Bassbouss ([email protected])
*
******************************************************************************/
'''
from django.template import TemplateSyntaxError, Node, Variable, Library
from django.conf import settings
register = Library()
# I found some tricks in URLNode and url from defaulttags.py:
# https://code.djangoproject.com/browser/django/trunk/django/template/defaulttags.py
@register.tag
def value_from_settings(parser, token):
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one " \
"argument (settings constant to retrieve)" % bits[0])
settingsvar = bits[1]
settingsvar = settingsvar[1:-1] if settingsvar[0] == '"' else settingsvar
asvar = None
bits = bits[2:]
if len(bits) >= 2 and bits[-2] == 'as':
asvar = bits[-1]
bits = bits[:-2]
if len(bits):
raise TemplateSyntaxError("'value_from_settings' didn't recognise " \
"the arguments '%s'" % ", ".join(bits))
return ValueFromSettings(settingsvar, asvar)
class ValueFromSettings(Node):
def __init__(self, settingsvar, asvar):
self.arg = Variable(settingsvar)
self.asvar = asvar
def render(self, context):
ret_val = getattr(settings,str(self.arg))
if self.asvar:
context[self.asvar] = ret_val
return ''
else:
return ret_val
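# Usage sketch (illustrative, not part of the original file): once this
# templatetags module is in an installed app, a template can print a settings
# constant or bind it to a context variable. MEDIA_URL below is only an example
# constant, and the load name assumes the module keeps its current file name.
#
#   {% load value_from_settings %}
#   {% value_from_settings "MEDIA_URL" %}
#   {% value_from_settings "MEDIA_URL" as media_url %}
#   <img src="{{ media_url }}logo.png">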
| fraunhoferfokus/fixmycity | dummy/templatetags/value_from_settings.py | Python | lgpl-3.0 | 2,196 |
# -*- coding: utf-8 -*-
# --------------------------------------------------------------------------------
# Updater (kodi)
# --------------------------------------------------------------------------------
import traceback
import xbmc
import xbmcaddon
import threading
import subprocess
import time
from platformcode import config, logger, platformtools
from core import jsontools
from core import filetools
json_data_file_name = 'custom_code.json'
def init():
logger.info()
"""
    All code added to the add-on is wiped with every update. This function restores it automatically after each update.
    It lets the user keep their own code, under their own responsibility, and have it restored to the add-on every time it updates.
    The mechanism works by copying the contents of the folder tree ".\userdata\addon_data\plugin.video.alfa\custom_code\..." over
    the add-on code folders. It does not verify the content, it simply dumps (replaces) whatever is inside "custom_code".
    The user stores their updated code in the "custom_code" subfolders, ready to be copied over at any time.
    If something should not be copied, simply delete it from "custom_code" and it will no longer be copied on the next update.
    The steps this function follows are:
    1.- The function is called from videolibrary_service.py, from the start-up function:
            # Copy Custom code to the Alfa folders from the Userdata area
            from platformcode import custom_code
            custom_code.init()
    2.- On Kodi start-up it checks whether the "custom_code" folder exists in ".\userdata\addon_data\plugin.video.alfa\".
        If it does not exist, it creates it and exits without doing anything else, giving the user the chance to copy their code
        onto that structure so the function can dump it over the add-on on the next Kodi start.
    3.- On the next Kodi start it checks whether custom_code.json exists in the add-on root folder.
        If it does not exist, it creates it with an empty add-on version number, so the files can be copied on this pass.
    4.- It verifies that the add-on version number differs from the one in custom_code.json. If it is the same version,
        it exits, because the copy was already made earlier.
        If the version differs, every file in the "custom_code" folder tree is dumped over the add-on.
        If a destination folder does not exist, an error occurs and the copy is cancelled. New folders are considered pointless.
    5.- If the copy finishes successfully, custom_code.json is updated with the add-on version number,
        so the copy is not repeated on subsequent Kodi starts until the add-on changes version.
        The fix number is not considered part of the add-on version number.
    Timing: copying 7 test files, the process took about a tenth of a second.
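
    Illustrative example (added note, not in the original text): a user file stored at
        userdata/addon_data/plugin.video.alfa/custom_code/channels/mychannel.py
    would be dumped over <addon install dir>/channels/mychannel.py on the first Kodi
    start after each add-on update, following steps 2-5 above.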
"""
try:
        # Check whether Kodi still has any Video Database file from earlier versions, and remove them
verify_Kodi_video_DB()
        # LIBTORRENT: the Libtorrent binary is downloaded every time Alfa is updated
try:
            threading.Thread(target=update_libtorrent).start()     # Create an independent thread that lives until Kodi exits
            time.sleep(2)                                           # Let the initialization finish...
        except:                                                     # If there are threading problems, bail out
logger.error(traceback.format_exc())
        # QUASAR: ask whether modifications should be applied to Quasar
if not filetools.exists(filetools.join(config.get_data_path(), "quasar.json")) \
and not config.get_setting('addon_quasar_update', default=False):
question_update_external_addon("quasar")
        # QUASAR: apply the modifications to Quasar, if allowed and if it is installed
if config.get_setting('addon_quasar_update', default=False) or \
(filetools.exists(filetools.join(config.get_data_path(), \
"quasar.json")) and not xbmc.getCondVisibility('System.HasAddon("plugin.video.quasar")')):
if not update_external_addon("quasar"):
platformtools.dialog_notification("Actualización Quasar", "Ha fallado. Consulte el log")
        # Does the "custom_code" folder exist? If not, create it and exit
custom_code_dir = filetools.join(config.get_data_path(), 'custom_code')
if not filetools.exists(custom_code_dir):
create_folder_structure(custom_code_dir)
return
else:
            # Does "custom_code.json" exist? If not, create it
custom_code_json_path = config.get_runtime_path()
custom_code_json = filetools.join(custom_code_json_path, 'custom_code.json')
if not filetools.exists(custom_code_json):
create_json(custom_code_json_path)
            # Check whether the .json version and the add-on version match. If so, exit. If not, copy "custom_code" onto the add-on
verify_copy_folders(custom_code_dir, custom_code_json_path)
except:
logger.error(traceback.format_exc())
def create_folder_structure(custom_code_dir):
logger.info()
    # Create all the folders. The important one is "custom_code". The others are merely a guide to avoid naming mistakes...
filetools.mkdir(custom_code_dir)
filetools.mkdir(filetools.join(custom_code_dir, 'channels'))
filetools.mkdir(filetools.join(custom_code_dir, 'core'))
filetools.mkdir(filetools.join(custom_code_dir, 'lib'))
filetools.mkdir(filetools.join(custom_code_dir, 'platformcode'))
filetools.mkdir(filetools.join(custom_code_dir, 'resources'))
filetools.mkdir(filetools.join(custom_code_dir, 'servers'))
return
def create_json(custom_code_json_path, json_name=json_data_file_name):
logger.info()
    # Save the json with an empty Alfa version, to allow the first copy to happen
json_data_file = filetools.join(custom_code_json_path, json_name)
if filetools.exists(json_data_file):
filetools.remove(json_data_file)
result = filetools.write(json_data_file, jsontools.dump({"addon_version": ""}))
return
def verify_copy_folders(custom_code_dir, custom_code_json_path):
logger.info()
    # Check whether a new Alfa version was installed or it is the existing one. If it is the existing one, leave without doing anything
json_data_file = filetools.join(custom_code_json_path, json_data_file_name)
json_data = jsontools.load(filetools.read(json_data_file))
current_version = config.get_addon_version(with_fix=False)
if not json_data or not 'addon_version' in json_data:
create_json(custom_code_json_path)
json_data = jsontools.load(filetools.read(json_data_file))
try:
if current_version == json_data['addon_version']:
return
except:
logger.error(traceback.format_exc(1))
    # Now copy the files from the Userdata area, Custom_code, onto the add-on folders
for root, folders, files in filetools.walk(custom_code_dir):
for file in files:
input_file = filetools.join(root, file)
output_file = input_file.replace(custom_code_dir, custom_code_json_path)
if not filetools.copy(input_file, output_file, silent=True):
return
    # Save the json with the current Alfa version, so the copy is not repeated until the next version
json_data['addon_version'] = current_version
filetools.write(json_data_file, jsontools.dump(json_data))
return
def question_update_external_addon(addon_name):
logger.info(addon_name)
    # Verify that the addon is installed
stat = False
if xbmc.getCondVisibility('System.HasAddon("plugin.video.%s")' % addon_name):
        # If this is the first time we ask about updating the external addon, record the answer,
        # save a .json in userdata/alfa so we do not ask again, and update the setting in Alfa.
stat = platformtools.dialog_yesno('Actualización de %s' % addon_name.capitalize(), '¿Quiere que actualicemos Quasar para que sea compatible con las últimas versiones de Kodi? (recomendado: SÍ)', '', 'Si actualiza Quasar, reinicie Kodi en un par de minutos')
        # Use the answer to update the variable in Alfa's settings.xml. It can be changed in Alfa Settings, Others
if stat:
config.set_setting('addon_quasar_update', True)
else:
config.set_setting('addon_quasar_update', False)
        # Create a .json in userdata so we do not ask again
create_json(config.get_data_path(), "%s.json" % addon_name)
return stat
def update_external_addon(addon_name):
logger.info(addon_name)
    # Verify that the addon is installed
if xbmc.getCondVisibility('System.HasAddon("plugin.video.%s")' % addon_name):
        # Path of the Alfa updates
alfa_addon_updates = filetools.join(config.get_runtime_path(), filetools.join("lib", addon_name))
        # Destination path inside the external addon
__settings__ = xbmcaddon.Addon(id="plugin.video." + addon_name)
if addon_name.lower() in ['quasar', 'elementum']:
addon_path = filetools.join(xbmc.translatePath(__settings__.getAddonInfo('Path')), \
filetools.join("resources", filetools.join("site-packages", addon_name)))
else:
addon_path = ''
        # Are there modifications in Alfa? Copy them to the addon
if filetools.exists(alfa_addon_updates) and filetools.exists(addon_path):
for root, folders, files in filetools.walk(alfa_addon_updates):
for file in files:
input_file = filetools.join(root, file)
output_file = input_file.replace(alfa_addon_updates, addon_path)
if not filetools.copy(input_file, output_file, silent=True):
logger.error('Error en la copia: Input: %s o Output: %s' % (input_file, output_file))
return False
return True
else:
logger.error('Alguna carpeta no existe: Alfa: %s o %s: %s' % (alfa_addon_updates, addon_name, addon_path))
    # Quasar has been uninstalled, reset the option
else:
config.set_setting('addon_quasar_update', False)
if filetools.exists(filetools.join(config.get_data_path(), "%s.json" % addon_name)):
filetools.remove(filetools.join(config.get_data_path(), "%s.json" % addon_name))
return True
return False
def update_libtorrent():
logger.info()
if not config.get_setting("mct_buffer", server="torrent", default=""):
default = config.get_setting("torrent_client", server="torrent", default=0)
config.set_setting("torrent_client", default, server="torrent")
config.set_setting("mct_buffer", "50", server="torrent")
config.set_setting("mct_download_path", config.get_setting("downloadpath"), server="torrent")
config.set_setting("mct_background_download", True, server="torrent")
config.set_setting("mct_rar_unpack", True, server="torrent")
config.set_setting("bt_buffer", "50", server="torrent")
config.set_setting("bt_download_path", config.get_setting("downloadpath"), server="torrent")
config.set_setting("mct_download_limit", "", server="torrent")
config.set_setting("magnet2torrent", False, server="torrent")
if not filetools.exists(filetools.join(config.get_runtime_path(), "custom_code.json")) or not \
config.get_setting("unrar_path", server="torrent", default=""):
path = filetools.join(config.get_runtime_path(), 'lib', 'rarfiles')
creationflags = ''
sufix = ''
unrar = ''
for device in filetools.listdir(path):
if xbmc.getCondVisibility("system.platform.android") and 'android' not in device: continue
if xbmc.getCondVisibility("system.platform.windows") and 'windows' not in device: continue
if not xbmc.getCondVisibility("system.platform.windows") and not xbmc.getCondVisibility("system.platform.android") \
and ('android' in device or 'windows' in device): continue
if 'windows' in device:
creationflags = 0x08000000
sufix = '.exe'
else:
creationflags = ''
sufix = ''
unrar = filetools.join(path, device, 'unrar%s') % sufix
if not filetools.exists(unrar): unrar = ''
if unrar:
if not xbmc.getCondVisibility("system.platform.windows"):
try:
if xbmc.getCondVisibility("system.platform.android"):
                            # For Android, copy the binary to the system partition
unrar_org = unrar
unrar = filetools.join(xbmc.translatePath('special://xbmc/'), 'files').replace('/cache/apk/assets', '')
if not filetools.exists(unrar):
filetools.mkdir(unrar)
unrar = filetools.join(unrar, 'unrar')
filetools.copy(unrar_org, unrar, silent=True)
command = ['chmod', '777', '%s' % unrar]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
command = ['ls', '-l', unrar]
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
xbmc.log('######## UnRAR file: %s' % str(output_cmd), xbmc.LOGNOTICE)
except:
xbmc.log('######## UnRAR ERROR in path: %s' % str(unrar), xbmc.LOGNOTICE)
logger.error(traceback.format_exc(1))
try:
if xbmc.getCondVisibility("system.platform.windows"):
p = subprocess.Popen(unrar, stdout=subprocess.PIPE, stderr=subprocess.PIPE, creationflags=creationflags)
else:
p = subprocess.Popen(unrar, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output_cmd, error_cmd = p.communicate()
if p.returncode != 0 or error_cmd:
xbmc.log('######## UnRAR returncode in module %s: %s, %s in %s' % \
(device, str(p.returncode), str(error_cmd), unrar), xbmc.LOGNOTICE)
unrar = ''
else:
xbmc.log('######## UnRAR OK in %s: %s' % (device, unrar), xbmc.LOGNOTICE)
break
except:
xbmc.log('######## UnRAR ERROR in module %s: %s' % (device, unrar), xbmc.LOGNOTICE)
logger.error(traceback.format_exc(1))
unrar = ''
if unrar: config.set_setting("unrar_path", unrar, server="torrent")
if filetools.exists(filetools.join(config.get_runtime_path(), "custom_code.json")) and \
config.get_setting("libtorrent_path", server="torrent", default="") :
return
try:
from lib.python_libtorrent.python_libtorrent import get_libtorrent
except Exception, e:
logger.error(traceback.format_exc(1))
e = unicode(str(e), "utf8", errors="replace").encode("utf8")
config.set_setting("libtorrent_path", "", server="torrent")
if not config.get_setting("libtorrent_error", server="torrent", default=''):
config.set_setting("libtorrent_error", str(e), server="torrent")
return
def verify_Kodi_video_DB():
logger.info()
import random
platform = {}
path = ''
db_files = []
try:
path = filetools.join(xbmc.translatePath("special://masterprofile/"), "Database")
if filetools.exists(path):
platform = config.get_platform(full_version=True)
if platform and platform['num_version'] < 19:
db_files = filetools.walk(path)
if filetools.exists(filetools.join(path, platform['video_db'])):
for root, folders, files in db_files:
for file in files:
if file != platform['video_db']:
if file.startswith('MyVideos'):
randnum = str(random.randrange(1, 999999))
filetools.rename(filetools.join(path, file), 'OLD_' + randnum +'_' + file)
logger.error('BD obsoleta: ' + file)
else:
logger.error('Video_DB: ' + str(platform['video_db']) + ' para versión Kodi ' + str(platform['num_version']) + ' NO EXISTE. Analizar carpeta: ' + str(db_files))
else:
logger.error('Estructura de get_platform(full_version=True) incorrecta')
else:
logger.error('Path a Userdata/Database (' + path + ') no encontrado')
except:
logger.error('Platform: ' + str(platform) + ' / Path: ' + str(path) + ' / Files: ' + str(db_files))
logger.error(traceback.format_exc())
return | alfa-jor/addon | plugin.video.alfa/platformcode/custom_code.py | Python | gpl-3.0 | 18,041 |
import copy
import pickle
import msgpack
import numpy as np
import pytest
from tlz import identity
from distributed import wait
from distributed.protocol import (
register_serialization,
serialize,
deserialize,
nested_deserialize,
Serialize,
Serialized,
to_serialize,
serialize_bytes,
deserialize_bytes,
serialize_bytelist,
register_serialization_family,
dask_serialize,
)
from distributed.protocol.serialize import check_dask_serializable
from distributed.utils import nbytes
from distributed.utils_test import inc, gen_test
from distributed.comm.utils import to_frames, from_frames
class MyObj:
def __init__(self, data):
self.data = data
def __getstate__(self):
raise Exception("Not picklable")
def serialize_myobj(x):
return {}, [pickle.dumps(x.data)]
def deserialize_myobj(header, frames):
return MyObj(pickle.loads(frames[0]))
register_serialization(MyObj, serialize_myobj, deserialize_myobj)
def test_dumps_serialize():
for x in [123, [1, 2, 3, 4, 5, 6]]:
header, frames = serialize(x)
assert header["serializer"] == "pickle"
assert len(frames) == 1
result = deserialize(header, frames)
assert result == x
x = MyObj(123)
header, frames = serialize(x)
assert header["type"]
assert len(frames) == 1
result = deserialize(header, frames)
assert result.data == x.data
def test_serialize_bytestrings():
for b in (b"123", bytearray(b"4567")):
header, frames = serialize(b)
assert frames[0] is b
bb = deserialize(header, frames)
assert bb == b
def test_Serialize():
s = Serialize(123)
assert "123" in str(s)
assert s.data == 123
t = Serialize((1, 2))
assert str(t)
u = Serialize(123)
assert s == u
assert not (s != u)
assert s != t
assert not (s == t)
assert hash(s) == hash(u)
assert hash(s) != hash(t) # most probably
def test_Serialized():
s = Serialized(*serialize(123))
t = Serialized(*serialize((1, 2)))
u = Serialized(*serialize(123))
assert s == u
assert not (s != u)
assert s != t
assert not (s == t)
def test_nested_deserialize():
x = {
"op": "update",
"x": [to_serialize(123), to_serialize(456), 789],
"y": {"a": ["abc", Serialized(*serialize("def"))], "b": b"ghi"},
}
x_orig = copy.deepcopy(x)
assert nested_deserialize(x) == {
"op": "update",
"x": [123, 456, 789],
"y": {"a": ["abc", "def"], "b": b"ghi"},
}
assert x == x_orig # x wasn't mutated
from distributed.utils_test import gen_cluster
from dask import delayed
@gen_cluster(client=True)
async def test_object_in_graph(c, s, a, b):
o = MyObj(123)
v = delayed(o)
v2 = delayed(identity)(v)
future = c.compute(v2)
result = await future
assert isinstance(result, MyObj)
assert result.data == 123
@gen_cluster(client=True)
async def test_scatter(c, s, a, b):
o = MyObj(123)
[future] = await c._scatter([o])
await c._replicate(o)
o2 = await c._gather(future)
assert isinstance(o2, MyObj)
assert o2.data == 123
@gen_cluster(client=True)
async def test_inter_worker_comms(c, s, a, b):
o = MyObj(123)
[future] = await c._scatter([o], workers=a.address)
future2 = c.submit(identity, future, workers=b.address)
o2 = await c._gather(future2)
assert isinstance(o2, MyObj)
assert o2.data == 123
class Empty:
def __getstate__(self):
raise Exception("Not picklable")
def serialize_empty(x):
return {}, []
def deserialize_empty(header, frames):
return Empty()
register_serialization(Empty, serialize_empty, deserialize_empty)
def test_empty():
e = Empty()
e2 = deserialize(*serialize(e))
assert isinstance(e2, Empty)
def test_empty_loads():
from distributed.protocol import loads, dumps
e = Empty()
e2 = loads(dumps([to_serialize(e)]))
assert isinstance(e2[0], Empty)
def test_empty_loads_deep():
from distributed.protocol import loads, dumps
e = Empty()
e2 = loads(dumps([[[to_serialize(e)]]]))
assert isinstance(e2[0][0][0], Empty)
def test_serialize_bytes():
for x in [1, "abc", np.arange(5), b"ab" * int(40e6)]:
b = serialize_bytes(x)
assert isinstance(b, bytes)
y = deserialize_bytes(b)
assert str(x) == str(y)
def test_serialize_list_compress():
pytest.importorskip("lz4")
x = np.ones(1000000)
L = serialize_bytelist(x)
assert sum(map(nbytes, L)) < x.nbytes / 2
b = b"".join(L)
y = deserialize_bytes(b)
assert (x == y).all()
def test_malicious_exception():
class BadException(Exception):
def __setstate__(self):
return Exception("Sneaky deserialization code")
class MyClass:
def __getstate__(self):
raise BadException()
obj = MyClass()
header, frames = serialize(obj, serializers=[])
with pytest.raises(Exception) as info:
deserialize(header, frames)
assert "Sneaky" not in str(info.value)
assert "MyClass" in str(info.value)
header, frames = serialize(obj, serializers=["pickle"])
with pytest.raises(Exception) as info:
deserialize(header, frames)
assert "Sneaky" not in str(info.value)
assert "BadException" in str(info.value)
def test_errors():
msg = {"data": {"foo": to_serialize(inc)}, "a": 1, "b": 2, "c": 3, "d": 4, "e": 5}
header, frames = serialize(msg, serializers=["msgpack", "pickle"])
assert header["serializer"] == "pickle"
header, frames = serialize(msg, serializers=["msgpack"])
assert header["serializer"] == "error"
with pytest.raises(TypeError):
serialize(msg, serializers=["msgpack"], on_error="raise")
@gen_test()
async def test_err_on_bad_deserializer():
frames = await to_frames({"x": to_serialize(1234)}, serializers=["pickle"])
result = await from_frames(frames, deserializers=["pickle", "foo"])
assert result == {"x": 1234}
with pytest.raises(TypeError):
await from_frames(frames, deserializers=["msgpack"])
class MyObject:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def my_dumps(obj, context=None):
if type(obj).__name__ == "MyObject":
header = {"serializer": "my-ser"}
frames = [
msgpack.dumps(obj.__dict__, use_bin_type=True),
msgpack.dumps(context, use_bin_type=True),
]
return header, frames
else:
raise NotImplementedError()
def my_loads(header, frames):
obj = MyObject(**msgpack.loads(frames[0], raw=False))
    # to provide something to test against, let's just attach the context to
    # the object itself
obj.context = msgpack.loads(frames[1], raw=False)
return obj
@gen_cluster(
client=True,
client_kwargs={"serializers": ["my-ser", "pickle"]},
worker_kwargs={"serializers": ["my-ser", "pickle"]},
)
async def test_context_specific_serialization(c, s, a, b):
register_serialization_family("my-ser", my_dumps, my_loads)
try:
# Create the object on A, force communication to B
x = c.submit(MyObject, x=1, y=2, workers=a.address)
y = c.submit(lambda x: x, x, workers=b.address)
await wait(y)
key = y.key
def check(dask_worker):
# Get the context from the object stored on B
my_obj = dask_worker.data[key]
return my_obj.context
result = await c.run(check, workers=[b.address])
expected = {"sender": a.address, "recipient": b.address}
assert result[b.address]["sender"] == a.address # see origin worker
z = await y # bring object to local process
assert z.x == 1 and z.y == 2
assert z.context["sender"] == b.address
finally:
from distributed.protocol.serialize import families
del families["my-ser"]
@gen_cluster(client=True)
async def test_context_specific_serialization_class(c, s, a, b):
register_serialization(MyObject, my_dumps, my_loads)
# Create the object on A, force communication to B
x = c.submit(MyObject, x=1, y=2, workers=a.address)
y = c.submit(lambda x: x, x, workers=b.address)
await wait(y)
key = y.key
def check(dask_worker):
# Get the context from the object stored on B
my_obj = dask_worker.data[key]
return my_obj.context
result = await c.run(check, workers=[b.address])
expected = {"sender": a.address, "recipient": b.address}
assert result[b.address]["sender"] == a.address # see origin worker
z = await y # bring object to local process
assert z.x == 1 and z.y == 2
assert z.context["sender"] == b.address
def test_serialize_raises():
class Foo:
pass
@dask_serialize.register(Foo)
def dumps(f):
raise Exception("Hello-123")
with pytest.raises(Exception) as info:
deserialize(*serialize(Foo()))
assert "Hello-123" in str(info.value)
@pytest.mark.asyncio
async def test_profile_nested_sizeof():
# https://github.com/dask/distributed/issues/1674
n = 500
original = outer = {}
inner = {}
for i in range(n):
outer["children"] = inner
outer, inner = inner, {}
msg = {"data": original}
frames = await to_frames(msg)
def test_compression_numpy_list():
class MyObj:
pass
@dask_serialize.register(MyObj)
def _(x):
header = {"compression": [False]}
frames = [b""]
return header, frames
header, frames = serialize([MyObj(), MyObj()])
assert header["compression"] == [False, False]
@pytest.mark.parametrize(
"data,is_serializable",
[
([], False),
({}, False),
({i: i for i in range(10)}, False),
(set(range(10)), False),
(tuple(range(100)), False),
({"x": MyObj(5)}, True),
({"x": {"y": MyObj(5)}}, True),
pytest.param(
[1, MyObj(5)],
True,
marks=pytest.mark.xfail(reason="Only checks 0th element for now."),
),
([MyObj([0, 1, 2]), 1], True),
(tuple([MyObj(None)]), True),
({("x", i): MyObj(5) for i in range(100)}, True),
(memoryview(b"hello"), True),
],
)
def test_check_dask_serializable(data, is_serializable):
result = check_dask_serializable(data)
expected = is_serializable
assert result == expected
@pytest.mark.parametrize(
"serializers",
[["msgpack"], ["pickle"], ["msgpack", "pickle"], ["pickle", "msgpack"]],
)
def test_serialize_lists(serializers):
data_in = ["a", 2, "c", None, "e", 6]
header, frames = serialize(data_in, serializers=serializers)
data_out = deserialize(header, frames)
assert data_in == data_out
def test_deser_memoryview():
data_in = memoryview(b"hello")
header, frames = serialize(data_in)
assert header["type"] == "builtins.memoryview"
assert frames[0] is data_in
data_out = deserialize(header, frames)
assert data_in == data_out
| blaze/distributed | distributed/protocol/tests/test_serialize.py | Python | bsd-3-clause | 11,096 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Author: Gillett Hernandez
# @Date: 2016-07-14 17:06:40
# @Last Modified by: Gillett Hernandez
# @Last Modified time: 2016-09-03 00:19:08
def Collatz(n, seen={}):
steps_list = [n]
if n in seen:
return seen[n]
steps=1
i=n
while i>1:
steps+=1
if i%2==0:
i=i/2
else:
i=3*i+1
if i in seen:
return steps+seen[i][0], steps_list+seen[i][1]
steps_list.append(i)
seen[n]=[steps, steps_list]
for i, k in enumerate(steps_list):
seen[k]=[len(steps_list[i:]), steps_list[i:]]
return steps, steps_list
maxChain = 0
for x in range(1,1000000):
chain=Collatz(x)
if chain[0] > maxChain:
maxChain=chain[0]
ind=x
if __name__ == '__main__':
# import project_euler_offline
print(ind)
# print("Correct" if project_euler_offline.check(prob_n,res) else "Incorrect")
| gillett-hernandez/project-euler | Python/problem_14.py | Python | mit | 954 |
import os
import re
import datetime
import requests.exceptions
from openstates.utils import LXMLMixin
from pupa.utils import convert_pdf
from pupa.scrape import Scraper, VoteEvent as Vote
date_re = r".*(?P<date>(MONDAY|TUESDAY|WEDNESDAY|" + \
          r"THURSDAY|FRIDAY|SATURDAY|SUNDAY),\s\w+\s\d{1,2},\s\d{4}).*"
chamber_re = r".*JOURNAL OF THE ((HOUSE)|(SENATE)).*\d+.*DAY.*"
page_re = r"Page\s\d+"
class NDVoteScraper(Scraper, LXMLMixin):
def scrape(self, chamber=None, session=None):
if not session:
session = self.latest_session()
self.info('no session specified, using %s', session)
chambers = [chamber] if chamber else ['upper', 'lower']
for chamber in chambers:
yield from self.scrape_chamber(chamber, session)
def scrape_chamber(self, chamber, session):
chamber_name = 'house' if chamber == 'lower' else 'senate'
session_slug = {
'62': '62-2011',
'63': '63-2013',
'64': '64-2015',
'65': '65-2017',
}[session]
# Open the index page of the session's Registers, and open each
url = "http://www.legis.nd.gov/assembly/%s/journals/%s-journal.html" % (
session_slug, chamber_name)
page = self.lxmlize(url)
pdfs = page.xpath("//a[contains(@href, '.pdf')]")
for pdf in pdfs:
# Initialize information about the vote parsing
results = {}
in_motion = False
cur_vote = None
in_vote = False
cur_motion = ""
bills = []
# Determine which URLs the information was pulled from
pdf_url = pdf.attrib['href']
try:
(path, response) = self.urlretrieve(pdf_url)
except requests.exceptions.ConnectionError:
continue
# Convert the PDF to text
data = convert_pdf(path, type='text').decode('utf-8')
os.unlink(path)
# Determine the date of the document
date = re.findall(date_re, data)
if date:
date = date[0][0]
cur_date = datetime.datetime.strptime(date, "%A, %B %d, %Y")
else:
# If no date is found anywhere, do not process the document
self.warning("No date was found for the document; skipping.")
continue
# Check each line of the text for motion and vote information
lines = data.splitlines()
for line in lines:
# Ignore lines with no information
if re.search(chamber_re, line) or \
re.search(date_re, line) or \
re.search(page_re, line) or \
line.strip() == "":
pass
# Ensure that motion and vote capturing are not _both_ active
elif in_motion and in_vote:
raise AssertionError(
"Scraper should not be simultaneously processing " +
"motion name and votes, as it is for this motion: " +
cur_motion
)
# Start capturing motion text after a ROLL CALL header
elif not in_motion and not in_vote:
if line.strip() == "ROLL CALL":
in_motion = True
elif in_motion and not in_vote:
if cur_motion == "":
cur_motion = line.strip()
else:
cur_motion = cur_motion + " " + line.strip()
# ABSENT AND NOT VOTING marks the end of each motion name
# In this case, prepare to capture votes
if line.strip().endswith("VOTING") or \
line.strip().endswith("VOTING."):
in_motion = False
in_vote = True
elif not in_motion and in_vote:
# Ignore appointments and confirmations
if "The Senate advises and consents to the appointment" \
in line:
in_vote = False
cur_vote = None
results = {}
cur_motion = ""
bills = []
# If votes are being processed, record the voting members
elif ":" in line:
cur_vote, who = (x.strip() for x in line.split(":", 1))
who = [x.strip() for x in who.split(';') if x.strip() != ""]
results[cur_vote] = who
name_may_be_continued = False if line.endswith(";") \
else True
# Extracts bill numbers in the closing text
# used for when the closing text is multiple lines.
elif cur_vote is not None and\
re.findall(r"(?i)(H|S|J)(C?)(B|R|M) (\d+)", line) and \
not any(x in line.lower() for x in ['passed', 'adopted',
'sustained', 'prevailed',
'lost', 'failed']):
bills.extend(re.findall(r"(?i)(H|S|J)(C?)(B|R|M) (\d+)", line))
elif cur_vote is not None and \
not any(x in line.lower() for x in ['passed', 'adopted',
'sustained', 'prevailed',
'lost', 'failed']):
who = [x.strip() for x in line.split(";") if x.strip() != ""]
if name_may_be_continued:
results[cur_vote][-1] = results[cur_vote][-1] + \
" " + who.pop(0)
name_may_be_continued = False if line.endswith(";") \
else True
results[cur_vote].extend(who)
# At the conclusion of a vote, save its data
elif any(x in line.lower() for x in ['passed', 'adopted',
'sustained', 'prevailed',
'lost', 'failed']):
in_vote = False
cur_vote = None
                    # Identify what is being voted on
                    # Throw a warning if improper information is found
bills.extend(re.findall(r"(?i)(H|S|J)(C?)(B|R|M) (\d+)", line))
if bills == [] or cur_motion.strip() == "":
results = {}
cur_motion = ""
self.warning(
"No motion or bill name found: " +
"motion name: " + cur_motion + "; " +
"decision text: " + line.strip()
)
continue
# If votes are found in the motion name, throw an error
if "YEAS:" in cur_motion or "NAYS:" in cur_motion:
raise AssertionError(
"Vote data found in motion name: " +
cur_motion
)
# Use the collected results to determine who voted how
keys = {
"YEAS": "yes",
"NAYS": "no",
"ABSENT AND NOT VOTING": "other"
}
res = {}
for key in keys:
if key in results:
res[keys[key]] = results[key]
else:
res[keys[key]] = []
# Count the number of members voting each way
yes, no, other = \
len(res['yes']), \
len(res['no']), \
len(res['other'])
chambers = {
"H": "lower",
"S": "upper",
"J": "legislature"
}
# Almost all of the time, a vote only applies to one bill and this loop
# will only be run once.
# Some exceptions exist.
for bill in bills:
cur_bill_id = "%s%s%s %s" % bill
# Identify the source chamber for the bill
try:
bc = chambers[cur_bill_id[0]]
except KeyError:
bc = 'other'
# Determine whether or not the vote passed
if "over the governor's veto" in cur_motion.lower():
VETO_SUPERMAJORITY = 2 / 3
passed = (yes / (yes + no) > VETO_SUPERMAJORITY)
else:
passed = (yes > no)
# Create a Vote object based on the scraped information
vote = Vote(chamber=chamber,
start_date=cur_date.strftime('%Y-%m-%d'),
motion_text=cur_motion,
result='pass' if passed else 'fail',
legislative_session=session,
classification='passage',
bill=cur_bill_id,
bill_chamber=bc)
vote.add_source(pdf_url)
vote.add_source(url)
vote.set_count('yes', yes)
vote.set_count('no', no)
vote.set_count('other', other)
# For each category of voting members,
# add the individuals to the Vote object
for key in res:
for voter in res[key]:
vote.vote(key, voter)
# Check the vote counts in the motion text against
# the parsed results
for category_name in keys.keys():
# Need to search for the singular, not plural, in the text
# so it can find, for example, " 1 NAY "
vote_re = r"(\d+)\s{}".format(category_name[:-1])
motion_count = int(re.findall(vote_re, cur_motion)[0])
for item in vote.counts:
if item['option'] == keys[category_name]:
vote_count = item['value']
if motion_count != vote_count:
self.warning(
"Motion text vote counts ({}) ".format(motion_count) +
"differed from roll call counts ({}) ".format(
vote_count) +
"for {0} on {1}".format(category_name, cur_bill_id)
)
for item in vote.counts:
if item['option'] == keys[category_name]:
vote_count = motion_count
yield vote
# With the vote successfully processed,
# wipe its data and continue to the next one
results = {}
cur_motion = ""
bills = []
| votervoice/openstates | openstates/nd/votes.py | Python | gpl-3.0 | 12,605 |
# -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from synapse.api.constants import LoginType
from synapse.api.errors import LoginError, SynapseError, Codes
from synapse.http.servlet import RestServlet
from synapse.util.async import run_on_reactor
from ._base import client_v2_pattern, parse_json_dict_from_request
import logging
logger = logging.getLogger(__name__)
class PasswordRestServlet(RestServlet):
PATTERN = client_v2_pattern("/account/password")
def __init__(self, hs):
super(PasswordRestServlet, self).__init__()
self.hs = hs
self.auth = hs.get_auth()
self.auth_handler = hs.get_handlers().auth_handler
@defer.inlineCallbacks
def on_POST(self, request):
yield run_on_reactor()
body = parse_json_dict_from_request(request)
authed, result, params = yield self.auth_handler.check_auth([
[LoginType.PASSWORD],
[LoginType.EMAIL_IDENTITY]
], body, self.hs.get_ip_from_request(request))
if not authed:
defer.returnValue((401, result))
user_id = None
if LoginType.PASSWORD in result:
# if using password, they should also be logged in
auth_user, _, _ = yield self.auth.get_user_by_req(request)
if auth_user.to_string() != result[LoginType.PASSWORD]:
raise LoginError(400, "", Codes.UNKNOWN)
user_id = auth_user.to_string()
elif LoginType.EMAIL_IDENTITY in result:
threepid = result[LoginType.EMAIL_IDENTITY]
if 'medium' not in threepid or 'address' not in threepid:
raise SynapseError(500, "Malformed threepid")
# if using email, we must know about the email they're authing with!
threepid_user_id = yield self.hs.get_datastore().get_user_id_by_threepid(
threepid['medium'], threepid['address']
)
if not threepid_user_id:
raise SynapseError(404, "Email address not found", Codes.NOT_FOUND)
user_id = threepid_user_id
else:
            logger.error("Auth succeeded but no known type: %r", result.keys())
raise SynapseError(500, "", Codes.UNKNOWN)
if 'new_password' not in params:
raise SynapseError(400, "", Codes.MISSING_PARAM)
new_password = params['new_password']
yield self.auth_handler.set_password(
user_id, new_password
)
defer.returnValue((200, {}))
def on_OPTIONS(self, _):
return 200, {}
class ThreepidRestServlet(RestServlet):
PATTERN = client_v2_pattern("/account/3pid")
def __init__(self, hs):
super(ThreepidRestServlet, self).__init__()
self.hs = hs
self.identity_handler = hs.get_handlers().identity_handler
self.auth = hs.get_auth()
self.auth_handler = hs.get_handlers().auth_handler
@defer.inlineCallbacks
def on_GET(self, request):
yield run_on_reactor()
auth_user, _, _ = yield self.auth.get_user_by_req(request)
threepids = yield self.hs.get_datastore().user_get_threepids(
auth_user.to_string()
)
defer.returnValue((200, {'threepids': threepids}))
@defer.inlineCallbacks
def on_POST(self, request):
yield run_on_reactor()
body = parse_json_dict_from_request(request)
if 'threePidCreds' not in body:
raise SynapseError(400, "Missing param", Codes.MISSING_PARAM)
threePidCreds = body['threePidCreds']
auth_user, _, _ = yield self.auth.get_user_by_req(request)
threepid = yield self.identity_handler.threepid_from_creds(threePidCreds)
if not threepid:
raise SynapseError(
400, "Failed to auth 3pid", Codes.THREEPID_AUTH_FAILED
)
for reqd in ['medium', 'address', 'validated_at']:
if reqd not in threepid:
                logger.warn("Couldn't add 3pid: invalid response from ID server")
raise SynapseError(500, "Invalid response from ID Server")
yield self.auth_handler.add_threepid(
auth_user.to_string(),
threepid['medium'],
threepid['address'],
threepid['validated_at'],
)
if 'bind' in body and body['bind']:
logger.debug(
"Binding emails %s to %s",
threepid, auth_user.to_string()
)
yield self.identity_handler.bind_threepid(
threePidCreds, auth_user.to_string()
)
defer.returnValue((200, {}))
def register_servlets(hs, http_server):
PasswordRestServlet(hs).register(http_server)
ThreepidRestServlet(hs).register(http_server)
| iot-factory/synapse | synapse/rest/client/v2_alpha/account.py | Python | apache-2.0 | 5,356 |
from detonator import main
import os
for root, subdir, fnames in os.walk("programs"):
for fname in fnames:
if fname.endswith(".py") and not fname.startswith("_"):
# Quick hack to import the modules and automagify things.
__import__("programs.{}".format(fname[:-3]))
if __name__ == "__main__":
main() | shuhaowu/detonator | run.py | Python | gpl-3.0 | 322 |
#!/usr/bin/env python3
# -*- coding: utf8 -*- #
#
#
# Copyright (C) by [email protected], 1998 - 2017
#
# This file is part of tau4.
#
# tau4 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# tau4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with tau4. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################
import abc
from tau4 import Id
from tau4.ce._common import _AlgorithmDigital
from tau4.ce import eulerbw
from tau4.oop import overrides
from tau4.data import pandora
EulerBw4DT1 = eulerbw.DT1
# For backwards compatibility.
EulerBw4gPIDT1p = eulerbw.gPIDT1p
# For backwards compatibility.
EulerBw4I = eulerbw.I
# For backwards compatibility.
EulerBw4P = eulerbw.P
# For backwards compatibility.
Euler4Lead = eulerbw.Lead
# For backwards compatibility.
EulerBw4PDT1 = eulerbw.PDT1
# For backwards compatibility.
EulerBw4PIDT1 = eulerbw.PIDT1
# For backwards compatibility.
EulerBw4PIDT1p = eulerbw.PIDT1p
# For backwards compatibility.
EulerBw4PT1 = eulerbw.PT1
# For backwards compatibility.
EulerBw4PI = eulerbw.PI
# For backwards compatibility.
EulerBw4PT1PT1 = eulerbw.PT1PT1
# For backwards compatibility.
| p-o-seidon/tau4 | src/tau4/ce/__init__.py | Python | gpl-3.0 | 1,941 |
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.limiter
~~~~~~~~~~~~~~~~~~~~~~~~
A middleware that limits incoming data. This works around problems with
Trac_ or Django_ because those directly stream into the memory.
.. _Trac: http://trac.edgewall.org/
.. _Django: http://www.djangoproject.com/
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from warnings import warn
from werkzeug.wsgi import LimitedStream
class StreamLimitMiddleware(object):
"""Limits the input stream to a given number of bytes. This is useful if
you have a WSGI application that reads form data into memory (django for
example) and you don't want users to harm the server by uploading tons of
data.
Default is 10MB
.. versionchanged:: 0.9
Deprecated middleware.
"""
def __init__(self, app, maximum_size=1024 * 1024 * 10):
warn(DeprecationWarning('This middleware is deprecated'))
self.app = app
self.maximum_size = maximum_size
def __call__(self, environ, start_response):
limit = min(self.maximum_size, int(environ.get('CONTENT_LENGTH') or 0))
environ['wsgi.input'] = LimitedStream(environ['wsgi.input'], limit)
return self.app(environ, start_response)
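# Usage sketch (illustrative, not part of the original module): wrap any WSGI
# callable so the request body stream is capped; "application" below is only a
# placeholder name for your own WSGI app.
#
#   from werkzeug.contrib.limiter import StreamLimitMiddleware
#
#   application = StreamLimitMiddleware(application, maximum_size=5 * 1024 * 1024)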
| jeremydane/Info3180-Project4 | server/lib/werkzeug/contrib/limiter.py | Python | apache-2.0 | 1,373 |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, comma_or, nowdate, getdate
from frappe import _
from frappe.model.document import Document
from erpnext.accounts.party_status import notify_status
def validate_status(status, options):
if status not in options:
frappe.throw(_("Status must be one of {0}").format(comma_or(options)))
status_map = {
"Lead": [
["Converted", "has_customer"],
["Opportunity", "has_opportunity"],
],
"Opportunity": [
["Quotation", "has_quotation"],
["Converted", "has_ordered_quotation"],
["Lost", "eval:self.status=='Lost'"],
["Closed", "eval:self.status=='Closed'"]
],
"Quotation": [
["Draft", None],
["Submitted", "eval:self.docstatus==1"],
["Lost", "eval:self.status=='Lost'"],
["Ordered", "has_sales_order"],
["Cancelled", "eval:self.docstatus==2"],
],
"Sales Order": [
["Draft", None],
["To Deliver and Bill", "eval:self.per_delivered < 100 and self.per_billed < 100 and self.docstatus == 1"],
["To Bill", "eval:self.per_delivered == 100 and self.per_billed < 100 and self.docstatus == 1"],
["To Deliver", "eval:self.per_delivered < 100 and self.per_billed == 100 and self.docstatus == 1"],
["Completed", "eval:self.per_delivered == 100 and self.per_billed == 100 and self.docstatus == 1"],
["Completed", "eval:self.order_type == 'Maintenance' and self.per_billed == 100 and self.docstatus == 1"],
["Cancelled", "eval:self.docstatus==2"],
["Closed", "eval:self.status=='Closed'"],
],
"Sales Invoice": [
["Draft", None],
["Submitted", "eval:self.docstatus==1"],
["Return", "eval:self.is_return==1 and self.docstatus==1"],
["Credit Note Issued", "eval:self.outstanding_amount < 0 and self.docstatus==1"],
["Paid", "eval:self.outstanding_amount==0 and self.docstatus==1 and self.is_return==0"],
["Unpaid", "eval:self.outstanding_amount > 0 and getdate(self.due_date) >= getdate(nowdate()) and self.docstatus==1"],
["Overdue", "eval:self.outstanding_amount > 0 and getdate(self.due_date) < getdate(nowdate()) and self.docstatus==1"],
["Cancelled", "eval:self.docstatus==2"],
],
"Purchase Invoice": [
["Draft", None],
["Submitted", "eval:self.docstatus==1"],
["Return", "eval:self.is_return==1 and self.docstatus==1"],
["Debit Note Issued", "eval:self.outstanding_amount < 0 and self.docstatus==1"],
["Paid", "eval:self.outstanding_amount==0 and self.docstatus==1 and self.is_return==0"],
["Unpaid", "eval:self.outstanding_amount > 0 and getdate(self.due_date) >= getdate(nowdate()) and self.docstatus==1"],
["Overdue", "eval:self.outstanding_amount > 0 and getdate(self.due_date) < getdate(nowdate()) and self.docstatus==1"],
["Cancelled", "eval:self.docstatus==2"],
],
"Purchase Order": [
["Draft", None],
["To Receive and Bill", "eval:self.per_received < 100 and self.per_billed < 100 and self.docstatus == 1"],
["To Bill", "eval:self.per_received == 100 and self.per_billed < 100 and self.docstatus == 1"],
["To Receive", "eval:self.per_received < 100 and self.per_billed == 100 and self.docstatus == 1"],
["Completed", "eval:self.per_received == 100 and self.per_billed == 100 and self.docstatus == 1"],
["Delivered", "eval:self.status=='Delivered'"],
["Cancelled", "eval:self.docstatus==2"],
["Closed", "eval:self.status=='Closed'"],
],
"Delivery Note": [
["Draft", None],
["To Bill", "eval:self.per_billed < 100 and self.docstatus == 1"],
["Completed", "eval:self.per_billed == 100 and self.docstatus == 1"],
["Cancelled", "eval:self.docstatus==2"],
["Closed", "eval:self.status=='Closed'"],
],
"Purchase Receipt": [
["Draft", None],
["To Bill", "eval:self.per_billed < 100 and self.docstatus == 1"],
["Completed", "eval:self.per_billed == 100 and self.docstatus == 1"],
["Cancelled", "eval:self.docstatus==2"],
["Closed", "eval:self.status=='Closed'"],
]
}
class StatusUpdater(Document):
"""
Updates the status of the calling records
Delivery Note: Update Delivered Qty, Update Percent and Validate over delivery
Sales Invoice: Update Billed Amt, Update Percent and Validate over billing
Installation Note: Update Installed Qty, Update Percent Qty and Validate over installation
"""
def update_prevdoc_status(self):
self.update_qty()
self.validate_qty()
def set_status(self, update=False, status=None, update_modified=True):
if self.is_new():
return
if self.doctype in status_map:
_status = self.status
if status and update:
self.db_set("status", status)
sl = status_map[self.doctype][:]
sl.reverse()
for s in sl:
if not s[1]:
self.status = s[0]
break
elif s[1].startswith("eval:"):
if eval(s[1][5:]):
self.status = s[0]
break
elif getattr(self, s[1])():
self.status = s[0]
break
if self.status != _status and self.status not in ("Submitted", "Cancelled"):
self.add_comment("Label", _(self.status))
if update:
self.db_set('status', self.status, update_modified = update_modified)
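
    # Illustrative note (added, not part of the original code): set_status() above walks
    # the status_map entry for self.doctype in reverse and applies the first matching rule.
    # For example, a submitted Sales Order with per_delivered == 100 and per_billed == 100
    # matches the rule
    #   ["Completed", "eval:self.per_delivered == 100 and self.per_billed == 100 and self.docstatus == 1"]
    # so its status becomes "Completed".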
def validate_qty(self):
"""Validates qty at row level"""
self.tolerance = {}
self.global_tolerance = None
for args in self.status_updater:
if "target_ref_field" not in args:
# if target_ref_field is not specified, the programmer does not want to validate qty / amount
continue
# get unique transactions to update
for d in self.get_all_children():
if d.doctype == args['source_dt'] and d.get(args["join_field"]):
args['name'] = d.get(args['join_field'])
# get all qty where qty > target_field
item = frappe.db.sql("""select item_code, `{target_ref_field}`,
`{target_field}`, parenttype, parent from `tab{target_dt}`
where `{target_ref_field}` < `{target_field}`
and name=%s and docstatus=1""".format(**args),
args['name'], as_dict=1)
if item:
item = item[0]
item['idx'] = d.idx
item['target_ref_field'] = args['target_ref_field'].replace('_', ' ')
# if not item[args['target_ref_field']]:
# msgprint(_("Note: System will not check over-delivery and over-booking for Item {0} as quantity or amount is 0").format(item.item_code))
if args.get('no_tolerance'):
item['reduce_by'] = item[args['target_field']] - item[args['target_ref_field']]
if item['reduce_by'] > .01:
self.limits_crossed_error(args, item)
elif item[args['target_ref_field']]:
self.check_overflow_with_tolerance(item, args)
def check_overflow_with_tolerance(self, item, args):
"""
        Checks if there is overflow considering a relaxation tolerance
"""
# check if overflow is within tolerance
tolerance, self.tolerance, self.global_tolerance = get_tolerance_for(item['item_code'],
self.tolerance, self.global_tolerance)
overflow_percent = ((item[args['target_field']] - item[args['target_ref_field']]) /
item[args['target_ref_field']]) * 100
if overflow_percent - tolerance > 0.01:
item['max_allowed'] = flt(item[args['target_ref_field']] * (100+tolerance)/100)
item['reduce_by'] = item[args['target_field']] - item['max_allowed']
self.limits_crossed_error(args, item)
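        # Worked example (added, not part of the original code): with
        # target_ref_field (e.g. ordered qty) = 100 and target_field (e.g. delivered qty) = 110,
        # overflow_percent = (110 - 100) / 100 * 100 = 10. With a 5% tolerance,
        # 10 - 5 > 0.01 holds, so max_allowed = 105, reduce_by = 5 and
        # limits_crossed_error() is raised.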
def limits_crossed_error(self, args, item):
'''Raise exception for limits crossed'''
frappe.throw(_('This document is over limit by {0} {1} for item {4}. Are you making another {3} against the same {2}?')
.format(
frappe.bold(_(item["target_ref_field"].title())),
frappe.bold(item["reduce_by"]),
frappe.bold(_(args.get('target_dt'))),
frappe.bold(_(self.doctype)),
frappe.bold(item.get('item_code'))
) + '<br><br>' +
_('To allow over-billing or over-ordering, update "Allowance" in Stock Settings or the Item.'),
title = _('Limit Crossed'))
def update_qty(self, update_modified=True):
"""Updates qty or amount at row level
:param update_modified: If true, updates `modified` and `modified_by` for target parent doc
"""
for args in self.status_updater:
# condition to include current record (if submit or no if cancel)
if self.docstatus == 1:
args['cond'] = ' or parent="%s"' % self.name.replace('"', '\"')
else:
args['cond'] = ' and parent!="%s"' % self.name.replace('"', '\"')
self._update_children(args, update_modified)
if "percent_join_field" in args:
self._update_percent_field_in_targets(args, update_modified)
def _update_children(self, args, update_modified):
"""Update quantities or amount in child table"""
for d in self.get_all_children():
if d.doctype != args['source_dt']:
continue
self._update_modified(args, update_modified)
# updates qty in the child table
args['detail_id'] = d.get(args['join_field'])
args['second_source_condition'] = ""
if args.get('second_source_dt') and args.get('second_source_field') \
and args.get('second_join_field'):
if not args.get("second_source_extra_cond"):
args["second_source_extra_cond"] = ""
args['second_source_condition'] = """ + ifnull((select sum(%(second_source_field)s)
from `tab%(second_source_dt)s`
where `%(second_join_field)s`="%(detail_id)s"
and (`tab%(second_source_dt)s`.docstatus=1) %(second_source_extra_cond)s), 0) """ % args
if args['detail_id']:
if not args.get("extra_cond"): args["extra_cond"] = ""
frappe.db.sql("""update `tab%(target_dt)s`
set %(target_field)s = (
(select ifnull(sum(%(source_field)s), 0)
from `tab%(source_dt)s` where `%(join_field)s`="%(detail_id)s"
and (docstatus=1 %(cond)s) %(extra_cond)s)
%(second_source_condition)s
)
%(update_modified)s
where name='%(detail_id)s'""" % args)
def _update_percent_field_in_targets(self, args, update_modified=True):
"""Update percent field in parent transaction"""
distinct_transactions = set([d.get(args['percent_join_field'])
for d in self.get_all_children(args['source_dt'])])
for name in distinct_transactions:
if name:
args['name'] = name
self._update_percent_field(args, update_modified)
def _update_percent_field(self, args, update_modified=True):
"""Update percent field in parent transaction"""
self._update_modified(args, update_modified)
if args.get('target_parent_field'):
frappe.db.sql("""update `tab%(target_parent_dt)s`
set %(target_parent_field)s = round(
ifnull((select
ifnull(sum(if(%(target_ref_field)s > %(target_field)s, abs(%(target_field)s), abs(%(target_ref_field)s))), 0)
/ sum(abs(%(target_ref_field)s)) * 100
from `tab%(target_dt)s` where parent="%(name)s"), 0), 2)
%(update_modified)s
where name='%(name)s'""" % args)
# update field
if args.get('status_field'):
frappe.db.sql("""update `tab%(target_parent_dt)s`
set %(status_field)s = if(%(target_parent_field)s<0.001,
'Not %(keyword)s', if(%(target_parent_field)s>=99.99,
'Fully %(keyword)s', 'Partly %(keyword)s'))
where name='%(name)s'""" % args)
if update_modified:
target = frappe.get_doc(args["target_parent_dt"], args["name"])
target.set_status(update=True)
target.notify_update()
notify_status(target)
def _update_modified(self, args, update_modified):
args['update_modified'] = ''
if update_modified:
args['update_modified'] = ', modified = now(), modified_by = "{0}"'\
.format(frappe.db.escape(frappe.session.user))
def update_billing_status_for_zero_amount_refdoc(self, ref_dt):
ref_fieldname = ref_dt.lower().replace(" ", "_")
zero_amount_refdoc = []
all_zero_amount_refdoc = frappe.db.sql_list("""select name from `tab%s`
where docstatus=1 and base_net_total = 0""" % ref_dt)
for item in self.get("items"):
if item.get(ref_fieldname) \
and item.get(ref_fieldname) in all_zero_amount_refdoc \
and item.get(ref_fieldname) not in zero_amount_refdoc:
zero_amount_refdoc.append(item.get(ref_fieldname))
if zero_amount_refdoc:
self.update_billing_status(zero_amount_refdoc, ref_dt, ref_fieldname)
def update_billing_status(self, zero_amount_refdoc, ref_dt, ref_fieldname):
for ref_dn in zero_amount_refdoc:
ref_doc_qty = flt(frappe.db.sql("""select ifnull(sum(qty), 0) from `tab%s Item`
where parent=%s""" % (ref_dt, '%s'), (ref_dn))[0][0])
billed_qty = flt(frappe.db.sql("""select ifnull(sum(qty), 0)
from `tab%s Item` where %s=%s and docstatus=1""" %
(self.doctype, ref_fieldname, '%s'), (ref_dn))[0][0])
per_billed = ((ref_doc_qty if billed_qty > ref_doc_qty else billed_qty)\
/ ref_doc_qty)*100
ref_doc = frappe.get_doc(ref_dt, ref_dn)
ref_doc.db_set("per_billed", per_billed)
ref_doc.set_status(update=True)
def get_tolerance_for(item_code, item_tolerance={}, global_tolerance=None):
"""
Returns the tolerance for the item, if not set, returns global tolerance
"""
if item_tolerance.get(item_code):
return item_tolerance[item_code], item_tolerance, global_tolerance
tolerance = flt(frappe.db.get_value('Item',item_code,'tolerance') or 0)
if not tolerance:
if global_tolerance == None:
global_tolerance = flt(frappe.db.get_value('Stock Settings', None, 'tolerance'))
tolerance = global_tolerance
item_tolerance[item_code] = tolerance
return tolerance, item_tolerance, global_tolerance
| indautgrp/erpnext | erpnext/controllers/status_updater.py | Python | gpl-3.0 | 13,277 |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
"""
Created on Sun Sep 18 20:24:29 2016
"""
list1 = [3, 44, 38, 5, 47, 15, 36, 26, 27, 2, 46, 4, 19, 50, 48]
list2 = [1,1,1,1,1,1,1,1]
list3 = [1,2,3,4,5,6,7,8]
list4 = [2,3,6,7,5,2,2,2]
list5 = [8,7,6,5,4,3,2,1]
# check function: compare each sort against sort_bubble
def check(func):
print sort_bubble(list1)==func(list1)
print sort_bubble(list2)==func(list2)
print sort_bubble(list3)==func(list3)
print sort_bubble(list4)==func(list4)
# bubble sort
def sort_bubble(l):
while True:
swapped = False
for i in range(len(l)-1):
if l[i]>l[i+1]:
l[i],l[i+1] = l[i+1],l[i]
swapped = True
if swapped == False:
break;
return l
# selection sort
def sort_select(l):
for i in range(len(l)-1):
min_num = l[i]
index_min = i
for j in range(i,len(l)):
if l[j]<min_num:
min_num = l[j]
index_min = j
l[i],l[index_min] = l[index_min],l[i]
return l
# insertion sort
def sort_insert(l):
for i in range(1,len(l)):
temp = l[i]
del l[i]
for j in range(i-1,-1,-1):
if j==0 and l[j] > temp:
l.insert(0,temp)
elif l[j] > temp:
pass
else:
l.insert(j+1,temp)
break
return l
# merge sort
def sort_merge(l):
if len(l) <= 1:
return l
num = int( len(l)/2 )
left = sort_merge(l[:num])
right = sort_merge(l[num:])
return Merge(left, right)
def Merge(left,right):
r, l=0, 0
result=[]
while l<len(left) and r<len(right):
if left[l] < right[r]:
result.append(left[l])
l += 1
else:
result.append(right[r])
r += 1
result += right[r:]
result+= left[l:]
return result
# | zfrxiaxia/Code-zfr | visualgo数据结构/01_sort.py | Python | gpl-3.0 | 1,847 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Default celery configuration."""
import logging
import ssl
from airflow.configuration import conf
from airflow.exceptions import AirflowConfigException, AirflowException
def _broker_supports_visibility_timeout(url):
return url.startswith("redis://") or url.startswith("sqs://")
log = logging.getLogger(__name__)
broker_url = conf.get('celery', 'BROKER_URL')
broker_transport_options = conf.getsection('celery_broker_transport_options') or {}
if 'visibility_timeout' not in broker_transport_options:
if _broker_supports_visibility_timeout(broker_url):
broker_transport_options['visibility_timeout'] = 21600
DEFAULT_CELERY_CONFIG = {
'accept_content': ['json'],
'event_serializer': 'json',
'worker_prefetch_multiplier': conf.getint('celery', 'worker_prefetch_multiplier'),
'task_acks_late': True,
'task_default_queue': conf.get('operators', 'DEFAULT_QUEUE'),
'task_default_exchange': conf.get('operators', 'DEFAULT_QUEUE'),
'task_track_started': conf.getboolean('celery', 'task_track_started'),
'broker_url': broker_url,
'broker_transport_options': broker_transport_options,
'result_backend': conf.get('celery', 'RESULT_BACKEND'),
'worker_concurrency': conf.getint('celery', 'WORKER_CONCURRENCY'),
'worker_enable_remote_control': conf.getboolean('celery', 'worker_enable_remote_control'),
}
celery_ssl_active = False
try:
celery_ssl_active = conf.getboolean('celery', 'SSL_ACTIVE')
except AirflowConfigException:
log.warning("Celery Executor will run without SSL")
try:
if celery_ssl_active:
if 'amqp://' in broker_url:
broker_use_ssl = {
'keyfile': conf.get('celery', 'SSL_KEY'),
'certfile': conf.get('celery', 'SSL_CERT'),
'ca_certs': conf.get('celery', 'SSL_CACERT'),
'cert_reqs': ssl.CERT_REQUIRED,
}
elif 'redis://' in broker_url:
broker_use_ssl = {
'ssl_keyfile': conf.get('celery', 'SSL_KEY'),
'ssl_certfile': conf.get('celery', 'SSL_CERT'),
'ssl_ca_certs': conf.get('celery', 'SSL_CACERT'),
'ssl_cert_reqs': ssl.CERT_REQUIRED,
}
else:
raise AirflowException(
                'The broker you configured does not support SSL_ACTIVE being set to True. '
                'Please use RabbitMQ or Redis if you would like to use SSL for the broker.'
)
DEFAULT_CELERY_CONFIG['broker_use_ssl'] = broker_use_ssl
except AirflowConfigException:
raise AirflowException(
'AirflowConfigException: SSL_ACTIVE is True, '
'please ensure SSL_KEY, '
'SSL_CERT and SSL_CACERT are set'
)
except Exception as e:
raise AirflowException(
f'Exception: There was an unknown Celery SSL Error. Please ensure you want to use SSL and/or have '
f'all necessary certs and key ({e}).'
)
result_backend = DEFAULT_CELERY_CONFIG['result_backend']
if 'amqp://' in result_backend or 'redis://' in result_backend or 'rpc://' in result_backend:
log.warning(
"You have configured a result_backend of %s, it is highly recommended "
"to use an alternative result_backend (i.e. a database).",
result_backend,
)
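# A minimal sketch (not part of this module) of how a Celery application might consume
# this configuration; the app name below is an assumption, not Airflow's real one:
#   from celery import Celery
#   app = Celery('airflow_celery_app')
#   app.conf.update(DEFAULT_CELERY_CONFIG)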
| apache/airflow | airflow/config_templates/default_celery.py | Python | apache-2.0 | 4,059 |
import numpy as np
import copy
import os
import pickle
import multiprocessing as mp
import csv
import xml.etree.cElementTree as ET
import time as T
def findMagnitude(vector):
'''This function simply returns the magnitude of a given vector'''
return np.sqrt(vector[0]**2 + vector[1]**2 + vector[2]**2)
def updateXMLBoxLength(adjustedInputFileName, boxSize):
'''This function opens a hoomd xml and updates it to have a given simulation volume (boxSize)'''
with open(adjustedInputFileName, 'r') as xmlFile:
xmlData = xmlFile.readlines()
for lineNo in range(len(xmlData)):
if 'box' in xmlData[lineNo]:
newBoxLine = ''
quoteMarksLoc = findIndex(xmlData[lineNo], '"')
# The quote marks 0 and 1 are around the number for lx, 2 and 3 are ly,
# 4 and 5 are lz. Others are for skew (xy, xz, yz)
listOfLine = list(xmlData[lineNo])
listOfLine[quoteMarksLoc[4] + 1:quoteMarksLoc[5]] = str(boxSize[2])
listOfLine[quoteMarksLoc[2] + 1:quoteMarksLoc[3]] = str(boxSize[1])
listOfLine[quoteMarksLoc[0] + 1:quoteMarksLoc[1]] = str(boxSize[0])
for character in listOfLine:
newBoxLine += character
newBoxLine += '\n'
xmlData[lineNo] = newBoxLine
break
with open(adjustedInputFileName, 'w+') as xmlFile:
xmlFile.writelines(xmlData)
def findIndex(string, character):
'''This function returns the locations of an inputted character in an inputted string'''
index = 0
locations = []
while index < len(string):
if string[index] == character:
locations.append(index)
index += 1
if len(locations) == 0:
return None
return locations
def calculateSeparation(atom1, atom2):
'''This function calculates the distance between two input points (either as lists or np.arrays)'''
xdif = atom1[0] - atom2[0]
ydif = atom1[1] - atom2[1]
zdif = atom1[2] - atom2[2]
return np.sqrt(xdif**2 + ydif**2 + zdif**2)
def linearInterpDescendingY(targetValue, xArray, yArray):
'''This function takes in two numpy arrays, and then linearly interpolates to find the value of X when Y is equal to targetValue. yArray must be a descending array (doesn't have to be monotonic, but the function will report the first point at which the curve is below targetValue so be careful of noise!). The function returns a value of None if the yArray never drops below the targetValue'''
xVal = None
for index, value in enumerate(yArray):
if value > targetValue:
continue
xLo = xArray[index - 1]
xHi = xArray[index]
yHi = yArray[index - 1]
yLo = yArray[index]
yDiff = yHi - yLo
xDiff = xHi - xLo
yDeltaFrac = (yHi - targetValue) / yDiff
xVal = xLo + yDeltaFrac * xDiff
break
return xVal
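# Worked example (a sketch, not part of the original module):
#   linearInterpDescendingY(5.0, [0.0, 1.0, 2.0, 3.0], [10.0, 8.0, 4.0, 2.0])  # -> 1.75
# The curve first drops below 5.0 between x = 1.0 (y = 8.0) and x = 2.0 (y = 4.0),
# and linear interpolation over that interval gives x = 1.75.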
def calcCOM(listOfPositions, listOfAtomTypes=None, listOfMasses=None):
'''This function calculates the centre of mass of a collection of sites/atoms (listOfPositions) with corresponding type (listOfAtomTypes) or mass (listOfMasses)
If listOfMasses is not specified, then listOfAtomTypes MUST be.'''
massWeighted = np.array([0.0, 0.0, 0.0])
if listOfMasses is None:
listOfMasses = []
for atomType in listOfAtomTypes:
# Masses obtained from nist.gov, for the atoms we are likely to simulate the most.
# Add in new atoms here if your molecule requires it!
if ('BR' in atomType) or ('Br' in atomType) or ('br' in atomType):
print()
listOfMasses.append(78.918338)
elif ('SI' in atomType) or ('Si' in atomType) or ('si' in atomType):
listOfMasses.append(27.976926)
elif ('C' in atomType) or ('c' in atomType):
listOfMasses.append(12.000000)
elif ('H' in atomType) or ('h' in atomType):
listOfMasses.append(1.007825)
elif ('S' in atomType) or ('s' in atomType):
listOfMasses.append(31.972071)
elif ('O' in atomType) or ('o' in atomType):
listOfMasses.append(15.994914)
elif ('N' in atomType) or ('n' in atomType):
listOfMasses.append(14.003074)
else:
raise SystemError("Unknown atomic mass", atomType, "please hardcode into helperFunctions.calcCOM.")
totalMass = np.sum(listOfMasses)
for atomID, position in enumerate(listOfPositions):
for axis in range(3):
massWeighted[axis] += position[axis] * listOfMasses[atomID]
return massWeighted / float(totalMass)
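# Quick example (a sketch): two carbon atoms at x = 0 and x = 2 give a centre of mass at x = 1:
#   calcCOM([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]], listOfAtomTypes=['C1', 'C2'])  # -> array([1., 0., 0.])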
def findAxis(atom1, atom2, normalise=True):
'''This function determines the normalised vector from the location of atom1 to atom2. The positions can enter as lists or arrays, but are output as arrays'''
xSep = atom2[0] - atom1[0]
ySep = atom2[1] - atom1[1]
zSep = atom2[2] - atom1[2]
if normalise is True:
axisVector = normaliseVec(np.array([xSep, ySep, zSep]))
else:
axisVector = np.array([xSep, ySep, zSep])
return axisVector
def normaliseVec(vector):
'''This function normalises an input vector to unit magnitude'''
return vector / np.linalg.norm(vector) # float(np.sqrt(vector[0]**2 + vector[1]**2 + vector[2]**2))
def getRotationMatrix(vector1, vector2):
'''This function returns the rotation matrix around the origin that maps vector1 to vector 2'''
crossProduct = np.cross(vector1, vector2)
sinAngle = np.sqrt(((crossProduct[0]**2) + ((crossProduct[1])**2) + ((crossProduct[2])**2)))
cosAngle = np.dot(vector1, vector2)
skewMatrix = np.matrix([[0, -crossProduct[2], crossProduct[1]], [crossProduct[2], 0, -crossProduct[0]], [-crossProduct[1], crossProduct[0], 0]])
skewMatrixSquared = skewMatrix * skewMatrix
rotMatrix = np.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + skewMatrix + skewMatrixSquared * ((1 - cosAngle) / (sinAngle**2))
return rotMatrix
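# Sanity check (a sketch, not part of the original module): the returned matrix maps
# the x unit vector onto the y unit vector.
#   R = getRotationMatrix(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))
#   np.allclose(R * np.matrix([[1.0], [0.0], [0.0]]), np.matrix([[0.0], [1.0], [0.0]]))  # True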
def parallelSort(list1, list2):
    '''This function sorts a pair of lists by the first list in ascending order (for example, atom masses and corresponding positions can be input, sorted by ascending mass, and the two lists output such that mass[atom_i] still corresponds to position[atom_i]).'''
data = zip(list1, list2)
data.sort()
list1, list2 = map(lambda t: list(t), zip(*data))
return list1, list2
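# Example (a sketch; relies on Python 2 semantics, where zip() returns a list that can be sorted in place):
#   parallelSort([3, 1, 2], ['c', 'a', 'b'])  # -> ([1, 2, 3], ['a', 'b', 'c'])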
def appendCSV(fileName, data):
'''Appends a CSV file (fileName) with a row given by data (as a list)'''
with open(fileName, 'a+') as csvFile:
document = csv.writer(csvFile, delimiter=',')
document.writerow(list(data))
def writeCSV(fileName, data):
'''Writes a CSV file given a 2D array `data' of arbitrary size'''
with open(fileName, 'w+') as csvFile:
document = csv.writer(csvFile, delimiter=',')
for row in data:
document.writerow(list(row))
print()
def rotationMatrix(vector1, vector2):
'''A function to return the rotation matrix around the origin that maps vector1 to vector 2'''
crossProduct = np.cross(vector1, vector2)
sinAngle = np.sqrt(((crossProduct[0]**2) + ((crossProduct[1])**2) + ((crossProduct[2])**2)))
cosAngle = np.dot(vector1, vector2)
skewMatrix = np.matrix([[0, -crossProduct[2], crossProduct[1]], [crossProduct[2], 0, -crossProduct[0]], [-crossProduct[1], crossProduct[0], 0]])
skewMatrixSquared = skewMatrix * skewMatrix
rotMatrix = np.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) + skewMatrix + skewMatrixSquared * ((1 - cosAngle) / (sinAngle**2))
return rotMatrix
def addUnwrappedPositions(inputDictionary):
'''This function takes a runHoomd.py input dictionary and updates the 'unwrapped_position' key based on the values of the 'position' and 'image' keys'''
simulationDimensions = [inputDictionary['lx'], inputDictionary['ly'], inputDictionary['lz']]
inputDictionary['unwrapped_position'] = [0] * len(inputDictionary['position'])
for i in range(len(inputDictionary['position'])):
position = inputDictionary['position'][i]
if len(inputDictionary['image']) > 0:
image = inputDictionary['image'][i]
else:
image = [0, 0, 0]
unwrappedPosition = []
for axis in range(len(image)):
unwrappedPosition.append((image[axis] * simulationDimensions[axis]) + position[axis])
inputDictionary['unwrapped_position'][i] = unwrappedPosition
return inputDictionary
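# Worked example (a sketch): in a 10 x 10 x 10 box, a wrapped position of [4.0, 0.0, 0.0]
# with an image flag of [1, 0, 0] unwraps to [1 * 10 + 4.0, 0.0, 0.0] = [14.0, 0.0, 0.0].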
def replaceWrappedPositions(inputDictionary):
'''This function takes a morphCT input dictionary and replaces the 'position' and 'image' keys with the 'unwrapped_position' key and '[0, 0, 0]' respectively.'''
for atomID, unwrapped_position in enumerate(inputDictionary['unwrapped_position']):
inputDictionary['position'][atomID] = unwrapped_position
inputDictionary['image'][atomID] = [0, 0, 0]
return inputDictionary
def addWrappedPositions(inputDictionary):
'''This function takes a runHoomd.py input dictionary and updates the 'position' and 'image' keys based on the values of the 'unwrapped_position' key'''
simulationDimensions = [inputDictionary['lx'], inputDictionary['ly'], inputDictionary['lz']]
inputDictionary['position'] = [0] * len(inputDictionary['unwrapped_position'])
inputDictionary['image'] = [0] * len(inputDictionary['unwrapped_position'])
for atomID in range(len(inputDictionary['unwrapped_position'])):
position = copy.deepcopy(inputDictionary['unwrapped_position'][atomID])
imageCoords = [0, 0, 0]
for axis in range(len(position)):
if position[axis] > (simulationDimensions[axis] / 2.0):
while position[axis] > (simulationDimensions[axis] / 2.0):
imageCoords[axis] += 1
position[axis] -= simulationDimensions[axis]
elif position[axis] < -(simulationDimensions[axis] / 2.0):
while position[axis] < -(simulationDimensions[axis] / 2.0):
imageCoords[axis] -= 1
position[axis] += simulationDimensions[axis]
inputDictionary['position'][atomID] = position
inputDictionary['image'][atomID] = imageCoords
return inputDictionary
def addMasses(inputDictionary):
    '''This function takes a runHoomd.py input dictionary and updates the 'mass' key based on the values of the 'type' key. Note that more hardcoding is required to add additional atom types'''
inputDictionary['mass'] = [1.0] * len(inputDictionary['type'])
for atomID in range(len(inputDictionary['type'])):
if 'H' in inputDictionary['type'][atomID]:
inputDictionary['mass'][atomID] = 1.00794
elif 'C' in inputDictionary['type'][atomID]:
inputDictionary['mass'][atomID] = 12.0107
elif 'S' in inputDictionary['type'][atomID]:
inputDictionary['mass'][atomID] = 32.0660
return inputDictionary
def addDiameters(inputDictionary):
    '''This function takes a runHoomd.py input dictionary and updates the 'diameter' key based on the values of the 'type' key. Note that more hardcoding is required to add additional atom types'''
inputDictionary['diameter'] = [1.0] * len(inputDictionary['type'])
for atomID in range(len(inputDictionary['type'])):
if 'H' in inputDictionary['type'][atomID]:
inputDictionary['diameter'][atomID] = 0.53
elif 'C' in inputDictionary['type'][atomID]:
inputDictionary['diameter'][atomID] = 0.67
elif 'S' in inputDictionary['type'][atomID]:
inputDictionary['diameter'][atomID] = 0.88
return inputDictionary
def addTerminatingHydrogen(inputDictionary, terminatingConnection, terminatingUnit=[['H1', 0, 0, 0]], terminatingUnitBonds=None):
    '''This function takes a runHoomd.py input dictionary and the ID of the atom that the terminating unit bonds to, then appends the terminating unit (a single hydrogen by default) and its bond to the dictionary'''
# Examine the current bonds on the terminating connection
previousBondVectors = []
for bond in inputDictionary['bond']:
if bond[1] == terminatingConnection:
previousBondVectors.append(np.array(inputDictionary['unwrapped_position'][bond[2]]) - np.array(inputDictionary['unwrapped_position'][bond[1]]))
elif bond[2] == terminatingConnection:
previousBondVectors.append(np.array(inputDictionary['unwrapped_position'][bond[1]]) - np.array(inputDictionary['unwrapped_position'][bond[2]]))
bondLengths = []
for previousBond in previousBondVectors:
bondLengths.append(np.linalg.norm(previousBond))
terminatingBondLength = np.average(bondLengths)
terminatingBondVector = -np.sum(previousBondVectors, 0) / terminatingBondLength
# The first atom of the terminatingUnit connects to the terminatingConnection.
# Add this atom and the bond
terminatingAtomPosn = list(np.array(inputDictionary['unwrapped_position'][terminatingConnection]) + terminatingBondVector + np.array(terminatingUnit[0][1:]))
inputDictionary['unwrapped_position'].append(terminatingAtomPosn)
inputDictionary['type'].append(terminatingUnit[0][0])
# NOTE: This is hardcoded, change to soft when implementing different terminating units
inputDictionary['mass'].append(1.007825)
inputDictionary['diameter'].append(0.53)
inputDictionary['body'].append(-1)
inputDictionary['charge'].append(inputDictionary['charge'][terminatingConnection])
# Add the wrapped coordinates
simulationDimensions = [inputDictionary['lx'], inputDictionary['ly'], inputDictionary['lz']]
image = [0, 0, 0]
position = copy.deepcopy(terminatingAtomPosn)
for axis in range(len(image)):
while position[axis] > simulationDimensions[axis] / 2.0:
position[axis] -= simulationDimensions[axis]
image[axis] += 1
while position[axis] < -simulationDimensions[axis] / 2.0:
position[axis] += simulationDimensions[axis]
image[axis] -= 1
inputDictionary['position'].append(position)
inputDictionary['image'].append(image)
# Add the bond
inputDictionary['bond'].append([inputDictionary['type'][terminatingConnection] + '-' + terminatingUnit[0][0], terminatingConnection, len(inputDictionary['type']) - 1])
# Add any subsequent atoms and bonds in the terminatingUnit as defined by the terminatingUnitBonds
for atom in terminatingUnit[1:]:
raise SystemError("Multiple-atom terminating units not yet implemented.")
# Finally, update the number of atoms in the system (we just added one!)
inputDictionary['natoms'] += 1
return inputDictionary, inputDictionary['natoms'] - 1
def loadMorphologyXMLETree(xmlPath, sigma=1.0):
atomProps3DFloat = ['position']
atomProps3DInt = ['image']
atomPropsInt = ['body']
atomPropsFloat = ['mass', 'diameter', 'charge']
atomPropsStr = ['type']
constraintProps = ['bond', 'angle', 'dihedral', 'improper']
atomDictionary = {}
with open(xmlPath, 'r') as xmlFileName:
xmlFile = ET.parse(xmlFileName)
morphologyConfig = xmlFile.getroot()[-1]
for axis, systemDim in morphologyConfig.find('box').attrib.iteritems():
atomDictionary[axis] = float(systemDim)
for key in atomPropsInt:
atomDictionary[key] = map(int, morphologyConfig.find(key).text.split('\n')[1:-1])
for key in atomPropsFloat:
atomDictionary[key] = map(float, morphologyConfig.find(key).text.split('\n')[1:-1])
for key in atomPropsStr:
atomDictionary[key] = morphologyConfig.find(key).text.split('\n')[1:-1]
for key in atomProps3DInt:
atomDictionary[key] = [map(int, x.split(' ')) for x in morphologyConfig.find(key).text.split('\n')[1:-1]]
for key in atomProps3DFloat:
atomDictionary[key] = [list(np.array(map(float, x.split(' '))) * sigma) for x in morphologyConfig.find(key).text.split('\n')[1:-1]]
for key in constraintProps:
atomDictionary[key] = [[x.split(' ')[0]] + map(int, x.split(' ')[1:]) for x in morphologyConfig.find(key).text.split('\n')[1:-1]]
return atomDictionary
def loadMorphologyXML(xmlPath, sigma=1.0):
# XML has SimDims as <box
# Positions as <position and <image
# Velocities as <velocity
# Mass as <mass
# Diameters as <diameter
# Type as <type
# "Body" as <body
# Bonds as <bond, with each line as bondA, bondB, etc.
# Angles as <angle, with each angle as angleA, angleB, etc.
# Dihedral as <dihedral
# Improper as <improper (usually none in xml)
# Charge as <charge
AtomDictionary = {'position': [], 'image': [], 'mass': [], 'diameter': [], 'type': [], 'body': [], 'bond': [], 'angle': [], 'dihedral': [], 'improper': [], 'charge': []}
record = False
with open(xmlPath, 'r') as xmlFile:
xmlData = xmlFile.readlines()
for line in xmlData:
if ('</' in line):
record = False
elif ('<configuration' in line) or ('<box' in line):
# Get configuration data from this line (timestep, natoms etc)
splitLine = line.split(' ')
for i in range(1, len(splitLine)):
equalsLoc = findIndex(splitLine[i], '=')
if equalsLoc is None:
# Skip any elements without equals
continue
quotationLoc = findIndex(splitLine[i], '"')
if ('.' in splitLine[i][quotationLoc[0] + 1:quotationLoc[1]]):
# Catch float in the value (excludes the = and quotation marks)
if ('<box' in line):
AtomDictionary[splitLine[i][:equalsLoc[0]]] = float(splitLine[i][quotationLoc[0] + 1:quotationLoc[1]]) * sigma
else:
AtomDictionary[splitLine[i][:equalsLoc[0]]] = float(splitLine[i][quotationLoc[0] + 1:quotationLoc[1]])
else:
if ('<box' in line):
AtomDictionary[splitLine[i][:equalsLoc[0]]] = int(splitLine[i][quotationLoc[0] + 1:quotationLoc[1]]) * sigma
else:
AtomDictionary[splitLine[i][:equalsLoc[0]]] = int(splitLine[i][quotationLoc[0] + 1:quotationLoc[1]])
elif ('<position' in line):
record = True
recordType = 'position'
continue
elif ('<image' in line):
record = True
recordType = 'image'
continue
elif ('<mass' in line):
record = True
recordType = 'mass'
continue
elif ('<diameter' in line):
record = True
recordType = 'diameter'
continue
elif ('<type' in line):
record = True
recordType = 'type'
continue
elif ('<body' in line):
record = True
recordType = 'body'
continue
elif ('<bond' in line):
record = True
recordType = 'bond'
continue
elif ('<angle' in line):
record = True
recordType = 'angle'
continue
elif ('<dihedral' in line):
record = True
recordType = 'dihedral'
continue
elif ('<improper' in line):
record = True
recordType = 'improper'
continue
elif ('<charge' in line):
record = True
recordType = 'charge'
continue
# Now we know what the variable is, append it to the dictionary data
if (record is True):
if (recordType == 'position'):
# NOTE: VELOCITIES ARE NOT NORMALISED IN THE MORPHOLOGY FILE...DO THEY NEED TO BE SCALED BY SIGMA OR NOT? CURRENTLY THEY ARE.
# Write to dictionary as floats scaled by sigma
splitLine = line.split(' ')
# Remove the "\n"
splitLine[-1] = splitLine[-1][:-1]
if (len(splitLine) == 1):
AtomDictionary[recordType].append(float(splitLine[0]) * sigma)
continue
for i in range(len(splitLine)):
splitLine[i] = float(splitLine[i]) * sigma
AtomDictionary[recordType].append(splitLine)
elif (recordType == 'mass') or (recordType == 'diameter') or (recordType == 'charge'):
# Write to dictionary as floats
splitLine = line.split(' ')
# Remove the "\n"
splitLine[-1] = splitLine[-1][:-1]
if (len(splitLine) == 1):
AtomDictionary[recordType].append(float(splitLine[0]))
continue
for i in range(len(splitLine)):
splitLine[i] = float(splitLine[i])
AtomDictionary[recordType].append(splitLine)
elif (recordType == 'image') or (recordType == 'body'):
# Write to dictionary as int
splitLine = line.split(' ')
# Remove the "\n"
splitLine[-1] = splitLine[-1][:-1]
if (len(splitLine) == 1):
AtomDictionary[recordType].append(int(splitLine[0]))
continue
for i in range(len(splitLine)):
splitLine[i] = int(splitLine[i])
AtomDictionary[recordType].append(splitLine)
elif (recordType == 'type'):
# Write to dictionary as str
splitLine = line.split(' ')
# Remove the "\n"
splitLine[-1] = splitLine[-1][:-1]
AtomDictionary[recordType].append(str(splitLine[0]))
else:
# (recordType == 'bond') or (recordType == 'angle') or (recordType == 'dihedral') or (recordType == 'improper')
# Write to dictionary as combination
splitLine = line.split(' ')
# Remove the "\n"
splitLine[-1] = splitLine[-1][:-1]
splitLine[0] = str(splitLine[0])
for i in range(1, len(splitLine)):
splitLine[i] = int(splitLine[i])
AtomDictionary[recordType].append(splitLine)
return AtomDictionary
def removeRigidBodies(inputDictionary):
print()
atomsToRemove = []
bondsToRemove = []
# First remove the anchor points
for index, typeData in enumerate(inputDictionary['type']):
if ('T' in typeData) or ('X' in typeData):
atomsToRemove.append(index)
# Then remove the bonds
for index, bondData in enumerate(inputDictionary['bond']):
if ('T' in bondData[0]) or ('X' in bondData[0]):
bondsToRemove.append(index)
for atomIndex in sorted(atomsToRemove, reverse=True):
for key in ['position', 'image', 'mass', 'diameter', 'type', 'body', 'charge']:
inputDictionary[key].pop(atomIndex)
for bondIndex in sorted(bondsToRemove, reverse=True):
inputDictionary['bond'].pop(bondIndex)
# Undo all of the rigid bodies
for index, bodyData in enumerate(inputDictionary['body']):
inputDictionary['body'][index] = -1
# Finally, update the number of atoms in the morphology
inputDictionary['natoms'] -= len(atomsToRemove)
print()
return inputDictionary
def writeMorphologyXMLETree(inputDictionary, outputFile):
print()
inputDictionary = checkWrappedPositions(inputDictionary)
systemProps = ['box']
atomProps3D = ['position', 'image']
atomProps = ['mass', 'diameter', 'type', 'body', 'charge']
constraintProps = ['bond', 'angle', 'dihedral', 'improper']
root = ET.Element('hoomd_xml', version="1.5")
root.text = '\n'
config = ET.Element('configuration', time_step=str(inputDictionary['time_step']), dimensions="3", natoms=str(inputDictionary['natoms']))
config.text = '\n'
config.tail = '\n'
for element in systemProps + atomProps3D + atomProps + constraintProps:
ET.SubElement(config, element)
config[-1].text = '\n'
config[-1].tail = '\n'
for axis in ['lx', 'ly', 'lz']:
config.find('box').attrib[axis] = str(inputDictionary[axis])
for axis in ['xy', 'xz', 'yz']:
config.find('box').attrib[axis] = str(0)
config.find('box').text = ""
config.attrib['natoms'] = str(inputDictionary['natoms'])
for atomID, atomType in enumerate(inputDictionary['type']):
for atomProp3D in atomProps3D:
config.find(atomProp3D).text += ' '.join([str(x) for x in inputDictionary[atomProp3D][atomID]]) + '\n'
config.find(atomProp3D).attrib['num'] = str(len(inputDictionary[atomProp3D]))
for atomProp in atomProps:
config.find(atomProp).text += str(inputDictionary[atomProp][atomID]) + '\n'
config.find(atomProp).attrib['num'] = str(len(inputDictionary[atomProp]))
for constraintType in constraintProps:
for constraintID, constraint in enumerate(inputDictionary[constraintType]):
config.find(constraintType).text += ' '.join([str(x) for x in inputDictionary[constraintType][constraintID]]) + '\n'
config.find(constraintType).attrib['num'] = str(len(inputDictionary[constraintType]))
root.insert(0, config)
tree = ET.ElementTree(root)
tree.write(outputFile, xml_declaration=True, encoding='UTF-8')
print()
def writeMorphologyXML(inputDictionary, outputFile):
# First, need to check the positions of the atoms to ensure that everything is correctly contained inside the box
print()
inputDictionary = checkWrappedPositions(inputDictionary)
# inputDictionary['position'], inputDictionary['image'] = pbc.shift_pbc(inputDictionary['position'], [inputDictionary['lx'], inputDictionary['ly'], inputDictionary['lz']])
# print inputDictionary['image'][:20]
# raw_input('HALT')
# Add Boiler Plate first
linesToWrite = ['<?xml version="1.0" encoding="UTF-8"?>\n', '<hoomd_xml version="1.4">\n', '<configuration time_step="0" dimensions="3" natoms="' + str(inputDictionary['natoms']) + '" >\n', '<box lx="' + str(inputDictionary['lx']) + '" ly="' + str(inputDictionary['ly']) + '" lz="' + str(inputDictionary['lz']) + '" />\n']
# Position
linesToWrite.append('<position num="' + str(inputDictionary['natoms']) + '">\n')
for positionData in inputDictionary['position']:
linesToWrite.append(" ".join(str(coord) for coord in positionData) + '\n')
linesToWrite.append('</position>\n')
# Image
linesToWrite.append('<image num="' + str(inputDictionary['natoms']) + '">\n')
for imageData in inputDictionary['image']:
linesToWrite.append(" ".join(str(coord) for coord in imageData) + '\n')
linesToWrite.append('</image>\n')
# Mass
linesToWrite.append('<mass num="' + str(inputDictionary['natoms']) + '">\n')
for massData in inputDictionary['mass']:
linesToWrite.append(str(massData) + '\n')
linesToWrite.append('</mass>\n')
# Diameter
linesToWrite.append('<diameter num="' + str(inputDictionary['natoms']) + '">\n')
for diameterData in inputDictionary['diameter']:
linesToWrite.append(str(diameterData) + '\n')
linesToWrite.append('</diameter>\n')
# Type
linesToWrite.append('<type num="' + str(inputDictionary['natoms']) + '">\n')
for typeData in inputDictionary['type']:
linesToWrite.append(str(typeData) + '\n')
linesToWrite.append('</type>\n')
# Body
linesToWrite.append('<body num="' + str(inputDictionary['natoms']) + '">\n')
for bodyData in inputDictionary['body']:
linesToWrite.append(str(bodyData) + '\n')
linesToWrite.append('</body>\n')
# Bond
linesToWrite.append('<bond num="' + str(len(inputDictionary['bond'])) + '">\n')
for bondData in inputDictionary['bond']:
linesToWrite.append(" ".join(str(coord) for coord in bondData) + '\n')
linesToWrite.append('</bond>\n')
# Angle
linesToWrite.append('<angle num="' + str(len(inputDictionary['angle'])) + '">\n')
for angleData in inputDictionary['angle']:
linesToWrite.append(" ".join(str(coord) for coord in angleData) + '\n')
linesToWrite.append('</angle>\n')
# Dihedral
linesToWrite.append('<dihedral num="' + str(len(inputDictionary['dihedral'])) + '">\n')
for dihedralData in inputDictionary['dihedral']:
linesToWrite.append(" ".join(str(coord) for coord in dihedralData) + '\n')
linesToWrite.append('</dihedral>\n')
# Improper
linesToWrite.append('<improper num="' + str(len(inputDictionary['improper'])) + '">\n')
for improperData in inputDictionary['improper']:
linesToWrite.append(" ".join(str(coord) for coord in improperData) + '\n')
linesToWrite.append('</improper>\n')
# Charge
linesToWrite.append('<charge num="' + str(inputDictionary['natoms']) + '">\n')
for chargeData in inputDictionary['charge']:
linesToWrite.append(str(chargeData) + '\n')
linesToWrite.append('</charge>\n')
linesToWrite.append('</configuration>\n')
linesToWrite.append('</hoomd_xml>\n')
with open(outputFile, 'w+') as xmlFile:
xmlFile.writelines(linesToWrite)
print()
def writePOSCARFile(inputDict, outputFile):
'''This function takes an input dictionary and converts it to a POSCAR for use in DFT calculations'''
# This POSCAR is ordered as C, S, H for Izaak.
linesToWrite = []
atomsByType = [[], [], []] # C, S, H
for atomID in range(len(inputDict['type'])):
atomType = inputDict['type'][atomID][0]
if atomType == 'C':
atomsByType[0].append(atomID)
elif atomType == 'S':
atomsByType[1].append(atomID)
elif atomType == 'H':
atomsByType[2].append(atomID)
# linesToWrite = []
# typeList = []
# freqList = []
# previousType = None
# numberOfTypes = 0
# for atomID in range(len(inputDict['type'])):
# atomType = inputDict['type'][atomID][0]
# if atomType != previousType:
# if previousType != None:
# typeList.append(previousType)
# freqList.append(numberOfTypes)
# previousType = atomType
# numberOfTypes = 1
# else:
# numberOfTypes += 1
# # Now have to add the final lot of atoms:
# typeList.append(previousType)
# freqList.append(numberOfTypes)
# Line 1 = CommentLine
slashLocs = findIndex(outputFile, '/')
linesToWrite.append(str(outputFile[slashLocs[-3] + 1:slashLocs[-2] + 1]) + str(outputFile[slashLocs[-1] + 1:]).replace('.POSCAR', '') + ' VASP input file.\n')
# Line 2 = Scale Factor
linesToWrite.append('1.000000000000\n')
# Lines 3-5 = Box Dimensions
boxDims = []
for key in ['lx', 'ly', 'lz']:
boxDims.append(inputDict[key])
boxDimsMatrix = np.diag(np.array(boxDims))
for row in boxDimsMatrix:
boxRow = ''
for element in row:
boxRow += "{:22.15f}".format(element)
linesToWrite.append(boxRow + '\n')
# Line 6 = Atom Types
# linesToWrite.append(' '.join(typeList)+'\n')
linesToWrite.append('C S H \n')
# Line 7 = Frequency of Types
# linesToWrite.append(' '.join(map(str, freqList))+'\n')
linesToWrite.append(str(len(atomsByType[0])) + ' ' + str(len(atomsByType[1])) + ' ' + str(len(atomsByType[2])) + '\n')
# Line 8 = 'Cartesian'
linesToWrite.append('Cartesian\n')
# Lines 9+ = Positions
# Positions are not set to be origin in the middle, origin is bottom left corner. As such, we need to add L/2 to each coordinate
writeOrder = []
for atomType in atomsByType:
writeOrder += atomType
# for position in inputDict['position']:
# coordinates = ''
# for axis in range(3):
# coordinates += "{:22.15f}".format(position[axis]+(boxDims[axis]/2.))
# linesToWrite.append(coordinates+'\n')
for atomID in writeOrder:
coordinates = ''
for axis in range(3):
coordinates += "{:22.15f}".format(inputDict['position'][atomID][axis] + (boxDims[axis] / 2.))
linesToWrite.append(coordinates + '\n')
with open(outputFile, 'w+') as POSCARFile:
POSCARFile.writelines(linesToWrite)
with open(outputFile.replace('POSCAR', 'pickle'), 'w+') as bondPickle:
pickle.dump(inputDict['bond'], bondPickle)
print()
def writeXYZFile(inputDict, outputFile):
'''This function takes an input dictionary and converts it to an XYZ for use in DFT calculations'''
# First line is atom numbers, second line is boiler plate
rowsToWrite = [str(inputDict['natoms']) + '\n', 'XYZ file generated from XML using helperFunctions.XMLToXYZ\n']
# Format of xyz is Type, X Pos, Y Pos, Z Pos
for atomID in range(len(inputDict['type'])):
# Note, this will break for atoms that have two-letter symbols (e.g. Al, Ca etc.)
atomType = inputDict['type'][atomID][0]
while len(atomType) < 10:
atomType += ' '
atomX = str(inputDict['position'][atomID][0])
while len(atomX) < 20:
atomX += ' '
atomY = str(inputDict['position'][atomID][1])
while len(atomY) < 20:
atomY += ' '
atomZ = str(inputDict['position'][atomID][2])
lineToWrite = atomType + atomX + atomY + atomZ + '\n'
rowsToWrite.append(lineToWrite)
with open(outputFile, 'w+') as xyzFile:
xyzFile.writelines(rowsToWrite)
print()
def createSlurmSubmissionScript(outputDir, runName, mode):
'''This function creates a slurm submission script for Kestrel from a template file sample.sh'''
queue = 'batch'
jobName = str(runName)
outputFile = outputDir + '/' + str(runName)[:-4] + '.o'
with open(os.getcwd() + '/templates/sample.sh', 'r') as template:
templateLines = template.readlines()
for lineNo in range(len(templateLines)):
if '-p batch' in templateLines[lineNo]:
# This is queue select
templateLines[lineNo] = templateLines[lineNo].replace('batch', queue)
elif 'JOBNAME' in templateLines[lineNo]:
# This is job name
templateLines[lineNo] = templateLines[lineNo].replace('JOBNAME', jobName)
elif 'OUTFILE' in templateLines[lineNo]:
# This is outfile
templateLines[lineNo] = templateLines[lineNo].replace('OUTFILE', outputFile)
elif 'CHANGEME' in templateLines[lineNo]:
# E-mail address
templateLines[lineNo] = templateLines[lineNo].replace('CHANGEME', 'mattyjones')
elif '-t 12:00:00' in templateLines[lineNo]:
# Wallclock time
templateLines[lineNo] = templateLines[lineNo].replace('12:00:00', '01:00:00')
elif 'cd /scratch/${USER}' in templateLines[lineNo]:
templateLines[lineNo] = templateLines[lineNo].replace('/scratch/${USER}', os.getcwd())
elif 'myfile.py' in templateLines[lineNo]:
# This is actual execute line
templateLines[lineNo] = templateLines[lineNo].replace('myfile.py', os.getcwd() + '/code/' + 'runHoomd.py ' + outputDir + '/' + runName)
if mode == 'cpu':
templateLines[lineNo] = templateLines[lineNo].replace('--mode=gpu --gpu=0', '--mode=cpu')
# Finally, sort out the /scratch/ space and move the output files somewhere useful
submissionScriptName = outputDir + '/' + runName[:-4] + '.sh'
with open(submissionScriptName, 'w+') as submissionScript:
submissionScript.writelines(templateLines)
return submissionScriptName
def incrementAtomIDs(originalInputDictionary, ghostDictionary, increment, modifyGhostDictionary=False):
inputDictionary = copy.deepcopy(originalInputDictionary)
constraintTypes = ['bond', 'angle', 'dihedral', 'improper']
for constraintType in constraintTypes:
for constraintNo, constraint in enumerate(inputDictionary[constraintType]):
inputDictionary[constraintType][constraintNo][1:] = [x + increment for x in inputDictionary[constraintType][constraintNo][1:]]
if modifyGhostDictionary is True:
for bondNo, bond in enumerate(ghostDictionary['bond']):
if str(bond[1])[0] == '_':
ghostDictionary['bond'][bondNo][1] = int(bond[1][1:]) + increment
if str(bond[2])[0] == '_':
ghostDictionary['bond'][bondNo][2] = int(bond[2][1:]) + increment
return inputDictionary, ghostDictionary
def scale(inputDictionary, scaleFactor):
for ID, position in enumerate(inputDictionary['position']):
# if ID == 24104:
# print "Initial Position =", inputDictionary['position'][ID], inputDictionary['image'][ID]
inputDictionary['position'][ID] = list(scaleFactor * np.array(position))
# if ID == 24104:
# print "Scaled Position =", inputDictionary['position'][ID], inputDictionary['image'][ID]
for element in ['lx', 'ly', 'lz']:
if element in inputDictionary:
inputDictionary[element] *= scaleFactor
return inputDictionary
def centre(inputDictionary, centreOfMass):
COM = np.array(centreOfMass)
for index, position in enumerate(inputDictionary['position']):
inputDictionary['position'][index] = list(position - COM)
return inputDictionary
def checkWrappedPositions(inputDictionary):
atomPositions = np.array(inputDictionary['position'])
atomImages = np.array(inputDictionary['image'])
xhi = inputDictionary['lx'] / 2.0
xlo = -inputDictionary['lx'] / 2.0
yhi = inputDictionary['ly'] / 2.0
ylo = -inputDictionary['ly'] / 2.0
zhi = inputDictionary['lz'] / 2.0
zlo = -inputDictionary['lz'] / 2.0
# tp=pbc.plain_pbc(atomPositions,(inputDictionary['lx'],inputDictionary['ly'],inputDictionary['lz']) )
# tp=pbc.plain_pbc(tp,(inputDictionary['lx'],inputDictionary['ly'],inputDictionary['lz']) )
# tp=pbc.plain_pbc(tp,(inputDictionary['lx'],inputDictionary['ly'],inputDictionary['lz']) )
# tp=pbc.plain_pbc(tp,(inputDictionary['lx'],inputDictionary['ly'],inputDictionary['lz']) )
# tp=pbc.plain_pbc(tp,(inputDictionary['lx'],inputDictionary['ly'],inputDictionary['lz']) )
# tp=pbc.plain_pbc(tp,(inputDictionary['lx'],inputDictionary['ly'],inputDictionary['lz']) )
# tp=pbc.plain_pbc(tp,(inputDictionary['lx'],inputDictionary['ly'],inputDictionary['lz']) )
# tp=pbc.plain_pbc(tp,(inputDictionary['lx'],inputDictionary['ly'],inputDictionary['lz']) )
for atomID in range(len(atomPositions)):
while atomPositions[atomID][0] > xhi:
atomPositions[atomID][0] -= inputDictionary['lx']
atomImages[atomID][0] += 1
while atomPositions[atomID][0] < xlo:
atomPositions[atomID][0] += inputDictionary['lx']
atomImages[atomID][0] -= 1
while atomPositions[atomID][1] > yhi:
atomPositions[atomID][1] -= inputDictionary['ly']
atomImages[atomID][1] += 1
while atomPositions[atomID][1] < ylo:
atomPositions[atomID][1] += inputDictionary['ly']
atomImages[atomID][1] -= 1
while atomPositions[atomID][2] > zhi:
atomPositions[atomID][2] -= inputDictionary['lz']
atomImages[atomID][2] += 1
while atomPositions[atomID][2] < zlo:
atomPositions[atomID][2] += inputDictionary['lz']
atomImages[atomID][2] -= 1
inputDictionary['position'] = list(atomPositions)
inputDictionary['image'] = list(atomImages)
# print np.sum(np.absolute(atomPositions-tp) > 0.)
return inputDictionary
def alignMolecule(inputDictionary, vectorToAlignTo):
'''This function rotates a molecule such that the vector between the first and last sulfur atoms in the chain (assumed
to be the backbone vector) is mapped to vectorToAlignTo'''
sulfurAtomIDs = []
for atomIndex, atomType in enumerate(inputDictionary['type']):
if atomType[0] == 'S':
sulfurAtomIDs.append(atomIndex)
sulfurAtomIDs.sort()
chainOrientationVector = findAxis(inputDictionary['position'][sulfurAtomIDs[0]], inputDictionary['position'][sulfurAtomIDs[-1]])
vectorToAlignTo = np.array(vectorToAlignTo)
rotationMatrix = getRotationMatrix(chainOrientationVector, vectorToAlignTo)
for atomID, pos in enumerate(inputDictionary['position']):
positionArray = np.copy(pos)
rotatedPosition = np.transpose(rotationMatrix * np.transpose(np.matrix(positionArray)))
inputDictionary['position'][atomID] = [rotatedPosition[0, 0], rotatedPosition[0, 1], rotatedPosition[0, 2]]
return inputDictionary
def cellSearchBonds(moleculeDict):
'''This function finds the bonds in the system based on the proximity of atoms to their neighbours'''
raise SystemError("THIS FUNCTION DOES NOT WORK AND IT'S NONE-TRIVIAL TO IMPLEMENT")
moleculeDict['neighbourCell'] = []
    maximumBondLength = 1.6  # Bond length in angstroms
atomIDs = np.arange(len(moleculeDict['position']))
for coordinates in moleculeDict['position']:
cellLocation = np.copy(coordinates)
moleculeDict['neighbourCell'].append(map(int, np.round(cellLocation / maximumBondLength)))
print()
neighbourCells = np.copy(moleculeDict['neighbourCell'])
print()
parallelSort(neighbourCells, atomIDs)
for i in range(len(atomIDs)):
print()
def getAAIDsByMolecule(CGtoAAIDs):
'''This function extracts the molecule AAIDs given a dictionary CGtoAAIDs which describes the mapping of all atom particles to each CG site'''
moleculeAAIDs = []
for moleculeID, CGtoAAIDDict in enumerate(CGtoAAIDs):
moleculeAAIDs.append([])
for dictionaryValue in CGtoAAIDs[moleculeID].values():
moleculeAAIDs[-1] += dictionaryValue[1]
moleculeAAIDs[-1].sort()
return moleculeAAIDs
def getsScale(outputDir, morphologyName):
morphologyFiles = os.listdir(outputDir + '/morphology')
for fileName in morphologyFiles:
if 'scaled' in fileName:
scaledXMLName = fileName
break
underscoreLocs = findIndex(scaledXMLName, '_')
inverseScaleFactor = scaledXMLName[underscoreLocs[-2] + 1:underscoreLocs[-1]]
return float(inverseScaleFactor)
def loadDict(masterDict, moleculeIDs, bondPickleName):
'''This function generates a molecule dictionary by picking the relevant data from a masterDict using a list of atomIDs given by moleculeIDs'''
moleculeDict = {'position': [], 'unwrapped_position': [], 'type': [], 'diameter': [], 'image': [], 'charge': [], 'mass': []}
# First get atom-specific properties
for atomID in moleculeIDs:
for key in moleculeDict.keys():
moleculeDict[key].append(masterDict[key][atomID])
# Then add in the simulation properties
for key in ['lx', 'ly', 'lz', 'xy', 'xz', 'yz', 'dimensions']:
try:
moleculeDict[key] = masterDict[key]
except:
continue
# Then load the relevant bonds
with open(bondPickleName, 'r') as bondPickle:
moleculeDict['bond'] = pickle.load(bondPickle)
# Now need to unwrap the coordinates
# moleculeDict = addUnwrappedPositions(moleculeDict)
# # Set the unwrapped coordinates to the default 'position' (saves time on rewriting some analyseMolecules functions and shouldn't affect anything)
# moleculeDict['position'] = moleculeDict['unwrapped_position']
return moleculeDict
def loadPoscar(inputFilePath):
'''This function loads a poscar file located at inputFilePath, and creates a dictionary of the atomic types and positions.
It also loads the pickle file containing the bond information and adds it to the dictionary.'''
moleculeDict = {'position': [], 'type': []}
with open(inputFilePath, 'r') as poscarFile:
poscarData = poscarFile.readlines()
simDims = []
for unitCellLine in poscarData[2:5]:
simDims.append([])
for coordinate in unitCellLine[:-1].split(' '):
if len(coordinate) > 0:
simDims[-1].append(float(coordinate))
moleculeDict['lx'] = simDims[0][0]
moleculeDict['ly'] = simDims[1][1]
moleculeDict['lz'] = simDims[2][2]
simBoxDims = ['lx', 'ly', 'lz']
typeList = poscarData[5].split('\n')[0].split(' ')
freqList = map(int, poscarData[6].split('\n')[0].split(' '))
for i in range(len(typeList)):
if len(typeList[i]) != 0: # Catch for the extra space I had to put in to make VMD behave properly
moleculeDict['type'] += [typeList[i]] * freqList[i]
for atomCoords in poscarData[8:]:
moleculeDict['position'].append([])
for coordinate in atomCoords.split('\n')[0].split(' '):
coordinatesToWrite = []
if len(coordinate) > 0:
coordinatesToWrite.append(float(coordinate))
for i in range(len(coordinatesToWrite)):
moleculeDict['position'][-1].append(coordinatesToWrite[i] - (moleculeDict[simBoxDims[i]] / 2.0))
with open(inputFilePath.replace('POSCAR', 'pickle'), 'r') as bondPickle:
moleculeDict['bond'] = pickle.load(bondPickle)
moleculeDict = addMasses(moleculeDict)
return moleculeDict
def checkORCAFileStructure(outputDir):
'''This function checks that the correct directories are in place for the ORCA transfer-integral calculation and KMC simulations'''
morphologyDirList = os.listdir(outputDir)
if 'KMC' not in morphologyDirList:
os.makedirs(outputDir + '/KMC')
if 'chromophores' not in morphologyDirList:
print()
os.makedirs(outputDir + '/chromophores')
os.makedirs(outputDir + '/chromophores/inputORCA')
os.makedirs(outputDir + '/chromophores/inputORCA/single')
os.makedirs(outputDir + '/chromophores/inputORCA/pair')
os.makedirs(outputDir + '/chromophores/outputORCA')
os.makedirs(outputDir + '/chromophores/outputORCA/single')
os.makedirs(outputDir + '/chromophores/outputORCA/pair')
else:
chromophoresDirList = os.listdir(outputDir + '/chromophores')
if 'inputORCA' not in chromophoresDirList:
print()
os.makedirs(outputDir + '/chromophores/inputORCA')
os.makedirs(outputDir + '/chromophores/inputORCA/single')
os.makedirs(outputDir + '/chromophores/inputORCA/pair')
else:
inputDirList = os.listdir(outputDir + '/chromophores/inputORCA')
if 'single' not in inputDirList:
os.makedirs(outputDir + '/chromophores/inputORCA/single')
if 'pair' not in inputDirList:
os.makedirs(outputDir + '/chromophores/inputORCA/pair')
if 'outputORCA' not in chromophoresDirList:
print()
os.makedirs(outputDir + '/chromophores/outputORCA')
os.makedirs(outputDir + '/chromophores/outputORCA/single')
os.makedirs(outputDir + '/chromophores/outputORCA/pair')
else:
outputDirList = os.listdir(outputDir + '/chromophores/outputORCA')
if 'single' not in outputDirList:
os.makedirs(outputDir + '/chromophores/outputORCA/single')
if 'pair' not in outputDirList:
os.makedirs(outputDir + '/chromophores/outputORCA/pair')
def writeORCAInp(inputDictList, outputDir, mode):
'''This function loads the ORCA input template and creates the segment pair ORCA inputs for this morphology, for running later'''
chromophore1 = inputDictList[0]
chromo1Name = str(chromophore1['realChromoID'])
while len(chromo1Name) < 4:
chromo1Name = '0' + chromo1Name
if mode == 'pair':
chromophore2 = inputDictList[1]
# First check that the file doesn't already exist
chromo2Name = str(chromophore2['realChromoID'])
while len(chromo2Name) < 4:
chromo2Name = '0' + chromo2Name
ORCAFileName = 'chromo' + chromo1Name + '_chromo' + chromo2Name + '.inp'
elif mode == 'single':
ORCAFileName = 'chromo' + chromo1Name + '.inp'
# Check by opening the file - saves time on regenerating the os.listdirs list for many thousands of files
try:
with open(outputDir + '/chromophores/inputORCA/' + mode + '/' + ORCAFileName, 'r'):
fileExists = True
except IOError:
fileExists = False
inputFileName = outputDir + '/chromophores/inputORCA/' + mode + '/' + ORCAFileName
if fileExists is True:
print()
return
print()
inputFileName = inputFileName.replace('.inp', '_2.inp')
# chromophore1COM = calcCOM(chromophore1['position'], chromophore1['mass'])
# if mode == 'pair':
# # Centre the dimer pair at the origin
# chromophore2COM = calcCOM(chromophore2['position'], chromophore2['mass'])
# COM = calcCOM(chromophore1['position']+chromophore2['position'], chromophore1['mass']+chromophore2['mass'])
# chromophore1 = centre(chromophore1, COM-chromophore1COM)
# chromophore2 = centre(chromophore2, COM-chromophore2COM)
# elif mode == 'single':
# # Centre the chromophore at the origin
# COM = chromophore1COM
# chromophore1 = centre(chromophore1, COM)
# chromophore1 = centre(chromophore1, COM)
# if mode == 'pair':
# chromophore2 = centre(chromophore2, COM)
# Now write the file
with open(os.getcwd() + '/templates/template.inp', 'r') as templateFile:
inpFileLines = templateFile.readlines()
linesToWrite = []
for atomID, atomCoords in enumerate(chromophore1['position']):
thisAtomData = ' ' + chromophore1['type'][atomID][0] + ' ' + ' '.join(map(str, atomCoords)) + '\n'
linesToWrite.append(thisAtomData)
if mode == 'pair':
for atomID, atomCoords in enumerate(chromophore2['position']):
thisAtomData = ' ' + chromophore2['type'][atomID][0] + ' ' + ' '.join(map(str, atomCoords)) + '\n'
linesToWrite.append(thisAtomData)
linesToWrite = list(set(linesToWrite)) # Randomly the code adds a repeat line which breaks ORCA. No idea why it does this, so this should fix it
inpFileLines[-1:-1] = linesToWrite
with open(inputFileName, 'w+') as ORCAInputFile:
ORCAInputFile.writelines(inpFileLines)
print()
if fileExists is True:
print()
raw_input("Hit return to continue...")
def getCPUCores():
# Determine the number of available processors, either by querying the SLURM_NPROCS environment variable, or by using multiprocessing to count the number of visible CPUs.
try:
procIDs = list(np.arange(int(os.environ.get('SLURM_NPROCS'))))
except (AttributeError, TypeError):
# Was not loaded using SLURM, so use all physical processors
procIDs = list(np.arange(mp.cpu_count()))
return procIDs
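# Example (a sketch): with SLURM_NPROCS=4 set in the environment this returns [0, 1, 2, 3];
# outside SLURM it falls back to one ID per CPU reported by multiprocessing.cpu_count().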
def writeToFile(logFile, stringList, mode='logFile'):
if mode == 'outputFile':
openAs = 'w+'
else:
openAs = 'a+'
with open(logFile, openAs) as logWrite:
for line in stringList:
logWrite.writelines(line + '\n')
def loadPickle(pickleLocation):
print()
with open(pickleLocation, 'r') as pickleFile:
(AAMorphologyDict, CGMorphologyDict, CGToAAIDMaster, parameterDict, chromophoreList, carrierList) = pickle.load(pickleFile)
print()
return AAMorphologyDict, CGMorphologyDict, CGToAAIDMaster, parameterDict, chromophoreList, carrierList
def writePickle(toPickle, pickleFileName):
print()
with open(pickleFileName, 'w+') as pickleFile:
pickle.dump(toPickle, pickleFile)
print()
def obtainBondedList(bondList):
    # Create a lookup table (a "neighbour list") of all connected atoms, stored in bondedAtoms
bondedAtoms = {}
for bond in bondList:
if bond[1] not in bondedAtoms:
bondedAtoms[bond[1]] = [bond[2]]
else:
bondedAtoms[bond[1]].append(bond[2])
if bond[2] not in bondedAtoms:
bondedAtoms[bond[2]] = [bond[1]]
else:
bondedAtoms[bond[2]].append(bond[1])
return bondedAtoms
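# Example (a sketch with hypothetical bonds): each bond is [type, atomA, atomB], so
#   obtainBondedList([['C-C', 0, 1], ['C-H', 1, 2]])  # -> {0: [1], 1: [0, 2], 2: [1]}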
def convertStringToInt(x):
for i in range(len(x)):
try:
return int(x[i:])
except:
continue
return 99999
def TEMPAddBondedChromos(chromophoreList, CGMorphologyDict):
t0 = T.time()
for chromo1ID, chromo1 in enumerate(chromophoreList):
print()
for chromo2ID in [x[0] for x in chromo1.neighbours]:
chromo2 = chromophoreList[chromo2ID]
for bond in CGMorphologyDict['bond']:
if ((bond[1] in chromo1.CGIDs) and (bond[2] in chromo2.CGIDs)) or ((bond[2] in chromo1.CGIDs) and (bond[1] in chromo2.CGIDs)):
if chromo1.ID not in chromo2.bondedChromos:
chromo2.bondedChromos.append(chromo1.ID)
if chromo2.ID not in chromo1.bondedChromos:
chromo1.bondedChromos.append(chromo2.ID)
break
t1 = T.time()
print()
return chromophoreList
| matty-jones/MorphCT | tests/assets/update_pickle/MCT1.0_pickle/code/helperFunctions.py | Python | gpl-3.0 | 53,553 |
"""Common test support for all numpy test scripts.
This single module should provide all the common functionality for numpy tests
in a single location, so that test scripts can just import it and work right
away.
"""
from unittest import TestCase
from ._private.utils import *
from ._private.utils import (_assert_valid_refcount, _gen_alignment_data,
IS_PYSTON)
from ._private import decorators as dec
from ._private.nosetester import (
run_module_suite, NoseTester as Tester
)
__all__ = _private.utils.__all__ + ['TestCase', 'run_module_suite']
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
| simongibbons/numpy | numpy/testing/__init__.py | Python | bsd-3-clause | 680 |
from rdkit import Chem
from rdkit import rdBase
from rdkit import RDConfig
import os
from rdkit.Chem import rdMolDescriptors as rdMD
from rdkit.Chem import AllChem
import time
def get3D(m,is3d):
if not is3d:
m = Chem.AddHs(m)
        # Embed a 3D conformer with RDKit's ETKDG method (fixed random seed for reproducibility).
ps = AllChem.ETKDG()
ps.randomSeed = 0xf00d
AllChem.EmbedMolecule(m,ps)
r= rdMD.CalcAUTOCORR3D(m)+rdMD.CalcRDF(m)+rdMD.CalcMORSE(m)+rdMD.CalcWHIM(m)+rdMD.CalcGETAWAY(m, precision=0.001)
return r
def generateAll():
filename='/Users/GVALMTGG/Github/rdkit_mine/Code/GraphMol/Descriptors/test_data/PBF_egfr.sdf'
suppl = Chem.SDMolSupplier(filename,removeHs=False)
mols = [x for x in suppl]
start = time.time()
for m in mols:
r= get3D(m,True)
print(r)
end = time.time()
print end-start
def simple_case():
start = time.time()
smi = 'CCC(C)COCCCC'
m = Chem.MolFromSmiles(smi)
T = get3D(m,False)
print T
end = time.time()
print end-start
if(__name__=='__main__'):
  # FIX: We need to actually add some tests here, but this does not need
  # to be done until the C++ code and tests are straightened out.
generateAll();
start = time.time()
smi = 'CCC(C)COCCCC'
m = Chem.MolFromSmiles(smi)
T = get3D(m,False)
print T
end = time.time()
print end-start
| ptosco/rdkit | Code/GraphMol/Descriptors/test3D.py | Python | bsd-3-clause | 1,389 |
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Invenio mail sending utilities. send_email() is the main API function
people should be using; just check out its docstring.
"""
__revision__ = "$Id$"
from time import sleep
import re
import os
import sys
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email import Encoders
from email.MIMEImage import MIMEImage
from email.Utils import formatdate
from cStringIO import StringIO
from flask import g
from formatter import DumbWriter, AbstractFormatter
from flask.ext.email.message import EmailMultiAlternatives, EmailMessage
from invenio.config import \
CFG_EMAIL_BACKEND, \
CFG_SITE_SUPPORT_EMAIL, \
CFG_SITE_URL, \
CFG_SITE_LANG, \
CFG_SITE_ADMIN_EMAIL, \
CFG_MISCUTIL_SMTP_HOST, \
CFG_MISCUTIL_SMTP_PORT, \
CFG_VERSION, \
CFG_DEVEL_SITE, \
CFG_LOGDIR
from invenio.config import CFG_MISCUTIL_SMTP_HOST, CFG_MISCUTIL_SMTP_PORT
try:
from invenio.config import \
CFG_MISCUTIL_SMTP_USER,\
CFG_MISCUTIL_SMTP_PASS,\
CFG_MISCUTIL_SMTP_TLS
except ImportError:
CFG_MISCUTIL_SMTP_USER = ''
CFG_MISCUTIL_SMTP_PASS = ''
CFG_MISCUTIL_SMTP_TLS = False
from invenio.errorlib import register_exception
from invenio.miscutil_config import InvenioMiscUtilError
from invenio.jinja2utils import render_template_to_string
from invenio.webinterface_handler_flask_utils import unicodifier
def initialize_email_backend(app):
"""
Prepare application config from Invenio configuration.
@see: https://flask-email.readthedocs.org/en/latest/#configuration
"""
app.config['DEFAULT_FROM_EMAIL'] = CFG_SITE_SUPPORT_EMAIL
app.config['SERVER_EMAIL'] = CFG_SITE_ADMIN_EMAIL
app.config['ADMINS'] = (CFG_SITE_ADMIN_EMAIL, )
app.config['MANAGERS'] = (CFG_SITE_SUPPORT_EMAIL, )
if app.config.get('EMAIL_BACKEND') is None:
if app.config.get('CFG_EMAIL_BACKEND') or CFG_EMAIL_BACKEND:
app.config['EMAIL_BACKEND'] = app.config.get('CFG_EMAIL_BACKEND',
CFG_EMAIL_BACKEND)
elif CFG_MISCUTIL_SMTP_HOST and CFG_MISCUTIL_SMTP_PORT:
app.config['EMAIL_BACKEND'] = 'flask.ext.email.backends.smtp.Mail'
# Defaults to 'flask.ext.email.backends.locmem.Mail'
app.config['EMAIL_HOST'] = CFG_MISCUTIL_SMTP_HOST
app.config['EMAIL_PORT'] = CFG_MISCUTIL_SMTP_PORT
app.config['EMAIL_HOST_USER'] = CFG_MISCUTIL_SMTP_USER
app.config['EMAIL_HOST_PASSWORD'] = CFG_MISCUTIL_SMTP_PASS
app.config['EMAIL_USE_TLS'] = CFG_MISCUTIL_SMTP_TLS
# app.config['EMAIL_USE_SSL']: defaults to False
app.config['EMAIL_FILE_PATH'] = CFG_LOGDIR
def scheduled_send_email(fromaddr,
toaddr,
subject="",
content="",
header=None,
footer=None,
copy_to_admin=0,
attempt_times=1,
attempt_sleeptime=10,
user=None,
other_bibtasklet_arguments=None,
replytoaddr=""):
"""
Like send_email, but send an email via the bibsched
infrastructure.
@param fromaddr: sender
@type fromaddr: string
@param toaddr: list of receivers
@type toaddr: string (comma separated) or list of strings
@param subject: the subject
@param content: the body of the message
@param header: optional header, otherwise default is used
@param footer: optional footer, otherwise default is used
@param copy_to_admin: set to 1 in order to send email the admins
@param attempt_times: try at least n times before giving up sending
@param attempt_sleeptime: number of seconds to sleep between two attempts
@param user: the user name to user when scheduling the bibtasklet. If
None, the sender will be used
@param other_bibtasklet_arguments: other arguments to append to the list
of arguments to the call of task_low_level_submission
@param replytoaddr: [string or list-of-strings] to be used for the
reply-to header of the email (if string, then
receivers are separated by ',')
@return: the scheduled bibtasklet
"""
from invenio.bibtask import task_low_level_submission
if not isinstance(toaddr, (unicode, str)):
toaddr = ','.join(toaddr)
if not isinstance(replytoaddr, (unicode, str)):
replytoaddr = ','.join(replytoaddr)
toaddr = remove_temporary_emails(toaddr)
if user is None:
user = fromaddr
if other_bibtasklet_arguments is None:
other_bibtasklet_arguments = []
else:
other_bibtasklet_arguments = list(other_bibtasklet_arguments)
if not header is None:
other_bibtasklet_arguments.extend(("-a", "header=%s" % header))
if not footer is None:
other_bibtasklet_arguments.extend(("-a", "footer=%s" % footer))
return task_low_level_submission(
"bibtasklet", user, "-T", "bst_send_email",
"-a", "fromaddr=%s" % fromaddr,
"-a", "toaddr=%s" % toaddr,
"-a", "replytoaddr=%s" % replytoaddr,
"-a", "subject=%s" % subject,
"-a", "content=%s" % content,
"-a", "copy_to_admin=%s" % copy_to_admin,
"-a", "attempt_times=%s" % attempt_times,
"-a", "attempt_sleeptime=%s" % attempt_sleeptime,
*other_bibtasklet_arguments)
def send_email(fromaddr,
toaddr,
subject="",
content="",
html_content='',
html_images=None,
header=None,
footer=None,
html_header=None,
html_footer=None,
copy_to_admin=0,
attempt_times=1,
attempt_sleeptime=10,
debug_level=0,
ln=CFG_SITE_LANG,
charset=None,
replytoaddr="",
attachments=None
):
"""Send a forged email to TOADDR from FROMADDR with message created from subjet, content and possibly
header and footer.
@param fromaddr: [string] sender
@param toaddr: [string or list-of-strings] list of receivers (if string, then
receivers are separated by ',')
@param subject: [string] subject of the email
@param content: [string] content of the email
@param html_content: [string] html version of the email
@param html_images: [dict] dictionary of image id, image path
@param header: [string] header to add, None for the Default
@param footer: [string] footer to add, None for the Default
@param html_header: [string] header to add to the html part, None for the Default
@param html_footer: [string] footer to add to the html part, None for the Default
@param copy_to_admin: [int] if 1 add CFG_SITE_ADMIN_EMAIL in receivers
@param attempt_times: [int] number of tries
@param attempt_sleeptime: [int] seconds in between tries
@param debug_level: [int] debug level
@param ln: [string] invenio language
@param charset: [string] the content charset. By default is None which means
to try to encode the email as ascii, then latin1 then utf-8.
@param replytoaddr: [string or list-of-strings] to be used for the
reply-to header of the email (if string, then
receivers are separated by ',')
@param attachments: list of paths of files to be attached. Alternatively,
every element of the list could be a tuple: (filename, mimetype)
    If sending fails, try to send it up to ATTEMPT_TIMES times, and wait for
                   ATTEMPT_SLEEPTIME seconds in between tries.
    e.g.:
    send_email('[email protected]', '[email protected]', 'Let\'s try!', 'check 1234', '<strong>check</strong> <em>1234</em><img src="cid:image1">', {'image1': '/tmp/quantum.jpg'})
@return: [bool]: True if email was sent okay, False if it was not.
"""
if html_images is None:
html_images = {}
if type(toaddr) is str:
toaddr = toaddr.strip().split(',')
toaddr = remove_temporary_emails(toaddr)
usebcc = len(toaddr.split(',')) > 1 # More than one address, let's use Bcc in place of To
    if copy_to_admin:
        if CFG_SITE_ADMIN_EMAIL not in toaddr:
            # toaddr is a comma-separated string at this point, so concatenate
            # rather than append.
            toaddr += ',' + CFG_SITE_ADMIN_EMAIL
body = forge_email(fromaddr, toaddr, subject, content, html_content,
html_images, usebcc, header, footer, html_header,
html_footer, ln, charset, replytoaddr, attachments)
if attempt_times < 1 or not toaddr:
try:
raise InvenioMiscUtilError(g._('The system is not attempting to send an email from %s, to %s, with body %s.') % (fromaddr, toaddr, body))
except InvenioMiscUtilError:
register_exception()
return False
sent = False
while not sent and attempt_times > 0:
try:
sent = body.send()
except Exception:
register_exception()
if debug_level > 1:
try:
raise InvenioMiscUtilError(g._('Error in sending message. Waiting %s seconds. Exception is %s, while sending email from %s to %s with body %s.') % (attempt_sleeptime, sys.exc_info()[0], fromaddr, toaddr, body))
except InvenioMiscUtilError:
register_exception()
if not sent:
attempt_times -= 1
if attempt_times > 0: # sleep only if we shall retry again
sleep(attempt_sleeptime)
if not sent:
try:
raise InvenioMiscUtilError(g._('Error in sending email from %s to %s with body %s.') % (fromaddr, toaddr, body))
except InvenioMiscUtilError:
register_exception()
return sent
def attach_embed_image(email, image_id, image_path):
"""
Attach an image to the email.
"""
with open(image_path, 'rb') as image_data:
img = MIMEImage(image_data.read())
img.add_header('Content-ID', '<%s>' % image_id)
img.add_header('Content-Disposition', 'attachment', filename=os.path.split(image_path)[1])
email.attach(img)
def forge_email(fromaddr, toaddr, subject, content, html_content='',
html_images=None, usebcc=False, header=None, footer=None,
html_header=None, html_footer=None, ln=CFG_SITE_LANG,
charset=None, replytoaddr="", attachments=None):
"""Prepare email. Add header and footer if needed.
@param fromaddr: [string] sender
@param toaddr: [string or list-of-strings] list of receivers (if string, then
receivers are separated by ',')
@param usebcc: [bool] True for using Bcc in place of To
@param subject: [string] subject of the email
@param content: [string] content of the email
@param html_content: [string] html version of the email
@param html_images: [dict] dictionary of image id, image path
@param header: [string] None for the default header
@param footer: [string] None for the default footer
@param ln: language
    @param charset: [string] the content charset. By default is None which means
        to try to encode the email as ascii, then latin1 then utf-8.
@param replytoaddr: [string or list-of-strings] to be used for the
reply-to header of the email (if string, then
receivers are separated by ',')
@param attachments: list of paths of files to be attached. Alternatively,
every element of the list could be a tuple: (filename, mimetype)
@return: forged email as a string"""
if html_images is None:
html_images = {}
content = render_template_to_string('mail_text.tpl',
content=unicodifier(content),
header=unicodifier(header),
footer=unicodifier(footer)
).encode('utf8')
if type(toaddr) is not str:
toaddr = ','.join(toaddr)
if type(replytoaddr) is not str:
replytoaddr = ','.join(replytoaddr)
toaddr = remove_temporary_emails(toaddr)
headers = {}
kwargs = {'to': [], 'cc': [], 'bcc': []}
if replytoaddr:
headers['Reply-To'] = replytoaddr
if usebcc:
headers['Bcc'] = toaddr
kwargs['bcc'] = toaddr.split(',')
kwargs['to'] = ['Undisclosed.Recipients:']
else:
kwargs['to'] = toaddr.split(',')
headers['From'] = fromaddr
headers['Date'] = formatdate(localtime=True)
headers['User-Agent'] = 'Invenio %s at %s' % (CFG_VERSION, CFG_SITE_URL)
if html_content:
html_content = render_template_to_string(
'mail_html.tpl',
content=unicodifier(html_content),
header=unicodifier(html_header),
footer=unicodifier(html_footer)
).encode('utf8')
msg_root = EmailMultiAlternatives(subject=subject, body=content,
from_email=fromaddr,
headers=headers, **kwargs)
msg_root.attach_alternative(html_content, "text/html")
#if not html_images:
# # No image? Attach the HTML to the root
# msg_root.attach(msg_text)
#else:
if html_images:
# Image(s)? Attach the HTML and image(s) as children of a
# "related" block
msg_related = MIMEMultipart('related')
#msg_related.attach(msg_text)
for image_id, image_path in html_images.iteritems():
attach_embed_image(msg_related, image_id, image_path)
msg_root.attach(msg_related)
else:
msg_root = EmailMessage(subject=subject, body=content,
from_email=fromaddr, headers=headers, **kwargs)
if attachments:
from invenio.bibdocfile import _mimes, guess_format_from_url
#old_msg_root = msg_root
#msg_root = MIMEMultipart()
#msg_root.attach(old_msg_root)
for attachment in attachments:
try:
mime = None
if type(attachment) in (list, tuple):
attachment, mime = attachment
if mime is None:
## Automatic guessing of mimetype
mime = _mimes.guess_type(attachment)[0]
if mime is None:
ext = guess_format_from_url(attachment)
mime = _mimes.guess_type("foo" + ext)[0]
if not mime:
mime = 'application/octet-stream'
part = MIMEBase(*mime.split('/', 1))
part.set_payload(open(attachment, 'rb').read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(attachment))
msg_root.attach(part)
except:
register_exception(alert_admin=True, prefix="Can't attach %s" % attachment)
return msg_root
RE_NEWLINES = re.compile(r'<br\s*/?>|</p>', re.I)
RE_SPACES = re.compile(r'\s+')
RE_HTML_TAGS = re.compile(r'<.+?>')
def email_strip_html(html_content):
"""Strip html tags from html_content, trying to respect formatting."""
html_content = RE_SPACES.sub(' ', html_content)
html_content = RE_NEWLINES.sub('\n', html_content)
html_content = RE_HTML_TAGS.sub('', html_content)
html_content = html_content.split('\n')
out = StringIO()
out_format = AbstractFormatter(DumbWriter(out))
for row in html_content:
out_format.add_flowing_data(row)
out_format.end_paragraph(1)
return out.getvalue()
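# Illustrative sketch (the HTML snippet is invented): email_strip_html() keeps
# paragraph and <br> breaks as newlines while dropping every other tag, e.g.
#
#   plain = email_strip_html('<p>Dear user,</p><p>your record<br/>was updated.</p>')
#
# which yields roughly 'Dear user,\n\nyour record\nwas updated.' -- the exact
# wrapping depends on DumbWriter's default 72-column output.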
def remove_temporary_emails(emails):
"""
    Removes the temporary emails (which are constructed randomly when a user logs in
with an external authentication provider which doesn't supply an email
address) from an email list.
@param emails: email list (if string, then receivers are separated by ',')
@type emails: str|[str]
@rtype: str
"""
from invenio.access_control_config import CFG_TEMP_EMAIL_ADDRESS
if not isinstance(emails, (str, unicode)):
emails = ','.join(emails)
# Remove all of the spaces
emails = emails.replace(' ', '')
# Remove all of the emails formatted like CFG_TEMP_EMAIL_ADDRESS
    emails = re.sub((CFG_TEMP_EMAIL_ADDRESS % r'\w+') + '(,|$)', '', emails,
                    flags=re.IGNORECASE)
# Remove all consecutive commas
emails = re.sub(',+', ',', emails)
    if emails and emails[0] == ',':
        # Remove the comma at the beginning of the string
        emails = emails[1:]
    if emails and emails[-1] == ',':
        # Remove the comma at the end of the string
        emails = emails[:-1]
return emails
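# Illustrative sketch (addresses invented): remove_temporary_emails() accepts
# either a list or a comma-separated string and always returns a normalised
# comma-separated string, with addresses matching CFG_TEMP_EMAIL_ADDRESS
# dropped along the way, e.g.
#
#   remove_temporary_emails(['[email protected] ', '[email protected]'])
#   # -> '[email protected],[email protected]' (assuming neither matches the temporary pattern)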
| EUDAT-B2SHARE/invenio-old | modules/miscutil/lib/mailutils.py | Python | gpl-2.0 | 17,746 |
#!/usr/bin/env python
# coding=utf-8
import tornado.web
import common.result as CR
class DashHandler(tornado.web.RequestHandler):
def get(self):
self.render("dashboard.html")
def post(self):
select = self.get_argument("select","")
data=CR.result2json('./common/FKPIDB.txt', sep=",")
print(select)
self.write(data)
| gongshijun/ystechweb | handlers/dashboard.py | Python | gpl-3.0 | 374 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from datetime import datetime
from typing import Any, Dict
import httpx
from airflow.decorators import dag, task
from airflow.models.baseoperator import BaseOperator
from airflow.operators.email import EmailOperator
from airflow.utils.context import Context
class GetRequestOperator(BaseOperator):
"""Custom operator to send GET request to provided url"""
def __init__(self, *, url: str, **kwargs):
super().__init__(**kwargs)
self.url = url
def execute(self, context: Context):
return httpx.get(self.url).json()
# [START dag_decorator_usage]
@dag(schedule_interval=None, start_date=datetime(2021, 1, 1), catchup=False, tags=['example'])
def example_dag_decorator(email: str = '[email protected]'):
"""
DAG to send server IP to email.
:param email: Email to send IP to. Defaults to [email protected].
"""
get_ip = GetRequestOperator(task_id='get_ip', url="http://httpbin.org/get")
@task(multiple_outputs=True)
def prepare_email(raw_json: Dict[str, Any]) -> Dict[str, str]:
external_ip = raw_json['origin']
return {
'subject': f'Server connected from {external_ip}',
'body': f'Seems like today your server executing Airflow is connected from IP {external_ip}<br>',
}
email_info = prepare_email(get_ip.output)
EmailOperator(
task_id='send_email', to=email, subject=email_info['subject'], html_content=email_info['body']
)
dag = example_dag_decorator()
# [END dag_decorator_usage]
| Acehaidrey/incubator-airflow | airflow/example_dags/example_dag_decorator.py | Python | apache-2.0 | 2,314 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import json
from apps.publish import init_app
from superdesk.publish.subscribers import SUBSCRIBER_TYPES
from superdesk.tests import TestCase
from .aap_newscentre_formatter import AAPNewscentreFormatter
class AapNewscentreFormatterTest(TestCase):
subscribers = [{"_id": "1", "name": "newscentre", "subscriber_type": SUBSCRIBER_TYPES.WIRE, "media_type": "media",
"is_active": True, "sequence_num_settings": {"max": 10, "min": 1},
"destinations": [{"name": "AAP NEWSCENTRE", "delivery_type": "email", "format": "AAP NEWSCENTRE",
"config": {"recipients": "[email protected]"}
}]
}]
desks = [{'_id': 1, 'name': 'National'},
{'_id': 2, 'name': 'Sports'},
{'_id': 3, 'name': 'Finance'}]
article = {
'source': 'AAP',
'anpa_category': [{'qcode': 'a'}],
'headline': 'This is a test headline',
'byline': 'joe',
'slugline': 'slugline',
'subject': [{'qcode': '02011001'}],
'anpa_take_key': 'take_key',
'unique_id': '1',
'format': 'preserved',
'type': 'text',
'body_html': '<p>The story body</p>',
'word_count': '1',
'priority': 1,
'place': [{'qcode': 'VIC', 'name': 'VIC'}],
'genre': []
}
vocab = [{'_id': 'categories', 'items': [
{'is_active': True, 'name': 'Overseas Sport', 'qcode': 'S', 'subject': '15000000'},
{'is_active': True, 'name': 'Finance', 'qcode': 'F', 'subject': '04000000'},
{'is_active': True, 'name': 'General News', 'qcode': 'A'}
]}, {'_id': 'geographical_restrictions', 'items': [{'name': 'New South Wales', 'qcode': 'NSW', 'is_active': True},
{'name': 'Victoria', 'qcode': 'VIC', 'is_active': True}]}]
def setUp(self):
self.app.data.insert('subscribers', self.subscribers)
self.app.data.insert('vocabularies', self.vocab)
self.app.data.insert('desks', self.desks)
init_app(self.app)
def testNewscentreFormatterWithNoSelector(self):
subscriber = self.app.data.find('subscribers', None, None)[0]
f = AAPNewscentreFormatter()
seq, item = f.format(self.article, subscriber)[0]
item = json.loads(item)
self.assertGreater(int(seq), 0)
self.assertEqual(seq, item['sequence'])
item.pop('sequence')
self.assertDictEqual(item,
{'category': 'A', 'fullStory': 1, 'ident': '0',
'headline': 'VIC:This is a test headline', 'originator': 'AAP',
'take_key': 'take_key', 'article_text': ' By joe\r\n\r\nThe story body\r\nAAP',
'usn': '1', 'subject_matter': 'international law', 'news_item_type': 'News',
'subject_reference': '02011001', 'subject': 'crime, law and justice',
'subject_detail': 'international court or tribunal',
'selector_codes': ' ',
'genre': 'Current', 'keyword': 'slugline', 'author': 'joe'})
def testNewscentreHtmlToText(self):
article = {
'source': 'AAP',
'anpa_category': [{'qcode': 'A'}],
'headline': 'This is a test headline',
'byline': 'joe',
'slugline': 'slugline',
'subject': [{'qcode': '02011001'}],
'anpa_take_key': 'take_key',
'unique_id': '1',
'type': 'text',
'body_html': '<p>The story body line 1<br>Line 2</p>'
'<p>abcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abcdefghi abcdefghi more</p>',
'word_count': '1',
'priority': 1
}
subscriber = self.app.data.find('subscribers', None, None)[0]
f = AAPNewscentreFormatter()
seq, item = f.format(article, subscriber)[0]
item = json.loads(item)
expected = ' By joe\r\n\r\n The story body line 1\r\nLine 2\r\n\r\n abcdefghi ' \
'abcdefghi abcdefghi abcdefghi abcdefghi ' + \
'abcdefghi abcdefghi abcdefghi more\r\n\r\n\r\nAAP'
self.assertEqual(item['article_text'], expected)
def testMultipleCategories(self):
article = {
'source': 'AAP',
'anpa_category': [{'name': 'Finance', 'qcode': 'F'},
{'name': 'Overseas Sport', 'qcode': 'S'}],
'headline': 'This is a test headline',
'byline': 'joe',
'slugline': 'slugline',
'subject': [{'qcode': '04001005'}, {'qcode': '15011002'}],
'anpa_take_key': 'take_key',
'unique_id': '1',
'type': 'text',
'body_html': '<p>body</p>',
'word_count': '1',
'priority': 1,
'task': {'desk': 1},
'place': [{'qcode': 'VIC', 'name': 'VIC'}]
}
subscriber = self.app.data.find('subscribers', None, None)[0]
f = AAPNewscentreFormatter()
docs = f.format(article, subscriber, ['Aaa', 'Bbb', 'Ccc'])
self.assertEqual(len(docs), 2)
for seq, doc in docs:
doc = json.loads(doc)
if doc['category'] == 'S':
self.assertEqual(doc['subject_reference'], '15011002')
self.assertEqual(doc['subject_detail'], 'four-man sled')
self.assertEqual(doc['headline'], 'VIC:This is a test headline')
if doc['category'] == 'F':
self.assertEqual(doc['subject_reference'], '04001005')
self.assertEqual(doc['subject_detail'], 'viniculture')
self.assertEqual(doc['headline'], 'VIC:This is a test headline')
codes = set(doc['selector_codes'].split(' '))
expected_codes = set('AAA BBB CCC'.split(' '))
self.assertSetEqual(codes, expected_codes)
def testNewscentreFormatterNoSubject(self):
article = {
'source': 'AAP',
'anpa_category': [{'qcode': 'a'}],
'headline': 'This is a test headline',
'byline': 'joe',
'slugline': 'slugline',
'subject': [],
'anpa_take_key': 'take_key',
'unique_id': '1',
'type': 'text',
'body_html': '<p>body</p>',
'word_count': '1',
'priority': 1,
'task': {'desk': 1},
'urgency': 1,
'place': [{'qcode': 'VIC', 'name': 'VIC'}]
}
subscriber = self.app.data.find('subscribers', None, None)[0]
f = AAPNewscentreFormatter()
seq, doc = f.format(article, subscriber)[0]
doc = json.loads(doc)
self.assertEqual(doc['subject_reference'], '00000000')
self.assertEqual(doc['headline'], 'VIC:This is a test headline')
article = {
'source': 'AAP',
'anpa_category': [{'qcode': 'a'}],
'headline': 'This is a test headline',
'byline': 'joe',
'slugline': 'slugline',
'subject': None,
'anpa_take_key': 'take_key',
'unique_id': '1',
'type': 'text',
'body_html': '<p>body</p>',
'word_count': '1',
'priority': 1,
'task': {'desk': 1},
'urgency': 1,
'place': None
}
seq, doc = f.format(article, subscriber)[0]
doc = json.loads(doc)
self.assertEqual(doc['subject_reference'], '00000000')
self.assertEqual(doc['headline'], 'This is a test headline')
def test_aap_newscentre_formatter_with_body_footer(self):
subscriber = self.app.data.find('subscribers', None, None)[0]
doc = self.article.copy()
doc['body_footer'] = '<p>call helpline 999 if you are planning to quit smoking</p>'
f = AAPNewscentreFormatter()
seq, item = f.format(doc, subscriber, ['Axx'])[0]
item = json.loads(item)
self.assertGreater(int(seq), 0)
self.assertEqual(seq, item['sequence'])
item.pop('sequence')
self.maxDiff = None
self.assertDictEqual(item,
{'category': 'A', 'fullStory': 1, 'ident': '0',
'headline': 'VIC:This is a test headline', 'originator': 'AAP',
'take_key': 'take_key',
'article_text': ' By joe\r\n\r\nThe story body\r\ncall helpline 999 if you are '
'planning '
'to quit smoking\r\nAAP',
'usn': '1',
'subject_matter': 'international law', 'news_item_type': 'News',
'subject_reference': '02011001', 'subject': 'crime, law and justice',
'subject_detail': 'international court or tribunal',
'selector_codes': 'AXX',
'genre': 'Current', 'keyword': 'slugline', 'author': 'joe'})
| akintolga/superdesk-aap | server/aap/publish/formatters/aap_newscentre_formatter_test.py | Python | agpl-3.0 | 9,537 |
__description__ = """"""
__author__ = "Michael J. Harms"
__date__ = ""
import base, experiments, instruments
from base import AvivError
class ATF_Titration(base.Parser,instruments.ATF,experiments.Titration):
"""
Processes an ATF Titration experiment.
"""
pass
class CD_Titration(base.Parser,instruments.CD,experiments.Titration):
"""
Processes a CD Titration experiment.
"""
pass
class ATF_pH(base.Parser,instruments.ATF,experiments.pH):
"""
Processes an ATF pH experiment.
"""
pass
class CD_pH(base.Parser,instruments.CD,experiments.pH):
"""
Processes a CD pH experiment.
"""
pass
class ATF_Temperature(base.Parser,instruments.ATF,experiments.Temperature):
"""
Processes an ATF Temperature experiment.
"""
pass
class CD_Temperature(base.Parser,instruments.CD,experiments.Temperature):
"""
Processes a CD Temperature experiment.
"""
pass
class CD_Wavelength(base.Parser,instruments.CD,experiments.Wavelength):
"""
Processes a CD wavelength scan experiment.
"""
pass
available_parsers = {("ATF","Titration"): ATF_Titration,
("CD" ,"Titration"): CD_Titration,
("ATF","pH"): ATF_pH,
("CD" ,"pH"): CD_pH,
("ATF","Temperature"):ATF_Temperature,
("CD" ,"Temperature"):CD_Temperature,
("CD" ,"Wavelength"): CD_Wavelength}
def preParse(input_file):
    """
    Identify the experiment type of input_file, build the matching parser with
    placeholder values for its required keywords, and parse the file so that
    instrument parameters can be extracted prior to user input.
    """
# Create a dummy_parser
exp_id = instruments.Unknown(input_file).identifyExperiment()
dummy_parser = available_parsers[exp_id]()
dummy_parser.exp_id = exp_id
# Make up values for required keywords for this parser, then parse file.
# This allows extraction of instrument parameters, etc. prior to user input.
kwarg_dict = dummy_parser.experiment_kwargs[:]
kwarg_dict.extend(dummy_parser.instrument_kwargs)
kwarg_dict = [(k[0],k[1](1)) for k in kwarg_dict if k[2] == "required"]
kwarg_dict = dict(kwarg_dict)
# Do parsing and return experiment object
dummy_parser.processFile(input_file=input_file,**kwarg_dict)
return dummy_parser
#import sys
#yo(sys.argv[1])
#tmp_exp = instruments.Unknown(sys.argv[1])
#print tmp_exp.basicConfiguration()
#exp_id = identifyExperiment(sys.argv[1])
#parser = available_parsers[exp_id]
#exp = parser()
#exp.processFile(input_file=sys.argv[1],num_residues=143,molec_weight=16000,initial_conc=50.,sam_buf=.0,sam_titr=0.0,sample=True,reference=True,ref_buf=0.1,ref_titr=0.2,qc_corr=True,titrant_conc=7)
| pansapiens/process-aviv | aviv/parsers.py | Python | gpl-3.0 | 2,653 |
import csv
import json
import zipfile
from pprint import pformat
from cStringIO import StringIO
import grequests
from crowdflower import logger
def read_zip_csv(zf):
for zipinfo in zf.filelist:
zipinfo_fp = zf.open(zipinfo)
reader = csv.DictReader(zipinfo_fp)
for row in reader:
yield row
def to_params(props):
# not sure if this is properly recursive
for key, value in props.items():
if isinstance(value, list):
# Rails cruft inherent in the CrowdFlower API
for subvalue in value:
yield '[%s][]' % key, subvalue
elif isinstance(value, dict):
for subkey, subvalue in to_params(value):
yield '[%s]%s' % (key, subkey), subvalue
else:
yield '[%s]' % key, value
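# Illustrative sketch of the Rails-style flattening performed by to_params
# (field names and values are invented; ordering follows dict iteration order):
#
#   list(to_params({'title': 'Label tweets',
#                   'included_countries': ['US', 'GB'],
#                   'options': {'mail_to': '[email protected]'}}))
#   # -> [('[title]', 'Label tweets'),
#   #     ('[included_countries][]', 'US'),
#   #     ('[included_countries][]', 'GB'),
#   #     ('[options][mail_to]', '[email protected]')]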
class Job(object):
'''
Read / Write attributes
auto_order
auto_order_threshold
auto_order_timeout
cml
cml_fields
confidence_fields
css
custom_key
excluded_countries
gold_per_assignment
included_countries
instructions
js
judgments_per_unit
language
max_judgments_per_unit
max_judgments_per_contributor
min_unit_confidence
options
pages_per_assignment
problem
send_judgments_webhook
state
title
units_per_assignment
webhook_uri
Read-only attributes
completed
completed_at
created_at
gold
golds_count
id
judgments_count
units_count
updated_at
Not sure about:
payment_cents
'''
READ_WRITE_FIELDS = ['auto_order', 'auto_order_threshold', 'auto_order_timeout', 'cml', 'cml_fields', 'confidence_fields', 'css', 'custom_key', 'excluded_countries', 'gold_per_assignment', 'included_countries', 'instructions', 'js', 'judgments_per_unit', 'language', 'max_judgments_per_unit', 'max_judgments_per_contributor', 'min_unit_confidence', 'options', 'pages_per_assignment', 'problem', 'send_judgments_webhook', 'state', 'title', 'units_per_assignment', 'webhook_uri']
def __init__(self, job_id, connection):
self.id = job_id
self._connection = connection
# cacheable:
self._properties = {}
self._units = {}
def __json__(self):
return self.properties
def __repr__(self):
return pformat(self.properties)
@property
def properties(self):
if len(self._properties) == 0:
self._properties = self._connection.request('/jobs/%s' % self.id)
return self._properties
@property
def units(self):
if len(self._units) == 0:
self._units = self._connection.request('/jobs/%s/units' % self.id)
return self._units
def clear_units(self, parallel=20):
reqs = (self._connection.grequest('/jobs/%s/units/%s' % (self.id, unit_id), method='DELETE')
for unit_id in self.units.keys())
for response in grequests.imap(reqs, size=parallel):
yield response
def upload(self, units):
headers = {'Content-Type': 'application/json'}
data = '\n'.join(json.dumps(unit) for unit in units)
res = self._connection.request('/jobs/%s/upload' % self.id, method='POST', headers=headers, data=data)
# reset cached units
self._units = {}
return res
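    # Illustrative sketch (unit data invented): upload() expects an iterable of
    # unit dicts and posts them as newline-delimited JSON, e.g.
    #
    #   job.upload([{'text': 'first unit'}, {'text': 'second unit'}])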
def update(self, props):
params = [('job' + key, value) for key, value in to_params(props)]
logger.debug('Updating Job#%d: %r', self.id, params)
res = self._connection.request('/jobs/%s' % self.id, method='PUT', params=params)
# reset cached properties
self._properties = {}
return res
def channels(self):
'''
Manual channel control is deprecated.
The API documentation includes a PUT call at this endpoint, but I'm
not sure if it actually does anything.
'''
return self._connection.request('/jobs/%s/channels' % self.id)
def legend(self):
'''
From the CrowdFlower documentation:
> The legend will show you the generated keys that will end up being
> submitted with your form.
'''
return self._connection.request('/jobs/%s/legend' % self.id)
def gold_reset(self):
'''
Mark all of this job's test questions (gold data) as NOT gold.
Splitting the /jobs/:job_id/gold API call into gold_reset() and
gold_add() is not faithful to the API, but resetting gold marks
and adding them should not have the same API endpoint in the first place.
'''
params = dict(reset='true')
res = self._connection.request('/jobs/%s/gold' % self.id, method='PUT', params=params)
# reset cache
self._properties = {}
self._units = {}
return res
def gold_add(self, check, check_with=None):
'''
Configure the gold labels for a task.
* check: the name of the field being checked against
- Can call /jobs/{job_id}/legend to see options
- And as far as I can tell, the job.properties['gold'] field is a
hash with keys that are "check" names, and values that are "with" names.
* check_with: the name of the field containing the gold label for check
- Crowdflower calls this field "with", which is a Python keyword
- defaults to check + '_gold'
I'm not sure why convert_units would be anything but true.
'''
params = dict(check=check, convert_units='true')
if check_with is not None:
params['with'] = check_with
res = self._connection.request('/jobs/%s/gold' % self.id, method='PUT', params=params)
# reset cache
self._properties = {}
self._units = {}
return res
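    # Illustrative sketch (field names invented): for a unit field "sentiment"
    # whose gold answers live in the conventional "sentiment_gold" column:
    #
    #   job.gold_add('sentiment')                      # check_with defaults to 'sentiment_gold'
    #   job.gold_add('sentiment', 'sentiment_answer')  # explicit "with" column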
def delete(self):
return self._connection.request('/jobs/%s' % self.id, method='DELETE')
def download(self, full=True):
'''The resulting CSV will have headers like:
_unit_id
Integer
Unique ID per unit
_created_at
Date: m/d/yyyy hh:mm:ss
_golden
Enum: "true" | "false"
_canary
Always empty, ???
_id
Integer
Unique ID per judgment
_missed
???
_started_at
Date: m/d/yyyy hh:mm:ss
Can use
_tainted
Always false, ???
_channel
Enum: "neodev" | "clixsense" | [etc.]
_trust
Always 1, ???
_worker_id
Integer
Unique ID per worker
_country
3-letter ISO code
_region
String
A number for all countries except UK, USA, Canada (others?)
_city
String
City name
_ip
String
IPv4 address
And then the rest just copies over whatever fields were originally used, e.g.:
id
text
sentiment
sentiment_gold
'''
# pulls down the csv endpoint, unzips it, and returns a list of all the rows
params = dict(full='true' if full else 'false')
# use .csv, not headers=dict(Accept='text/csv'), which Crowdflower rejects
req = self._connection.create_request('/jobs/%s.csv' % self.id, method='GET', params=params)
res = self._connection.send_request(req)
# because ZipFile insists on seeking, we can't simply pass over the res.raw stream
fp = StringIO()
fp.write(res.content)
# fp.seek(0)
zf = zipfile.ZipFile(fp)
# yield each row?
return list(read_zip_csv(zf))
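    # Illustrative sketch (column values invented): download() returns the
    # unzipped CSV as a list of dicts keyed by the headers documented above:
    #
    #   rows = job.download()
    #   for row in rows:
    #       print row['_worker_id'], row['_unit_id']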
| jfrazee/crowdflower | crowdflower/job.py | Python | mit | 7,986 |
#!/usr/bin/env python
import sys
import pygtk
pygtk.require("2.0")
import gtk
import gtk.glade
import commands
import os
import gettext
from user import home
import webkit
import string
# i18n
gettext.install("xswelcome", "/usr/share/ututo/locale")
class MintWelcome():
def __init__(self):
gladefile = "/usr/lib/ututo/xsWelcome/xsWelcome.glade"
wTree = gtk.glade.XML(gladefile,"main_window")
wTree.get_widget("main_window").set_title(_("Welcome to Ututo XS"))
wTree.get_widget("main_window").set_icon_from_file("/usr/lib/ututo/xsWelcome/ututo.png")
sys.path.append('/usr/lib/ututo/common')
from configobj import ConfigObj
if os.path.exists(home + "/.ututoxs/xsWelcome/info"):
config = ConfigObj(home + "/.ututoxs/xsWelcome/info")
else:
os.system("/usr/lib/ututo/xsWelcome/crea_info.sh")
config = ConfigObj(home + "/.ututoxs/xsWelcome/info")
description = config['DESCRIPTION']
codename = config['CODENAME']
edition = config['EDITION']
release = config['RELEASE']
description = description.replace("\"", "")
wTree.get_widget("main_window").connect("destroy", gtk.main_quit)
browser = webkit.WebView()
wTree.get_widget("scrolled_welcome").add(browser)
browser.connect("button-press-event", lambda w, e: e.button == 3)
subs = {}
subs['release'] = release + " (" + codename + ")"
subs['edition'] = edition
subs['title'] = _("Welcome to Ututo XS")
subs['release_title'] = _("Release")
subs['edition_title'] = _("Edition")
subs['site_title'] = _("Project")
subs['user_guide_title'] = _("Documentation")
subs['support_title'] = _("Support")
subs['news_title'] = _("Community")
subs['project'] = _("Web Site")
subs['user_guide'] = _("User Guide")
subs['forums'] = _("Forums")
subs['news'] = _("News")
subs['project_title'] = _("UTUTO Project")
subs['show'] = _("Show this dialog at startup")
subs['close'] = _("Close")
if os.path.exists("norun.flag"):
subs['checked'] = ("")
else:
subs['checked'] = ("CHECKED")
subs['welcome'] = _("Welcome and thank you for choosing Ututo XS. We hope you'll enjoy using it as much as we did designing it. The links below will help you get started with your new operating system. Have a great time and don't hesitate to send us your feedback.")
subs['licence']= _("This GNU System is entirely made with only free software from source code.<br/>Copying, modification and redistribution of this entire GNU System are permitted provided this notice is preserved.<br/>")
subs['author']= _("Developed by UTUTO Development Team<br/>(C) UTUTO Project")
template = open("/usr/lib/ututo/xsWelcome/templates/welcome.html").read()
html = string.Template(template).safe_substitute(subs)
browser.load_html_string(html, "file:/")
browser.connect('title-changed', self.title_changed)
wTree.get_widget("main_window").show_all()
def title_changed(self, view, frame, title):
if title.startswith("nop"):
return
# call directive looks like:
# "call:func:arg1,arg2"
# "call:func"
elif title == "event_site":
os.system("xdg-open http://www.ututo.org")
elif title == "event_user_guide":
os.system("xdg-open http://www.ututo.org/cmsd/content/manual-ututo-xs")
elif title == "event_forums":
os.system("xdg-open http://www.ututo.org/cmsd/forum")
elif title == "event_news":
os.system("xdg-open http://proyecto.ututo.net/cmsd/noticias")
elif title == "event_close_true":
if os.path.exists(home + "/.ututoxs/xsWelcome/norun.flag"):
os.system("rm -rf " + home + "/.ututoxs/xsWelcome/norun.flag")
gtk.main_quit()
elif title == "event_close_false":
os.system("mkdir -p " + home + "/.ututoxs/xsWelcome")
os.system("touch " + home + "/.ututoxs/xsWelcome/norun.flag")
gtk.main_quit()
elif title == "checkbox_checked":
if os.path.exists(home + "/.ututoxs/xsWelcome/norun.flag"):
os.system("rm -rf " + home + "/.ututoxs/xsWelcome/norun.flag")
elif title == "checkbox_unchecked":
os.system("mkdir -p " + home + "/.ututoxs/xsWelcome")
os.system("touch " + home + "/.ututoxs/xsWelcome/norun.flag")
if __name__ == "__main__":
MintWelcome()
gtk.main()
| PROYECTO-UTUTO/uget | usr/lib/ututo/xsWelcome/xsWelcome.py | Python | gpl-3.0 | 4,668 |
from __future__ import absolute_import, print_function
from rest_framework.response import Response
from sentry.api.base import Endpoint, EnvironmentMixin
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import (
serialize,
SharedEventSerializer,
SharedGroupSerializer,
SharedProjectSerializer,
)
from sentry.models import Group
class SharedGroupDetailsEndpoint(Endpoint, EnvironmentMixin):
permission_classes = ()
def get(self, request, share_id):
"""
Retrieve an aggregate
        Return details on an individual aggregate specified by its shared ID.
        {method} {path}
        Note: This is not the equivalent of what you'd receive with the standard
group details endpoint. Data is more restrictive and designed
specifically for sharing.
"""
try:
group = Group.from_share_id(share_id)
except Group.DoesNotExist:
raise ResourceDoesNotExist
if group.organization.flags.disable_shared_issues:
raise ResourceDoesNotExist
event = group.get_latest_event()
context = serialize(
group,
request.user,
SharedGroupSerializer(
environment_func=self._get_environment_func(request, group.project.organization_id)
),
)
# TODO(dcramer): move latestEvent/project into SharedGroupSerializer
context["latestEvent"] = serialize(event, request.user, SharedEventSerializer())
context["project"] = serialize(group.project, request.user, SharedProjectSerializer())
return Response(context)
| mvaled/sentry | src/sentry/api/endpoints/shared_group_details.py | Python | bsd-3-clause | 1,669 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from superdesk.resource import Resource
from content_api.items.resource import schema
from copy import copy
class PublishResource(Resource):
"""A class defining and configuring the /publish API endpoint."""
    # Example of an ID of an object in database (without quotes):
#
# "tag:example.com,0000:newsml_BRE9A605"
# "tag:localhost:2015:f4b35e12-559b-4a2b-b1f2-d5e64048bde8"
#
item_url = 'regex("[\w,.:-]+")'
schema = copy(schema)
schema.update(_id={'type': 'string'})
datasource = {
'source': 'items',
'search_backend': 'elastic',
}
item_methods = ['PATCH', 'DELETE']
resource_methods = ['POST']
| m038/superdesk-content-api | content_api/publish/resource.py | Python | agpl-3.0 | 984 |
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from boto import exception as boto_exception
from neutronclient.common import exceptions as neutron_exceptions
from novaclient import exceptions as nova_exc
from oslo_config import cfg
from saharaclient.api import base as saharaclient_base
from rally.common import logging
from rally.common.plugin import discover
from rally.common import utils
from rally.plugins.openstack.cleanup import base
from rally.plugins.openstack.scenarios.fuel import utils as futils
from rally.plugins.openstack.scenarios.keystone import utils as kutils
from rally.plugins.openstack.scenarios.nova import utils as nova_utils
from rally.plugins.openstack.wrappers import glance as glance_wrapper
from rally.plugins.openstack.wrappers import keystone as keystone_wrapper
from rally.task import utils as task_utils
CONF = cfg.CONF
CONF.import_opt("glance_image_delete_timeout",
"rally.plugins.openstack.scenarios.glance.utils",
"benchmark")
CONF.import_opt("glance_image_delete_poll_interval",
"rally.plugins.openstack.scenarios.glance.utils",
"benchmark")
LOG = logging.getLogger(__name__)
def get_order(start):
return iter(range(start, start + 99))
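# Illustrative note: each OpenStack service below reserves a block of 99 order
# numbers via get_order() and pulls consecutive values with next(), so cleanup
# processes services (and resources within a service) in a fixed sequence.
# A hypothetical new service block would look like:
#
#   _example_order = get_order(9900)
#   next(_example_order)  # 9900 -> order of the service's first resource
#   next(_example_order)  # 9901 -> order of the next one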
class SynchronizedDeletion(object):
def is_deleted(self):
return True
class QuotaMixin(SynchronizedDeletion):
def id(self):
return self.raw_resource
def name(self):
return None
def delete(self):
self._manager().delete(self.raw_resource)
def list(self):
return [self.tenant_uuid] if self.tenant_uuid else []
# HEAT
@base.resource("heat", "stacks", order=100, tenant_resource=True)
class HeatStack(base.ResourceManager):
def name(self):
return self.raw_resource.stack_name
# SENLIN
_senlin_order = get_order(150)
@base.resource(service=None, resource=None, admin_required=True)
class SenlinMixin(base.ResourceManager):
def _manager(self):
client = self._admin_required and self.admin or self.user
return getattr(client, self._service)()
def list(self):
return getattr(self._manager(), self._resource)()
def delete(self):
# make singular form of resource name from plural form
res_name = self._resource[:-1]
return getattr(self._manager(), "delete_%s" % res_name)(self.id)
@base.resource("senlin", "clusters", order=next(_senlin_order))
class SenlinCluster(SenlinMixin):
"""Resource class for Senlin Cluster."""
@base.resource("senlin", "profiles", order=next(_senlin_order))
class SenlinProfile(SenlinMixin):
"""Resource class for Senlin Profile."""
# NOVA
_nova_order = get_order(200)
@base.resource("nova", "servers", order=next(_nova_order),
tenant_resource=True)
class NovaServer(base.ResourceManager):
def list(self):
"""List all servers."""
if hasattr(self._manager().api, "api_version"):
# NOTE(andreykurilin): novaclient v2.27.0 includes ability to
# return all servers(see https://review.openstack.org/#/c/217101
# for more details). This release can be identified by presence
# of "api_version" property of ``novaclient.client.Client`` cls.
return self._manager().list(limit=-1)
else:
# FIXME(andreykurilin): Remove code below, when minimum version of
# novaclient in requirements will allow it.
# NOTE(andreykurilin): Nova API returns only limited number(
# 'osapi_max_limit' option in nova.conf) of servers, so we need
# to use 'marker' option to list all pages of servers.
result = []
marker = None
while True:
servers = self._manager().list(marker=marker)
if not servers:
break
result.extend(servers)
marker = servers[-1].id
return result
def delete(self):
if getattr(self.raw_resource, "OS-EXT-STS:locked", False):
self.raw_resource.unlock()
super(NovaServer, self).delete()
@base.resource("nova", "floating_ips", order=next(_nova_order))
class NovaFloatingIPs(SynchronizedDeletion, base.ResourceManager):
def name(self):
return None
@base.resource("nova", "keypairs", order=next(_nova_order))
class NovaKeypair(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("nova", "security_groups", order=next(_nova_order),
tenant_resource=True)
class NovaSecurityGroup(SynchronizedDeletion, base.ResourceManager):
def list(self):
return filter(lambda x: x.name != "default",
super(NovaSecurityGroup, self).list())
@base.resource("nova", "quotas", order=next(_nova_order),
admin_required=True, tenant_resource=True)
class NovaQuotas(QuotaMixin, base.ResourceManager):
pass
@base.resource("nova", "flavors", order=next(_nova_order),
admin_required=True, perform_for_admin_only=True)
class NovaFlavors(base.ResourceManager):
def list(self):
return [r for r in self._manager().list()
if utils.name_matches_object(r.name, nova_utils.NovaScenario)]
def is_deleted(self):
try:
self._manager().get(self.name())
except nova_exc.NotFound:
return True
return False
@base.resource("nova", "floating_ips_bulk", order=next(_nova_order),
admin_required=True)
class NovaFloatingIpsBulk(SynchronizedDeletion, base.ResourceManager):
def id(self):
return self.raw_resource.address
def name(self):
return None
def list(self):
return [floating_ip for floating_ip in self._manager().list()
if utils.name_matches_object(floating_ip.pool,
nova_utils.NovaScenario)]
@base.resource("nova", "networks", order=next(_nova_order),
admin_required=True, tenant_resource=True)
class NovaNetworks(SynchronizedDeletion, base.ResourceManager):
def name(self):
return self.raw_resource.label
def list(self):
# NOTE(stpierre): any plugin can create a nova network via the
# network wrapper, and that network's name will be created
# according to its owner's random name generation
# parameters. so we need to check if there are nova networks
# whose name pattern matches those of any loaded plugin that
# implements RandomNameGeneratorMixin
classes = list(discover.itersubclasses(utils.RandomNameGeneratorMixin))
return [net for net in self._manager().list()
if utils.name_matches_object(net.label, *classes)]
# EC2
_ec2_order = get_order(250)
class EC2Mixin(object):
def _manager(self):
return getattr(self.user, self._service)()
@base.resource("ec2", "servers", order=next(_ec2_order))
class EC2Server(EC2Mixin, base.ResourceManager):
def is_deleted(self):
try:
instances = self._manager().get_only_instances(
instance_ids=[self.id()])
except boto_exception.EC2ResponseError as e:
# NOTE(wtakase): Nova EC2 API returns 'InvalidInstanceID.NotFound'
# if instance not found. In this case, we consider
# instance has already been deleted.
return getattr(e, "error_code") == "InvalidInstanceID.NotFound"
# NOTE(wtakase): After instance deletion, instance can be 'terminated'
# state. If all instance states are 'terminated', this
# returns True. And if get_only_instances() returns an
# empty list, this also returns True because we consider
# instance has already been deleted.
return all(map(lambda i: i.state == "terminated", instances))
def delete(self):
self._manager().terminate_instances(instance_ids=[self.id()])
def list(self):
return self._manager().get_only_instances()
# NEUTRON
_neutron_order = get_order(300)
@base.resource(service=None, resource=None, admin_required=True)
class NeutronMixin(SynchronizedDeletion, base.ResourceManager):
# Neutron has the best client ever, so we need to override everything
def supports_extension(self, extension):
exts = self._manager().list_extensions().get("extensions", [])
if any(ext.get("alias") == extension for ext in exts):
return True
return False
def _manager(self):
client = self._admin_required and self.admin or self.user
return getattr(client, self._service)()
def id(self):
return self.raw_resource["id"]
def name(self):
return self.raw_resource.get("name", "")
def delete(self):
delete_method = getattr(self._manager(), "delete_%s" % self._resource)
delete_method(self.id())
def list(self):
resources = self._resource + "s"
list_method = getattr(self._manager(), "list_%s" % resources)
return filter(lambda r: r["tenant_id"] == self.tenant_uuid,
list_method(tenant_id=self.tenant_uuid)[resources])
class NeutronLbaasV1Mixin(NeutronMixin):
def list(self):
if self.supports_extension("lbaas"):
return super(NeutronLbaasV1Mixin, self).list()
return []
@base.resource("neutron", "vip", order=next(_neutron_order),
tenant_resource=True)
class NeutronV1Vip(NeutronLbaasV1Mixin):
pass
@base.resource("neutron", "health_monitor", order=next(_neutron_order),
tenant_resource=True)
class NeutronV1Healthmonitor(NeutronLbaasV1Mixin):
pass
@base.resource("neutron", "pool", order=next(_neutron_order),
tenant_resource=True)
class NeutronV1Pool(NeutronLbaasV1Mixin):
pass
@base.resource("neutron", "port", order=next(_neutron_order),
tenant_resource=True)
class NeutronPort(NeutronMixin):
def delete(self):
if (self.raw_resource["device_owner"] == "network:router_interface" or
self.raw_resource["device_owner"] ==
"network:router_interface_distributed"):
self._manager().remove_interface_router(
self.raw_resource["device_id"],
{"port_id": self.raw_resource["id"]})
else:
try:
self._manager().delete_port(self.id())
except neutron_exceptions.PortNotFoundClient:
# Port can be already auto-deleted, skip silently
LOG.debug("Port %s was not deleted. Skip silently because "
"port can be already auto-deleted."
% self.id())
@base.resource("neutron", "router", order=next(_neutron_order),
tenant_resource=True)
class NeutronRouter(NeutronMixin):
pass
@base.resource("neutron", "subnet", order=next(_neutron_order),
tenant_resource=True)
class NeutronSubnet(NeutronMixin):
pass
@base.resource("neutron", "network", order=next(_neutron_order),
tenant_resource=True)
class NeutronNetwork(NeutronMixin):
pass
@base.resource("neutron", "floatingip", order=next(_neutron_order),
tenant_resource=True)
class NeutronFloatingIP(NeutronMixin):
pass
@base.resource("neutron", "security_group", order=next(_neutron_order),
tenant_resource=True)
class NeutronSecurityGroup(NeutronMixin):
def list(self):
tenant_sgs = super(NeutronSecurityGroup, self).list()
# NOTE(pirsriva): Filter out "default" security group deletion
# by non-admin role user
return filter(lambda r: r["name"] != "default",
tenant_sgs)
@base.resource("neutron", "quota", order=next(_neutron_order),
admin_required=True, tenant_resource=True)
class NeutronQuota(QuotaMixin, NeutronMixin):
def delete(self):
self._manager().delete_quota(self.tenant_uuid)
# CINDER
_cinder_order = get_order(400)
@base.resource("cinder", "backups", order=next(_cinder_order),
tenant_resource=True)
class CinderVolumeBackup(base.ResourceManager):
pass
@base.resource("cinder", "volume_snapshots", order=next(_cinder_order),
tenant_resource=True)
class CinderVolumeSnapshot(base.ResourceManager):
pass
@base.resource("cinder", "transfers", order=next(_cinder_order),
tenant_resource=True)
class CinderVolumeTransfer(base.ResourceManager):
pass
@base.resource("cinder", "volumes", order=next(_cinder_order),
tenant_resource=True)
class CinderVolume(base.ResourceManager):
pass
@base.resource("cinder", "quotas", order=next(_cinder_order),
admin_required=True, tenant_resource=True)
class CinderQuotas(QuotaMixin, base.ResourceManager):
pass
# MANILA
_manila_order = get_order(450)
@base.resource("manila", "shares", order=next(_manila_order),
tenant_resource=True)
class ManilaShare(base.ResourceManager):
pass
@base.resource("manila", "share_networks", order=next(_manila_order),
tenant_resource=True)
class ManilaShareNetwork(base.ResourceManager):
pass
@base.resource("manila", "security_services", order=next(_manila_order),
tenant_resource=True)
class ManilaSecurityService(base.ResourceManager):
pass
# GLANCE
@base.resource("glance", "images", order=500, tenant_resource=True)
class GlanceImage(base.ResourceManager):
def _client(self):
return getattr(self.admin or self.user, self._service)
def _wrapper(self):
return glance_wrapper.wrap(self._client(), self)
def list(self):
return self._wrapper().list_images(owner=self.tenant_uuid)
def delete(self):
client = self._client()
client().images.delete(self.raw_resource.id)
task_utils.wait_for_status(
self.raw_resource, ["deleted"],
check_deletion=True,
update_resource=self._wrapper().get_image,
timeout=CONF.benchmark.glance_image_delete_timeout,
check_interval=CONF.benchmark.glance_image_delete_poll_interval)
# SAHARA
_sahara_order = get_order(600)
@base.resource("sahara", "job_executions", order=next(_sahara_order),
tenant_resource=True)
class SaharaJobExecution(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("sahara", "jobs", order=next(_sahara_order),
tenant_resource=True)
class SaharaJob(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("sahara", "job_binary_internals", order=next(_sahara_order),
tenant_resource=True)
class SaharaJobBinaryInternals(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("sahara", "job_binaries", order=next(_sahara_order),
tenant_resource=True)
class SaharaJobBinary(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("sahara", "data_sources", order=next(_sahara_order),
tenant_resource=True)
class SaharaDataSource(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("sahara", "clusters", order=next(_sahara_order),
tenant_resource=True)
class SaharaCluster(base.ResourceManager):
# Need special treatment for Sahara Cluster because of the way the
# exceptions are described in:
# https://github.com/openstack/python-saharaclient/blob/master/
# saharaclient/api/base.py#L145
def is_deleted(self):
try:
self._manager().get(self.id())
return False
except saharaclient_base.APIException as e:
return e.error_code == 404
@base.resource("sahara", "cluster_templates", order=next(_sahara_order),
tenant_resource=True)
class SaharaClusterTemplate(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("sahara", "node_group_templates", order=next(_sahara_order),
tenant_resource=True)
class SaharaNodeGroup(SynchronizedDeletion, base.ResourceManager):
pass
# CEILOMETER
@base.resource("ceilometer", "alarms", order=700, tenant_resource=True)
class CeilometerAlarms(SynchronizedDeletion, base.ResourceManager):
def id(self):
return self.raw_resource.alarm_id
def list(self):
query = [{
"field": "project_id",
"op": "eq",
"value": self.tenant_uuid
}]
return self._manager().list(q=query)
# ZAQAR
@base.resource("zaqar", "queues", order=800)
class ZaqarQueues(SynchronizedDeletion, base.ResourceManager):
def list(self):
return self.user.zaqar().queues()
# DESIGNATE
_designate_order = get_order(900)
class DesignateResource(SynchronizedDeletion, base.ResourceManager):
def _manager(self):
# Map resource names to api / client version
resource_versions = {
"domains": "1",
"servers": "1",
"recordsets": 2,
"zones": "2"
}
version = resource_versions[self._resource]
return getattr(getattr(self.user, self._service)(version),
self._resource)
def _walk_pages(self, func, *args, **kwargs):
"""Generator that keeps fetching pages until there's none left."""
marker = None
while True:
items = func(marker=marker, limit=100, *args, **kwargs)
if not items:
break
for item in items:
yield item
marker = items[-1]["id"]
@base.resource("designate", "domains", order=next(_designate_order))
class DesignateDomain(DesignateResource):
pass
@base.resource("designate", "servers", order=next(_designate_order),
admin_required=True, perform_for_admin_only=True)
class DesignateServer(DesignateResource):
pass
@base.resource("designate", "recordsets", order=next(_designate_order),
tenant_resource=True)
class DesignateRecordSets(DesignateResource):
def _client(self):
# Map resource names to api / client version
resource_versions = {
"domains": "1",
"servers": "1",
"recordsets": 2,
"zones": "2"
}
version = resource_versions[self._resource]
return getattr(self.user, self._service)(version)
def list(self):
criterion = {"name": "s_rally_*"}
for zone in self._walk_pages(self._client().zones.list,
criterion=criterion):
for recordset in self._walk_pages(self._client().recordsets.list,
zone["id"]):
yield recordset
@base.resource("designate", "zones", order=next(_designate_order),
tenant_resource=True)
class DesignateZones(DesignateResource):
def list(self):
criterion = {"name": "s_rally_*"}
return self._walk_pages(self._manager().list, criterion=criterion)
# SWIFT
_swift_order = get_order(1000)
class SwiftMixin(SynchronizedDeletion, base.ResourceManager):
def _manager(self):
client = self._admin_required and self.admin or self.user
return getattr(client, self._service)()
def id(self):
return self.raw_resource
def name(self):
# NOTE(stpierre): raw_resource is a list of either [container
# name, object name] (as in SwiftObject) or just [container
# name] (as in SwiftContainer).
return self.raw_resource[-1]
def delete(self):
delete_method = getattr(self._manager(), "delete_%s" % self._resource)
        # NOTE(weiwu): *self.raw_resource is required because when deleting a
        # container we pass only the container name, while to delete an object
        # we must pass the container name first and the object name second.
delete_method(*self.raw_resource)
@base.resource("swift", "object", order=next(_swift_order),
tenant_resource=True)
class SwiftObject(SwiftMixin):
def list(self):
object_list = []
containers = self._manager().get_account(full_listing=True)[1]
for con in containers:
objects = self._manager().get_container(con["name"],
full_listing=True)[1]
for obj in objects:
raw_resource = [con["name"], obj["name"]]
object_list.append(raw_resource)
return object_list
@base.resource("swift", "container", order=next(_swift_order),
tenant_resource=True)
class SwiftContainer(SwiftMixin):
def list(self):
containers = self._manager().get_account(full_listing=True)[1]
return [[con["name"]] for con in containers]
# MISTRAL
@base.resource("mistral", "workbooks", order=1100, tenant_resource=True)
class MistralWorkbooks(SynchronizedDeletion, base.ResourceManager):
def delete(self):
self._manager().delete(self.raw_resource.name)
# MURANO
_murano_order = get_order(1200)
@base.resource("murano", "environments", tenant_resource=True,
order=next(_murano_order))
class MuranoEnvironments(SynchronizedDeletion, base.ResourceManager):
pass
@base.resource("murano", "packages", tenant_resource=True,
order=next(_murano_order))
class MuranoPackages(base.ResourceManager):
def list(self):
return filter(lambda x: x.name != "Core library",
super(MuranoPackages, self).list())
# IRONIC
_ironic_order = get_order(1300)
@base.resource("ironic", "node", admin_required=True,
order=next(_ironic_order), perform_for_admin_only=True)
class IronicNodes(base.ResourceManager):
def id(self):
return self.raw_resource.uuid
# FUEL
@base.resource("fuel", "environment", order=1400,
admin_required=True, perform_for_admin_only=True)
class FuelEnvironment(base.ResourceManager):
"""Fuel environment.
That is the only resource that can be deleted by fuelclient explicitly.
"""
def id(self):
return self.raw_resource["id"]
def name(self):
return self.raw_resource["name"]
def is_deleted(self):
return not self._manager().get(self.id())
def list(self):
return [env for env in self._manager().list()
if utils.name_matches_object(env["name"],
futils.FuelScenario)]
# WATCHER
@base.resource("watcher", "audit_template", order=1500,
admin_required=True, tenant_resource=True)
class WatcherTemplate(SynchronizedDeletion, base.ResourceManager):
def id(self):
return self.raw_resource.uuid
def is_deleted(self):
from watcherclient.common.apiclient import exceptions
try:
self._manager().get(self.id())
return False
except exceptions.NotFound:
return True
def list(self):
return self._manager().list(limit=0)
# KEYSTONE
_keystone_order = get_order(9000)
class KeystoneMixin(SynchronizedDeletion):
def _manager(self):
return keystone_wrapper.wrap(getattr(self.admin, self._service)())
def delete(self):
delete_method = getattr(self._manager(), "delete_%s" % self._resource)
delete_method(self.id())
def list(self):
# TODO(boris-42): We should use such stuff in all list commands.
resources = self._resource + "s"
list_method = getattr(self._manager(), "list_%s" % resources)
return [r for r in list_method()
if utils.name_matches_object(r.name, kutils.KeystoneScenario)]
@base.resource("keystone", "user", order=next(_keystone_order),
admin_required=True, perform_for_admin_only=True)
class KeystoneUser(KeystoneMixin, base.ResourceManager):
pass
@base.resource("keystone", "project", order=next(_keystone_order),
admin_required=True, perform_for_admin_only=True)
class KeystoneProject(KeystoneMixin, base.ResourceManager):
pass
@base.resource("keystone", "service", order=next(_keystone_order),
admin_required=True, perform_for_admin_only=True)
class KeystoneService(KeystoneMixin, base.ResourceManager):
pass
@base.resource("keystone", "role", order=next(_keystone_order),
admin_required=True, perform_for_admin_only=True)
class KeystoneRole(KeystoneMixin, base.ResourceManager):
pass
@base.resource("keystone", "ec2", tenant_resource=True,
order=next(_keystone_order))
class KeystoneEc2(SynchronizedDeletion, base.ResourceManager):
def list(self):
return self._manager().list(self.raw_resource)
| gluke77/rally | rally/plugins/openstack/cleanup/resources.py | Python | apache-2.0 | 25,404 |
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Training.total_views'
db.add_column('website_training', 'total_views',
self.gf('django.db.models.fields.IntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Training.total_views'
db.delete_column('website_training', 'total_views')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.country': {
'Meta': {'object_name': 'Country'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'website.field': {
'Meta': {'object_name': 'Field'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'website.organization': {
'Meta': {'object_name': 'Organization'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Country']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members_role': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'website.publicprofilepermissions': {
'Meta': {'object_name': 'PublicProfilePermissions'},
'allowed_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'allowed_user'", 'unique': 'True', 'to': "orm['auth.User']"}),
'public_user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'public_user'", 'primary_key': 'True', 'to': "orm['auth.User']"})
},
'website.training': {
'Meta': {'object_name': 'Training'},
'cowriters': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cowriters'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'creator'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_displayed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_live': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_updated': ('django.db.models.fields.IntegerField', [], {'default': '1327217400'}),
'participants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'participants'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
'resource_id': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'total_views': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'website.trainingparticipation': {
'Meta': {'object_name': 'TrainingParticipation'},
'count': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'training': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Training']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'website.trainingschedule': {
'Meta': {'object_name': 'TrainingSchedule'},
'event_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {}),
'training': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Training']"})
},
'website.trainingtempshare': {
'Meta': {'object_name': 'TrainingTempShare'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 1, 25, 0, 0)'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '12'}),
'training': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Training']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'website.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'about_me': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'enable_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'field': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Field']", 'null': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'isUniStar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_email_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_organization_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_student': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'linkedin_profile_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True'}),
'raw_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'twitter_profile_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'website_url': ('django.db.models.fields.TextField', [], {'blank': 'True'})
}
}
complete_apps = ['website'] | UniShared/unishared | UniShared_python/website/migrations/0010_auto__add_field_training_total_views.py | Python | mit | 10,657 |
# Copyright IBM Corp. 2015, 2015 All Rights Reserved
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from swiftclient import client
def put_local_file(url, token, container, local_dir, local_file, headers=None):
"""
Put local file to swift
:param url: swift endpoint url
    :param token: token string to access swift
    :param container: container to put the file into
    :param local_dir: directory path where the target file is placed
    :param local_file: name of the file to be put to swift
    :param headers: header parameters to be included in request headers
"""
resp = dict()
with open(os.path.join(local_dir, local_file), 'rb') as f:
client.put_object(url, token, container, local_file, f,
headers=headers,
content_type="application/octet-stream",
response_dict=resp)
status = resp.get('status', 0)
assert (status // 100 == 2)
def put_storlet_object(url, token, storlet, dependencies, storlet_main_class,
language='Java', version=None):
"""
Put storlet file to swift
:param url: swift endpoint url
    :param token: token string to access swift
    :param storlet: storlet file to be registered
:param dependencies: a list of dependency files
:param storlet_main_class: name of the storlet main class
:param language: storlet language. default value is Java
    :param version: storlet language version. default is 2.7 for Python
"""
headers = {'X-Object-Meta-Storlet-Language': language,
'X-Object-Meta-Storlet-Interface-Version': '1.0',
'X-Object-Meta-Storlet-Object-Metadata': 'no',
'X-Object-Meta-Storlet-Main': storlet_main_class}
if dependencies:
headers['X-Object-Meta-Storlet-Dependency'] = dependencies
if version and language.lower() == 'python':
headers['X-Object-Meta-Storlet-Language-Version'] = version
put_local_file(url, token, 'storlet', os.path.dirname(storlet),
os.path.basename(storlet), headers)
def put_storlet_executable_dependencies(url, token, deps):
"""
Put dependency files to swift with 755 permission
    :param url: swift endpoint url
    :param token: token string to access swift
:param deps: a list of dependency files to be registered
"""
for dep in deps:
headers = {'X-Object-Meta-Storlet-Dependency-Version': '1',
'X-Object-Meta-Storlet-Dependency-Permissions': '0755'}
put_local_file(url, token, 'dependency', os.path.dirname(dep),
os.path.basename(dep), headers)
def deploy_storlet(url, token, storlet, storlet_main_class, dependencies,
language='Java', version=None):
"""
Deploy storlet file and required dependencies as swift objects
:param url: swift endpoint url
:param token: token string to access swift
    :param storlet: storlet file to be registered
    :param storlet_main_class: name of the storlet main class
    :param dependencies: a list of dependency files to be registered
    :param language: storlet language. default value is Java
    :param version: storlet language version. default is 2.7 for Python
"""
# No need to create containers every time
# put_storlet_containers(url, token)
put_storlet_object(url, token, storlet,
','.join(os.path.basename(x) for x in dependencies),
storlet_main_class, language, version)
put_storlet_executable_dependencies(url, token, dependencies)
def get_auth(conf, user, passwd):
"""
Get token string to access to swift
:param conf: a dict of config parameters
:returns: (swift endpoint url, token string)
"""
auth_url = conf.auth_uri
project = conf.project_name
os_options = {'user_domain_name': conf.domain_name,
'project_name': conf.project_name,
'region_name': conf.region}
url, token = client.get_auth(auth_url, project + ':' + user, passwd,
os_options=os_options,
auth_version=conf.auth_version)
return url, token
def get_admin_auth(conf):
admin_user = conf.admin_user
admin_passwd = conf.admin_password
return get_auth(conf, admin_user, admin_passwd)
def get_member_auth(conf):
member_user = conf.member_user
member_passd = conf.member_password
return get_auth(conf, member_user, member_passd)
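# Illustrative end-to-end flow (not part of the original module). The storlet
# archive, main class and dependency paths below are made-up placeholders, and
# ``conf`` is assumed to expose the auth attributes read by get_auth() above:
#
#     url, token = get_admin_auth(conf)
#     deploy_storlet(url, token,
#                    storlet='/tmp/identitystorlet-1.0.jar',
#                    storlet_main_class='org.openstack.storlet.identity.IdentityStorlet',
#                    dependencies=['/tmp/get42'],
#                    language='Java')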
| openstack/storlets | storlets/tools/utils.py | Python | apache-2.0 | 4,999 |
#!/usr/bin/env python
"""A small wrapper around nosetests.
Avoids disruptive messages when viewing error messages.
"""
import sys
import nose
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.DEBUG)
# Suppress sigma-clipping debug log:
logging.getLogger('tkp.sourcefinder.image.sigmaclip').setLevel(logging.ERROR)
# logging.getLogger().setLevel(logging.ERROR)
nose.run(argv=sys.argv)
| transientskp/tkp | tests/runtests.py | Python | bsd-2-clause | 436 |
# Copyright 2008 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from Tkinter import *
from abstracttkdialog import AbstractTkDialog
from mabot.settings import SETTINGS
from mabot import utils
START = 1.0
class SettingsDialog(AbstractTkDialog):
def __init__(self, parent, title):
self.new_settings = None
AbstractTkDialog.__init__(self, parent, title)
def body(self, master):
methods = [ self._default_message,
self._ask_tags_added_to_modified_tests_at_startup,
self._tag_info_label,
self._addition_tags_for_executed_test,
self._tags_allowed_only_once,
self._info_label,
self._always_load_old_data_from_xml,
self._check_simultaneous_save,
self._include,
self._exclude ]
for index, method in enumerate(methods):
method(master, index)
def _info_label(self, master, row):
text = "\nSettings below will affect after loading new data\n"
self._label(master, row, text)
def _tag_info_label(self, master, row):
text = "Note: In all tag related settings separator is comma and space ', '."
self._label(master, row, text)
def _label(self, master, row, text):
Label(master, text=text).grid(row=row, column=0, sticky='NW')
def _default_message(self, master, row):
Label(master, text="Default Message:").grid(row=row, column=0, sticky=NW)
self.default_message = Text(master, height=10, width=50)
self.default_message.insert(START, SETTINGS["default_message"])
self.default_message.grid(row=row, column=1)
def _ask_tags_added_to_modified_tests_at_startup(self, master, row):
self.ask_tags_added_to_modified_tests_at_startup = self._create_radio_buttons(master,
"Ask Tags Added to Modified Tests at Start Up:", SETTINGS["ask_tags_added_to_modified_tests_at_startup"], row)
def _addition_tags_for_executed_test(self, master, row):
self.addition_tags = self._create_entry(master, "Tags Added to Modified Tests (i.e. executed-by-x, build-y):",
', '.join(SETTINGS["tags_added_to_modified_tests"]), row)
def _tags_allowed_only_once(self, master, row):
self.tags_allowed_only_once = self._create_entry(master, "Tags allowed only once (i.e. executed-by-, build-):",
', '.join(SETTINGS["tags_allowed_only_once"]), row)
def _always_load_old_data_from_xml(self, master, row):
self.always_load_old_data_from_xml = self._create_radio_buttons(master,
"Always Load Old Data from XML:", SETTINGS["always_load_old_data_from_xml"], row)
def _check_simultaneous_save(self, master, row):
self.check_simultaneous_save = self._create_radio_buttons(master,
"Check Simultaneous Save:", SETTINGS["check_simultaneous_save"], row)
def _include(self, master, row):
title="Include Tags (i.e. smoke, manual);"
self.include = self._create_entry(master, title,
', '.join(SETTINGS["include"]), row)
def _exclude(self, master, row):
title = "Exclude Tags (i.e. not-ready, some-other):"
self.exclude = self._create_entry(master, title,
', '.join(SETTINGS["exclude"]), row)
def _create_radio_buttons(self, master, title, value, row):
self._label(master, row, title)
variable = BooleanVar()
variable.set(value)
radio_buttons_container = CommonFrame(master)
radio_buttons_container.grid(row=row, column=1, sticky=W)
Radiobutton(radio_buttons_container, text="ON", value=True,
variable=variable).grid(row=0, column=0)
Radiobutton(radio_buttons_container, text="OFF", value=False,
variable=variable).grid(row=0, column=1)
return variable
def _create_entry(self, master, header, value, row):
self._label(master, row, header)
entry = Entry(master, width=50)
entry.insert(0, value)
entry.grid(row=row, column=1)
return entry
def apply(self):
ask_tags_added_to_modified_tests = self.ask_tags_added_to_modified_tests_at_startup.get()
tags_added_to_modified_tests = self._get_tags(self.addition_tags)
tags_allowed_only_once = self._get_tags(self.tags_allowed_only_once)
load_always = self.always_load_old_data_from_xml.get()
check_simultaneous = self.check_simultaneous_save.get()
include = self._get_tags(self.include)
exclude = self._get_tags(self.exclude)
self.new_settings = {"default_message":self.default_message.get(START, END).strip(),
"ask_tags_added_to_modified_tests_at_startup":ask_tags_added_to_modified_tests,
"tags_allowed_only_once":tags_allowed_only_once,
"tags_added_to_modified_tests":tags_added_to_modified_tests,
"always_load_old_data_from_xml":load_always,
"check_simultaneous_save":check_simultaneous,
"include":include,
"exclude":exclude,
}
def _get_tags(self, field):
return utils.get_tags_from_string(field.get())
def validate(self):
return True
class ChangeStatusDialog(AbstractTkDialog):
def __init__(self, parent):
AbstractTkDialog.__init__(self, parent, "Set Failed")
def body(self, master):
Label(master, text="Give reason for failure:").pack(fill=BOTH)
scrollbar = Scrollbar(master, orient=VERTICAL)
self.message_field = Text(master, yscrollcommand=scrollbar.set)
self.message_field.insert(START, SETTINGS["default_message"])
scrollbar.config(command=self.message_field.yview)
scrollbar.pack(side=RIGHT, fill=Y)
self.message_field.pack(fill=BOTH, expand=1)
return self.message_field # initial focus
def apply(self):
self.message = self.message_field.get(START, END).strip()
def validate(self):
return self.message_field.get(START, END).strip() != ''
class RemoveTagsDialog(AbstractTkDialog):
def __init__(self, parent, tags):
self._all_tags = tags
self.tags = []
AbstractTkDialog.__init__(self, parent, 'Remove Tags')
def body(self, master):
scrollbar = Scrollbar(master, orient=VERTICAL)
self.listbox = Listbox(master, selectmode=EXTENDED,
yscrollcommand=scrollbar.set)
scrollbar.config(command=self.listbox.yview)
scrollbar.pack(side=RIGHT, fill=Y)
self.listbox.pack(fill=BOTH, expand=1)
for tag in self._all_tags:
self.listbox.insert(END, tag)
def validate(self):
return self.listbox.curselection()
def apply(self):
self.tags = [ self._all_tags[int(i)] for i in self.listbox.curselection() ]
class CommonFrame(Frame):
def __init__(self, master, **cnf):
Frame.__init__(self, master, background='white', **cnf)
| qitaos/robotframework-mabot | src/mabot/ui/ui.py | Python | apache-2.0 | 7,825 |
import tkinter
from tkinter_async import *
import asyncio
from functools import partial
def make_submitter(root, label, button_text, callback_coro, respawn=Respawn.CONCURRENT):
frame = tkinter.Frame(root)
label = tkinter.Label(frame, text=label)
label.grid(row=0, column=0, sticky='w')
entry = tkinter.Entry(frame)
entry.grid(row=0, column=1, sticky='we')
@asyncio.coroutine
def submit_callback():
yield from callback_coro(entry.get())
button = tkinter.Button(frame, text=button_text,
command=spawn(submit_callback, respawn=respawn))
button.grid(row=0, column=2, sticky='we')
return frame
@asyncio.coroutine
def tk_app():
root = tkinter.Tk()
@asyncio.coroutine
def popup(text):
'''
This coroutine creates a popup dialogue with some text. It destroys the
popup five seconds later.
'''
with scoped_window(root) as popup:
label = tkinter.Label(popup, text=text)
label.grid()
yield from asyncio.sleep(5)
make_submitter(root, "Field 1:", "Go!", popup, Respawn.CONCURRENT).grid(row=0, column=0)
make_submitter(root, "Field 2:", "Go!", popup, Respawn.SKIP).grid(row=1, column=0)
make_submitter(root, "Field 3:", "Go!", popup, Respawn.CANCEL).grid(row=2, column=0)
@asyncio.coroutine
def exception_coro():
raise RuntimeError("This coroutine raised an exception")
tkinter.Button(root, text="Exception!", command=spawn(exception_coro)).grid(row=3)
@asyncio.coroutine
def confirm_quit():
with scoped_window(root) as popup:
label = tkinter.Label(popup, text="Are you sure you want to quit?")
label.grid(columnspan=2)
result = asyncio.Future()
do_quit = partial(result.set_result, False)
no_quit = partial(result.set_result, True)
yes = tkinter.Button(popup, text="Yes", command=do_quit)
yes.grid(row=1)
no = tkinter.Button(popup, text="No", command=no_quit)
no.grid(row=1, column=1)
popup.protocol("WM_DELETE_WINDOW", no_quit)
return (yield from result)
yield from async_mainloop(root, quit_coro=confirm_quit)
@asyncio.coroutine
def run():
yield from tk_app()
if __name__== "__main__":
asyncio.get_event_loop().run_until_complete(run())
| Lucretiel/tkinter-async | demo.py | Python | lgpl-3.0 | 2,383 |
from components.base.ecu.types.abst_ecu import AbstractECU
from components.base.ecu.hardware.impl_transceiver_std import StdTransceiver
from components.base.ecu.hardware.impl_controller_can_std import StdCanController
from components.base.ecu.software.ecu_software import ECUSoftware
from components.base.ecu.hardware.impl_micro_controller_std import StdMicrocontroller
from components.base.ecu.software.impl_app_layer_simple import SimpleApplicationLayer
from components.base.ecu.hardware.ecu_hardware import ECUHardware
from components.base.ecu.software.impl_comm_module_simple import StdCommModule
import config.timing_registration as time
from components.base.ecu.software.impl_physical_layers import StdPhysicalLayer
from tools.general import General as G
import uuid
from io_processing.surveillance_handler import MonitorTags, MonitorInput
from components.base.ecu.software.impl_datalink_layers import RapidDatalinkLayer, \
StdDatalinkLayer
from config.specification_set import GeneralSpecPreset
class CANGateway(AbstractECU):
''' Simply receives a signal and transmits it to all connected Busses '''
def __init__(self, sim_env=None, ecu_id=None, data_rate=None):
''' Constructor
Input: sim_env simpy.Environment environment of this component
ecu_id string id of the corresponding AbstractECU
data_rate float datarate of the ecu
Output: -
'''
self._GATEWAY = True
# set settings
self.set_settings()
self._connected_busses = []
self._transceivers = []
self._controller = []
self._physical_layer = []
self._datalink_layer = []
self._trans_bus_filter_values = {}
self._trans_bus_dep_filter_active = False
self._bus_dep_filter_active = False
self._filter_values = False
self._bus_filter_values = {} # key: Can ID value: lst allowed msg ids
# create hardware and software
if sim_env == None: return # no instantiation
AbstractECU.__init__(self, sim_env, uuid.uuid4(), data_rate)
self.ecuHW = ECUHardware(sim_env, StdTransceiver(sim_env), StdCanController(sim_env), StdMicrocontroller(sim_env))
self.ecuSW = ECUSoftware(sim_env, StdCommModule(sim_env), SimpleApplicationLayer(sim_env, ecu_id))
self._connect_hw_sw()
self._override_methods(self.ecuHW.transceiver)
# project parameters
self.GW_TRANSITION_PROCESS = time.GW_TRANSITION_PROCESS
def set_transmit_filter_from_can_dict(self, can_dict):
''' installs a filter at the HW that is connected
to the corresponding bus. Only defined messages will
be forwarded
key: can_id
value: values to be forwarded
            Input:    can_dict    dictionary    key: can_id; value: values to be forwarded
Output: -
'''
self._trans_bus_filter_values = can_dict
self._trans_bus_dep_filter_active = True
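    # Illustrative only (bus ids and message ids below are made up, ``gateway``
    # is a CANGateway instance): forward message ids 0x12 and 0x17 towards bus
    # 'CAN_0' and id 0x2A towards bus 'CAN_1'; anything else is dropped in the
    # forward direction:
    #
    #     gateway.set_transmit_filter_from_can_dict({'CAN_0': [0x12, 0x17],
    #                                                 'CAN_1': [0x2A]})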
def set_filter_from_can_dict(self, can_dict):
''' installs a filter at the HW that is connected
to the corresponding bus. Only the defined messages
will be received from the bus
key: can_id
value: values to be filtered
            Input:     can_dict    dictionary    key: can_id; value: values to be filtered
Output: -
'''
self._bus_dep_filter_active = True
self._bus_filter_values = can_dict
for ky in self._bus_filter_values:
trans = self._trans_by_bus_id(ky)
if trans == None: return
trans.install_filter(self._bus_filter_values[ky])
def install_filter(self, message_id_list):
''' installs a filter for all Busses on each
transceiver at every port
Input: message_id_list list list of message ids that are allowed
Output: -
'''
self.ecuHW.transceiver.install_filter(message_id_list)
for trans in self._transceivers:
trans.install_filter(message_id_list)
self._filter_values = message_id_list
def get_type_id(self):
''' returns the id of this ECU type
Input: -
Output: ecu_type string type of this ECU; e.g.'TLSECU'
'''
return "CAN_Gateway"
def set_settings(self):
''' sets the initial setting association between the settings variables
and the actual parameter
Input: -
Output: -
'''
self.settings = {}
self.settings['t_transition_process'] = 'GW_TRANSITION_PROCESS'
def _override_methods(self, transceiver):
''' overrides the transceivers get method
to be able to intercept incoming messages
Input: transceiver AbstractTransceiver transceiver of the gateway
Output: -
'''
transceiver.get = self._transceiver_get
def _trans_by_bus_id(self, bus_id):
''' returns the transceiver which is connected
to the bus with the given bus_id
Input: bus_id string identifier of the bus
Output: transceiver AbstractTransceiver transceiver that is connected to the bus
'''
for transceiver in self._transceivers:
if transceiver.connected_bus.comp_id == bus_id:
return transceiver
return None
def _transceiver_get(self, message):
''' this method overrides the get method of the transceiver and
directly redirects traffic coming from one transceiver to all
other connected transceivers
Thereby if specified, traffic in chosen directions is
filtered by letting pass only messages with specified
message identifiers
Input: message CANSegMessage message that is sent over this transceiver
Output: -
'''
# filter messages
if self.ecuHW.transceiver.filter_active:
if not message.message_identifier in self.ecuHW.transceiver.allowed_items: return
# bus dependent filter active: bus dependent elements are filtered out
if self._bus_dep_filter_active:
trans = self._trans_by_bus_id(message.current_bus)
if trans == None: return
if not message.message_identifier in trans.allowed_items:
return
# forward to data link layer
for i in range(len(self._connected_busses)):
# not to sender
if self._connected_busses[i].comp_id == message.current_bus:
continue
# Set the gateway Id to avoid loops
try: message.gw_id += [self.ecu_id]
except: message.gw_id = [self.ecu_id]
# forward message (if allowed)
if self._filter_forward(message.message_identifier, self._connected_busses[i].comp_id): continue
self.sim_env.process(self.put_delayed_message(self._datalink_layer[i], message))
def monitor_update(self):
''' returns the input for the monitor
Input: -
Output: monitor_list list List of MonitorInput objects
'''
lst = []
for i in range(len(self._connected_busses)):
try:
# buffer information
items_1 = len(self._datalink_layer[i].controller.receive_buffer.get_bytes())
items_2 = len(self._datalink_layer[i].controller.transmit_buffer.get_bytes())
lst.append(MonitorInput(items_1, MonitorTags.BT_ECU_RECEIVE_BUFFER, "BUS (%s) GW_%s" % (self._connected_busses[i].comp_id, self._ecu_id), self.sim_env.now))
lst.append(MonitorInput(items_2, MonitorTags.BT_ECU_TRANSMIT_BUFFER, "BUS (%s) GW_%s" % (self._connected_busses[i].comp_id, self._ecu_id), self.sim_env.now))
except:
pass
return lst
def put_delayed_message(self, dll_layer, message):
''' this method puts the passed message on the
passed data link layer after waiting the
specified gateway delay
Input: message CANFDSegMessage message that is forwarded
Output: -
'''
G().to_t(self.sim_env, self.GW_TRANSITION_PROCESS * self._jitter, 'GW_TRANSITION_PROCESS', self.__class__.__name__, self)
yield self.sim_env.timeout(self.GW_TRANSITION_PROCESS * self._jitter)
self.sim_env.process(dll_layer.put_msg(message))
@property
def connected_bus(self):
return self._connected_busses
@connected_bus.setter
def connected_bus(self, new_bus):
''' if called adds a new port to this
gateway. This port has all three layers
and a whole hardware equipment
Input: new_bus CANBus bus to be connected to new port
Output: -
'''
if new_bus != None:
# create whole layer package per connected Bus
self._connected_busses.append(new_bus)
# create layers
# preset used
if GeneralSpecPreset().enabled:
self._transceivers.append(StdTransceiver(self.sim_env))
self._controller.append(StdCanController(self.sim_env))
self._physical_layer.append(GeneralSpecPreset().physical_layer(self.sim_env))
self._datalink_layer.append(GeneralSpecPreset().datalink_layer(self.sim_env))
else:
self._transceivers.append(StdTransceiver(self.sim_env))
self._controller.append(StdCanController(self.sim_env))
self._physical_layer.append(StdPhysicalLayer(self.sim_env))
self._datalink_layer.append(StdDatalinkLayer(self.sim_env))
# interconnect new layers
self._datalink_layer[-1].controller = self._controller[-1]
self.sim_env.process(self._datalink_layer[-1].process())
self._datalink_layer[-1].physical_lay = self._physical_layer[-1]
self._physical_layer[-1].transceiver = self._transceivers[-1]
self._physical_layer[-1].transceiver.connected_bus = self._connected_busses[-1]
# intercept gateway methods
self._override_methods(self._physical_layer[-1].transceiver)
# activate filter
if self._filter_values: # install the fixed filter for all ecus
self._transceivers[-1].install_filter(self._filter_values)
if new_bus.comp_id in self._bus_filter_values: # install the filter for special busses
self._transceivers[-1].install_filter(self._bus_filter_values[new_bus.comp_id])
def _filter_forward(self, message_id, bus_id):
''' filters messages in the forward direction. Returns
true if the message has to be filtered.
Input: message_id integer message id of the message considered
bus_id string Identifier of the bus under consideration
Output: -
'''
if self._trans_bus_dep_filter_active:
try:
allowed = self._trans_bus_filter_values[bus_id]
except:
return True
if not message_id in allowed:
return True
return False
| PhilippMundhenk/IVNS | ECUSimulation/components/base/gateways/impl_can_gateway.py | Python | mit | 12,344 |
"""
About this library
==================
Jenkins is the market leading continuous integration system, originally created by Kohsuke Kawaguchi.
This API makes Jenkins even easier to use by providing a conventional, easy-to-use Python interface.
Jenkins (and its predecessor Hudson) are fantastic projects - but they are somewhat Java-centric.
Thankfully the designers have provided an excellent and complete REST interface. This library
wraps up that interface as more conventional python objects in order to make most Jenkins oriented
tasks simpler.
This library can help you:
* Query the test-results of a completed build
 * Get objects representing the latest builds of a job
* Search for artefacts by simple criteria
* Block until jobs are complete
* Install artefacts to custom-specified directory structures
* username/password auth support for jenkins instances with auth turned on
* Ability to search for builds by subversion revision
* Ability to add/remove/query jenkins slaves
Installing JenkinsAPI
=====================
Egg-files for this project are hosted on PyPi. Most Python users should be able to use pip or distribute
to automatically install this project.
Most users can do the following:
easy_install jenkinsapi
If you'd like to install in multi-version mode:
easy_install -m jenkinsapi
Project Authors
===============
* Salim Fadhley ([email protected])
* Ramon van Alteren ([email protected])
* Ruslan Lutsenko ([email protected])
Current code lives on github: https://github.com/salimfadhley/jenkinsapi
"""
import pkg_resources
from jenkinsapi import (
# Modules
command_line,
utils,
# Files
api, artifact, build, config, constants, custom_exceptions, fingerprint, executors, executor,
jenkins, jenkinsbase, job, node, result_set, result, view
)
__all__ = [
"command_line", "utils",
"api", "artifact", "build", "config", "constants", "custom_exceptions", "executors", "executor",
"fingerprint", "jenkins", "jenkinsbase", "job", "node", "result_set", "result", "view"
]
__docformat__ = "epytext"
__version__ = pkg_resources.working_set.by_key['jenkinsapi'].version
| 117111302/jenkinsapi | jenkinsapi/__init__.py | Python | mit | 2,160 |
import sys
import cherrypy
def process_body():
"""Return (params, method) from request body."""
try:
import xmlrpclib
return xmlrpclib.loads(cherrypy.request.body.read())
except Exception:
return ('ERROR PARAMS', ), 'ERRORMETHOD'
def patched_path(path):
"""Return 'path', doctored for RPC."""
if not path.endswith('/'):
path += '/'
if path.startswith('/RPC2/'):
# strip the first /rpc2
path = path[5:]
return path
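# For example (following the branches above):
#   patched_path('/RPC2/foo') -> '/foo/'   and   patched_path('/foo') -> '/foo/'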
def _set_response(body):
# The XML-RPC spec (http://www.xmlrpc.com/spec) says:
# "Unless there's a lower-level error, always return 200 OK."
# Since Python's xmlrpclib interprets a non-200 response
# as a "Protocol Error", we'll just return 200 every time.
response = cherrypy.response
response.status = '200 OK'
response.body = body
response.headers['Content-Type'] = 'text/xml'
response.headers['Content-Length'] = len(body)
def respond(body, encoding='utf-8', allow_none=0):
import xmlrpclib
if not isinstance(body, xmlrpclib.Fault):
body = (body,)
_set_response(xmlrpclib.dumps(body, methodresponse=1,
encoding=encoding,
allow_none=allow_none))
def on_error(*args, **kwargs):
body = str(sys.exc_info()[1])
import xmlrpclib
_set_response(xmlrpclib.dumps(xmlrpclib.Fault(1, body)))
| cread/ec2id | cherrypy/lib/xmlrpc.py | Python | apache-2.0 | 1,470 |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
from pymatgen.core import Lattice
from pymatgen.electronic_structure.core import Orbital, Spin, Magmom
import numpy as np
class SpinTest(unittest.TestCase):
def test_init(self):
self.assertEqual(int(Spin.up), 1)
self.assertEqual(int(Spin.down), -1)
def test_from_int(self):
self.assertEqual(Spin(1), Spin.up)
self.assertEqual(Spin(-1), Spin.down)
self.assertRaises(ValueError, Spin, 0)
def test_cached(self):
self.assertEqual(id(Spin(1)), id(Spin.up))
class OrbitalTest(unittest.TestCase):
def test_init(self):
for orb in Orbital:
self.assertEqual(Orbital(orb.value), orb)
self.assertRaises(ValueError, Orbital, 100)
def test_cached(self):
self.assertEqual(id(Orbital(0)), id(Orbital.s))
class MagmomTest(unittest.TestCase):
def test_init(self):
# backwards compatibility for scalar-like magmoms
magmom = Magmom(2.0)
self.assertEqual(float(magmom), 2.0)
# backwards compatibility for list-like magmoms
magmom2 = Magmom([1, 2, 3])
self.assertEqual(list(magmom2), [1, 2, 3])
self.assertEqual(magmom2.global_moment.tolist(), [1, 2, 3])
# non-default saxis, normalized internally
magmom3 = Magmom([1, 2, 3], saxis=[1, 1, 1])
self.assertTrue(np.allclose(magmom3.saxis, [np.sqrt(1/3.)]*3))
# test construction from known global moment and desired, non-default saxis
magmom4 = Magmom.from_global_moment_and_saxis([1, 2, 3], saxis=[1, 0, 0])
self.assertTrue(np.allclose(magmom4.moment, [-3, 2, 1]))
# test global moments with non-default saxis
magmom5 = Magmom([-3, 2, 1], saxis=[1, 0, 0])
self.assertTrue(np.allclose(magmom5.global_moment, [1, 2, 3]))
def test_get_moments(self):
# simple cases
magmom_along_x = Magmom([1, 0, 0])
self.assertTrue(np.allclose(magmom_along_x.get_moment(saxis=[1, 0, 0]), [0, 0, 1]))
magmom_along_y = Magmom([0, 1, 0])
self.assertTrue(np.allclose(magmom_along_y.get_moment(saxis=[0, 1, 0]), [0, 0, 1]))
# test transformations
magmoms = [[0, 0, 0],
[0, 0, 1],
[0, 0, -1],
[1, 2, 3],
[-1, 2, 3],
[-1, -2, -3]]
for magmom in magmoms:
magmom1 = Magmom(magmom)
# transform to non-default saxis
magmom2 = magmom1.get_00t_magmom_with_xyz_saxis()
# and back to default saxis
magmom3 = magmom2.get_xyz_magmom_with_001_saxis()
self.assertTrue(np.allclose(magmom1.moment, magmom))
self.assertTrue(np.allclose(magmom1.saxis, [0, 0, 1]))
self.assertTrue(np.allclose(magmom1.get_moment(saxis=magmom1.saxis), magmom1.moment))
self.assertTrue(np.allclose(magmom1.get_moment(saxis=magmom2.saxis), magmom2.moment))
self.assertTrue(np.allclose(magmom2.get_moment(saxis=[0, 0, 1]), magmom1.moment))
self.assertTrue(np.allclose(magmom2.get_moment(saxis=magmom2.saxis), magmom2.moment))
self.assertTrue(np.allclose(magmom3.moment, magmom1.moment))
def test_is_collinear(self):
magmoms_list = [[0, 0, 0],
[1, 1, 1],
[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 1], [0, 0, 1], [0, 0, 1]],
[[0, 0, -1], [0, 0, 1], [0, 0, 1]],
[[2, 2, 2], [-2, -2, -2], [2, 2, 2]]]
for magmoms in magmoms_list:
self.assertEqual(Magmom.are_collinear(magmoms), True)
ncl_magmoms = [[[0, 0, 1], [0, 0, 1], [1, 2, 3]]]
self.assertEqual(Magmom.are_collinear(ncl_magmoms), False)
def test_have_consistent_saxis(self):
magmom1 = Magmom([1, 2, 3])
magmom2 = Magmom([1, 2, 3])
magmom3 = Magmom([1, 2, 3], saxis=[0, 0, -1])
magmom4 = Magmom([1, 2, 3], saxis=[1, 2, 3])
self.assertTrue(Magmom.have_consistent_saxis([magmom1, magmom2]))
self.assertFalse(Magmom.have_consistent_saxis([magmom1, magmom3]))
self.assertFalse(Magmom.have_consistent_saxis([magmom1, magmom4]))
def test_get_consistent_set_and_saxis(self):
magmoms = [1, 1, 2, 2, 0, 0, 2]
magmoms, saxis = Magmom.get_consistent_set_and_saxis(magmoms)
self.assertTrue(np.allclose(saxis, [0, 0, 1]))
magmoms = [[0, 0, 0],
[1, 1, 1],
[2, 2, 2]]
magmoms, saxis = Magmom.get_consistent_set_and_saxis(magmoms)
self.assertTrue(np.allclose(saxis, [np.sqrt(1/3.)]*3))
def test_relative_to_crystal_axes(self):
lattice = Lattice.from_parameters(5, 10, 5, 90, 110, 90)
moment = [1, 0, 2]
magmom = Magmom.from_moment_relative_to_crystal_axes(moment, lattice)
self.assertTrue(np.allclose(magmom.moment, [0.93969262, 0.0, 1.65797986]))
self.assertTrue(np.allclose(magmom.get_moment_relative_to_crystal_axes(lattice), moment))
def test_equality(self):
self.assertTrue(Magmom([1, 1, 1]) == Magmom([1, 1, 1]))
self.assertFalse(Magmom([1, 1, 2]) == Magmom([1, 1, 1]))
self.assertTrue(Magmom([0, 0, 10]) == 10)
def test_negative(self):
self.assertEqual(-Magmom([1, 2, 3]), Magmom([-1, -2, -3]))
if __name__ == '__main__':
unittest.main()
| dongsenfo/pymatgen | pymatgen/electronic_structure/tests/test_core.py | Python | mit | 5,553 |
# Copyright 2017 FiberHome Telecommunication Technologies CO.,LTD
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v3 import extensions_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestExtensionsClient(base.BaseServiceTest):
FAKE_EXTENSION_LIST = {
"extensions": [
{
"updated": "2012-03-12T00:00:00+00:00",
"name": "QuotaClasses",
"links": [],
"namespace": "fake-namespace-1",
"alias": "os-quota-class-sets",
"description": "Quota classes management support."
},
{
"updated": "2013-05-29T00:00:00+00:00",
"name": "VolumeTransfer",
"links": [],
"namespace": "fake-namespace-2",
"alias": "os-volume-transfer",
"description": "Volume transfer management support."
},
{
"updated": "2014-02-10T00:00:00+00:00",
"name": "VolumeManage",
"links": [],
"namespace": "fake-namespace-3",
"alias": "os-volume-manage",
"description": "Manage existing backend storage by Cinder."
}
]
}
def setUp(self):
super(TestExtensionsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = extensions_client.ExtensionsClient(fake_auth,
'volume',
'regionOne')
def _test_list_extensions(self, bytes_body=False):
self.check_service_client_function(
self.client.list_extensions,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_EXTENSION_LIST,
bytes_body)
def test_list_extensions_with_str_body(self):
self._test_list_extensions()
def test_list_extensions_with_bytes_body(self):
self._test_list_extensions(bytes_body=True)
| masayukig/tempest | tempest/tests/lib/services/volume/v3/test_extensions_client.py | Python | apache-2.0 | 2,693 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CheckGroupMembershipParameters(Model):
"""Request parameters for IsMemberOf API call.
:param group_id: The object ID of the group to check.
:type group_id: str
:param member_id: The object ID of the contact, group, user, or service
principal to check for membership in the specified group.
:type member_id: str
"""
_validation = {
'group_id': {'required': True},
'member_id': {'required': True},
}
_attribute_map = {
'group_id': {'key': 'groupId', 'type': 'str'},
'member_id': {'key': 'memberId', 'type': 'str'},
}
def __init__(self, group_id, member_id):
self.group_id = group_id
self.member_id = member_id
| v-iam/azure-sdk-for-python | azure-graphrbac/azure/graphrbac/models/check_group_membership_parameters.py | Python | mit | 1,230 |
from larray_editor.api import *
__version__ = '0.34-dev'
| larray-project/larray-editor | larray_editor/__init__.py | Python | gpl-3.0 | 58 |
#!/usr/bin/env python
# *-* coding:utf-8 *-*
"""
Date :
Author : Vianney Gremmel [email protected]
"""
from math import factorial
from time import time
start = time()
digits_factorial_sum = lambda n: sum(factorial(int(d)) for d in str(n))
class FactorialLoopsBuilder(dict):
def __missing__(self, k):
r = self[k] = digits_factorial_sum(k)
return r
def __call__(self, n):
res = [n,]
n = self[n]
while n not in res:
res.append(n)
            n = self[n]
return res
flb = FactorialLoopsBuilder()
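# flb(n) returns the chain of repeated digit-factorial sums starting at n, up
# to (but not including) the first repeated term; e.g., per the Project Euler
# 74 statement, 69 -> 363600 -> 1454 -> 169 -> 363601 (then 1454 repeats), a
# chain of 5 non-repeating terms. The line below counts how many starting
# numbers below one million produce a chain of exactly 60 non-repeating terms.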
print sum(1 for n in xrange(1000000) if len(flb(n)) == 60)
| vianney-g/python-exercices | eulerproject/pb0074.py | Python | gpl-2.0 | 629 |
"""
Command line utility for the Mesosphere Datacenter Operating
System (DCOS)
'dcos help' lists all available subcommands. See 'dcos <command> --help'
to read about a specific subcommand.
Usage:
dcos [options] [<command>] [<args>...]
Options:
--help Show this screen
--version Show version
--log-level=<log-level> If set then print supplementary messages to
stderr at or above this level. The severity
levels in the order of severity are: debug,
info, warning, error, and critical. E.g.
Setting the option to warning will print
warning, error and critical messages to stderr.
Note: that this does not affect the output sent
to stdout by the command.
--debug If set then enable further debug messages which
are sent to stdout.
Environment Variables:
DCOS_LOG_LEVEL If set then it specifies that message should be
printed to stderr at or above this level. See
the --log-level option for details.
DCOS_CONFIG This environment variable points to the
location of the DCOS configuration file.
[default: ~/.dcos/dcos.toml]
DCOS_DEBUG If set then enable further debug messages which
are sent to stdout.
"""
import os
import signal
import sys
from functools import wraps
from subprocess import PIPE, Popen
import dcoscli
import docopt
from dcos import auth, constants, emitting, errors, http, subcommand, util
from dcos.errors import DCOSException
from dcoscli import analytics
emitter = emitting.FlatEmitter()
def main():
try:
return _main()
except DCOSException as e:
emitter.publish(e)
return 1
def _main():
signal.signal(signal.SIGINT, signal_handler)
args = docopt.docopt(
__doc__,
version='dcos version {}'.format(dcoscli.version),
options_first=True)
log_level = args['--log-level']
if log_level and not _config_log_level_environ(log_level):
return 1
if args['--debug']:
os.environ[constants.DCOS_DEBUG_ENV] = 'true'
util.configure_process_from_environ()
if args['<command>'] != 'config' and \
not auth.check_if_user_authenticated():
auth.force_auth()
command = args['<command>']
http.silence_requests_warnings()
if not command:
command = "help"
executable = subcommand.command_executables(command)
subproc = Popen([executable, command] + args['<args>'],
stderr=PIPE)
if dcoscli.version != 'SNAPSHOT':
return analytics.wait_and_track(subproc)
else:
return analytics.wait_and_capture(subproc)[0]
def _config_log_level_environ(log_level):
"""
:param log_level: Log level to set
:type log_level: str
:returns: True if the log level was configured correctly; False otherwise.
:rtype: bool
"""
log_level = log_level.lower()
if log_level in constants.VALID_LOG_LEVEL_VALUES:
os.environ[constants.DCOS_LOG_LEVEL_ENV] = log_level
return True
msg = 'Log level set to an unknown value {!r}. Valid values are {!r}'
emitter.publish(msg.format(log_level, constants.VALID_LOG_LEVEL_VALUES))
return False
def signal_handler(signal, frame):
emitter.publish(
errors.DefaultError("User interrupted command with Ctrl-C"))
sys.exit(0)
def decorate_docopt_usage(func):
"""Handle DocoptExit exception
:param func: function
:type func: function
:return: wrapped function
:rtype: function
"""
@wraps(func)
def wrapper(*args, **kwargs):
try:
result = func(*args, **kwargs)
except docopt.DocoptExit as e:
emitter.publish("Command not recognized\n")
emitter.publish(e)
return 1
return result
return wrapper
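# Sketch of how the decorator above is typically applied to a docopt-based
# subcommand entry point (illustrative only; the wrapped function shown here
# is hypothetical):
#
#     @decorate_docopt_usage
#     def _cmd_main():
#         args = docopt.docopt(__doc__)
#         ...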
| Yhgenomics/dcos-cli | cli/dcoscli/main.py | Python | apache-2.0 | 4,242 |
from django.http import HttpResponse
from django.views.generic import TemplateView, View
from django import template
from core.models import *
import json
import os
import time
import tempfile
class MCordView(TemplateView):
head_template = r"""{% extends "admin/dashboard/dashboard_base.html" %}
{% load admin_static %}
{% block content %}
"""
tail_template = r"{% endblock %}"
def get(self, request, name="root", *args, **kwargs):
head_template = self.head_template
tail_template = self.tail_template
title = request.GET.get('service', '')
url = "/mcord/?service=%s" % (title)
form = """
<h2 class="content-title">Change %s Service</h2>
<div id="content-main">
<form class="form-horizontal">
<div class="tab-content tab-content-main">
<div class="suit-include suit-tab suit-tab-administration hide">
<div class="left-nav">
<ul>
<li><a href="/admin/ceilometer/monitoringchannel/">Monitoring Channels</a></li>
</ul>
</div>
</div>
<fieldset class="module aligned suit-tab suit-tab-general show">
<div class="panel fieldset-body">
<div class="form-group field-backend_status_text ">
<label class="control-label col-xs-12 col-sm-2"><label>Backend status text:</label></label>
<div class="form-column col-xs-12 col-sm-8 col-md-6 col-lg-4">
<p><img src="/static/admin/img/icon_clock.gif"> Pending sync, last_status = 0 - Provisioning in progress</p>
</div>
</div>
<div class="form-group field-name ">
<label class="control-label col-xs-12 col-sm-2"><label class="required" for="id_name">Name:</label></label>
<div class="form-column widget-AdminTextInputWidget col-xs-12 col-sm-8 col-md-6 col-lg-4">
<input class="vTextField form-control" id="id_name" maxlength="30" name="name" type="text" value="%s">
<div class="help-block">Service Name</div>
</div>
</div>
<div class="form-group field-enabled ">
<label class="control-label col-xs-12 col-sm-2"><label class="vCheckboxLabel" for="id_enabled">Enabled</label></label>
<div class="form-column widget-CheckboxInput col-xs-12 col-sm-8 col-md-6 col-lg-4">
<input checked="checked" id="id_enabled" name="enabled" type="checkbox">
</div>
</div>
<div class="form-group field-versionNumber ">
<label class="control-label col-xs-12 col-sm-2"><label class="required" for="id_versionNumber">VersionNumber:</label></label>
<div class="form-column widget-AdminTextInputWidget col-xs-12 col-sm-8 col-md-6 col-lg-4">
<input class="vTextField form-control" id="id_versionNumber" maxlength="30" name="versionNumber" type="text">
<div class="help-block">Version of Service Definition</div>
</div>
</div>
<div class="form-group field-description ">
<label class="control-label col-xs-12 col-sm-2"><label for="id_description">Description:</label></label>
<div class="form-column widget-AdminTextareaWidget col-xs-12 col-sm-8 col-md-6 col-lg-4">
<textarea class="vLargeTextField form-control" cols="40" id="id_description" maxlength="254" name="description" rows="10"></textarea>
<div class="help-block">Description of Service</div>
</div>
</div>
<div class="form-group field-view_url ">
<label class="control-label col-xs-12 col-sm-2"><label for="id_view_url">View url:</label></label>
<div class="form-column widget-AdminTextInputWidget col-xs-12 col-sm-8 col-md-6 col-lg-4">
<input class="vTextField form-control" id="id_view_url" maxlength="1024" name="view_url" type="text" value="%s">
</div>
</div>
<div class="form-group field-icon_url ">
<label class="control-label col-xs-12 col-sm-2"><label for="id_icon_url">Icon url:</label></label>
<div class="form-column widget-AdminTextInputWidget col-xs-12 col-sm-8 col-md-6 col-lg-4">
<input class="vTextField form-control" id="id_icon_url" maxlength="1024" name="icon_url" type="text">
</div>
</div>
</div>
</fieldset>
</div>
</form>
<div class="form-buttons clearfix">
<button type="submit" class="btn btn-high btn-success" name="_save">Save</button>
<button type="submit" name="_continue" class=" btn btn-high btn-info">Save and continue editing</button>
<button type="submit" name="_addanother" class="btn btn-info">Save and add another</button>
<a href="delete/" class="text-error deletelink">Delete</a>
</div>
</div>
""" % (title, title, url)
t = template.Template(head_template + form + tail_template)
response_kwargs = {}
response_kwargs.setdefault('content_type', self.content_type)
return self.response_class(
request=request,
template=t,
**response_kwargs
)
| cboling/xos | xos/services/mcord/view.py | Python | apache-2.0 | 6,362 |
from django.conf.urls import patterns, url
from imager_images.views import AlbumAddView, AlbumUpdateView, AlbumDeleteView
from imager_images.views import PhotoAddView, PhotoUpdateView, PhotoDeleteView
from django.contrib.auth.decorators import login_required
urlpatterns = patterns('',
url(r'library/',
'imager_images.views.library',
name='library'),
url(r'stream/', 'imager_images.views.stream',
name='stream'),
url(r'album/add/$',
login_required(AlbumAddView.as_view()),
name='album_add'),
url(r'album/update/(?P<pk>\d+)/$',
login_required(AlbumUpdateView.as_view()),
name='album_update'),
url(r'album/delete/(?P<pk>\d+)/$',
login_required(AlbumDeleteView.as_view()),
name='album_delete'),
url(r'^photo/add/$',
login_required(PhotoAddView.as_view()),
name='photo_add'),
url(r'^photo/update/(?P<pk>\d+)/$',
login_required(PhotoUpdateView.as_view()),
name='photo_update'),
url(r'^photo/delete/(?P<pk>\d+)/$',
login_required(PhotoDeleteView.as_view()),
name='photo_delete'),
url(r'^album/(?P<pk>\d+)/$',
'imager_images.views.AlbumPhotoList',
name='albumphoto_list'),
url(r'^photos/loose/$',
'imager_images.views.LoosePhotosList',
name='loosephotos_list'),
url(r'^photos/all/$',
'imager_images.views.AllPhotosList',
name='allphotos_list')
)
| henrykh/django-imager | imager_images/urls.py | Python | mit | 2,083 |
import ert.util
from cwrap import BaseCClass
from ert.enkf import EnkfPrototype, LocalObsdata, LocalObsdataNode, LocalDataset
class LocalMinistep(BaseCClass):
TYPE_NAME = "local_ministep"
_alloc = EnkfPrototype("void* local_ministep_alloc(char*)", bind = False)
_add_node = EnkfPrototype("void local_ministep_add_obsdata_node(local_ministep, local_obsdata_node)")
_get_local_obs_data = EnkfPrototype("local_obsdata_ref local_ministep_get_obsdata(local_ministep)")
_get_local_data = EnkfPrototype("local_dataset_ref local_ministep_get_dataset(local_ministep , char*)")
_has_local_data = EnkfPrototype("bool local_ministep_has_dataset(local_ministep , char*)")
_free = EnkfPrototype("void local_ministep_free(local_ministep)")
_attach_obsdata = EnkfPrototype("void local_ministep_add_obsdata(local_ministep, local_obsdata)")
_attach_dataset = EnkfPrototype("void local_ministep_add_dataset(local_ministep, local_dataset)")
_name = EnkfPrototype("char* local_ministep_get_name(local_ministep)")
_data_size = EnkfPrototype("int local_ministep_get_num_dataset(local_ministep)")
def __init__(self, ministep_key):
raise NotImplementedError("Class can not be instantiated directly!")
    # Will use the data keys and ignore observation keys.
def __getitem__(self, data_key):
if isinstance(data_key, int):
raise TypeError('Keys must be strings, not int!')
if data_key in self:
return self._get_local_data(data_key)
else:
raise KeyError('No such data key: "%s"' % data_key)
def __len__(self):
return self._data_size()
def __contains__(self , data_key):
return self._has_local_data(data_key)
def addNode(self, node):
assert isinstance(node, LocalObsdataNode)
self._add_node(node)
def attachObsset(self, obs_set):
assert isinstance(obs_set, LocalObsdata)
self._attach_obsdata(obs_set)
def attachDataset(self, dataset):
assert isinstance(dataset, LocalDataset)
self._attach_dataset(dataset)
def getLocalObsData(self):
""" @rtype: LocalObsdata """
return self._get_local_obs_data()
def name(self):
return self._name()
def getName(self):
""" @rtype: str """
return self.name()
def free(self):
self._free()
def __repr__(self):
return 'LocalMinistep(name = %s, len = %d) at 0x%x' % (self.name(), len(self), self._address())
| Ensembles/ert | python/python/ert/enkf/local_ministep.py | Python | gpl-3.0 | 2,602 |
import test_utils
class TestLocaleMiddleware(test_utils.TestCase):
def test_default_redirect(self):
# User wants en-us, we send en-US
response = self.client.get('/', follow=True,
HTTP_ACCEPT_LANGUAGE='en-us')
self.assertRedirects(response, '/en-US/', status_code=301)
# User wants fr-FR, we send fr
response = self.client.get('/', follow=True,
HTTP_ACCEPT_LANGUAGE='fr-fr')
self.assertRedirects(response, '/fr/', status_code=301)
# User wants xx, we send en-US
response = self.client.get('/', follow=True,
HTTP_ACCEPT_LANGUAGE='xx')
self.assertRedirects(response, '/en-US/', status_code=301)
# User doesn't know what they want, we send en-US
response = self.client.get('/', follow=True,
HTTP_ACCEPT_LANGUAGE='')
self.assertRedirects(response, '/en-US/', status_code=301)
def test_mixed_case_header(self):
"""Accept-Language is case insensitive."""
response = self.client.get('/', follow=True,
HTTP_ACCEPT_LANGUAGE='en-US')
self.assertRedirects(response, '/en-US/', status_code=301)
def test_specificity(self):
"""Requests for /fr-FR/ should end up on /fr/"""
        response = self.client.get('/fr-FR/', follow=True)
        self.assertRedirects(response, '/fr/', status_code=301)
def test_partial_redirect(self):
"""Ensure that /en/ gets directed to /en-US/."""
response = self.client.get('/en/', follow=True)
self.assertRedirects(response, '/en-US/', status_code=301)
def test_lower_to_upper(self):
"""/en-us should redirect to /en-US."""
response = self.client.get('/en-us/', follow=True)
self.assertRedirects(response, '/en-US/', status_code=301)
| mastizada/kuma | kuma/core/tests/test_locale_middleware.py | Python | mpl-2.0 | 1,930 |
# -*- coding: utf-8 -*-
from crypt import Crypt
import base64
from gi.repository import Gtk
import ntpath
class Controller:
def __init__(self):
self.text_tabs = {'None(0)': 'Sin nombre'}
self.tabs = {0: 'None(0)'}
def base64_encode(self, gui):
textview = self.get_current_textview(gui)
textbuffer = Gtk.TextBuffer()
textbuffer = textview.get_buffer()
texto = textbuffer.get_text(textbuffer.get_start_iter(),
textbuffer.get_end_iter(), True)
texto_encoded = base64.b64encode(texto)
textbuffer.set_text(texto_encoded)
textview.set_buffer(textbuffer)
def base64_decode(self, gui):
textview = self.get_current_textview(gui)
textbuffer = Gtk.TextBuffer()
textbuffer = textview.get_buffer()
buffer_start = textbuffer.get_start_iter()
buffer_end = textbuffer.get_end_iter()
texto = textbuffer.get_text(buffer_start, buffer_end, True)
try:
texto_encoded = base64.b64decode(texto)
except TypeError:
print('Type Error')
texto_encoded = texto
textbuffer.set_text(texto_encoded)
textview.set_buffer(textbuffer)
def new_file(self, gui):
scrolled = Gtk.ScrolledWindow()
textview = Gtk.TextView()
textview.set_wrap_mode(1)
scrolled.add(textview)
self.new_tab(gui, scrolled)
print((self.text_tabs))
def open_file(self, gui, file_dialog):
textview = Gtk.TextView()
textview.set_wrap_mode(1)
scrolled = Gtk.ScrolledWindow()
filename = file_dialog.get_filename()
try:
f = open(filename, 'r')
content = f.read()
        except IOError:
            print('I/O Error')
            content = ''
text_buffer = Gtk.TextBuffer()
text_buffer.set_text(content)
textview.set_buffer(text_buffer)
scrolled.add(textview)
gui.current_textview = textview
justname = ntpath.basename(filename)
self.new_tab(gui, scrolled, filename, justname)
file_dialog.hide()
def save_file(self, gui, file_dialog):
textview = self.get_current_textview(gui)
notebook = gui.builder.get_object('notebook')
scrolled = gui.builder.get_object('scrolledwindow1')
textbuffer = Gtk.TextBuffer()
textbuffer = textview.get_buffer()
buffer_start = textbuffer.get_start_iter()
buffer_end = textbuffer.get_end_iter()
content = textbuffer.get_text(buffer_start, buffer_end, True)
filename = file_dialog.get_filename()
try:
f = open(filename, 'w')
f.write(content)
except IOError:
print ('I/O Error')
justname = ntpath.basename(filename)
notebook.set_tab_label_text(scrolled, justname)
file_dialog.hide()
del self.text_tabs[filename]
def encrypt_text(self, gui, password_entry):
textentry = self.get_current_textview(gui)
password = password_entry.get_text()
textbuffer = Gtk.TextBuffer()
textbuffer = textentry.get_buffer()
buffer_start = textbuffer.get_start_iter()
buffer_end = textbuffer.get_end_iter()
content = textbuffer.get_text(buffer_start, buffer_end, True)
key = password
Cipher = Crypt()
content_crypted = Cipher.encrypt(key, content)
try:
textbuffer.set_text(content_crypted)
textentry.set_buffer(textbuffer)
except TypeError:
print("Type Error")
def decrypt_text(self, gui, password_entry):
textentry = self.get_current_textview(gui)
password = password_entry.get_text()
textbuffer = Gtk.TextBuffer()
textbuffer = textentry.get_buffer()
buffer_start = textbuffer.get_start_iter()
buffer_end = textbuffer.get_end_iter()
content = textbuffer.get_text(buffer_start, buffer_end, True)
key = password
Cipher = Crypt()
content_decrypted = Cipher.decrypt(key, content)
try:
textbuffer.set_text(content_decrypted)
textentry.set_buffer(textbuffer)
except TypeError:
print("Type Error")
def get_current_textview(self, gui):
notebook = gui.builder.get_object('notebook')
page = notebook.get_current_page()
scrolled = notebook.get_nth_page(page)
children = scrolled.get_children()
textview = children[0]
return textview
def new_tab(self, gui, child, filename='None', name='Sin nombre'):
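        # Editor's note (comment added for clarity, not in the original source):
        # this helper appends `child` (a scrolled text view) as a new notebook
        # page, focuses it, and records it in the two bookkeeping dicts:
        # `self.text_tabs` maps a filename key -- or a synthetic 'None(<page>)'
        # key for unsaved buffers -- to the tab label, while `self.tabs` maps
        # the page index to that key.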
notebook = gui.builder.get_object('notebook')
notebook.append_page(child, None)
notebook.set_tab_label_text(child, name)
notebook.show_all()
notebook.set_current_page(-1)
page = notebook.get_current_page()
if filename == 'None':
filename = 'None(' + str(page) + ')'
if filename in self.text_tabs:
filename = 'None(' + str(page + page) + ')'
self.text_tabs[filename] = name
self.tabs[page] = filename
def close_file_tab(self, gui):
notebook = gui.builder.get_object('notebook')
page = notebook.get_current_page()
notebook.remove_page(page)
| vladzur/textcrypt | src/controller.py | Python | gpl-3.0 | 5,267 |
#!/usr/bin/env python
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/%s' % FOLDER_GREP
CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' %s | grep -v '%s'" % (CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' %s" % (CMD_ROOT_DIR)
REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')
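# Illustrative matches for the two regexes above (editor's note; the source
# lines shown are hypothetical examples, not quotes from the codebase):
#   'GetArg("-datadir", "")'                -> REGEX_ARG captures '-datadir'
#   'mapArgs.count("-debug")'               -> REGEX_ARG captures '-debug'
#   'HelpMessageOpt("-datadir=<dir>", ...)' -> REGEX_DOC captures '-datadir'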
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet'])
def main():
used = check_output(CMD_GREP_ARGS, shell=True)
docd = check_output(CMD_GREP_DOCS, shell=True)
args_used = set(re.findall(REGEX_ARG,used))
args_docd = set(re.findall(REGEX_DOC,docd)).union(SET_DOC_OPTIONAL)
args_need_doc = args_used.difference(args_docd)
args_unknown = args_docd.difference(args_used)
print "Args used : %s" % len(args_used)
print "Args documented : %s" % len(args_docd)
print "Args undocumented: %s" % len(args_need_doc)
print args_need_doc
print "Args unknown : %s" % len(args_unknown)
print args_unknown
exit(len(args_need_doc))
if __name__ == "__main__":
main()
| hyperwang/bitcoin | contrib/devtools/check-doc.py | Python | mit | 1,702 |
from utils import set_state_task
from utils import operation_task
from utils import link_tasks
from utils import CustomContext
from utils import generate_native_node_workflows
# content of this fn can be generated by workflow plugin (see workflows.py in generated blueprint)
def _build_tasks(ctx, graph, custom_context):
# just paste here the generated code
custom_context.add_customized_wf_node('PHP')
set_state_task(ctx, graph, 'PHP', 'starting', 'PHP_starting', custom_context)
custom_context.add_customized_wf_node('Wordpress')
set_state_task(ctx, graph, 'Wordpress', 'started', 'Wordpress_started', custom_context)
custom_context.add_customized_wf_node('Mysql')
set_state_task(ctx, graph, 'Mysql', 'configuring', 'Mysql_configuring', custom_context)
operation_task(ctx, graph, 'Mysql', 'cloudify.interfaces.lifecycle.create', 'create_Mysql', custom_context)
custom_context.add_customized_wf_node('Apache')
set_state_task(ctx, graph, 'Apache', 'initial', 'Apache_initial', custom_context)
operation_task(ctx, graph, 'Mysql', 'cloudify.interfaces.lifecycle.configure', 'configure_Mysql', custom_context)
custom_context.add_customized_wf_node('Wordpress')
set_state_task(ctx, graph, 'Wordpress', 'configured', 'Wordpress_configured', custom_context)
custom_context.register_native_delegate_wf_step('Compute', 'Compute_install')
custom_context.add_customized_wf_node('Wordpress')
set_state_task(ctx, graph, 'Wordpress', 'starting', 'Wordpress_starting', custom_context)
custom_context.add_customized_wf_node('Mysql')
set_state_task(ctx, graph, 'Mysql', 'created', 'Mysql_created', custom_context)
operation_task(ctx, graph, 'PHP', 'cloudify.interfaces.lifecycle.start', 'start_PHP', custom_context)
custom_context.add_customized_wf_node('Apache')
set_state_task(ctx, graph, 'Apache', 'starting', 'Apache_starting', custom_context)
custom_context.add_customized_wf_node('PHP')
set_state_task(ctx, graph, 'PHP', 'configuring', 'PHP_configuring', custom_context)
custom_context.add_customized_wf_node('PHP')
set_state_task(ctx, graph, 'PHP', 'created', 'PHP_created', custom_context)
operation_task(ctx, graph, 'Wordpress', 'cloudify.interfaces.lifecycle.start', 'start_Wordpress', custom_context)
operation_task(ctx, graph, 'Apache', 'cloudify.interfaces.lifecycle.configure', 'configure_Apache', custom_context)
custom_context.add_customized_wf_node('Mysql')
set_state_task(ctx, graph, 'Mysql', 'creating', 'Mysql_creating', custom_context)
operation_task(ctx, graph, 'Apache', 'cloudify.interfaces.lifecycle.create', 'create_Apache', custom_context)
custom_context.add_customized_wf_node('Wordpress')
set_state_task(ctx, graph, 'Wordpress', 'initial', 'Wordpress_initial', custom_context)
custom_context.add_customized_wf_node('Apache')
set_state_task(ctx, graph, 'Apache', 'configured', 'Apache_configured', custom_context)
custom_context.add_customized_wf_node('PHP')
set_state_task(ctx, graph, 'PHP', 'started', 'PHP_started', custom_context)
custom_context.add_customized_wf_node('Wordpress')
set_state_task(ctx, graph, 'Wordpress', 'configuring', 'Wordpress_configuring', custom_context)
custom_context.add_customized_wf_node('PHP')
set_state_task(ctx, graph, 'PHP', 'creating', 'PHP_creating', custom_context)
operation_task(ctx, graph, 'Wordpress', 'cloudify.interfaces.lifecycle.configure', 'configure_Wordpress', custom_context)
custom_context.add_customized_wf_node('Mysql')
set_state_task(ctx, graph, 'Mysql', 'starting', 'Mysql_starting', custom_context)
custom_context.add_customized_wf_node('Mysql')
set_state_task(ctx, graph, 'Mysql', 'configured', 'Mysql_configured', custom_context)
custom_context.add_customized_wf_node('Apache')
set_state_task(ctx, graph, 'Apache', 'creating', 'Apache_creating', custom_context)
operation_task(ctx, graph, 'PHP', 'cloudify.interfaces.lifecycle.create', 'create_PHP', custom_context)
operation_task(ctx, graph, 'Apache', 'cloudify.interfaces.lifecycle.start', 'start_Apache', custom_context)
operation_task(ctx, graph, 'PHP', 'cloudify.interfaces.lifecycle.configure', 'configure_PHP', custom_context)
custom_context.add_customized_wf_node('Mysql')
set_state_task(ctx, graph, 'Mysql', 'started', 'Mysql_started', custom_context)
custom_context.register_native_delegate_wf_step('Network', 'Network_install')
operation_task(ctx, graph, 'Wordpress', 'cloudify.interfaces.lifecycle.create', 'create_Wordpress', custom_context)
custom_context.add_customized_wf_node('Wordpress')
set_state_task(ctx, graph, 'Wordpress', 'creating', 'Wordpress_creating', custom_context)
custom_context.add_customized_wf_node('Mysql')
set_state_task(ctx, graph, 'Mysql', 'initial', 'Mysql_initial', custom_context)
custom_context.add_customized_wf_node('Apache')
set_state_task(ctx, graph, 'Apache', 'created', 'Apache_created', custom_context)
custom_context.add_customized_wf_node('Apache')
set_state_task(ctx, graph, 'Apache', 'started', 'Apache_started', custom_context)
custom_context.add_customized_wf_node('Wordpress')
set_state_task(ctx, graph, 'Wordpress', 'created', 'Wordpress_created', custom_context)
custom_context.add_customized_wf_node('Apache')
set_state_task(ctx, graph, 'Apache', 'configuring', 'Apache_configuring', custom_context)
operation_task(ctx, graph, 'Mysql', 'cloudify.interfaces.lifecycle.start', 'start_Mysql', custom_context)
custom_context.add_customized_wf_node('PHP')
set_state_task(ctx, graph, 'PHP', 'initial', 'PHP_initial', custom_context)
custom_context.add_customized_wf_node('PHP')
set_state_task(ctx, graph, 'PHP', 'configured', 'PHP_configured', custom_context)
custom_context.register_native_delegate_wf_step('Compute2', 'Compute2_install')
custom_context.register_native_delegate_wf_step('DeletableConfigurableBlockStorage', 'DeletableConfigurableBlockStorage_install')
generate_native_node_workflows(ctx, graph, custom_context, 'install')
link_tasks(graph, 'PHP_starting', 'PHP_configured', custom_context)
link_tasks(graph, 'Wordpress_started', 'start_Wordpress', custom_context)
link_tasks(graph, 'Mysql_configuring', 'Wordpress_created', custom_context)
link_tasks(graph, 'Mysql_configuring', 'Mysql_created', custom_context)
link_tasks(graph, 'create_Mysql', 'Mysql_creating', custom_context)
link_tasks(graph, 'Apache_initial', 'Compute2_install', custom_context)
link_tasks(graph, 'configure_Mysql', 'Mysql_configuring', custom_context)
link_tasks(graph, 'Wordpress_configured', 'configure_Wordpress', custom_context)
link_tasks(graph, 'Wordpress_starting', 'Wordpress_configured', custom_context)
link_tasks(graph, 'Mysql_created', 'create_Mysql', custom_context)
link_tasks(graph, 'start_PHP', 'PHP_starting', custom_context)
link_tasks(graph, 'Apache_starting', 'Apache_configured', custom_context)
link_tasks(graph, 'PHP_configuring', 'PHP_created', custom_context)
link_tasks(graph, 'PHP_configuring', 'Wordpress_created', custom_context)
link_tasks(graph, 'PHP_created', 'create_PHP', custom_context)
link_tasks(graph, 'start_Wordpress', 'Wordpress_starting', custom_context)
link_tasks(graph, 'configure_Apache', 'Apache_configuring', custom_context)
link_tasks(graph, 'Mysql_creating', 'Mysql_initial', custom_context)
link_tasks(graph, 'create_Apache', 'Apache_creating', custom_context)
link_tasks(graph, 'Wordpress_initial', 'Apache_started', custom_context)
link_tasks(graph, 'Apache_configured', 'configure_Apache', custom_context)
link_tasks(graph, 'PHP_started', 'start_PHP', custom_context)
link_tasks(graph, 'Wordpress_configuring', 'Mysql_started', custom_context)
link_tasks(graph, 'Wordpress_configuring', 'PHP_started', custom_context)
link_tasks(graph, 'Wordpress_configuring', 'Wordpress_created', custom_context)
link_tasks(graph, 'PHP_creating', 'PHP_initial', custom_context)
link_tasks(graph, 'configure_Wordpress', 'Wordpress_configuring', custom_context)
link_tasks(graph, 'Mysql_starting', 'Mysql_configured', custom_context)
link_tasks(graph, 'Mysql_configured', 'configure_Mysql', custom_context)
link_tasks(graph, 'Apache_creating', 'Apache_initial', custom_context)
link_tasks(graph, 'create_PHP', 'PHP_creating', custom_context)
link_tasks(graph, 'start_Apache', 'Apache_starting', custom_context)
link_tasks(graph, 'configure_PHP', 'PHP_configuring', custom_context)
link_tasks(graph, 'Mysql_started', 'start_Mysql', custom_context)
link_tasks(graph, 'create_Wordpress', 'Wordpress_creating', custom_context)
link_tasks(graph, 'Wordpress_creating', 'Wordpress_initial', custom_context)
link_tasks(graph, 'Mysql_initial', 'Compute_install', custom_context)
link_tasks(graph, 'Apache_created', 'create_Apache', custom_context)
link_tasks(graph, 'Apache_started', 'start_Apache', custom_context)
link_tasks(graph, 'Wordpress_created', 'create_Wordpress', custom_context)
link_tasks(graph, 'Apache_configuring', 'Apache_created', custom_context)
link_tasks(graph, 'start_Mysql', 'Mysql_starting', custom_context)
link_tasks(graph, 'PHP_initial', 'Compute2_install', custom_context)
link_tasks(graph, 'PHP_configured', 'configure_PHP', custom_context)
def build_tasks(ctx, graph):
custom_context = CustomContext(ctx)
_build_tasks(ctx, graph, custom_context)
return custom_context
| victorkeophila/alien4cloud-cloudify3-provider | src/test/python/workflows/tasks.py | Python | apache-2.0 | 9,581 |
# TODO: clean up
from django.template import RequestContext, Template, Context, loader
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.admin.views.decorators import staff_member_required
from django.http import HttpResponse, HttpResponseRedirect, Http404, HttpResponseForbidden
from django.conf import settings
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext as _
from django import forms
import Image
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from gallery import models
from gallery.config import MAX_FILE_SIZE, MAX_IMAGE_SIZE
class AlbumForm(forms.ModelForm):
description = forms.CharField(required=False, widget=forms.Textarea(attrs={'rows': 4, 'cols': 60}))
class Meta:
model = models.Album
#fields = ('name', 'description', 'parent', 'position', 'ordering', 'preview')
fields = ('name', 'description', 'position',)
@staff_member_required
def album_add_edit(request, id=None):
if id:
album = get_object_or_404(models.Album, pk=id)
form = AlbumForm(request.method == 'POST' and request.POST or None, instance=album)
add = False
else:
form = AlbumForm(request.method == 'POST' and request.POST or None)
album = None
add = True
if request.method == 'POST':
if form.is_valid():
album = form.save()
return HttpResponseRedirect('../%d/' % album.id)
return render_to_response('gallery/album_add_edit.html', {
'title':'%s %s' % (add and _('Add') or _('Edit'), _('album')),
'album': album,
'add': add,
}, context_instance=RequestContext(request))
@staff_member_required
def album_objectlist(request, id=None):
album = get_object_or_404(models.Album, pk=id)
return render_to_response('gallery/album_objectlist.html', {
'album': album,
}, context_instance=RequestContext(request))
def album_upload(request, id, sessionid=None):
album = get_object_or_404(models.Album, pk=id)
if sessionid:
# We are getting the session id in the URL, so we can't just use request.user
from django.contrib.sessions.middleware import SessionMiddleware
from django.contrib.auth import get_user
request.COOKIES[settings.SESSION_COOKIE_NAME] = sessionid
SessionMiddleware().process_request(request)
user = get_user(request)
else:
user = request.user
# For some reason, only the following status codes (which have a body) work:
# 400 401 403 404 411 416 500 501 503 505
# Make sure the user has permission
if not user.is_staff:
return HttpResponseForbidden()
# Finally handle the upload
filedata = request.FILES['fileinput']
print "file size", filedata.size
if filedata.size > MAX_FILE_SIZE:
        return HttpResponse("File too large.", status=400)
# Check if it's an image
try:
img = Image.open(StringIO(filedata['content']))
except Exception, e:
return HttpResponse(str(e), status=401)
if img.size[0] > MAX_IMAGE_SIZE[0] or img.size[1] > MAX_IMAGE_SIZE[1]:
        return HttpResponse("Image too big.", status=400)
# Save it
object = models.Object(
album=album,
type='p',
name=str(filedata),
)
try:
object.save_original_file(filedata['filename'], filedata['content'])
except Exception, e:
print e
return HttpResponse('OK')
def get_album_and_object(album_id, object_id):
album = get_object_or_404(models.Album, pk=album_id)
object = get_object_or_404(models.Object, pk=object_id)
if object.album != album:
raise Http404
return album, object
@staff_member_required
def album_object_set_preview(request, album_id, object_id):
album, object = get_album_and_object(album_id, object_id)
album.preview = object
album.save()
return HttpResponseRedirect('../../')
@staff_member_required
def album_object_set_description(request, album_id, object_id):
album, object = get_album_and_object(album_id, object_id)
object.description = request.POST.get('description')
object.save()
return HttpResponseRedirect('../../')
@staff_member_required
def album_object_delete(request, album_id, object_id):
album, object = get_album_and_object(album_id, object_id)
# Do not delete the whole album when deleting the preview object!
if album.preview == object:
album.preview = None
album.save()
object.delete()
return HttpResponseRedirect('../../')
| lucasvo/django-gallery | gallery/admin_views.py | Python | bsd-3-clause | 4,672 |
from spec.python import db_connection
import pytest
import cPickle
import datetime
from sam.pages import alerts
from sam.models.security.alerts import Alerts as AlertsModel, AlertFilter
from spec.python.models.security import test_warnings
from sam.models.security import anomaly_plugin as model_ap
from sam import errors
db = db_connection.db
sub_id = db_connection.default_sub
ds_full = db_connection.dsid_default
def reset_mock_alerts():
table = "s{}_Alerts".format(sub_id)
db.delete(table, where='1')
# alert 1 with standard metadata
details = cPickle.dumps({
'src': 2292489578L,
'dst': 3181255194L,
'port': 389L,
'protocol': u'UDP',
'timestamp': datetime.datetime(2016, 6, 21, 18, 0),
'links': 2L,
'bytes_sent': 496L,
'bytes_received': 0L,
'packets_sent': 4L,
'packets_received': 0L,
'duration': 1811L,
})
db.insert(table, ipstart=2292489578, ipend=2292489578, log_time=1466532000, report_time=1496886794, severity=6,
viewed=False, label="LDAP Access", rule_id=4, rule_name="Demo Alert", details=details)
# alert 2 with details popup
details = cPickle.dumps({
'Unusual Inbound Port Access': 'Warning score: 3',
'activities': [{'chart': {'data1': [0.2, 0.65, 0.15],
'data2': [0.25, 0.75, 0],
'h': 3.5,
'legend': ['Current', 'Usual'],
'sd1': None,
'sd2': [0.1, 0.1, 0.1],
'title': 'Inbound Ports',
'w': 6,
'xlabel': 'Port',
'xticks': ['80', '443', '3306'],
'ylabel': 'Connections Portion'},
'description': 'Usually this host receives 100% of connections on ports 80 and 443. '
'Currently it is also receiving connections on port 3306.',
'score': 2,
'title': 'Unusual Inbound Port Access',
'warning_id': 3L}]})
db.insert(table, ipstart=590000000, ipend=590000000, log_time=1496700000, report_time=1503699051, severity=5,
viewed=False, label='Unusual inbound traffic', rule_id=None, rule_name='A.D.E.L.E.', details=details)
def test_time_to_seconds():
assert alerts.time_to_seconds("0") == 0
assert alerts.time_to_seconds("1 year") == 31556926
assert alerts.time_to_seconds("3 min, 6 hr, 1 second 1 year 20 weeks") == 43674707
assert alerts.time_to_seconds("500 trash") == 0
def test_iprange_to_string():
assert alerts.iprange_to_string(167772160, 184549375) == "10.0.0.0/8"
assert alerts.iprange_to_string(167772160, 167837695) == "10.0.0.0/16"
assert alerts.iprange_to_string(167772160, 167772415) == "10.0.0.0/24"
assert alerts.iprange_to_string(167772160, 167772160) == "10.0.0.0"
def test_fuzzy_time():
assert alerts.fuzzy_time(0) == '0 seconds'
assert alerts.fuzzy_time(10) == '10 seconds'
assert alerts.fuzzy_time(100) == '100 seconds'
assert alerts.fuzzy_time(1000) == '17 minutes'
assert alerts.fuzzy_time(10000) == '2.8 hours'
assert alerts.fuzzy_time(100000) == '28 hours'
assert alerts.fuzzy_time(1000000) == '12 days'
assert alerts.fuzzy_time(10000000) == '17 weeks'
assert alerts.fuzzy_time(100000000) == '3.2 years'
assert alerts.fuzzy_time(1000000000) == '32 years'
assert alerts.fuzzy_time(10000000000) == '317 years'
# --------------- Alerts ---------------
def test_alerts_get_decode():
with db_connection.env(mock_input=True, login_active=False, mock_session=True):
a_page = alerts.Alerts()
data = {}
request = a_page.decode_get_request(data)
expected = {
'subnet': None,
'severity': 1,
'time': 604800,
'sort': 'id',
'sort_dir': 'DESC',
'page_size': 50,
'page_num': 1,
}
assert request == expected
data = {'subnet': '127.0.0.0/24', 'severity': '6', 'time': '1 day', 'sort': 'severity', 'sort_dir': 'aSC',
'page_size': '10', 'page_num': '2'}
request = a_page.decode_get_request(data)
expected = {
'subnet': '127.0.0.0/24',
'severity': 6,
'time': 86400,
'sort': 'severity',
'sort_dir': 'ASC',
'page_size': 10,
'page_num': 2,
}
assert request == expected
def test_alerts_get_perform():
reset_mock_alerts()
with db_connection.env(mock_input=True, login_active=False, mock_session=True):
a_page = alerts.Alerts()
request = {
'subnet': '127.0.0.0/24',
'severity': 6,
'time': 86400,
'sort': 'severity',
'sort_dir': 'ASC',
'page_size': 10,
'page_num': 2,
}
response = a_page.perform_get_command(request)
assert set(response.keys()) == {'results', 'page', 'pages', 'alerts'}
assert response['results'] == 0
assert response['page'] == 2
assert response['pages'] == 0
assert response['alerts'] == []
request = {
'subnet': None,
'severity': 1,
'time': 10,
'sort': 'id',
'sort_dir': 'DESC',
'page_size': 50,
'page_num': 1,
}
response = a_page.perform_get_command(request)
assert set(response.keys()) == {'results', 'page', 'pages', 'alerts'}
assert response['results'] == 0
assert response['page'] == 1
assert response['pages'] == 0
assert response['alerts'] == []
request = {
'subnet': None,
'severity': 1,
'time': 1000000000,
'sort': 'id',
'sort_dir': 'DESC',
'page_size': 50,
'page_num': 1,
}
response = a_page.perform_get_command(request)
assert set(response.keys()) == {'results', 'page', 'pages', 'alerts'}
assert response['results'] == 2
assert response['page'] == 1
assert response['pages'] == 1
assert len(response['alerts']) == 2
def test_alerts_get_encode():
with db_connection.env(mock_input=True, login_active=False, mock_session=True):
a_page = alerts.Alerts()
# empty response:
response = {
'alerts': [],
'results': 0,
'page': 1,
'pages': 0,
}
encoded = a_page.encode_get_response(response)
expected = {
'alerts': [],
'results': 0,
'page': 1,
'pages': 0,
}
assert encoded == expected
# real response
response = {
'page': 1,
'pages': 1,
'results': 2L,
'alerts': [
{'report_time': 1503699051L, 'severity': 5, 'ipstart': 590000000L, 'ipend': 590000000L,
'label': u'Unusual inbound traffic', 'rule_name': u'A.D.E.L.E.', 'id': 2L,
'log_time': 1496700000L, 'rule_id': None},
{'report_time': 1496886794L, 'severity': 6, 'ipstart': 2292489578L, 'ipend': 2292489578L,
'label': u'LDAP Access', 'rule_name': u'Demo Alert', 'id': 1L,
'log_time': 1466532000L, 'rule_id': 4L}],
}
encoded = a_page.encode_get_response(response)
expected = {
'page': 1,
'pages': 1,
'results': 2L,
'alerts': [
{
'id': '2',
'host': '35.42.175.128',
'severity': 'sev5',
'label': u'Unusual inbound traffic',
'rule_name': u'A.D.E.L.E.',
'log_time': '2017-06-05 15:00:00',
'report_time': '2017-08-25 15:10:51',
}, {
'id': '1',
'host': '136.164.157.106',
'severity': 'sev6',
'label': u'LDAP Access',
'rule_name': u'Demo Alert',
'log_time': '2016-06-21 11:00:00',
'report_time': '2017-06-07 18:53:14',
}
],
}
assert encoded == expected
def test_alerts_post_decode():
with db_connection.env(mock_input=True, login_active=False, mock_session=True):
a_page = alerts.Alerts()
data = {}
with pytest.raises(errors.MalformedRequest):
a_page.decode_post_request(data)
data = {'method': 'delete'}
with pytest.raises(errors.RequiredKey):
a_page.decode_post_request(data)
data = {'method': 'delete', 'id': 'NaN'}
with pytest.raises(errors.RequiredKey):
a_page.decode_post_request(data)
data = {'method': 'delete', 'id': 199}
request = a_page.decode_post_request(data)
expected = {'method': 'delete', 'id': 199}
assert request == expected
data = {'method': 'delete_all'}
request = a_page.decode_post_request(data)
expected = {'method': 'delete_all', 'id': None}
assert request == expected
data = {'method': 'delete_all', 'id': 201}
request = a_page.decode_post_request(data)
expected = {'method': 'delete_all', 'id': None}
assert request == expected
def test_alerts_post_perform():
reset_mock_alerts()
a_model = AlertsModel(db, sub_id)
with db_connection.env(mock_input=True, login_active=False, mock_session=True):
a_page = alerts.Alerts()
assert a_model.count() == 2
request = {'method': 'delete_all', 'id': None}
response = a_page.perform_post_command(request)
assert response == 'success'
assert a_model.count() == 0
reset_mock_alerts()
assert a_model.count() == 2
all_alerts = a_model.get(AlertFilter())
a_id = all_alerts[0]['id']
request = {'method': 'delete', 'id': a_id}
a_page.perform_post_command(request)
assert a_model.count() == 1
all_alerts = a_model.get(AlertFilter())
# we deleted the right one.
assert all_alerts[0]['id'] != a_id
def test_alerts_post_encode():
with db_connection.env(mock_input=True, login_active=False, mock_session=True):
a = alerts.Alerts()
assert a.encode_post_response("success") == {'result': 'success'}
assert a.encode_post_response("failure") == {'result': 'failure'}
# --------------- Alert Details ---------------
def test_alertdetails_get_decode():
with db_connection.env(mock_input=True, login_active=False, mock_session=True):
ad_page = alerts.AlertDetails()
data = {}
with pytest.raises(errors.RequiredKey):
ad_page.decode_get_request(data)
data = {'id': 'not a number'}
with pytest.raises(errors.RequiredKey):
ad_page.decode_get_request(data)
data = {'id': 199}
request = ad_page.decode_get_request(data)
expected = {'id': 199}
assert request == expected
def test_alertdetails_get_perform():
reset_mock_alerts()
a_model = AlertsModel(db, sub_id)
with db_connection.env(mock_input=True, login_active=False, mock_session=True):
ad_page = alerts.AlertDetails()
# bad id
request = {'id': -500}
response = ad_page.perform_get_command(request)
expected = {
'for': -500,
'details': None
}
assert response == expected
all_alerts = a_model.get(AlertFilter())
a_id1 = [a['id'] for a in all_alerts if a['label'] == 'LDAP Access'][0]
a_id2 = [a['id'] for a in all_alerts if a['label'] != 'LDAP Access'][0]
# good ids
request = {'id': a_id1}
response = ad_page.perform_get_command(request)
expected = {
'details': {'bytes_received': 0L,
'bytes_sent': 496L,
'dst': 3181255194L,
'duration': 1811L,
'links': 2L,
'packets_received': 0L,
'packets_sent': 4L,
'port': 389L,
'protocol': u'UDP',
'src': 2292489578L,
'timestamp': datetime.datetime(2016, 6, 21, 18, 0)},
# 'id': 7L, # dynamically assigned. may not reliably be 7.
'ipend': 2292489578L,
'ipstart': 2292489578L,
'label': u'LDAP Access',
'log_time': 1466532000L,
'report_time': 1496886794L,
'rule_id': 4L,
'rule_name': u'Demo Alert',
'severity': 6,
'viewed': 0
}
assert set(response.keys()) == {'for', 'details'}
assert response['for'] == a_id1
response['details'].pop('id')
assert response['details'] == expected
request = {'id': a_id2}
response = ad_page.perform_get_command(request)
expected = {
'details': {'Unusual Inbound Port Access': 'Warning score: 3',
'activities': [{'chart': {'data1': [0.2, 0.65, 0.15],
'data2': [0.25, 0.75, 0],
'h': 3.5,
'legend': ['Current', 'Usual'],
'sd1': None,
'sd2': [0.1, 0.1, 0.1],
'title': 'Inbound Ports',
'w': 6,
'xlabel': 'Port',
'xticks': ['80', '443', '3306'],
'ylabel': 'Connections Portion'},
'description': 'Usually this host receives 100% of connections on ports 80 and 443. Currently it is also receiving connections on port 3306.',
'score': 2,
'title': 'Unusual Inbound Port Access',
'warning_id': 3L}]},
# 'id': 8L, # dynamically assigned. may not reliably be 8.
'ipend': 590000000L,
'ipstart': 590000000L,
'label': u'Unusual inbound traffic',
'log_time': 1496700000L,
'report_time': 1503699051L,
'rule_id': None,
'rule_name': u'A.D.E.L.E.',
'severity': 5,
'viewed': 0}
assert set(response.keys()) == {'for', 'details'}
assert response['for'] == a_id2
response['details'].pop('id')
assert response['details'] == expected
def test_alertdetails_get_encode():
with db_connection.env(mock_input=True, login_active=False, mock_session=True):
a_page = alerts.AlertDetails()
response = {'for': -500, 'details': None}
encoded = a_page.encode_get_response(response)
expected = {
'for': -500,
'time': None,
'host': None,
'severity': None,
'label': None,
'rule_name': None,
'details': None,
'description': None
}
assert encoded == expected
response = {'for': 7, 'details': {
'details': {'bytes_received': 0L,
'bytes_sent': 496L,
'dst': 3181255194L,
'duration': 1811L,
'links': 2L,
'packets_received': 0L,
'packets_sent': 4L,
'port': 389L,
'protocol': u'UDP',
'src': 2292489578L,
'timestamp': datetime.datetime(2016, 6, 21, 18, 0)},
# 'id': 7L, # dynamically assigned. may not reliably be 7.
'ipend': 2292489578L,
'ipstart': 2292489578L,
'label': u'LDAP Access',
'log_time': 1466532000L,
'report_time': 1496886794L,
'rule_id': 4L,
'rule_name': u'Demo Alert',
'severity': 6,
'viewed': 0
}}
encoded = a_page.encode_get_response(response)
expected = {
'for': 7,
'time': '2017-06-07 18:53:14',
'host': '136.164.157.106',
'severity': 6,
'label': 'LDAP Access',
'rule_name': 'Demo Alert',
'description': 'Rule "Demo Alert" triggered on 136.164.157.106',
'details': {'src': '136.164.157.106',
'dst': '189.158.26.26',
'port': 389L,
'protocol': u'UDP',
'links': 2L,
'duration': '30 minutes',
'bytes_sent': 496L,
'bytes_received': 0L,
'packets_sent': 4L,
'packets_received': 0L,
'timestamp': '2016-06-21 18:00:00'},
}
assert encoded == expected
response = {'for': 8, 'details': {
'details': {'Unusual Inbound Port Access': 'Warning score: 3',
'activities': [{'chart': {'data1': [0.2, 0.65, 0.15],
'data2': [0.25, 0.75, 0],
'h': 3.5,
'legend': ['Current', 'Usual'],
'sd1': None,
'sd2': [0.1, 0.1, 0.1],
'title': 'Inbound Ports',
'w': 6,
'xlabel': 'Port',
'xticks': ['80', '443', '3306'],
'ylabel': 'Connections Portion'},
'description': 'Usually this host receives 100% of connections on ports 80 and 443. Currently it is also receiving connections on port 3306.',
'score': 2,
'title': 'Unusual Inbound Port Access',
'warning_id': 3L}]},
# 'id': 8L, # dynamically assigned. may not reliably be 8.
'ipend': 590000000L,
'ipstart': 590000000L,
'label': u'Unusual inbound traffic',
'log_time': 1496700000L,
'report_time': 1503699051L,
'rule_id': None,
'rule_name': u'A.D.E.L.E.',
'severity': 5,
'viewed': 0
}}
encoded = a_page.encode_get_response(response)
expected = {
'for': 8,
'time': '2017-08-25 15:10:51',
'host': '35.42.175.128',
'severity': 5,
'label': 'Unusual inbound traffic',
'rule_name': 'A.D.E.L.E.',
'description': 'Rule "A.D.E.L.E." triggered on 35.42.175.128',
'details': {'Unusual Inbound Port Access': 'Warning score: 3',
'activities': [{'chart': {'data1': [0.2, 0.65, 0.15],
'data2': [0.25, 0.75, 0],
'h': 3.5,
'legend': ['Current', 'Usual'],
'sd1': None,
'sd2': [0.1, 0.1, 0.1],
'title': 'Inbound Ports',
'w': 6,
'xlabel': 'Port',
'xticks': ['80', '443', '3306'],
'ylabel': 'Connections Portion'},
'description': 'Usually this host receives 100% of connections on ports 80 and 443. Currently it is also receiving connections on port 3306.',
'score': 2,
'title': 'Unusual Inbound Port Access',
'warning_id': 3L
}]
},
}
assert encoded == expected
def test_alertdetails_post_decode():
with db_connection.env(mock_input=True, login_active=False, mock_session=True):
a_page = alerts.AlertDetails()
data = {}
with pytest.raises(errors.RequiredKey):
a_page.decode_post_request(data)
data = {'method': 'bananas'}
with pytest.raises(errors.RequiredKey):
a_page.decode_post_request(data)
data = {'method': 'update_label'}
with pytest.raises(errors.RequiredKey):
a_page.decode_post_request(data)
data = {'method': 'update_label', 'id': 'non-number', 'label': 'new label'}
with pytest.raises(errors.RequiredKey):
a_page.decode_post_request(data)
data = {'method': 'update_label', 'id': '55'}
with pytest.raises(errors.RequiredKey):
a_page.decode_post_request(data)
data = {'method': 'update_label', 'id': '55', 'label': 'new label'}
request = a_page.decode_post_request(data)
expected = {
'method': 'update_label',
'id': 55,
'label': 'new label'
}
assert request == expected
def test_alertdetails_post_perform():
reset_mock_alerts()
a_model = AlertsModel(db, sub_id)
with db_connection.env(mock_input=True, login_active=False, mock_session=True):
a_page = alerts.AlertDetails()
all_alerts = a_model.get(AlertFilter())
a_id1 = [a['id'] for a in all_alerts if a['label'] == 'LDAP Access'][0]
a_id2 = [a['id'] for a in all_alerts if a['label'] != 'LDAP Access'][0]
request = {
'method': 'update_label',
'id': a_id1,
'label': 'new label'
}
a_page.perform_post_command(request)
request = {
'method': 'update_label',
'id': a_id2,
'label': 'meh label'
}
a_page.perform_post_command(request)
labels = {row['label'] for row in db.select("s{}_Alerts".format(sub_id), what='label')}
assert labels == {'new label', 'meh label'}
def test_alertdetails_post_encode():
with db_connection.env(mock_input=True, login_active=False, mock_session=True):
a = alerts.AlertDetails()
assert a.encode_post_response("success") == {'result': 'success'}
assert a.encode_post_response("failure") == {'result': 'failure'} | JoePelz/SAM | spec/python/pages/test_alerts.py | Python | gpl-3.0 | 23,407 |
from __future__ import division
from math import sqrt
def solve(s1, s2, q, L):
side = L * sqrt(2) - sqrt(2.0 * q)
return side / abs(s2 - s1)
if __name__ == '__main__':
L, s1, s2 = map(int, raw_input().strip().split())
Q = int(raw_input())
for _ in range(Q):
q = int(raw_input())
print '%.4lf' % solve(s1, s2, q, L) | m00nlight/hackerrank | algorithm/contests/Ad-Infinitum-10/A.py | Python | gpl-2.0 | 357 |
# OfflineIMAP synchronization master code
# Copyright (C) 2002-2007 John Goerzen
# <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import imaplib
from offlineimap import imapserver, repository, folder, mbnames, threadutil, version
from offlineimap.threadutil import InstanceLimitedThread, ExitNotifyThread
import offlineimap.accounts
from offlineimap.accounts import SyncableAccount
from offlineimap.ui import UIBase
import re, os, os.path, offlineimap, sys
from ConfigParser import ConfigParser
from threading import *
def syncaccount(threads, config, accountname):
account = SyncableAccount(config, accountname)
thread = InstanceLimitedThread(instancename = 'ACCOUNTLIMIT',
target = account.syncrunner,
name = "Account sync %s" % accountname)
thread.setDaemon(1)
thread.start()
threads.add(thread)
def syncitall(accounts, config):
currentThread().setExitMessage('SYNC_WITH_TIMER_TERMINATE')
ui = UIBase.getglobalui()
threads = threadutil.threadlist()
mbnames.init(config, accounts)
for accountname in accounts:
syncaccount(threads, config, accountname)
# Wait for the threads to finish.
threads.reset()
| avsm/lifedb-plugins | IMAP/offlineimap/offlineimap/syncmaster.py | Python | gpl-2.0 | 1,949 |
import os
from pythonforandroid.recipes.libffi import LibffiRecipe
from pythonforandroid.util import load_source
util = load_source('util', os.path.join(os.path.dirname(os.path.dirname(__file__)), 'util.py'))
assert LibffiRecipe._version == "v3.3"
assert LibffiRecipe.depends == []
assert LibffiRecipe.python_depends == []
class LibffiRecipePinned(util.InheritedRecipeMixin, LibffiRecipe):
sha512sum = "62798fb31ba65fa2a0e1f71dd3daca30edcf745dc562c6f8e7126e54db92572cc63f5aa36d927dd08375bb6f38a2380ebe6c5735f35990681878fc78fc9dbc83"
recipe = LibffiRecipePinned()
| GroestlCoin/electrum-grs | contrib/android/p4a_recipes/libffi/__init__.py | Python | gpl-3.0 | 575 |
#!/usr/bin/env python
"""Game context and configuration classes."""
import offer
import cspparser
import problem
class Config(object):
"""Represents a CSP configuration object."""
def __init__(self, gamekind=None, turnduration=None, mindecrement=None,
initacc=None, maxProposals=0, minProposals=0,
minPropositions=0, objective=None, predicate=None,
numrounds=None, profitfactor=None, otrounds=None,
maxClauses=0, hasSecrets=False, secretRatio=0):
"""A Config object with type enforcement."""
self.gamekind = gamekind
self.turnduration = int(turnduration)
self.mindecrement = float(mindecrement)
self.initacc = float(initacc)
self.maxoffers = self.maxproposals = int(maxProposals)
self.minproposals = int(minProposals)
self.minpropositions = int(minPropositions)
self.maxclauses = int(maxClauses)
self.objective = objective
self.predicate = predicate
self.numrounds = int(numrounds)
self.profitfactor = float(profitfactor)
self.otrounds = int(otrounds)
if hasSecrets == 'true':
self.hassecrets = True
else:
self.hassecrets = False
self.secretratio = float(secretRatio)
@classmethod
def FromString(cls, input):
"""Get a config object from an input string."""
ps = cspparser.Config.searchString(input)
if ps:
return cls.FromParsed(ps[0])
else:
raise Exception('Configuration not found in input string')
@classmethod
def FromParsed(cls, parse_obj):
"""Get a config object from the parser output."""
return cls(**parse_obj.asDict())
class PlayerContext(object):
"""Represent a CSP PlayerContext object from the administrator."""
def __init__(self, config=None, their_offered=None,
our_offered=None, accepted=None,
provided=None, playerid=None, currentround=None,
balance=None):
self.their_offered = offer.Offer.GetOfferList(their_offered)
self.their_offered.sort()
self.our_offered = offer.Offer.GetOfferList(our_offered)
self.accepted = offer.AcceptedChallenge.GetAcceptedChallengeList(accepted)
self.provided = problem.Problem.GetProblemList(provided)
self.config = config
self.playerid = int(playerid)
self.currentround = int(currentround)
self.balance = float(balance)
self.endbalance = float(self.balance)
@classmethod
def FromString(cls, input):
"""Get a playercontext from the inputstring."""
ps = cspparser.PlayerContext.searchString(input)
if ps:
return cls.FromParsed(ps[0])
else:
raise Exception('PlayerContext not found in input string')
@classmethod
def FromParsed(cls, parsed):
"""Get a playercontext from the parser."""
return cls(config=Config.FromParsed(parsed.config),
their_offered=parsed.their_offered,
our_offered=parsed.our_offered,
accepted=parsed.accepted, provided=parsed.provided,
playerid=parsed.playerid, currentround=parsed.currentround,
balance=parsed.balance)
| compbrain/Athena-SCG-Bot | src/playercontext.py | Python | bsd-3-clause | 3,093 |
import sys, socket
from flask import Flask, request, Response
# Load the Pokemon class
from pokemon.pokemon import Pokemon
# Tasks shared by the server and the client live in the battle.py module
from battle import validate, simple_duel, make_battle_state, command_line
# Permitindo apenas erros como mensagens do console.
import logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
app = Flask(__name__)
pserver = None
pclient = None
client_turn = False
serverxml = ""
@app.route('/')
def index():
return "Hello, World!"
@app.route('/battle/', methods=['POST'])
def battle():
global pserver
global pclient
global client_turn
global serverxml
if pserver is None or pclient is not None:
resp = Response("", status=423, mimetype='text/plain')
return resp
else:
        # The client's pokemon XML is loaded here!
pclient = Pokemon()
p = request.form['pokemon']
pclient.load_xml(validate(p)[0])
        # Determine the battle order
if pserver.get_SPD() > pclient.get_SPD():
print("\nServer turn")
simple_duel(pserver, pclient)
else:
print("Vez do cliente, aguarde!")
client_turn = True
        # Generate the response
battle_state = make_battle_state(pserver, pclient)
resp = Response(battle_state, status=200, mimetype='application/xml')
        # If the client's pokemon fainted, reload the server's pokemon
        # and get ready for the next battle
if pclient.get_HP()==0:
pclient = None
client_turn = False
if pserver.load_xml(validate(serverxml)[0]) is not None:
print("Pokemon %s recarregado com sucesso" % pserver.get_name())
return resp
@app.route('/battle/attack/<int:n>', methods=['GET'])
def attack(n):
global pserver
global pclient
global client_turn
if pserver is None or pclient is None or not client_turn:
resp = Response("", status=423, mimetype='text/plain')
return resp
else:
# Realiza o ataque do cliente e do servidor em sequencia.
print("\nClient turn")
simple_duel(pclient, pserver, n)
if pserver.get_HP() > 0:
print("\nServer turn")
simple_duel(pserver, pclient)
        # Return the current battle_state as XML
battle_state = make_battle_state(pserver, pclient)
resp = Response(battle_state, status=200, mimetype='application/xml')
        # Reload the server's pokemon if the battle has ended.
if pclient.get_HP()==0 or pserver.get_HP()==0:
pclient = None
client_turn = False
if pserver.load_xml(validate(serverxml)[0]) is not None:
print("Pokemon %s recarregado com sucesso" % pserver.get_name())
else:
print("Falha ao recarregar pokemon no servidor!")
return resp
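# Illustrative client calls (editor's sketch; the host/port and the pokemon XML
# payload are placeholders, and the XML must follow whatever schema
# battle.validate expects):
#
#   curl -X POST -d 'pokemon=<pokemon>...</pokemon>' http://localhost:5000/battle/
#   curl http://localhost:5000/battle/attack/1
#
# The first request registers the client's pokemon and returns the battle state;
# the second performs move number 1 while it is the client's turn.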
if __name__ == '__main__':
(pserver, port, host) = command_line(sys.argv[1:])
if pserver is not None:
print("Pokemon %s carregado com sucesso" % pserver.get_name())
serverxml = make_battle_state(pserver)
try:
app.run(host=host, port=port, debug=False)
except socket.gaierror:
print("Por favor, passe um hostname valido ou endereco de IP.")
else:
print("Pokemon passado incorretamente.\nPrograma encerrado.") | andredalton/bcc | 2014/MAC0242/Projeto/battle_server.py | Python | apache-2.0 | 3,422 |
"""celery_django URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from demo.views import (
readme,
crash,
notify,
crashcallback,
notifymeta,
notifycontext,
notifyseverity,
)
urlpatterns = [
url(r'^$', readme),
url(r'^celerycrash$', crash),
url(r'^celerycallback$', crashcallback),
url(r'^celerynotify$', notify),
url(r'^celerynotifymetadata$', notifymeta),
url(r'^celerynotifycontext$', notifycontext),
url(r'^celerynotifyseverity$', notifyseverity),
]
| bugsnag/bugsnag-python | example/celery+django/celery_django/urls.py | Python | mit | 1,125 |
from cspreporter.core.plugins import LogFormat
import json
class Nginx(LogFormat):
title = 'Clean up log'
desc = 'This plugin cleans up log entries to JSON data only'
def setup(self):
pass
def process(self, s):
tmp = s.decode('unicode-escape')
return tmp[tmp.rfind('{"csp-report"'):].strip('"\n ')
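    # Editor's note (illustrative, not from the original plugin): given a raw
    # nginx log entry whose quotes were escaped by the logger, e.g.
    #   '... request_body: "{\x22csp-report\x22:{\x22blocked-uri\x22:\x22data\x22}}"'
    # process() decodes the escape sequences and slices from the last
    # '{"csp-report"' occurrence, returning just the JSON document:
    #   '{"csp-report":{"blocked-uri":"data"}}'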
def teardown(self):
pass
| yandex/csp-reporter | cspreporter/plugins/nginx/nginx.py | Python | gpl-2.0 | 379 |
# -*- coding: utf-8 -*-
import unittest
from cliez.component import Component
class ComponentTestCase(unittest.TestCase):
def test_load_resource(self):
"""
        Resource loading test: under normal conditions the calls below should not raise exceptions.
:return:
"""
a = Component()
a.load_resource('component.py', root='cliez')
a.load_resource('/component.py', root='cliez')
a.load_resource('cliez/component.py')
a.load_resource(__file__.rsplit('/', 2)[0] + '/cliez/component.py')
pass
pass
| 9nix00/cliez | tests/component.py | Python | mit | 545 |
"""Collection of helper methods.
All containing methods are legacy helpers that should not be used by new
components. Instead call the service directly.
"""
from homeassistant.components.vacuum import (
ATTR_FAN_SPEED,
ATTR_PARAMS,
DOMAIN,
SERVICE_CLEAN_SPOT,
SERVICE_LOCATE,
SERVICE_PAUSE,
SERVICE_RETURN_TO_BASE,
SERVICE_SEND_COMMAND,
SERVICE_SET_FAN_SPEED,
SERVICE_START,
SERVICE_START_PAUSE,
SERVICE_STOP,
)
from homeassistant.const import (
ATTR_COMMAND,
ATTR_ENTITY_ID,
ENTITY_MATCH_ALL,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.loader import bind_hass
@bind_hass
def turn_on(hass, entity_id=ENTITY_MATCH_ALL):
"""Turn all or specified vacuum on."""
hass.add_job(async_turn_on, hass, entity_id)
async def async_turn_on(hass, entity_id=ENTITY_MATCH_ALL):
"""Turn all or specified vacuum on."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_TURN_ON, data, blocking=True)
@bind_hass
def turn_off(hass, entity_id=ENTITY_MATCH_ALL):
"""Turn all or specified vacuum off."""
hass.add_job(async_turn_off, hass, entity_id)
async def async_turn_off(hass, entity_id=ENTITY_MATCH_ALL):
"""Turn all or specified vacuum off."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_TURN_OFF, data, blocking=True)
@bind_hass
def toggle(hass, entity_id=ENTITY_MATCH_ALL):
"""Toggle all or specified vacuum."""
hass.add_job(async_toggle, hass, entity_id)
async def async_toggle(hass, entity_id=ENTITY_MATCH_ALL):
"""Toggle all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_TOGGLE, data, blocking=True)
@bind_hass
def locate(hass, entity_id=ENTITY_MATCH_ALL):
"""Locate all or specified vacuum."""
hass.add_job(async_locate, hass, entity_id)
async def async_locate(hass, entity_id=ENTITY_MATCH_ALL):
"""Locate all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_LOCATE, data, blocking=True)
@bind_hass
def clean_spot(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to perform a spot clean-up."""
hass.add_job(async_clean_spot, hass, entity_id)
async def async_clean_spot(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to perform a spot clean-up."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_CLEAN_SPOT, data, blocking=True)
@bind_hass
def return_to_base(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to return to base."""
hass.add_job(async_return_to_base, hass, entity_id)
async def async_return_to_base(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to return to base."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_RETURN_TO_BASE, data, blocking=True)
@bind_hass
def start_pause(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to start or pause the current task."""
hass.add_job(async_start_pause, hass, entity_id)
async def async_start_pause(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to start or pause the current task."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_START_PAUSE, data, blocking=True)
@bind_hass
def start(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to start or resume the current task."""
hass.add_job(async_start, hass, entity_id)
async def async_start(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or specified vacuum to start or resume the current task."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_START, data, blocking=True)
@bind_hass
def pause(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or the specified vacuum to pause the current task."""
hass.add_job(async_pause, hass, entity_id)
async def async_pause(hass, entity_id=ENTITY_MATCH_ALL):
"""Tell all or the specified vacuum to pause the current task."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_PAUSE, data, blocking=True)
@bind_hass
def stop(hass, entity_id=ENTITY_MATCH_ALL):
"""Stop all or specified vacuum."""
hass.add_job(async_stop, hass, entity_id)
async def async_stop(hass, entity_id=ENTITY_MATCH_ALL):
"""Stop all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else None
await hass.services.async_call(DOMAIN, SERVICE_STOP, data, blocking=True)
@bind_hass
def set_fan_speed(hass, fan_speed, entity_id=ENTITY_MATCH_ALL):
"""Set fan speed for all or specified vacuum."""
hass.add_job(async_set_fan_speed, hass, fan_speed, entity_id)
async def async_set_fan_speed(hass, fan_speed, entity_id=ENTITY_MATCH_ALL):
"""Set fan speed for all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
data[ATTR_FAN_SPEED] = fan_speed
await hass.services.async_call(DOMAIN, SERVICE_SET_FAN_SPEED, data, blocking=True)
@bind_hass
def send_command(hass, command, params=None, entity_id=ENTITY_MATCH_ALL):
"""Send command to all or specified vacuum."""
hass.add_job(async_send_command, hass, command, params, entity_id)
async def async_send_command(hass, command, params=None, entity_id=ENTITY_MATCH_ALL):
"""Send command to all or specified vacuum."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
data[ATTR_COMMAND] = command
if params is not None:
data[ATTR_PARAMS] = params
await hass.services.async_call(DOMAIN, SERVICE_SEND_COMMAND, data, blocking=True)
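# Editor's note: as the module docstring says, new code should call the service
# directly rather than these helpers. A minimal sketch (the entity id is a
# made-up example):
#
#   await hass.services.async_call(
#       DOMAIN, SERVICE_START, {ATTR_ENTITY_ID: "vacuum.living_room"}, blocking=True
#   )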
| jawilson/home-assistant | tests/components/vacuum/common.py | Python | apache-2.0 | 6,031 |
'''
inherit_docstrings.py - this file is part of S3QL (http://s3ql.googlecode.com)
Copyright © 2008 Nikolaus Rath <[email protected]>
This program can be distributed under the terms of the GNU GPLv3.
---
This module defines a metaclass and function decorator that allows
to inherit the docstring for a function from the superclass.
'''
from functools import partial
from abc import ABCMeta
from .calc_mro import calc_mro
__all__ = [ 'copy_ancestor_docstring', 'prepend_ancestor_docstring',
'InheritableDocstrings', 'ABCDocstMeta' ]
# This definition is only used to assist static code analyzers
def copy_ancestor_docstring(fn):
'''Copy docstring for method from superclass
For this decorator to work, the class has to use the `InheritableDocstrings`
metaclass.
'''
raise RuntimeError('Decorator can only be used in classes '
'using the `InheritableDocstrings` metaclass')
def _copy_ancestor_docstring(mro, fn):
'''Decorator to set docstring for *fn* from *mro*'''
if fn.__doc__ is not None:
raise RuntimeError('Function already has docstring')
# Search for docstring in superclass
for cls in mro:
super_fn = getattr(cls, fn.__name__, None)
if super_fn is None:
continue
fn.__doc__ = super_fn.__doc__
break
else:
raise RuntimeError("Can't inherit docstring for %s: method does not "
"exist in superclass" % fn.__name__)
return fn
# This definition is only used to assist static code analyzers
def prepend_ancestor_docstring(fn):
'''Prepend docstring from superclass method
For this decorator to work, the class has to use the `InheritableDocstrings`
metaclass.
'''
raise RuntimeError('Decorator can only be used in classes '
'using the `InheritableDocstrings` metaclass')
def _prepend_ancestor_docstring(mro, fn):
'''Decorator to prepend ancestor docstring to *fn*'''
if fn.__doc__ is None:
fn.__doc__ = ''
# Search for docstring in superclass
for cls in mro:
super_fn = getattr(cls, fn.__name__, None)
if super_fn is None:
continue
if super_fn.__doc__.endswith('\n') and fn.__doc__.startswith('\n'):
fn.__doc__ = super_fn.__doc__ + fn.__doc__
else:
fn.__doc__ = '%s\n%s' % (super_fn.__doc__, fn.__doc__)
break
else:
raise RuntimeError("Can't find ancestor docstring for %s: method does not "
"exist in superclass" % fn.__name__)
return fn
DECORATORS = (('copy_ancestor_docstring', _copy_ancestor_docstring),
('prepend_ancestor_docstring', _prepend_ancestor_docstring))
class InheritableDocstrings(type):
@classmethod
def __prepare__(cls, name, bases, **kwds):
classdict = super().__prepare__(name, bases, *kwds)
mro = calc_mro(*bases)
# Inject decorators into class namespace
for (name, fn) in DECORATORS:
classdict[name] = partial(fn, mro)
return classdict
def __new__(cls, name, bases, classdict):
for (dec_name, fn) in DECORATORS:
# Decorators may not exist in class dict if the class (metaclass
# instance) was constructed with an explicit call to `type`
            # (Python bug? reported as http://bugs.python.org/issue18334)
if dec_name not in classdict:
continue
# Make sure that class definition hasn't messed with decorator
if getattr(classdict[dec_name], 'func', None) is not fn:
raise RuntimeError('No %s attribute may be created in classes using '
'the InheritableDocstrings metaclass' % name)
# Delete decorator from class namespace
del classdict[dec_name]
return super().__new__(cls, name, bases, classdict)
# Derive new metaclass to add docstring inheritance
class ABCDocstMeta(ABCMeta, InheritableDocstrings):
pass
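# Editor's sketch (not part of the original module): typical usage of the
# metaclass and decorator defined above; `Base` and `Child` are made-up names.
#
#   class Base:
#       def run(self):
#           '''Run the task.'''
#
#   class Child(Base, metaclass=InheritableDocstrings):
#       @copy_ancestor_docstring
#       def run(self):
#           pass
#
#   assert Child.run.__doc__ == Base.run.__doc__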
| krnflake/s3ql | src/s3ql/inherit_docstrings.py | Python | gpl-3.0 | 4,064 |
"""add view_count column in posts
Revision ID: eb64b7b3d4c2
Revises: fc532e183eaa
Create Date: 2016-07-08 11:01:40.948143
"""
# revision identifiers, used by Alembic.
revision = 'eb64b7b3d4c2'
down_revision = 'fc532e183eaa'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('posts', sa.Column('view_count', sa.Integer(), nullable=True))
op.drop_column('posts', 'body_html')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('posts', sa.Column('body_html', mysql.TEXT(collation=u'utf8_unicode_ci'), nullable=True))
op.drop_column('posts', 'view_count')
### end Alembic commands ###
| yelongyu/chihu | migrations/versions/eb64b7b3d4c2_add_view_count_column_in_posts.py | Python | gpl-3.0 | 814 |
x = "There are %d types of people." % 10
binary = "binary"
do_not = "don't"
y = "Those who %s and those who %s." % (binary, do_not)
print x
print y
print "I said: %r." % x
print "I also said: %s." % y
hilarious = False
joke_evaluation = "Isn't that joke so funny! %r"
print joke_evaluation % hilarious
w = "This is the left side of..."
e = "a string with a right side."
print w + e | SaileshPatel/Python-Exercises | ex6.py | Python | mit | 387 |
# -*- coding: utf-8 -*-
__author__ = """JanFan"""
__email__ = '[email protected]'
__version__ = '0.1.0'
| JanFan/py-aho-corasick | py_aho_corasick/__init__.py | Python | mit | 114 |
"""
Magnolya: de novo copy number variation detection
Usage: magnolya [OPTIONS] -i [FILE]
Input:
-i Tab delimited text file with read counts (required)
-l Tab delimited text file with contig locations on reference
Output:
-o CNV calls [stdout]
-f Model parameters
Parameters:
-m Number of Poisson components [Estimate using Baysian Information Criterion]
-g Gamma settings {haploid, diploid, None} [None]
-s Minimum contig size to use for EM [500]
-e Log-likelihood change EM stopping criterion [10e-6]
-r Maximum iterations of EM [200]
-t CNV threshold: only contigs with CNV probability higher than this value are reported [report all]
-S Report Size threshold: don't report contigs smaller than this threshold [500]
-M Multiplier for zero copy number threshold. CN = 0 if count/length < mu - M*stdev [5]
Date: July 25, 2012
Contact: Jurgen F. Nijkamp, Dick de Ridder
Publication: De novo detection of copy number variation by co-assembly (2012) Bioinformatics
"""
import getopt
from numpy import array, mean, median, std, sum, float64, exp, int32, log10, argmax, argmin, zeros, sqrt
import scipy.stats as stats
from math import isnan
from sys import stdout, stderr
import sys
from copy import deepcopy
import numpy as np
import scipy as sp
import pdb
import math
import time
import random
import itertools
def poisson_geometric(x,t,A,mix,mu,tries,N,alpha):
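    # Editor's note (comment added for clarity): mixture likelihood with A
    # components. The first A-1 components are Poisson with rate mu[i]*t
    # (t = contig length); the last component is a geometric tail for very high
    # counts, shifted by mu[-1]*t and parametrised by alpha/t. Returns the total
    # mixture probability px, the per-component probabilities pxi, and the
    # number of contigs whose count exceeds the geometric shift.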
x_delay = x - np.int32(mu[-1]*t)
minxt = mu[-1]
# P(data|model i) and P(data)
pxi = np.array([np.zeros(N)]*int(A)); px = np.zeros(N);
for i in range(int(A)):
if i < int(A)-1:
# Fill in poisson
pxi[i,:] = stats.poisson.pmf(x,mu[i]*t) # Add machine precision
px = px + mix[i] * pxi[i,:]
else:
# Fill in geometric
pxi[i,:] = stats.distributions.geom.pmf(x_delay,alpha/np.float64(t))
px = px + mix[i] * pxi[i,:]
return px, pxi, np.sum([x_delay > 0])
def poisson(x,t,A,mix,mu,tries,N):
# P(data|model i) and P(data)
    pxi = np.array([np.zeros(N)]*int(A)); px = np.zeros(N);
for i in range(int(A)):
# Fill in poisson
pxi[i,:] = stats.poisson.pmf(x,mu[i]*t)
px = px + mix[i] * pxi[i,:]
return px,pxi
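# Reference note (added annotation, not in the original source): both helpers
# above evaluate the mixture density
#     p(x_j) = sum_i mix[i] * Poisson(x_j; mu[i] * t_j)
# where x_j is the read count of contig j and t_j its length, so each Poisson
# component's rate scales linearly with contig length; poisson_geometric
# additionally replaces the last component by a geometric tail for outliers.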
def dirichlet(gamma,n,I):
"""Initialize Dirichlet parameter
INPUT:
gamma: 'haploid', 'diploid', 'uniform', or an list of gamma's of length n
n: Number of models
I: Hyperparameter that controls the prior's impact during model selection (I>0)
OUTPUT:
gamma array
"""
if n<2:
raise Exception, "Poisson mixture needs at leat two Poisson models"
if I <= 0:
raise Exception, "Hyperparameter I should be larger then 0"
if gamma == 'haploid':
res = (np.ones(n))
res[0] += I
elif gamma == 'diploid':
res = (np.ones(n))
res[1] += I
elif gamma == 'allo-triploid':
res = (np.ones(n))
res[0] += np.float64(I)/2
res[1] += np.float64(I)/2
elif gamma == 'di-triploid':
res = (np.ones(n))
res[1] += np.float64(I)/2
res[2] += np.float64(I)/2
elif (type(gamma) == type(np.array([])) or type(gamma) == type([])) and len(gamma)==n:
res = np.array(gamma)
elif gamma is None or gamma == 'uniform':
res = (np.ones(n)+np.float64(I)/n)
else:
raise Exception, "Provide a valid gamma, see the docstring of em.Dirichlet"
return res
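# Illustrative examples for the Dirichlet prior above (added for exposition;
# n=4 components and I=100 observations are made-up values):
#   dirichlet('haploid', 4, 100)  -> array([ 101.,   1.,   1.,   1.])
#   dirichlet('diploid', 4, 100)  -> array([   1., 101.,   1.,   1.])
# i.e. the hyperparameter mass I is placed on the copy-number-1 or
# copy-number-2 component, biasing the mixing proportions accordingly.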
def emPoissonBic(count,interval,k=None,epsilon=2e-13,\
max_iters=100,gamma='haploid',figname='poisson.pdf', addUniform=1,\
initPercList=None,bw=True,plotContigSize=1000,maxval=None,ymax=None):
print "****Starting model selection using BIC****"
print "Number of contigs used to train EM: " , len(count)
print "Median contig length: " , np.median(interval)
plot_mixture=0
if k is None:
k=[3,4,6,8,10,12,15,20]
elif type(k) is not type([]):
raise Exception, "Please provide a range of cluster numbers in a List"
if len(count) == 0:
print >>stderr, "Warning: Zero reads detected"
return [0]*len(k),[0]*len(k),[0]*len(k),[0]*len(k),k
likelihoods = []; mus = []; mixs = []; alphas = [];
for ki in k:
likelihood, mu, mix, alpha = emPoissonInit(count,interval,ki,False,epsilon,\
max_iters,gamma,figname, addUniform,initPercList,\
plotContigSize,maxval)
likelihoods.append(likelihood)
mus.append(mu)
mixs.append(mix)
alphas.append(alpha)
# Calculate Bayesian Information Criterion
n = len(count)
BICS = []
for i in range(len(likelihoods)):
BICS.append(-2 * likelihoods[i] + (k[i]+1+2*addUniform)*np.log(n)) # pi's + lambda + geom (alpha and pi)
print 'k = %d; BIC = %2.2f; L = %2.2f; lambda = %2.2e;' % \
(k[i], BICS[i], likelihoods[i], mus[i][0])
best = np.argmin(BICS)
return likelihoods, mus, mixs, BICS, k, alphas
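# Illustrative driver (added for exposition, not part of the original tool):
# a minimal call of the BIC-based model selection on simulated read counts.
# Contig lengths, the per-base rate 0.05 and the copy numbers drawn below are
# arbitrary example values.
def _example_emPoissonBic():
    lengths = np.random.randint(500, 5000, size=200)
    copy_number = np.random.choice([1, 2], size=200)
    counts = np.random.poisson(0.05 * lengths * copy_number)
    return emPoissonBic(counts, lengths, k=[2, 3, 4], gamma='diploid')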
def emPoissonInit(count,interval,k=10,plot_mixture=0,epsilon=2e-13,\
max_iters=100,gamma='haploid',figname='poisson.pdf', addUniform=1,initPercList=None,\
plotContigSize=1000,maxval=None):
"""
INITPERCLIST List with percentiles to initialize the EM
"""
if initPercList is None and (gamma is not 'haploid' and gamma is not 'diploid'):
initPercList = [1, 5, 12.5, 25, 50, 70]
#initPercList = [1, 50, 70]
print "Performing initializations at 1, 5, 12.5, 25 and 50th percentile"
elif initPercList is None and (gamma == 'haploid' or gamma == 'diploid'):
initPercList = [50]
print "Performing initialization at 50th percentile"
elif type(initPercList) is not type([]):
raise Exception, "Invalid initPercList. Provide a valid list of initialisation percentages or \'None\' for default initialisation"
likelihoods = []; mus = []; mixs = [];alphas = [];
for perc in initPercList:
lambda_init = stats.scoreatpercentile(count/np.float64(interval),perc)
likelihood, mu, mix, alpha = emPoisson(count,interval,k,False,epsilon,max_iters,\
gamma,figname,addUniform,lambda_init,plotContigSize,maxval)
likelihoods.append(likelihood)
mus.append(mu)
mixs.append(mix)
alphas.append(alpha)
print "Lambda init: %2.2e for %2.2f-th percentile -> L = %2.2f " % (lambda_init, perc,likelihood)
best = np.argmax(likelihoods)
return likelihoods[best], mus[best], mixs[best], alphas[best]
def emPoisson(count,interval,k=10,plot_mixture=0,epsilon=2e-13,\
max_iters=100,gamma='haploid',figname='poisson.pdf', addUniform=1, lambda_init=None,\
plotContigSize=1000,maxval=None):
"""
[L, MU, S2, MIX] = EM (COUNT, INTERVAL, K, STEPK, FIX_S2, REG, PLOT, EPS, MAXITER)
Uses the EM algorithm to fit a mixture of K 1D Poissons, where the means
and the variances of the models are spaced as (1 + k/STEPK), k = 0..K-1.
Optional parameters are:
FIX_S2 set the variance to the mean (1, dflt.) or estimate (0)
REG regularisation constant (dflt. 0)
PLOT plot progress (dflt. 0)
EPS epsilon, the change in likelihood to stop training (dflt. 1e-5)
MAXITER maximum number of iterations (dlft. 100)
    Returns the likelihood L, the means MU, the mixing proportions MIX and the geometric parameter ALPHA.
"""
start = time.clock()
if addUniform not in [0,1]: raise Exception, "addUniform should be 0 or 1"
k = k+addUniform # One additional k for the Uniform distribution
x = np.array(count)
t = np.array(interval)
N = np.float64(x.size)
A = np.float64(k)
mu = np.array([0.0]*k)
mix = np.array([0.0]*k)
sumR = np.array([0.0]*k)
alpha = 0.0001
mix_new = np.array([0.0]*k)
mu_new = np.array([0.0]*k)
min_data = max(min(x/np.float64(t)),epsilon); max_data = max(x/np.float64(t)) # Normalized for interval
tries = 0; retry = 1;
while ((retry == 1) and (tries <= 5)):
# Initialisation
if gamma == 'haploid':
gamma = dirichlet(gamma,k,N)
if lambda_init is None:
lambda_init = np.median(x/np.float64(t))
print ("Haploid initialization, lamda: " + str(lambda_init))
for i in range(int(A)):
mu[i] = lambda_init * (i+1)
mix[i] = 0.4/(k-1)
mix[0] = 0.6
elif gamma == 'diploid':
gamma = dirichlet(gamma,k,N)
if lambda_init is None:
lambda_init = np.median(x/np.float64(t)) / 2
print ("Diploid initialization, lamda: " + str(lambda_init))
for i in range(int(A)):
mu[i] = lambda_init * (i+1)
mix[i] = 0.4/(k-1)
mix[0] = 0.6
elif gamma == 'allo-triploid':
gamma = dirichlet(gamma,k,N)
if lambda_init is None:
lambda_init = np.median(x/np.float64(t)) / 3
for i in range(int(A)):
mu[i] = lambda_init * (i+1)
mix[i] = 0.2/(k-2)
mix[1] = 0.4
mix[2] = 0.4
print ("haplo-diploid initialization, lamda: " + str(lambda_init))
elif gamma == 'di-triploid':
gamma = dirichlet(gamma,k,N)
if lambda_init is None:
lambda_init = np.median(x/np.float64(t)) / 3
print ("Di-triploid initialization, lamda: " + str(lambda_init))
for i in range(int(A)):
mu[i] = lambda_init * (i+1)
mix[i] = 0.2/(k-2)
mix[1] = 0.4
mix[2] = 0.4
elif gamma == 'uniform':
gamma = dirichlet(gamma,k,N)
if lambda_init is None:
lambda_init = np.median(x/np.float64(t))
print ("Default initialization, lamda: " + str(lambda_init))
for i in range(int(A)):
mu[i] = lambda_init * (i+1)
mix[i] = 1./(k)
#mix[-1] = 0.001
elif gamma is None:
if lambda_init is None:
lambda_init = np.median(x/np.float64(t)) / 2
print ("Default gamma initialization, lamda: " + str(lambda_init))
for i in range(int(A)):
mu[i] = lambda_init * (i+1)
mix[i] = 1./(k)
else:
gamma = dirichlet(gamma,k,N)
if lambda_init is None:
lambda_init = np.median(x/np.float64(t))
print ("Default gamma initialization, lamda: " + str(lambda_init))
for i in range(int(A)):
mu[i] = lambda_init * (i+1)
mix[i] = 0.4/(k-1)
mix[0] = 0.6
R = np.random.rand(N,A) # N points x A models
done = 0; retry = 0;
iter = 0; prev_likelihood = 1.0e20;
while ((not done) and (not retry)):
iter = iter + 1;
done = 1;
if addUniform:
px,pxi,outliers = poisson_geometric(x,t,A,mix,mu,tries,N,alpha)
else:
px,pxi = poisson(x,t,A,mix,mu,tries,N)
if (not retry):
for i in range(int(A)):
for contig in range(len(pxi[i,:])):
if px[contig] > 0:
                            # Contribution of model i to px for this contig
R[contig,i] = np.transpose((pxi[i,contig] * mix[i]) / px[contig])
else:
R[contig,i] = 0
sumR[i] = sum(R[:,i])
if gamma is not None:
mix_new[i] = ((1/N)*sumR[i] + (1/N)*(gamma[i]-1)) / (1 + (sum(gamma)-k)/N);
else:
                    mix_new[i] = sumR[i] / N; # average responsibility over all data points
likelihood = sum(np.log(px+epsilon))
for i in range(int(A)):
likelihood += sum(R[:,i]*np.log(mix[i]+epsilon))
if gamma is not None:
likelihood += (gamma[i]-1)*np.log(mix[i]+epsilon)
denomgeom = A*t*R[:,-1]*np.log(1-alpha/np.float64(t))
denomgeom[x-A*t*mu[0] < 0] = 0
denomgeom_sum = sum(denomgeom)
denompois = 0
for i in range(0,int(A)-addUniform):
denompois += sum(R[:,i]*(i+1)*t)
denom = denomgeom_sum + denompois
numer = 0
for i in range(0,int(A)-addUniform):
numer += sum(R[:,i]*x)
mu_new[0] = numer/denom
for i in range(1,int(A)):
mu_new[i] = mu_new[0]*(i+1)
alpha = sumR[-1] / sum(R[:,-1]*(x/np.float64(t)-mu[-1]+1)) # alpha geometric
if math.isnan(alpha):
alpha = 10e-6
if (iter%100 == 0):
print '[%3d] L: %2.2f (change: %2.2e); sum (P(j|x)) = ; alpha = %2.2e; lambda = %2.2e' % \
(iter, likelihood, abs((likelihood - prev_likelihood)/likelihood),alpha,mu_new[0]),
for i in range(int(A)):
print '%2.2f ' % sumR[i],
print '; P(j) = ',
for i in range(int(A)-1):
print '%2.2f ' % mix[i],
print '\n',
print 'Outliers:',outliers,
print '\n'
            done = (abs ((likelihood - prev_likelihood)/likelihood) < 1e-5); # check for convergence (relative improvement)
if iter >= max_iters:
print "Maximum number of iterations reached"
done = True
if done:
print "Number of iterations needed to converge: ", iter
prev_likelihood = likelihood;
# Update
for i in range(int(A)):
mix[i] = mix_new[i];
mu[i] = mu_new[i];
elapsed = (time.clock() - start)
print "EM took ", elapsed, " to finish for " , len(count), " contigs"
return likelihood, mu[:k-addUniform], mix[:k-addUniform], alpha
class Contiglocs(dict):
def __init__(self, tabfile):
"""Read contig locations from tab delimited file. Columns in file should be:
chromosome <tab> start <tab> end <tab> contigID
(show-tiling out/.delta | tiling2tab.py)
"""
H = open(tabfile)
for line in H:
chr,start,end,id,orient,clen = line.split("\t")
id = id.rstrip()
self[id] = {"chr":chr,"start":int32(start),"end":int32(end),"orient":orient,"clen":int32(clen)}
H.close()
class Cnv():
"""Calculate CNV based on mixture model"""
def __init__(self, data, nrsamples, k, epsilon, max_iters, gammas, minSize,minCP,maxCP):
if type(k) == type(2):
k = [k]
self.mixtures = []
self.posteriors = []
first = True
figname = 'poisson' + str(0) + '.pdf'
[counts,intervals] = self.getCountsAndlenghts(data, 0)
[counts,intervals] = self._filterContigs(counts,intervals,minSize,minCP,maxCP)
print "Fitting mixture model sample: " + str(0)
self.mixtures.append(PoissonMixture(counts, intervals,k, epsilon,max_iters,gammas[0]))
        # Calculate optimal k based on a combined BIC
BICall = zeros(len(self.mixtures[0].BICs))
if self.mixtures[0].BICs is not None:
BICall += self.mixtures[0].BICs
best = argmin(BICall)
print "Combined BIC optimal is: ", self.mixtures[0].ks[best]
# Set new k
self.mixtures[0].setModelNumber(best)
print "Calculating posteriors"
self.posteriors.append(self.posteriorAllContigs(self.mixtures[0],data,0))
def printParameters(self,prefix):
if prefix == "":
modelfile = "model.txt"
else:
modelfile = prefix + ".model.txt"
h = open(modelfile,"w")
for mixture in self.mixtures:
            line = '%d\t%.6f\t%.6f' % (len(mixture.mu), mixture.mu[0], mixture.alpha)
            for i in range(len(mixture.mix)):
                line += '\t%.6f' % (mixture.mix[i])
            print >>h, line
h.close()
return
def getCountsAndlenghts(self,data,sample):
counts = []; lengths = []
contigs = data.keys()
for contig in contigs:
counts.append(data[contig]['counts'][sample])
lengths.append(data[contig]['clen'])
return counts, lengths
def cnv(self, sample1, sample2):
pCnv = {}
for node in self.posteriors[sample1]:
pCnv[node] = self.probabilityCNV(self.posteriors[sample1][node],self.posteriors[sample2][node])
return pCnv
def cn(self,sample):
res = {}
for node in self.posteriors[sample]:
res[node] = argmax(self.posteriors[sample][node]) + 1
return res
def isZeroCN(self, contigid, data, sample, multiplier):
"""Set CN to zero if it is lower than 'multiplier x lambda'"""
mu = self.mixtures[sample].mu[0]
counts = data[contigid]['counts'][sample]
if (counts/float64(data[contigid]['clen']) < (mu - multiplier*sqrt(mu))):
return True
else:
return False
#def printCN(self,am,sample,ref=None,file=stdout):
# if file != stdout:
# file = open(file,"w")
# cns = self.cn(sample)
# for node in am.ass.contigGraph.contigStats:
# count = am.ass.contigGraph.contigStats[node]['counts'][0]
# length = am.ass.contigGraph.contigStats[node]['length']
# cn = cns[node]
# if ref and ref.contigLocation.has_key(node):
# chr = ref.contigLocation[node]['chr']
# start = ref.contigLocation[node]['feat'].location.nofuzzy_start+1
# end = ref.contigLocation[node]['feat'].location.nofuzzy_end
# else:
# chr = ''
# start = 0
# end = 0
# # contig ID, length, count, CN, CHR, start, stop
# print >>file,'%s\t%d\t%d\t%d\t%s\t%d\t%d' % (node,length,count,cn+1,chr,start,end)
# if file != stdout:
# file.close()
def posteriorAllContigs(self,mixture, data, sample):
"""Calculate posteriar probabilities for each contig"""
prob = {}
for node in data.keys():
# TODO fix this in absence of am
#if am.ass.contigGraph.contigStats[node]['sampleMems'][sample]:
if data[node]['counts'][sample] >= 1:
prob[node] = mixture._posterior(data[node]['counts'][sample],data[node]['clen'])
else:
prob[node] = [0]*len(mixture.mix)
return prob
def probabilityCNVAllContigs(self, s1, s2):
res = {}
post1 = self.posteriors[s1]
post2 = self.posteriors[s2]
for node in post1.keys():
res[node] = self.probabilityCNV(post1[node], post2[node])
return res
def probabilityCNV(self, post1, post2):
"""Probability a contig has a different copy number in the two samples
Input: Two lists with posterior probabilities for all copy numbers (k)"""
p_equal = 0
for k in range(len(post1)):
p_equal += post1[k] * post2[k]
return 1-p_equal
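    # Worked example (added annotation): with post1 = [0.9, 0.1] and
    # post2 = [0.2, 0.8] the probability that both samples share the same copy
    # number is 0.9*0.2 + 0.1*0.8 = 0.26, so probabilityCNV returns 0.74.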
def _filterContigs(self, counts, intervals, minSize, minCP, maxCP):
"""Filter contigs with too little reads or too small
minSize: minimum contig size
minCP : minimum number of reads per 100 nucleotides [1]
maxCP : maximum number of reads per 100 nucleotides [mean(count/100)*5]"""
counts = array(counts)
intervals = array(intervals)
countsCP = (counts / float64(intervals)) * 100
if maxCP == "Default":
maxCP = mean(countsCP) * 5
print "Contigs < ", minSize, " are not used to fit the model"
ind = (intervals >= minSize) & (countsCP >= minCP) & (countsCP <= maxCP)
return counts[ind], intervals[ind]
class PoissonMixture():
"""Poisson mixture model to extimate copy number for each contig"""
def __init__(self,counts,intervals,k=None, epsilon=1e-15,max_iters=100,\
gamma=None):
print "Gamma = " , gamma
self.counts = counts
self.intervals = intervals
[self.L,self.mu,self.mix] = \
self._emPoisson(counts, intervals,k, epsilon,max_iters,gamma)
print "L: ", self.L
print "lambda: ", self.mu
print "p(k): ", self.mix
def setModelNumber(self, KsIndex):
self.L = self.likelihoods[KsIndex]; self.mu = self.mus[KsIndex];
self.mix = self.mixs[KsIndex]; self.alpha = self.alphas[KsIndex];
print "Changed model paramters to:"
print "L: ", self.L
print "lambda: ", self.mu
print "p(k): ", self.mix
def _priorDepth(self,x,t):
px = float64(0)
for k in range(len(self.mix)):
px += self.mix[k]*self._pmf(x, self.mu[k],t)
return px
def _posterior(self,x,t):
"""Posterior probability:
p(k|x) = p(k). (px|k).p(x))"""
posterior = []
px = self._priorDepth(x,t)
for k in range(len(self.mix)):
z = self.mix[k] * self._pmf(x, self.mu[k],t) / px
if(isnan(z)):
z = 10e-6;
posterior.append(z)
return posterior
def _pmf(self,x,mu,t):
"""evaluate gaussian at x"""
return stats.poisson.pmf(x,mu*t)
def _emPoisson(self,counts, intervals,k, epsilon,max_iters,gamma):
        # Remove contigs with length zero
counts = array([counts[i] for i in range(len(intervals)) if intervals[i]!=0])
intervals = array([intervals[i] for i in range(len(intervals)) if intervals[i]!=0])
# filter repeats with too high copy number
x = counts/float64(intervals)
print "k: ", k
[self.likelihoods, self.mus, self.mixs, self.BICs, self.ks, self.alphas] = \
emPoissonBic(counts, intervals,k,epsilon,max_iters,gamma,\
addUniform=1,initPercList=None)
best = argmin(self.BICs)
L = self.likelihoods[best]; mu = self.mus[best]; mix = self.mixs[best]
return L,mu,mix
class Mutation():
def __init__(self,type="mutation", location=None):
self.type = type
self.location = location
def setLocation(self,chr,start,end):
"""provide Python range for start to end, i.e. a deletion of
length 5 at 2nd position of a sequence has coordinated start=1, end=6"""
self.location = {"chr":chr,"loc":FeatureLocation(start,end)}
class CNVcall(Mutation):
def __init__(self, cid, probCNV, posteriors,type="cnv"):
Mutation.__init__(self, type=type)
self.cid = cid
self.probCNV = probCNV
self.posteriors = posteriors
class Run():
"""Fit mixture models and find copy number variation"""
def __init__(self,infile,gammas,minSize,locs,k,epsilon,max_iters,minCP,maxCP,cnvThrs,minReportSize,prefix,multiplier):
data = {}
nrsamples = 0
init = False
if locs is not None:
locs = Contiglocs(locs)
# Read counts from file
h = open(infile)
for line in h.readlines():
vals = line.split("\t")
data[vals[0]] = {"clen":int32(vals[1]), "counts":int32(vals[2:])}
if not init:
nrsamples = 1
if len(gammas) == 1:
                    gammas *= nrsamples # Each sample gets the same gamma if only one is provided
else:
if len(gammas) != nrsamples:
raise Exception, "Number of provided gamma's should be equal to number of samples"
if nrsamples > 1:
# Train model on samples to find CNVs
self.cnv,self.probCNV,self.cnvC = self.getCnvContigs(data, k, epsilon, max_iters, gammas, minSize,minCP,maxCP,nrsamples,cnvThrs)
# Print CNVs to file
self.printCnv(data,locs,minReportSize,prefix,multiplier)
elif nrsamples == 1:
# Train model on single sample to find CNs
self.cnv = self.getCNContigs(data, k, epsilon, max_iters, gammas, minSize,minCP,maxCP,nrsamples,cnvThrs)
self.printCN(data,locs,minReportSize,prefix,multiplier)
# Print model parameters to file (for plotting etc)
self.cnv.printParameters(prefix)
def getCNContigs(self, data, k, epsilon, max_iters, gammas, minSize,minCP,maxCP,nrsamples,cnvThrs):
res = []
cnv = Cnv(data, nrsamples, k, epsilon, max_iters, gammas, minSize, minCP, maxCP)
return cnv
def getCnvContigs(self, data, k, epsilon, max_iters, gammas, minSize,minCP,maxCP,nrsamples,cnvThrs):
res = []
# TODO Change this to allow for multiple samples?
sid1 = 0
sid2 = 1
cnv = Cnv(data, nrsamples, k, epsilon, max_iters, gammas, minSize, minCP, maxCP)
probCNV = cnv.probabilityCNVAllContigs(sid1,sid2)
for k in probCNV:
if probCNV[k] > cnvThrs:
posteriors = []
posteriors.append(cnv.posteriors[sid1][k])
posteriors.append(cnv.posteriors[sid2][k])
res.append(CNVcall(k,probCNV[k],posteriors,type="cnv"))
return cnv,probCNV,res
def printCN(self,data,locs,minReportSize,prefix,multiplier):
if prefix == "":
file = "copynumbers.txt"
else:
file = prefix + ".copynumbers.txt"
file = open(file,"w")
cns = self.cnv.cn(0)
for cid,cn in cns.iteritems():
if data[cid]["clen"] < minReportSize:
continue
# Set copy number to zero for very low read counts
if self.cnv.isZeroCN(cid, data, 0, multiplier): cn = 0
if locs is not None and locs.has_key(cid):
chr = locs[cid]['chr']
start = locs[cid]["start"]
end = locs[cid]["end"]
else:
chr = ""
start = -1
end = -1
print >>file,'%s\t%d' % (cid, cn)
if file != stdout:
file.close()
def printCnv(self,data,locs,minReportSize,prefix,multiplier):
if prefix == "":
file = "cnvcalls.txt"
else:
file = prefix + ".cnvcalls.txt"
file = open(file,"w")
for contig in sorted(self.cnvC, key=lambda cnvcall:(1-cnvcall.probCNV)):
if data[contig.cid]["clen"] < minReportSize:
continue
cn1 = array(self.cnv.posteriors[0][contig.cid]).argmax() + 1
cn2 = array(self.cnv.posteriors[1][contig.cid]).argmax() + 1
# Set copy number to zero for very low read counts
if self.cnv.isZeroCN(contig.cid, data, 0, multiplier): cn1 = 0
if self.cnv.isZeroCN(contig.cid, data, 1, multiplier): cn2 = 0
if locs is not None and locs.has_key(contig.cid):
chr = locs[contig.cid]['chr']
start = locs[contig.cid]["start"]
end = locs[contig.cid]["end"]
else:
chr = ""
start = -1
end = -1
print >>file,'%s\t%.6f\t%s\t%d\t%d\t%d\t%d\t%d' % (contig.cid,\
contig.probCNV,\
chr,\
start,\
end,\
data[contig.cid]["clen"],\
cn1,\
cn2)
if file != stdout:
file.close()
def main():
# Set default
infile = None # Input file with counts
#outfile = stdout # Output file name
prefix = ""
gammas = [None] # Gamma settings (optional)
minSize = 500 # Minimum contig size (optional)
locs = None # Contig locations (optional)
m = None # Number of models (optional)
epsilon = 10e-6
max_iters = 500
minCP = 1
maxCP = 10e5
cnvThrs = -1
minReportSize = 500
multiplier = 5
# parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:], "g:s:i:l:e:r:m:t:p:S:hM:", ["help"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit(0)
if o == "-g":
if a == "None":
                gammas = [None]
else:
gammas = list(a.split(","))
#gamma = a
if o == "-s":
minSize = int(a)
if o =="-i":
infile = a
if o =="-l":
locs = a
#if o =="-o":
# outfile = a
if o =="-e":
            epsilon = float64(a)
if o =="-r":
max_iters = int32(a)
if o =="-m":
m = [int32(a)]
if o =="-t":
cnvThrs = float64(a)
if o =="-S":
minReportSize = float64(a)
if o =="-p":
prefix = a
if o == "-M":
multiplier = float64(a)
if infile is None:
print "Specifiy input file with counts"
print "for help use --help"
sys.exit(2)
session = Run(infile,gammas,minSize,locs,m,epsilon,max_iters,minCP,maxCP,cnvThrs,minReportSize,prefix,multiplier)
if __name__ == "__main__":
main()
| 1dayac/CNVera | tools/magnolia.py | Python | gpl-3.0 | 31,332 |
# -*- coding: utf-8 -*-
#
# AzimuthSwath documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 12 17:11:03 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'AzimuthSwath'
copyright = u'2013, Little Earth GIS Consulting Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'AzimuthSwathdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'AzimuthSwath.tex', u'AzimuthSwath Documentation',
u'Little Earth GIS Consulting Inc.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'azimuthswath', u'AzimuthSwath Documentation',
[u'Little Earth GIS Consulting Inc.'], 1)
]
| popkinj/azimuth-swath | help/source/conf.py | Python | mit | 7,131 |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow layers with added variables for parameter masking.
Branched from tensorflow/contrib/layers/python/layers/layers.py
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib.framework.python.ops import add_arg_scope
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.contrib.model_pruning.python.layers import core_layers as core
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as tf_variables
def _model_variable_getter(getter,
name,
shape=None,
dtype=None,
initializer=None,
regularizer=None,
trainable=True,
collections=None,
caching_device=None,
partitioner=None,
rename=None,
use_resource=None,
**_):
"""Getter that uses model_variable for compatibility with core layers."""
short_name = name.split('/')[-1]
if rename and short_name in rename:
name_components = name.split('/')
name_components[-1] = rename[short_name]
name = '/'.join(name_components)
return variables.model_variable(
name,
shape=shape,
dtype=dtype,
initializer=initializer,
regularizer=regularizer,
collections=collections,
trainable=trainable,
caching_device=caching_device,
partitioner=partitioner,
custom_getter=getter,
use_resource=use_resource)
def _build_variable_getter(rename=None):
"""Build a model variable getter that respects scope getter and renames."""
# VariableScope will nest the getters
def layer_variable_getter(getter, *args, **kwargs):
kwargs['rename'] = rename
return _model_variable_getter(getter, *args, **kwargs)
return layer_variable_getter
def _add_variable_to_collections(variable, collections_set, collections_name):
"""Adds variable (or all its parts) to all collections with that name."""
collections = utils.get_variable_collections(collections_set,
collections_name) or []
variables_list = [variable]
if isinstance(variable, tf_variables.PartitionedVariable):
variables_list = [v for v in variable]
for collection in collections:
for var in variables_list:
if var not in ops.get_collection(collection):
ops.add_to_collection(collection, var)
@add_arg_scope
def masked_convolution(inputs,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
data_format=None,
rate=1,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds an 2D convolution followed by an optional batch_norm layer.
The layer creates a mask variable on top of the weight variable. The input to
the convolution operation is the elementwise multiplication of the mask
  variable and the weight variable.
It is required that 1 <= N <= 3.
`convolution` creates a variable called `weights`, representing the
convolutional kernel, that is convolved (actually cross-correlated) with the
`inputs` to produce a `Tensor` of activations. If a `normalizer_fn` is
provided (such as `batch_norm`), it is then applied. Otherwise, if
`normalizer_fn` is None and a `biases_initializer` is provided then a `biases`
  variable would be created and added to the activations. Finally, if
`activation_fn` is not `None`, it is applied to the activations as well.
Performs atrous convolution with input stride/dilation rate equal to `rate`
if a value > 1 for any dimension of `rate` is specified. In this case
`stride` values != 1 are not supported.
Args:
inputs: A Tensor of rank N+2 of shape
`[batch_size] + input_spatial_shape + [in_channels]` if data_format does
not start with "NC" (default), or
`[batch_size, in_channels] + input_spatial_shape` if data_format starts
with "NC".
num_outputs: Integer, the number of output filters.
kernel_size: A sequence of N positive integers specifying the spatial
      dimensions of the filters. Can be a single integer to specify the same
value for all spatial dimensions.
stride: A sequence of N positive integers specifying the stride at which to
compute output. Can be a single integer to specify the same value for all
spatial dimensions. Specifying any `stride` value != 1 is incompatible
with specifying any `rate` value != 1.
padding: One of `"VALID"` or `"SAME"`.
data_format: A string or None. Specifies whether the channel dimension of
the `input` and output is the last dimension (default, or if `data_format`
does not start with "NC"), or the second dimension (if `data_format`
starts with "NC"). For N=1, the valid values are "NWC" (default) and
"NCW". For N=2, the valid values are "NHWC" (default) and "NCHW".
For N=3, the valid values are "NDHWC" (default) and "NCDHW".
rate: A sequence of N positive integers specifying the dilation rate to use
for atrous convolution. Can be a single integer to specify the same
value for all spatial dimensions. Specifying any `rate` value != 1 is
incompatible with specifying any `stride` value != 1.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
default set to None for no normalizer function
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collection per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for `variable_scope`.
Returns:
A tensor representing the output of the operation.
Raises:
ValueError: If `data_format` is invalid.
ValueError: Both 'rate' and `stride` are not uniformly 1.
"""
if data_format not in [None, 'NWC', 'NCW', 'NHWC', 'NCHW', 'NDHWC', 'NCDHW']:
raise ValueError('Invalid data_format: %r' % (data_format,))
layer_variable_getter = _build_variable_getter({
'bias': 'biases',
'kernel': 'weights'
})
with variable_scope.variable_scope(
scope, 'Conv', [inputs], reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
input_rank = inputs.get_shape().ndims
if input_rank == 3:
raise ValueError('Sparse Convolution not supported for input with rank',
input_rank)
elif input_rank == 4:
layer_class = core.MaskedConv2D
elif input_rank == 5:
raise ValueError('Sparse Convolution not supported for input with rank',
input_rank)
else:
raise ValueError('Sparse Convolution not supported for input with rank',
input_rank)
if data_format is None or data_format == 'NHWC':
df = 'channels_last'
elif data_format == 'NCHW':
df = 'channels_first'
else:
raise ValueError('Unsupported data format', data_format)
layer = layer_class(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
data_format=df,
dilation_rate=rate,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.use_bias:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
if normalizer_fn is not None:
normalizer_params = normalizer_params or {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
masked_conv2d = masked_convolution
@add_arg_scope
def masked_fully_connected(
inputs,
num_outputs,
activation_fn=nn.relu,
normalizer_fn=None,
normalizer_params=None,
weights_initializer=initializers.xavier_initializer(),
weights_regularizer=None,
biases_initializer=init_ops.zeros_initializer(),
biases_regularizer=None,
reuse=None,
variables_collections=None,
outputs_collections=None,
trainable=True,
scope=None):
"""Adds a sparse fully connected layer. The weight matrix is masked.
`fully_connected` creates a variable called `weights`, representing a fully
connected weight matrix, which is multiplied by the `inputs` to produce a
`Tensor` of hidden units. If a `normalizer_fn` is provided (such as
`batch_norm`), it is then applied. Otherwise, if `normalizer_fn` is
None and a `biases_initializer` is provided then a `biases` variable would be
  created and added to the hidden units. Finally, if `activation_fn` is not `None`,
it is applied to the hidden units as well.
Note: that if `inputs` have a rank greater than 2, then `inputs` is flattened
prior to the initial matrix multiply by `weights`.
Args:
inputs: A tensor of at least rank 2 and static value for the last dimension;
i.e. `[batch_size, depth]`, `[None, None, None, channels]`.
num_outputs: Integer or long, the number of output units in the layer.
activation_fn: Activation function. The default value is a ReLU function.
Explicitly set it to None to skip it and maintain a linear activation.
normalizer_fn: Normalization function to use instead of `biases`. If
`normalizer_fn` is provided then `biases_initializer` and
`biases_regularizer` are ignored and `biases` are not created nor added.
default set to None for no normalizer function
normalizer_params: Normalization function parameters.
weights_initializer: An initializer for the weights.
weights_regularizer: Optional regularizer for the weights.
biases_initializer: An initializer for the biases. If None skip biases.
biases_regularizer: Optional regularizer for the biases.
reuse: Whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
variables_collections: Optional list of collections for all the variables or
a dictionary containing a different list of collections per variable.
outputs_collections: Collection to add the outputs.
trainable: If `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
scope: Optional scope for variable_scope.
Returns:
The tensor variable representing the result of the series of operations.
Raises:
ValueError: If x has rank less than 2 or if its last dimension is not set.
"""
if not isinstance(num_outputs, six.integer_types):
raise ValueError('num_outputs should be int or long, got %s.' %
(num_outputs,))
layer_variable_getter = _build_variable_getter({
'bias': 'biases',
'kernel': 'weights'
})
with variable_scope.variable_scope(
scope,
'fully_connected', [inputs],
reuse=reuse,
custom_getter=layer_variable_getter) as sc:
inputs = ops.convert_to_tensor(inputs)
layer = core.MaskedFullyConnected(
units=num_outputs,
activation=None,
use_bias=not normalizer_fn and biases_initializer,
kernel_initializer=weights_initializer,
bias_initializer=biases_initializer,
kernel_regularizer=weights_regularizer,
bias_regularizer=biases_regularizer,
activity_regularizer=None,
trainable=trainable,
name=sc.name,
dtype=inputs.dtype.base_dtype,
_scope=sc,
_reuse=reuse)
outputs = layer.apply(inputs)
# Add variables to collections.
_add_variable_to_collections(layer.kernel, variables_collections, 'weights')
if layer.bias is not None:
_add_variable_to_collections(layer.bias, variables_collections, 'biases')
# Apply normalizer function / layer.
if normalizer_fn is not None:
if not normalizer_params:
normalizer_params = {}
outputs = normalizer_fn(outputs, **normalizer_params)
if activation_fn is not None:
outputs = activation_fn(outputs)
return utils.collect_named_outputs(outputs_collections,
sc.original_name_scope, outputs)
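# Illustrative usage sketch (added for exposition, not part of the original
# file): composing the masked layers defined above into a tiny graph. The
# layer sizes, scope names and the shapes documented below are arbitrary
# example values, not an established API of this module.
def _example_masked_layers(images, features):
  """images: a [batch, height, width, channels] tensor; features: [batch, depth]."""
  net = masked_conv2d(
      images, num_outputs=16, kernel_size=3, scope='example_conv')
  logits = masked_fully_connected(
      features, 10, activation_fn=None, scope='example_fc')
  return net, logits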
| nburn42/tensorflow | tensorflow/contrib/model_pruning/python/layers/layers.py | Python | apache-2.0 | 15,360 |
# Copyright 2012, VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import mock
import six
import testtools
from neutron.agent.linux import utils
from neutron.tests import base
_marker = object()
class AgentUtilsExecuteTest(base.BaseTestCase):
def setUp(self):
super(AgentUtilsExecuteTest, self).setUp()
self.test_file = self.get_temp_file_path('test_execute.tmp')
open(self.test_file, 'w').close()
self.process = mock.patch('eventlet.green.subprocess.Popen').start()
self.process.return_value.returncode = 0
self.mock_popen = self.process.return_value.communicate
def test_without_helper(self):
expected = "%s\n" % self.test_file
self.mock_popen.return_value = [expected, ""]
result = utils.execute(["ls", self.test_file])
self.assertEqual(result, expected)
def test_with_helper(self):
expected = "ls %s\n" % self.test_file
self.mock_popen.return_value = [expected, ""]
self.config(group='AGENT', root_helper='echo')
result = utils.execute(["ls", self.test_file], run_as_root=True)
self.assertEqual(result, expected)
def test_stderr_true(self):
expected = "%s\n" % self.test_file
self.mock_popen.return_value = [expected, ""]
out = utils.execute(["ls", self.test_file], return_stderr=True)
self.assertIsInstance(out, tuple)
self.assertEqual(out, (expected, ""))
def test_check_exit_code(self):
self.mock_popen.return_value = ["", ""]
stdout = utils.execute(["ls", self.test_file[:-1]],
check_exit_code=False)
self.assertEqual(stdout, "")
def test_execute_raises(self):
self.mock_popen.side_effect = RuntimeError
self.assertRaises(RuntimeError, utils.execute,
["ls", self.test_file[:-1]])
def test_process_input(self):
expected = "%s\n" % self.test_file[:-1]
self.mock_popen.return_value = [expected, ""]
result = utils.execute(["cat"], process_input="%s\n" %
self.test_file[:-1])
self.assertEqual(result, expected)
def test_with_addl_env(self):
expected = "%s\n" % self.test_file
self.mock_popen.return_value = [expected, ""]
result = utils.execute(["ls", self.test_file],
addl_env={'foo': 'bar'})
self.assertEqual(result, expected)
def test_return_code_log_error_raise_runtime(self):
self.mock_popen.return_value = ('', '')
self.process.return_value.returncode = 1
with mock.patch.object(utils, 'LOG') as log:
self.assertRaises(RuntimeError, utils.execute,
['ls'])
self.assertTrue(log.error.called)
def test_return_code_log_error_no_raise_runtime(self):
self.mock_popen.return_value = ('', '')
self.process.return_value.returncode = 1
with mock.patch.object(utils, 'LOG') as log:
utils.execute(['ls'], check_exit_code=False)
self.assertTrue(log.error.called)
def test_return_code_log_debug(self):
self.mock_popen.return_value = ('', '')
with mock.patch.object(utils, 'LOG') as log:
utils.execute(['ls'])
self.assertTrue(log.debug.called)
def test_return_code_raise_runtime_do_not_log_fail_as_error(self):
self.mock_popen.return_value = ('', '')
self.process.return_value.returncode = 1
with mock.patch.object(utils, 'LOG') as log:
self.assertRaises(RuntimeError, utils.execute,
['ls'], log_fail_as_error=False)
self.assertFalse(log.error.called)
def test_encode_process_input(self):
str_idata = "%s\n" % self.test_file[:-1]
str_odata = "%s\n" % self.test_file
if six.PY3:
bytes_idata = str_idata.encode(encoding='utf-8')
bytes_odata = str_odata.encode(encoding='utf-8')
self.mock_popen.return_value = [bytes_odata, b'']
result = utils.execute(['cat'], process_input=str_idata)
self.mock_popen.assert_called_once_with(bytes_idata)
self.assertEqual(str_odata, result)
else:
self.mock_popen.return_value = [str_odata, '']
result = utils.execute(['cat'], process_input=str_idata)
self.mock_popen.assert_called_once_with(str_idata)
self.assertEqual(str_odata, result)
def test_return_str_data(self):
str_data = "%s\n" % self.test_file
self.mock_popen.return_value = [str_data, '']
result = utils.execute(['ls', self.test_file], return_stderr=True)
self.assertEqual((str_data, ''), result)
def test_raise_unicodeerror_in_decoding_out_data(self):
class m_bytes(bytes):
def decode(self, encoding=None):
raise UnicodeError
err_data = 'UnicodeError'
bytes_err_data = b'UnicodeError'
out_data = "%s\n" % self.test_file
bytes_out_data = m_bytes(out_data.encode(encoding='utf-8'))
if six.PY3:
self.mock_popen.return_value = [bytes_out_data, bytes_err_data]
result = utils.execute(['ls', self.test_file],
return_stderr=True)
self.assertEqual((bytes_out_data, err_data), result)
class AgentUtilsExecuteEncodeTest(base.BaseTestCase):
def setUp(self):
super(AgentUtilsExecuteEncodeTest, self).setUp()
self.test_file = self.get_temp_file_path('test_execute.tmp')
open(self.test_file, 'w').close()
def test_decode_return_data(self):
str_data = "%s\n" % self.test_file
result = utils.execute(['ls', self.test_file], return_stderr=True)
self.assertEqual((str_data, ''), result)
class AgentUtilsGetInterfaceMAC(base.BaseTestCase):
def test_get_interface_mac(self):
expect_val = '01:02:03:04:05:06'
with mock.patch('fcntl.ioctl') as ioctl:
ioctl.return_value = ''.join(['\x00' * 18,
'\x01\x02\x03\x04\x05\x06',
'\x00' * 232])
actual_val = utils.get_interface_mac('eth0')
self.assertEqual(actual_val, expect_val)
class AgentUtilsReplaceFile(base.BaseTestCase):
def test_replace_file(self):
# make file to replace
with mock.patch('tempfile.NamedTemporaryFile') as ntf:
ntf.return_value.name = '/baz'
with mock.patch('os.chmod') as chmod:
with mock.patch('os.rename') as rename:
utils.replace_file('/foo', 'bar')
expected = [mock.call('w+', dir='/', delete=False),
mock.call().write('bar'),
mock.call().close()]
ntf.assert_has_calls(expected)
chmod.assert_called_once_with('/baz', 0o644)
rename.assert_called_once_with('/baz', '/foo')
class TestFindChildPids(base.BaseTestCase):
def test_returns_empty_list_for_exit_code_1(self):
with mock.patch.object(utils, 'execute',
side_effect=RuntimeError('Exit code: 1')):
self.assertEqual(utils.find_child_pids(-1), [])
def test_returns_empty_list_for_no_output(self):
with mock.patch.object(utils, 'execute', return_value=''):
self.assertEqual(utils.find_child_pids(-1), [])
    def test_returns_list_of_child_process_ids_for_good_output(self):
with mock.patch.object(utils, 'execute', return_value=' 123 \n 185\n'):
self.assertEqual(utils.find_child_pids(-1), ['123', '185'])
def test_raises_unknown_exception(self):
with testtools.ExpectedException(RuntimeError):
with mock.patch.object(utils, 'execute',
side_effect=RuntimeError()):
utils.find_child_pids(-1)
class TestGetRoothelperChildPid(base.BaseTestCase):
def _test_get_root_helper_child_pid(self, expected=_marker,
run_as_root=False, pids=None):
def _find_child_pids(x):
if not pids:
return []
pids.pop(0)
return pids
mock_pid = object()
with mock.patch.object(utils, 'find_child_pids',
side_effect=_find_child_pids):
actual = utils.get_root_helper_child_pid(mock_pid, run_as_root)
if expected is _marker:
expected = str(mock_pid)
self.assertEqual(expected, actual)
def test_returns_process_pid_not_root(self):
self._test_get_root_helper_child_pid()
def test_returns_child_pid_as_root(self):
self._test_get_root_helper_child_pid(expected='2', pids=['1', '2'],
run_as_root=True)
def test_returns_last_child_pid_as_root(self):
self._test_get_root_helper_child_pid(expected='3',
pids=['1', '2', '3'],
run_as_root=True)
def test_returns_none_as_root(self):
self._test_get_root_helper_child_pid(expected=None, run_as_root=True)
class TestPathUtilities(base.BaseTestCase):
def test_remove_abs_path(self):
self.assertEqual(['ping', '8.8.8.8'],
utils.remove_abs_path(['/usr/bin/ping', '8.8.8.8']))
def test_cmd_matches_expected_matches_abs_path(self):
cmd = ['/bar/../foo']
self.assertTrue(utils.cmd_matches_expected(cmd, cmd))
def test_cmd_matches_expected_matches_script(self):
self.assertTrue(utils.cmd_matches_expected(['python', 'script'],
['script']))
def test_cmd_matches_expected_doesnt_match(self):
self.assertFalse(utils.cmd_matches_expected('foo', 'bar'))
class FakeUser(object):
def __init__(self, name):
self.pw_name = name
class FakeGroup(object):
def __init__(self, name):
self.gr_name = name
class TestBaseOSUtils(base.BaseTestCase):
EUID = 123
EUNAME = 'user'
EGID = 456
EGNAME = 'group'
@mock.patch('os.geteuid', return_value=EUID)
@mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME))
def test_is_effective_user_id(self, getpwuid, geteuid):
self.assertTrue(utils.is_effective_user(self.EUID))
geteuid.assert_called_once_with()
self.assertFalse(getpwuid.called)
@mock.patch('os.geteuid', return_value=EUID)
@mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME))
def test_is_effective_user_str_id(self, getpwuid, geteuid):
self.assertTrue(utils.is_effective_user(str(self.EUID)))
geteuid.assert_called_once_with()
self.assertFalse(getpwuid.called)
@mock.patch('os.geteuid', return_value=EUID)
@mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME))
def test_is_effective_user_name(self, getpwuid, geteuid):
self.assertTrue(utils.is_effective_user(self.EUNAME))
geteuid.assert_called_once_with()
getpwuid.assert_called_once_with(self.EUID)
@mock.patch('os.geteuid', return_value=EUID)
@mock.patch('pwd.getpwuid', return_value=FakeUser(EUNAME))
def test_is_not_effective_user(self, getpwuid, geteuid):
self.assertFalse(utils.is_effective_user('wrong'))
geteuid.assert_called_once_with()
getpwuid.assert_called_once_with(self.EUID)
@mock.patch('os.getegid', return_value=EGID)
@mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME))
def test_is_effective_group_id(self, getgrgid, getegid):
self.assertTrue(utils.is_effective_group(self.EGID))
getegid.assert_called_once_with()
self.assertFalse(getgrgid.called)
@mock.patch('os.getegid', return_value=EGID)
@mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME))
def test_is_effective_group_str_id(self, getgrgid, getegid):
self.assertTrue(utils.is_effective_group(str(self.EGID)))
getegid.assert_called_once_with()
self.assertFalse(getgrgid.called)
@mock.patch('os.getegid', return_value=EGID)
@mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME))
def test_is_effective_group_name(self, getgrgid, getegid):
self.assertTrue(utils.is_effective_group(self.EGNAME))
getegid.assert_called_once_with()
getgrgid.assert_called_once_with(self.EGID)
@mock.patch('os.getegid', return_value=EGID)
@mock.patch('grp.getgrgid', return_value=FakeGroup(EGNAME))
def test_is_not_effective_group(self, getgrgid, getegid):
self.assertFalse(utils.is_effective_group('wrong'))
getegid.assert_called_once_with()
getgrgid.assert_called_once_with(self.EGID)
class TestUnixDomainHttpConnection(base.BaseTestCase):
def test_connect(self):
with mock.patch.object(utils, 'cfg') as cfg:
cfg.CONF.metadata_proxy_socket = '/the/path'
with mock.patch('socket.socket') as socket_create:
conn = utils.UnixDomainHTTPConnection('169.254.169.254',
timeout=3)
conn.connect()
socket_create.assert_has_calls([
mock.call(socket.AF_UNIX, socket.SOCK_STREAM),
mock.call().settimeout(3),
mock.call().connect('/the/path')]
)
self.assertEqual(conn.timeout, 3)
class TestUnixDomainHttpProtocol(base.BaseTestCase):
def test_init_empty_client(self):
u = utils.UnixDomainHttpProtocol(mock.Mock(), '', mock.Mock())
self.assertEqual(u.client_address, ('<local>', 0))
def test_init_with_client(self):
u = utils.UnixDomainHttpProtocol(mock.Mock(), 'foo', mock.Mock())
self.assertEqual(u.client_address, 'foo')
class TestUnixDomainWSGIServer(base.BaseTestCase):
def setUp(self):
super(TestUnixDomainWSGIServer, self).setUp()
self.eventlet_p = mock.patch.object(utils, 'eventlet')
self.eventlet = self.eventlet_p.start()
self.server = utils.UnixDomainWSGIServer('test')
def test_start(self):
mock_app = mock.Mock()
with mock.patch.object(self.server, '_launch') as launcher:
self.server.start(mock_app, '/the/path', workers=5, backlog=128)
self.eventlet.assert_has_calls([
mock.call.listen(
'/the/path',
family=socket.AF_UNIX,
backlog=128
)]
)
launcher.assert_called_once_with(mock_app, workers=5)
def test_run(self):
self.server._run('app', 'sock')
self.eventlet.wsgi.server.assert_called_once_with(
'sock',
'app',
protocol=utils.UnixDomainHttpProtocol,
log=mock.ANY,
max_size=self.server.num_threads
)
| barnsnake351/neutron | neutron/tests/unit/agent/linux/test_utils.py | Python | apache-2.0 | 15,628 |
"""
This example routine simulates a number of pure Gaussian white noise
signals, then fits each one in terms of two regressors: a constant baseline,
and a linear function of time. The voxelwise t statistics associated
with the baseline coefficient are then computed.
"""
print __doc__
import numpy as np
from nipy.neurospin import glm
dimt = 100
dimx = 10
dimy = 11
dimz = 12
# axis defines the "time direction"
y = np.random.randn(dimt, dimx*dimy*dimz)
axis = 0
"""
y = random.randn(dimx, dimt, dimy, dimz)
axis = 1
"""
X = np.array([np.ones(dimt), range(dimt)])
X = X.transpose() ## the design matrix X must have dimt rows
#mod = glm.glm(y, X, axis=axis) ## default is spherical model using OLS
mod = glm.glm(y, X, axis=axis, model='ar1')
#mod = glm.glm(y, X, formula='y~x1+(x1|x2)', axis=axis, model='mfx')
##mod.save('toto')
##mod = glm.load('toto')
# Define a t contrast
tcon = mod.contrast([1,0])
# Compute the t-stat
t = tcon.stat()
## t = tcon.stat(baseline=1) to test effects > 1
# Compute the p-value
p = tcon.pvalue()
# Compute the z-score
z = tcon.zscore()
# Perform a F test without keeping the F stat
p = mod.contrast([[1,0],[1,-1]]).pvalue()
# Perform a conjunction test similarly
##p = mod.contrast([[1,0],[1,-1]], type='tmin').pvalue()
print np.shape(y)
print np.shape(X)
print np.shape(z)
| yarikoptic/NiPy-OLD | examples/neurospin/glm_lowlevel.py | Python | bsd-3-clause | 1,335 |
import argparse
parser = argparse.ArgumentParser(description='This is a simple script to show argparse')
parser.add_argument('--version', action='version', version='%(prog)s 0.1')
parser.add_argument('--arg1', dest='arg1', action='store',
                    help='The first argument')
parser.add_argument('--req1', dest='req1', action='store',
                    help='The first required argument', required=True)
args = parser.parse_args()
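# Example invocation (illustrative):
#   python simple.py --req1 hello --arg1 world
# which yields args.req1 == 'hello' and args.arg1 == 'world'; omitting --req1
# makes argparse exit with an error because that option is marked required.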
if __name__ == '__main__':
print(args.req1)
print(args.arg1) | fernandoalex/stuff | python/examples/argparser/simple.py | Python | mit | 517 |
"""
Configuration utilities.
Settings are stored in a dictionary-like configuration object.
All settings are modifiable by environment variables that encode
the path in the dictionary tree.
Inner nodes in the dictionary tree can be any dictionary.
A leaf node in the dictionary tree is represented by an inner node that
contains a value key.
"""
import copy
import logging
import os
import re
import sys
import typing as tp
import uuid
import warnings
import attr
import schema
import six
import yaml
from pkg_resources import DistributionNotFound, get_distribution
from plumbum import LocalPath, local
import benchbuild.utils.user_interface as ui
LOG = logging.getLogger(__name__)
class Indexable:
def __getitem__(self: 'Indexable', key: str) -> 'Indexable':
pass
try:
__version__ = get_distribution("benchbuild").version
except DistributionNotFound:
__version__ = "unknown"
LOG.error("could not find version information.")
def available_cpu_count() -> int:
"""
Get the number of available CPUs.
Number of available virtual or physical CPUs on this system, i.e.
user/real as output by time(1) when called with an optimally scaling
userspace-only program.
Returns:
        Number of available CPUs.
"""
# cpuset
# cpuset may restrict the number of *available* processors
try:
match = re.search(
r'(?m)^Cpus_allowed:\s*(.*)$',
open('/proc/self/status').read()
)
if match:
res = bin(int(match.group(1).replace(',', ''), 16)).count('1')
if res > 0:
return res
except IOError:
LOG.debug("Could not get the number of allowed CPUs")
# http://code.google.com/p/psutil/
try:
import psutil
return int(psutil.cpu_count()) # psutil.NUM_CPUS on old versions
except (ImportError, AttributeError):
LOG.debug("Could not get the number of allowed CPUs")
# POSIX
try:
res = int(os.sysconf('SC_NPROCESSORS_ONLN'))
if res > 0:
return res
except (AttributeError, ValueError):
LOG.debug("Could not get the number of allowed CPUs")
# Linux
try:
res = open('/proc/cpuinfo').read().count('processor\t:')
if res > 0:
return res
except IOError:
LOG.debug("Could not get the number of allowed CPUs")
raise Exception('Can not determine number of CPUs on this system')
def current_available_threads() -> int:
"""Returns the number of currently available threads for BB."""
return len(os.sched_getaffinity(0))
def get_number_of_jobs(config: 'Configuration') -> int:
"""Returns the number of jobs set in the config."""
jobs_configured = int(config["jobs"])
if jobs_configured == 0:
return current_available_threads()
return jobs_configured
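# Editor's note (illustrative, not part of the original module): a configured
# value of 0 therefore means "use every CPU this process may currently run
# on", i.e. get_number_of_jobs() falls back to current_available_threads().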
class InvalidConfigKey(RuntimeWarning):
"""Warn, if you access a non-existing key benchbuild's configuration."""
def escape_yaml(raw_str: str) -> str:
"""
Shell-Escape a yaml input string.
Args:
raw_str: The unescaped string.
"""
escape_list = [char for char in raw_str if char in ['!', '{', '[']]
if len(escape_list) == 0:
return raw_str
str_quotes = '"'
i_str_quotes = "'"
if str_quotes in raw_str and str_quotes not in raw_str[1:-1]:
return raw_str
if str_quotes in raw_str[1:-1]:
raw_str = i_str_quotes + raw_str + i_str_quotes
else:
raw_str = str_quotes + raw_str + str_quotes
return raw_str
def is_yaml(cfg_file: str) -> bool:
"""Is the given cfg_file a YAML file."""
return os.path.splitext(cfg_file)[1] in [".yml", ".yaml"]
class ConfigLoader(yaml.SafeLoader):
"""Avoid polluting yaml's namespace with our modifications."""
class ConfigDumper(yaml.SafeDumper):
"""Avoid polluting yaml's namespace with our modifications."""
def to_yaml(value: tp.Any) -> tp.Optional[str]:
"""Convert a given value to a YAML string."""
stream = yaml.io.StringIO()
dumper = ConfigDumper(stream, default_flow_style=True, width=sys.maxsize)
val = None
try:
dumper.open()
dumper.represent(value)
val = str(stream.getvalue()).strip()
dumper.close()
finally:
dumper.dispose()
return val
def to_env_var(env_var: str, value: tp.Any) -> str:
"""
Create an environment variable from a name and a value.
This generates a shell-compatible representation of an
environment variable that is assigned a YAML representation of
a value.
Args:
env_var (str): Name of the environment variable.
value (Any): A value we convert from.
"""
val = to_yaml(value)
ret_val = "%s=%s" % (env_var, escape_yaml(str(val)))
return ret_val
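# Editor's note (illustrative, not part of the original module): for example,
# to_env_var("BB_JOBS", 4) yields the shell-style assignment 'BB_JOBS=4';
# values containing YAML flow characters are additionally quoted by
# escape_yaml() above.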
InnerNode = tp.Dict[str, tp.Any]
# This schema allows a configuration to be initialized/set from a standard
# dictionary. If you want to nest a new configuration node deeper than 1 level,
# you have to use dummy nodes to help benchbuild validate your nodes as
# Configuration nodes instead of plain dictionary values.
#
# Example:
# CFG['container'] = {
# 'strategy': {
# 'dummy': { 'default': True, 'desc': 'Update portage tree' }
# }
# }
# This opens the 'strategy' node up for deeper nesting in a second step:
#
# CFG['container']['strategy']['polyjit'] = {
# 'sync': { 'default': True', 'desc': '...' }
# }
_INNER_NODE_VALUE = schema.Schema({
schema.Or('default', 'value'): object,
schema.Optional('desc'): str
})
_INNER_NODE_SCHEMA = schema.Schema({
schema.And(str, len): {
schema.Or('default', 'value'): object,
schema.Optional('desc'): str,
schema.Optional(str): dict
}
})
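# Editor's note (illustrative, not part of the original module): the schemas
# above are meant to accept leaf dictionaries and single-level inner nodes,
# e.g. for a hypothetical 'jobs' node:
#
#   _INNER_NODE_VALUE.is_valid({'default': 4, 'desc': 'number of jobs'})
#   _INNER_NODE_SCHEMA.is_valid({'jobs': {'default': 4, 'desc': 'number of jobs'}})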
class Configuration(Indexable):
"""
Dictionary-like data structure to contain all configuration variables.
This serves as a configuration dictionary throughout benchbuild. You can
use it to access all configuration options that are available. Whenever the
structure is updated with a new subtree, all variables defined in the new
subtree are updated from the environment.
Environment variables are generated from the tree paths automatically.
CFG["build_dir"] becomes BB_BUILD_DIR
CFG["llvm"]["dir"] becomes BB_LLVM_DIR
The configuration can be stored/loaded as YAML.
"""
def __init__(
self,
parent_key: str,
node: tp.Optional[InnerNode] = None,
parent: tp.Optional['Configuration'] = None,
init: bool = True
):
self.parent = parent
self.parent_key = parent_key
self.node = node if node is not None else {}
if init:
self.init_from_env()
def filter_exports(self) -> None:
if self.has_default():
do_export = True
if "export" in self.node:
do_export = self.node["export"]
if not do_export:
if self.parent:
self.parent.node.pop(self.parent_key)
else:
selfcopy = copy.deepcopy(self)
for k in self.node:
if selfcopy[k].is_leaf():
selfcopy[k].filter_exports()
self.__dict__ = selfcopy.__dict__
def store(self, config_file: LocalPath) -> None:
""" Store the configuration dictionary to a file."""
selfcopy = copy.deepcopy(self)
selfcopy.filter_exports()
with open(config_file, 'w') as outf:
yaml.dump(
selfcopy.node,
outf,
width=80,
indent=4,
default_flow_style=False,
Dumper=ConfigDumper
)
def load(self, _from: LocalPath) -> None:
"""Load the configuration dictionary from file."""
def load_rec(
inode: tp.Dict[str, tp.Any], config: Configuration
) -> None:
"""Recursive part of loading."""
for k in config:
if isinstance(config[k], dict) and \
k not in ['value', 'default']:
if k in inode:
load_rec(inode[k], config[k])
else:
LOG.debug("+ config element: '%s'", k)
else:
inode[k] = config[k]
with open(str(_from), 'r') as infile:
obj: Configuration = yaml.load(infile, Loader=ConfigLoader)
upgrade(obj)
load_rec(self.node, obj)
self['config_file'] = os.path.abspath(_from)
def has_value(self) -> bool:
"""Check, if the node contains a 'value'."""
return isinstance(self.node, dict) and 'value' in self.node
def has_default(self) -> bool:
"""Check, if the node contains a 'default' value."""
return isinstance(self.node, dict) and 'default' in self.node
def is_leaf(self) -> bool:
"""Check, if the node is a 'leaf' node."""
return self.has_value() or self.has_default()
def init_from_env(self) -> None:
"""
Initialize this node from environment.
If we're a leaf node, i.e., a node containing a dictionary that
consist of a 'default' key, compute our env variable and initialize
our value from the environment.
Otherwise, init our children.
"""
if 'default' in self.node:
env_var = self.__to_env_var__().upper()
if self.has_value():
env_val = self.node['value']
else:
env_val = self.node['default']
env_val = os.getenv(env_var, to_yaml(env_val))
try:
self.node['value'] = yaml.load(
str(env_val), Loader=ConfigLoader
)
except ValueError:
self.node['value'] = env_val
else:
if isinstance(self.node, dict):
for k in self.node:
self[k].init_from_env()
@property
def value(self) -> tp.Any:
"""Return the node value, if we're a leaf node."""
def validate(node_value: tp.Any) -> tp.Any:
if hasattr(node_value, 'validate'):
node_value.validate()
return node_value
if 'value' in self.node:
return validate(self.node['value'])
return self
def __getitem__(self, key: str) -> 'Configuration':
if key not in self.node:
warnings.warn(
"Access to non-existing config element: {0}".format(key),
category=InvalidConfigKey,
stacklevel=2
)
return Configuration(key, init=False)
return Configuration(key, parent=self, node=self.node[key], init=False)
def __setitem__(self, key: str, val: tp.Any) -> None:
if _INNER_NODE_SCHEMA.is_valid(val) or _INNER_NODE_VALUE.is_valid(val):
self.node[key] = val
elif key in self.node:
self.node[key]['value'] = val
else:
self.node[key] = {'value': val}
def __iadd__(self, rhs: tp.Any) -> tp.Any:
"""Append a value to a list value."""
if not self.has_value():
raise TypeError("Inner configuration node does not support +=.")
value = self.node['value']
if not hasattr(value, '__iadd__'):
raise TypeError("Configuration node value does not support +=.")
value += rhs
return value
def __int__(self) -> int:
"""Convert the node's value to int, if available."""
if not self.has_value():
raise ValueError(
'Inner configuration nodes cannot be converted to int.'
)
return int(self.value)
def __bool__(self) -> bool:
"""Convert the node's value to bool, if available."""
if not self.has_value():
return True
return bool(self.value)
def __contains__(self, key: str) -> bool:
return key in self.node
def __str__(self) -> str:
if 'value' in self.node:
return str(self.node['value'])
return str(self.node)
def __repr__(self) -> str:
"""
Represents the configuration as a list of environment variables.
"""
_repr = []
if self.has_value():
return to_env_var(self.__to_env_var__(), self.node['value'])
if self.has_default():
return to_env_var(self.__to_env_var__(), self.node['default'])
for k in self.node:
_repr.append(repr(self[k]))
return "\n".join(sorted(_repr))
def __to_env_var__(self) -> str:
parent_key = self.parent_key
if self.parent:
return str(self.parent.__to_env_var__() + "_" + parent_key).upper()
return parent_key.upper()
def to_env_dict(self) -> tp.Mapping[str, tp.Any]:
"""Convert configuration object to a flat dictionary."""
if self.has_value():
return {self.__to_env_var__(): self.node['value']}
if self.has_default():
return {self.__to_env_var__(): self.node['default']}
entries: tp.Dict[str, str] = {}
for k in self.node:
entries.update(self[k].to_env_dict())
return entries
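# Editor's sketch (illustrative only, not part of benchbuild): a minimal
# Configuration tree with a hypothetical "jobs" leaf. The leaf is reachable
# as cfg["jobs"] and can be overridden through the BB_JOBS environment
# variable, as described in the class docstring above.
def _example_configuration_usage():
    cfg = Configuration("bb", node={
        "jobs": {"default": 4, "desc": "number of parallel jobs"},
    })
    return cfg["jobs"].value, cfg["jobs"].to_env_dict()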
def convert_components(value: tp.Union[str, tp.List[str]]) -> tp.List[str]:
is_str = isinstance(value, six.string_types)
new_value = value
if is_str:
new_value = str(new_value)
if os.path.sep in new_value:
new_value = new_value.split(os.path.sep)
else:
new_value = [new_value]
new_value = [c for c in new_value if c != '']
return new_value
@attr.s(str=False, frozen=True)
class ConfigPath:
"""Wrapper around paths represented as list of strings."""
components = attr.ib(converter=convert_components)
def validate(self) -> None:
"""Make sure this configuration path exists."""
path = local.path(ConfigPath.path_to_str(self.components))
if not path.exists():
print("The path '%s' is required by your configuration." % path)
yes = ui.ask(
"Should I create '%s' for you?" % path,
default_answer=True,
default_answer_str="yes"
)
if yes:
path.mkdir()
else:
LOG.error("User denied path creation of '%s'.", path)
if not path.exists():
LOG.error("The path '%s' needs to exist.", path)
@staticmethod
def path_to_str(components: tp.List[str]) -> str:
if components:
return os.path.sep + os.path.sep.join(components)
return os.path.sep
def __str__(self) -> str:
return ConfigPath.path_to_str(self.components)
def path_representer(dumper, data):
"""
Represent a ConfigPath object as a scalar YAML node.
"""
return dumper.represent_scalar('!create-if-needed', '%s' % data)
def path_constructor(loader, node):
""""
Construct a ConfigPath object form a scalar YAML node.
"""
value = loader.construct_scalar(node)
return ConfigPath(value)
def find_config(
test_file: tp.Optional[str] = None,
defaults: tp.Optional[tp.List[str]] = None,
root: str = os.curdir
) -> tp.Optional[LocalPath]:
"""
Find the path to the default config file.
    We look in :root: for one of the :defaults: config files. If we can't
    find one there, we keep looking in the parent directories recursively
    until we find a matching file and return the absolute path to it.
    If we can't find anything, we return None.
    Args:
        test_file: An explicit config file name to look for, if any.
        defaults: The config file names to look for when test_file is not given.
        root: The directory to start looking in.
Returns:
Path to the default config file, None if we can't find anything.
"""
if defaults is None:
defaults = [".benchbuild.yml", ".benchbuild.yaml"]
def walk_rec(cfg_name: str, root: str) -> LocalPath:
cur_path = local.path(root) / cfg_name
if cur_path.exists():
return cur_path
new_root = local.path(root) / os.pardir
return walk_rec(cfg_name, new_root) if new_root != root else None
if test_file is not None:
return walk_rec(test_file, root)
for test_f in defaults:
ret = walk_rec(test_f, root)
if ret is not None:
return ret
return None
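# Editor's note (illustrative): with the defaults above, a call such as
#   find_config(root=os.curdir)
# walks upwards from the current directory and returns the first
# .benchbuild.yml / .benchbuild.yaml it finds, or None.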
def setup_config(
cfg: Configuration,
config_filenames: tp.Optional[tp.List[str]] = None,
env_var_name: tp.Optional[str] = None
) -> None:
"""
This will initialize the given configuration object.
The following resources are available in the same order:
1) Default settings.
2) Config file.
3) Environment variables.
WARNING: Environment variables do _not_ take precedence over the config
file right now. (init_from_env will refuse to update the
value, if there is already one.)
Args:
config_filenames: list of possible config filenames
env_var_name: name of the environment variable holding the config path
"""
if env_var_name is None:
env_var_name = "BB_CONFIG_FILE"
config_path = os.getenv(env_var_name, None)
if not config_path:
config_path = find_config(defaults=config_filenames)
if config_path:
cfg.load(config_path)
cfg["config_file"] = os.path.abspath(config_path)
cfg.init_from_env()
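# Editor's sketch (illustrative, not part of benchbuild): the typical call
# sequence, mirroring the precedence described in setup_config's docstring:
# defaults, then the config file located via BB_CONFIG_FILE or find_config(),
# then environment variables, followed by exporting the environment.
def _example_setup(cfg: Configuration) -> None:
    setup_config(cfg, config_filenames=[".benchbuild.yml", ".benchbuild.yaml"])
    update_env(cfg)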
def update_env(cfg: Configuration) -> None:
env: tp.Dict[str, str] = dict(cfg["env"].value)
path = env.get("PATH", "")
path = os.path.pathsep.join(path)
if "PATH" in os.environ:
path = os.path.pathsep.join([path, os.environ["PATH"]])
os.environ["PATH"] = path
lib_path = env.get("LD_LIBRARY_PATH", "")
lib_path = os.path.pathsep.join(lib_path)
if "LD_LIBRARY_PATH" in os.environ:
lib_path = os.path.pathsep.join([
lib_path, os.environ["LD_LIBRARY_PATH"]
])
os.environ["LD_LIBRARY_PATH"] = lib_path
home = env.get("HOME", None)
if home is not None and "HOME" in os.environ:
os.environ["HOME"] = home
# Update local's env property because we changed the environment
# of the running python process.
local.env.update(PATH=os.environ["PATH"])
local.env.update(LD_LIBRARY_PATH=os.environ["LD_LIBRARY_PATH"])
if home is not None:
local.env.update(HOME=os.environ["HOME"])
def upgrade(cfg: Configuration) -> None:
"""Provide forward migration for configuration files."""
db_node = cfg["db"]
old_db_elems = ["host", "name", "port", "pass", "user", "dialect"]
has_old_db_elems = [x in db_node for x in old_db_elems]
if any(has_old_db_elems):
print(
"Old database configuration found. "
"Converting to new connect_string. "
"This will *not* be stored in the configuration automatically."
)
cfg["db"]["connect_string"] = \
"{dialect}://{user}:{password}@{host}:{port}/{name}".format(
dialect=cfg["db"]["dialect"]["value"],
user=cfg["db"]["user"]["value"],
password=cfg["db"]["pass"]["value"],
host=cfg["db"]["host"]["value"],
port=cfg["db"]["port"]["value"],
name=cfg["db"]["name"]["value"])
def uuid_representer(dumper, data):
"""Represent a uuid.UUID object as a scalar YAML node."""
return dumper.represent_scalar('!uuid', '%s' % data)
def uuid_constructor(loader, node):
""""Construct a uuid.UUID object form a scalar YAML node."""
value = loader.construct_scalar(node)
return uuid.UUID(value)
def uuid_add_implicit_resolver(loader=ConfigLoader, dumper=ConfigDumper):
"""Attach an implicit pattern resolver for UUID objects."""
uuid_regex = r'^\b[a-f0-9]{8}-\b[a-f0-9]{4}-\b[a-f0-9]{4}-\b[a-f0-9]{4}-\b[a-f0-9]{12}$'
pattern = re.compile(uuid_regex)
yaml.add_implicit_resolver('!uuid', pattern, Loader=loader, Dumper=dumper)
def __init_module__() -> None:
yaml.add_representer(uuid.UUID, uuid_representer, Dumper=ConfigDumper)
yaml.add_representer(ConfigPath, path_representer, Dumper=ConfigDumper)
yaml.add_constructor('!uuid', uuid_constructor, Loader=ConfigLoader)
yaml.add_constructor(
'!create-if-needed', path_constructor, Loader=ConfigLoader
)
uuid_add_implicit_resolver()
__init_module__()
| PolyJIT/benchbuild | benchbuild/utils/settings.py | Python | mit | 20,533 |
# -*- coding: utf-8 -*-
#
# Copyright 2017 - 2019 Brian R. D'Urso
#
# This file is part of Python Instrument Control System, also known as Pythics.
#
# Pythics is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pythics is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pythics. If not, see <http://www.gnu.org/licenses/>.
#
#
# load libraries
#
import struct
import numpy as np
from pythics.settings import _TRY_PYSIDE
try:
if not _TRY_PYSIDE:
raise ImportError()
import PySide2.QtCore as _QtCore
import PySide2.QtGui as _QtGui
import PySide2.QtWidgets as _QtWidgets
import PySide2.QtPrintSupport as _QtPrintSupport
QtCore = _QtCore
QtGui = _QtGui
QtWidgets = _QtWidgets
QtPrintSupport = _QtPrintSupport
Signal = QtCore.Signal
Slot = QtCore.Slot
Property = QtCore.Property
USES_PYSIDE = True
except ImportError:
import PyQt5.QtCore as _QtCore
import PyQt5.QtGui as _QtGui
import PyQt5.QtWidgets as _QtWidgets
import PyQt5.QtPrintSupport as _QtPrintSupport
QtCore = _QtCore
QtGui = _QtGui
QtWidgets = _QtWidgets
QtPrintSupport = _QtPrintSupport
Signal = QtCore.pyqtSignal
Slot = QtCore.pyqtSlot
Property = QtCore.pyqtProperty
USES_PYSIDE = False
try:
if USES_PYSIDE:
from PySide import QtOpenGL
else:
from PyQt5 import QtOpenGL, QtWidgets
#from OpenGL import GL
OPENGL_AVAILABLE = True
except ImportError:
OPENGL_AVAILABLE = False
import pythics.libcontrol
#
# ScopePlot
#
class ScopePlot(pythics.libcontrol.Control):
"""An oscilloscope-like plot for real time display of 2-dimensional curves,
with OpenGL-accelerated drawing if available.
HTML parameters:
*antialias*: [ *True* (default) | *False* ]
Render curves antialiased. This does not typically slow down rendering
significantly but looks much nicer.
*aspect_locked*: [ *True* | *False* (default) ]
Whether to lock the aspect ratio of the plot coordinate system to 1:1.
*grid_color*: (r, g, b, a)
An RGBA tuple specifying the grid color.
*grid_line_width*: int (default 1)
Set the width of the grid lines in pixels.
*use_opengl*: [ *True* | *False* (default) ]
Whether to render with opengl for hardware acceleration (if available).
"""
def __init__(self, parent, antialias=True, aspect_locked=False, grid_color=(200,200,200,255), grid_line_width=1, use_opengl=False, **kwargs):
pythics.libcontrol.Control.__init__(self, parent, **kwargs)
self._widget = ScopePlotCanvas(antialias=antialias, aspect_locked=aspect_locked, grid_color=grid_color, grid_line_width=grid_line_width, use_opengl=use_opengl)
#---------------------------------------------------
# methods below used only for access by action proxy
def new_curve(self, key, **kwargs):
"""Create a new curve or set of points on the plot.
Arguments:
*key*: str
The name you give to this plot item for future access.
Optional keyword arguments:
*line_width*: int (default 1)
Set the width of the lines in pixels. Widths other than 1 may be
exceedingly slow without opengl hardware acceleration.
*line_color*: (r, g, b, a)
An RGBA tuple specifying the line color.
"""
self._widget.new_curve(key, **kwargs)
def set_data(self, key, x, y):
"""Change the data of a plot item.
Arguments:
*key*: str
The name you gave to the plot item when it was created.
*x*: one-dimensional numpy array
X data values.
*y*: one-dimensional numpy array
Y data values.
"""
self._widget.set_data(key, x, y)
def freeze(self):
"""Stop redrawing plot until thaw() is called.
"""
self._widget.freeze()
def thaw(self):
"""Update and resume redrawing plot (use with freeze()).
"""
self._widget.thaw()
self._widget.scene.update()
def delete(self, key):
"""Delete a plot item.
Arguments:
*key*: str
The name you gave to the plot item when it was created.
"""
self._widget.delete(key)
def clear(self):
"""Delete all plot items to clear the plot.
"""
self._widget.clear()
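# Editor's sketch (illustrative, not part of pythics): the intended call
# sequence against a ScopePlot proxy named `plot` inside a pythics
# application; the curve key and styling values are made up.
def _example_scope_plot_usage(plot):
    x = np.linspace(0.0, 1.0, 500)
    plot.new_curve('signal', line_color=(0, 255, 0, 255), line_width=1)
    plot.freeze()  # suspend redraws while updating
    plot.set_data('signal', x, 0.5 + 0.4 * np.sin(10.0 * np.pi * x))
    plot.thaw()    # update and resume redrawing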
class ScopePlotCanvas(QtWidgets.QGraphicsView):
def __init__(self, antialias=False, aspect_locked=False, grid_color=(0,0,0,128), grid_line_width=1.0, use_opengl=False):
QtWidgets.QGraphicsView.__init__(self)
self.aspect_locked = aspect_locked
self.grid_color = grid_color
self.grid_line_width = grid_line_width
self.setCacheMode(QtWidgets.QGraphicsView.CacheBackground)
#self.setFocusPolicy(QtCore.Qt.StrongFocus)
#self.setFrameShape(QtGui.QFrame.NoFrame)
self.setFrameShape(QtWidgets.QFrame.Box)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setTransformationAnchor(QtWidgets.QGraphicsView.NoAnchor)
#self.setTransformationAnchor(QtGui.QGraphicsView.AnchorViewCenter)
self.setResizeAnchor(QtWidgets.QGraphicsView.AnchorViewCenter)
self.thaw()
if antialias:
self.setRenderHints(QtGui.QPainter.Antialiasing)
self.setOptimizationFlag(self.DontAdjustForAntialiasing, True)
self.setInteractive(False)
self.scene = QtWidgets.QGraphicsScene(self)
self.setScene(self.scene)
#self.aspect_locked = True
self.view_rect = QtCore.QRectF(0, 0, 1, 1)
self.scene.setSceneRect(self.view_rect)
if self.aspect_locked:
self.fitInView(self.view_rect, QtCore.Qt.KeepAspectRatio)
else:
self.fitInView(self.view_rect, QtCore.Qt.IgnoreAspectRatio)
if OPENGL_AVAILABLE and use_opengl:
if antialias:
qglf = QtOpenGL.QGLFormat()
qglf.setSampleBuffers(True)
qglw = QtOpenGL.QGLWidget(qglf)
self.setViewport(qglw)
else:
self.setViewport(QtOpenGL.QGLWidget())
# dictionary of plot objects such as lines, points, etc.
self.plot_items = dict()
def freeze(self):
self.setViewportUpdateMode(QtWidgets.QGraphicsView.NoViewportUpdate)
def thaw(self):
#self.setViewportUpdateMode(QtGui.QGraphicsView.MinimalViewportUpdate)
#self.setViewportUpdateMode(QtGui.QGraphicsView.FullViewportUpdate)
self.setViewportUpdateMode(QtWidgets.QGraphicsView.BoundingRectViewportUpdate)
def new_curve(self, key, **kwargs):
# if a curve with this name already exists, delete it
if key in self.plot_items:
self.delete(key)
# make the new curve
properties = {"line_color":(255, 0, 0, 255), "line_width":1}
properties.update(kwargs)
color = QtGui.QColor.fromRgb(*properties["line_color"])
pen = QtGui.QPen(color);
pen.setWidth(properties["line_width"])
pen.setCosmetic(True)
x = np.array([0.0, 1.0])
y = np.array([1.0, 1.0])
path = arrayToQPath(x, y)
item = ScopeCurveItem()
item.setPath(path)
item.setPen(pen)
self.scene.addItem(item)
self.plot_items[key] = item
def set_data(self, key, x, y):
item = self.plot_items[key]
path = arrayToQPath(x, 1.0-y)
item.setPath(path)
def delete(self, key):
item = self.plot_items.pop(key)
self.scene.removeItem(item)
def clear(self):
        for item in self.plot_items.values():
            self.scene.removeItem(item)
self.plot_items = dict()
def resizeEvent(self, ev):
if self.aspect_locked:
self.fitInView(self.view_rect, QtCore.Qt.KeepAspectRatio)
else:
self.fitInView(self.view_rect, QtCore.Qt.IgnoreAspectRatio)
def drawBackground(self, painter, rect):
if self.grid_line_width > 0:
#color = QtGui.QColor(*self.grid_color)
color = QtGui.QColor.fromRgb(*self.grid_color)
#pen = QtGui.QPen(color, QtCore.Qt.SolidLine)
pen = QtGui.QPen(color)
pen.setWidth(self.grid_line_width)
pen.setCosmetic(True)
painter.setPen(pen)
self.drawBackgroundLine(painter, 0.0, 0.0, 0.0, 1.0)
self.drawBackgroundLine(painter, 0.1, 0.0, 0.1, 1.0)
self.drawBackgroundLine(painter, 0.2, 0.0, 0.2, 1.0)
self.drawBackgroundLine(painter, 0.3, 0.0, 0.3, 1.0)
self.drawBackgroundLine(painter, 0.4, 0.0, 0.4, 1.0)
self.drawBackgroundLine(painter, 0.5, 0.0, 0.5, 1.0)
self.drawBackgroundLine(painter, 0.6, 0.0, 0.6, 1.0)
self.drawBackgroundLine(painter, 0.7, 0.0, 0.7, 1.0)
self.drawBackgroundLine(painter, 0.8, 0.0, 0.8, 1.0)
self.drawBackgroundLine(painter, 0.9, 0.0, 0.9, 1.0)
self.drawBackgroundLine(painter, 1.0, 0.0, 1.0, 1.0)
self.drawBackgroundLine(painter, 0.0, 0.0, 1.0, 0.0)
self.drawBackgroundLine(painter, 0.0, 0.1, 1.0, 0.1)
self.drawBackgroundLine(painter, 0.0, 0.2, 1.0, 0.2)
self.drawBackgroundLine(painter, 0.0, 0.3, 1.0, 0.3)
self.drawBackgroundLine(painter, 0.0, 0.4, 1.0, 0.4)
self.drawBackgroundLine(painter, 0.0, 0.5, 1.0, 0.5)
self.drawBackgroundLine(painter, 0.0, 0.6, 1.0, 0.6)
self.drawBackgroundLine(painter, 0.0, 0.7, 1.0, 0.7)
self.drawBackgroundLine(painter, 0.0, 0.8, 1.0, 0.8)
self.drawBackgroundLine(painter, 0.0, 0.9, 1.0, 0.9)
self.drawBackgroundLine(painter, 0.0, 1.0, 1.0, 1.0)
def drawBackgroundLine(self, painter, x1, y1, x2, y2):
line = QtCore.QLineF(x1, y1, x2, y2)
painter.drawLine(line)
class ScopeCurveItem(QtWidgets.QGraphicsPathItem):
def boundingRect(self):
return QtCore.QRectF(-0.1, -0.1, 1.1, 1.1)
"""
Function arrayToQPath is modified from pyqtgraph
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
"""
def arrayToQPath(x, y):
"""Convert an array of x,y coordinats to QPainterPath as efficiently as possible.
The *connect* argument may be 'all', indicating that each point should be
connected to the next; 'pairs', indicating that each pair of points
should be connected, or an array of int32 values (0 or 1) indicating
connections.
"""
## Create all vertices in path. The method used below creates a binary format so that all
## vertices can be read in at once. This binary format may change in future versions of Qt,
## so the original (slower) method is left here for emergencies:
#path.moveTo(x[0], y[0])
#if connect == 'all':
#for i in range(1, y.shape[0]):
#path.lineTo(x[i], y[i])
#elif connect == 'pairs':
#for i in range(1, y.shape[0]):
#if i%2 == 0:
#path.lineTo(x[i], y[i])
#else:
#path.moveTo(x[i], y[i])
#elif isinstance(connect, np.ndarray):
#for i in range(1, y.shape[0]):
#if connect[i] == 1:
#path.lineTo(x[i], y[i])
#else:
#path.moveTo(x[i], y[i])
#else:
#raise Exception('connect argument must be "all", "pairs", or array')
## Speed this up using >> operator
## Format is:
## numVerts(i4) 0(i4)
## x(f8) y(f8) 0(i4) <-- 0 means this vertex does not connect
## x(f8) y(f8) 1(i4) <-- 1 means this vertex connects to the previous vertex
## ...
## 0(i4)
##
## All values are big endian--pack using struct.pack('>d') or struct.pack('>i')
path = QtGui.QPainterPath()
n = x.shape[0]
# create empty array, pad with extra space on either end
arr = np.empty(n+2, dtype=[('x', '>f8'), ('y', '>f8'), ('c', '>i4')])
# write first two integers
byteview = arr.view(dtype=np.ubyte)
byteview[:12] = 0
byteview.data[12:20] = struct.pack('>ii', n, 0)
# Fill array with vertex values
arr[1:-1]['x'] = x
arr[1:-1]['y'] = y
arr[1:-1]['c'] = 1
# write last 0
lastInd = 20*(n+1)
byteview.data[lastInd:lastInd+4] = struct.pack('>i', 0)
# create datastream object and stream into path
## Avoiding this method because QByteArray(str) leaks memory in PySide
#buf = QtCore.QByteArray(arr.data[12:lastInd+4]) # I think one unnecessary copy happens here
path.strn = byteview.data[12:lastInd+4] # make sure data doesn't run away
try:
buf = QtCore.QByteArray.fromRawData(path.strn)
except TypeError:
buf = QtCore.QByteArray(bytes(path.strn))
ds = QtCore.QDataStream(buf)
ds >> path
return path
| dursobr/Pythics | pythics/opengl.py | Python | gpl-3.0 | 13,535 |
# Copyright (c) 2008 Yann Ramin
# This file is part of quickmovie.
#
# quickmovie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# quickmovie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with quickmovie. If not, see <http://www.gnu.org/licenses/>.
import datetime
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy import types
from quickmovie.model import meta
def init(bind):
meta.engine = bind
meta.Session = orm.scoped_session(
orm.sessionmaker(autocommit = False, autoflush = True, bind = bind))
meta.metadata.create_all(bind=meta.engine)
load_orm()
def load_orm():
orm.mapper(Plot, plots_table)
orm.mapper(Movie, movies_table, properties={
'plots': orm.relation(Plot)
})
plots_table = sa.Table('plots', meta.metadata,
sa.Column('id', sa.Integer, primary_key = True),
sa.Column('movie_id', sa.Integer, sa.ForeignKey('movies.id')),
sa.Column('pick', sa.Boolean),
sa.Column('plot', sa.Unicode(600)))
movies_table = sa.Table('movies', meta.metadata,
sa.Column('id', sa.Integer, primary_key = True),
sa.Column('name', sa.Unicode(200)),
sa.Column('filename', sa.Unicode(300)),
sa.Column('length', sa.Unicode(100)),
sa.Column('imdb_id', sa.Unicode(100)),
sa.Column('rating', sa.Float),
sa.Column('taglines', sa.Unicode(500)),
sa.Column('year', sa.Integer),
sa.Column('genres', sa.Unicode(500)),
sa.Column('imageurl', sa.Unicode(200)))
class Movie(object):
pass
class Plot(object):
pass
| theatrus/quickmovie | quickmovie/model/__init__.py | Python | gpl-3.0 | 2,250 |
from django.conf import settings
from django import forms
from django.forms.widgets import HiddenInput
from crits.core.widgets import CalWidget
class AddCommentForm(forms.Form):
"""
Django form for adding a new Comment.
"""
error_css_class = 'error'
required_css_class = 'required'
comment = forms.CharField(widget=forms.Textarea(attrs={'rows':6, 'cols':40}))
parent_date = forms.DateTimeField(widget=HiddenInput, required=False)
parent_analyst = forms.CharField(widget=HiddenInput, required=False)
url_key = forms.CharField(widget=HiddenInput(attrs={'class':'no_clear'}))
#This field helps the server determine if we're on an object's
# detail page or the comments aggregation page. Set only on
# detail page.
subscribable = forms.CharField(widget=HiddenInput(attrs={'class':'no_clear'}), required=False)
class JumpToDateForm(forms.Form):
"""
Django form for finding comments on a specific date.
"""
error_css_class = 'error'
required_css_class = 'required'
date = forms.DateTimeField(widget=CalWidget(format=settings.PY_DATETIME_FORMAT, attrs={'class':'datetimeclass', 'size':'25', 'id':'id_comment_jump_to_date'}), input_formats=settings.PY_FORM_DATETIME_FORMATS)
class InlineCommentForm(forms.Form):
"""
Django form for adding comments inline.
"""
error_css_class = 'error'
required_css_class = 'required'
comment = forms.CharField(widget=forms.Textarea(attrs={'rows':6, 'cols':40}))
| cfossace/crits | crits/comments/forms.py | Python | mit | 1,498 |
#!/usr/bin/env python
"""
This example is similar to while(<>) in perl which processes input
both from standard input and from command line arguments.
"""
import fileinput
for line in fileinput.input():
line = line.rstrip()
print(line)
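# Editor's note (illustrative): typical invocations, mirroring perl's while(<>):
#   python input_processing.py file1.txt file2.txt   # iterate over named files
#   cat file1.txt | python input_processing.py       # iterate over stdin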
| veltzer/demos-python | src/examples/short/systems_programming/input_processing.py | Python | gpl-3.0 | 247 |
#!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: azure_rm_resourcegroup_facts
version_added: "2.1"
short_description: Get resource group facts.
description:
- Get facts for a specific resource group or all resource groups.
options:
name:
description:
- Limit results to a specific resource group.
required: false
default: null
tags:
description:
- Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
required: false
default: null
extends_documentation_fragment:
- azure
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
- name: Get facts for one resource group
azure_rm_resourcegroup_facts:
name: Testing
- name: Get facts for all resource groups
azure_rm_resourcegroup_facts:
- name: Get facts by tags
azure_rm_resourcegroup_facts:
tags:
- testing
- foo:bar
'''
RETURN = '''
azure_resourcegroups:
description: List of resource group dicts.
returned: always
type: list
example: [{
"id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing",
"location": "westus",
"name": "Testing",
"properties": {
"provisioningState": "Succeeded"
},
"tags": {
"delete": "never",
"testing": "testing"
}
}]
'''
try:
from msrestazure.azure_exceptions import CloudError
except:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
AZURE_OBJECT_CLASS = 'ResourceGroup'
class AzureRMResourceGroupFacts(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
name=dict(type='str'),
tags=dict(type='list')
)
self.results = dict(
changed=False,
ansible_facts=dict(azure_resourcegroups=[])
)
self.name = None
self.tags = None
super(AzureRMResourceGroupFacts, self).__init__(self.module_arg_spec,
supports_tags=False,
facts_module=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec:
setattr(self, key, kwargs[key])
if self.name:
self.results['ansible_facts']['azure_resourcegroups'] = self.get_item()
else:
self.results['ansible_facts']['azure_resourcegroups'] = self.list_items()
return self.results
def get_item(self):
self.log('Get properties for {0}'.format(self.name))
item = None
result = []
try:
item = self.rm_client.resource_groups.get(self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
result = [self.serialize_obj(item, AZURE_OBJECT_CLASS)]
return result
def list_items(self):
self.log('List all items')
try:
response = self.rm_client.resource_groups.list()
except CloudError as exc:
self.fail("Failed to list all items - {0}".format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS))
return results
def main():
AzureRMResourceGroupFacts()
if __name__ == '__main__':
main()
| ravibhure/ansible | lib/ansible/modules/cloud/azure/azure_rm_resourcegroup_facts.py | Python | gpl-3.0 | 4,030 |
# ========================================================================
# Copyright (c) 2007, Metaweb Technologies, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY METAWEB TECHNOLOGIES AND CONTRIBUTORS
# ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL METAWEB
# TECHNOLOGIES OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ========================================================================
#
# declarations for external metaweb api.
#
#
# from metaweb.api import HTTPMetawebSession
#
# mss = HTTPMetawebSession('sandbox.freebase.com')
# print mss.mqlread([dict(name=None, type='/type/type')])
#
#
#
__all__ = ['MetawebError', 'MetawebSession', 'HTTPMetawebSession', 'attrdict']
__version__ = '0.1'
import os, sys, re
import urllib2
import cookielib
import simplejson
from urllib import quote as urlquote
import pprint
import socket
import logging
try:
import httplib2
from httplib2cookie import CookiefulHttp
except ImportError:
httplib2 = None
CookiefulHttp = None
print ('freebase.api: you can install httplib2 for better performance')
import simplejson.encoder
# remove whitespace from json encoded output
simplejson.JSONEncoder.item_separator = ','
simplejson.JSONEncoder.key_separator = ':'
# don't escape slashes, we're not pasting into script tags here.
if simplejson.encoder.ESCAPE_DCT.get('/', None) == r'\/':
simplejson.encoder.ESCAPE_DCT['/'] = '/'
def urlencode_weak(s):
return urlquote(s, safe=',/:$')
# from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/361668
class attrdict(dict):
"""A dict whose items can also be accessed as member variables.
>>> d = attrdict(a=1, b=2)
>>> d['c'] = 3
>>> print d.a, d.b, d.c
1 2 3
>>> d.b = 10
>>> print d['b']
10
# but be careful, it's easy to hide methods
>>> print d.get('c')
3
>>> d['get'] = 4
>>> print d.get('a')
Traceback (most recent call last):
TypeError: 'int' object is not callable
"""
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.__dict__ = self
# TODO expose the common parts of the result envelope
class MetawebError(Exception):
"""
an error report from the metaweb service.
"""
pass
# TODO right now this is a completely unnecessary superclass.
# is there enough common behavior between session types
# to justify it?
class MetawebSession(object):
"""
MetawebSession is the base class for MetawebSession, subclassed for
different connection types. Only http is available externally.
This is more of an interface than a class
"""
# interface definition here...
# from httplib2
NORMALIZE_SPACE = re.compile(r'(?:\r\n)?[ \t]+')
def _normalize_headers(headers):
    return dict([(key.lower(), NORMALIZE_SPACE.sub(' ', value).strip()) for (key, value) in headers.iteritems()])
class HTTPMetawebSession(MetawebSession):
"""
a MetawebSession is a request/response queue.
this version uses the HTTP api, and is synchronous.
"""
# share cookies across sessions, so that different sessions can
# see each other's writes immediately.
_default_cookiejar = cookielib.CookieJar()
def __init__(self, service_url, username=None, password=None, prev_session=None, cookiejar=None, cookiefile=None):
"""
create a new MetawebSession for interacting with the Metaweb.
        A new session will inherit state from prev_session, if present.
"""
super(HTTPMetawebSession, self).__init__()
self.log = logging.getLogger()
assert not service_url.endswith('/')
if not '/' in service_url: # plain host:port
service_url = 'http://' + service_url
self.service_url = service_url
self.username = username
self.password = password
self.tid = None
if prev_session:
            self.service_url = prev_session.service_url
if cookiefile is not None:
cookiejar = self.open_cookie_file(cookiefile)
if cookiejar is not None:
self.cookiejar = cookiejar
elif prev_session:
self.cookiejar = prev_session.cookiejar
else:
self.cookiejar = self._default_cookiejar
if CookiefulHttp is not None:
self.httpclient = CookiefulHttp(cookiejar=self.cookiejar)
else:
cookiespy = urllib2.HTTPCookieProcessor(self.cookiejar)
self.opener = urllib2.build_opener(cookiespy)
def open_cookie_file(self, cookiefile=None):
if cookiefile is None or cookiefile == '':
if os.environ.has_key('HOME'):
cookiefile = os.path.join(os.environ['HOME'], '.pyfreebase/cookiejar')
else:
raise MetawebError("no cookiefile specified and no $HOME/.pyfreebase directory" % cookiefile)
cookiejar = cookielib.LWPCookieJar(cookiefile)
if os.path.exists(cookiefile):
cookiejar.load(ignore_discard=True)
return cookiejar
def _httpreq(self, service_path, method='GET', body=None, form=None,
headers=None):
"""
make an http request to the service.
form arguments are encoded in the url, even for POST, if a non-form
content-type is given for the body.
returns a pair (resp, body)
resp is the response object and may be different depending
on whether urllib2 or httplib2 is in use?
"""
if method == 'POST':
assert body is not None or form is not None
elif method == 'GET':
assert body is None
else:
assert 0, 'unknown method %s' % method
url = self.service_url + service_path
if headers is None:
headers = {}
else:
headers = _normalize_headers(headers)
# XXX This is a lousy way to parse Content-Type, where is
# the library?
ct = headers.get('content-type', None)
if ct is not None:
ct = ct.split(';')[0]
if body is not None:
# if body is provided, content-type had better be too
assert ct is not None
if form is not None:
qstr = '&'.join(['%s=%s' % (urlencode_weak(k), urlencode_weak(v))
for k,v in form.items()])
if method == 'POST':
# put the args on the url if we're putting something else
# in the body. this is used to add args to raw uploads.
if body is not None:
url += '?' + qstr
else:
if ct is None:
# XXX encoding and stuff
ct = 'application/x-www-form-urlencoded'
headers['content-type'] = ct
if ct == 'multipart/form-encoded':
# XXX fixme
raise NotImplementedError
elif ct == 'application/x-www-form-urlencoded':
body = qstr
else:
# for all methods other than POST, use the url
url += '?' + qstr
# assure the service that this isn't a CSRF form submission
headers['x-metaweb-request'] = 'Python'
if 'user-agent' not in headers:
headers['user-agent'] = 'python freebase.api-%s' % __version__
#if self.tid is not None:
# headers['x-metaweb-tid'] = self.tid
####### DEBUG MESSAGE - should check log level before generating
if form is None:
formstr = ''
else:
formstr = 'FORM:\n ' + '\n '.join(['%s=%s' % (k,v)
for k,v in form.items()])
if headers is None:
headerstr = ''
else:
headerstr = 'HEADERS:\n ' + '\n '.join([('%s: %s' % (k,v))
for k,v in headers.items()])
self.log.debug('%s %s%s%s', method, url, formstr, headerstr)
#######
if CookiefulHttp is not None:
return self._httplib2_request(url, method, body, headers)
else:
return self._urllib2_request(url, method, body, headers)
def _raise_service_error(self, status, ctype, body):
        is_jsbody = (ctype.endswith('javascript')
                     or ctype.endswith('json'))
if str(status) == '400' and is_jsbody:
r = self._loadjson(body)
msg = r.messages[0]
raise MetawebError(u'%s %s %r' % (msg.get('code',''), msg.message, msg.info))
        raise MetawebError, 'request failed: status %s: %r' % (status, body)
def _urllib2_request(self, url, method, body, headers):
req = urllib2.Request(url, body, headers)
try:
resp = self.opener.open(req)
except socket.error, e:
            self.log.error('SOCKET FAILURE: %s', e)
raise MetawebError, 'failed contacting %s: %s' % (url, str(e))
except urllib2.HTTPError, e:
            self._raise_service_error(e.code, e.info().type, e.fp.read())
for header in resp.info().headers:
self.log.debug('HTTP HEADER %s', header)
name, value = re.split("[:\n\r]", header, 1)
if name.lower() == 'x-metaweb-tid':
self.tid = value.strip()
return (resp, resp.read())
def _httplib2_request(self, url, method, body, headers):
try:
resp, content = self.httpclient.request(url, method=method,
body=body, headers=headers)
except socket.error, e:
            self.log.error('SOCKET FAILURE: %s', e)
raise MetawebError, 'failed contacting %s: %s' % (url, str(e))
except httplib2.HttpLib2ErrorWithResponse, e:
self._raise_service_error(resp.status, resp['content-type'], content)
except httplib2.HttpLib2Error, e:
raise MetawebError(u'HTTP error: %s' % (e,))
#tid = resp.get('x-metaweb-tid', None)
return (resp, content)
def _httpreq_json(self, *args, **kws):
resp, body = self._httpreq(*args, **kws)
return self._loadjson(body)
def _loadjson(self, json):
# TODO really this should be accomplished by hooking
# simplejson to create attrdicts instead of dicts.
def struct2attrdict(st):
"""
copy a json structure, turning all dicts into attrdicts.
copying descends instances of dict and list, including subclasses.
"""
if isinstance(st, dict):
return attrdict([(k,struct2attrdict(v)) for k,v in st.items()])
if isinstance(st, list):
return [struct2attrdict(li) for li in st]
return st
if json == '':
self.log.error('the empty string is not valid json')
raise MetawebError('the empty string is not valid json')
try:
r = simplejson.loads(json)
except ValueError, e:
self.log.error('error parsing json string %r' % json)
raise MetawebError, 'error parsing JSON string: %s' % e
return struct2attrdict(r)
def _check_mqlerror(self, r):
if r.code != '/api/status/ok':
for msg in r.messages:
self.log.error('mql error: %s %s %r' % (msg.code, msg.message, msg.get('query', None)))
raise MetawebError, 'query failed: %s %r' % (r.messages[0].code, r.messages[0].get('query', None))
def _mqlresult(self, r):
self._check_mqlerror(r)
# should check log level to avoid redundant simplejson.dumps
rstr = simplejson.dumps(r.result, indent=2)
if rstr[0] == '{':
rstr = rstr[1:-2]
self.log.info('result: %s', rstr)
return r.result
def login(self):
"""sign in to the service"""
assert self.username is not None
assert self.password is not None
self.log.debug('LOGIN USERNAME: %s', self.username)
try:
r = self._httpreq_json('/api/account/login', 'POST',
form=dict(username=self.username,
password=self.password))
except urllib2.HTTPError, e:
raise MetawebError("login error: %s", e)
if r.code != '/api/status/ok':
raise MetawebError(u'%s %r' % (r.get('code',''), r.messages))
self.log.debug('LOGIN RESP: %r', r)
self.log.debug('LOGIN COOKIES: %s', self.cookiejar)
def mqlreaditer(self, sq):
"""read a structure query"""
cursor = True
while 1:
subq = dict(query=[sq], cursor=cursor, escape=False)
qstr = simplejson.dumps(subq)
service = '/api/service/mqlread'
r = self._httpreq_json(service, form=dict(query=qstr))
for item in self._mqlresult(r):
yield item
if r['cursor']:
cursor = r['cursor']
self.log.info('CONTINUING with %s', cursor)
else:
return
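    # Editor's note (illustrative): mqlreaditer() is the cursor-based variant
    # meant for large result sets, e.g.
    #   for item in mss.mqlreaditer(dict(name=None, type='/type/type')):
    #       print item.name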
def mqlread(self, sq):
"""read a structure query"""
subq = dict(query=sq, escape=False)
if isinstance(sq, list):
subq['cursor'] = True
service = '/api/service/mqlread'
# should check log level to avoid redundant simplejson.dumps
self.log.info('%s: %s',
service,
simplejson.dumps(sq, indent=2)[1:-2])
qstr = simplejson.dumps(subq)
r = self._httpreq_json(service, form=dict(query=qstr))
return self._mqlresult(r)
def trans(self, guid):
"""translate blob from guid """
url = '/api/trans/raw' + urlquote(guid)
self.log.info(url)
resp, body = self._httpreq(url)
self.log.info('%d bytes' % len(body))
return body
def mqlwrite(self, sq):
"""do a mql write"""
query = dict(query=sq, escape=False)
qstr = simplejson.dumps(query)
self.log.debug('MQLWRITE: %s', qstr)
service = '/api/service/mqlwrite'
# should check log level to avoid redundant simplejson.dumps
self.log.info('%s: %s',
service,
simplejson.dumps(sq, indent=2)[1:-2])
r = self._httpreq_json(service, 'POST',
form=dict(query=qstr))
self.log.debug('MQLWRITE RESP: %r', r)
return self._mqlresult(r)
def mqlflush(self):
"""ask the service not to hand us old data"""
self.log.debug('MQLFLUSH')
service = '/api/service/mqlwrite'
r = self._httpreq_json(service, 'POST', form={})
self._check_mqlerror(r)
return r
def upload(self, body, content_type, document_id=False):
"""upload to the metaweb"""
service = '/api/service/upload'
self.log.info('POST %s: %s (%d bytes)',
service, content_type, len(body))
headers = {}
if content_type is not None:
headers['content-type'] = content_type
form = None
if document_id is not False:
if document_id is None:
form = { 'document': '' }
else:
form = { 'document': document_id }
# note the use of both body and form.
# form parameters get encoded into the URL in this case
r = self._httpreq_json(service, 'POST',
headers=headers, body=body, form=form)
return self._mqlresult(r)
if __name__ == '__main__':
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
mss = HTTPMetawebSession('sandbox.freebase.com')
    mss.log.setLevel(logging.DEBUG)
    mss.log.addHandler(console)
print mss.mqlread([dict(name=None, type='/type/type')])
| artzub/code_swarm-gource-my-conf | tools/codeswarm/lib/freebase/api/session.py | Python | gpl-3.0 | 17,231 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Diceware documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 26 22:19:13 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
devdir = ''
try:
if os.environ['DEVDIR']:
devdir = os.environ['DEVDIR']
except KeyError:
print("Unable to obtain $DEVDIR from the environment")
exit(-1)
sys.path.insert(0, devdir + '/diceware')
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Diceware'
copyright = '2015, Tonko Mulder'
author = 'Tonko Mulder'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja',
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Dicewaredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Diceware.tex', 'Diceware Documentation',
'Tonko Mulder', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'diceware', 'Diceware Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Diceware', 'Diceware Documentation',
author, 'Diceware', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
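# Note: newer Sphinx releases prefer a named form of this mapping; a minimal
# sketch of the equivalent configuration (assuming the Python 3 docs as the
# only inventory) would be:
# intersphinx_mapping = {'python': ('https://docs.python.org/3', None)}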
| Treggats/DicewarePassphrase | docs/conf.py | Python | gpl-2.0 | 11,767 |
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-12-09 16:37
import logging
from typing import Dict, Any, Union, Iterable, Callable, List
import torch
from hanlp.common.dataset import SamplerBuilder, PadSequenceDataLoader
from hanlp.common.transform import VocabDict
from hanlp.components.lemmatizer import TransformerLemmatizer
from hanlp.components.mtl.tasks import Task
from hanlp.layers.scalar_mix import ScalarMixWithDropoutBuilder
from hanlp.metrics.metric import Metric
from hanlp.metrics.mtl import MetricDict
from hanlp_common.util import merge_locals_kwargs
from torch.utils.data import DataLoader
class LinearDecoder(torch.nn.Module):
def __init__(self,
hidden_size,
num_labels) -> None:
super().__init__()
self.classifier = torch.nn.Linear(hidden_size, num_labels)
def forward(self, contextualized_embeddings: torch.FloatTensor, batch: Dict[str, torch.Tensor], mask=None):
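        # Project each token's contextualized embedding to per-label logits.
        # ``batch`` and ``mask`` are unused here; they are accepted only to keep
        # the decoder signature uniform across the MTL task decoders.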
return self.classifier(contextualized_embeddings)
class TransformerLemmatization(Task, TransformerLemmatizer):
def __init__(self,
trn: str = None,
dev: str = None,
tst: str = None,
sampler_builder: SamplerBuilder = None,
dependencies: str = None,
scalar_mix: ScalarMixWithDropoutBuilder = None,
use_raw_hidden_states=False,
lr=1e-3,
separate_optimizer=False,
cls_is_bos=False,
sep_is_eos=False,
max_seq_len=None,
sent_delimiter=None,
char_level=False,
hard_constraint=False,
token_key='token', **kwargs) -> None:
""" Transition based lemmatization (:cite:`kondratyuk-straka-2019-75`).
Args:
trn: Path to training set.
dev: Path to dev set.
tst: Path to test set.
sampler_builder: A builder which builds a sampler.
dependencies: Its dependencies on other tasks.
scalar_mix: A builder which builds a `ScalarMixWithDropout` object.
use_raw_hidden_states: Whether to use raw hidden states from transformer without any pooling.
lr: Learning rate for this task.
separate_optimizer: Use customized separate optimizer for this task.
cls_is_bos: ``True`` to treat the first token as ``BOS``.
sep_is_eos: ``True`` to treat the last token as ``EOS``.
max_seq_len: Sentences longer than ``max_seq_len`` will be split into shorter ones if possible.
sent_delimiter: Delimiter between sentences, like period or comma, which indicates a long sentence can
be split here.
char_level: Whether the sequence length is measured at char level, which is never the case for
lemmatization.
hard_constraint: Whether to enforce hard length constraint on sentences. If there is no ``sent_delimiter``
in a sentence, it will be split at a token anyway.
token_key: The key to tokens in dataset. This should always be set to ``token`` in MTL.
**kwargs: Not used.
"""
super().__init__(**merge_locals_kwargs(locals(), kwargs))
self.vocabs = VocabDict()
def build_dataloader(self,
data: List[List[str]],
transform: Callable = None,
training=False,
device=None,
logger: logging.Logger = None,
cache=False,
gradient_accumulation=1,
**kwargs) -> DataLoader:
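        # Collect the sentence-splitting options present in the task config and
        # build the (optionally cached) dataset with the transform supplied by
        # the caller.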
args = dict((k, self.config[k]) for k in
['delimiter', 'max_seq_len', 'sent_delimiter', 'char_level', 'hard_constraint'] if k in self.config)
dataset = self.build_dataset(data, cache=cache, transform=transform, **args)
dataset.append_transform(self.vocabs)
if self.vocabs.mutable:
self.build_vocabs(dataset, logger)
return PadSequenceDataLoader(
batch_sampler=self.sampler_builder.build(self.compute_lens(data, dataset, 'token_input_ids', 'token'),
shuffle=training, gradient_accumulation=gradient_accumulation),
device=device,
dataset=dataset)
def compute_loss(self,
batch: Dict[str, Any],
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any],
criterion) -> Union[torch.FloatTensor, Dict[str, torch.FloatTensor]]:
return TransformerLemmatizer.compute_loss(self, criterion, output, batch['tag_id'], batch['mask'])
def decode_output(self,
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any],
mask: torch.BoolTensor,
batch: Dict[str, Any],
decoder,
**kwargs) -> Union[Dict[str, Any], Any]:
return TransformerLemmatizer.decode_output(self, output, mask, batch, decoder)
def update_metrics(self,
batch: Dict[str, Any],
output: Union[torch.Tensor, Dict[str, torch.Tensor], Iterable[torch.Tensor], Any],
prediction: Dict[str, Any],
metric: Union[MetricDict, Metric]):
return TransformerLemmatizer.update_metrics(self, metric, output, batch['tag_id'], batch['mask'])
def build_model(self, encoder_size, training=True, **kwargs) -> torch.nn.Module:
return LinearDecoder(encoder_size, len(self.vocabs['tag']))
def build_metric(self, **kwargs):
return TransformerLemmatizer.build_metric(self, **kwargs)
def input_is_flat(self, data) -> bool:
return TransformerLemmatizer.input_is_flat(self, data)
def prediction_to_result(self, prediction: Dict[str, Any], batch: Dict[str, Any]) -> Union[List, Dict]:
return TransformerLemmatizer.prediction_to_human(self, prediction, self.vocabs['tag'].idx_to_token, batch,
token=batch['token'])
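# A minimal usage sketch (hypothetical corpus paths; the MTL wiring around it is
# not part of this module): the task is normally constructed with its own data
# and learning rate and then registered in an MTL component next to a shared
# encoder, e.g.
#
#   lem = TransformerLemmatization(trn='data/lem/train.tsv',
#                                  dev='data/lem/dev.tsv',
#                                  lr=1e-3)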
| hankcs/HanLP | hanlp/components/mtl/tasks/lem.py | Python | apache-2.0 | 6,308 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, fields
class AccountChartTemplate(models.Model):
_inherit = 'account.chart.template'
@api.model
def _prepare_all_journals(self, acc_template_ref, company, journals_dict=None):
journal_data = super(AccountChartTemplate, self)._prepare_all_journals(
acc_template_ref, company, journals_dict)
for journal in journal_data:
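            # Luxembourg localisation: give credit notes their own dedicated
            # sequence on the sale and purchase journals of LU companies.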
if journal['type'] in ('sale', 'purchase') and company.country_id == self.env.ref('base.lu'):
journal.update({'refund_sequence': True})
return journal_data
| ddico/odoo | addons/l10n_lu/models/account_chart_template.py | Python | agpl-3.0 | 684 |