import json
from unittest import TestCase
from time import sleep
from cs3api4lab.tests.share_test_base import ShareTestBase
from traitlets.config import LoggingConfigurable
import urllib.parse
class TestLocks(ShareTestBase, TestCase):
einstein_id = '4c510ada-c86b-4815-8820-42cdf82c3d51'
einstein_idp = 'cernbox.cern.ch'
marie_id = 'f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c'
marie_idp = 'cesnet.cz'
richard_id = '932b4540-8d16-481e-8ef4-588e4b6b151c'
richard_idp = 'example.org'
receiver_role = 'viewer'
receiver_grantee_type = 'user'
file_path = '/home/test_locks.txt'
shared_file_path = '/reva/einstein/test_locks.txt'
storage_id = '123e4567-e89b-12d3-a456-426655440000'
share_id = None
conflict_name = None
def test_lock_created_when_file_written(self):
self.file_name = self.file_path + self.get_random_suffix()
try:
created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name)
self.share_id = created_share['opaque_id']
self.file_api.write_file(self.file_name, 'content')
file_ref = self.storage_logic.get_unified_file_ref(self.file_name, '/')
file_info = self.storage_logic._stat_internal(file_ref).info
self.assertTrue(file_info.arbitrary_metadata.metadata)
self.assertIn("lock_einstein_cernbox.cern.ch_4c510ada-c86b-4815-8820-42cdf82c3d51", file_info.arbitrary_metadata.metadata)
lock = json.loads(urllib.parse.unquote(file_info.arbitrary_metadata.metadata["lock_einstein_cernbox.cern.ch_4c510ada-c86b-4815-8820-42cdf82c3d51"]))
            self.assertEqual(lock['username'], 'einstein')
            self.assertEqual(lock['idp'], 'cernbox.cern.ch')
            self.assertEqual(lock['opaque_id'], '4c510ada-c86b-4815-8820-42cdf82c3d51')
finally:
if self.share_id:
self.remove_test_share('einstein', self.share_id)
self.remove_test_file('einstein', self.file_name)
def test_lock_created_when_file_read(self):
self.file_name = self.file_path + self.get_random_suffix()
try:
created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name)
self.share_id = created_share['opaque_id']
for chunk in self.file_api.read_file(self.file_name):
continue
file_ref = self.storage_logic.get_unified_file_ref(self.file_name, '/')
file_info = self.storage_logic._stat_internal(file_ref).info
self.assertTrue(file_info.arbitrary_metadata.metadata)
self.assertIn("lock_einstein_cernbox.cern.ch_4c510ada-c86b-4815-8820-42cdf82c3d51", file_info.arbitrary_metadata.metadata)
lock = json.loads(urllib.parse.unquote(file_info.arbitrary_metadata.metadata["lock_einstein_cernbox.cern.ch_4c510ada-c86b-4815-8820-42cdf82c3d51"]))
            self.assertEqual(lock['username'], 'einstein')
            self.assertEqual(lock['idp'], 'cernbox.cern.ch')
            self.assertEqual(lock['opaque_id'], '4c510ada-c86b-4815-8820-42cdf82c3d51')
finally:
if self.share_id:
self.remove_test_share('einstein', self.share_id)
self.remove_test_file('einstein', self.file_name)
def test_write_file_locked_conflict_created(self):
suffix = self.get_random_suffix()
self.file_name = self.file_path + suffix
shared_name = self.shared_file_path + suffix
try:
created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name)
self.share_id = created_share['opaque_id']
self.file_api.write_file(self.file_name, 'content')
self.conflict_name = self.richard_file_api.write_file(shared_name, "richard_content")
lock_stat = self.richard_file_api.stat(self.conflict_name)
self.assertEqual(lock_stat['filepath'], self.conflict_name)
content = self.read_file_content(self.richard_file_api, self.conflict_name)
self.assertEqual(content, 'richard_content', 'File ' + self.file_name + ' should contain the string: ' + 'richard_content')
finally:
if self.share_id:
self.remove_test_share('einstein', self.share_id)
self.remove_test_file('einstein', self.file_name)
if self.conflict_name:
self.remove_test_file('richard', self.conflict_name)
def test_write_dir_file_locked(self):
suffix = self.get_random_suffix()
self.file_name = '/home/testdir/test_locks.txt' + suffix
shared_name = '/reva/einstein/testdir/test_locks.txt' + suffix
try:
try:
self.file_api.create_directory('/home/testdir')
            except Exception:
                pass  # ignore already existing directory
created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name)
self.share_id = created_share['opaque_id']
self.file_api.write_file(self.file_name, 'content')
self.conflict_name = self.richard_file_api.write_file(shared_name, "richard_content")
lock_stat = self.richard_file_api.stat(self.conflict_name)
self.assertEqual(lock_stat['filepath'], self.conflict_name)
content = self.read_file_content(self.richard_file_api, self.conflict_name)
self.assertEqual(content, 'richard_content', 'File ' + self.file_name + ' should contain the string: ' + 'richard_content')
finally:
if self.share_id:
self.remove_test_share('einstein', self.share_id)
self.remove_test_file('einstein', self.file_name)
if self.conflict_name:
self.remove_test_file('richard', self.conflict_name)
def test_write_file_lock_expired(self):
suffix = self.get_random_suffix()
self.file_name = self.file_path + suffix
shared_name = self.shared_file_path + suffix
try:
created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name)
self.share_id = created_share['opaque_id']
self.file_api.write_file(self.file_name, 'content')
sleep(12)
self.richard_file_api.write_file(shared_name, "richard_content")
content = self.read_file_content(self.richard_file_api, shared_name)
self.assertEqual(content, 'richard_content', 'File ' + self.file_name + ' should contain the string: ' + 'richard_content')
finally:
if self.share_id:
self.remove_test_share('einstein', self.share_id)
self.remove_test_file('einstein', self.file_name)
def test_write_by_lock_owner_file_locked(self):
self.file_name = self.file_path + self.get_random_suffix()
try:
created_share = self.create_share('einstein', self.richard_id, self.richard_idp, self.file_name)
self.share_id = created_share['opaque_id']
self.file_api.write_file(self.file_name, 'content')
self.file_api.write_file(self.file_name, 'new_content')
content = self.read_file_content(self.file_api, self.file_name)
self.assertEqual(content, 'new_content', 'File ' + self.file_name + ' should contain the string: ' + 'new_content')
finally:
if self.share_id:
self.remove_test_share('einstein', self.share_id)
self.remove_test_file('einstein', self.file_name)
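# A minimal sketch of the lock-metadata convention the assertions above rely
# on (the helper below is illustrative and not part of cs3api4lab): the lock
# is stored under an arbitrary-metadata key of the form
# "lock_<username>_<idp>_<opaque_id>", and its value is a URL-quoted JSON
# object carrying at least 'username', 'idp' and 'opaque_id'.
# test_write_file_lock_expired additionally assumes the lock expires in well
# under the 12 seconds it sleeps.
def _expected_lock_key(username, idp, opaque_id):
    return 'lock_%s_%s_%s' % (username, idp, opaque_id)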
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from typing import Any, Dict
import torchvision.transforms as pth_transforms
from classy_vision.dataset.transforms import build_transform, register_transform
from classy_vision.dataset.transforms.classy_transform import ClassyTransform
from classy_vision.generic.registry_utils import import_all_modules
# Below are the transforms that require passing the labels as well. This is
# specific to SSL, where we automatically generate the labels for training. All
# other transforms (including torchvision) take only the image as input.
_TRANSFORMS_WITH_LABELS = ["ImgRotatePil", "ShuffleImgPatches"]
_TRANSFORMS_WITH_COPIES = [
"ImgReplicatePil",
"ImgPilToPatchesAndImage",
"ImgPilToMultiCrop",
]
_TRANSFORMS_WITH_GROUPING = ["ImgPilMultiCropRandomApply"]
# we wrap around transforms so that they work with the multimodal input
@register_transform("SSLTransformsWrapper")
class SSLTransformsWrapper(ClassyTransform):
"""
VISSL wraps around transforms so that they work with the multimodal input.
VISSL supports batches that come from several datasets and sources. Hence
the input batch (images, labels) always is a list.
To apply the user defined transforms, VISSL takes "indices" as input which
defines on what dataset/source data in the sample should the transform be
applied to. For example:
Assuming input sample is {
"data": [dataset1_imgX, dataset2_imgY],
"label": [dataset1_lblX, dataset2_lblY]
}
and the transform is:
TRANSFORMS:
- name: RandomGrayscale
p: 0.2
indices: 0
    then the transform is applied only on dataset1_imgX. If, however, the
    indices are either not specified or set to [0, 1], then the transform
    is applied on both dataset1_imgX and dataset2_imgY.
Since this structure of data is introduced by vissl, the SSLTransformsWrapper
takes care of dealing with the multi-modality input by wrapping the
original transforms (pytorch transforms or custom transforms defined by user)
and calling each transform on each index.
VISSL also supports _TRANSFORMS_WITH_LABELS transforms that modify the label
or are used to generate the labels used in self-supervised learning tasks like
Jigsaw. When the transforms in _TRANSFORMS_WITH_LABELS are called, the new
label is also returned besides the transformed image.
    VISSL also supports the _TRANSFORMS_WITH_COPIES transforms, which
    generate several copies of an image. Common examples of self-supervised
    training methods that do this are SimCLR, SwAV and MoCo.
    When a transform from _TRANSFORMS_WITH_COPIES is used, the SSLTransformsWrapper
    will flatten the transform output.
For example for the input [img1], if we apply ImgReplicatePil to replicate
the image 2 times:
SSLTransformsWrapper(
ImgReplicatePil(num_times=2), [img1]
)
will output [img1_1, img1_2] instead of nested list [[img1_1, img1_2]].
    The benefit of this is that the next set of transforms specified by the user
    can now operate on img1_1 and img1_2, as the input becomes multi-modal in nature.
    VISSL also supports _TRANSFORMS_WITH_GROUPING, which means that a single
    transform should be applied on the full multi-modal input together instead
    of separately. This is a common transform used in BYOL.
For example:
SSLTransformsWrapper(
ImgPilMultiCropRandomApply(
RandomApply, prob=[0.0, 0.2]
), [img1_1, img1_2]
)
    This will apply RandomApply on img1_1 with prob=0.0 and on img1_2 with
    prob=0.2.
"""
def __init__(self, indices, **args):
"""
Args:
            indices (List[int]) (Optional): the list of indices on which the
                transform should be applied; the input is always a list.
                Example: a minibatch of size=2 looks like [[img1], [img2]].
                If indices is not specified, the transform is applied
                to all of the multi-modal input.
args (dict): the arguments that the transform takes
"""
self.indices = set(indices)
self.name = args["name"]
self.transform = build_transform(args)
def _is_transform_with_labels(self):
"""
_TRANSFORMS_WITH_LABELS = ["ImgRotatePil", "ShuffleImgPatches"]
"""
if self.name in _TRANSFORMS_WITH_LABELS:
return True
return False
def _is_transform_with_copies(self):
"""
_TRANSFORMS_WITH_COPIES = [
"ImgReplicatePil",
"ImgPilToPatchesAndImage",
"ImgPilToMultiCrop",
]
"""
if self.name in _TRANSFORMS_WITH_COPIES:
return True
return False
def _is_grouping_transform(self):
"""
_TRANSFORMS_WITH_GROUPING = ["ImgPilMultiCropRandomApply"]
"""
if self.name in _TRANSFORMS_WITH_GROUPING:
return True
return False
def __call__(self, sample):
"""
Apply each transform on the specified indices of each entry in
the input sample.
"""
# Run on all indices if empty set is passed.
indices = self.indices if self.indices else set(range(len(sample["data"])))
if self._is_grouping_transform():
# if the transform needs to be applied to all the indices
# together. For example: one might want to vary the intensity
# of a transform across several crops of an image as in BYOL.
output = self.transform(sample["data"])
sample["data"] = output
else:
for idx in indices:
output = self.transform(sample["data"][idx])
if self._is_transform_with_labels():
sample["data"][idx] = output[0]
sample["label"][-1] = output[1]
else:
sample["data"][idx] = output
if self._is_transform_with_copies():
# if the transform makes copies of the data, we just flatten the list
# so the next set of transforms will operate on more indices
sample["data"] = [val for sublist in sample["data"] for val in sublist]
# now we replicate the rest of the metadata as well
num_times = len(sample["data"])
sample["label"] = sample["label"] * num_times
sample["data_valid"] = sample["data_valid"] * num_times
sample["data_idx"] = sample["data_idx"] * num_times
return sample
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "SSLTransformsWrapper":
indices = config.get("indices", [])
return cls(indices, **config)
def get_transform(input_transforms_list):
"""
Given the list of user specified transforms, return the
torchvision.transforms.Compose() version of the transforms. Each transform
in the composition is SSLTransformsWrapper which wraps the original
transforms to handle multi-modal nature of input.
"""
output_transforms = []
for transform_config in input_transforms_list:
transform = SSLTransformsWrapper.from_config(transform_config)
output_transforms.append(transform)
return pth_transforms.Compose(output_transforms)
FILE_ROOT = Path(__file__).parent
import_all_modules(FILE_ROOT, "vissl.data.ssl_transforms")
__all__ = ["SSLTransformsWrapper", "get_transform"]
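# Illustrative usage sketch (assumptions flagged): the transform names follow
# the docstring examples above; "RandomGrayscale" is assumed to resolve via
# classy_vision's registry / torchvision fallback, and "ImgReplicatePil" is
# one of VISSL's registered _TRANSFORMS_WITH_COPIES.
#
#   transform = get_transform([
#       {"name": "RandomGrayscale", "p": 0.2, "indices": [0]},
#       {"name": "ImgReplicatePil", "num_times": 2},
#   ])
#   sample = {"data": [pil_img], "label": [-1],
#             "data_valid": [1], "data_idx": [0]}
#   sample = transform(sample)
#   # ImgReplicatePil makes 2 copies, so SSLTransformsWrapper flattens "data"
#   # to two entries and replicates "label", "data_valid" and "data_idx".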
import _plotly_utils.basevalidators
class CategoryarraysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='categoryarraysrc',
parent_name='layout.scene.zaxis',
**kwargs
):
super(CategoryarraysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type='none',
role='info',
**kwargs
)
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.txt')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
CHANGES = f.read()
requires = [
'elasticsearch',
'pyramid',
'pyramid_chameleon',
'pyramid_debugtoolbar',
'gunicorn',
]
tests_requires = [
'mocker'
]
setup(name='wayta',
version='1.0b',
description='A tool to suggest the name of an institution or country in the original form and language.',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='SciELO',
author_email='[email protected]',
url='http://docs.scielo.org',
keywords='web pyramid pylons',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
setup_requires=["nose>=1.0", "coverage"],
tests_require=tests_requires,
test_suite="nose.collector",
entry_points="""\
[paste.app_factory]
main = wayta:main
""",
)
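# Deployment note (illustrative, not verified against the wayta sources): the
# [paste.app_factory] entry point implies that the wayta package exposes a
# Pyramid-style factory, roughly
#   def main(global_config, **settings):
#       ...  # build the configurator and return config.make_wsgi_app()
# which a PasteDeploy-aware server (e.g. gunicorn --paste config.ini) can load.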
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""File processing utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import errno
import functools
import glob as _glob
import os
import shutil
import threading
import six
class _GFileBase(six.Iterator):
"""Base I/O wrapper class. Similar semantics to Python's file object."""
# pylint: disable=protected-access
def _synchronized(fn):
"""Synchronizes file I/O for methods in GFileBase."""
@functools.wraps(fn)
def sync(self, *args, **kwargs):
# Sometimes a GFileBase method is called before the instance
# has been properly initialized. Check that _locker is available.
if hasattr(self, '_locker'): self._locker.lock()
try:
return fn(self, *args, **kwargs)
finally:
if hasattr(self, '_locker'): self._locker.unlock()
return sync
# pylint: enable=protected-access
def __init__(self, name, mode, locker):
"""Create the GFileBase object with the given filename, mode, and locker.
Args:
name: string, the filename.
mode: string, the mode to open the file with (e.g. "r", "w", "a+").
locker: the thread locking object (e.g. _PythonLocker) for controlling
thread access to the I/O methods of this class.
"""
self._name = name
self._mode = mode
self._locker = locker
self._fp = open(name, mode)
def __enter__(self):
"""Make GFileBase usable with "with" statement."""
return self
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Make GFileBase usable with "with" statement."""
self.close()
@_synchronized
def __del__(self):
# __del__ is sometimes called before initialization, in which
# case the object is not fully constructed. Check for this here
# before trying to close the file handle.
if hasattr(self, '_fp'): self._fp.close()
@_synchronized
def flush(self):
"""Flush the underlying file handle."""
return self._fp.flush()
@property
@_synchronized
def closed(self):
"""Returns "True" if the file handle is closed. Otherwise False."""
return self._fp.closed
@_synchronized
def write(self, data):
"""Write data to the underlying file handle.
Args:
data: The string to write to the file handle.
"""
self._fp.write(data)
@_synchronized
def writelines(self, seq):
"""Write a sequence of strings to the underlying file handle."""
self._fp.writelines(seq)
@_synchronized
def tell(self):
"""Return the location from the underlying file handle.
Returns:
An integer location (which can be used in e.g., seek).
"""
return self._fp.tell()
@_synchronized
def seek(self, offset, whence=0):
"""Seek to offset (conditioned on whence) in the underlying file handle.
Args:
offset: int, the offset within the file to seek to.
whence: 0, 1, or 2. See python's seek() documentation for details.
"""
self._fp.seek(offset, whence)
@_synchronized
def truncate(self, new_size=None):
"""Truncate the underlying file handle to new_size.
Args:
new_size: Size after truncation. If None, the file handle is truncated
to 0 bytes.
"""
self._fp.truncate(new_size)
@_synchronized
def readline(self, max_length=-1):
"""Read a single line (up to max_length) from the underlying file handle.
Args:
      max_length: The maximum number of characters to read.
Returns:
A string, including any newline at the end, or empty string if at EOF.
"""
return self._fp.readline(max_length)
@_synchronized
def readlines(self, sizehint=None):
"""Read lines from the underlying file handle.
Args:
sizehint: See the python file.readlines() documentation.
Returns:
A list of strings from the underlying file handle.
"""
if sizehint is not None:
return self._fp.readlines(sizehint)
else:
return self._fp.readlines()
def __iter__(self):
"""Enable line iteration on the underlying handle (not synchronized)."""
return self
# Not synchronized
def __next__(self):
"""Enable line iteration on the underlying handle (not synchronized).
Returns:
      A line iterator from the underlying handle.
Example:
# read a file's lines by consuming the iterator with a list
with open("filename", "r") as fp: lines = list(fp)
"""
return next(self._fp)
@_synchronized
def Size(self): # pylint: disable=invalid-name
"""Get byte size of the file from the underlying file handle."""
cur = self.tell()
try:
self.seek(0, 2)
size = self.tell()
finally:
self.seek(cur)
return size
@_synchronized
def read(self, n=-1):
"""Read n bytes from the underlying file handle.
Args:
n: Number of bytes to read (if negative, read to end of file handle.)
Returns:
A string of the bytes read, up to the end of file.
"""
return self._fp.read(n)
@_synchronized
def close(self):
"""Close the underlying file handle."""
self._fp.close()
# Declare wrappers as staticmethods at the end so that we can
# use them as decorators.
_synchronized = staticmethod(_synchronized)
class GFile(_GFileBase):
"""File I/O wrappers with thread locking."""
def __init__(self, name, mode='r'):
super(GFile, self).__init__(name, mode, _Pythonlocker())
class FastGFile(_GFileBase):
"""File I/O wrappers without thread locking."""
def __init__(self, name, mode='r'):
super(FastGFile, self).__init__(name, mode, _Nulllocker())
# locker classes. Note that locks must be reentrant, so that multiple
# lock() calls by the owning thread will not block.
class _Pythonlocker(object):
"""A locking strategy that uses standard locks from the thread module."""
def __init__(self):
self._lock = threading.RLock()
def lock(self):
self._lock.acquire()
def unlock(self):
self._lock.release()
class _Nulllocker(object):
"""A locking strategy where lock() and unlock() methods are no-ops."""
def lock(self):
pass
def unlock(self):
pass
def Exists(path): # pylint: disable=invalid-name
"""Returns True iff "path" exists (as a dir, file, non-broken symlink)."""
return os.path.exists(path)
def IsDirectory(path): # pylint: disable=invalid-name
"""Return True iff "path" exists and is a directory."""
return os.path.isdir(path)
def Glob(glob): # pylint: disable=invalid-name
"""Return a list of filenames matching the glob "glob"."""
return _glob.glob(glob)
def MkDir(path, mode=0o755): # pylint: disable=invalid-name
"""Create the directory "path" with the given mode.
Args:
path: The directory path
mode: The file mode for the directory
Returns:
None
Raises:
OSError: if the path already exists
"""
os.mkdir(path, mode)
def MakeDirs(path, mode=0o755): # pylint: disable=invalid-name
"""Recursively create the directory "path" with the given mode.
Args:
path: The directory path.
mode: The file mode for the created directories
Raises:
OSError: if the path already exists
"""
# NOTE(mrry): MakeDirs("") should be a no-op to match other
# implementations of tf.gfile.
if path:
os.makedirs(path, mode)
def RmDir(directory): # pylint: disable=invalid-name
"""Removes the directory "directory" iff the directory is empty.
Args:
directory: The directory to remove.
Raises:
OSError: If the directory does not exist or is not empty.
"""
os.rmdir(directory)
def Remove(path): # pylint: disable=invalid-name
"""Delete the (non-directory) file "path".
Args:
path: The file to remove.
Raises:
OSError: If "path" does not exist, is a directory, or cannot be deleted.
"""
os.remove(path)
def Rename(oldpath, newpath, overwrite=False):
"""Rename or move a file, or a local directory.
Args:
oldpath: string; a pathname of a file.
newpath: string; a pathname to which the file will be moved.
overwrite: boolean; if false, it is an error for newpath to be
occupied by an existing file.
Raises:
OSError: If "newpath" is occupied by an existing file and overwrite=False.
"""
if not overwrite and Exists(newpath) and not IsDirectory(newpath):
raise OSError(errno.EEXIST, os.strerror(errno.EEXIST), newpath)
os.rename(oldpath, newpath)
def DeleteRecursively(path): # pylint: disable=invalid-name
"""Delete the file or directory "path" recursively.
Args:
path: The path to remove (may be a non-empty directory).
Raises:
OSError: If the path does not exist or cannot be deleted.
"""
if IsDirectory(path):
shutil.rmtree(path)
else:
Remove(path)
def ListDirectory(directory, return_dotfiles=False): # pylint: disable=invalid-name
"""Returns a list of files in dir.
As with the standard os.listdir(), the filenames in the returned list will be
the basenames of the files in dir (not absolute paths). To get a list of
absolute paths of files in a directory, a client could do:
    file_list = gfile.ListDirectory(my_dir)
file_list = [os.path.join(my_dir, f) for f in file_list]
(assuming that my_dir itself specified an absolute path to a directory).
Args:
directory: the directory to list
return_dotfiles: if True, dotfiles will be returned as well. Even if
this arg is True, '.' and '..' will not be returned.
Returns:
['list', 'of', 'files']. The entries '.' and '..' are never returned.
Other entries starting with a dot will only be returned if return_dotfiles
is True.
Raises:
OSError: if there is an error retrieving the directory listing.
"""
files = os.listdir(directory)
if not return_dotfiles:
files = [f for f in files if not f.startswith('.')]
return files
def Walk(top, topdown=1, onerror=None):
"""Recursive directory tree generator.
Args:
top: string, a pathname.
topdown: bool, should traversal be pre-order (True) or post-order (False)
onerror: function, optional callback for errors.
By default, errors that occur when listing a directory are ignored.
(This is the same semantics as Python's os.walk() generator.) If the
optional argument "onerror" is specified, it should be a function. It
will be called with one argument, an os.error instance. It can return
to continue with the walk, or reraise the exception to abort the walk.
Yields:
# Each yield is a 3-tuple: the pathname of a directory, followed
# by lists of all its subdirectories and leaf files.
(dirname, [subdirname, subdirname, ...], [filename, filename, ...])
"""
return os.walk(top, topdown=topdown, onerror=onerror)
def Stat(path): # pylint: disable=invalid-name
"""Gets the status of a file.
Args:
path: The file to call Stat() on.
Does the equivalent of Stat() on the specified "path" and return file
properties.
Returns:
An object whose attributes give information on the file.
Raises:
OSError: If "path" does not exist.
"""
statinfo = os.stat(path)
filestat = collections.namedtuple('FileStat', ['mtime'])
filestat.mtime = statinfo.st_mtime
return filestat
def Copy(oldpath, newpath, overwrite=False):
"""Copy a file.
Args:
oldpath: string; a pathname of a file.
newpath: string; a pathname to which the file will be copied.
overwrite: boolean; if false, it is an error for newpath to be
occupied by an existing file.
Raises:
OSError: If "newpath" is occupied by an existing file and overwrite=False,
or any error thrown by shutil.copy.
"""
if not overwrite and Exists(newpath):
raise OSError(errno.EEXIST, os.strerror(errno.EEXIST), newpath)
shutil.copy(oldpath, newpath)
def Open(name, mode='r'):
"""Exact API match to the standard open.
Args:
name: a file name, either local or a gfile compatible.
mode: for example "w" to open the file for writing.
Returns:
A threadsafe gfile.GFile object.
"""
return GFile(name, mode=mode)
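# Illustrative sketch (not part of the original module): GFile mirrors the
# built-in file object and supports the "with" statement, so a thread-safe
# round trip looks like ordinary Python file I/O.
def _gfile_roundtrip_example(path):
  """Write then re-read `path` through the locking GFile wrapper."""
  with GFile(path, 'w') as f:
    f.write('hello\n')
  with GFile(path, 'r') as f:
    return f.read()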
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from .. import models
class EdgeNodesOperations(object):
"""EdgeNodesOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Version of the API to be used with the client request. Current version is 2017-04-02. Constant value: "2017-04-02".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-04-02"
self.config = config
def list(
self, custom_headers=None, raw=False, **operation_config):
"""Edgenodes are the global Point of Presence (POP) locations used to
deliver CDN content to end users.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of EdgeNode
:rtype:
~azure.mgmt.cdn.models.EdgeNodePaged[~azure.mgmt.cdn.models.EdgeNode]
:raises:
:class:`ErrorResponseException<azure.mgmt.cdn.models.ErrorResponseException>`
"""
def internal_paging(next_link=None, raw=False):
if not next_link:
# Construct URL
url = self.list.metadata['url']
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(
request, header_parameters, stream=False, **operation_config)
if response.status_code not in [200]:
raise models.ErrorResponseException(self._deserialize, response)
return response
# Deserialize response
deserialized = models.EdgeNodePaged(internal_paging, self._deserialize.dependencies)
if raw:
header_dict = {}
client_raw_response = models.EdgeNodePaged(internal_paging, self._deserialize.dependencies, header_dict)
return client_raw_response
return deserialized
list.metadata = {'url': '/providers/Microsoft.Cdn/edgenodes'}
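# Illustrative usage sketch (hedged): in the generated SDK this operations
# class is normally reached through the management client rather than built by
# hand; the attribute names below ("edge_nodes", "name") are assumptions.
#
#   for edge_node in cdn_client.edge_nodes.list():
#       print(edge_node.name)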
"""
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
from __future__ import division
# pylint: disable=E1101,E1103
# pylint: disable=W0212,W0231,W0703,W0622
import functools
import collections
import itertools
import sys
import types
import warnings
from numpy import nan as NA
import numpy as np
import numpy.ma as ma
from pandas.core.common import (isnull, notnull, PandasError, _try_sort,
_default_index, _maybe_upcast, is_sequence,
_infer_dtype_from_scalar, _values_from_object,
is_list_like, _get_dtype, _maybe_box_datetimelike,
is_categorical_dtype, is_object_dtype, _possibly_infer_to_datetimelike)
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import Index, MultiIndex, _ensure_index
from pandas.core.indexing import (maybe_droplevels,
convert_to_index_sliceable,
check_bool_indexer)
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
from pandas.core.series import Series
from pandas.core.categorical import Categorical
import pandas.computation.expressions as expressions
from pandas.computation.eval import eval as _eval
from numpy import percentile as _quantile
from pandas.compat import(range, zip, lrange, lmap, lzip, StringIO, u,
OrderedDict, raise_with_traceback)
from pandas import compat
from pandas.sparse.array import SparseArray
from pandas.util.decorators import deprecate, Appender, Substitution, \
deprecate_kwarg
from pandas.tseries.period import PeriodIndex
from pandas.tseries.index import DatetimeIndex
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.core.format as fmt
import pandas.core.nanops as nanops
import pandas.core.ops as ops
import pandas.lib as lib
import pandas.algos as _algos
from pandas.core.config import get_option
#----------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(axes='index, columns', klass='DataFrame',
axes_single_arg="{0,1,'index','columns'}")
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame objects by performing a database-style join operation by
columns or indexes.
If joining columns on columns, the DataFrame indexes *will be
ignored*. Otherwise if joining indexes on indexes or indexes on a column or
columns, the index will be passed on.
Parameters
----------%s
right : DataFrame
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
* left: use only keys from left frame (SQL: left outer join)
* right: use only keys from right frame (SQL: right outer join)
* outer: use union of keys from both frames (SQL: full outer join)
* inner: use intersection of keys from both frames (SQL: inner join)
on : label or list
Field names to join on. Must be found in both DataFrames. If on is
None and not merging on indexes, then it merges on the intersection of
the columns by default.
left_on : label or list, or array-like
Field names to join on in left DataFrame. Can be a vector or list of
vectors of the length of the DataFrame to use a particular vector as
the join key instead of columns
right_on : label or list, or array-like
Field names to join on in right DataFrame or vector/list of vectors per
left_on docs
left_index : boolean, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels
right_index : boolean, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index
sort : boolean, default False
Sort the join keys lexicographically in the result DataFrame
suffixes : 2-length sequence (tuple, list, ...)
Suffix to apply to overlapping column names in the left and right
side, respectively
copy : boolean, default True
If False, do not copy data unnecessarily
Examples
--------
>>> A >>> B
lkey value rkey value
0 foo 1 0 foo 5
1 bar 2 1 bar 6
2 baz 3 2 qux 7
3 foo 4 3 bar 8
>>> merge(A, B, left_on='lkey', right_on='rkey', how='outer')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 4 foo 5
2 bar 2 bar 6
3 bar 2 bar 8
4 baz 3 NaN NaN
5 NaN NaN qux 7
Returns
-------
merged : DataFrame
    The output type will be the same as 'left', if it is a subclass
of DataFrame.
"""
#----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
""" Two-dimensional size-mutable, potentially heterogeneous tabular data
structure with labeled axes (rows and columns). Arithmetic operations
align on both row and column labels. Can be thought of as a dict-like
container for Series objects. The primary pandas data structure
Parameters
----------
data : numpy ndarray (structured or homogeneous), dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects
index : Index or array-like
Index to use for resulting frame. Will default to np.arange(n) if
no indexing information part of input data and no index provided
columns : Index or array-like
Column labels to use for resulting frame. Will default to
np.arange(n) if no column labels are provided
dtype : dtype, default None
Data type to force, otherwise infer
copy : boolean, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
--------
>>> d = {'col1': ts1, 'col2': ts2}
>>> df = DataFrame(data=d, index=index)
>>> df2 = DataFrame(np.random.randn(10, 5))
>>> df3 = DataFrame(np.random.randn(10, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
See also
--------
DataFrame.from_records : constructor from tuples, also record arrays
DataFrame.from_dict : from dicts of Series, arrays, or dicts
DataFrame.from_csv : from CSV files
DataFrame.from_items : from sequence of (key, value) pairs
pandas.read_csv, pandas.read_table, pandas.read_clipboard
"""
_auto_consolidate = True
@property
def _constructor(self):
return DataFrame
_constructor_sliced = Series
def __init__(self, data=None, index=None, columns=None, dtype=None,
copy=False):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._data
if isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = _masked_rec_array_to_mgr(data, index, columns, dtype,
copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = _maybe_upcast(data, copy=True)
data[mask] = fill_value
else:
data = data.copy()
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = dict((k, data[k]) for k in data_columns)
if columns is None:
columns = data_columns
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif getattr(data, 'name', None):
mgr = self._init_dict({data.name: data}, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, (list, types.GeneratorType)):
if isinstance(data, types.GeneratorType):
data = list(data)
if len(data) > 0:
if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:
arrays, columns = _to_arrays(data, columns, dtype=dtype)
columns = _ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = _get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = _default_index(len(data[0]))
else:
index = _default_index(len(data))
mgr = _arrays_to_mgr(arrays, columns, index, columns,
dtype=dtype)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
else:
mgr = self._init_ndarray(data, index, columns, dtype=dtype,
copy=copy)
elif isinstance(data, collections.Iterator):
raise TypeError("data argument can't be an iterator")
else:
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as e:
exc = TypeError('DataFrame constructor called with '
'incompatible data and dtype: %s' % e)
raise_with_traceback(exc)
if arr.ndim == 0 and index is not None and columns is not None:
if isinstance(data, compat.string_types) and dtype is None:
dtype = np.object_
if dtype is None:
dtype, data = _infer_dtype_from_scalar(data)
values = np.empty((len(index), len(columns)), dtype=dtype)
values.fill(data)
mgr = self._init_ndarray(values, index, columns, dtype=dtype,
copy=False)
else:
raise PandasError('DataFrame constructor not properly called!')
NDFrame.__init__(self, mgr, fastpath=True)
def _init_dict(self, data, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
if columns is not None:
columns = _ensure_index(columns)
# prefilter if columns passed
data = dict((k, v) for k, v in compat.iteritems(data)
if k in columns)
if index is None:
index = extract_index(list(data.values()))
else:
index = _ensure_index(index)
arrays = []
data_names = []
for k in columns:
if k not in data:
# no obvious "empty" int column
if dtype is not None and issubclass(dtype.type,
np.integer):
continue
if dtype is None:
# 1783
v = np.empty(len(index), dtype=object)
else:
v = np.empty(len(index), dtype=dtype)
v.fill(NA)
else:
v = data[k]
data_names.append(k)
arrays.append(v)
else:
keys = list(data.keys())
if not isinstance(data, OrderedDict):
keys = _try_sort(keys)
columns = data_names = Index(keys)
arrays = [data[k] for k in keys]
return _arrays_to_mgr(arrays, data_names, index, columns,
dtype=dtype)
def _init_ndarray(self, values, index, columns, dtype=None,
copy=False):
# input must be a ndarray, list, Series, index
if isinstance(values, Series):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# helper to create the axes as indexes
def _get_axes(N, K, index=index, columns=columns):
# return axes or defaults
if index is None:
index = _default_index(N)
else:
index = _ensure_index(index)
if columns is None:
columns = _default_index(K)
else:
columns = _ensure_index(columns)
return index, columns
# we could have a categorical type passed or coerced to 'category'
# recast this to an _arrays_to_mgr
if is_categorical_dtype(getattr(values,'dtype',None)) or is_categorical_dtype(dtype):
if not hasattr(values,'dtype'):
values = _prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values),1)
return _arrays_to_mgr([ values ], columns, index, columns,
dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = _prep_ndarray(values, copy=copy)
if dtype is not None:
if values.dtype != dtype:
try:
values = values.astype(dtype)
except Exception as orig:
e = ValueError("failed to cast to '%s' (Exception was: %s)"
% (dtype, orig))
raise_with_traceback(e)
index, columns = _get_axes(*values.shape)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values):
values = _possibly_infer_to_datetimelike(values)
return create_block_manager_from_blocks([values], [columns, index])
@property
def axes(self):
return [self.index, self.columns]
@property
def shape(self):
return (len(self.index), len(self.columns))
def _repr_fits_vertical_(self):
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width=False):
"""
Check if full repr fits in horizontal boundaries imposed by the display
        options width and max_columns. In case of a non-interactive session, no
        boundaries apply.
        ignore_width is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = fmt.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if ((max_columns and nb_columns > max_columns) or
((not ignore_width) and width and nb_columns > (width // 2))):
return False
if (ignore_width # used by repr_html under IPython notebook
# scripts ignore terminal dims
or not com.in_interactive_session()):
return True
if (get_option('display.width') is not None or
com.in_ipython_frontend()):
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
        # check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
if not (max_rows is None): # unlimited rows
# min of two, where one may be None
d = d.iloc[:min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max([len(l) for l in value.split('\n')])
return repr_width < width
def _info_repr(self):
"""True if the repr should show the info view."""
info_repr_option = (get_option("display.large_repr") == "info")
return info_repr_option and not (
self._repr_fits_horizontal_() and self._repr_fits_vertical_()
)
def __unicode__(self):
"""
Return a string representation for a particular DataFrame
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
buf = StringIO(u(""))
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = fmt.get_console_size()
else:
width = None
self.to_string(buf=buf, max_rows=max_rows, max_cols=max_cols,
line_width=width, show_dimensions=show_dimensions)
return buf.getvalue()
def _repr_html_(self):
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
# qtconsole doesn't report its line width, and also
# behaves badly when outputting an HTML table
# that doesn't fit the window, so disable it.
# XXX: In IPython 3.x and above, the Qt console will not attempt to
# display HTML, so this check can be removed when support for IPython 2.x
# is no longer needed.
if com.in_qtconsole():
# 'HTML output is disabled in QtConsole'
return None
if self._info_repr():
buf = StringIO(u(""))
self.info(buf=buf)
# need to escape the <class>, should be the first line.
            val = buf.getvalue().replace('<', r'&lt;', 1).replace('>',
                                                                  r'&gt;', 1)
return '<pre>' + val + '</pre>'
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
return ('<div style="max-height:1000px;'
'max-width:1500px;overflow:auto;">\n' +
self.to_html(max_rows=max_rows, max_cols=max_cols,
show_dimensions=show_dimensions) + '\n</div>')
else:
return None
def iteritems(self):
"""Iterator over (column, series) pairs"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self.icol(i)
def iterrows(self):
"""
Iterate over rows of DataFrame as (index, Series) pairs.
Notes
-----
* ``iterrows`` does **not** preserve dtypes across the rows (dtypes
are preserved across columns for DataFrames). For example,
>>> df = DataFrame([[1, 1.0]], columns=['x', 'y'])
>>> row = next(df.iterrows())[1]
>>> print(row['x'].dtype)
float64
>>> print(df['x'].dtype)
int64
Returns
-------
it : generator
A generator that iterates over the rows of the frame.
"""
columns = self.columns
for k, v in zip(self.index, self.values):
s = Series(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True):
"""
Iterate over rows of DataFrame as tuples, with index value
as first element of the tuple
"""
arrays = []
if index:
arrays.append(self.index)
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
return zip(*arrays)
if compat.PY3: # pragma: no cover
items = iteritems
def __len__(self):
"""Returns length of info axis, but here we use the index """
return len(self.index)
def dot(self, other):
"""
Matrix multiplication with DataFrame or Series objects
Parameters
----------
other : DataFrame or Series
Returns
-------
dot_product : DataFrame or Series
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if (len(common) > len(self.columns) or
len(common) > len(other.index)):
raise ValueError('matrices are not aligned')
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError('Dot product shape mismatch, %s vs %s' %
(lvals.shape, rvals.shape))
if isinstance(other, DataFrame):
return self._constructor(np.dot(lvals, rvals),
index=left.index,
columns=other.columns)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError('unsupported type: %s' % type(other))
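    # Usage sketch for dot (illustrative): the caller's columns are aligned
    # with the other operand's index before multiplying, e.g.
    #   df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
    #   s = Series([10, 20], index=['a', 'b'])
    #   df.dot(s)        # Series([50, 110]) indexed like df.index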
#----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient='columns', dtype=None):
"""
Construct DataFrame from dict of array-like or dicts
Parameters
----------
data : dict
{field : array-like} or {field : dict}
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
Returns
-------
DataFrame
"""
index, columns = None, None
orient = orient.lower()
if orient == 'index':
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient != 'columns': # pragma: no cover
raise ValueError('only recognize index or columns for orient')
return cls(data, index=index, columns=columns, dtype=dtype)
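    # Usage sketch for from_dict (illustrative): with the default
    # orient='columns' the dict keys become column labels; with orient='index'
    # they become row labels, e.g.
    #   DataFrame.from_dict({'col1': [1, 2], 'col2': [3, 4]})
    #   DataFrame.from_dict({'r1': {'a': 1, 'b': 2}, 'r2': {'a': 3, 'b': 4}},
    #                       orient='index')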
@deprecate_kwarg(old_arg_name='outtype', new_arg_name='orient')
def to_dict(self, orient='dict'):
"""Convert DataFrame to dictionary.
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records'}
Determines the type of the values of the dictionary.
- dict (default) : dict like {column -> {index -> value}}
- list : dict like {column -> [values]}
- series : dict like {column -> Series(values)}
- split : dict like
{index -> [index], columns -> [columns], data -> [values]}
- records : list like
[{column -> value}, ... , {column -> value}]
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
Returns
-------
result : dict like {column -> {index -> value}}
"""
if not self.columns.is_unique:
warnings.warn("DataFrame columns are not unique, some "
"columns will be omitted.", UserWarning)
if orient.lower().startswith('d'):
return dict((k, v.to_dict()) for k, v in compat.iteritems(self))
elif orient.lower().startswith('l'):
return dict((k, v.tolist()) for k, v in compat.iteritems(self))
elif orient.lower().startswith('sp'):
return {'index': self.index.tolist(),
'columns': self.columns.tolist(),
'data': self.values.tolist()}
elif orient.lower().startswith('s'):
return dict((k, v) for k, v in compat.iteritems(self))
elif orient.lower().startswith('r'):
return [dict((k, v) for k, v in zip(self.columns, row))
for row in self.values]
else:
raise ValueError("orient '%s' not understood" % orient)
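    # Usage sketch for to_dict (illustrative): for
    #   df = DataFrame({'x': [1, 2]}, index=['a', 'b'])
    # the orient options return, for example:
    #   df.to_dict()           -> {'x': {'a': 1, 'b': 2}}
    #   df.to_dict('list')     -> {'x': [1, 2]}
    #   df.to_dict('records')  -> [{'x': 1}, {'x': 2}]
    #   df.to_dict('split')    -> {'index': ['a', 'b'], 'columns': ['x'],
    #                              'data': [[1], [2]]}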
def to_gbq(self, destination_table, project_id=None, chunksize=10000,
verbose=True, reauth=False):
"""Write a DataFrame to a Google BigQuery table.
THIS IS AN EXPERIMENTAL LIBRARY
If the table exists, the dataframe will be written to the table using
the defined table schema and column types. For simplicity, this method
uses the Google BigQuery streaming API. The to_gbq method chunks data
into a default chunk size of 10,000. Failures return the complete error
response which can be quite long depending on the size of the insert.
There are several important limitations of the Google streaming API
which are detailed at:
https://developers.google.com/bigquery/streaming-data-into-bigquery.
Parameters
----------
dataframe : DataFrame
DataFrame to be written
destination_table : string
Name of table to be written, in the form 'dataset.tablename'
project_id : str
Google BigQuery Account project ID.
chunksize : int (default 10000)
Number of rows to be inserted in each chunk from the dataframe.
verbose : boolean (default True)
Show percentage complete
reauth : boolean (default False)
Force Google BigQuery to reauthenticate the user. This is useful
if multiple accounts are used.
"""
from pandas.io import gbq
return gbq.to_gbq(self, destination_table, project_id=project_id,
chunksize=chunksize, verbose=verbose,
reauth=reauth)
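    # Usage sketch for to_gbq (illustrative; needs Google BigQuery credentials,
    # so not runnable here):
    #   df.to_gbq('my_dataset.my_table', project_id='my-project-id')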
@classmethod
def from_records(cls, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
"""
Convert structured or record ndarray to DataFrame
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : string, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use
exclude : sequence, default None
Columns or fields to exclude
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns)
coerce_float : boolean, default False
            Attempt to convert values of non-string, non-numeric objects (like
            decimal.Decimal) to floating point, useful for SQL result sets
Returns
-------
df : DataFrame
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = _ensure_index(columns)
if com.is_iterator(data):
if nrows == 0:
return cls()
            try:
                first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, 'dtype') and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = _ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in compat.iteritems(data):
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = _reorder_arrays(arrays, arr_columns,
columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = _to_arrays(data, columns)
if columns is not None:
columns = _ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = _to_arrays(data, columns,
coerce_float=coerce_float)
arr_columns = _ensure_index(arr_columns)
if columns is not None:
columns = _ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if (isinstance(index, compat.string_types) or
not hasattr(index, "__iter__")):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
to_remove = [arr_columns.get_loc(field) for field in index]
result_index = MultiIndex.from_arrays(
[arrays[i] for i in to_remove], names=index)
exclude.update(index)
except Exception:
result_index = index
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = _arrays_to_mgr(arrays, arr_columns, result_index,
columns)
return cls(mgr)
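    # Usage sketch for from_records (illustrative):
    #   DataFrame.from_records([(1, 'a'), (2, 'b')], columns=['num', 'ch'])
    #   # -> two rows with columns 'num' and 'ch'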
def to_records(self, index=True, convert_datetime64=True):
"""
Convert DataFrame to record array. Index will be put in the
'index' field of the record array if requested
Parameters
----------
index : boolean, default True
Include index in resulting record array, stored in 'index' field
convert_datetime64 : boolean, default True
Whether to convert the index to datetime.datetime if it is a
DatetimeIndex
Returns
-------
y : recarray
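Examples
--------
A small illustration with made-up data; the resulting record dtype
depends on the column dtypes:
>>> from pandas import DataFrame
>>> df = DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
>>> rec = df.to_records(index=True)
>>> rec.dtype.names
('index', 'a', 'b')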
"""
if index:
if com.is_datetime64_dtype(self.index) and convert_datetime64:
ix_vals = [self.index.to_pydatetime()]
else:
if isinstance(self.index, MultiIndex):
# array of tuples to numpy cols. copy copy copy
ix_vals = lmap(np.array, zip(*self.index.values))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c].get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, MultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = 'level_%d' % count
count += 1
elif index_names[0] is None:
index_names = ['index']
names = index_names + lmap(str, self.columns)
else:
arrays = [self[c].get_values() for c in self.columns]
names = lmap(str, self.columns)
dtype = np.dtype([(x, v.dtype) for x, v in zip(names, arrays)])
return np.rec.fromarrays(arrays, dtype=dtype, names=names)
@classmethod
def from_items(cls, items, columns=None, orient='columns'):
"""
Convert (key, value) pairs to DataFrame. The keys will be the axis
index (usually the columns, but depends on the specified
orientation). The values should be arrays or Series.
Parameters
----------
items : sequence of (key, value) pairs
Values should be arrays or Series.
columns : sequence of column labels, optional
Must be passed if orient='index'.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the
input correspond to column labels, pass 'columns'
(default). Otherwise if the keys correspond to the index,
pass 'index'.
Returns
-------
frame : DataFrame
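Examples
--------
A minimal sketch with made-up data showing both orientations:
>>> from pandas import DataFrame
>>> DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])])
>>> DataFrame.from_items([('r0', [1, 2]), ('r1', [3, 4])],
...                      columns=['c0', 'c1'], orient='index')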
"""
keys, values = lzip(*items)
if orient == 'columns':
if columns is not None:
columns = _ensure_index(columns)
idict = dict(items)
if len(idict) < len(items):
if not columns.equals(_ensure_index(keys)):
raise ValueError('With non-unique item names, passed '
'columns must be identical')
arrays = values
else:
arrays = [idict[k] for k in columns if k in idict]
else:
columns = _ensure_index(keys)
arrays = values
return cls._from_arrays(arrays, columns, None)
elif orient == 'index':
if columns is None:
raise TypeError("Must pass columns with orient='index'")
keys = _ensure_index(keys)
arr = np.array(values, dtype=object).T
data = [lib.maybe_convert_objects(v) for v in arr]
return cls._from_arrays(data, columns, keys)
else: # pragma: no cover
raise ValueError("'orient' must be either 'columns' or 'index'")
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None):
mgr = _arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
@classmethod
def from_csv(cls, path, header=0, sep=',', index_col=0,
parse_dates=True, encoding=None, tupleize_cols=False,
infer_datetime_format=False):
"""
Read delimited file into DataFrame
Parameters
----------
path : string file path or file handle / StringIO
header : int, default 0
Row to use as the header (skip prior rows)
sep : string, default ','
Field delimiter
index_col : int or sequence, default 0
Column to use for index. If a sequence is given, a MultiIndex
is used. Different default from read_table
parse_dates : boolean, default True
Parse dates. Different default from read_table
tupleize_cols : boolean, default False
write multi_index columns as a list of tuples (if True)
or in the new, expanded format (if False)
infer_datetime_format : boolean, default False
If True and `parse_dates` is True for a column, try to infer the
datetime format based on the first datetime string. If the format
can be inferred, there often will be a large parsing speed-up.
Notes
-----
Preferable to use read_table for most general purposes but from_csv
makes for an easy roundtrip to and from file, especially with a
DataFrame of time series data
Returns
-------
y : DataFrame
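Examples
--------
A round-trip sketch; 'data.csv' is a hypothetical path:
>>> from pandas import DataFrame
>>> df = DataFrame({'a': [1, 2]}, index=['x', 'y'])
>>> df.to_csv('data.csv')
>>> df2 = DataFrame.from_csv('data.csv')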
"""
from pandas.io.parsers import read_table
return read_table(path, header=header, sep=sep,
parse_dates=parse_dates, index_col=index_col,
encoding=encoding, tupleize_cols=tupleize_cols,
infer_datetime_format=infer_datetime_format)
def to_sparse(self, fill_value=None, kind='block'):
"""
Convert to SparseDataFrame
Parameters
----------
fill_value : float, default NaN
kind : {'block', 'integer'}
Returns
-------
y : SparseDataFrame
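Examples
--------
A minimal sketch; values equal to `fill_value` (NaN by default)
become the "holes" of the sparse frame:
>>> import numpy as np
>>> from pandas import DataFrame
>>> df = DataFrame({'a': [1.0, np.nan, 3.0]})
>>> sdf = df.to_sparse()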
"""
from pandas.core.sparse import SparseDataFrame
return SparseDataFrame(self._series, index=self.index,
default_kind=kind,
default_fill_value=fill_value)
def to_panel(self):
"""
Transform long (stacked) format (DataFrame) into wide (3D, Panel)
format.
Currently the index of the DataFrame must be a 2-level MultiIndex. This
may be generalized later
Returns
-------
panel : Panel
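Examples
--------
A minimal sketch; the frame must carry a 2-level MultiIndex:
>>> from pandas import DataFrame, MultiIndex
>>> idx = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'),
...                               ('b', 'x'), ('b', 'y')])
>>> df = DataFrame({'v': [1, 2, 3, 4]}, index=idx)
>>> panel = df.to_panel()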
"""
from pandas.core.panel import Panel
# only support this kind for now
if (not isinstance(self.index, MultiIndex) or # pragma: no cover
len(self.index.levels) != 2):
raise NotImplementedError('Only 2-level MultiIndex are supported.')
if not self.index.is_unique:
raise ValueError("Can't convert non-uniquely indexed "
"DataFrame to Panel")
self._consolidate_inplace()
# minor axis must be sorted
if self.index.lexsort_depth < 2:
selfsorted = self.sortlevel(0)
else:
selfsorted = self
major_axis, minor_axis = selfsorted.index.levels
major_labels, minor_labels = selfsorted.index.labels
shape = len(major_axis), len(minor_axis)
# preserve names, if any
major_axis = major_axis.copy()
major_axis.name = self.index.names[0]
minor_axis = minor_axis.copy()
minor_axis.name = self.index.names[1]
# create new axes
new_axes = [selfsorted.columns, major_axis, minor_axis]
# create new manager
new_mgr = selfsorted._data.reshape_nd(axes=new_axes,
labels=[major_labels, minor_labels],
shape=shape,
ref_items=selfsorted.columns)
return Panel(new_mgr)
to_wide = deprecate('to_wide', to_panel)
def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
columns=None, header=True, index=True, index_label=None,
mode='w', encoding=None, quoting=None,
quotechar='"', line_terminator='\n', chunksize=None,
tupleize_cols=False, date_format=None, doublequote=True,
escapechar=None, decimal='.', **kwds):
r"""Write DataFrame to a comma-separated values (csv) file
Parameters
----------
path_or_buf : string or file handle, default None
File path or object, if None is provided the result is returned as
a string.
sep : character, default ","
Field delimiter for the output file.
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of strings is given it is assumed
to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R
nanRep : None
deprecated, use na_rep
mode : str
Python write mode, default 'w'
encoding : string, optional
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
line_terminator : string, default '\\n'
The newline character or character sequence to use in the output
file
quoting : optional constant from csv module
defaults to csv.QUOTE_MINIMAL
quotechar : string (length 1), default '"'
character used to quote fields
doublequote : boolean, default True
Control quoting of `quotechar` inside a field
escapechar : string (length 1), default None
character used to escape `sep` and `quotechar` when appropriate
chunksize : int or None
rows to write at a time
tupleize_cols : boolean, default False
write multi_index columns as a list of tuples (if True)
or in the new, expanded format (if False)
date_format : string, default None
Format string for datetime objects
decimal : string, default '.'
Character recognized as decimal separator. E.g. use ',' for European data
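Examples
--------
A minimal sketch; when `path_or_buf` is None the CSV text is returned
as a string, and 'out.csv' below is a hypothetical path:
>>> from pandas import DataFrame
>>> df = DataFrame({'a': [1, 2], 'b': ['x', 'y']})
>>> csv_text = df.to_csv()
>>> df.to_csv('out.csv', index=False)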
"""
formatter = fmt.CSVFormatter(self, path_or_buf,
line_terminator=line_terminator,
sep=sep, encoding=encoding,
quoting=quoting, na_rep=na_rep,
float_format=float_format, cols=columns,
header=header, index=index,
index_label=index_label, mode=mode,
chunksize=chunksize, quotechar=quotechar,
engine=kwds.get("engine"),
tupleize_cols=tupleize_cols,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar,
decimal=decimal)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf'):
"""
Write DataFrame to an Excel sheet
Parameters
----------
excel_writer : string or ExcelWriter object
File path or existing ExcelWriter
sheet_name : string, default 'Sheet1'
Name of sheet which will contain DataFrame
na_rep : string, default ''
Missing data representation
float_format : string, default None
Format string for floating point numbers
columns : sequence, optional
Columns to write
header : boolean or list of string, default True
Write out column names. If a list of strings is given it is
assumed to be aliases for the column names
index : boolean, default True
Write row names (index)
index_label : string or sequence, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
upper left cell row to dump data frame
startcol : int, default 0
upper left cell column to dump data frame
engine : string, default None
write engine to use - you can also set this via the options
``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : boolean, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : string, default None
encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : string, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel)
Notes
-----
If passing an existing ExcelWriter object, then the sheet will be added
to the existing workbook. This can be used to save different
DataFrames to one workbook:
>>> writer = ExcelWriter('output.xlsx')
>>> df1.to_excel(writer,'Sheet1')
>>> df2.to_excel(writer,'Sheet2')
>>> writer.save()
"""
from pandas.io.excel import ExcelWriter
need_save = False
if encoding is None:
encoding = 'ascii'
if isinstance(excel_writer, compat.string_types):
excel_writer = ExcelWriter(excel_writer, engine=engine)
need_save = True
formatter = fmt.ExcelFormatter(self,
na_rep=na_rep,
cols=columns,
header=header,
float_format=float_format,
index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep)
formatted_cells = formatter.get_formatted_cells()
excel_writer.write_cells(formatted_cells, sheet_name,
startrow=startrow, startcol=startcol)
if need_save:
excel_writer.save()
def to_stata(
self, fname, convert_dates=None, write_index=True, encoding="latin-1",
byteorder=None, time_stamp=None, data_label=None):
"""
Write the DataFrame out as a Stata binary dta file
Parameters
----------
fname : file path or buffer
Where to save the dta file.
convert_dates : dict
Dictionary mapping column of datetime types to the stata internal
format that you want to use for the dates. Options are
'tc', 'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either a
number or a name.
encoding : str
Default is latin-1. Note that Stata does not support unicode.
byteorder : str
Can be ">", "<", "little", or "big". The default is None which uses
`sys.byteorder`
Examples
--------
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> writer = StataWriter('./date_data_file.dta', data, {2 : 'tw'})
>>> writer.write_file()
"""
from pandas.io.stata import StataWriter
writer = StataWriter(fname, self, convert_dates=convert_dates,
encoding=encoding, byteorder=byteorder,
time_stamp=time_stamp, data_label=data_label,
write_index=write_index)
writer.write_file()
@Appender(fmt.docstring_to_string, indents=1)
def to_string(self, buf=None, columns=None, col_space=None, colSpace=None,
header=True, index=True, na_rep='NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
justify=None, line_width=None, max_rows=None, max_cols=None,
show_dimensions=False):
"""
Render a DataFrame to a console-friendly tabular output.
"""
if colSpace is not None: # pragma: no cover
warnings.warn("colSpace is deprecated, use col_space",
FutureWarning)
col_space = colSpace
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header, index=index,
line_width=line_width,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions)
formatter.to_string()
if buf is None:
result = formatter.buf.getvalue()
return result
@Appender(fmt.docstring_to_string, indents=1)
def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
header=True, index=True, na_rep='NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
justify=None, bold_rows=True, classes=None, escape=True,
max_rows=None, max_cols=None, show_dimensions=False):
"""
Render a DataFrame as an HTML table.
`to_html`-specific options:
bold_rows : boolean, default True
Make the row labels bold in the output
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table
escape : boolean, default True
Convert the characters <, >, and & to HTML-safe sequences.
max_rows : int, optional
Maximum number of rows to show before truncating. If None, show
all.
max_cols : int, optional
Maximum number of columns to show before truncating. If None, show
all.
"""
if colSpace is not None: # pragma: no cover
warnings.warn("colSpace is deprecated, use col_space",
FutureWarning)
col_space = colSpace
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header, index=index,
bold_rows=bold_rows,
escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions)
formatter.to_html(classes=classes)
if buf is None:
return formatter.buf.getvalue()
@Appender(fmt.docstring_to_string, indents=1)
def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None,
header=True, index=True, na_rep='NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
bold_rows=True, longtable=False, escape=True):
"""
Render a DataFrame to a tabular environment table. You can splice
this into a LaTeX document. Requires \\usepackage{booktabs}.
`to_latex`-specific options:
bold_rows : boolean, default True
Make the row labels bold in the output
longtable : boolean, default False
Use a longtable environment instead of tabular. Requires adding
a \\usepackage{longtable} to your LaTeX preamble.
escape : boolean, default True
When set to False prevents from escaping latex special
characters in column names.
"""
if colSpace is not None: # pragma: no cover
warnings.warn("colSpace is deprecated, use col_space",
FutureWarning)
col_space = colSpace
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
header=header, index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape)
formatter.to_latex(longtable=longtable)
if buf is None:
return formatter.buf.getvalue()
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None):
"""
Concise summary of a DataFrame.
Parameters
----------
verbose : {None, True, False}, optional
Whether to print the full summary.
None follows the `display.max_info_columns` setting.
True or False overrides the `display.max_info_columns` setting.
buf : writable buffer, defaults to sys.stdout
max_cols : int, default None
Determines whether full summary or short summary is printed.
None follows the `display.max_info_columns` setting.
memory_usage : boolean, default None
Specifies whether total memory usage of the DataFrame
elements (including index) should be displayed. None follows
the `display.memory_usage` setting. True or False overrides
the `display.memory_usage` setting. Memory usage is shown in
human-readable units (base-2 representation).
null_counts : boolean, default None
Whether to show the non-null counts
If None, then only show if the frame is smaller than max_info_rows and max_info_columns.
If True, always show counts.
If False, never show counts.
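Examples
--------
A minimal sketch; the summary is written to `buf` (sys.stdout by
default) rather than returned:
>>> from pandas import DataFrame
>>> df = DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
>>> df.info()
>>> df.info(verbose=False, memory_usage=True)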
"""
from pandas.core.format import _put_lines
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(str(type(self)))
lines.append(self.index.summary())
if len(self.columns) == 0:
lines.append('Empty %s' % type(self).__name__)
_put_lines(buf, lines)
return
cols = self.columns
# hack
if max_cols is None:
max_cols = get_option(
'display.max_info_columns', len(self.columns) + 1)
max_rows = get_option('display.max_info_rows', len(self) + 1)
if null_counts is None:
show_counts = ((len(self.columns) <= max_cols) and
(len(self) < max_rows))
else:
show_counts = null_counts
exceeds_info_cols = len(self.columns) > max_cols
def _verbose_repr():
lines.append('Data columns (total %d columns):' %
len(self.columns))
space = max([len(com.pprint_thing(k)) for k in self.columns]) + 4
counts = None
tmpl = "%s%s"
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError('Columns must equal counts (%d != %d)' %
(len(cols), len(counts)))
tmpl = "%s non-null %s"
dtypes = self.dtypes
for i, col in enumerate(self.columns):
dtype = dtypes[col]
col = com.pprint_thing(col)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(_put_str(col, space) +
tmpl % (count, dtype))
def _non_verbose_repr():
lines.append(self.columns.summary(name='Columns'))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:
if num < 1024.0:
return "%3.1f%s %s" % (num, size_qualifier, x)
num /= 1024.0
return "%3.1f%s %s" % (num, size_qualifier, 'PB')
if verbose:
_verbose_repr()
elif verbose is False:  # specifically set to False, not necessarily None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
counts = self.get_dtype_counts()
dtypes = ['%s(%d)' % k for k in sorted(compat.iteritems(counts))]
lines.append('dtypes: %s' % ', '.join(dtypes))
if memory_usage is None:
memory_usage = get_option('display.memory_usage')
if memory_usage: # append memory usage of df to display
# size_qualifier is just a best effort; not guaranteed to catch all
# cases (e.g., it misses categorical data even with object
# categories)
size_qualifier = ('+' if 'object' in counts
or is_object_dtype(self.index) else '')
mem_usage = self.memory_usage(index=True).sum()
lines.append("memory usage: %s\n" %
_sizeof_fmt(mem_usage, size_qualifier))
_put_lines(buf, lines)
def memory_usage(self, index=False):
"""Memory usage of DataFrame columns.
Parameters
----------
index : bool
Specifies whether to include memory usage of the DataFrame's
index in the returned Series. If `index=True` (default is False),
the first entry of the returned Series, labelled `Index`, is the
memory usage of the index itself.
Returns
-------
sizes : Series
A series with column names as index and memory usage of
columns with units of bytes.
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array.
See Also
--------
numpy.ndarray.nbytes
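Examples
--------
A minimal sketch with made-up data; the reported byte counts depend
on the platform and dtypes:
>>> from pandas import DataFrame
>>> df = DataFrame({'a': [1, 2, 3], 'b': [1.0, 2.0, 3.0]})
>>> df.memory_usage()
>>> df.memory_usage(index=True)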
"""
result = Series([ c.values.nbytes for col, c in self.iteritems() ],
index=self.columns)
if index:
result = Series(self.index.nbytes,
index=['Index']).append(result)
return result
def transpose(self):
"""Transpose index and columns"""
return super(DataFrame, self).transpose(1, 0)
T = property(transpose)
#----------------------------------------------------------------------
# Picklability
# legacy pickle formats
def _unpickle_frame_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
if len(state) == 2: # pragma: no cover
series, idx = state
columns = sorted(series)
else:
series, cols, idx = state
columns = _unpickle_array(cols)
index = _unpickle_array(idx)
self._data = self._init_dict(series, index, columns, None)
def _unpickle_matrix_compat(self, state): # pragma: no cover
from pandas.core.common import _unpickle_array
# old unpickling
(vals, idx, cols), object_state = state
index = _unpickle_array(idx)
dm = DataFrame(vals, index=index, columns=_unpickle_array(cols),
copy=False)
if object_state is not None:
ovals, _, ocols = object_state
objects = DataFrame(ovals, index=index,
columns=_unpickle_array(ocols),
copy=False)
dm = dm.join(objects)
self._data = dm._data
#----------------------------------------------------------------------
#----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
"""
if takeable:
series = self._iget_item_cache(col)
return _maybe_box_datetimelike(series.values[index])
series = self._get_item_cache(col)
engine = self.index._engine
return engine.get_value(series.get_values(), index)
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Returns
-------
frame : DataFrame
If label pair is contained, will be reference to calling DataFrame,
otherwise a new object
"""
try:
if takeable is True:
series = self._iget_item_cache(col)
return series.set_value(index, value, takeable=True)
series = self._get_item_cache(col)
engine = self.index._engine
engine.set_value(series.values, index, value)
return self
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
self.loc[index, col] = value
self._item_cache.pop(col, None)
return self
def irow(self, i, copy=False):
return self._ixs(i, axis=0)
def icol(self, i):
return self._ixs(i, axis=1)
def _ixs(self, i, axis=0):
"""
i : int, slice, or sequence of integers
axis : int
"""
# irow
if axis == 0:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
if isinstance(i, slice):
return self[i]
else:
label = self.index[i]
if isinstance(label, Index):
# a location index by definition
result = self.take(i, axis=axis)
copy=True
else:
new_values = self._data.fast_xs(i)
# if we are a copy, mark as such
copy = isinstance(new_values,np.ndarray) and new_values.base is None
result = Series(new_values, index=self.columns,
name=self.index[i], dtype=new_values.dtype)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
"""
Notes
-----
If slice passed, the resulting data will be a view
"""
label = self.columns[i]
if isinstance(i, slice):
# need to return view
lab_slice = slice(label[0], label[-1])
return self.ix[:, lab_slice]
else:
label = self.columns[i]
if isinstance(label, Index):
return self.take(i, axis=1, convert=True)
# if the values returned are not the same length
# as the index (iow a not found value), iget returns
# a 0-len ndarray. This is effectively catching
# a numpy error (as numpy should really raise)
values = self._data.iget(i)
if not len(values):
values = np.array([np.nan] * len(self.index), dtype=object)
result = self._constructor_sliced.from_array(
values, index=self.index,
name=label, fastpath=True)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def iget_value(self, i, j):
return self.iat[i, j]
def __getitem__(self, key):
# shortcut if we are an actual column
is_mi_columns = isinstance(self.columns, MultiIndex)
try:
if key in self.columns and not is_mi_columns:
return self._getitem_column(key)
except:
pass
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._getitem_slice(indexer)
if isinstance(key, (Series, np.ndarray, Index, list)):
# either boolean or fancy integer index
return self._getitem_array(key)
elif isinstance(key, DataFrame):
return self._getitem_frame(key)
elif is_mi_columns:
return self._getitem_multilevel(key)
else:
return self._getitem_column(key)
def _getitem_column(self, key):
""" return the actual column """
# get column
if self.columns.is_unique:
return self._get_item_cache(key)
# duplicate columns & possibly reduce dimensionality
result = self._constructor(self._data.get(key))
if result.columns.is_unique:
result = result[key]
return result
def _getitem_slice(self, key):
return self._slice(key, axis=0)
def _getitem_array(self, key):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn("Boolean Series key will be reindexed to match "
"DataFrame index.", UserWarning)
elif len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d.' %
(len(key), len(self.index)))
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self.take(indexer, axis=0, convert=False)
else:
indexer = self.ix._convert_to_indexer(key, axis=1)
return self.take(indexer, axis=1, convert=True)
def _getitem_multilevel(self, key):
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = DataFrame(new_values, index=self.index,
columns=result_columns).__finalize__(self)
if len(result.columns) == 1:
top = result.columns[0]
if ((type(top) == str and top == '') or
(type(top) == tuple and top[0] == '')):
result = result['']
if isinstance(result, Series):
result = Series(result, index=self.index, name=key)
result._set_is_copy(self)
return result
else:
return self._get_item_cache(key)
def _getitem_frame(self, key):
if key.values.dtype != np.bool_:
raise ValueError('Must pass DataFrame with boolean values only')
return self.where(key)
def query(self, expr, **kwargs):
"""Query the columns of a frame with a boolean expression.
.. versionadded:: 0.13
Parameters
----------
expr : string
The query string to evaluate. You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
kwargs : dict
See the documentation for :func:`pandas.eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
q : DataFrame
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`pandas.eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
See Also
--------
pandas.eval
DataFrame.eval
Examples
--------
>>> from numpy.random import randn
>>> from pandas import DataFrame
>>> df = DataFrame(randn(10, 2), columns=list('ab'))
>>> df.query('a > b')
>>> df[df.a > df.b] # same result as the previous expression
"""
kwargs['level'] = kwargs.pop('level', 0) + 1
res = self.eval(expr, **kwargs)
try:
return self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
return self[res]
def eval(self, expr, **kwargs):
"""Evaluate an expression in the context of the calling DataFrame
instance.
Parameters
----------
expr : string
The expression string to evaluate.
kwargs : dict
See the documentation for :func:`~pandas.eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ret : ndarray, scalar, or pandas object
See Also
--------
pandas.DataFrame.query
pandas.eval
Notes
-----
For more details see the API documentation for :func:`~pandas.eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> from numpy.random import randn
>>> from pandas import DataFrame
>>> df = DataFrame(randn(10, 2), columns=list('ab'))
>>> df.eval('a + b')
>>> df.eval('c = a + b')
"""
resolvers = kwargs.pop('resolvers', None)
kwargs['level'] = kwargs.pop('level', 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
resolvers = dict(self.iteritems()), index_resolvers
kwargs['target'] = self
kwargs['resolvers'] = kwargs.get('resolvers', ()) + resolvers
return _eval(expr, **kwargs)
def select_dtypes(self, include=None, exclude=None):
"""Return a subset of a DataFrame including/excluding columns based on
their ``dtype``.
Parameters
----------
include, exclude : list-like
A list of dtypes or strings to be included/excluded. You must pass
in a non-empty sequence for at least one of these.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
TypeError
* If either of ``include`` or ``exclude`` is not a sequence
Returns
-------
subset : DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Notes
-----
* To select all *numeric* types use the numpy dtype ``numpy.number``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select Pandas categorical dtypes, use 'category'
Examples
--------
>>> df = pd.DataFrame({'a': np.random.randn(6).astype('f4'),
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 0.3962 True 1
1 0.1459 False 2
2 0.2623 True 1
3 0.0764 False 2
4 -0.9703 True 1
5 -1.2094 False 2
>>> df.select_dtypes(include=['float64'])
c
0 1
1 2
2 1
3 2
4 1
5 2
>>> df.select_dtypes(exclude=['floating'])
b
0 True
1 False
2 True
3 False
4 True
5 False
"""
include, exclude = include or (), exclude or ()
if not (com.is_list_like(include) and com.is_list_like(exclude)):
raise TypeError('include and exclude must both be non-string'
' sequences')
selection = tuple(map(frozenset, (include, exclude)))
if not any(selection):
raise ValueError('at least one of include or exclude must be '
'nonempty')
# convert the myriad valid dtypes object to a single representation
include, exclude = map(lambda x:
frozenset(map(com._get_dtype_from_object, x)),
selection)
for dtypes in (include, exclude):
com._invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError('include and exclude overlap on %s'
% (include & exclude))
# empty include/exclude -> defaults to True
# three cases (we've already raised if both are empty)
# case 1: empty include, nonempty exclude
# we have True, True, ... True for include, same for exclude
# in the loop below we get the excluded
# and when we call '&' below we get only the excluded
# case 2: nonempty include, empty exclude
# same as case 1, but with include
# case 3: both nonempty
# the "union" of the logic of case 1 and case 2:
# we get the included and excluded, and return their logical and
include_these = Series(not bool(include), index=self.columns)
exclude_these = Series(not bool(exclude), index=self.columns)
def is_dtype_instance_mapper(column, dtype):
return column, functools.partial(issubclass, dtype.type)
for column, f in itertools.starmap(is_dtype_instance_mapper,
self.dtypes.iteritems()):
if include: # checks for the case of empty include or exclude
include_these[column] = any(map(f, include))
if exclude:
exclude_these[column] = not any(map(f, exclude))
dtype_indexer = include_these & exclude_these
return self.loc[com._get_info_slice(self, dtype_indexer)]
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
""" provide boxed values for a column """
return self._constructor_sliced.from_array(values, index=self.index,
name=items, fastpath=True)
def __setitem__(self, key, value):
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._setitem_slice(indexer, value)
if isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
elif isinstance(key, DataFrame):
self._setitem_frame(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key, value):
self._check_setitem_copy()
self.ix._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError('Item wrong length %d instead of %d!' %
(len(key), len(self.index)))
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.ix._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError('Columns must be same length as key')
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
indexer = self.ix._convert_to_indexer(key, axis=1)
self._check_setitem_copy()
self.ix._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if key.values.dtype != np.bool_:
raise TypeError('Must pass DataFrame with boolean values only')
self._check_inplace_setting(value)
self._check_setitem_copy()
self.where(-key, value, inplace=True)
def _ensure_valid_index(self, value):
"""
ensure that if we don't have an index, that we can create one from the
passed value
"""
if not len(self.index):
# GH5632, make sure that we are a Series convertible
if is_list_like(value):
try:
value = Series(value)
except:
pass
if not isinstance(value, Series):
raise ValueError('Cannot set a frame with no defined index '
'and a value that cannot be converted to a '
'Series')
self._data = self._data.reindex_axis(value.index.copy(), axis=1,
fill_value=np.nan)
# we are a scalar
# noop
else:
pass
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If series is a numpy-array (not a Series/TimeSeries), it must be the
same length as the DataFrame's index or an error will be thrown.
Series/TimeSeries will be conformed to the DataFrame's index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def insert(self, loc, column, value, allow_duplicates=False):
"""
Insert column into DataFrame at specified location.
If `allow_duplicates` is False, raises Exception if column
is already contained in the DataFrame.
Parameters
----------
loc : int
Must have 0 <= loc <= len(columns)
column : object
value : int, Series, or array-like
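Examples
--------
A minimal sketch with made-up data; the call modifies the frame in
place:
>>> from pandas import DataFrame
>>> df = DataFrame({'a': [1, 2], 'c': [5, 6]})
>>> df.insert(1, 'b', [3, 4])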
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value)
self._data.insert(
loc, column, value, allow_duplicates=allow_duplicates)
def assign(self, **kwargs):
"""
Assign new columns to a DataFrame, returning a new object
(a copy) with all the original columns in addition to the new ones.
.. versionadded:: 0.16.0
Parameters
----------
kwargs : keyword, value pairs
keywords are the column names. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. If the values are
not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
df : DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Since ``kwargs`` is a dictionary, the order of your
arguments may not be preserved, and so the order of the
new columns is not well defined. Assigning multiple
columns within the same ``assign`` is possible, but you cannot
reference other columns created within the same ``assign`` call.
Examples
--------
>>> df = DataFrame({'A': range(1, 11), 'B': np.random.randn(10)})
Where the value is a callable, evaluated on `df`:
>>> df.assign(ln_A = lambda x: np.log(x.A))
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
Where the value already exists and is inserted:
>>> newcol = np.log(df['A'])
>>> df.assign(ln_A=newcol)
A B ln_A
0 1 0.426905 0.000000
1 2 -0.780949 0.693147
2 3 -0.418711 1.098612
3 4 -0.269708 1.386294
4 5 -0.274002 1.609438
5 6 -0.500792 1.791759
6 7 1.649697 1.945910
7 8 -1.495604 2.079442
8 9 0.549296 2.197225
9 10 -0.758542 2.302585
"""
data = self.copy()
# do all calculations first...
results = {}
for k, v in kwargs.items():
if callable(v):
results[k] = v(data)
else:
results[k] = v
# ... and then assign
for k, v in results.items():
data[k] = v
return data
def _sanitize_column(self, key, value):
# Need to make sure new columns (which go into the BlockManager as new
# blocks) are always copied
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value.values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index).values
except Exception as e:
# duplicate axis
if not value.index.is_unique:
raise e
# other
raise TypeError('incompatible index of inserted column '
'with frame index')
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, MultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex_axis(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, Categorical):
value = value.copy()
elif (isinstance(value, Index) or is_sequence(value)):
from pandas.core.series import _sanitize_index
# turn me into an ndarray
value = _sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = com._possibly_convert_platform(value)
else:
value = com._asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = _possibly_infer_to_datetimelike(value.ravel()).reshape(value.shape)
else:
# upcast the scalar
dtype, value = _infer_dtype_from_scalar(value)
value = np.repeat(value, len(self.index)).astype(dtype)
value = com._possibly_cast_to_datetime(value, dtype)
# return unconsolidatables directly
if isinstance(value, (Categorical, SparseArray)):
return value
# broadcast across multiple columns if necessary
if key in self.columns and value.ndim == 1:
if not self.columns.is_unique or isinstance(self.columns,
MultiIndex):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
@property
def _series(self):
result = {}
for idx, item in enumerate(self.columns):
result[item] = Series(self._data.iget(idx), index=self.index,
name=item)
return result
def lookup(self, row_labels, col_labels):
"""Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup
col_labels : sequence
The column labels to use for lookup
Notes
-----
Akin to::
result = []
for row, col in zip(row_labels, col_labels):
result.append(df.get_value(row, col))
Returns
-------
values : ndarray
The found values
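Examples
--------
A minimal sketch with made-up data:
>>> from pandas import DataFrame
>>> df = DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> vals = df.lookup([0, 1], ['a', 'b'])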
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError('Row labels must have same size as column labels')
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError('One or more row labels was not found')
if (cidx == -1).any():
raise KeyError('One or more column labels was not found')
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype='O')
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self.get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
#----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, method, fill_value, copy):
frame = self
columns = axes['columns']
if columns is not None:
frame = frame._reindex_columns(columns, copy, level, fill_value,
limit)
index = axes['index']
if index is not None:
frame = frame._reindex_index(index, method, copy, level,
fill_value, limit)
return frame
def _reindex_index(self, new_index, method, copy, level, fill_value=NA,
limit=None):
new_index, indexer = self.index.reindex(new_index, method, level,
limit=limit)
return self._reindex_with_indexers({0: [new_index, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_columns(self, new_columns, copy, level, fill_value=NA,
limit=None):
new_columns, indexer = self.columns.reindex(new_columns, level=level,
limit=limit)
return self._reindex_with_indexers({1: [new_columns, indexer]},
copy=copy, fill_value=fill_value,
allow_dups=False)
def _reindex_multi(self, axes, copy, fill_value):
""" we are guaranteed non-Nones in the axes! """
new_index, row_indexer = self.index.reindex(axes['index'])
new_columns, col_indexer = self.columns.reindex(axes['columns'])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = com.take_2d_multi(self.values, indexer,
fill_value=fill_value)
return self._constructor(new_values, index=new_index,
columns=new_columns)
else:
return self._reindex_with_indexers({0: [new_index, row_indexer],
1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value)
@Appender(_shared_docs['reindex'] % _shared_doc_kwargs)
def reindex(self, index=None, columns=None, **kwargs):
return super(DataFrame, self).reindex(index=index, columns=columns,
**kwargs)
@Appender(_shared_docs['reindex_axis'] % _shared_doc_kwargs)
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
return super(DataFrame, self).reindex_axis(labels=labels, axis=axis,
method=method, level=level,
copy=copy, limit=limit,
fill_value=fill_value)
@Appender(_shared_docs['rename'] % _shared_doc_kwargs)
def rename(self, index=None, columns=None, **kwargs):
return super(DataFrame, self).rename(index=index, columns=columns,
**kwargs)
def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
"""
Set the DataFrame index (row labels) using one or more existing
columns. By default yields a new object.
Parameters
----------
keys : column label or list of column labels / arrays
drop : boolean, default True
Delete columns to be used as the new index
append : boolean, default False
Whether to append columns to existing index
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
verify_integrity : boolean, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method
Examples
--------
>>> indexed_df = df.set_index(['A', 'B'])
>>> indexed_df2 = df.set_index(['A', [0, 1, 2, 0, 1, 2]])
>>> indexed_df3 = df.set_index([[0, 1, 2, 0, 1, 2]])
Returns
-------
dataframe : DataFrame
"""
if not isinstance(keys, list):
keys = [keys]
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index.get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
for col in keys:
if isinstance(col, MultiIndex):
# append all but the last column so we don't have to modify
# the end of this loop
for n in range(col.nlevels - 1):
arrays.append(col.get_level_values(n))
level = col.get_level_values(col.nlevels - 1)
names.extend(col.names)
elif isinstance(col, Series):
level = col.values
names.append(col.name)
elif isinstance(col, Index):
level = col
names.append(col.name)
elif isinstance(col, (list, np.ndarray, Index)):
level = col
names.append(None)
else:
level = frame[col].values
names.append(col)
if drop:
to_remove.append(col)
arrays.append(level)
index = MultiIndex.from_arrays(arrays, names=names)
if verify_integrity and not index.is_unique:
duplicates = index.get_duplicates()
raise ValueError('Index has duplicate keys: %s' % duplicates)
for c in to_remove:
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""
For DataFrame with multi-level index, return new DataFrame with
labeling information in the columns under the index names, defaulting
to 'level_0', 'level_1', etc. if any are None. For a standard index,
the index name will be used (if set), otherwise a default 'index' or
'level_0' (if 'index' is already taken) will be used.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default
drop : boolean, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : boolean, default False
Modify the DataFrame in place (do not create a new object)
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
resetted : DataFrame
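Examples
--------
A minimal sketch; `drop=True` discards the old index instead of
inserting it as a column:
>>> from pandas import DataFrame
>>> df = DataFrame({'a': [1, 2, 3]}, index=['x', 'y', 'z'])
>>> df.reset_index()
>>> df.reset_index(drop=True)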
"""
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
if isinstance(index, PeriodIndex):
values = index.asobject.values
elif (isinstance(index, DatetimeIndex) and
index.tz is not None):
values = index.asobject
else:
values = index.values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
values = values.take(labels)
if mask.any():
values, changed = com._maybe_upcast_putmask(values,
mask, np.nan)
return values
new_index = np.arange(len(new_obj),dtype='int64')
if isinstance(self.index, MultiIndex):
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < len(self.index.levels):
new_index = self.index.droplevel(level)
if not drop:
names = self.index.names
zipped = lzip(self.index.levels, self.index.labels)
multi_col = isinstance(self.columns, MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(zipped))):
col_name = names[i]
if col_name is None:
col_name = 'level_%d' % i
if multi_col:
if col_fill is None:
col_name = tuple([col_name] *
self.columns.nlevels)
else:
name_lst = [col_fill] * self.columns.nlevels
lev_num = self.columns._get_level_number(col_level)
name_lst[lev_num] = col_name
col_name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
if level is None or i in level:
new_obj.insert(0, col_name, level_values)
elif not drop:
name = self.index.name
if name is None or name == 'index':
name = 'index' if 'index' not in self else 'level_0'
if isinstance(self.columns, MultiIndex):
if col_fill is None:
name = tuple([name] * self.columns.nlevels)
else:
name_lst = [col_fill] * self.columns.nlevels
lev_num = self.columns._get_level_number(col_level)
name_lst[lev_num] = name
name = tuple(name_lst)
values = _maybe_casted_values(self.index)
new_obj.insert(0, name, values)
new_obj.index = new_index
if not inplace:
return new_obj
#----------------------------------------------------------------------
# Reindex-based selection methods
def dropna(self, axis=0, how='any', thresh=None, subset=None,
inplace=False):
"""
Return object with labels on given axis omitted where any or all of
the data are missing (depending on the `how` argument)
Parameters
----------
axis : {0, 1}, or tuple/list thereof
Pass tuple or list to drop on multiple axes
how : {'any', 'all'}
* any : if any NA values are present, drop that label
* all : if all values are NA, drop that label
thresh : int, default None
int value : require that many non-NA values
subset : array-like
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include
inplace : boolean, default False
If True, do operation inplace and return None.
Returns
-------
dropped : DataFrame
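Examples
--------
A minimal sketch with made-up data:
>>> import numpy as np
>>> from pandas import DataFrame
>>> df = DataFrame({'a': [1.0, np.nan, 3.0],
...                 'b': [np.nan, np.nan, 6.0]})
>>> df.dropna()                # drop rows with any NA value
>>> df.dropna(how='all')       # drop rows where every value is NA
>>> df.dropna(thresh=2)        # keep rows with at least 2 non-NA values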
"""
if isinstance(axis, (tuple, list)):
result = self
for ax in axis:
result = result.dropna(how=how, thresh=thresh,
subset=subset, axis=ax)
else:
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check,subset)))
agg_obj = self.take(indices,axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == 'any':
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == 'all':
mask = count > 0
else:
if how is not None:
raise ValueError('invalid how option: %s' % how)
else:
raise TypeError('must specify how or thresh')
result = self.take(mask.nonzero()[0], axis=axis, convert=False)
if inplace:
self._update_inplace(result)
else:
return result
@deprecate_kwarg(old_arg_name='cols', new_arg_name='subset')
def drop_duplicates(self, subset=None, take_last=False, inplace=False):
"""
Return DataFrame with duplicate rows removed, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
take_last : boolean, default False
Keep the last observed row in each set of duplicated rows. Defaults
to keeping the first observed row
inplace : boolean, default False
Whether to drop duplicates in place or to return a copy
cols : kwargs only argument of subset [deprecated]
Returns
-------
deduplicated : DataFrame
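Examples
--------
A minimal sketch with made-up data:
>>> from pandas import DataFrame
>>> df = DataFrame({'a': [1, 1, 2], 'b': ['x', 'x', 'y']})
>>> df.drop_duplicates()
>>> df.drop_duplicates(subset=['a'], take_last=True)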
"""
duplicated = self.duplicated(subset, take_last=take_last)
if inplace:
inds, = (-duplicated).nonzero()
new_data = self._data.take(inds)
self._update_inplace(new_data)
else:
return self[-duplicated]
@deprecate_kwarg(old_arg_name='cols', new_arg_name='subset')
def duplicated(self, subset=None, take_last=False):
"""
Return boolean Series denoting duplicate rows, optionally only
considering certain columns
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns
take_last : boolean, default False
For a set of distinct duplicate rows, flag all but the last row as
duplicated. Default is for all but the first row to be flagged
cols : kwargs only argument of subset [deprecated]
Returns
-------
duplicated : Series
"""
from pandas.core.groupby import get_group_index
from pandas.core.algorithms import factorize
from pandas.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
def f(vals):
labels, shape = factorize(vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
return labels.astype('i8',copy=False), len(shape)
if subset is None:
subset = self.columns
elif not np.iterable(subset) or \
isinstance(subset, compat.string_types) or \
isinstance(subset, tuple) and subset in self.columns:
subset = subset,
vals = (self[col].values for col in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, take_last), index=self.index)
#----------------------------------------------------------------------
# Sorting
def sort(self, columns=None, axis=0, ascending=True,
inplace=False, kind='quicksort', na_position='last'):
"""
Sort DataFrame either by labels (along either axis) or by the values in
column(s)
Parameters
----------
columns : object
Column name(s) in frame. Accepts a column name or a list
for a nested sort. A tuple will be interpreted as the
levels of a multi-index.
ascending : boolean or list, default True
Sort ascending vs. descending. Specify list for multiple sort
orders
axis : {0, 1}
Sort index/rows versus columns
inplace : boolean, default False
Sort the DataFrame without creating a new instance
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
This option is only applied when sorting on a single column or label.
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
Examples
--------
>>> result = df.sort(['A', 'B'], ascending=[1, 0])
Returns
-------
sorted : DataFrame
"""
return self.sort_index(by=columns, axis=axis, ascending=ascending,
inplace=inplace, kind=kind, na_position=na_position)
def sort_index(self, axis=0, by=None, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
"""
Sort DataFrame either by labels (along either axis) or by the values in
a column
Parameters
----------
axis : {0, 1}
Sort index/rows versus columns
by : object
Column name(s) in frame. Accepts a column name or a list
for a nested sort. A tuple will be interpreted as the
levels of a multi-index.
ascending : boolean or list, default True
Sort ascending vs. descending. Specify list for multiple sort
orders
inplace : boolean, default False
Sort the DataFrame without creating a new instance
na_position : {'first', 'last'} (optional, default='last')
'first' puts NaNs at the beginning
'last' puts NaNs at the end
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
This option is only applied when sorting on a single column or label.
Examples
--------
>>> result = df.sort_index(by=['A', 'B'], ascending=[True, False])
Returns
-------
sorted : DataFrame
"""
from pandas.core.groupby import _lexsort_indexer, _nargsort
axis = self._get_axis_number(axis)
if axis not in [0, 1]: # pragma: no cover
raise AssertionError('Axis must be 0 or 1, got %s' % str(axis))
labels = self._get_axis(axis)
if by is not None:
if axis != 0:
raise ValueError('When sorting by column, axis must be 0 '
'(rows)')
if not isinstance(by, list):
by = [by]
if com.is_sequence(ascending) and len(by) != len(ascending):
raise ValueError('Length of ascending (%d) != length of by'
' (%d)' % (len(ascending), len(by)))
if len(by) > 1:
def trans(v):
if com.needs_i8_conversion(v):
return v.view('i8')
return v
keys = []
for x in by:
k = self[x].values
if k.ndim == 2:
raise ValueError('Cannot sort by duplicate column %s' % str(x))
keys.append(trans(k))
indexer = _lexsort_indexer(keys, orders=ascending,
na_position=na_position)
indexer = com._ensure_platform_int(indexer)
else:
by = by[0]
k = self[by].values
if k.ndim == 2:
# try to be helpful
if isinstance(self.columns, MultiIndex):
raise ValueError('Cannot sort by column %s in a multi-index;'
' you need to explicitly provide all the levels'
% str(by))
raise ValueError('Cannot sort by duplicate column %s'
% str(by))
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = _nargsort(k, kind=kind, ascending=ascending,
na_position=na_position)
elif isinstance(labels, MultiIndex):
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
if not labels.is_lexsorted():
labels = MultiIndex.from_tuples(labels.values)
indexer = _lexsort_indexer(labels.labels, orders=ascending,
na_position=na_position)
indexer = com._ensure_platform_int(indexer)
else:
indexer = _nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
bm_axis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer, axis=bm_axis,
convert=False, verify=False)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def sortlevel(self, level=0, axis=0, ascending=True,
inplace=False, sort_remaining=True):
"""
Sort multilevel index by chosen axis and primary level. Data will be
lexicographically sorted by the chosen level followed by the other
levels (in order)
Parameters
----------
level : int
axis : {0, 1}
ascending : boolean, default True
inplace : boolean, default False
Sort the DataFrame without creating a new instance
sort_remaining : boolean, default True
Sort by the other levels too.
Returns
-------
sorted : DataFrame
"""
axis = self._get_axis_number(axis)
the_axis = self._get_axis(axis)
if not isinstance(the_axis, MultiIndex):
raise TypeError('can only sort by level with a hierarchical index')
new_axis, indexer = the_axis.sortlevel(level, ascending=ascending,
sort_remaining=sort_remaining)
if self._is_mixed_type and not inplace:
ax = 'index' if axis == 0 else 'columns'
if new_axis.is_unique:
return self.reindex(**{ax: new_axis})
else:
return self.take(indexer, axis=axis, convert=False)
bm_axis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer, axis=bm_axis,
convert=False, verify=False)
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
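# Illustrative sketch for ``sortlevel`` (hypothetical MultiIndexed frame):
# >>> idx = MultiIndex.from_tuples([('b', 2), ('a', 1), ('a', 2)])
# >>> df = DataFrame({'v': [1, 2, 3]}, index=idx)
# >>> df.sortlevel(0)                  # order rows by the first index level
# >>> df.sortlevel(1, ascending=False) # or by the second level, descending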
def swaplevel(self, i, j, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, string (can be mixed)
Level of index to be swapped. Can pass level name as string.
Returns
-------
swapped : type of caller (new object)
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result
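# Illustrative sketch for ``swaplevel`` (assumes a hypothetical frame ``df``
# whose index has two levels named 'outer' and 'inner'):
# >>> df.swaplevel(0, 1)               # level order becomes (inner, outer)
# >>> df.swaplevel('outer', 'inner')   # same swap, referenced by level name
# >>> df.swaplevel(0, 1, axis=1)       # swap column levels instead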
def reorder_levels(self, order, axis=0):
"""
Rearrange index levels using input order.
May not drop or duplicate levels
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
type of caller (new object)
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis),
MultiIndex): # pragma: no cover
raise TypeError('Can only reorder levels on a hierarchical axis.')
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result
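# Illustrative sketch for ``reorder_levels`` (hypothetical frame with a
# three-level index named ['year', 'month', 'id']):
# >>> df.reorder_levels([2, 0, 1])                 # reference levels by position
# >>> df.reorder_levels(['id', 'year', 'month'])   # or by name; same result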
#----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other, func, fill_value=None, level=None):
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
def _arith_op(left, right):
if fill_value is not None:
left_mask = isnull(left)
right_mask = isnull(right)
left = left.copy()
right = right.copy()
# one but not both
mask = left_mask ^ right_mask
left[left_mask & mask] = fill_value
right[right_mask & mask] = fill_value
return func(left, right)
if this._is_mixed_type or other._is_mixed_type:
# unique
if this.columns.is_unique:
def f(col):
r = _arith_op(this[col].values, other[col].values)
return self._constructor_sliced(r, index=new_index,
dtype=r.dtype)
result = dict([(col, f(col)) for col in this])
# non-unique
else:
def f(i):
r = _arith_op(this.iloc[:, i].values,
other.iloc[:, i].values)
return self._constructor_sliced(r, index=new_index,
dtype=r.dtype)
result = dict([
(i, f(i)) for i, col in enumerate(this.columns)
])
result = self._constructor(result, index=new_index, copy=False)
result.columns = new_columns
return result
else:
result = _arith_op(this.values, other.values)
return self._constructor(result, index=new_index,
columns=new_columns, copy=False)
def _combine_series(self, other, func, fill_value=None, axis=None,
level=None):
if axis is not None:
axis = self._get_axis_name(axis)
if axis == 'index':
return self._combine_match_index(other, func, level=level, fill_value=fill_value)
else:
return self._combine_match_columns(other, func, level=level, fill_value=fill_value)
return self._combine_series_infer(other, func, level=level, fill_value=fill_value)
def _combine_series_infer(self, other, func, level=None, fill_value=None):
if len(other) == 0:
return self * NA
if len(self) == 0:
# Ambiguous case, use _series so works with DataFrame
return self._constructor(data=self._series, index=self.index,
columns=self.columns)
# teeny hack because one does DataFrame + TimeSeries all the time
if self.index.is_all_dates and other.index.is_all_dates:
warnings.warn(("TimeSeries broadcasting along DataFrame index "
"by default is deprecated. Please use "
"DataFrame.<op> to explicitly broadcast arithmetic "
"operations along the index"),
FutureWarning)
return self._combine_match_index(other, func, level=level, fill_value=fill_value)
else:
return self._combine_match_columns(other, func, level=level, fill_value=fill_value)
def _combine_match_index(self, other, func, level=None, fill_value=None):
left, right = self.align(other, join='outer', axis=0, level=level, copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported." %
fill_value)
return self._constructor(func(left.values.T, right.values).T,
index=left.index,
columns=self.columns, copy=False)
def _combine_match_columns(self, other, func, level=None, fill_value=None):
left, right = self.align(other, join='outer', axis=1, level=level, copy=False)
if fill_value is not None:
raise NotImplementedError("fill_value %r not supported" %
fill_value)
new_data = left._data.eval(
func=func, other=right, axes=[left.columns, self.index])
return self._constructor(new_data)
def _combine_const(self, other, func, raise_on_error=True):
if self.empty:
return self
new_data = self._data.eval(func=func, other=other, raise_on_error=raise_on_error)
return self._constructor(new_data)
def _compare_frame_evaluate(self, other, func, str_rep):
# unique
if self.columns.is_unique:
def _compare(a, b):
return dict([(col, func(a[col], b[col])) for col in a.columns])
new_data = expressions.evaluate(_compare, str_rep, self, other)
return self._constructor(data=new_data, index=self.index,
columns=self.columns, copy=False)
# non-unique
else:
def _compare(a, b):
return dict([(i, func(a.iloc[:, i], b.iloc[:, i]))
for i, col in enumerate(a.columns)])
new_data = expressions.evaluate(_compare, str_rep, self, other)
result = self._constructor(data=new_data, index=self.index,
copy=False)
result.columns = self.columns
return result
def _compare_frame(self, other, func, str_rep):
if not self._indexed_same(other):
raise ValueError('Can only compare identically-labeled '
'DataFrame objects')
return self._compare_frame_evaluate(other, func, str_rep)
def _flex_compare_frame(self, other, func, str_rep, level):
if not self._indexed_same(other):
self, other = self.align(other, 'outer', level=level, copy=False)
return self._compare_frame_evaluate(other, func, str_rep)
def combine(self, other, func, fill_value=None, overwrite=True):
"""
Add two DataFrame objects and do not propagate NaN values, so if for a
(column, time) one frame is missing a value, it will default to the
other frame's value (which might be NaN as well)
Parameters
----------
other : DataFrame
func : function
fill_value : scalar value
overwrite : boolean, default True
If True then overwrite values for common keys in the calling frame
Returns
-------
result : DataFrame
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isnull(series)
other_mask = isnull(otherSeries)
# don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
# if we have different dtypes, possibly promote
new_dtype = this_dtype
if this_dtype != other_dtype:
new_dtype = com._lcd_dtypes(this_dtype, other_dtype)
series = series.astype(new_dtype)
otherSeries = otherSeries.astype(new_dtype)
# see if we need to be represented as i8 (datetimelike)
# try to keep us at this dtype
needs_i8_conversion = com.needs_i8_conversion(new_dtype)
if needs_i8_conversion:
this_dtype = new_dtype
arr = func(series, otherSeries, True)
else:
arr = func(series, otherSeries)
if do_fill:
arr = com.ensure_float(arr)
arr[this_mask & other_mask] = NA
# try to downcast back to the original dtype
if needs_i8_conversion:
arr = com._possibly_cast_to_datetime(arr, this_dtype)
else:
arr = com._possibly_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result,
index=new_index,
columns=new_columns).convert_objects(
convert_dates=True,
copy=False)
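# Illustrative sketch for ``combine`` -- the combining function receives one
# aligned column (Series) from each frame (hypothetical frames below):
# >>> df1 = DataFrame({'A': [0, 0], 'B': [4, 4]})
# >>> df2 = DataFrame({'A': [1, 1], 'B': [3, 3]})
# >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
# >>> df1.combine(df2, take_smaller)   # keeps A from df1 and B from df2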
def combine_first(self, other):
"""
Combine two DataFrame objects and default to non-null values in frame
calling the method. Result index columns will be the union of the
respective indexes and columns
Parameters
----------
other : DataFrame
Examples
--------
a's values prioritized, use values from b to fill holes:
>>> a.combine_first(b)
Returns
-------
combined : DataFrame
"""
def combiner(x, y, needs_i8_conversion=False):
x_values = x.values if hasattr(x, 'values') else x
y_values = y.values if hasattr(y, 'values') else y
if needs_i8_conversion:
mask = isnull(x)
x_values = x_values.view('i8')
y_values = y_values.view('i8')
else:
mask = isnull(x_values)
return expressions.where(mask, y_values, x_values,
raise_on_error=True)
return self.combine(other, combiner, overwrite=False)
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
"""
Modify DataFrame in place using non-NA values from passed
DataFrame. Aligns on indices
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
join : {'left'}, default 'left'
overwrite : boolean, default True
If True then overwrite values for common keys in the calling frame
filter_func : callable(1d-array) -> 1d-array<boolean>, default None
Can choose to replace values other than NA. Return True for values
that should be updated
raise_conflict : boolean
If True, will raise an error if the DataFrame and other both
contain data in the same place.
"""
# TODO: Support other joins
if join != 'left': # pragma: no cover
raise NotImplementedError("Only left join is supported")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col].values
that = other[col].values
if filter_func is not None:
mask = ~filter_func(this) | isnull(that)
else:
if raise_conflict:
mask_this = notnull(that)
mask_that = notnull(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isnull(that)
# don't overwrite columns unnecessarily
if mask.all():
continue
else:
mask = notnull(this)
self[col] = expressions.where(
mask, this, that, raise_on_error=True)
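# Illustrative sketch for ``update`` (modifies the caller in place; assumes
# the module-level ``np`` import; frames are hypothetical):
# >>> df = DataFrame({'A': [1, 2, 3], 'B': [400., 500., 600.]})
# >>> new = DataFrame({'B': [4., np.nan, 6.]})
# >>> df.update(new)   # df['B'] becomes [4, 500, 6]; the NaN in ``new`` is skipped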
#----------------------------------------------------------------------
# Misc methods
def first_valid_index(self):
"""
Return label for first non-NA/null value
"""
return self.index[self.count(1) > 0][0]
def last_valid_index(self):
"""
Return label for last non-NA/null value
"""
return self.index[self.count(1) > 0][-1]
#----------------------------------------------------------------------
# Data reshaping
def pivot(self, index=None, columns=None, values=None):
"""
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from index / columns to form axes and return either
DataFrame or Panel, depending on whether you request a single value
column (DataFrame) or all columns (Panel)
Parameters
----------
index : string or object
Column name to use to make new frame's index
columns : string or object
Column name to use to make new frame's columns
values : string or object, optional
Column name to use for populating new frame's values
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods
Examples
--------
>>> df
foo bar baz
0 one A 1.
1 one B 2.
2 one C 3.
3 two A 4.
4 two B 5.
5 two C 6.
>>> df.pivot('foo', 'bar', 'baz')
A B C
one 1 2 3
two 4 5 6
>>> df.pivot('foo', 'bar')['baz']
A B C
one 1 2 3
two 4 5 6
Returns
-------
pivoted : DataFrame
If no values column specified, will have hierarchically indexed
columns
"""
from pandas.core.reshape import pivot
return pivot(self, index=index, columns=columns, values=values)
def stack(self, level=-1, dropna=True):
"""
Pivot a level of the (possibly hierarchical) column labels, returning a
DataFrame (or Series in the case of an object with a single level of
column labels) having a hierarchical index with a new inner-most level
of row labels.
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default last level
Level(s) to stack, can pass level name
dropna : boolean, default True
Whether to drop rows in the resulting Frame/Series with no valid
values
Examples
--------
>>> s
a b
one 1. 2.
two 3. 4.
>>> s.stack()
one a 1
b 2
two a 3
b 4
Returns
-------
stacked : DataFrame or Series
"""
from pandas.core.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna)
def unstack(self, level=-1):
"""
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels. If the index is not a MultiIndex,
the output will be a Series (the analogue of stack when the columns are
not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
See also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1
b 2
two a 3
b 4
dtype: float64
>>> s.unstack(level=-1)
a b
one 1 2
two 3 4
>>> s.unstack(level=0)
one two
a 1 3
b 2 4
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.
b 3.
two a 2.
b 4.
Returns
-------
unstacked : DataFrame or Series
"""
from pandas.core.reshape import unstack
return unstack(self, level)
#----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1):
"""
1st discrete difference of object
Parameters
----------
periods : int, default 1
Periods to shift for forming difference
Returns
-------
diffed : DataFrame
"""
new_data = self._data.diff(n=periods)
return self._constructor(new_data)
#----------------------------------------------------------------------
# Function application
def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
args=(), **kwds):
"""
Applies function along input axis of DataFrame.
Objects passed to functions are Series objects having index
either the DataFrame's index (axis=0) or the columns (axis=1).
Return type depends on whether passed function aggregates, or the
reduce argument if the DataFrame is empty.
Parameters
----------
func : function
Function to apply to each column/row
axis : {0, 1}
* 0 : apply function to each column
* 1 : apply function to each row
broadcast : boolean, default False
For aggregation functions, return object of same size with values
propagated
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
return value will be guessed by calling func on an empty Series (note:
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
raw : boolean, default False
If False, convert each row or column into a Series. If raw=True the
passed function will receive ndarray objects instead. If you are
just applying a NumPy reduction function this will achieve much
better performance
args : tuple
Positional arguments to pass to function in addition to the
array/series
Additional keyword arguments will be passed as keywords to the function
Notes
-----
In the current implementation apply calls func twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
column/row.
Examples
--------
>>> df.apply(numpy.sqrt) # returns DataFrame
>>> df.apply(numpy.sum, axis=0) # equiv to df.sum(0)
>>> df.apply(numpy.sum, axis=1) # equiv to df.sum(1)
See also
--------
DataFrame.applymap: For elementwise operations
Returns
-------
applied : Series or DataFrame
"""
axis = self._get_axis_number(axis)
if kwds or args and not isinstance(func, np.ufunc):
f = lambda x: func(x, *args, **kwds)
else:
f = func
if len(self.columns) == 0 and len(self.index) == 0:
return self._apply_empty_result(func, axis, reduce, *args, **kwds)
if isinstance(f, np.ufunc):
results = f(self.values)
return self._constructor(data=results, index=self.index,
columns=self.columns, copy=False)
else:
if not broadcast:
if not all(self.shape):
return self._apply_empty_result(func, axis, reduce, *args,
**kwds)
if raw and not self._is_mixed_type:
return self._apply_raw(f, axis)
else:
if reduce is None:
reduce = True
return self._apply_standard(f, axis, reduce=reduce)
else:
return self._apply_broadcast(f, axis)
def _apply_empty_result(self, func, axis, reduce, *args, **kwds):
if reduce is None:
reduce = False
try:
reduce = not isinstance(func(_EMPTY_SERIES, *args, **kwds),
Series)
except Exception:
pass
if reduce:
return Series(NA, index=self._get_agg_axis(axis))
else:
return self.copy()
def _apply_raw(self, func, axis):
try:
result = lib.reduce(self.values, func, axis=axis)
except Exception:
result = np.apply_along_axis(func, axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return DataFrame(result, index=self.index,
columns=self.columns)
else:
return Series(result, index=self._get_agg_axis(axis))
def _apply_standard(self, func, axis, ignore_failures=False, reduce=True):
# skip if we are mixed datelike and trying reduce across axes
# GH6125
if reduce and axis==1 and self._is_mixed_type and self._is_datelike_mixed_type:
reduce=False
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
if reduce:
try:
# this is the fast path
values = self.values
dummy = Series(NA, index=self._get_axis(axis),
dtype=values.dtype)
labels = self._get_agg_axis(axis)
result = lib.reduce(values, func, axis=axis, dummy=dummy,
labels=labels)
return Series(result, index=labels)
except Exception:
pass
dtype = object if self._is_mixed_type else None
if axis == 0:
series_gen = (self.icol(i) for i in range(len(self.columns)))
res_index = self.columns
res_columns = self.index
elif axis == 1:
res_index = self.index
res_columns = self.columns
values = self.values
series_gen = (Series.from_array(arr, index=res_columns, name=name, dtype=dtype)
for i, (arr, name) in
enumerate(zip(values, res_index)))
else: # pragma : no cover
raise AssertionError('Axis must be 0 or 1, got %s' % str(axis))
i = None
keys = []
results = {}
if ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = func(v)
keys.append(v.name)
successes.append(i)
except Exception:
pass
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
try:
for i, v in enumerate(series_gen):
results[i] = func(v)
keys.append(v.name)
except Exception as e:
if hasattr(e, 'args'):
# make sure i is defined
if i is not None:
k = res_index[i]
e.args = e.args + ('occurred at index %s' %
com.pprint_thing(k),)
raise
if len(results) > 0 and is_sequence(results[0]):
if not isinstance(results[0], Series):
index = res_columns
else:
index = None
result = self._constructor(data=results, index=index)
result.columns = res_index
if axis == 1:
result = result.T
result = result.convert_objects(copy=False)
else:
result = Series(results)
result.index = res_index
return result
def _apply_broadcast(self, func, axis):
if axis == 0:
target = self
elif axis == 1:
target = self.T
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1, got %s' % axis)
result_values = np.empty_like(target.values)
columns = target.columns
for i, col in enumerate(columns):
result_values[:, i] = func(target[col])
result = self._constructor(result_values, index=target.index,
columns=target.columns)
if axis == 1:
result = result.T
return result
def applymap(self, func):
"""
Apply a function to a DataFrame that is intended to operate
elementwise, i.e. like doing map(func, series) for each series in the
DataFrame
Parameters
----------
func : function
Python function, returns a single value from a single value
Returns
-------
applied : DataFrame
See also
--------
DataFrame.apply : For operations on rows/columns
"""
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if com.needs_i8_conversion(x):
f = com.i8_boxer(x)
x = lib.map_infer(_values_from_object(x), f)
return lib.map_infer(_values_from_object(x), func)
return self.apply(infer)
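# Illustrative sketch for ``applymap`` (element-wise, versus ``apply`` which
# works column/row-wise; hypothetical frame):
# >>> df = DataFrame([[1.0, 2.5], [3.3, 4.0]])
# >>> df.applymap(lambda x: '%.2f' % x)   # every cell formatted as a string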
#----------------------------------------------------------------------
# Merging / joining methods
def append(self, other, ignore_index=False, verify_integrity=False):
"""
Append rows of `other` to the end of this frame, returning a new
object. Columns not in this frame are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : boolean, default False
If True, do not use the index labels.
verify_integrity : boolean, default False
If True, raise ValueError on creating index with duplicates.
Returns
-------
appended : DataFrame
Notes
-----
If a list of dict/series is passed and the keys are all contained in the
DataFrame's index, the order of the columns in the resulting DataFrame
will be unchanged.
See also
--------
pandas.concat : General function to concatenate DataFrame, Series
or Panel objects
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError('Can only append a Series if ignore_index=True'
' or if the Series has a name')
index = None if other.name is None else [other.name]
combined_columns = self.columns.tolist() + self.columns.union(other.index).difference(self.columns).tolist()
other = other.reindex(combined_columns, copy=False)
other = DataFrame(other.values.reshape((1, len(other))),
index=index, columns=combined_columns).convert_objects()
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list) and not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.ix[:, self.columns]
from pandas.tools.merge import concat
if isinstance(other, (list, tuple)):
to_concat = [self] + other
else:
to_concat = [self, other]
return concat(to_concat, ignore_index=ignore_index,
verify_integrity=verify_integrity)
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
"""
Join columns with other DataFrame either on index or on a key
column. Efficiently Join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series with name field set, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame
on : column name, tuple/list of column names, or array-like
Column(s) to use for joining, otherwise join on index. If multiple
columns are given, the passed DataFrame must have a MultiIndex. Can
pass an array as the join key if not already contained in the
calling DataFrame. Like an Excel VLOOKUP operation
how : {'left', 'right', 'outer', 'inner'}
How to handle indexes of the two objects. Default: 'left'
for joining on index, None otherwise
* left: use calling frame's index
* right: use input frame's index
* outer: form union of indexes
* inner: use intersection of indexes
lsuffix : string
Suffix to use from left frame's overlapping columns
rsuffix : string
Suffix to use from right frame's overlapping columns
sort : boolean, default False
Order result DataFrame lexicographically by the join key. If False,
preserves the index order of the calling (left) DataFrame
Notes
-----
on, lsuffix, and rsuffix options are not supported when passing a list
of DataFrame objects
Returns
-------
joined : DataFrame
"""
# For SparseDataFrame's benefit
return self._join_compat(other, on=on, how=how, lsuffix=lsuffix,
rsuffix=rsuffix, sort=sort)
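# Illustrative sketch for ``join`` (hypothetical frames):
# >>> left = DataFrame({'A': [1, 2]}, index=['a', 'b'])
# >>> right = DataFrame({'B': [3, 4]}, index=['a', 'c'])
# >>> left.join(right)               # left join on the index; 'b' gets NaN for B
# >>> left.join(right, how='inner')  # keep only the shared label 'a'
# >>> DataFrame({'key': ['a', 'c'], 'A': [1, 2]}).join(right, on='key')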
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
from pandas.tools.merge import merge, concat
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
return merge(self, other, left_on=on, how=how,
left_index=on is None, right_index=True,
suffixes=(lsuffix, rsuffix), sort=sort)
else:
if on is not None:
raise ValueError('Joining multiple DataFrames only supported'
' for joining on index')
# join indexes only using concat
if how == 'left':
how = 'outer'
join_axes = [self.index]
else:
join_axes = None
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
if can_concat:
return concat(frames, axis=1, join=how, join_axes=join_axes,
verify_integrity=True)
joined = frames[0]
for frame in frames[1:]:
joined = merge(joined, frame, how=how,
left_index=True, right_index=True)
return joined
@Substitution('')
@Appender(_merge_doc, indents=2)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True):
from pandas.tools.merge import merge
return merge(self, right, how=how, on=on,
left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index, sort=sort,
suffixes=suffixes, copy=copy)
#----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method='pearson', min_periods=1):
"""
Compute pairwise correlation of columns, excluding NA/null values
Parameters
----------
method : {'pearson', 'kendall', 'spearman'}
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for pearson
and spearman correlation
Returns
-------
y : DataFrame
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
mat = numeric_df.values
if method == 'pearson':
correl = _algos.nancorr(com._ensure_float64(mat),
minp=min_periods)
elif method == 'spearman':
correl = _algos.nancorr_spearman(com._ensure_float64(mat),
minp=min_periods)
else:
if min_periods is None:
min_periods = 1
mat = mat.T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = NA
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
return self._constructor(correl, index=cols, columns=cols)
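# Illustrative sketch for ``corr`` (hypothetical frame):
# >>> df = DataFrame({'x': [1., 2., 3., 4.], 'y': [1., 2., 3., 5.]})
# >>> df.corr()                   # Pearson correlation matrix, the default
# >>> df.corr(method='spearman')  # rank-based correlation instead
# >>> df.corr(min_periods=3)      # require at least 3 paired observations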
def cov(self, min_periods=None):
"""
Compute pairwise covariance of columns, excluding NA/null values
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
y : DataFrame
Notes
-----
`y` contains the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1 (unbiased estimator).
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
mat = numeric_df.values
if notnull(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan)
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols)))
else:
baseCov = _algos.nancorr(com._ensure_float64(mat), cov=True,
minp=min_periods)
return self._constructor(baseCov, index=cols, columns=cols)
def corrwith(self, other, axis=0, drop=False):
"""
Compute pairwise correlation between rows or columns of two DataFrame
objects.
Parameters
----------
other : DataFrame
axis : {0, 1}
0 to compute column-wise, 1 for row-wise
drop : boolean, default False
Drop missing indices from result, default returns union of all
Returns
-------
correls : Series
"""
axis = self._get_axis_number(axis)
if isinstance(other, Series):
return self.apply(other.corr, axis=axis)
this = self._get_numeric_data()
other = other._get_numeric_data()
left, right = this.align(other, join='inner', copy=False)
# mask missing values
left = left + right * 0
right = right + left * 0
if axis == 1:
left = left.T
right = right.T
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
if not drop:
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
correl = correl.reindex(result_index)
return correl
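# Illustrative sketch for ``corrwith`` (hypothetical frames sharing labels;
# assumes the module-level ``np`` import):
# >>> df1 = DataFrame(np.random.randn(5, 3), columns=list('abc'))
# >>> df2 = DataFrame(np.random.randn(5, 3), columns=list('abc'))
# >>> df1.corrwith(df2)           # one correlation per shared column
# >>> df1.corrwith(df2, axis=1)   # row-wise correlations instead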
#----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Return Series with number of non-NA/null observations over requested
axis. Works with non-floating point data as well (detects NaN and None)
Parameters
----------
axis : {0, 1}
0 for row-wise, 1 for column-wise
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a DataFrame
numeric_only : boolean, default False
Include only float, int, boolean data
Returns
-------
count : Series (or DataFrame if level specified)
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis,
numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = Series(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type:
result = notnull(frame).sum(axis=axis)
else:
counts = notnull(frame.values).sum(axis=axis)
result = Series(counts, index=frame._get_agg_axis(axis))
return result.astype('int64')
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, MultiIndex):
raise TypeError("Can only count levels on hierarchical %s." %
self._get_axis_name(axis))
if frame._is_mixed_type:
# Since we have mixed types, calling notnull(frame.values) might
# upcast everything to object
mask = notnull(frame).values
else:
# But use the speedup when we have homogeneous dtypes
mask = notnull(frame.values)
if axis == 1:
# We're transposing the mask rather than frame to avoid potential
# upcasts to object, which induces a ~20x slowdown
mask = mask.T
if isinstance(level, compat.string_types):
level = count_axis._get_level_number(level)
level_index = count_axis.levels[level]
labels = com._ensure_int64(count_axis.labels[level])
counts = lib.count_level_2d(mask, labels, len(level_index))
result = DataFrame(counts, index=level_index,
columns=agg_axis)
if axis == 1:
# Undo our earlier transpose
return result.T
else:
return result
def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
filter_type=None, **kwds):
axis = self._get_axis_number(axis)
f = lambda x: op(x, axis=axis, skipna=skipna, **kwds)
labels = self._get_agg_axis(axis)
# exclude timedelta/datetime unless we are uniform types
if axis == 1 and self._is_mixed_type and self._is_datelike_mixed_type:
numeric_only = True
if numeric_only is None:
try:
values = self.values
result = f(values)
except Exception as e:
# try by-column first
if filter_type is None and axis == 0:
try:
# this can end up with a non-reduction
# but not always. if the types are mixed
# with datelike then need to make sure a series
result = self.apply(f,reduce=False)
if result.ndim == self.ndim:
result = result.iloc[0]
return result
except:
pass
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
e = NotImplementedError("Handling exception with filter_"
"type %s not implemented."
% filter_type)
raise_with_traceback(e)
result = f(data.values)
labels = data._get_agg_axis(axis)
else:
if numeric_only:
if filter_type is None or filter_type == 'numeric':
data = self._get_numeric_data()
elif filter_type == 'bool':
data = self._get_bool_data()
else: # pragma: no cover
msg = ("Generating numeric_only data with filter_type %s"
"not supported." % filter_type)
raise NotImplementedError(msg)
values = data.values
labels = data._get_agg_axis(axis)
else:
values = self.values
result = f(values)
if is_object_dtype(result.dtype):
try:
if filter_type is None or filter_type == 'numeric':
result = result.astype(np.float64)
elif filter_type == 'bool' and notnull(result).all():
result = result.astype(np.bool_)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = com._coerce_to_dtypes(result, self.dtypes)
return Series(result, index=labels)
def idxmin(self, axis=0, skipna=True):
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0, 1}
0 for row-wise, 1 for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
Returns
-------
idxmin : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
See Also
--------
Series.idxmin
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else NA for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True):
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0, 1}
0 for row-wise, 1 for column-wise
skipna : boolean, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be the first index.
Returns
-------
idxmax : Series
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
See Also
--------
Series.idxmax
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else NA for i in indices]
return Series(result, index=self._get_agg_axis(axis))
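# Illustrative sketch for ``idxmin``/``idxmax`` (hypothetical frame):
# >>> df = DataFrame({'A': [3, 1, 2], 'B': [9, 7, 8]}, index=list('xyz'))
# >>> df.idxmin()         # label of the minimum per column: A -> 'y', B -> 'y'
# >>> df.idxmax(axis=1)   # column label of the maximum per row: 'B' for every row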
def _get_agg_axis(self, axis_num):
""" let's be explict about this """
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError('Axis must be 0 or 1 (got %r)' % axis_num)
def mode(self, axis=0, numeric_only=False):
"""
Gets the mode(s) of each element along the axis selected. Empty if nothing
has 2+ occurrences. Adds a row for each mode per label, fills in gaps
with nan.
Note that there could be multiple values returned for the selected
axis (when more than one item shares the maximum frequency), which is
why a DataFrame is returned. If you want to impute missing values
with the mode in a dataframe ``df``, you can just do this:
``df.fillna(df.mode().iloc[0])``
Parameters
----------
axis : {0, 1, 'index', 'columns'} (default 0)
* 0/'index' : get mode of each column
* 1/'columns' : get mode of each row
numeric_only : boolean, default False
if True, only apply to numeric columns
Returns
-------
modes : DataFrame (sorted)
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 1, 2, 1, 2, 3]})
>>> df.mode()
A
0 1
1 2
"""
data = self if not numeric_only else self._get_numeric_data()
f = lambda s: s.mode()
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True):
"""
Return values at the given quantile over requested axis, a la
numpy.percentile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute
axis : {0, 1}
0 for row-wise, 1 for column-wise
Returns
-------
quantiles : Series or DataFrame
If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
Examples
--------
>>> df = DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
"""
per = np.asarray(q) * 100
if not com.is_list_like(per):
per = [per]
q = [q]
squeeze = True
else:
squeeze = False
def f(arr, per):
if arr._is_datelike_mixed_type:
values = _values_from_object(arr).view('i8')
else:
values = arr.astype(float)
values = values[notnull(values)]
if len(values) == 0:
return NA
else:
return _quantile(values, per)
data = self._get_numeric_data() if numeric_only else self
if axis == 1:
data = data.T
# need to know which cols are timestamp going in so that we can
# map timestamp over them after getting the quantile.
is_dt_col = data.dtypes.map(com.is_datetime64_dtype)
is_dt_col = is_dt_col[is_dt_col].index
quantiles = [[f(vals, x) for x in per]
for (_, vals) in data.iteritems()]
result = DataFrame(quantiles, index=data._info_axis, columns=q).T
if len(is_dt_col) > 0:
result[is_dt_col] = result[is_dt_col].applymap(lib.Timestamp)
if squeeze:
if result.shape == (1, 1):
result = result.T.iloc[:, 0] # don't want scalar
else:
result = result.T.squeeze()
result.name = None # For groupby, so it can set an index name
return result
def rank(self, axis=0, numeric_only=None, method='average',
na_option='keep', ascending=True, pct=False):
"""
Compute numerical data ranks (1 through n) along axis. Equal values are
assigned a rank that is the average of the ranks of those values
Parameters
----------
axis : {0, 1}, default 0
Ranks over columns (0) or rows (1)
numeric_only : boolean, default None
Include only float, int, boolean data
method : {'average', 'min', 'max', 'first', 'dense'}
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
na_option : {'keep', 'top', 'bottom'}
* keep: leave NA values where they are
* top: smallest rank if ascending
* bottom: smallest rank if descending
ascending : boolean, default True
False for ranks by high (1) to low (N)
pct : boolean, default False
Computes percentage rank of data
Returns
-------
ranks : DataFrame
"""
axis = self._get_axis_number(axis)
if numeric_only is None:
try:
ranks = algos.rank(self.values, axis=axis, method=method,
ascending=ascending, na_option=na_option,
pct=pct)
return self._constructor(ranks, index=self.index,
columns=self.columns)
except TypeError:
numeric_only = True
if numeric_only:
data = self._get_numeric_data()
else:
data = self
ranks = algos.rank(data.values, axis=axis, method=method,
ascending=ascending, na_option=na_option, pct=pct)
return self._constructor(ranks, index=data.index, columns=data.columns)
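# Illustrative sketch for ``rank`` (hypothetical column with a tie and a NaN;
# assumes the module-level ``np`` import):
# >>> df = DataFrame({'v': [3., 1., 1., np.nan]})
# >>> df.rank()              # average ranks: [3.0, 1.5, 1.5, NaN]
# >>> df.rank(method='min')  # ties share the lowest rank: [3.0, 1.0, 1.0, NaN]
# >>> df.rank(pct=True)      # ranks expressed as a fraction of valid rows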
def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period
Parameters
----------
freq : string, default frequency of PeriodIndex
Desired frequency
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end
axis : {0, 1} default 0
The axis to convert (the index by default)
copy : boolean, default True
If false then underlying input data is not copied
Returns
-------
df : DataFrame with DatetimeIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
elif axis == 1:
new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
return self._constructor(new_data)
def to_period(self, freq=None, axis=0, copy=True):
"""
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed)
Parameters
----------
freq : string, default
axis : {0, 1}, default 0
The axis to convert (the index by default)
copy : boolean, default True
If False then underlying input data is not copied
Returns
-------
ts : TimeSeries with PeriodIndex
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_period(freq=freq))
elif axis == 1:
new_data.set_axis(0, self.columns.to_period(freq=freq))
else: # pragma: no cover
raise AssertionError('Axis must be 0 or 1. Got %s' % str(axis))
return self._constructor(new_data)
def isin(self, values):
"""
Return boolean DataFrame showing whether each element in the
DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dictionary
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dictionary, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame of booleans
Examples
--------
When ``values`` is a list:
>>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> df.isin([1, 3, 12, 'a'])
A B
0 True True
1 False False
2 True False
When ``values`` is a dict:
>>> df = DataFrame({'A': [1, 2, 3], 'B': [1, 4, 7]})
>>> df.isin({'A': [1, 3], 'B': [4, 7, 12]})
A B
0 True False # Note that B didn't match the 1 here.
1 False True
2 True True
When ``values`` is a Series or DataFrame:
>>> df = DataFrame({'A': [1, 2, 3], 'B': ['a', 'b', 'f']})
>>> other = DataFrame({'A': [1, 3, 3, 2], 'B': ['e', 'f', 'f', 'e']})
>>> df.isin(other)
A B
0 True False
1 False False # Column A in `other` has a 3, but not at index 1.
2 True True
"""
if isinstance(values, dict):
from collections import defaultdict
from pandas.tools.merge import concat
values = defaultdict(list, values)
return concat((self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)), axis=1)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("ValueError: cannot compute isin with"
" a duplicate axis.")
return self.eq(values.reindex_like(self), axis='index')
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("ValueError: cannot compute isin with"
" a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError("only list-like or dict-like objects are"
" allowed to be passed to DataFrame.isin(), "
"you passed a "
"{0!r}".format(type(values).__name__))
return DataFrame(lib.ismember(self.values.ravel(),
set(values)).reshape(self.shape),
self.index,
self.columns)
#----------------------------------------------------------------------
# Deprecated stuff
def combineAdd(self, other):
"""
Add two DataFrame objects and do not propagate
NaN values, so if for a (column, time) one frame is missing a
value, it will default to the other frame's value (which might
be NaN as well)
Parameters
----------
other : DataFrame
Returns
-------
DataFrame
"""
return self.add(other, fill_value=0.)
def combineMult(self, other):
"""
Multiply two DataFrame objects and do not propagate NaN values, so if
for a (column, time) one frame is missing a value, it will default to
the other frame's value (which might be NaN as well)
Parameters
----------
other : DataFrame
Returns
-------
DataFrame
"""
return self.mul(other, fill_value=1.)
DataFrame._setup_axes(['index', 'columns'], info_axis=1, stat_axis=0,
axes_are_reversed=True, aliases={'rows': 0})
DataFrame._add_numeric_operations()
_EMPTY_SERIES = Series([])
def _arrays_to_mgr(arrays, arr_names, index, columns, dtype=None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
else:
index = _ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
# from BlockManager perspective
axes = [_ensure_index(columns), _ensure_index(index)]
return create_block_manager_from_arrays(arrays, arr_names, axes)
def extract_index(data):
from pandas.core.index import _union_indexes
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes = []
have_raw_arrays = False
have_series = False
have_dicts = False
for v in data:
if isinstance(v, Series):
have_series = True
indexes.append(v.index)
elif isinstance(v, dict):
have_dicts = True
indexes.append(list(v.keys()))
elif is_list_like(v) and getattr(v, 'ndim', 1) == 1:
have_raw_arrays = True
raw_lengths.append(len(v))
if not indexes and not raw_lengths:
raise ValueError('If using all scalar values, you must pass'
' an index')
if have_series or have_dicts:
index = _union_indexes(indexes)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError('arrays must all be same length')
if have_dicts:
raise ValueError('Mixing dicts with non-Series may lead to '
'ambiguous ordering.')
if have_series:
if lengths[0] != len(index):
msg = ('array length %d does not match index length %d'
% (lengths[0], len(index)))
raise ValueError(msg)
else:
index = Index(np.arange(lengths[0]))
return _ensure_index(index)
def _prep_ndarray(values, copy=True):
if not isinstance(values, (np.ndarray, Series, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
def convert(v):
return com._possibly_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if com.is_list_like(values[0]) or hasattr(values[0], 'len'):
values = np.array([convert(v) for v in values])
else:
values = convert(values)
except:
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError('Must pass 2-d input')
return values
def _to_arrays(data, columns, coerce_float=False, dtype=None):
"""
Return list of arrays, columns
"""
if isinstance(data, DataFrame):
if columns is not None:
arrays = [data.icol(i).values for i, col in enumerate(data.columns)
if col in columns]
else:
columns = data.columns
arrays = [data.icol(i).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
columns = data.dtype.names
if columns is not None:
return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
if isinstance(data[0], (list, tuple)):
return _list_to_arrays(data, columns, coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], collections.Mapping):
return _list_of_dict_to_arrays(data, columns,
coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], Series):
return _list_of_series_to_arrays(data, columns,
coerce_float=coerce_float,
dtype=dtype)
elif isinstance(data[0], Categorical):
if columns is None:
columns = _default_index(len(data))
return data, columns
elif (isinstance(data, (np.ndarray, Series, Index))
and data.dtype.names is not None):
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
else:
# last ditch effort
data = lmap(tuple, data)
return _list_to_arrays(data, columns,
coerce_float=coerce_float,
dtype=dtype)
def _masked_rec_array_to_mgr(data, index, columns, dtype, copy):
""" extract from a masked rec array and create the manager """
# essentially process a record array then fill it
fill_value = data.fill_value
fdata = ma.getdata(data)
if index is None:
index = _get_names_from_index(fdata)
if index is None:
index = _default_index(len(data))
index = _ensure_index(index)
if columns is not None:
columns = _ensure_index(columns)
arrays, arr_columns = _to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for fv, arr, col in zip(fill_value, arrays, arr_columns):
mask = ma.getmaskarray(data[col])
if mask.any():
arr, fv = _maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
# create the manager
arrays, arr_columns = _reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = _arrays_to_mgr(arrays, arr_columns, index, columns)
if copy:
mgr = mgr.copy()
return mgr
def _reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (columns is not None and len(columns) and arr_columns is not None and
len(arr_columns)):
indexer = _ensure_index(
arr_columns).get_indexer(columns)
arr_columns = _ensure_index(
[arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def _list_to_arrays(data, columns, coerce_float=False, dtype=None):
if len(data) > 0 and isinstance(data[0], tuple):
content = list(lib.to_object_array_tuples(data).T)
else:
# list of lists
content = list(lib.to_object_array(data).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
from pandas.core.index import _get_combined_index
if columns is None:
columns = _get_combined_index([
s.index for s in data if getattr(s, 'index', None) is not None
])
indexer_cache = {}
aligned_values = []
for s in data:
index = getattr(s, 'index', None)
if index is None:
index = _default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = _values_from_object(s)
aligned_values.append(com.take_1d(values, indexer))
values = np.vstack(aligned_values)
if values.dtype == np.object_:
content = list(values.T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
else:
return values.T, columns
def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
if columns is None:
gen = (list(x.keys()) for x in data)
columns = lib.fast_unique_multiple_list_gen(gen)
# assure that they are of the base dict class and not of derived
# classes
data = [(type(d) is dict) and d or dict(d) for d in data]
content = list(lib.dicts_to_array(data, list(columns)).T)
return _convert_object_array(content, columns, dtype=dtype,
coerce_float=coerce_float)
def _convert_object_array(content, columns, coerce_float=False, dtype=None):
if columns is None:
columns = _default_index(len(content))
else:
if len(columns) != len(content): # pragma: no cover
# caller's responsibility to check for this...
raise AssertionError('%d columns passed, passed data had %s '
'columns' % (len(columns), len(content)))
# provide soft conversion of object dtypes
def convert(arr):
if dtype != object and dtype != np.object:
arr = lib.maybe_convert_objects(arr, try_float=coerce_float)
arr = com._possibly_cast_to_datetime(arr, dtype)
return arr
arrays = [ convert(arr) for arr in content ]
return arrays, columns
def _get_names_from_index(data):
index = lrange(len(data))
has_some_name = any([getattr(s, 'name', None) is not None for s in data])
if not has_some_name:
return index
count = 0
for i, s in enumerate(data):
n = getattr(s, 'name', None)
if n is not None:
index[i] = n
else:
index[i] = 'Unnamed %d' % count
count += 1
return index
def _homogenize(data, index, dtype=None):
from pandas.core.series import _sanitize_array
oindex = None
homogenized = []
for v in data:
if isinstance(v, Series):
if dtype is not None:
v = v.astype(dtype)
if v.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
v = v.reindex(index, copy=False)
else:
if isinstance(v, dict):
if oindex is None:
oindex = index.astype('O')
if type(v) == dict:
# fast cython method
v = lib.fast_multiget(v, oindex.values, default=NA)
else:
v = lib.map_infer(oindex.values, v.get)
v = _sanitize_array(v, index, dtype=dtype, copy=False,
raise_cast_failure=False)
homogenized.append(v)
return homogenized
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
new_data = OrderedDict()
for index, s in compat.iteritems(data):
for col, v in compat.iteritems(s):
new_data[col] = new_data.get(col, OrderedDict())
new_data[col][index] = v
return new_data
def _put_str(s, space):
return ('%s' % s)[:space].ljust(space)
#----------------------------------------------------------------------
# Add plotting methods to DataFrame
import pandas.tools.plotting as gfx
DataFrame.plot = gfx.plot_frame
DataFrame.hist = gfx.hist_frame
@Appender(_shared_docs['boxplot'] % _shared_doc_kwargs)
def boxplot(self, column=None, by=None, ax=None, fontsize=None,
rot=0, grid=True, figsize=None, layout=None, return_type=None,
**kwds):
import pandas.tools.plotting as plots
import matplotlib.pyplot as plt
ax = plots.boxplot(self, column=column, by=by, ax=ax,
fontsize=fontsize, grid=grid, rot=rot,
figsize=figsize, layout=layout, return_type=return_type,
**kwds)
plt.draw_if_interactive()
return ax
DataFrame.boxplot = boxplot
ops.add_flex_arithmetic_methods(DataFrame, **ops.frame_flex_funcs)
ops.add_special_arithmetic_methods(DataFrame, **ops.frame_special_funcs)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
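# A minimal sketch, intended for a separate interactive session with pandas
# importable as pd, of the public constructor paths these private helpers
# appear to serve: a list of record dicts and a row-oriented nested dict
# (the latter via DataFrame.from_dict with orient='index').
import pandas as pd
rec_frame = pd.DataFrame([{'a': 1, 'b': 2}, {'a': 3}])    # missing 'b' filled with NaN
idx_frame = pd.DataFrame.from_dict({'r1': {'x': 1.0, 'y': 3.0},
                                    'r2': {'x': 2.0}},
                                   orient='index')         # outer keys become the index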
| [
[
[
321,
329
]
],
[
[
410,
419
],
[
80111,
80120
]
],
[
[
427,
438
],
[
10342,
10353
],
[
182171,
182182
]
],
[
[
446,
455
],
[
30804,
30813
],
[
80179,
80188
]
],
[
[
463,
466
],
[
57967,
57970
]
],
[
[
474,
479
],
[
9114,
9119
],
[
9169,
9174
]
],
[
[
487,
495
],
[
26264,
26272
],
[
51289,
51297
],
[
53523,
53531
],
[
55666,
55674
],
[
69819,
69827
],
[
121664,
121672
]
],
[
[
515,
524
],
[
93827,
93829
],
[
94279,
94281
],
[
12607,
12609
],
[
121282,
121284
],
[
127465,
127467
],
[
140620,
140622
],
[
141782,
141784
],
[
154630,
154632
],
[
164357,
164359
],
[
165282,
165284
],
[
188623,
188625
],
[
168539,
168541
]
],
[
[
532,
543
],
[
96036,
96038
],
[
8429,
8431
],
[
10484,
10486
],
[
10935,
10937
],
[
11071,
11073
],
[
12312,
12314
],
[
12456,
12458
],
[
12545,
12547
],
[
13662,
13664
],
[
23098,
23100
],
[
23379,
23381
],
[
23582,
23584
],
[
23654,
23656
],
[
23696,
23698
],
[
30897,
30899
],
[
31589,
31591
],
[
34395,
34397
],
[
35163,
35165
],
[
35234,
35236
],
[
37091,
37093
],
[
66413,
66415
],
[
67594,
67596
],
[
67604,
67606
],
[
68506,
68508
],
[
70667,
70669
],
[
71708,
71710
],
[
81430,
81432
],
[
82952,
82954
],
[
83983,
83985
],
[
89183,
89185
],
[
89820,
89822
],
[
90487,
90489
],
[
91106,
91108
],
[
91171,
91173
],
[
91185,
91187
],
[
92887,
92889
],
[
98937,
98939
],
[
102036,
102038
],
[
106007,
106009
],
[
109099,
109101
],
[
139237,
139239
],
[
139498,
139500
],
[
140862,
140864
],
[
144638,
144640
],
[
154354,
154356
],
[
154403,
154405
],
[
155721,
155723
],
[
155789,
155791
],
[
155841,
155843
],
[
163120,
163122
],
[
163245,
163247
],
[
168074,
168076
],
[
180213,
180215
],
[
180340,
180342
],
[
180417,
180419
],
[
180797,
180799
],
[
181024,
181026
],
[
181778,
181780
],
[
182757,
182759
],
[
185867,
185869
],
[
185917,
185919
],
[
101548,
101550
],
[
101981,
101983
],
[
187213,
187215
]
],
[
[
551,
565
],
[
7688,
7690
],
[
8041,
8043
],
[
183400,
183402
],
[
183831,
183833
]
],
[
[
599,
605
],
[
126139,
126145
],
[
126179,
126185
],
[
130422,
130428
],
[
130748,
130754
],
[
119051,
119057
],
[
119093,
119099
],
[
128762,
128768
],
[
128907,
128913
]
],
[
[
607,
614
],
[
130520,
130527
],
[
130566,
130573
],
[
130938,
130945
],
[
155608,
155615
],
[
158828,
158835
],
[
158901,
158908
],
[
159665,
159672
],
[
159786,
159793
],
[
163179,
163186
],
[
168466,
168473
]
],
[
[
616,
627
],
[
11326,
11337
]
],
[
[
629,
638
],
[
12859,
12868
]
],
[
[
672,
686
],
[
9758,
9772
],
[
9853,
9867
],
[
182674,
182688
],
[
183530,
183544
],
[
185552,
185566
],
[
186795,
186809
],
[
13893,
13907
],
[
14033,
14047
]
],
[
[
688,
701
],
[
8133,
8146
],
[
183903,
183916
]
],
[
[
703,
714
],
[
89596,
89607
],
[
143868,
143879
]
],
[
[
748,
772
],
[
11014,
11038
],
[
90435,
90459
]
],
[
[
774,
793
],
[
185770,
185789
],
[
145707,
145726
],
[
145767,
145786
],
[
168341,
168360
]
],
[
[
827,
839
],
[
9274,
9286
],
[
83443,
83455
],
[
176318,
176330
],
[
179210,
179222
]
],
[
[
841,
851
]
],
[
[
853,
876
],
[
64263,
64286
]
],
[
[
910,
930
],
[
14282,
14302
],
[
14336,
14356
]
],
[
[
932,
947
],
[
15540,
15555
],
[
61023,
61038
],
[
90239,
90254
],
[
93040,
93055
],
[
162961,
162976
]
],
[
[
949,
980
],
[
15586,
15617
],
[
90293,
90324
]
],
[
[
1014,
1021
],
[
5341,
5348
],
[
11393,
11400
],
[
84545,
84552
]
],
[
[
1023,
1035
],
[
95610,
95622
],
[
95862,
95874
],
[
96365,
96377
],
[
189496,
189508
]
],
[
[
1066,
1071
],
[
8449,
8454
],
[
12910,
12915
],
[
23666,
23671
],
[
32555,
32560
],
[
32641,
32646
],
[
66089,
66094
],
[
67153,
67158
],
[
68518,
68523
],
[
70679,
70684
],
[
81448,
81453
],
[
89195,
89200
],
[
89586,
89591
],
[
89832,
89837
],
[
98822,
98827
],
[
98949,
98954
],
[
178769,
178774
],
[
180207,
180212
],
[
180360,
180365
],
[
182777,
182782
]
],
[
[
1073,
1083
],
[
32818,
32828
],
[
34279,
34289
],
[
34689,
34699
],
[
40312,
40322
],
[
68128,
68138
],
[
89047,
89057
],
[
90964,
90974
],
[
98063,
98073
],
[
98334,
98344
],
[
99232,
99242
],
[
102108,
102118
],
[
102630,
102640
],
[
103811,
103821
],
[
113878,
113888
],
[
114509,
114519
],
[
114721,
114731
],
[
116232,
116242
],
[
118292,
118302
],
[
159351,
159361
]
],
[
[
1085,
1098
],
[
9441,
9454
],
[
11692,
11705
],
[
11998,
12011
],
[
30162,
30175
],
[
31083,
31096
],
[
31732,
31745
],
[
31962,
31975
],
[
32051,
32064
],
[
36293,
36306
],
[
36439,
36452
],
[
36783,
36796
],
[
37052,
37065
],
[
178335,
178348
],
[
178520,
178533
],
[
178544,
178557
],
[
180248,
180261
],
[
183568,
183581
],
[
183636,
183649
],
[
184497,
184510
],
[
184580,
184593
],
[
13953,
13966
],
[
14095,
14108
]
],
[
[
1133,
1149
],
[
70761,
70777
],
[
89231,
89247
]
],
[
[
1185,
1211
],
[
68351,
68377
],
[
81268,
81294
]
],
[
[
1247,
1265
],
[
70270,
70288
],
[
82132,
82150
]
],
[
[
1302,
1314
],
[
7400,
7412
]
],
[
[
1351,
1383
],
[
178578,
178610
]
],
[
[
1420,
1452
],
[
15642,
15674
]
],
[
[
1485,
1491
],
[
7065,
7071
],
[
178004,
178010
],
[
8441,
8447
],
[
9586,
9592
],
[
13263,
13269
],
[
21622,
21628
],
[
22583,
22589
],
[
23547,
23553
],
[
23575,
23581
],
[
23858,
23864
],
[
24945,
24951
],
[
62010,
62016
],
[
62153,
62159
],
[
66482,
66488
],
[
68498,
68504
],
[
69757,
69763
],
[
70659,
70665
],
[
71432,
71438
],
[
71474,
71480
],
[
79914,
79920
],
[
79984,
79990
],
[
81422,
81428
],
[
83513,
83519
],
[
83618,
83624
],
[
88799,
88805
],
[
89175,
89181
],
[
91339,
91345
],
[
98706,
98712
],
[
109479,
109485
],
[
140515,
140521
],
[
140613,
140619
],
[
141108,
141114
],
[
141775,
141781
],
[
142064,
142070
],
[
142528,
142534
],
[
143935,
143941
],
[
144287,
144293
],
[
147498,
147504
],
[
147578,
147584
],
[
151138,
151144
],
[
156682,
156688
],
[
158709,
158715
],
[
158963,
158969
],
[
163496,
163502
],
[
164393,
164399
],
[
165318,
165324
],
[
175739,
175745
],
[
178990,
178996
],
[
180352,
180358
],
[
182394,
182400
],
[
182769,
182775
],
[
188060,
188066
]
],
[
[
1528,
1539
],
[
9708,
9719
],
[
89506,
89517
],
[
90675,
90686
],
[
182610,
182621
]
],
[
[
1547,
1592
],
[
123656,
123667
],
[
124073,
124084
],
[
130977,
130988
],
[
128944,
128955
]
],
[
[
1629,
1642
],
[
76220,
76225
]
],
[
[
1661,
1684
],
[
168583,
168592
]
],
[
[
1711,
1716
],
[
22044,
22049
],
[
98101,
98106
],
[
98487,
98492
],
[
142271,
142276
],
[
181674,
181679
]
],
[
[
1718,
1721
],
[
21576,
21579
],
[
22085,
22088
],
[
27029,
27032
],
[
34405,
34408
],
[
35198,
35201
],
[
82519,
82522
],
[
92949,
92952
],
[
109371,
109374
],
[
142678,
142681
],
[
183778,
183781
]
],
[
[
1723,
1729
],
[
187502,
187508
]
],
[
[
1731,
1735
],
[
34390,
34394
],
[
34998,
35002
],
[
35122,
35126
],
[
183000,
183004
]
],
[
[
1737,
1741
],
[
36185,
36189
],
[
102533,
102537
]
],
[
[
1743,
1751
],
[
17458,
17466
],
[
18459,
18467
],
[
19718,
19726
]
],
[
[
1753,
1754
],
[
18468,
18469
],
[
19727,
19728
]
],
[
[
1782,
1793
],
[
12822,
12833
],
[
188984,
188995
],
[
189131,
189142
]
],
[
[
1795,
1815
],
[
10728,
10748
],
[
15222,
15242
],
[
162176,
162196
]
],
[
[
1836,
1842
],
[
22106,
22112
],
[
10867,
10873
],
[
11802,
11808
],
[
26483,
26489
],
[
26604,
26610
],
[
26916,
26922
],
[
30313,
30319
],
[
31270,
31276
],
[
32329,
32335
],
[
48380,
48386
],
[
60515,
60521
],
[
109159,
109165
],
[
160028,
160034
],
[
189018,
189024
],
[
189064,
189070
]
],
[
[
1875,
1886
],
[
90688,
90699
]
],
[
[
1922,
1931
],
[
41650,
41659
]
],
[
[
1933,
1941
],
[
50745,
50753
],
[
52369,
52377
],
[
54644,
54652
],
[
95601,
95609
],
[
95853,
95861
],
[
96356,
96364
],
[
152424,
152432
],
[
189487,
189495
]
],
[
[
1943,
1955
],
[
152402,
152414
]
],
[
[
1963,
1978
],
[
25311,
25326
],
[
106775,
106790
],
[
107886,
107901
]
],
[
[
2014,
2025
],
[
101260,
101271
]
],
[
[
2059,
2072
],
[
101357,
101370
]
],
[
[
2081,
2112
],
[
170926,
170931
],
[
171415,
171420
]
],
[
[
2120,
2145
],
[
16951,
16954
],
[
17078,
17081
],
[
19572,
19575
],
[
30197,
30200
],
[
34104,
34107
],
[
69428,
69431
],
[
78372,
78375
],
[
78402,
78405
],
[
79048,
79051
],
[
80596,
80599
],
[
81909,
81912
],
[
89932,
89935
],
[
90020,
90023
],
[
90555,
90558
],
[
95064,
95067
],
[
112835,
112838
],
[
113642,
113645
],
[
114918,
114921
],
[
126823,
126826
],
[
127120,
127123
],
[
127397,
127400
],
[
127584,
127587
],
[
127672,
127675
],
[
143791,
143794
],
[
153903,
153906
],
[
154063,
154066
],
[
155968,
155971
],
[
160171,
160174
],
[
163437,
163440
],
[
168110,
168113
],
[
168892,
168895
],
[
180714,
180717
],
[
185823,
185826
],
[
58897,
58900
],
[
59462,
59465
],
[
78919,
78922
],
[
101877,
101880
],
[
113120,
113123
],
[
145609,
145612
],
[
145657,
145660
],
[
180492,
180495
],
[
187315,
187318
]
],
[
[
2153,
2178
],
[
50754,
50757
],
[
52378,
52381
],
[
54653,
54656
],
[
16501,
16504
],
[
18813,
18816
],
[
44672,
44675
],
[
48520,
48523
],
[
51439,
51442
],
[
53673,
53676
],
[
55816,
55819
]
],
[
[
2186,
2214
],
[
154278,
154284
],
[
164222,
164228
],
[
165147,
165153
]
],
[
[
2222,
2244
],
[
190085,
190088
],
[
190130,
190133
],
[
190152,
190155
],
[
190200,
190203
]
],
[
[
2253,
2270
],
[
37144,
37147
],
[
93086,
93089
],
[
140774,
140777
],
[
141941,
141944
],
[
160232,
160235
],
[
169232,
169235
],
[
176640,
176643
],
[
184865,
184868
],
[
184958,
184961
],
[
186302,
186305
],
[
186507,
186510
],
[
188579,
188582
],
[
188673,
188676
],
[
101589,
101592
],
[
145693,
145696
],
[
145753,
145756
],
[
187242,
187245
]
],
[
[
2278,
2300
],
[
153888,
153894
],
[
154039,
154045
],
[
155953,
155959
]
],
[
[
2333,
2343
],
[
15987,
15997
],
[
16546,
16556
],
[
17019,
17029
],
[
17232,
17242
],
[
18063,
18073
],
[
18590,
18600
],
[
18640,
18650
],
[
18700,
18710
],
[
18749,
18759
],
[
20038,
20048
],
[
20103,
20113
],
[
20157,
20167
],
[
20221,
20231
],
[
58326,
58336
],
[
58425,
58435
],
[
60657,
60667
]
],
[
[
2440,
2458
],
[
95636,
95654
],
[
95893,
95911
],
[
96390,
96408
],
[
189522,
189540
]
],
[
[
2578,
2595
]
],
[
[
2758,
2768
],
[
152433,
152443
]
],
[
[
5331,
5340
],
[
177814,
177823
],
[
177951,
177960
],
[
189420,
189429
],
[
189452,
189461
],
[
190056,
190065
],
[
190117,
190126
],
[
190187,
190196
],
[
7028,
7037
],
[
7329,
7338
],
[
22591,
22600
],
[
23330,
23339
],
[
31601,
31610
],
[
62346,
62355
],
[
63217,
63226
],
[
63415,
63424
],
[
68658,
68667
],
[
71031,
71040
],
[
81530,
81539
],
[
82351,
82360
],
[
88877,
88886
],
[
91066,
91075
],
[
95736,
95745
],
[
96066,
96075
],
[
96489,
96498
],
[
130140,
130149
],
[
130172,
130181
],
[
140988,
140997
],
[
148063,
148072
],
[
148385,
148394
],
[
148417,
148426
],
[
151268,
151277
],
[
151329,
151338
],
[
160301,
160310
],
[
169085,
169094
],
[
176018,
176027
],
[
176630,
176639
],
[
181409,
181418
]
],
[
[
177988,
178001
],
[
140444,
140457
]
],
[
[
178020,
178034
],
[
9906,
9920
],
[
12983,
12997
],
[
14644,
14658
],
[
33389,
33403
],
[
37450,
37464
],
[
184173,
184187
]
],
[
[
178642,
178655
],
[
11921,
11934
],
[
178287,
178300
]
],
[
[
180275,
180288
],
[
14435,
14448
],
[
14854,
14867
]
],
[
[
181275,
181285
],
[
9372,
9382
],
[
31644,
31654
],
[
31837,
31847
],
[
183685,
183695
]
],
[
[
183172,
183196
],
[
7863,
7887
]
],
[
[
184285,
184300
],
[
31457,
31472
],
[
184058,
184073
]
],
[
[
184725,
184740
],
[
182040,
182055
],
[
183033,
183048
]
],
[
[
185117,
185142
],
[
182418,
182443
]
],
[
[
186143,
186166
],
[
182208,
182231
]
],
[
[
186680,
186701
],
[
184998,
185019
],
[
185977,
185998
],
[
186561,
186582
]
],
[
[
187461,
187482
],
[
9631,
9652
],
[
183455,
183476
]
],
[
[
187886,
187897
],
[
178437,
178448
]
],
[
[
188896,
188913
],
[
24988,
25005
]
],
[
[
189209,
189217
],
[
59617,
59625
]
],
[
[
189390,
189418
],
[
189437,
189440
],
[
189469,
189472
]
],
[
[
189546,
189553
],
[
190076,
190083
]
],
[
[
190264,
190268
],
[
190273,
190277
]
]
] |
import geopandas
import shapely.geometry
gdf = geopandas.GeoDataFrame(geometry=[shapely.geometry.Point(x, x) for x in [5,4,3,2]])
gdf.index.name = 'id'
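# write the same frame twice: once explicitly asking for the named index to
# be written as a field (index=True), and once with the default index handling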
gdf.to_file("test.geojson", index=True, driver='GeoJSON')
gdf.to_file("test.geojson1", driver='GeoJSON') | [
[
[
7,
16
],
[
47,
56
]
],
[
[
24,
40
],
[
80,
87
]
],
[
[
41,
44
],
[
130,
133
],
[
152,
155
],
[
210,
213
]
]
] |
r"""
Semimonomial transformation group
The semimonomial transformation group of degree `n` over a ring `R` is
the semidirect product of the monomial transformation group of degree `n`
(also known as the complete monomial group over the group of units
`R^{\times}` of `R`) and the group of ring automorphisms.
The multiplication of two elements `(\phi, \pi, \alpha)(\psi, \sigma, \beta)`
with
- `\phi, \psi \in {R^{\times}}^n`
- `\pi, \sigma \in S_n` (with the multiplication `\pi\sigma`
done from left to right (like in GAP) --
that is, `(\pi\sigma)(i) = \sigma(\pi(i))` for all `i`.)
- `\alpha, \beta \in Aut(R)`
is defined by
.. MATH::
(\phi, \pi, \alpha)(\psi, \sigma, \beta) =
(\phi \cdot \psi^{\pi, \alpha}, \pi\sigma, \alpha \circ \beta)
where
`\psi^{\pi, \alpha} = (\alpha(\psi_{\pi(1)-1}), \ldots, \alpha(\psi_{\pi(n)-1}))`
and the multiplication of vectors is defined elementwise. (The indexing
of vectors is `0`-based here, so `\psi = (\psi_0, \psi_1, \ldots, \psi_{n-1})`.)
.. TODO::
Up to now, this group is only implemented for finite fields because of
the limited support of automorphisms for arbitrary rings.
AUTHORS:
- Thomas Feulner (2012-11-15): initial version
EXAMPLES::
sage: S = SemimonomialTransformationGroup(GF(4, 'a'), 4)
sage: G = S.gens()
sage: G[0]*G[1]
((a, 1, 1, 1); (1,2,3,4), Ring endomorphism of Finite Field in a of size 2^2
Defn: a |--> a)
TESTS::
sage: TestSuite(S).run()
sage: TestSuite(S.an_element()).run()
"""
from sage.rings.integer import Integer
from sage.groups.group import FiniteGroup
from sage.structure.unique_representation import UniqueRepresentation
from sage.categories.action import Action
from sage.combinat.permutation import Permutation
from sage.groups.semimonomial_transformations.semimonomial_transformation import SemimonomialTransformation
class SemimonomialTransformationGroup(FiniteGroup, UniqueRepresentation):
r"""
A semimonomial transformation group over a ring.
The semimonomial transformation group of degree `n` over a ring `R` is
the semidirect product of the monomial transformation group of degree `n`
(also known as the complete monomial group over the group of units
`R^{\times}` of `R`) and the group of ring automorphisms.
The multiplication of two elements `(\phi, \pi, \alpha)(\psi, \sigma, \beta)`
with
- `\phi, \psi \in {R^{\times}}^n`
- `\pi, \sigma \in S_n` (with the multiplication `\pi\sigma`
done from left to right (like in GAP) --
that is, `(\pi\sigma)(i) = \sigma(\pi(i))` for all `i`.)
- `\alpha, \beta \in Aut(R)`
is defined by
.. MATH::
(\phi, \pi, \alpha)(\psi, \sigma, \beta) =
(\phi \cdot \psi^{\pi, \alpha}, \pi\sigma, \alpha \circ \beta)
where
`\psi^{\pi, \alpha} = (\alpha(\psi_{\pi(1)-1}), \ldots, \alpha(\psi_{\pi(n)-1}))`
    and the multiplication of vectors is defined elementwise. (The indexing
of vectors is `0`-based here, so `\psi = (\psi_0, \psi_1, \ldots, \psi_{n-1})`.)
.. TODO::
Up to now, this group is only implemented for finite fields because of
the limited support of automorphisms for arbitrary rings.
EXAMPLES::
sage: F.<a> = GF(9)
sage: S = SemimonomialTransformationGroup(F, 4)
sage: g = S(v = [2, a, 1, 2])
sage: h = S(perm = Permutation('(1,2,3,4)'), autom=F.hom([a**3]))
sage: g*h
((2, a, 1, 2); (1,2,3,4), Ring endomorphism of Finite Field in a of size 3^2 Defn: a |--> 2*a + 1)
sage: h*g
((2*a + 1, 1, 2, 2); (1,2,3,4), Ring endomorphism of Finite Field in a of size 3^2 Defn: a |--> 2*a + 1)
sage: S(g)
((2, a, 1, 2); (), Ring endomorphism of Finite Field in a of size 3^2 Defn: a |--> a)
sage: S(1)
((1, 1, 1, 1); (), Ring endomorphism of Finite Field in a of size 3^2 Defn: a |--> a)
"""
Element = SemimonomialTransformation
def __init__(self, R, len):
r"""
Initialization.
INPUT:
- ``R`` -- a ring
- ``len`` -- the degree of the monomial group
OUTPUT:
- the complete semimonomial group
EXAMPLES::
sage: F.<a> = GF(9)
sage: S = SemimonomialTransformationGroup(F, 4)
"""
if not R.is_field():
raise NotImplementedError('the ring must be a field')
self._R = R
self._len = len
from sage.categories.finite_groups import FiniteGroups
super(SemimonomialTransformationGroup, self).__init__(category=FiniteGroups())
def _element_constructor_(self, arg1, v=None, perm=None, autom=None, check=True):
r"""
        Coerce ``arg1`` into this semimonomial transformation group; if
        ``arg1`` is 0, then we will try to coerce ``(v, perm, autom)``.
INPUT:
- ``arg1`` (optional) -- either the integers 0, 1 or an element of ``self``
- ``v`` (optional) -- a vector of length ``self.degree()``
- ``perm`` (optional) -- a permutation of degree ``self.degree()``
- ``autom`` (optional) -- an automorphism of the ring
EXAMPLES::
sage: F.<a> = GF(9)
sage: S = SemimonomialTransformationGroup(F, 4)
sage: S(1)
((1, 1, 1, 1); (), Ring endomorphism of Finite Field in a of size 3^2 Defn: a |--> a)
sage: g = S(v=[1,1,1,a])
sage: S(g)
((1, 1, 1, a); (), Ring endomorphism of Finite Field in a of size 3^2 Defn: a |--> a)
sage: S(perm=Permutation('(1,2)(3,4)'))
((1, 1, 1, 1); (1,2)(3,4), Ring endomorphism of Finite Field in a of size 3^2 Defn: a |--> a)
sage: S(autom=F.hom([a**3]))
((1, 1, 1, 1); (), Ring endomorphism of Finite Field in a of size 3^2 Defn: a |--> 2*a + 1)
"""
from sage.categories.homset import End
R = self.base_ring()
if arg1 == 0:
if v is None:
v = [R.one()] * self.degree()
if perm is None:
perm = Permutation(range(1, self.degree() + 1))
if autom is None:
autom = R.hom(R.gens())
if check:
try:
v = [R(x) for x in v]
except TypeError:
raise TypeError('the vector attribute %s ' % v +
'should be iterable')
if len(v) != self.degree():
raise ValueError('the length of the vector is %s,' % len(v) +
' should be %s' % self.degree())
if not all(x.parent() is R and x.is_unit() for x in v):
raise ValueError('there is at least one element in the ' +
'list %s not lying in %s ' % (v, R) +
'or which is not invertible')
try:
perm = Permutation(perm)
except TypeError:
raise TypeError('the permutation attribute %s ' % perm +
'could not be converted to a permutation')
if len(perm) != self.degree():
txt = 'the permutation length is {}, should be {}'
raise ValueError(txt.format(len(perm), self.degree()))
try:
if autom.parent() != End(R):
autom = End(R)(autom)
except TypeError:
raise TypeError('%s of type %s' % (autom, type(autom)) +
' is not coerceable to an automorphism')
return self.Element(self, v, perm, autom)
else:
try:
if arg1.parent() is self:
return arg1
except AttributeError:
pass
try:
from sage.rings.integer import Integer
if Integer(arg1) == 1:
return self()
except TypeError:
pass
raise TypeError('the first argument must be an integer' +
' or an element of this group')
def base_ring(self):
r"""
Return the underlying ring of ``self``.
EXAMPLES::
sage: F.<a> = GF(4)
sage: SemimonomialTransformationGroup(F, 3).base_ring() is F
True
"""
return self._R
def degree(self) -> Integer:
r"""
Return the degree of ``self``.
EXAMPLES::
sage: F.<a> = GF(4)
sage: SemimonomialTransformationGroup(F, 3).degree()
3
"""
return self._len
def _an_element_(self):
r"""
Return an element of ``self``.
EXAMPLES::
sage: F.<a> = GF(4)
sage: SemimonomialTransformationGroup(F, 3).an_element() # indirect doctest
((a, 1, 1); (1,3,2), Ring endomorphism of Finite Field in a of size 2^2 Defn: a |--> a + 1)
"""
R = self.base_ring()
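        # a generic element: a primitive unit in the first coordinate, an
        # n-cycle permutation and, for non-prime fields, the Frobenius
        # x |--> x^p (the identity automorphism otherwise)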
v = [R.primitive_element()] + [R.one()] * (self.degree() - 1)
p = Permutation([self.degree()] + [i for i in range(1, self.degree())])
if not R.is_prime_field():
f = R.hom([R.gen()**R.characteristic()])
else:
f = R.Hom(R).identity()
return self(0, v, p, f)
def __contains__(self, item) -> bool:
r"""
EXAMPLES::
sage: F.<a> = GF(4)
sage: S = SemimonomialTransformationGroup(F, 3)
sage: 1 in S # indirect doctest
True
sage: a in S # indirect doctest
False
"""
try:
self(item, check=True)
except TypeError:
return False
return True
def gens(self):
r"""
Return a tuple of generators of ``self``.
EXAMPLES::
sage: F.<a> = GF(4)
sage: SemimonomialTransformationGroup(F, 3).gens()
[((a, 1, 1); (), Ring endomorphism of Finite Field in a of size 2^2
Defn: a |--> a), ((1, 1, 1); (1,2,3), Ring endomorphism of Finite Field in a of size 2^2
Defn: a |--> a), ((1, 1, 1); (1,2), Ring endomorphism of Finite Field in a of size 2^2
Defn: a |--> a), ((1, 1, 1); (), Ring endomorphism of Finite Field in a of size 2^2
Defn: a |--> a + 1)]
"""
from sage.groups.perm_gps.permgroup_named import SymmetricGroup
R = self.base_ring()
l = [self(v=([R.primitive_element()] + [R.one()] * (self.degree() - 1)))]
for g in SymmetricGroup(self.degree()).gens():
l.append(self(perm=Permutation(g)))
if R.is_field() and not R.is_prime_field():
l.append(self(autom=R.hom([R.primitive_element()**R.characteristic()])))
return l
def order(self) -> Integer:
r"""
Return the number of elements of ``self``.
EXAMPLES::
sage: F.<a> = GF(4)
sage: SemimonomialTransformationGroup(F, 5).order() == (4-1)**5 * factorial(5) * 2
True
"""
from sage.functions.other import factorial
from sage.categories.homset import End
n = self.degree()
R = self.base_ring()
if R.is_field():
multgroup_size = len(R) - 1
autgroup_size = R.degree()
else:
multgroup_size = R.unit_group_order()
autgroup_size = len([x for x in End(R) if x.is_injective()])
return multgroup_size**n * factorial(n) * autgroup_size
def _get_action_(self, X, op, self_on_left):
r"""
If ``self`` is the semimonomial group of degree `n` over `R`, then
there is the natural action on `R^n` and on matrices `R^{m \times n}`
for arbitrary integers `m` from the left.
See also:
:class:`~sage.groups.semimonomial_transformations.semimonomial_transformation_group.SemimonomialActionVec` and
:class:`~sage.groups.semimonomial_transformations.semimonomial_transformation_group.SemimonomialActionMat`
EXAMPLES::
sage: F.<a> = GF(4)
sage: s = SemimonomialTransformationGroup(F, 3).an_element()
sage: v = (F**3).0
sage: s*v # indirect doctest
(0, 1, 0)
sage: M = MatrixSpace(F, 3).one()
sage: s*M # indirect doctest
[ 0 1 0]
[ 0 0 1]
[a + 1 0 0]
"""
if self_on_left:
try:
A = SemimonomialActionVec(self, X)
return A
except ValueError:
pass
try:
A = SemimonomialActionMat(self, X)
return A
except ValueError:
pass
return None
def _repr_(self) -> str:
r"""
Return a string describing ``self``.
EXAMPLES::
sage: F.<a> = GF(4)
sage: SemimonomialTransformationGroup(F, 3) # indirect doctest
Semimonomial transformation group over Finite Field in a of size 2^2 of degree 3
"""
return ('Semimonomial transformation group over %s' % self.base_ring() +
' of degree %s' % self.degree())
def _latex_(self) -> str:
r"""
Method for describing ``self`` in LaTeX.
EXAMPLES::
sage: F.<a> = GF(4)
sage: latex(SemimonomialTransformationGroup(F, 3)) # indirect doctest
\left(\Bold{F}_{2^{2}}^3\wr\langle (1,2,3), (1,2) \rangle \right) \rtimes \operatorname{Aut}(\Bold{F}_{2^{2}})
"""
from sage.groups.perm_gps.permgroup_named import SymmetricGroup
ring_latex = self.base_ring()._latex_()
return ('\\left(' + ring_latex + '^' + str(self.degree()) + '\\wr' +
SymmetricGroup(self.degree())._latex_() +
' \\right) \\rtimes \\operatorname{Aut}(' + ring_latex + ')')
class SemimonomialActionVec(Action):
r"""
The natural left action of the semimonomial group on vectors.
The action is defined by:
`(\phi, \pi, \alpha)*(v_0, \ldots, v_{n-1}) :=
(\alpha(v_{\pi(1)-1}) \cdot \phi_0^{-1}, \ldots, \alpha(v_{\pi(n)-1}) \cdot \phi_{n-1}^{-1})`.
(The indexing of vectors is `0`-based here, so
`\psi = (\psi_0, \psi_1, \ldots, \psi_{n-1})`.)
"""
def __init__(self, G, V, check=True):
r"""
Initialization.
EXAMPLES::
sage: F.<a> = GF(4)
sage: s = SemimonomialTransformationGroup(F, 3).an_element()
sage: v = (F**3).1
sage: s*v # indirect doctest
(0, 0, 1)
"""
if check:
from sage.modules.free_module import FreeModule_generic
if not isinstance(G, SemimonomialTransformationGroup):
raise ValueError('%s is not a semimonomial group' % G)
if not isinstance(V, FreeModule_generic):
raise ValueError('%s is not a free module' % V)
if V.ambient_module() != V:
raise ValueError('%s is not equal to its ambient module' % V)
if V.dimension() != G.degree():
raise ValueError('%s has a dimension different to the degree of %s' % (V, G))
if V.base_ring() != G.base_ring():
raise ValueError('%s and %s have different base rings' % (V, G))
Action.__init__(self, G, V.dense_module())
def _act_(self, a, b):
r"""
Apply the semimonomial group element `a` to the vector `b`.
EXAMPLES::
sage: F.<a> = GF(4)
sage: s = SemimonomialTransformationGroup(F, 3).an_element()
sage: v = (F**3).1
sage: s*v # indirect doctest
(0, 0, 1)
"""
b = b.apply_map(a.get_autom())
b = self.codomain()(a.get_perm().action(b))
return b.pairwise_product(self.codomain()(a.get_v_inverse()))
class SemimonomialActionMat(Action):
r"""
The left action of
:class:`~sage.groups.semimonomial_transformations.semimonomial_transformation_group.SemimonomialTransformationGroup`
on matrices over the same ring whose number of columns is equal to the degree.
See :class:`~sage.groups.semimonomial_transformations.semimonomial_transformation_group.SemimonomialActionVec`
for the definition of the action on the row vectors of such a matrix.
"""
def __init__(self, G, M, check=True):
r"""
Initialization.
EXAMPLES::
sage: F.<a> = GF(4)
sage: s = SemimonomialTransformationGroup(F, 3).an_element()
sage: M = MatrixSpace(F, 3).one()
sage: s*M # indirect doctest
[ 0 1 0]
[ 0 0 1]
[a + 1 0 0]
"""
if check:
from sage.matrix.matrix_space import MatrixSpace
if not isinstance(G, SemimonomialTransformationGroup):
raise ValueError('%s is not a semimonomial group' % G)
if not isinstance(M, MatrixSpace):
raise ValueError('%s is not a matrix space' % M)
if M.ncols() != G.degree():
raise ValueError('the number of columns of %s' % M +
' and the degree of %s are different' % G)
if M.base_ring() != G.base_ring():
raise ValueError('%s and %s have different base rings' % (M, G))
Action.__init__(self, G, M)
def _act_(self, a, b):
r"""
Apply the semimonomial group element `a` to the matrix `b`.
EXAMPLES::
sage: F.<a> = GF(4)
sage: s = SemimonomialTransformationGroup(F, 3).an_element()
sage: M = MatrixSpace(F, 3).one()
sage: s*M # indirect doctest
[ 0 1 0]
[ 0 0 1]
[a + 1 0 0]
"""
return self.codomain()([a * x for x in b.rows()])
| [
[
[
1572,
1579
],
[
8543,
8550
],
[
10976,
10983
]
],
[
[
1611,
1622
],
[
1933,
1944
]
],
[
[
1672,
1692
],
[
1946,
1966
]
],
[
[
1728,
1734
],
[
14134,
14140
],
[
16135,
16141
],
[
15558,
15564
],
[
17627,
17633
]
],
[
[
1773,
1784
],
[
6111,
6122
],
[
7006,
7017
],
[
9222,
9233
],
[
10781,
10792
]
],
[
[
1866,
1892
],
[
3975,
4001
]
],
[
[
1901,
1932
],
[
4573,
4604
],
[
14942,
14973
],
[
17089,
17120
]
],
[
[
14112,
14133
],
[
12682,
12703
]
],
[
[
16113,
16134
],
[
12828,
12849
]
]
] |
import numpy as np
class StaticFns:
@staticmethod
def termination_fn(obs, act, next_obs):
done = np.array([False]).repeat(len(obs))
done = done[:,None]
return done
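# A minimal usage sketch, assuming batched (N, d) observation and action
# arrays: this termination function never signals "done", so it returns an
# all-False boolean column vector of shape (N, 1).
obs = np.zeros((4, 3))
act = np.zeros((4, 2))       # ignored by termination_fn
next_obs = np.zeros((4, 3))
done = StaticFns.termination_fn(obs, act, next_obs)
assert done.shape == (4, 1) and not done.any()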
| [
[
[
7,
18
],
[
116,
118
]
],
[
[
26,
35
]
]
] |
"""Base class for directed graphs."""
# Copyright (C) 2004-2015 by
#    Aric Hagberg <hagberg@lanl.gov>
#    Dan Schult <dschult@colgate.edu>
#    Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
from copy import deepcopy
import networkx_mod as nx
from networkx_mod.classes.graph import Graph
from networkx_mod.exception import NetworkXError
import networkx_mod.convert as convert
__author__ = """\n""".join(['Aric Hagberg (hagberg@lanl.gov)',
                            'Pieter Swart (swart@lanl.gov)',
                            'Dan Schult(dschult@colgate.edu)'])
class DiGraph(Graph):
"""
Base class for directed graphs.
A DiGraph stores nodes and edges with optional data, or attributes.
DiGraphs hold directed edges. Self loops are allowed but multiple
(parallel) edges are not.
Nodes can be arbitrary (hashable) Python objects with optional
key/value attributes.
Edges are represented as links between nodes with optional
key/value attributes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a NumPy matrix
or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
Graph
MultiGraph
MultiDiGraph
Examples
--------
Create an empty graph structure (a "null graph") with no nodes and
no edges.
>>> G = nx.DiGraph()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node(1)
Add the nodes from any container (a list, dict, set or
even the lines from a file or the nodes from another graph).
>>> G.add_nodes_from([2,3])
>>> G.add_nodes_from(range(100,110))
>>> H=nx.Graph()
>>> H.add_path([0,1,2,3,4,5,6,7,8,9])
>>> G.add_nodes_from(H)
In addition to strings and integers any hashable Python object
(except None) can represent a node, e.g. a customized node object,
or even another Graph.
>>> G.add_node(H)
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge(1, 2)
a list of edges,
>>> G.add_edges_from([(1,2),(1,3)])
or a collection of edges,
>>> G.add_edges_from(H.edges())
If some edges connect nodes not yet in the graph, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Attributes:**
Each graph, node, and edge can hold key/value attribute pairs
in an associated attribute dictionary (the keys must be hashable).
By default these are empty, but can be added or changed using
add_edge, add_node or direct manipulation of the attribute
dictionaries named graph, node and edge respectively.
>>> G = nx.DiGraph(day="Friday")
>>> G.graph
{'day': 'Friday'}
Add node attributes using add_node(), add_nodes_from() or G.node
>>> G.add_node(1, time='5pm')
>>> G.add_nodes_from([3], time='2pm')
>>> G.node[1]
{'time': '5pm'}
>>> G.node[1]['room'] = 714
>>> del G.node[1]['room'] # remove attribute
>>> G.nodes(data=True)
[(1, {'time': '5pm'}), (3, {'time': '2pm'})]
Warning: adding a node to G.node does not add it to the graph.
Add edge attributes using add_edge(), add_edges_from(), subscript
notation, or G.edge.
>>> G.add_edge(1, 2, weight=4.7 )
>>> G.add_edges_from([(3,4),(4,5)], color='red')
>>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
>>> G[1][2]['weight'] = 4.7
>>> G.edge[1][2]['weight'] = 4
**Shortcuts:**
Many common graph features allow python syntax to speed reporting.
>>> 1 in G # check if node in graph
True
>>> [n for n in G if n<3] # iterate through nodes
[1, 2]
>>> len(G) # number of nodes in graph
5
The fastest way to traverse all edges of a graph is via
adjacency_iter(), but the edges() method is often more convenient.
>>> for n,nbrsdict in G.adjacency_iter():
... for nbr,eattr in nbrsdict.items():
... if 'weight' in eattr:
... (n,nbr,eattr['weight'])
(1, 2, 4)
(2, 3, 8)
>>> G.edges(data='weight')
[(1, 2, 4), (2, 3, 8), (3, 4, None), (4, 5, None)]
**Reporting:**
Simple graph information is obtained using methods.
Iterator versions of many reporting methods exist for efficiency.
Methods exist for reporting nodes(), edges(), neighbors() and degree()
as well as the number of nodes and edges.
For details on these and other miscellaneous methods, see below.
**Subclasses (Advanced):**
The Graph class uses a dict-of-dict-of-dict data structure.
The outer dict (node_dict) holds adjacency lists keyed by node.
The next dict (adjlist) represents the adjacency list and holds
edge data keyed by neighbor. The inner dict (edge_attr) represents
the edge data and holds edge attribute values keyed by attribute names.
Each of these three dicts can be replaced by a user defined
dict-like object. In general, the dict-like features should be
maintained but extra features can be added. To replace one of the
dicts create a new graph class by changing the class(!) variable
holding the factory for that dict-like structure. The variable names
are node_dict_factory, adjlist_dict_factory and edge_attr_dict_factory.
node_dict_factory : function, optional (default: dict)
Factory function to be used to create the outer-most dict
in the data structure that holds adjacency lists keyed by node.
It should require no arguments and return a dict-like object.
adjlist_dict_factory : function, optional (default: dict)
Factory function to be used to create the adjacency list
dict which holds edge data keyed by neighbor.
It should require no arguments and return a dict-like object
edge_attr_dict_factory : function, optional (default: dict)
Factory function to be used to create the edge attribute
        dict which holds attribute values keyed by attribute name.
It should require no arguments and return a dict-like object.
Examples
--------
Create a graph object that tracks the order nodes are added.
>>> from collections import OrderedDict
>>> class OrderedNodeGraph(nx.Graph):
... node_dict_factory=OrderedDict
>>> G=OrderedNodeGraph()
>>> G.add_nodes_from( (2,1) )
>>> G.nodes()
[2, 1]
>>> G.add_edges_from( ((2,2), (2,1), (1,1)) )
>>> G.edges()
[(2, 1), (2, 2), (1, 1)]
Create a graph object that tracks the order nodes are added
and for each node track the order that neighbors are added.
>>> class OrderedGraph(nx.Graph):
... node_dict_factory = OrderedDict
... adjlist_dict_factory = OrderedDict
>>> G = OrderedGraph()
>>> G.add_nodes_from( (2,1) )
>>> G.nodes()
[2, 1]
>>> G.add_edges_from( ((2,2), (2,1), (1,1)) )
>>> G.edges()
[(2, 2), (2, 1), (1, 1)]
Create a low memory graph class that effectively disallows edge
attributes by using a single attribute dict for all edges.
This reduces the memory used, but you lose edge attributes.
>>> class ThinGraph(nx.Graph):
... all_edge_dict = {'weight': 1}
... def single_edge_dict(self):
... return self.all_edge_dict
... edge_attr_dict_factory = single_edge_dict
>>> G = ThinGraph()
>>> G.add_edge(2,1)
>>> G.edges(data= True)
[(1, 2, {'weight': 1})]
>>> G.add_edge(2,2)
>>> G[2][1] is G[2][2]
True
"""
def __init__(self, data=None, **attr):
"""Initialize a graph with edges, name, graph attributes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a NumPy matrix
or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
name : string, optional (default='')
An optional name for the graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
convert
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G = nx.Graph(name='my graph')
>>> e = [(1,2),(2,3),(3,4)] # list of edges
>>> G = nx.Graph(e)
Arbitrary graph attribute pairs (key=value) may be assigned
>>> G=nx.Graph(e, day="Friday")
>>> G.graph
{'day': 'Friday'}
"""
self.node_dict_factory = ndf = self.node_dict_factory
self.adjlist_dict_factory = self.adjlist_dict_factory
self.edge_attr_dict_factory = self.edge_attr_dict_factory
self.graph = {} # dictionary for graph attributes
self.node = ndf() # dictionary for node attributes
# We store two adjacency lists:
# the predecessors of node n are stored in the dict self.pred
# the successors of node n are stored in the dict self.succ=self.adj
self.adj = ndf() # empty adjacency dictionary
self.pred = ndf() # predecessor
self.succ = self.adj # successor
# attempt to load graph with data
if data is not None:
convert.to_networkx_mod_graph(data,create_using=self)
# load graph attributes (must be after convert)
self.graph.update(attr)
self.edge=self.adj
def add_node(self, n, attr_dict=None, **attr):
"""Add a single node n and update node attributes.
Parameters
----------
n : node
A node can be any hashable Python object except None.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of node attributes. Key/value pairs will
update existing data associated with the node.
attr : keyword arguments, optional
Set or change attributes using key=value.
See Also
--------
add_nodes_from
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_node(1)
>>> G.add_node('Hello')
>>> K3 = nx.Graph([(0,1),(1,2),(2,0)])
>>> G.add_node(K3)
>>> G.number_of_nodes()
3
Use keywords set/change node attributes:
>>> G.add_node(1,size=10)
>>> G.add_node(3,weight=0.4,UTM=('13S',382871,3972649))
Notes
-----
A hashable object is one that can be used as a key in a Python
dictionary. This includes strings, numbers, tuples of strings
and numbers, etc.
On many platforms hashable items also include mutables such as
NetworkX Graphs, though one should be careful that the hash
doesn't change on mutables.
"""
# set up attribute dict
if attr_dict is None:
attr_dict=attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(\
"The attr_dict argument must be a dictionary.")
if n not in self.succ:
self.succ[n] = self.adjlist_dict_factory()
self.pred[n] = self.adjlist_dict_factory()
self.node[n] = attr_dict
else: # update attr even if node already exists
self.node[n].update(attr_dict)
def add_nodes_from(self, nodes, **attr):
"""Add multiple nodes.
Parameters
----------
nodes : iterable container
A container of nodes (list, dict, set, etc.).
OR
A container of (node, attribute dict) tuples.
Node attributes are updated using the attribute dict.
attr : keyword arguments, optional (default= no attributes)
Update attributes for all nodes in nodes.
Node attributes specified in nodes as a tuple
take precedence over attributes specified generally.
See Also
--------
add_node
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_nodes_from('Hello')
>>> K3 = nx.Graph([(0,1),(1,2),(2,0)])
>>> G.add_nodes_from(K3)
>>> sorted(G.nodes(),key=str)
[0, 1, 2, 'H', 'e', 'l', 'o']
Use keywords to update specific node attributes for every node.
>>> G.add_nodes_from([1,2], size=10)
>>> G.add_nodes_from([3,4], weight=0.4)
Use (node, attrdict) tuples to update attributes for specific
nodes.
>>> G.add_nodes_from([(1,dict(size=11)), (2,{'color':'blue'})])
>>> G.node[1]['size']
11
>>> H = nx.Graph()
>>> H.add_nodes_from(G.nodes(data=True))
>>> H.node[1]['size']
11
"""
for n in nodes:
# keep all this inside try/except because
# CPython throws TypeError on n not in self.succ,
# while pre-2.7.5 ironpython throws on self.succ[n]
try:
if n not in self.succ:
self.succ[n] = self.adjlist_dict_factory()
self.pred[n] = self.adjlist_dict_factory()
self.node[n] = attr.copy()
else:
self.node[n].update(attr)
except TypeError:
nn,ndict = n
if nn not in self.succ:
self.succ[nn] = self.adjlist_dict_factory()
self.pred[nn] = self.adjlist_dict_factory()
newdict = attr.copy()
newdict.update(ndict)
self.node[nn] = newdict
else:
olddict = self.node[nn]
olddict.update(attr)
olddict.update(ndict)
def remove_node(self, n):
"""Remove node n.
Removes the node n and all adjacent edges.
Attempting to remove a non-existent node will raise an exception.
Parameters
----------
n : node
A node in the graph
Raises
-------
NetworkXError
If n is not in the graph.
See Also
--------
remove_nodes_from
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> G.edges()
[(0, 1), (1, 2)]
>>> G.remove_node(1)
>>> G.edges()
[]
"""
try:
nbrs=self.succ[n]
del self.node[n]
except KeyError: # NetworkXError if n not in self
raise NetworkXError("The node %s is not in the digraph."%(n,))
for u in nbrs:
del self.pred[u][n] # remove all edges n-u in digraph
del self.succ[n] # remove node from succ
for u in self.pred[n]:
del self.succ[u][n] # remove all edges n-u in digraph
del self.pred[n] # remove node from pred
def remove_nodes_from(self, nbunch):
"""Remove multiple nodes.
Parameters
----------
        nbunch : iterable container
A container of nodes (list, dict, set, etc.). If a node
in the container is not in the graph it is silently
ignored.
See Also
--------
remove_node
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> e = G.nodes()
>>> e
[0, 1, 2]
>>> G.remove_nodes_from(e)
>>> G.nodes()
[]
"""
for n in nbunch:
try:
succs=self.succ[n]
del self.node[n]
for u in succs:
del self.pred[u][n] # remove all edges n-u in digraph
del self.succ[n] # now remove node
for u in self.pred[n]:
del self.succ[u][n] # remove all edges n-u in digraph
del self.pred[n] # now remove node
except KeyError:
pass # silent failure on remove
def add_edge(self, u, v, attr_dict=None, **attr):
"""Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph.
Edge attributes can be specified with keywords or by providing
a dictionary with key/value pairs. See examples below.
Parameters
----------
u,v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of edge attributes. Key/value pairs will
update existing data associated with the edge.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edges_from : add a collection of edges
Notes
-----
Adding an edge that already exists updates the edge data.
Many NetworkX algorithms designed for weighted graphs use as
the edge weight a numerical value assigned to a keyword
which by default is 'weight'.
Examples
--------
The following all add the edge e=(1,2) to graph G:
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> e = (1,2)
>>> G.add_edge(1, 2) # explicit two-node form
>>> G.add_edge(*e) # single edge as tuple of two nodes
>>> G.add_edges_from( [(1,2)] ) # add edges from iterable container
Associate data to edges using keywords:
>>> G.add_edge(1, 2, weight=3)
>>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
"""
# set up attribute dict
if attr_dict is None:
attr_dict=attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(\
"The attr_dict argument must be a dictionary.")
# add nodes
if u not in self.succ:
self.succ[u]= self.adjlist_dict_factory()
self.pred[u]= self.adjlist_dict_factory()
self.node[u] = {}
if v not in self.succ:
self.succ[v]= self.adjlist_dict_factory()
self.pred[v]= self.adjlist_dict_factory()
self.node[v] = {}
# add the edge
datadict=self.adj[u].get(v,self.edge_attr_dict_factory())
datadict.update(attr_dict)
self.succ[u][v]=datadict
self.pred[v][u]=datadict
def add_edges_from(self, ebunch, attr_dict=None, **attr):
"""Add all the edges in ebunch.
Parameters
----------
ebunch : container of edges
Each edge given in the container will be added to the
            graph. The edges must be given as 2-tuples (u,v) or
3-tuples (u,v,d) where d is a dictionary containing edge
data.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of edge attributes. Key/value pairs will
update existing data associated with each edge.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edge : add a single edge
add_weighted_edges_from : convenient way to add weighted edges
Notes
-----
Adding the same edge twice has no effect but any edge data
will be updated when each duplicate edge is added.
Edge attributes specified in edges take precedence
over attributes specified generally.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edges_from([(0,1),(1,2)]) # using a list of edge tuples
>>> e = zip(range(0,3),range(1,4))
>>> G.add_edges_from(e) # Add the path graph 0-1-2-3
Associate data to edges
>>> G.add_edges_from([(1,2),(2,3)], weight=3)
>>> G.add_edges_from([(3,4),(1,4)], label='WN2898')
"""
# set up attribute dict
if attr_dict is None:
attr_dict=attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(\
"The attr_dict argument must be a dict.")
# process ebunch
for e in ebunch:
ne = len(e)
if ne==3:
u,v,dd = e
assert hasattr(dd,"update")
elif ne==2:
u,v = e
dd = {}
else:
raise NetworkXError(\
"Edge tuple %s must be a 2-tuple or 3-tuple."%(e,))
if u not in self.succ:
self.succ[u] = self.adjlist_dict_factory()
self.pred[u] = self.adjlist_dict_factory()
self.node[u] = {}
if v not in self.succ:
self.succ[v] = self.adjlist_dict_factory()
self.pred[v] = self.adjlist_dict_factory()
self.node[v] = {}
datadict=self.adj[u].get(v,self.edge_attr_dict_factory())
datadict.update(attr_dict)
datadict.update(dd)
self.succ[u][v] = datadict
self.pred[v][u] = datadict
def remove_edge(self, u, v):
"""Remove the edge between u and v.
Parameters
----------
u,v: nodes
Remove the edge between nodes u and v.
Raises
------
NetworkXError
If there is not an edge between u and v.
See Also
--------
remove_edges_from : remove a collection of edges
Examples
--------
>>> G = nx.Graph() # or DiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.remove_edge(0,1)
>>> e = (1,2)
>>> G.remove_edge(*e) # unpacks e from an edge tuple
>>> e = (2,3,{'weight':7}) # an edge with attribute data
>>> G.remove_edge(*e[:2]) # select first part of edge tuple
"""
try:
del self.succ[u][v]
del self.pred[v][u]
except KeyError:
raise NetworkXError("The edge %s-%s not in graph."%(u,v))
def remove_edges_from(self, ebunch):
"""Remove all edges specified in ebunch.
Parameters
----------
ebunch: list or container of edge tuples
Each edge given in the list or container will be removed
from the graph. The edges can be:
- 2-tuples (u,v) edge between u and v.
- 3-tuples (u,v,k) where k is ignored.
See Also
--------
remove_edge : remove a single edge
Notes
-----
Will fail silently if an edge in ebunch is not in the graph.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> ebunch=[(1,2),(2,3)]
>>> G.remove_edges_from(ebunch)
"""
for e in ebunch:
(u,v)=e[:2] # ignore edge data
if u in self.succ and v in self.succ[u]:
del self.succ[u][v]
del self.pred[v][u]
def has_successor(self, u, v):
"""Return True if node u has successor v.
This is true if graph has the edge u->v.
"""
return (u in self.succ and v in self.succ[u])
def has_predecessor(self, u, v):
"""Return True if node u has predecessor v.
This is true if graph has the edge u<-v.
"""
return (u in self.pred and v in self.pred[u])
def successors_iter(self,n):
"""Return an iterator over successor nodes of n.
neighbors_iter() and successors_iter() are the same.
"""
try:
return iter(self.succ[n])
except KeyError:
raise NetworkXError("The node %s is not in the digraph."%(n,))
def predecessors_iter(self,n):
"""Return an iterator over predecessor nodes of n."""
try:
return iter(self.pred[n])
except KeyError:
raise NetworkXError("The node %s is not in the digraph."%(n,))
def successors(self, n):
"""Return a list of successor nodes of n.
neighbors() and successors() are the same function.
"""
return list(self.successors_iter(n))
def predecessors(self, n):
"""Return a list of predecessor nodes of n."""
return list(self.predecessors_iter(n))
# digraph definitions
neighbors = successors
neighbors_iter = successors_iter
def edges_iter(self, nbunch=None, data=False, default=None):
"""Return an iterator over the edges.
Edges are returned as tuples with optional data
in the order (node, neighbor, data).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : string or bool, optional (default=False)
The edge attribute returned in 3-tuple (u,v,ddict[data]).
If True, return edge attribute dict in 3-tuple (u,v,ddict).
If False, return 2-tuple (u,v).
default : value, optional (default=None)
            Value used for edges that don't have the requested attribute.
Only relevant if data is not True or False.
Returns
-------
edge_iter : iterator
An iterator of (u,v) or (u,v,d) tuples of edges.
See Also
--------
edges : return a list of edges
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-edges.
Examples
--------
>>> G = nx.DiGraph() # or MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> G.add_edge(2,3,weight=5)
>>> [e for e in G.edges_iter()]
[(0, 1), (1, 2), (2, 3)]
>>> list(G.edges_iter(data=True)) # default data is {} (empty dict)
[(0, 1, {}), (1, 2, {}), (2, 3, {'weight': 5})]
>>> list(G.edges_iter(data='weight', default=1))
[(0, 1, 1), (1, 2, 1), (2, 3, 5)]
>>> list(G.edges_iter([0,2]))
[(0, 1), (2, 3)]
>>> list(G.edges_iter(0))
[(0, 1)]
"""
if nbunch is None:
nodes_nbrs=self.adj.items()
else:
nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch))
if data is True:
for n,nbrs in nodes_nbrs:
for nbr,ddict in nbrs.items():
yield (n,nbr,ddict)
elif data is not False:
for n,nbrs in nodes_nbrs:
for nbr,ddict in nbrs.items():
d=ddict[data] if data in ddict else default
yield (n,nbr,d)
else:
for n,nbrs in nodes_nbrs:
for nbr in nbrs:
yield (n,nbr)
# alias out_edges to edges
out_edges_iter=edges_iter
out_edges=Graph.edges
def in_edges_iter(self, nbunch=None, data=False):
"""Return an iterator over the incoming edges.
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : bool, optional (default=False)
If True, return edge attribute dict in 3-tuple (u,v,data).
Returns
-------
in_edge_iter : iterator
An iterator of (u,v) or (u,v,d) tuples of incoming edges.
See Also
--------
edges_iter : return an iterator of edges
"""
if nbunch is None:
nodes_nbrs=self.pred.items()
else:
nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch))
if data:
for n,nbrs in nodes_nbrs:
for nbr,data in nbrs.items():
yield (nbr,n,data)
else:
for n,nbrs in nodes_nbrs:
for nbr in nbrs:
yield (nbr,n)
def in_edges(self, nbunch=None, data=False):
"""Return a list of the incoming edges.
See Also
--------
edges : return a list of edges
"""
return list(self.in_edges_iter(nbunch, data))
def degree_iter(self, nbunch=None, weight=None):
"""Return an iterator for (node, degree).
The node degree is the number of edges adjacent to the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, degree).
See Also
--------
degree, in_degree, out_degree, in_degree_iter, out_degree_iter
Examples
--------
>>> G = nx.DiGraph() # or MultiDiGraph
>>> G.add_path([0,1,2,3])
>>> list(G.degree_iter(0)) # node 0 with degree 1
[(0, 1)]
>>> list(G.degree_iter([0,1]))
[(0, 1), (1, 2)]
"""
if nbunch is None:
nodes_nbrs=zip(iter(self.succ.items()),iter(self.pred.items()))
else:
nodes_nbrs=zip(
((n,self.succ[n]) for n in self.nbunch_iter(nbunch)),
((n,self.pred[n]) for n in self.nbunch_iter(nbunch)))
if weight is None:
for (n,succ),(n2,pred) in nodes_nbrs:
yield (n,len(succ)+len(pred))
else:
# edge weighted graph - degree is sum of edge weights
for (n,succ),(n2,pred) in nodes_nbrs:
yield (n,
sum((succ[nbr].get(weight,1) for nbr in succ))+
sum((pred[nbr].get(weight,1) for nbr in pred)))
def in_degree_iter(self, nbunch=None, weight=None):
"""Return an iterator for (node, in-degree).
The node in-degree is the number of edges pointing in to the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, in-degree).
See Also
--------
degree, in_degree, out_degree, out_degree_iter
Examples
--------
>>> G = nx.DiGraph()
>>> G.add_path([0,1,2,3])
>>> list(G.in_degree_iter(0)) # node 0 with degree 0
[(0, 0)]
>>> list(G.in_degree_iter([0,1]))
[(0, 0), (1, 1)]
"""
if nbunch is None:
nodes_nbrs=self.pred.items()
else:
nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch))
if weight is None:
for n,nbrs in nodes_nbrs:
yield (n,len(nbrs))
else:
# edge weighted graph - degree is sum of edge weights
for n,nbrs in nodes_nbrs:
yield (n, sum(data.get(weight,1) for data in nbrs.values()))
def out_degree_iter(self, nbunch=None, weight=None):
"""Return an iterator for (node, out-degree).
The node out-degree is the number of edges pointing out of the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
nd_iter : an iterator
The iterator returns two-tuples of (node, out-degree).
See Also
--------
degree, in_degree, out_degree, in_degree_iter
Examples
--------
>>> G = nx.DiGraph()
>>> G.add_path([0,1,2,3])
>>> list(G.out_degree_iter(0)) # node 0 with degree 1
[(0, 1)]
>>> list(G.out_degree_iter([0,1]))
[(0, 1), (1, 1)]
"""
if nbunch is None:
nodes_nbrs=self.succ.items()
else:
nodes_nbrs=((n,self.succ[n]) for n in self.nbunch_iter(nbunch))
if weight is None:
for n,nbrs in nodes_nbrs:
yield (n,len(nbrs))
else:
# edge weighted graph - degree is sum of edge weights
for n,nbrs in nodes_nbrs:
yield (n, sum(data.get(weight,1) for data in nbrs.values()))
def in_degree(self, nbunch=None, weight=None):
"""Return the in-degree of a node or nodes.
The node in-degree is the number of edges pointing in to the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
nd : dictionary, or number
A dictionary with nodes as keys and in-degree as values or
a number if a single node is specified.
See Also
--------
degree, out_degree, in_degree_iter
Examples
--------
>>> G = nx.DiGraph() # or MultiDiGraph
>>> G.add_path([0,1,2,3])
>>> G.in_degree(0)
0
>>> G.in_degree([0,1])
{0: 0, 1: 1}
>>> list(G.in_degree([0,1]).values())
[0, 1]
"""
if nbunch in self: # return a single node
return next(self.in_degree_iter(nbunch,weight))[1]
else: # return a dict
return dict(self.in_degree_iter(nbunch,weight))
def out_degree(self, nbunch=None, weight=None):
"""Return the out-degree of a node or nodes.
The node out-degree is the number of edges pointing out of the node.
Parameters
----------
nbunch : iterable container, optional (default=all nodes)
A container of nodes. The container will be iterated
through once.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used
as a weight. If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
nd : dictionary, or number
A dictionary with nodes as keys and out-degree as values or
a number if a single node is specified.
Examples
--------
>>> G = nx.DiGraph() # or MultiDiGraph
>>> G.add_path([0,1,2,3])
>>> G.out_degree(0)
1
>>> G.out_degree([0,1])
{0: 1, 1: 1}
>>> list(G.out_degree([0,1]).values())
[1, 1]
"""
if nbunch in self: # return a single node
return next(self.out_degree_iter(nbunch,weight))[1]
else: # return a dict
return dict(self.out_degree_iter(nbunch,weight))
def clear(self):
"""Remove all nodes and edges from the graph.
This also removes the name, and all graph, node, and edge attributes.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.clear()
>>> G.nodes()
[]
>>> G.edges()
[]
"""
self.succ.clear()
self.pred.clear()
self.node.clear()
self.graph.clear()
def is_multigraph(self):
"""Return True if graph is a multigraph, False otherwise."""
return False
def is_directed(self):
"""Return True if graph is directed, False otherwise."""
return True
def to_directed(self):
"""Return a directed copy of the graph.
Returns
-------
G : DiGraph
A deepcopy of the graph.
Notes
-----
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar D=DiGraph(G) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
Examples
--------
>>> G = nx.Graph() # or MultiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1), (1, 0)]
If already directed, return a (deep) copy
>>> G = nx.DiGraph() # or MultiDiGraph, etc
>>> G.add_path([0,1])
>>> H = G.to_directed()
>>> H.edges()
[(0, 1)]
"""
return deepcopy(self)
def to_undirected(self, reciprocal=False):
"""Return an undirected representation of the digraph.
Parameters
----------
reciprocal : bool (optional)
If True only keep edges that appear in both directions
in the original digraph.
Returns
-------
G : Graph
An undirected graph with the same name and nodes and
with edge (u,v,data) if either (u,v,data) or (v,u,data)
is in the digraph. If both edges exist in digraph and
their edge data is different, only one edge is created
with an arbitrary choice of which edge data to use.
You must check and correct for this manually if desired.
Notes
-----
If edges in both directions (u,v) and (v,u) exist in the
graph, attributes for the new undirected edge will be a combination of
the attributes of the directed edges. The edge data is updated
in the (arbitrary) order that the edges are encountered. For
more customized control of the edge attributes use add_edge().
This returns a "deepcopy" of the edge, node, and
graph attributes which attempts to completely copy
all of the data and references.
This is in contrast to the similar G=DiGraph(D) which returns a
shallow copy of the data.
See the Python copy module for more information on shallow
and deep copies, http://docs.python.org/library/copy.html.
Warning
-------
If you have subclassed DiGraph to use dict-like objects in the
data structure, those changes do not transfer to the Graph
created by this method.
"""
H=Graph()
H.name=self.name
H.add_nodes_from(self)
if reciprocal is True:
H.add_edges_from( (u,v,deepcopy(d))
for u,nbrs in self.adjacency_iter()
for v,d in nbrs.items()
if v in self.pred[u])
else:
H.add_edges_from( (u,v,deepcopy(d))
for u,nbrs in self.adjacency_iter()
for v,d in nbrs.items() )
H.graph=deepcopy(self.graph)
H.node=deepcopy(self.node)
return H
def reverse(self, copy=True):
"""Return the reverse of the graph.
The reverse is a graph with the same nodes and edges
but with the directions of the edges reversed.
Parameters
----------
copy : bool optional (default=True)
If True, return a new DiGraph holding the reversed edges.
            If False, the reverse graph is created using
the original graph (this changes the original graph).
"""
if copy:
H = self.__class__(name="Reverse of (%s)"%self.name)
H.add_nodes_from(self)
H.add_edges_from( (v,u,deepcopy(d)) for u,v,d
in self.edges(data=True) )
H.graph=deepcopy(self.graph)
H.node=deepcopy(self.node)
else:
self.pred,self.succ=self.succ,self.pred
self.adj=self.succ
H=self
return H
def subgraph(self, nbunch):
"""Return the subgraph induced on nodes in nbunch.
The induced subgraph of the graph contains the nodes in nbunch
and the edges between those nodes.
Parameters
----------
nbunch : list, iterable
A container of nodes which will be iterated through once.
Returns
-------
G : Graph
A subgraph of the graph with the same edge attributes.
Notes
-----
The graph, edge or node attributes just point to the original graph.
So changes to the node or edge structure will not be reflected in
the original graph while changes to the attributes will.
To create a subgraph with its own copy of the edge/node attributes use:
nx.Graph(G.subgraph(nbunch))
If edge attributes are containers, a deep copy can be obtained using:
G.subgraph(nbunch).copy()
For an inplace reduction of a graph to a subgraph you can remove nodes:
        G.remove_nodes_from([n for n in G if n not in set(nbunch)])
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> H = G.subgraph([0,1,2])
>>> H.edges()
[(0, 1), (1, 2)]
"""
bunch = self.nbunch_iter(nbunch)
# create new graph and copy subgraph into it
H = self.__class__()
# copy node and attribute dictionaries
for n in bunch:
H.node[n]=self.node[n]
# namespace shortcuts for speed
H_succ=H.succ
H_pred=H.pred
self_succ=self.succ
# add nodes
for n in H:
H_succ[n]=H.adjlist_dict_factory()
H_pred[n]=H.adjlist_dict_factory()
# add edges
for u in H_succ:
Hnbrs=H_succ[u]
for v,datadict in self_succ[u].items():
if v in H_succ:
# add both representations of edge: u-v and v-u
Hnbrs[v]=datadict
H_pred[v][u]=datadict
H.graph=self.graph
return H
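# --- Illustrative usage sketch (not part of the original networkx source) ---
# A minimal, hedged example of the methods defined above, assuming this class is
# exposed as nx.DiGraph under the NetworkX 1.x API that the docstrings use.
def _digraph_usage_sketch():
    import networkx as nx
    G = nx.DiGraph()
    G.add_path([0, 1, 2, 3])               # edges 0->1->2->3
    G.add_edge(1, 0)                       # make the 0-1 edge reciprocal
    U = G.to_undirected(reciprocal=True)   # keeps only edges present in both directions
    R = G.reverse(copy=True)               # new graph with every edge flipped
    H = G.subgraph([0, 1, 2])              # induced subgraph on nodes 0, 1 and 2
    return U.edges(), R.edges(), H.edges()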
| [
[
[
241,
249
],
[
38890,
38898
],
[
40783,
40791
],
[
41017,
41025
],
[
41168,
41176
],
[
41204,
41212
],
[
41886,
41894
],
[
41986,
41994
],
[
42026,
42034
]
],
[
[
257,
275
]
],
[
[
315,
320
],
[
612,
617
],
[
28076,
28081
],
[
40653,
40658
]
],
[
[
356,
369
],
[
11619,
11632
],
[
15263,
15276
],
[
18776,
18789
],
[
21204,
21217
],
[
21561,
21574
],
[
23123,
23136
],
[
24846,
24859
],
[
25095,
25108
]
],
[
[
377,
408
],
[
9840,
9847
]
],
[
[
409,
419
]
],
[
[
604,
611
]
]
] |
import logging
import logging.config
import os
from celery.utils.log import get_task_logger
from dotenv import load_dotenv
from flask import Flask
from flask_login import LoginManager
from config import config, Config
from .AfricasTalkingGateway import gateway
from .database import db, redis
dotenv_path = os.path.join(os.path.join(os.path.dirname(__file__), ".."), ".env")
load_dotenv(dotenv_path)
__version__ = "0.2.0"
__author__ = "[email protected]"
__description__ = "Nerds Microfinance application"
__email__ = "[email protected]"
__copyright__ = "MIT LICENCE"
login_manager = LoginManager()
celery_logger = get_task_logger(__name__)
def create_celery():
from celery import Celery
celery = Celery(
__name__,
backend=Config.CELERY_RESULT_BACKEND,
broker=Config.CELERY_BROKER_URL
)
return celery
celery = create_celery()
def create_app(config_name):
app = Flask(__name__)
# configure application
app.config.from_object(config[config_name])
config[config_name].init_app(app)
# setup login manager
login_manager.init_app(app)
# setup database
redis.init_app(app)
db.init_app(app)
# initialize africastalking gateway
gateway.init_app(app=app)
# setup celery
celery.conf.update(app.config)
class ContextTask(celery.Task):
def __call__(self, *args, **kwargs):
with app.app_context():
return self.run(*args, **kwargs)
celery.Task = ContextTask
# register blueprints
from app.ussd import ussd as ussd_bp
app.register_blueprint(ussd_bp)
# setup logging
from app.util import setup_logging
from config import basedir
if app.debug:
logging_level = logging.DEBUG
else:
logging_level = logging.INFO
path = os.path.join(basedir, "app_logger.yaml")
setup_logging(default_level=logging_level, logger_file_path=path)
return app
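# --- Illustrative wiring sketch (not part of the original module) ---
# Shows one hedged way to use the factory above; the "default" config key and the
# example task name are assumptions, not values defined in this codebase.
if __name__ == "__main__":
    flask_app = create_app(os.getenv("FLASK_CONFIG", "default"))
    @celery.task
    def ping():
        # Runs inside the Flask app context thanks to the ContextTask class above.
        return "pong"
    flask_app.run(debug=flask_app.debug)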
| [
[
[
7,
14
]
],
[
[
22,
36
],
[
1739,
1746
],
[
1787,
1794
]
],
[
[
44,
46
],
[
310,
312
],
[
323,
325
],
[
336,
338
],
[
1811,
1813
]
],
[
[
77,
92
],
[
623,
638
]
],
[
[
112,
123
],
[
378,
389
]
],
[
[
142,
147
],
[
920,
925
]
],
[
[
172,
184
],
[
591,
603
]
],
[
[
205,
211
],
[
991,
997
],
[
1016,
1022
]
],
[
[
213,
219
],
[
758,
764
],
[
803,
809
]
],
[
[
255,
262
],
[
1221,
1228
]
],
[
[
285,
287
],
[
1159,
1161
]
],
[
[
289,
294
],
[
1135,
1140
]
],
[
[
296,
307
],
[
390,
401
]
],
[
[
404,
415
]
],
[
[
426,
436
]
],
[
[
460,
475
]
],
[
[
511,
520
]
],
[
[
544,
557
]
],
[
[
575,
588
],
[
1081,
1094
]
],
[
[
607,
620
]
],
[
[
655,
668
],
[
863,
876
]
],
[
[
854,
860
],
[
1271,
1277
],
[
1325,
1331
],
[
1474,
1480
]
],
[
[
885,
895
]
]
] |
from botocore.exceptions import CapacityNotAvailableError
from botocore.retries import bucket
from tests import unittest
class FakeClock(bucket.Clock):
def __init__(self, timestamp_sequences):
self.timestamp_sequences = timestamp_sequences
self.sleep_call_amounts = []
def sleep(self, amount):
self.sleep_call_amounts.append(amount)
def current_time(self):
return self.timestamp_sequences.pop(0)
class TestTokenBucket(unittest.TestCase):
def setUp(self):
self.timestamp_sequences = [0]
self.clock = FakeClock(self.timestamp_sequences)
def create_token_bucket(self, max_rate=10, min_rate=0.1):
return bucket.TokenBucket(max_rate=max_rate, clock=self.clock,
min_rate=min_rate)
def test_can_acquire_amount(self):
self.timestamp_sequences.extend([
# Requests tokens every second, which is well below our
# 10 TPS fill rate.
1,
2,
3,
4,
5,
])
token_bucket = self.create_token_bucket(max_rate=10)
for _ in range(5):
self.assertTrue(token_bucket.acquire(1, block=False))
def test_can_change_max_capacity_lower(self):
# Requests at 1 TPS.
self.timestamp_sequences.extend([1, 2, 3, 4, 5])
token_bucket = self.create_token_bucket(max_rate=10)
# Request the first 5 tokens with max_rate=10
for _ in range(5):
self.assertTrue(token_bucket.acquire(1, block=False))
# Now scale the max_rate down to 1 on the 5th second.
self.timestamp_sequences.append(5)
token_bucket.max_rate = 1
# And then from seconds 6-10 we request at one per second.
self.timestamp_sequences.extend([6, 7, 8, 9, 10])
for _ in range(5):
self.assertTrue(token_bucket.acquire(1, block=False))
def test_max_capacity_is_at_least_one(self):
token_bucket = self.create_token_bucket()
self.timestamp_sequences.append(1)
token_bucket.max_rate = 0.5
self.assertEqual(token_bucket.max_rate, 0.5)
self.assertEqual(token_bucket.max_capacity, 1)
def test_acquire_fails_on_non_block_mode_returns_false(self):
self.timestamp_sequences.extend([
# Initial creation time.
0,
# Requests a token 1 second later.
1
])
token_bucket = self.create_token_bucket(max_rate=10)
with self.assertRaises(CapacityNotAvailableError):
token_bucket.acquire(100, block=False)
def test_can_retrieve_at_max_send_rate(self):
self.timestamp_sequences.extend([
# Request a new token every 100ms (10 TPS) for 2 seconds.
1 + 0.1 * i for i in range(20)
])
token_bucket = self.create_token_bucket(max_rate=10)
for _ in range(20):
self.assertTrue(token_bucket.acquire(1, block=False))
def test_acquiring_blocks_when_capacity_reached(self):
# This is 1 token every 0.1 seconds.
token_bucket = self.create_token_bucket(max_rate=10)
self.timestamp_sequences.extend([
# The first acquire() happens after .1 seconds.
0.1,
# The second acquire() will fail because we get tokens at
# 1 per 0.1 seconds. We will then sleep for 0.05 seconds until we
# get a new token.
0.15,
# And at 0.2 seconds we get our token.
0.2,
# And at 0.3 seconds we have no issues getting a token.
# Because we're using such small units (to avoid bloating the
# test run time), we have to go slightly over 0.3 seconds here.
0.300001,
])
self.assertTrue(token_bucket.acquire(1, block=False))
self.assertEqual(token_bucket.available_capacity, 0)
self.assertTrue(token_bucket.acquire(1, block=True))
self.assertEqual(token_bucket.available_capacity, 0)
self.assertTrue(token_bucket.acquire(1, block=False))
def test_rate_cant_go_below_min(self):
token_bucket = self.create_token_bucket(max_rate=1, min_rate=0.2)
self.timestamp_sequences.append(1)
token_bucket.max_rate = 0.1
self.assertEqual(token_bucket.max_rate, 0.2)
self.assertEqual(token_bucket.max_capacity, 1)
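# --- Illustrative sketch (not part of the test suite) ---
# The clock-injection pattern used above, in isolation: the fake clock's queued
# timestamps fully determine the bucket's fill level, so nothing really sleeps.
def _token_bucket_sketch():
    timestamps = [0, 1, 1, 1]  # creation time, then ~1 simulated second later (extras are spare)
    clock = FakeClock(timestamps)
    token_bucket = bucket.TokenBucket(max_rate=10, clock=clock, min_rate=0.1)
    # After one simulated second at a 10 TPS fill rate, one token is certainly available.
    return token_bucket.acquire(1, block=False)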
| [
[
[
32,
57
],
[
2528,
2553
]
],
[
[
87,
93
],
[
139,
145
],
[
683,
689
]
],
[
[
112,
120
],
[
468,
476
]
],
[
[
129,
138
],
[
569,
578
]
],
[
[
452,
467
]
]
] |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v7.resources.types import ad_group_simulation
from google.ads.googleads.v7.services.types import ad_group_simulation_service
from .base import AdGroupSimulationServiceTransport, DEFAULT_CLIENT_INFO
class AdGroupSimulationServiceGrpcTransport(AdGroupSimulationServiceTransport):
"""gRPC backend transport for AdGroupSimulationService.
Service to fetch ad group simulations.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_ad_group_simulation(
self,
) -> Callable[
[ad_group_simulation_service.GetAdGroupSimulationRequest],
ad_group_simulation.AdGroupSimulation,
]:
r"""Return a callable for the
get ad group simulation
method over gRPC.
Returns the requested ad group simulation in full detail.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
Returns:
Callable[[~.GetAdGroupSimulationRequest],
~.AdGroupSimulation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_ad_group_simulation" not in self._stubs:
self._stubs[
"get_ad_group_simulation"
] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v7.services.AdGroupSimulationService/GetAdGroupSimulation",
request_serializer=ad_group_simulation_service.GetAdGroupSimulationRequest.serialize,
response_deserializer=ad_group_simulation.AdGroupSimulation.deserialize,
)
return self._stubs["get_ad_group_simulation"]
__all__ = ("AdGroupSimulationServiceGrpcTransport",)
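# --- Illustrative sketch (not generated code) ---
# One hedged way to construct the transport with an explicit channel; the call relies
# on application-default credentials at runtime, which is an environmental assumption.
def _transport_sketch():
    channel = AdGroupSimulationServiceGrpcTransport.create_channel(
        "googleads.googleapis.com",
    )
    transport = AdGroupSimulationServiceGrpcTransport(channel=channel)
    return transport.get_ad_group_simulation  # unary-unary callable for the RPC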
| [
[
[
607,
615
],
[
5057,
5065
]
],
[
[
635,
643
],
[
2017,
2025
],
[
8992,
9000
]
],
[
[
645,
649
]
],
[
[
651,
659
],
[
2150,
2158
],
[
7614,
7622
]
],
[
[
661,
669
],
[
1890,
1898
],
[
7623,
7631
]
],
[
[
671,
676
],
[
2030,
2035
]
],
[
[
706,
718
],
[
8587,
8599
]
],
[
[
763,
771
],
[
2193,
2201
]
],
[
[
807,
811
],
[
5429,
5433
],
[
6738,
6742
]
],
[
[
852,
863
]
],
[
[
919,
933
],
[
5934,
5948
]
],
[
[
958,
962
],
[
1929,
1933
],
[
2092,
2096
],
[
5774,
5778
],
[
7673,
7677
],
[
8794,
8798
]
],
[
[
1032,
1051
],
[
9077,
9096
],
[
10324,
10343
]
],
[
[
1103,
1130
],
[
9011,
9038
],
[
10219,
10246
]
],
[
[
1149,
1182
],
[
1250,
1283
]
],
[
[
1184,
1203
],
[
2227,
2246
]
],
[
[
1212,
1249
]
],
[
[
10445,
10452
]
]
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Letter-color Consistency test
O.Colizoli 2020
Each letter of the alphabet in random order x 2
Color wheel opens at a randomized color on each trial (but does not turn)
Python 2.7
"""
# data saved in ~/LogFiles/sub-XXX
# Import necessary modules
import random
import numpy as np
import pandas as pd
import os, time # for paths and data
from IPython import embed as shell
try:
import Tkinter as tk # py27
from tkColorChooser import askcolor
except:
import tkinter as tk
from tkinter.colorchooser import askcolor
# Get subject number via tkinter (command line doesn't work in PsychoPy)
subject_ID = []
session = []
## INPUT WINDOW
class GetInput():
def __init__(self):
self.root2 = tk.Tk()
self.root2.title("Subject and Session")
# always put in same location
w = 400 # width for the Tk root
h = 200 # height for the Tk root
# get screen width and height
ws = self.root2.winfo_screenwidth() # width of the screen
hs = self.root2.winfo_screenheight() # height of the screen
# calculate x and y coordinates for the Tk root window
x = (ws/6) - (w/6)
y = (hs/6) - (h/6)
self.root2.geometry('%dx%d+%d+%d' % (w, h, x, y))
# Subject
self.e = tk.Entry(self.root2)
self.e.insert(0, 'Subject Number')
self.e.pack()
self.e.focus_set()
# Session
self.e2 = tk.Entry(self.root2)
self.e2.insert(0, 'Session')
self.e2.pack()
self.e2.focus_set()
txt='If each letter of the alphabet\
\nwere to have a unique color,\
\nwhat color would it have?\
\n\nThere are no right or wrong answers.'
# instructions
self.instr = tk.Label(self.root2, bg='white', text=txt, font=("Helvetica", 14))
self.instr.pack()
b = tk.Button(self.root2,text='OK',command=self.get_input)
b.pack(side='bottom')
self.root2.mainloop()
def get_input(self):
subj_str = self.e.get()
sess_str = self.e2.get()
subject_ID.append(subj_str)
session.append(sess_str)
self.root2.destroy()
## ASK INPUT
app = GetInput() # subject and session
subject_ID = int(subject_ID[0])
session = int(session[0])
## Create LogFile folder cwd/LogFiles
cwd = os.getcwd()
logfile_dir = os.path.join(cwd,'LogFiles','sub-{}'.format(subject_ID),'sess-{}'.format(session),'behav')
if not os.path.isdir(logfile_dir):
os.makedirs(logfile_dir)
timestr = time.strftime("%Y%m%d-%H%M%S")
output_alphabet = os.path.join(logfile_dir,'sub-{}_sess-{}_task-consistency_events_{}.tsv'.format(subject_ID,session,timestr))
### CONSISTENCY TASK ###
alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
#alphabet = ['a','b','c']
REPS = 2 # number of times to repeat whole alphabet
RGBS = [] # save output
L = '2' # place holder
class Test():
def __init__(self):
self.counter = 1
self.root = tk.Tk()
self.root.title("Subject {} Session {}".format(subject_ID, session))
# always put in same location
# get screen width and height
ws = self.root.winfo_screenwidth() # width of the screen
hs = self.root.winfo_screenheight() # height of the screen
# open in full screen
self.root.geometry('%dx%d+%d+%d' % (ws, hs, 0, 0))
self.open1 = tk.Button(self.root, text='Pick a color:', command=self.pick_a_color, font=('Helvetica', '36'),padx=5, pady=5)
self.open1.pack(fill=tk.X, expand=False)
self.letter = tk.Label(self.root, bg='white', text=L, font=("Helvetica", 90))
self.letter.pack()
self.root.mainloop()
def quit(self):
RGBS.append( [L ,self.RGB, self.HEX, abc] )
self.root.destroy()
def pick_a_color(self,):
        # Keep the color chooser dialog from opening on top of the root window
self.RGB,self.HEX = askcolor((random.randint(0,255), random.randint(0,255), random.randint(0,255)), parent=None, title='Pick a color: {}'.format(L) )
self.letter.configure(fg = self.HEX)
if self.counter:
exit_button = tk.Button(self.root, text='FINISHED', command=self.quit, font=('Helvetica', '28'))
exit_button.pack()
self.counter = 0
self.root.mainloop()
# MAIN LOOP
abc = 1 # round
for R in np.arange(REPS):
random.shuffle(alphabet)
# Open a new GUI per letter
for L in alphabet:
app = Test()
# save colors on each trial to prevent losing data
DFS = pd.DataFrame(RGBS)
print(RGBS)
try:
DFS.columns = ["letter","rgb","hex","choice"]
DFS['subject'] = np.repeat(subject_ID,len(DFS))
DFS['r'] = [c[0] for c in DFS['rgb']]
DFS['g'] = [c[1] for c in DFS['rgb']]
DFS['b'] = [c[2] for c in DFS['rgb']]
except:
# clicked window away
pass
DFS.to_csv(output_alphabet, sep='\t') # save all alphabet/preferences for both groups (also in case it goes wrong)
abc+=1
####################################
## SAVE OUTPUT & determine conditions
print(RGBS)
print('consistency test - success!')
##### OUTPUT FIGURE WITH COLORS #####
# Sort and show letters x 2 side by side
del tk # py27
del askcolor
import matplotlib.pyplot as plt # doesn't work together with tkinter
import seaborn as sns
fig = plt.figure(figsize=(10,5))
# Sort so the same letters go side by side for each choice
try:
DFS.sort_values(by=['choice', 'letter'],inplace=True)
except:
DFS = DFS.sort(['choice', 'letter'])
DFS.reset_index(inplace=True)
for i,A in enumerate(alphabet):
ax = fig.add_subplot(6,5,i+1)
ax.text(0.5, 0.5, DFS['letter'][i], color=DFS['hex'][i],fontsize=18)
ax.text(0.25, 0.5, DFS['letter'][i+len(alphabet)], color=DFS['hex'][i+len(alphabet)],fontsize=18)
ax.set_axis_off()
sns.despine(offset=10, trim=True)
plt.tight_layout()
fig.savefig(os.path.join(cwd,'LogFiles','sub-{}'.format(subject_ID),'sess-{}'.format(session),'behav','sub-{}_sess-{}_colors.pdf'.format(subject_ID,session)))
print('success: sub-{}_sess-{}_colors.pdf'.format(subject_ID,session))
| [
[
[
305,
311
],
[
4525,
4531
],
[
4070,
4076
],
[
4093,
4099
],
[
4116,
4122
]
],
[
[
319,
330
],
[
4504,
4506
],
[
4863,
4865
]
],
[
[
338,
350
],
[
4723,
4725
]
],
[
[
358,
360
],
[
2405,
2407
],
[
2431,
2433
],
[
2530,
2532
],
[
2562,
2564
],
[
2647,
2649
],
[
6172,
6174
]
],
[
[
362,
366
],
[
2597,
2601
]
],
[
[
409,
423
]
],
[
[
440,
453
],
[
5488,
5490
],
[
763,
765
],
[
1320,
1322
],
[
1469,
1471
],
[
1803,
1805
],
[
1917,
1919
],
[
3112,
3114
],
[
3515,
3517
],
[
3655,
3657
],
[
3701,
3703
],
[
4286,
4288
]
],
[
[
492,
500
],
[
5503,
5511
],
[
4060,
4068
]
],
[
[
520,
533
],
[
5488,
5490
],
[
763,
765
],
[
1320,
1322
],
[
1469,
1471
],
[
1803,
1805
],
[
1917,
1919
],
[
3112,
3114
],
[
3515,
3517
],
[
3655,
3657
],
[
3701,
3703
],
[
4286,
4288
]
],
[
[
571,
579
],
[
5503,
5511
],
[
4060,
4068
]
],
[
[
655,
665
],
[
2319,
2329
]
],
[
[
671,
678
],
[
2348,
2355
]
],
[
[
706,
714
],
[
2267,
2275
]
],
[
[
2261,
2264
]
],
[
[
2302,
2312
],
[
2475,
2485
],
[
2727,
2737
],
[
4873,
4883
],
[
6216,
6226
],
[
6297,
6307
],
[
6369,
6379
],
[
2149,
2159
],
[
3175,
3185
]
],
[
[
2334,
2341
],
[
2504,
2511
],
[
2738,
2745
],
[
6245,
6252
],
[
6308,
6315
],
[
6380,
6387
],
[
2185,
2192
],
[
3187,
3194
]
],
[
[
2399,
2402
],
[
2444,
2447
],
[
6185,
6188
]
],
[
[
2417,
2428
],
[
2544,
2555
],
[
2574,
2585
],
[
2660,
2671
]
],
[
[
2587,
2594
],
[
2746,
2753
]
],
[
[
2629,
2644
],
[
5163,
5178
]
],
[
[
2782,
2790
],
[
4540,
4548
],
[
4604,
4612
],
[
5860,
5868
],
[
6021,
6029
],
[
6056,
6064
]
],
[
[
2926,
2930
],
[
4514,
4518
]
],
[
[
2979,
2983
],
[
4736,
4740
],
[
4756,
4760
],
[
5360,
5364
],
[
3858,
3862
]
],
[
[
3003,
3004
],
[
3738,
3739
],
[
3872,
3873
],
[
4185,
4186
]
],
[
[
3035,
3039
],
[
4634,
4638
]
],
[
[
4479,
4482
],
[
5271,
5274
],
[
3895,
3898
]
],
[
[
4499,
4500
]
],
[
[
4599,
4600
],
[
3738,
3739
],
[
3872,
3873
],
[
4185,
4186
]
],
[
[
4628,
4631
]
],
[
[
4717,
4720
],
[
4788,
4791
],
[
4888,
4891
],
[
4846,
4849
],
[
4943,
4946
],
[
4906,
4909
],
[
5004,
5007
],
[
4967,
4970
],
[
5065,
5068
],
[
5028,
5031
],
[
5152,
5155
],
[
5705,
5708
],
[
5777,
5780
],
[
5809,
5812
],
[
5927,
5930
],
[
5951,
5954
],
[
6001,
6004
],
[
6039,
6042
]
],
[
[
5519,
5543
],
[
5609,
5612
],
[
6141,
6144
]
],
[
[
5588,
5602
],
[
6107,
6110
]
],
[
[
5603,
5606
],
[
5880,
5883
],
[
6160,
6163
]
],
[
[
5771,
5774
],
[
5809,
5812
],
[
5927,
5930
],
[
5951,
5954
],
[
6001,
6004
],
[
6039,
6042
]
],
[
[
5843,
5844
],
[
5900,
5901
],
[
5941,
5942
],
[
5962,
5963
],
[
6015,
6016
],
[
6050,
6051
]
],
[
[
5845,
5846
]
],
[
[
5875,
5877
],
[
5909,
5911
],
[
5982,
5984
],
[
6084,
6086
]
]
] |
import pytest
from list_utils import *
from oracle import ColumnRecommendation, ColumnClassification
def test_find_one():
needle = 1
none = [0, 0, 5, 's']
beginning = [1, None, 9, 6, 0, 0]
end = ['x', '0', 1]
several = [0, 0, 3, 4, 1, 3, 2, 1, 3, 4]
assert find_one(none, needle) == False
assert find_one(beginning, needle)
assert find_one(end, needle)
assert find_one(several, needle)
def test_find_n():
assert find_n([2, 3, 4, 5, 6], 2, -1) == False
assert find_n([1, 2, 3, 4, 5], 42, 2) == False
assert find_n([1, 2, 3, 4, 5], 1, 2) == False
assert find_n([1, 2, 3, 2, 4, 5], 2, 2)
assert find_n([1, 2, 3, 4, 5, 4, 6, 4, 7, 4, 6], 4, 2)
assert find_n([1, 2, 3, 4], 'x', 0) == True
def test_find_streak():
assert find_streak([1, 2, 3, 4, 5], 4, -1) == False
assert find_streak([1, 2, 3, 4, 5], 42, 2) == False
assert find_streak([1, 2, 3, 4], 4, 1)
assert find_streak([1, 2, 3, 1, 2], 2, 2) == False
assert find_streak([1, 2, 3, 4, 5, 5, 5], 5, 3)
assert find_streak([5, 5, 5, 1, 2, 3, 4], 5, 3)
assert find_streak([1, 2, 5, 5, 5, 3, 4], 5, 3)
assert find_streak([1, 2, 3, 4, 5, 5, 5], 5, 4) == False
def test_first_elements():
original = [[0, 7, 3], [4, 0, 1]]
assert first_elements(original) == [0, 4]
def test_transpose():
original = [[0, 7, 3], [4, 0, 1]]
transposed = [[0, 4], [7, 0], [3, 1]]
assert transpose(original) == transposed
assert transpose(transpose(original)) == original
def test_zero_distance_displace():
l1 = [1, 2, 3, 4, 5, 6]
l2 = [1]
l3 = [[4, 5], ['x', 'o', 'c']]
assert displace([], 0) == []
assert displace(l1, 0) == l1
assert displace(l2, 0) == l2
assert displace(l3, 0) == l3
def test_positive_distance_displace():
l1 = [1, 2, 3, 4, 5, 6]
l2 = [1]
l3 = [[4, 5], ['x', 'o', 'c']]
l4 = [9, 6, 5]
assert displace([], 2) == []
assert displace(l1, 2) == [None, None, 1, 2, 3, 4]
assert displace(l2, 3, '-') == ['-']
assert displace(l3, 1, '#') == ['#', [4, 5]]
assert displace(l4, 3, 0) == [0, 0, 0]
def test_negative_distance_displace():
l1 = [1, 2, 3, 4, 5, 6]
l2 = [1]
l3 = [[4, 5], ['x', 'o', 'c']]
l4 = [9, 6, 5]
assert displace([], -2) == []
assert displace(l1, -2) == [3, 4, 5, 6, None, None]
assert displace(l2, -3, '-') == ['-']
assert displace(l3, -1, '#') == [['x', 'o', 'c'], '#']
assert displace(l4, -3, 0) == [0, 0, 0]
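# Illustrative reference implementation (an assumption, not the real list_utils code):
# one way displace() could be written so that every displace assertion above holds.
def _displace_reference(lst, distance, filler=None):
    if distance == 0 or not lst:
        return list(lst)
    pad = [filler] * min(abs(distance), len(lst))
    if distance > 0:
        return (pad + list(lst))[:len(lst)]
    return (list(lst) + pad)[-len(lst):]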
def test_reverse_list():
assert reverse_list([]) == []
assert reverse_list([1, 2, 3, 4, 5, 6]) == [6, 5, 4, 3, 2, 1]
def test_reverse_matrix():
assert reverse_matrix([]) == []
assert reverse_matrix([[0, 1, 2, 3], [0, 1, 2, 3]]) == [
[3, 2, 1, 0], [3, 2, 1, 0]]
def test_all_same():
assert all_same([9, 1, 2, 3, 4]) == False
assert all_same([[], [], []])
assert all_same([])
assert all_same([ColumnRecommendation(0, ColumnClassification.WIN),
ColumnRecommendation(2, ColumnClassification.WIN)])
assert all_same([ColumnRecommendation(0, ColumnClassification.MAYBE),
ColumnRecommendation(0, ColumnClassification.WIN)]) == False
def test_collapse_list():
assert collapse_list([]) == ''
assert collapse_list(['o', 'x', 'x', 'o']) == 'oxxo'
assert collapse_list(['x', 'x', None, None, None]) == 'xx...'
def test_collapse_matrix():
assert collapse_matrix([]) == ''
assert collapse_matrix([['x', 'x', None],
['o', 'x', 'x'],
['o', None, None]]) == 'xx.|oxx|o..'
def test_replace_all_in_list():
assert replace_all_in_list([None, 3, '546', 33, None], None, '#') == [
'#', 3, '546', 33, '#']
assert replace_all_in_list([1, 2, 3, 4, 5], 'e', 42) == [1, 2, 3, 4, 5]
assert replace_all_in_list([], 34, 43) == []
def test_replace_all_in_matrix():
    # normal case: the old value is present
assert replace_all_in_matrix([[1, 2, 3, 'n', 'n', None],
[4, 5, 'n']], 'n', '#') == [[1, 2, 3, '#', '#', None], [4, 5, '#']]
    # unusual case: the old value is absent
assert replace_all_in_matrix([[None, None, 2, True], [4, 5, '#']], 'k', 42) == [[
None, None, 2, True], [4, 5, '#']]
    # even more unusual case: a list of empty lists
assert replace_all_in_matrix([], None, 7) == []
assert replace_all_in_matrix([[], []], None, 7) == [[], []]
| [
[
[
7,
13
]
],
[
[
38,
39
],
[
285,
293
],
[
328,
336
],
[
367,
375
],
[
400,
408
],
[
458,
464
],
[
509,
515
],
[
560,
566
],
[
610,
616
],
[
654,
660
],
[
713,
719
],
[
787,
798
],
[
843,
854
],
[
899,
910
],
[
942,
953
],
[
997,
1008
],
[
1049,
1060
],
[
1101,
1112
],
[
1153,
1164
],
[
1282,
1296
],
[
1433,
1442
],
[
1478,
1487
],
[
1488,
1497
],
[
1646,
1654
],
[
1679,
1687
],
[
1712,
1720
],
[
1745,
1753
],
[
1916,
1924
],
[
1949,
1957
],
[
2004,
2012
],
[
2045,
2053
],
[
2094,
2102
],
[
2274,
2282
],
[
2308,
2316
],
[
2364,
2372
],
[
2406,
2414
],
[
2465,
2473
],
[
2536,
2548
],
[
2570,
2582
],
[
2665,
2679
],
[
2701,
2715
],
[
2821,
2829
],
[
2867,
2875
],
[
2901,
2909
],
[
2926,
2934
],
[
3072,
3080
],
[
3256,
3269
],
[
3291,
3304
],
[
3348,
3361
],
[
3444,
3459
],
[
3481,
3496
],
[
3669,
3688
],
[
3799,
3818
],
[
3875,
3894
],
[
3994,
4015
],
[
4192,
4213
],
[
4366,
4387
],
[
4418,
4439
]
],
[
[
59,
79
],
[
2936,
2956
],
[
3008,
3028
],
[
3082,
3102
],
[
3156,
3176
]
],
[
[
81,
101
],
[
2960,
2980
],
[
3032,
3052
],
[
3106,
3126
],
[
3180,
3200
]
],
[
[
108,
121
]
],
[
[
432,
443
]
],
[
[
756,
772
]
],
[
[
1209,
1228
]
],
[
[
1323,
1337
]
],
[
[
1527,
1554
]
],
[
[
1773,
1804
]
],
[
[
2132,
2163
]
],
[
[
2504,
2521
]
],
[
[
2631,
2650
]
],
[
[
2793,
2806
]
],
[
[
3223,
3241
]
],
[
[
3409,
3429
]
],
[
[
3630,
3654
]
],
[
[
3919,
3945
]
]
] |
import logging
import azure.functions as func
import json
import os
from azure.cosmosdb.table.tableservice import TableService
from azure.cosmosdb.table.models import Entity
def main(req: func.HttpRequest) -> func.HttpResponse:
logging.info('Python HTTP trigger function processed a request.')
# Connect to Azure Table Storage
table_service = TableService(connection_string= os.environ['AzureWebJobsStorage'])
table_service.create_table('intents') if not table_service.exists('intents') else None
req_body = req.get_json()
if req_body:
# Create row to be saved on Azure Table Storage
print(req_body.get('ConversationId'))
data = req_body
data["PartitionKey"] = req_body.get('ConversationId')
data["RowKey"] = req_body.get('MessageId')
# Save row on Azure Table Storage
table_service.insert_or_replace_entity('intents', data)
return func.HttpResponse(f"Row {req_body.get('MessageId')} for {req_body.get('ConversationId')} added")
else:
return func.HttpResponse(
"Please pass valid request body",
status_code=400
) | [
[
[
7,
14
],
[
233,
240
]
],
[
[
22,
45
],
[
210,
214
],
[
189,
193
],
[
943,
947
],
[
1065,
1069
]
],
[
[
53,
57
]
],
[
[
65,
67
],
[
389,
391
]
],
[
[
114,
126
],
[
357,
369
]
],
[
[
167,
173
]
],
[
[
179,
183
]
]
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 8 15:25:03 2018
@author: bathmann
"""
from .TreeDynamicTimeStepping import TreeDynamicTimeStepping
from .TreeDynamicTimeLoop import TreeDynamicTimeLoop
from .SimpleTimeLoop.SimpleLoop import Loop
| [
[
[
148,
171
]
],
[
[
205,
224
]
],
[
[
264,
268
]
]
] |
'''
File: detect_forest_change.py
Author: Min Feng
Version: 0.1
Create: 2018-04-20 15:42:37
Description: detect forest changes from forest probability layers and tree cover layers
'''
import logging
def _load_tcc(f_tcc, msk):
from gio import geo_raster_ex as gx
from gio import config
import numpy as np
_bnd = gx.read_block(f_tcc, msk)
if _bnd is None:
return None
_dat = np.zeros(msk.data.shape, dtype=np.uint8)
_m_tcc = config.getfloat('conf', 'min_tcc')
_idx = _bnd.data >= _m_tcc
_dat[_idx] = 100
_idx = _bnd.data > 100
_dat[_idx] = _bnd.data[_idx]
return msk.from_grid(_dat, nodata=255)
def _task(tile, d_out, d_ref, opts):
from gio import file_unzip
from gio import config
from gio import file_mag
from gio import metadata
from gio import geo_raster as ge
from gio import mod_filter
import numpy as np
import os
import re
_tag = tile.tag
_ttt = config.get('conf', 'test_tile')
if _ttt and _tag not in _ttt.replace(' ', '').split(','):
return
_m = re.match(r'(h\d+)(v\d+)', _tag)
_h = _m.group(1)
_v = _m.group(2)
_d_out = os.path.join(d_out, _h, _v, _tag)
_d_ref = os.path.join(d_ref, _h, _v, _tag)
_f_met = os.path.join(_d_out, '%s_met.txt' % _tag)
_fname = lambda t: os.path.join(_d_out, '%s_%s.tif' % (_tag, t))
_fname_ref = lambda t: os.path.join(_d_ref, '%s_%s.tif' % (_tag, t))
_fname_m1 = lambda t, a='_m1': _fname('%s_n0%s' % (t, a))
# if not file_mag.get(_f_met).exists():
# logging.info('skip non-existing result for %s' % _tag)
# return
if not file_mag.get(_fname_m1('loss_year')).exists():
logging.info('skip non-existing result for %s' % _tag)
return
if (not _ttt) and file_mag.get(_fname_m1('esta_year')).exists() and \
(not config.getboolean('conf', 'over_write', False)):
logging.info('skip processed esta result for %s' % _tag)
return
_b_loss_year = ge.open(_fname_m1('loss_year')).get_band().cache()
_b_gain_year = ge.open(_fname_m1('gain_year')).get_band().cache()
_b_loss_prob = ge.open(_fname_m1('loss_prob')).get_band().cache()
_b_gain_prob = ge.open(_fname_m1('gain_prob')).get_band().cache()
_f_tcc = config.get('conf', 'latest_tcc')
_b_prob = _load_tcc(_f_tcc, _b_loss_year) if _f_tcc else ge.open(_fname_ref('age_prob')).get_band().cache()
if _b_prob is None:
logging.info('forced to use age_prob layer %s' % _fname_ref('age_prob'))
_b_prob = ge.open(_fname_ref('age_prob')).get_band().cache()
_d_forest_prob = _b_prob.data
_d_loss = _b_loss_year.data
_d_gain = _b_gain_year.data
_d_esta = np.zeros(_d_forest_prob.shape, dtype=np.uint8)
_d_prob = np.empty(_d_forest_prob.shape, dtype=np.float32)
_d_prob.fill(100)
_d_prob[_b_prob.data == _b_prob.nodata] = -9999
_b_esta = _b_loss_year.from_grid(_d_esta, nodata=255)
_b_esta.color_table = ge.load_colortable(config.get('conf', 'color'))
_d_esta[_d_forest_prob > 100] = _d_forest_prob[_d_forest_prob > 100]
for _y in range(1970, 2021):
_y = _y - 1970
_idx = _d_loss == _y
_d_esta[_idx] = 100
_d_prob[_idx] = _b_loss_prob.data[_idx]
_idx = _d_gain == _y
_d_esta[_idx] = _y
_d_prob[_idx] = _b_gain_prob.data[_idx]
_d_esta[_d_forest_prob < 50] = 100
_d_test = (_d_esta < 100).astype(np.uint8)
_d_test[(_d_esta < 100) & (_d_esta > 0)] = 1
_b_test = _b_esta.from_grid(_d_test, nodata=255)
mod_filter.filter_band_mmu(_b_test, area=config.getfloat('conf', 'min_patch'))
_d_esta[(_d_esta == 100) & (_b_test.data == 1)] = 0
_d_test = ((_d_esta > 0) & (_d_esta <= 100)).astype(np.uint8)
_d_test[(_d_esta < 100) & (_d_esta > 0)] = 1
_b_test = _b_esta.from_grid(_d_test, nodata=255)
mod_filter.filter_band_mmu(_b_test, area=config.getfloat('conf', 'min_patch'))
_d_esta[(_d_esta == 0) & (_b_test.data == 1)] = 100
with file_unzip.file_unzip() as _zip:
_zip.save(_b_esta, _fname_m1('esta_year'))
_zip.save(_b_esta.from_grid(_d_prob, nodata=-9999), _fname_m1('esta_prob'))
return True
def main(opts):
import logging
from gio import config
from gio import file_mag
from gio import global_task
import os
_d_inp = config.get('conf', 'input')
_d_ref = config.get('conf', 'refer', _d_inp)
_f_mak = file_mag.get(os.path.join(_d_inp, 'tasks.txt'))
_ts = global_task.load(_f_mak)
from gio import multi_task
_rs = multi_task.run(_task, [(_t, os.path.join(_d_inp, 'data'), os.path.join(_d_ref, 'data'), opts) for _t in multi_task.load(_ts, opts)], opts)
print('processed', len([_r for _r in _rs if _r]), 'tiles')
def usage():
_p = environ_mag.usage(True)
_p.add_argument('-i', '--input', dest='input')
_p.add_argument('-r', '--refer', dest='refer')
_p.add_argument('--latest-tcc', dest='latest_tcc')
_p.add_argument('-w', '--over-write', dest='over_write', type='bool')
_p.add_argument('--min-tcc', dest='min_tcc', type=int, default=30)
_p.add_argument('-m', '--min-patch', dest='min_patch', type=float, default=100 * 100)
_p.add_argument('--test-tile', dest='test_tile')
return _p
if __name__ == '__main__':
from gio import environ_mag
environ_mag.init_path()
environ_mag.run(main, [environ_mag.config(usage())])
| [
[
[
197,
204
],
[
1781,
1788
],
[
2009,
2016
],
[
2574,
2581
]
],
[
[
212,
221
],
[
2442,
2451
]
],
[
[
696,
701
],
[
4824,
4829
]
],
[
[
4424,
4428
],
[
5646,
5650
]
],
[
[
5019,
5024
],
[
5672,
5677
]
],
[
[
5584,
5595
],
[
5601,
5612
],
[
5630,
5641
],
[
5653,
5664
],
[
5038,
5049
]
]
] |
import datetime
import logging
import traceback
from dis_snek.models import ComponentContext
from dis_snek.models import InteractionContext
from ElevatorBot.misc.formating import embed_message
def get_now_with_tz() -> datetime.datetime:
"""Returns the current datetime (timezone aware)"""
return datetime.datetime.now(tz=datetime.timezone.utc)
def localize_datetime(obj: datetime.datetime) -> datetime.datetime:
"""Returns a timezone aware object, localized to the system timezone"""
return obj.astimezone()
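# Illustrative only: a quick, hedged sketch of the two datetime helpers above.
# Both results are timezone-aware; the naive input is localized to the system zone.
def _tz_helpers_sketch():
    now_utc = get_now_with_tz()                                   # aware, tz=UTC
    local = localize_datetime(datetime.datetime(2023, 1, 1, 12))  # aware, system tz
    return now_utc.tzinfo is not None and local.tzinfo is not None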
async def log_error(
ctx: InteractionContext | ComponentContext,
error: Exception,
situation: str,
) -> None:
"""Respond to the context and log error"""
if not ctx.responded:
await ctx.send(
embeds=embed_message(
"Error",
f"Sorry, something went wrong\nThe Error has been logged and will be worked on",
str(error),
)
)
# log the error
logger = logging.getLogger(situation)
logger.exception(
f"InteractionID '{ctx.interaction_id}' - Error {error} - Traceback: \n{''.join(traceback.format_tb(error.__traceback__))}"
)
    # raise the error again to make debugging easier
raise error
| [
[
[
7,
15
],
[
222,
230
],
[
309,
317
],
[
334,
342
],
[
408,
416
],
[
386,
394
]
],
[
[
23,
30
],
[
996,
1003
]
],
[
[
38,
47
],
[
1134,
1143
]
],
[
[
77,
93
],
[
585,
601
]
],
[
[
122,
140
],
[
564,
582
]
],
[
[
181,
194
],
[
773,
786
]
],
[
[
201,
216
]
],
[
[
363,
380
]
],
[
[
534,
1250
]
]
] |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetOrderCollectionByNameResult',
'AwaitableGetOrderCollectionByNameResult',
'get_order_collection_by_name',
]
@pulumi.output_type
class GetOrderCollectionByNameResult:
"""
Specifies the properties or parameters for an order collection. Order collection is a grouping of one or more orders.
"""
def __init__(__self__, id=None, location=None, name=None, order_ids=None, system_data=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if order_ids and not isinstance(order_ids, list):
raise TypeError("Expected argument 'order_ids' to be a list")
pulumi.set(__self__, "order_ids", order_ids)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="orderIds")
def order_ids(self) -> Sequence[str]:
"""
List of order ARM Ids which are part of an order collection.
"""
return pulumi.get(self, "order_ids")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Represents resource creation and update time
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetOrderCollectionByNameResult(GetOrderCollectionByNameResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetOrderCollectionByNameResult(
id=self.id,
location=self.location,
name=self.name,
order_ids=self.order_ids,
system_data=self.system_data,
tags=self.tags,
type=self.type)
def get_order_collection_by_name(order_collection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetOrderCollectionByNameResult:
"""
Specifies the properties or parameters for an order collection. Order collection is a grouping of one or more orders.
:param str order_collection_name: The name of the order collection
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['orderCollectionName'] = order_collection_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:edgeorder/v20201201preview:getOrderCollectionByName', __args__, opts=opts, typ=GetOrderCollectionByNameResult).value
return AwaitableGetOrderCollectionByNameResult(
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
order_ids=__ret__.order_ids,
system_data=__ret__.system_data,
tags=__ret__.tags,
type=__ret__.type)
| [
[
[
176,
184
]
],
[
[
192,
198
]
],
[
[
206,
220
],
[
476,
482
],
[
1992,
1998
],
[
2307,
2313
],
[
2490,
2496
],
[
2648,
2654
],
[
2879,
2885
],
[
3115,
3121
],
[
3287,
3293
],
[
907,
913
],
[
1073,
1079
],
[
1239,
1245
],
[
1414,
1420
],
[
1605,
1611
],
[
1779,
1785
],
[
1937,
1943
],
[
2264,
2270
],
[
2441,
2447
],
[
2603,
2609
],
[
2829,
2835
],
[
3063,
3069
],
[
3242,
3248
],
[
3481,
3487
],
[
4170,
4176
],
[
4727,
4733
],
[
4841,
4847
]
],
[
[
240,
243
]
],
[
[
245,
252
],
[
3160,
3167
]
],
[
[
254,
262
],
[
3151,
3159
],
[
4024,
4032
],
[
4100,
4108
],
[
4161,
4169
]
],
[
[
264,
272
],
[
2706,
2714
]
],
[
[
274,
279
]
],
[
[
296,
306
],
[
4802,
4812
]
],
[
[
308,
315
]
],
[
[
330,
337
]
],
[
[
339,
346
]
],
[
[
501,
531
],
[
3554,
3584
],
[
3710,
3740
],
[
4956,
4986
]
],
[
[
3514,
3553
],
[
4203,
4242
],
[
5006,
5045
]
],
[
[
3972,
4000
]
]
] |
#!/usr/bin/env python
"""
C.11.5 Index and Glossary (p211)
"""
import string, os
from plasTeX.Tokenizer import Token, EscapeSequence
from plasTeX import Command, Environment
from plasTeX.Logging import getLogger
from Sectioning import SectionUtils
try:
from pyuca import Collator
collator = Collator(os.path.join(os.path.dirname(__file__), 'allkeys.txt')).sort_key
except ImportError:
collator = lambda x: x.lower()
class IndexUtils(object):
""" Helper functions for generating indexes """
linkType = 'index'
level = Command.CHAPTER_LEVEL
class Index(Command):
"""
Utility class used to surface the index entries to the renderer
"""
def __init__(self, *args, **kwargs):
Command.__init__(self, *args, **kwargs)
self.pages = []
self.key = []
self.sortkey = ''
@property
def totallen(self):
""" Return the total number of entries generated by this entry """
total = 1
for item in self:
total += item.totallen
return total
def __repr__(self):
return '%s%s --> %s' % (''.join([x.source for x in self.key]),
', '.join([str(x) for x in self.pages]),
Command.__repr__(self))
class IndexGroup(list):
title = None
def invoke(self, tex):
if isinstance(self, Environment):
Environment.invoke(self, tex)
else:
Command.invoke(self, tex)
self.attributes['title'] = self.ownerDocument.createElement('indexname').expand(tex)
@property
def groups(self):
"""
Group index entries into batches according to the first letter
"""
batches = []
current = ''
for item in self:
try:
label = title = item.sortkey[0].upper()
if title in string.letters:
pass
elif title == '_':
title = '_ (Underscore)'
else:
label = title = 'Symbols'
except IndexError:
label = title = 'Symbols'
if current != title:
newgroup = self.IndexGroup()
newgroup.title = title
newgroup.id = label
batches.append(newgroup)
current = title
batches[-1].append(item)
for item in batches:
item[:] = self.splitColumns(item,
self.ownerDocument.config['document']['index-columns'])
return batches
def splitColumns(self, items, cols):
"""
Divide the index entries into the specified number of columns
Required Arguments:
items -- list of column entries
cols -- number of columns to create
Returns:
list of length `cols' containing groups of column entries
"""
entries = [(0,0)]
# Find the total number of entries
grandtotal = 0
for item in items:
entries.append((item.totallen, item))
grandtotal += entries[-1][0]
entries.pop(0)
entries.reverse()
# Get total number of entries per column
coltotal = int(grandtotal / cols)
# Group entries into columns
current = 0
output = [[]]
for num, item in entries:
current += num
if len(output) >= cols:
output[-1].append(item)
elif current > coltotal:
output.append([item])
current = num
elif current == coltotal:
output[-1].append(item)
output.append([])
current = 0
else:
output[-1].append(item)
output.reverse()
for item in output:
item.reverse()
# Get rid of empty columns
output = [x for x in output if x]
# Pad to the correct number of columns
for i in range(cols-len(output)):
output.append([])
return output
def digest(self, tokens):
""" Sort and group index entries """
if isinstance(self, Environment):
Environment.digest(self, tokens)
if self.macroMode == self.MODE_END:
return
# Throw it all away, we don't need it. We'll be generating
# our own index entries below.
while self.childNodes:
self.pop()
else:
Command.digest(self, tokens)
doc = self.ownerDocument
current = self
entries = sorted(self.ownerDocument.userdata.get('index', []))
prev = IndexEntry([], None)
for item in entries:
# See how many levels we need to add/subtract between this one
# and the previous
common = 0
for prevkey, itemkey in zip(zip(prev.sortkey, prev.key),
zip(item.sortkey, item.key)):
if prevkey == itemkey:
common += 1
continue
break
# print
# print item
# print (prev.key, prev.sortkey), (item.key, item.sortkey), common
# Pop out to the common level
i = common
while i < len(prev.key):
# print 'POP'
current = current.parentNode
i += 1
# Add the appropriate number of levels
i = common
while i < len(item.key):
# print 'ADD', item.sortkey[i]
newidx = self.Index()
newidx.key = item.key[i]
newidx.sortkey = item.sortkey[i]
newidx.parentNode = current
current.append(newidx)
current = newidx
i += 1
# Add the current page and format it
current.pages.append(IndexDestination(item.type, item.node))
if item.format is not None:
text = doc.createTextNode(str(len(current.pages)))
ipn = item.format.getElementsByTagName('index-page-number')
if ipn:
ipn = ipn[0]
ipn.parentNode.replaceChild(text, ipn)
item.node.append(item.format)
else:
text = doc.createTextNode(str(len(current.pages)))
item.node.append(text)
prev = item
class IndexDestination(object):
def __init__(self, type, node):
self._cr_type = type
self._cr_node = node
@property
def see(self):
return self._cr_type == IndexEntry.TYPE_SEE
@property
def seealso(self):
return self._cr_type == IndexEntry.TYPE_SEEALSO
@property
def normal(self):
return not(self.see) and not(self.seealso)
def __getattribute__(self, name):
if name.startswith('_cr_') or name in ['see', 'seealso', 'normal']:
return object.__getattribute__(self, name)
if self._cr_type and name in ['url']:
return None
return getattr(self._cr_node, name)
class theindex(IndexUtils, Environment, SectionUtils):
blockType = True
level = Environment.CHAPTER_LEVEL
counter = 'chapter'
class printindex(IndexUtils, Command, SectionUtils):
blockType = True
level = Command.CHAPTER_LEVEL
counter = 'chapter'
class makeindex(Command):
pass
class makeglossary(Command):
pass
class glossary(Command):
args = 'entry:nox'
class index(Command):
args = 'entry:nox'
@property
def textContent(self):
return ''
def invoke(self, tex):
result = Command.invoke(self, tex)
sortkey, key, format = [], [], []
entry = iter(self.attributes['entry'])
current = []
alphanumeric = [Token.CC_OTHER, Token.CC_LETTER, Token.CC_SPACE]
# Parse the index tokens
for tok in entry:
if tok.catcode in alphanumeric:
# Escape character
if tok == '"':
for tok in entry:
current.append(tok)
break
# Entry separator
elif tok == '!':
key.append(current)
if len(sortkey) < len(key):
sortkey.append(current)
current = []
# Sort key separator
elif tok == '@':
sortkey.append(current)
current = []
# Format separator
elif tok == '|':
key.append(current)
if len(sortkey) < len(key):
sortkey.append(current)
current = format
else:
current.append(tok)
continue
# Everything else
current.append(tok)
# Make sure to get the stuff at the end
if not format:
key.append(current)
if len(sortkey) < len(key):
sortkey.append(current)
# Convert the sort keys to strings
for i, item in enumerate(sortkey):
sortkey[i] = tex.expandTokens(item).textContent
# Expand the key tokens
for i, item in enumerate(key):
key[i] = tex.expandTokens(item)
# Get the format element
type = IndexEntry.TYPE_NORMAL
if not format:
format = None
else:
macro = []
while format and format[0].catcode == Token.CC_LETTER:
macro.append(format.pop(0))
if macro:
macro = ''.join(macro)
format.insert(0, EscapeSequence(macro))
if macro == 'see':
type = IndexEntry.TYPE_SEE
elif macro == 'seealso':
type = IndexEntry.TYPE_SEEALSO
format.append(EscapeSequence('index-page-number'))
format = tex.expandTokens(format)
# Store the index information in the document
userdata = self.ownerDocument.userdata
if 'index' not in userdata:
userdata['index'] = []
userdata['index'].append(IndexEntry(key, self, sortkey, format, type))
return result
class IndexEntry(object):
"""
Utility class used to assist in the sorting of index entries
"""
TYPE_NORMAL = 0
TYPE_SEE = 1
TYPE_SEEALSO = 2
def __init__(self, key, node, sortkey=None, format=None, type=0):
"""
Required Arguments:
key -- a list of keys for the index entry
node -- the node of the document that the index entry is
associated with
sortkey -- a list of sort keys, one per key, to be used for
sorting instead of the key values
format -- formatting that should be used to format the
destination of the index entry
type -- the type of entry that this is: TYPE_NORMAL, TYPE_SEE,
or TYPE_SEEALSO
"""
self.key = key
if not sortkey:
self.sortkey = key
else:
self.sortkey = []
for i, sk in enumerate(sortkey):
if sk is None:
self.sortkey.append(key[i].textContent)
else:
self.sortkey.append(sk)
self.format = format
self.node = node
self.type = type
@property
def see(self):
return self.type == type(self).TYPE_SEE
@property
def seealso(self):
return self.type == type(self).TYPE_SEEALSO
@property
def normal(self):
return not(self.see) and not(self.seealso)
def __cmp__(self, other):
result = cmp(zip([collator(x) for x in self.sortkey if isinstance(x, basestring)],
[collator(x.textContent) for x in self.key],
self.key),
zip([collator(x) for x in other.sortkey if isinstance(x, basestring)],
[collator(x.textContent) for x in other.key],
other.key))
if result == 0 and len(self.key) != len(other.key):
return cmp(len(self.key), len(other.key))
return result
def __repr__(self):
if self.format is None:
return ' '.join(['@'.join(self.sortkey),
'!'.join([x.source for x in self.key])])
else:
return ' '.join(['@'.join(self.sortkey),
'!'.join([x.source for x in self.key]),
' '.join([x.source for x in self.format])])
def __str__(self):
return repr(self)
class IndexPageNumber(Command):
macroName = 'index-page-number'
| [
[
[
73,
79
],
[
1982,
1988
]
],
[
[
81,
83
],
[
312,
314
],
[
325,
327
]
],
[
[
114,
119
],
[
7986,
7991
],
[
8002,
8007
],
[
8019,
8024
],
[
9746,
9751
]
],
[
[
121,
135
],
[
9901,
9915
],
[
10125,
10139
]
],
[
[
156,
163
],
[
547,
554
],
[
586,
593
],
[
7448,
7455
],
[
7505,
7512
],
[
7568,
7575
],
[
7607,
7614
],
[
7642,
7649
],
[
7688,
7695
],
[
12938,
12945
],
[
759,
766
],
[
1348,
1355
],
[
1560,
1567
],
[
4629,
4636
],
[
7826,
7833
]
],
[
[
165,
176
],
[
7307,
7318
],
[
7368,
7379
],
[
1478,
1489
],
[
1504,
1515
],
[
4296,
4307
],
[
4322,
4333
]
],
[
[
205,
214
]
],
[
[
238,
250
],
[
7320,
7332
],
[
7457,
7469
]
],
[
[
279,
287
],
[
303,
311
]
],
[
[
292,
300
],
[
11961,
11969
],
[
12053,
12061
],
[
12161,
12169
],
[
12254,
12262
]
],
[
[
401,
409
],
[
11961,
11969
],
[
12053,
12061
],
[
12161,
12169
],
[
12254,
12262
]
],
[
[
439,
449
],
[
7295,
7305
],
[
7436,
7446
]
],
[
[
6607,
6623
],
[
6067,
6083
]
],
[
[
7286,
7294
]
],
[
[
7425,
7435
]
],
[
[
7558,
7567
]
],
[
[
7594,
7606
]
],
[
[
7633,
7641
]
],
[
[
7682,
7687
]
],
[
[
10492,
10502
],
[
4800,
4810
],
[
6793,
6803
],
[
6883,
6893
],
[
9587,
9597
],
[
9986,
9996
],
[
10075,
10085
],
[
10414,
10424
]
],
[
[
12922,
12937
]
]
] |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
def delete_disconnected_nodes(gd):
# delete all nodes with no inputs and outputs
empty_nodes = []
for k, v in gd.items():
if (
len(gd[k].inputs) == 0
and len(gd[k].outputs) == 0
and len(gd[k].control_inputs) == 0
and len(gd[k].control_outputs) == 0
and gd[k].op != "Placeholder"
):
empty_nodes.append(k)
for k in empty_nodes:
del gd[k]
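# Hedged usage sketch (illustrative, not part of the original module): exercises
# delete_disconnected_nodes with a minimal stand-in for the graph-node objects;
# only the attributes read above (inputs, outputs, control_inputs,
# control_outputs, op) are modelled.
if __name__ == "__main__":
    class _Node:
        def __init__(self, op):
            self.op = op
            self.inputs, self.outputs = [], []
            self.control_inputs, self.control_outputs = [], []

    gd = {"a": _Node("Const"), "b": _Node("Placeholder"), "c": _Node("Const")}
    gd["a"].outputs.append("b")   # "a" stays because it has an output
    # "b" stays because Placeholder nodes are never dropped; "c" is removed.
    delete_disconnected_nodes(gd)
    assert sorted(gd) == ["a", "b"]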
| [
[
[
250,
275
]
]
] |
import numpy as np
def _main():
# Inputs
n = 3
x = np.arange(20, dtype=np.float64)
# Slow average/std
avg = np.zeros(len(x) - n + 1)
std = np.zeros(len(x) - n + 1)
for i in range(len(avg)):
avg[i] = np.mean(x[i:i+n])
std[i] = np.std(x[i:i+n])
print('AVG')
print('\n'.join(str(x) for x in avg))
print('STD:')
print('\n'.join(str(x) for x in std))
# Fast std
squares = np.square(x)
sum_of_squares = np.convolve(squares, np.ones(n, dtype=int), 'valid')
var_fast = (sum_of_squares / n) - np.square(avg)
std_fast = np.sqrt(var_fast)
print('STD FAST:')
print('\n'.join(str(x) for x in std_fast))
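    # Hedged sanity check (added for illustration): the fast path relies on the
    # per-window identity Var[X] = E[X^2] - (E[X])^2, so both results must agree
    # up to floating-point error.
    assert np.allclose(std, std_fast)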
if __name__ == '__main__':
_main()
| [
[
[
7,
18
],
[
65,
67
],
[
85,
87
],
[
131,
133
],
[
166,
168
],
[
238,
240
],
[
273,
275
],
[
440,
442
],
[
474,
476
],
[
495,
497
],
[
565,
567
],
[
595,
597
]
],
[
[
25,
30
],
[
717,
722
]
]
] |
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
"""Calculate entropy of a pre-softmax logit Tensor"""
exp_x = torch.exp(x)
A = torch.sum(exp_x, dim=1) # sum of exp(x_i)
B = torch.sum(x * exp_x, dim=1) # sum of x_i * exp(x_i)
return torch.log(A) - B / A
class DeeBertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]
def set_early_exit_entropy(self, x):
if (type(x) is float) or (type(x) is int):
for i in range(len(self.early_exit_entropy)):
self.early_exit_entropy[i] = x
else:
self.early_exit_entropy = x
def init_highway_pooler(self, pooler):
loaded_model = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
all_hidden_states = ()
all_attentions = ()
all_highway_exits = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
)
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
current_outputs = (hidden_states,)
if self.output_hidden_states:
current_outputs = current_outputs + (all_hidden_states,)
if self.output_attentions:
current_outputs = current_outputs + (all_attentions,)
highway_exit = self.highway[i](current_outputs)
# logits, pooled_output
if not self.training:
highway_logits = highway_exit[0]
highway_entropy = entropy(highway_logits)
highway_exit = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
all_highway_exits = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
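                        # Early exit: the exception carries the partial outputs up to
                        # DeeBertForSequenceClassification.forward, which catches it
                        # and records the (1-based) exit layer.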
raise HighwayException(new_output, i + 1)
else:
all_highway_exits = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
outputs = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). ",
BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = DeeBertEncoder(config)
self.pooler = BertPooler(config)
self.init_weights()
def init_highway_pooler(self):
self.encoder.init_highway_pooler(self.pooler)
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
r"""
Return:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model.
pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`):
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during pre-training.
            This output is usually *not* a good summary of the semantic content of the input;
            you're often better off averaging or pooling the sequence of hidden-states for the
            whole input sequence.
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
        highway_exits (:obj:`tuple(tuple(torch.Tensor))`):
            Tuple of each early exit's results (total length: number of layers).
            Each entry is itself a tuple of length 2: the first element is logits and the second is hidden states.
"""
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
encoder_extended_attention_mask = encoder_extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
# Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
def __init__(self, message, exit_layer):
self.message = message
self.exit_layer = exit_layer # start from 1!
class BertHighway(nn.Module):
"""A module to provide a shortcut
from (the output of one non-final BertLayer in BertEncoder) to (cross-entropy computation in BertForSequenceClassification)
"""
def __init__(self, config):
super().__init__()
self.pooler = BertPooler(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
def forward(self, encoder_outputs):
# Pooler
pooler_input = encoder_outputs[0]
pooler_output = self.pooler(pooler_input)
# "return" pooler_output
# BertModel
bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
pooled_output = bmodel_output[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """,
BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.num_layers = config.num_hidden_layers
self.bert = DeeBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_layer=-1,
train_highway=False,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss.
Indices should be in :obj:`[0, ..., config.num_labels - 1]`.
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.BertConfig`) and inputs:
loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
        highway_exits (:obj:`tuple(tuple(torch.Tensor))`):
            Tuple of each early exit's results (total length: number of layers).
            Each entry is itself a tuple of length 2: the first element is logits and the second is hidden states.
"""
exit_layer = self.num_layers
try:
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
outputs = e.message
exit_layer = e.exit_layer
logits = outputs[0]
if not self.training:
original_entropy = entropy(logits)
highway_entropy = []
highway_logits_all = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
# work with highway exits
highway_losses = []
for highway_exit in outputs[-1]:
highway_logits = highway_exit[0]
if not self.training:
highway_logits_all.append(highway_logits)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
highway_losses.append(highway_loss)
if train_highway:
outputs = (sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
outputs = (loss,) + outputs
if not self.training:
outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
outputs = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
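# Hedged usage sketch (illustrative, not part of the original module; it assumes
# the pre-4.0 transformers release this file was written against). The config
# sizes and the 0.5 entropy threshold are arbitrary choices for demonstration.
if __name__ == "__main__":
    from transformers import BertConfig

    config = BertConfig(num_labels=2, num_hidden_layers=4, hidden_size=128,
                        num_attention_heads=4, intermediate_size=256)
    model = DeeBertForSequenceClassification(config)
    model.bert.encoder.set_early_exit_entropy(0.5)  # same threshold for every layer
    model.eval()  # early exit only fires outside training mode

    input_ids = torch.randint(0, config.vocab_size, (1, 16))
    with torch.no_grad():
        outputs = model(input_ids=input_ids)
    logits, exit_layer = outputs[0], outputs[-1]  # entropies and exit layer are appended in eval mode
    print(logits.shape, "exited at layer", exit_layer)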
| [
[
[
7,
12
],
[
455,
460
],
[
477,
482
],
[
529,
534
],
[
594,
599
],
[
8273,
8278
],
[
8394,
8399
],
[
8499,
8504
],
[
8530,
8535
],
[
8787,
8792
]
],
[
[
32,
34
],
[
641,
643
],
[
11028,
11030
],
[
860,
862
],
[
960,
962
],
[
11346,
11348
],
[
11412,
11414
],
[
12489,
12491
],
[
12555,
12557
]
],
[
[
57,
73
],
[
16534,
16550
],
[
17244,
17260
]
],
[
[
75,
82
],
[
16410,
16417
],
[
17092,
17099
]
],
[
[
122,
142
],
[
3957,
3977
],
[
12016,
12036
]
],
[
[
144,
181
],
[
5012,
5049
],
[
12648,
12685
]
],
[
[
229,
250
],
[
5050,
5071
],
[
12686,
12707
]
],
[
[
257,
277
],
[
4050,
4070
],
[
12170,
12190
]
],
[
[
284,
298
],
[
4243,
4257
]
],
[
[
305,
314
],
[
875,
884
]
],
[
[
321,
331
],
[
4336,
4346
],
[
11303,
11313
]
],
[
[
338,
357
],
[
4095,
4114
],
[
12235,
12254
]
],
[
[
371,
378
],
[
2915,
2922
],
[
16181,
16188
]
],
[
[
626,
640
],
[
4290,
4304
]
],
[
[
4082,
4094
],
[
12444,
12456
]
],
[
[
10843,
10859
],
[
3307,
3323
],
[
15988,
16004
]
],
[
[
11016,
11027
],
[
975,
986
]
],
[
[
12202,
12234
]
]
] |
# *** WARNING: this file was generated by the Kulado Kubernetes codegen tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import kulado
import kulado.runtime
import warnings
from ... import tables, version
class ClusterRoleBindingList(kulado.CustomResource):
"""
ClusterRoleBindingList is a collection of ClusterRoleBindings
"""
def __init__(self, resource_name, opts=None, items=None, metadata=None, __name__=None, __opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, kulado.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
__props__ = dict()
__props__['apiVersion'] = 'rbac.authorization.k8s.io/v1beta1'
__props__['kind'] = 'ClusterRoleBindingList'
if items is None:
raise TypeError('Missing required property items')
__props__['items'] = items
__props__['metadata'] = metadata
if opts is None:
opts = kulado.ResourceOptions()
if opts.version is None:
opts.version = version.get_version()
super(ClusterRoleBindingList, self).__init__(
"kubernetes:rbac.authorization.k8s.io/v1beta1:ClusterRoleBindingList",
resource_name,
__props__,
opts)
def translate_output_property(self, prop: str) -> str:
return tables._CASING_FORWARD_TABLE.get(prop) or prop
def translate_input_property(self, prop: str) -> str:
return tables._CASING_BACKWARD_TABLE.get(prop) or prop
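# Hedged usage sketch (illustrative, only meaningful inside a kulado program run
# by the kulado engine):
#
#   bindings = ClusterRoleBindingList("my-bindings", items=[])
#
# `items` is required (an empty list is accepted); `metadata` and `opts`
# (a kulado.ResourceOptions) are optional.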
| [
[
[
171,
177
]
],
[
[
185,
199
],
[
280,
286
],
[
1088,
1094
],
[
1565,
1571
]
],
[
[
207,
215
],
[
538,
546
],
[
696,
704
]
],
[
[
233,
239
],
[
1953,
1959
],
[
2074,
2080
]
],
[
[
241,
248
],
[
1650,
1657
]
],
[
[
257,
279
],
[
1687,
1709
]
]
] |
import logging
from .base import ApplicationWrapper
from ..configuration.utils import coerce_config
from ..support.converters import asbool
log = logging.getLogger(__name__)
class IdentityApplicationWrapper(ApplicationWrapper):
"""Provides user identity when authentication is enabled.
The repoze.who provided identity takes precedence over the identity
provided by IdentityApplicationWrapper if available.
Supported options which can be provided by config are:
- ``sa_auth.authmetadata``: The TGAuthMetadata object that should be used to retrieve identity metadata.
- ``identity.enabled``: Enable the Identity Application Wrapper. By default enabled if authmetadata available.
    - ``identity.allow_missing_user``: Whether the identity should be discarded when the authmetadata is unable to find a user.
"""
def __init__(self, handler, config):
super(IdentityApplicationWrapper, self).__init__(handler, config)
options = {
'enabled': True,
'allow_missing_user': True,
'authmetadata': config.get('sa_auth', {}).get('authmetadata'),
}
options.update(coerce_config(config, 'identity.', {
'enabled': asbool,
'allow_missing_user': asbool
}))
self.enabled = options['enabled'] and options['authmetadata'] is not None
self.options = options
self.tgmdprovider = options['authmetadata']
log.debug('Identity enabled: %s -> %s', self.enabled, self.options)
@property
def injected(self):
return self.enabled
def __call__(self, controller, environ, context):
identity = environ.get('repoze.who.identity')
if identity is None:
context.request.identity = None
return self.next_handler(controller, environ, context)
req_identity = {}
# Get the userid retrieved by repoze.who Authenticator
userid = identity['repoze.who.userid']
if userid is not None:
# Finding the user, groups and permissions:
identity['user'] = identity_user = self.tgmdprovider.get_user(identity, userid)
if identity_user:
identity['groups'] = self.tgmdprovider.get_groups(identity, userid)
identity['permissions'] = self.tgmdprovider.get_permissions(identity, userid)
else:
identity['groups'] = identity['permissions'] = []
req_identity = Identity()
req_identity.update(identity)
req_identity['repoze.what.userid'] = userid
if req_identity.get('user') is None and not self.options['allow_missing_user']:
req_identity = {}
# Add identity to request with repoze.who/what compatibility
context.request.identity = req_identity
environ['repoze.who.identity'] = req_identity
environ['repoze.what.credentials'] = req_identity
return self.next_handler(controller, environ, context)
class Identity(dict):
"""dict subclass: prevent members from being rendered during print.
    Taken as-is from repoze.who.
"""
def __repr__(self):
return '<TurboGears Identity (hidden, dict-like) at %s>' % id(self)
    __str__ = __repr__
| [
[
[
7,
14
],
[
147,
154
]
],
[
[
33,
51
],
[
210,
228
]
],
[
[
86,
99
],
[
1179,
1192
]
],
[
[
133,
139
],
[
1240,
1246
],
[
1282,
1288
]
],
[
[
141,
144
],
[
1475,
1478
]
],
[
[
183,
209
],
[
920,
946
]
],
[
[
3035,
3043
],
[
2497,
2505
]
]
] |
#!/usr/bin/env python3
# This file is Copyright (c) 2018-2019 Rohit Singh <[email protected]>
# This file is Copyright (c) 2019 Florent Kermarrec <[email protected]>
# License: BSD
import sys
from migen import *
from litex.build.generic_platform import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.soc_sdram import *
from litex.soc.integration.builder import *
from litex.soc.cores.clock import *
from litex.soc.cores import dna, xadc
from litex.soc.cores.uart import *
from litex.soc.integration.cpu_interface import get_csr_header
from litedram.modules import MT8KTF51264
from litedram.modules import _TechnologyTimings, _SpeedgradeTimings
from litedram.phy import s7ddrphy
from litepcie.phy.s7pciephy import S7PCIEPHY
from litepcie.core import LitePCIeEndpoint, LitePCIeMSI
from litepcie.frontend.dma import LitePCIeDMA
from litepcie.frontend.wishbone import LitePCIeWishboneBridge
from litex_boards.platforms import nereid
# CRG ----------------------------------------------------------------------------------------------
class CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
self.clock_domains.cd_clk200 = ClockDomain()
clk100 = platform.request("clk100")
self.submodules.pll = pll = S7PLL()
pll.register_clkin(clk100, 100e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
pll.create_clkout(self.cd_sys4x, 4*sys_clk_freq)
pll.create_clkout(self.cd_clk200, 200e6)
self.comb += pll.reset.eq(platform.request("cpu_reset"))
self.submodules.idelayctrl = S7IDELAYCTRL(self.cd_clk200)
# NereidSoC ----------------------------------------------------------------------------------------
class NereidSoC(SoCSDRAM):
SoCSDRAM.mem_map["csr"] = 0x00000000
SoCSDRAM.mem_map["rom"] = 0x20000000
def __init__(self, platform, with_pcie_uart=True):
sys_clk_freq = int(100e6)
SoCSDRAM.__init__(self, platform, sys_clk_freq,
csr_data_width=32,
integrated_rom_size=0x10000,
integrated_sram_size=0x10000,
integrated_main_ram_size=0x10000, # FIXME: keep this for initial PCIe tests
ident="Nereid LiteX Test SoC", ident_version=True,
with_uart=not with_pcie_uart)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = CRG(platform, sys_clk_freq)
self.add_csr("crg")
# DNA --------------------------------------------------------------------------------------
self.submodules.dna = dna.DNA()
self.add_csr("dna")
# XADC -------------------------------------------------------------------------------------
self.submodules.xadc = xadc.XADC()
self.add_csr("xadc")
# SDRAM ------------------------------------------------------------------------------------
if not self.integrated_main_ram_size:
self.submodules.ddrphy = s7ddrphy.K7DDRPHY(
platform.request("ddram"),
sys_clk_freq=sys_clk_freq,
iodelay_clk_freq=200e6)
sdram_module = MT8KTF51264(sys_clk_freq, "1:4", speedgrade="800")
self.register_sdram(self.ddrphy,
sdram_module.geom_settings,
sdram_module.timing_settings)
self.add_csr("ddrphy")
# PCIe -------------------------------------------------------------------------------------
# pcie phy
self.submodules.pcie_phy = S7PCIEPHY(platform, platform.request("pcie_x1"), bar0_size=0x20000)
self.pcie_phy.cd_pcie.clk.attr.add("keep")
platform.add_platform_command("create_clock -name pcie_clk -period 8 [get_nets pcie_clk]")
platform.add_false_path_constraints(
self.crg.cd_sys.clk,
self.pcie_phy.cd_pcie.clk)
self.add_csr("pcie_phy")
# pcie endpoint
self.submodules.pcie_endpoint = LitePCIeEndpoint(self.pcie_phy)
# pcie wishbone bridge
self.submodules.pcie_wishbone = LitePCIeWishboneBridge(self.pcie_endpoint,
lambda a: 1, shadow_base=self.shadow_base)
self.add_wb_master(self.pcie_wishbone.wishbone)
# pcie dma
self.submodules.pcie_dma = LitePCIeDMA(self.pcie_phy, self.pcie_endpoint,
with_buffering=True, buffering_depth=1024, with_loopback=True)
self.add_csr("pcie_dma")
# pcie msi
self.submodules.pcie_msi = LitePCIeMSI()
self.add_csr("pcie_msi")
self.comb += self.pcie_msi.source.connect(self.pcie_phy.msi)
self.msis = {
"DMA_WRITER": self.pcie_dma.writer.irq,
"DMA_READER": self.pcie_dma.reader.irq
}
for i, (k, v) in enumerate(sorted(self.msis.items())):
self.comb += self.pcie_msi.irqs[i].eq(v)
self.add_constant(k + "_INTERRUPT", i)
# pcie uart
if with_pcie_uart:
class PCIeUART(Module, AutoCSR):
def __init__(self, uart):
self.rx_valid = CSRStatus()
self.rx_ready = CSR()
self.rx_data = CSRStatus(8)
self.tx_valid = CSR()
self.tx_ready = CSRStatus()
self.tx_data = CSRStorage(8)
# # #
# cpu to pcie
self.comb += [
self.rx_valid.status.eq(uart.sink.valid),
uart.sink.ready.eq(self.rx_ready.re),
self.rx_data.status.eq(uart.sink.data),
]
# pcie to cpu
self.sync += [
If(self.tx_valid.re,
uart.source.valid.eq(1)
).Elif(uart.source.ready,
uart.source.valid.eq(0)
)
]
self.comb += [
self.tx_ready.status.eq(~uart.source.valid),
uart.source.data.eq(self.tx_data.storage)
]
uart_interface = RS232PHYInterface()
self.submodules.uart = UART(uart_interface)
self.add_csr("uart")
self.add_interrupt("uart")
self.submodules.pcie_uart = PCIeUART(uart_interface)
self.add_csr("pcie_uart")
# Leds -------------------------------------------------------------------------------------
# led blinking (sys)
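        # Bit 26 of a free-running 32-bit counter toggles roughly every 2**26 cycles,
        # i.e. the green LED blinks at about 0.75 Hz with a 100 MHz system clock.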
sys_counter = Signal(32)
self.sync.sys += sys_counter.eq(sys_counter + 1)
rgb = platform.request("rgb_led")
self.comb += [
rgb.r.eq(1),
rgb.g.eq(sys_counter[26]),
rgb.b.eq(1),
]
def generate_software_header(self, filename):
csr_header = get_csr_header(self.get_csr_regions(),
self.get_constants(),
with_access_functions=False,
with_shadow_base=False)
tools.write_to_file(filename, csr_header)
# Build --------------------------------------------------------------------------------------------
def main():
platform = nereid.Platform()
soc = NereidSoC(platform)
builder = Builder(soc, output_dir="../build/nereid", csr_csv="../build/nereid/csr.csv",
compile_gateware=not "no-compile" in sys.argv[1:])
vns = builder.build(build_name="nereid")
soc.generate_software_header("../software/kernel/csr.h")
if __name__ == "__main__":
main()
| [
[
[
200,
203
],
[
7632,
7635
]
],
[
[
223,
224
]
],
[
[
267,
268
]
],
[
[
312,
313
]
],
[
[
358,
359
]
],
[
[
402,
403
]
],
[
[
438,
439
]
],
[
[
468,
471
],
[
2721,
2724
]
],
[
[
473,
477
],
[
2892,
2896
]
],
[
[
511,
512
],
[
1086,
1092
],
[
1855,
1863
],
[
1870,
1878
],
[
1911,
1919
],
[
1179,
1190
],
[
1231,
1242
],
[
1299,
1310
],
[
1395,
1400
],
[
1707,
1719
],
[
2046,
2054
],
[
5148,
5154
],
[
5156,
5163
],
[
6336,
6353
],
[
6391,
6395
],
[
6740,
6746
],
[
7274,
7279
],
[
7509,
7516
],
[
5244,
5253
],
[
5292,
5295
],
[
5333,
5342
],
[
5383,
5386
],
[
5425,
5434
],
[
5472,
5482
],
[
5891,
5893
]
],
[
[
561,
575
],
[
7044,
7058
]
],
[
[
606,
617
],
[
3290,
3301
]
],
[
[
647,
665
]
],
[
[
667,
685
]
],
[
[
711,
719
],
[
3118,
3126
]
],
[
[
756,
765
],
[
3699,
3708
]
],
[
[
792,
808
],
[
4132,
4148
]
],
[
[
810,
821
],
[
4655,
4666
]
],
[
[
856,
867
],
[
4445,
4456
]
],
[
[
907,
929
],
[
4236,
4258
]
],
[
[
966,
972
],
[
7447,
7453
]
],
[
[
1082,
1085
],
[
2533,
2536
]
],
[
[
1845,
1854
],
[
7475,
7484
]
],
[
[
7424,
7428
],
[
7785,
7789
]
]
] |
# coding: utf-8
import pytest
from edipy import fields, validators, exceptions
@pytest.mark.parametrize('fixed_type, data', [
(fields.Integer(1, validators=[validators.Range(1, 5)]), '1'),
(fields.Integer(1, validators=[validators.MaxValue(3)]), '2'),
(fields.Integer(1, validators=[validators.MinValue(1)]), '5'),
(fields.String(5, validators=[validators.Regex(r"[0-9]+")]), '12345'),
(fields.String(12, validators=[validators.Email()]), '[email protected]'),
])
def test_using_validators(fixed_type, data):
try:
fixed_type.encode(data)
except exceptions.ValidationError:
pytest.fail(u"ValidationError should not be thrown")
@pytest.mark.parametrize('fixed_type, data', [
(fields.Integer(1, validators=[validators.Range(1, 5)]), '0'),
(fields.Integer(1, validators=[validators.Range(1, 5)]), '6'),
])
def test_validate_range(fixed_type, data):
with pytest.raises(exceptions.ValidationError):
fixed_type.encode(data)
@pytest.mark.parametrize('fixed_type, data', [
(fields.Integer(1, validators=[validators.MaxValue(1)]), '2'),
(fields.Integer(1, validators=[validators.MaxValue(5)]), '6'),
])
def test_validate_max_value(fixed_type, data):
with pytest.raises(exceptions.ValidationError):
fixed_type.encode(data)
@pytest.mark.parametrize('fixed_type, data', [
(fields.Integer(1, validators=[validators.MinValue(1)]), '0'),
(fields.Integer(1, validators=[validators.MinValue(5)]), '4'),
])
def test_validate_min_value(fixed_type, data):
with pytest.raises(exceptions.ValidationError):
fixed_type.encode(data)
@pytest.mark.parametrize('fixed_type, data', [
(fields.String(5, validators=[validators.Regex(r"[0-9]+")]), 'a123f'),
(fields.String(5, validators=[validators.Regex(r"\d")]), 'abcde'),
(fields.String(5, validators=[validators.Regex(r"[A-Z]{6}")]), 'ABCDE'),
])
def test_validate_regex(fixed_type, data):
with pytest.raises(exceptions.ValidationError):
fixed_type.encode(data)
def test_throws_exception_when_regex_is_invalid():
with pytest.raises(ValueError):
field = fields.String(5, validators=[validators.Regex(")")])
@pytest.mark.parametrize('fixed_type, data', [
(fields.String(11, validators=[validators.Email()]), 'edimail.com'),
(fields.String(11, validators=[validators.Email()]), 'edi@mailcom'),
])
def test_validate_email(fixed_type, data):
with pytest.raises(exceptions.ValidationError):
fixed_type.encode(data)
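# Hedged usage sketch (illustrative, not part of the original test module): the
# same field/validator pattern exercised above, outside pytest. The field size
# and sample value are assumptions chosen for demonstration.
if __name__ == "__main__":
    age = fields.Integer(2, validators=[validators.Range(18, 99)])
    try:
        age.encode('42')
        print('valid')
    except exceptions.ValidationError as err:
        print('invalid:', err)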
| [
[
[
24,
30
],
[
84,
90
],
[
673,
679
],
[
986,
992
],
[
1303,
1309
],
[
1620,
1626
],
[
2180,
2186
],
[
616,
622
],
[
908,
914
],
[
1225,
1231
],
[
1542,
1548
],
[
1944,
1950
],
[
2081,
2087
],
[
2427,
2433
]
],
[
[
50,
56
],
[
135,
141
],
[
202,
208
],
[
269,
275
],
[
336,
342
],
[
411,
417
],
[
724,
730
],
[
791,
797
],
[
1037,
1043
],
[
1104,
1110
],
[
1354,
1360
],
[
1421,
1427
],
[
1671,
1677
],
[
1746,
1752
],
[
1817,
1823
],
[
2231,
2237
],
[
2304,
2310
],
[
2124,
2130
]
],
[
[
58,
68
],
[
165,
175
],
[
232,
242
],
[
299,
309
],
[
365,
375
],
[
441,
451
],
[
754,
764
],
[
821,
831
],
[
1067,
1077
],
[
1134,
1144
],
[
1384,
1394
],
[
1451,
1461
],
[
1700,
1710
],
[
1775,
1785
],
[
1846,
1856
],
[
2261,
2271
],
[
2334,
2344
],
[
2153,
2163
]
],
[
[
70,
80
],
[
580,
590
],
[
922,
932
],
[
1239,
1249
],
[
1556,
1566
],
[
1958,
1968
],
[
2441,
2451
]
],
[
[
487,
508
]
],
[
[
860,
879
]
],
[
[
1173,
1196
]
],
[
[
1490,
1513
]
],
[
[
1896,
1915
]
],
[
[
2025,
2068
]
],
[
[
2379,
2398
]
]
] |
"""
tulflow.harvest
~~~~~~~~~~~~~~~
This module contains objects to harvest data from one given location to another.
"""
import hashlib
import io
import logging
import pandas
import sickle
from lxml import etree
from sickle import Sickle
from sickle.models import xml_to_dict
from sickle.oaiexceptions import NoRecordsMatch
from tulflow import process
NS = {
"marc21": "http://www.loc.gov/MARC21/slim",
"oai": "http://www.openarchives.org/OAI/2.0/"
}
def oai_to_s3(**kwargs):
"""Wrapper function for using OAI Harvest, Default Processor, and S3 Writer."""
kwargs["harvest_params"] = {
"metadataPrefix": kwargs.get("metadata_prefix"),
"from": kwargs.get("harvest_from_date"),
"until": kwargs.get("harvest_until_date")
}
dag_id = kwargs["dag"].dag_id
dag_start_date = kwargs["timestamp"]
oai_sets = generate_oai_sets(**kwargs)
all_processed = []
sets_with_no_records = []
if oai_sets:
for oai_set in oai_sets:
kwargs["harvest_params"]["set"] = oai_set
data = harvest_oai(**kwargs)
if data == []:
sets_with_no_records.append(oai_set)
logging.info("Skipping processing % set because it has no data.", oai_set)
continue
outdir = dag_s3_prefix(dag_id, dag_start_date)
processed = process_xml(data, dag_write_string_to_s3, outdir, **kwargs)
all_processed.append(processed)
else:
data = harvest_oai(**kwargs)
        if data == []:
            logging.info("Harvest returned no records and no set was specified.")
outdir = dag_s3_prefix(dag_id, dag_start_date)
processed = process_xml(data, dag_write_string_to_s3, outdir, **kwargs)
all_processed.append(processed)
    all_updated = sum([processed['updated'] for processed in all_processed])
    all_deleted = sum([processed['deleted'] for processed in all_processed])
    logging.info("Total OAI Records Harvested & Processed: %s", all_updated)
    logging.info("Total OAI Records Harvested & Marked for Deletion: %s", all_deleted)
logging.info("Total sets with no records: %s", len(sets_with_no_records))
logging.info("Sets with no records %s", sets_with_no_records)
return {"updated": all_updated, "deleted": all_deleted, "sets_with_no_records": sets_with_no_records}
def generate_oai_sets(**kwargs):
"""Generate the oai sets we want to harvest."""
all_sets = bool(kwargs.get("all_sets"))
included_sets = kwargs.get("included_sets")
excluded_sets = kwargs.get("excluded_sets")
oai_endpoint = kwargs.get("oai_endpoint")
if all_sets:
logging.info("Seeing All Sets Needed.")
return []
elif included_sets:
logging.info("Seeing SetSpec List.")
if not isinstance(included_sets, list):
return [included_sets]
return included_sets
elif excluded_sets:
logging.info("Seeing Excluded SetSpec List.")
if not isinstance(excluded_sets, list):
excluded_sets = [excluded_sets]
list_sets = Sickle(oai_endpoint).ListSets()
all_sets = [oai_set.xml.find("oai:setSpec", namespaces=NS).text for oai_set in list_sets]
remaining_sets = list(set(all_sets) - set(excluded_sets))
logging.info(remaining_sets)
return remaining_sets
return []
class HarvestIterator(sickle.iterator.OAIItemIterator):
def next(self):
"""Return the next record/header/set."""
while True:
for item in self._items:
mapped = self.mapper(item)
if self.ignore_deleted and mapped.deleted:
continue
if hasattr(mapped, 'metadata') and mapped.metadata == None:
logging.info("Skipping record with no metadata: %s", mapped.header.identifier)
continue
return mapped
if self.resumption_token and self.resumption_token.token:
self._next_response()
else:
raise StopIteration
pass
# TODO: Remove if https://github.com/mloesch/sickle/pull/47 gets merged.
class HarvestRecord(sickle.models.Record):
def get_metadata(self):
# We want to get record/metadata/<container>/*
# <container> would be the element ``dc``
# in the ``oai_dc`` case.
meta_data = self.xml.find('.//' + self._oai_namespace + 'metadata')
if meta_data != None:
return xml_to_dict(meta_data.getchildren()[0], strip_ns=self._strip_ns)
pass
def harvest_oai(**kwargs):
"""Create OAI ListRecords Iterator for Harvesting Data."""
oai_endpoint = kwargs.get("oai_endpoint")
harvest_params = kwargs.get("harvest_params")
logging.info("Harvesting from %s", oai_endpoint)
logging.info("Harvesting %s", harvest_params)
sickle = Sickle(oai_endpoint, retry_status_codes=[500,503], max_retries=3)
class_mapping = harvest_params.get("class_mapping", {
"ListRecords": HarvestRecord,
})
iterator = harvest_params.get("iterator", HarvestIterator)
for key in class_mapping:
sickle.class_mapping[key] = class_mapping[key]
sickle.iterator = iterator
try:
return sickle.ListRecords(**harvest_params)
except NoRecordsMatch:
logging.info("No records found.")
return []
class OaiXml:
"""oai-pmh xml etree wrapper"""
def __init__(self, dag_id, timestamp):
etree.register_namespace("oai", "http://www.openarchives.org/OAI/2.0/")
etree.register_namespace("marc21", "http://www.loc.gov/MARC21/slim")
self.root = etree.Element("{http://www.openarchives.org/OAI/2.0/}collection")
self.root.attrib["dag-id"] = dag_id
self.root.attrib["dag-timestamp"] = timestamp
def append(self, record):
self.root.append(record)
def tostring(self):
return etree.tostring(self.root, encoding="utf-8").decode("utf-8")
def process_xml(data, writer, outdir, **kwargs):
"""Process & Write XML data to S3."""
parser = kwargs.get("parser")
records_per_file = kwargs.get("records_per_file")
if kwargs.get("dag"):
run_id = kwargs.get("dag").dag_id
else:
run_id = "no-dag-provided"
if kwargs.get("timestamp"):
timestamp = kwargs.get("timestamp")
else:
timestamp = "no-timestamp-provided"
if not records_per_file:
records_per_file = 1000
count = deleted_count = 0
oai_updates = OaiXml(run_id, timestamp)
oai_deletes = OaiXml(run_id, timestamp)
logging.info("Processing XML")
for record in data:
record_id = record.header.identifier
record = record.xml
record.attrib["airflow-record-id"] = record_id
if parser:
record = parser(record, **kwargs)
if record.xpath(".//oai:header[@status='deleted']", namespaces=NS):
logging.info("Added record %s to deleted xml file(s)", record_id)
deleted_count += 1
oai_deletes.append(record)
if deleted_count % int(records_per_file) == 0:
writer(oai_deletes.tostring(), outdir + "/deleted", **kwargs)
oai_deletes = OaiXml(run_id, timestamp)
else:
logging.info("Added record %s to new-updated xml file", record_id)
count += 1
oai_updates.append(record)
if count % int(records_per_file) == 0:
writer(oai_updates.tostring(), outdir + "/new-updated", **kwargs)
oai_updates = OaiXml(run_id, timestamp)
writer(oai_updates.tostring(), outdir + "/new-updated", **kwargs)
writer(oai_deletes.tostring(), outdir + "/deleted", **kwargs)
logging.info("OAI Records Harvested & Processed: %s", count)
logging.info("OAI Records Harvest & Marked for Deletion: %s", deleted_count)
return {"updated": count, "deleted": deleted_count}
def perform_xml_lookup_with_cache():
cache = {}
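    # `cache` is captured by the closure below so the CSV lookup file is fetched
    # from S3 only once and reused across subsequent perform_xml_lookup calls.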
def perform_xml_lookup(oai_record, **kwargs):
"""Parse additions/updates & add boundwiths."""
if len(cache) == 0:
logging.info("*** Fetching CSV lookup file from s3 ***")
access_id = kwargs.get("access_id")
access_secret = kwargs.get("access_secret")
bucket = kwargs.get("bucket_name")
lookup_key = kwargs.get("lookup_key")
csv_data = process.get_s3_content(bucket, lookup_key, access_id, access_secret)
cache["value"] = pandas.read_csv(io.BytesIO(csv_data), header=0)
lookup_csv = cache["value"]
for record in oai_record.xpath(".//marc21:record", namespaces=NS):
record_id = process.get_record_001(record)
logging.info("Reading in Record %s", record_id)
parent_txt = lookup_csv.loc[lookup_csv.child_id == int(record_id), "parent_xml"].values
if len(set(parent_txt)) >= 1:
logging.info("Child XML record found %s", record_id)
for parent_node in parent_txt[0].split("||"):
try:
record.append(etree.fromstring(parent_node))
except etree.XMLSyntaxError as error:
logging.error("Problem with string syntax:")
logging.error(error)
logging.error(parent_node)
return oai_record
return perform_xml_lookup
def dag_write_string_to_s3(string, prefix, **kwargs):
"""Push a string in memory to s3 with a defined prefix"""
access_id = kwargs.get("access_id")
access_secret = kwargs.get("access_secret")
bucket_name = kwargs.get("bucket_name")
logging.info("Writing to S3 Bucket %s", bucket_name)
our_hash = hashlib.md5(string.encode("utf-8")).hexdigest()
filename = "{}/{}".format(prefix, our_hash)
process.generate_s3_object(string, bucket_name, filename, access_id, access_secret)
def write_log(string, prefix, **kwargs):
"""Write the data to logging info."""
prefix = prefix
logging.info(prefix)
string = string
logging.info(string)
def dag_s3_prefix(dag_id, timestamp):
"""Define the prefix that will be prepended to all files created by this dag run"""
return "{}/{}".format(dag_id, timestamp)
| [
[
[
128,
135
],
[
9705,
9712
]
],
[
[
143,
145
],
[
8474,
8476
]
],
[
[
153,
160
],
[
1183,
1190
],
[
1898,
1905
],
[
1975,
1982
],
[
2060,
2067
],
[
2138,
2145
],
[
2605,
2612
],
[
2695,
2702
],
[
2876,
2883
],
[
3238,
3245
],
[
3722,
3729
],
[
4705,
4712
],
[
4758,
4765
],
[
5268,
5275
],
[
6528,
6535
],
[
6865,
6872
],
[
7221,
7228
],
[
7679,
7686
],
[
7744,
7751
],
[
9636,
9643
],
[
9998,
10005
],
[
10043,
10050
],
[
8079,
8086
],
[
8686,
8693
],
[
8892,
8899
],
[
9183,
9190
],
[
9252,
9259
],
[
9297,
9304
]
],
[
[
168,
174
],
[
8458,
8464
]
],
[
[
182,
188
],
[
3335,
3341
],
[
4125,
4131
]
],
[
[
206,
211
],
[
5423,
5428
],
[
5503,
5508
],
[
5592,
5597
],
[
5860,
5865
],
[
9070,
9075
],
[
9128,
9133
]
],
[
[
231,
237
],
[
3034,
3040
],
[
4817,
4823
]
],
[
[
264,
275
],
[
4440,
4451
]
],
[
[
309,
323
],
[
5244,
5258
]
],
[
[
344,
351
],
[
9805,
9812
],
[
8360,
8367
],
[
8643,
8650
]
],
[
[
353,
355
],
[
3129,
3131
],
[
6848,
6850
],
[
8614,
8616
]
],
[
[
470,
479
]
],
[
[
2312,
2329
],
[
861,
878
]
],
[
[
3319,
3334
],
[
5037,
5052
]
],
[
[
4111,
4124
],
[
4965,
4978
]
],
[
[
4519,
4530
],
[
1065,
1076
],
[
1495,
1506
]
],
[
[
5328,
5334
],
[
6454,
6460
],
[
6498,
6504
],
[
7169,
7175
],
[
7513,
7519
]
],
[
[
5926,
5937
],
[
1366,
1377
],
[
1664,
1675
]
],
[
[
7883,
7912
]
],
[
[
9388,
9410
],
[
1384,
1406
],
[
1682,
1704
]
],
[
[
9895,
9904
]
],
[
[
10070,
10083
],
[
1304,
1317
],
[
1606,
1619
]
]
] |
import turtle
STARTING_POSITIONS = [(0, 0), (-20, 0), (-40, 0)]
MOVE_DISTANCE = 20
UP = 90
DOWN = 270
RIGHT = 0
LEFT = 180
class Snake:
"""Initializes length and segments of snake."""
def __init__(self):
self.length = 3
self.segments = []
self.create_snake()
self.head = self.segments[0]
def create_snake(self):
"""Creates snake and sets starting position of snake."""
for position in STARTING_POSITIONS:
self.add_segment(position)
def add_segment(self, position):
new_segment = turtle.Turtle(shape="square")
new_segment.color("white")
new_segment.penup()
new_segment.setpos(position)
self.segments.append(new_segment)
def reset_snake(self):
for segment in self.segments:
segment.hideturtle()
self.segments.clear()
self.create_snake()
self.head = self.segments[0]
def extend(self, loot):
for time in range(loot):
self.add_segment(self.segments[-1].position())
def move(self):
"""Moves snake forward with segments following index zero segment."""
for seg_num in range(len(self.segments) - 1, 0, -1):
self.segments[seg_num].goto(self.segments[seg_num - 1].pos())
self.head.forward(MOVE_DISTANCE)
def up(self):
if self.head.heading() != DOWN:
self.head.setheading(UP)
def down(self):
if self.head.heading() != UP:
self.head.setheading(DOWN)
def right(self):
if self.head.heading() != LEFT:
self.head.setheading(RIGHT)
def left(self):
if self.head.heading() != RIGHT:
self.head.setheading(LEFT)
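# Hedged usage sketch (illustrative, not part of the original module): drives the
# Snake class directly for a few frames. It assumes a display is available for
# the turtle window; key names match turtle's arrow-key identifiers.
if __name__ == "__main__":
    screen = turtle.Screen()
    screen.bgcolor("black")
    screen.tracer(0)

    snake = Snake()
    screen.listen()
    screen.onkey(snake.up, "Up")
    screen.onkey(snake.down, "Down")
    screen.onkey(snake.left, "Left")
    screen.onkey(snake.right, "Right")

    for _ in range(50):   # short demo loop instead of a full game loop
        screen.update()
        snake.move()
    screen.bye()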
| [
[
[
7,
13
],
[
592,
598
]
],
[
[
17,
35
],
[
469,
487
]
],
[
[
68,
81
],
[
1360,
1373
]
],
[
[
88,
90
],
[
1471,
1473
],
[
1533,
1535
]
],
[
[
97,
101
],
[
1431,
1435
],
[
1571,
1575
]
],
[
[
109,
114
],
[
1676,
1681
],
[
1741,
1746
]
],
[
[
120,
124
],
[
1636,
1640
],
[
1782,
1786
]
],
[
[
142,
147
]
]
] |
# -*- coding: utf-8 -*-
"""This module contains all functions and classes for the MLTree. The MLTree buils a tree-like
structure of the objects in a given repository. This allows the user to access objects in a
comfortable way allowing for autocompletion (i.e. in Jupyter notebooks).
To use it one can simply call the :py:meth:`pailab.tools.tree.MLTree.add_tree` method to
add such a tree to the current repository::
>>from pailab.tools.tree import MLTree
>>MLTree.add_tree(ml_repo)
After the tree has been added, one can simply use the tree. Here, using autocompletion makes basic work with repo objects quite simple.
Each tree node provides useful functions that can be applied:
- ``load`` loads the object of the given tree node or the child tree nodes of the current node.
After calling load the respective nodes have a new attribute ``obj`` that contains the respective loaded object. To load all objects belonging to the models subtree like
parameters, evaluations or measures one can call::
>> ml_repo.tree.models.load()
- ``history`` lists the history of all objects of the respective subtree, where history accepts certain parameters such as a range of versions or
which repo object information to include. To list the history of all training data just use::
>> ml_repo.tree.training_data.history()
- ``modifications`` lists all objects of the respective subtree that have been modified and not yet committed.
There are also node dependent function (depending on what object the node represents).
"""
import logging
from numpy import load
from deepdiff import DeepDiff
from pailab.ml_repo.repo import MLObjectType, MLRepo
from pailab.ml_repo.repo_objects import RepoInfoKey, DataSet # pylint: disable=E0401
from pailab.ml_repo.repo_store import RepoStore # pylint: disable=E0401
import pailab.ml_repo.repo_store as repo_store
import pailab.ml_repo.repo_objects as repo_objects
logger = logging.getLogger(__name__)
#region collections and items
class _RepoObjectItem:
def __init__(self, name, ml_repo, repo_obj = None):
self._name = name
self._repo = ml_repo
if repo_obj is not None:
self.obj = repo_obj
def _set(self, path, items):
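        # Recursively attach `items` along the attribute path `path`: path[0]
        # becomes an attribute of this node holding items[0], and the remaining
        # path/items pairs are attached below it. The collection classes further
        # down use this to build nested trees such as measures/<model>/<data>.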
if len(path) > 0:
if len(path) == 1:
setattr(self, path[0], items[0])
return
if hasattr(self, path[0]):
getattr(self, path[0])._set(path[1:], items[1:])
else:
setattr(self, path[0], items[0])
items[0]._set(path[1:], items[1:])
def load(self, version=repo_store.LAST_VERSION, full_object=False,
modifier_versions=None, containing_str=None):
"""Loads the object into the tree and stores it in obj member.
Args:
version (str, optional): The version of the object to be loaded. Defaults to repo_store.LAST_VERSION.
full_object (bool, optional): If True, also the bigobject-members of the object will be loaded and stored. Defaults to False.
            modifier_versions (dict of str to str, optional): If given, the version of the object that was created from the
                    objects and versions specified in this dict will be loaded. Defaults to None.
containing_str (str, optional): The object will only be loaded if the given string is contained in the objects
name (intended for internal use). Defaults to None.
"""
if containing_str is None or containing_str in self._name:
if self._repo is not None:
self.obj = self._repo.get(self._name, version, full_object, modifier_versions, throw_error_not_exist = False)
for v in self.__dict__.values():
if hasattr(v,'load'):
v.load(version, full_object, modifier_versions, containing_str)
def modifications(self, commit=False, commit_message=''):
result = {}
if self._name is not None:
try:
if self._repo is not None:
obj_orig = self._repo.get(
self.obj.repo_info[RepoInfoKey.NAME], version=self.obj.repo_info[RepoInfoKey.VERSION])
diff = DeepDiff(obj_orig, self.obj,
ignore_order=True)
except AttributeError:
return None
if len(diff) == 0:
return None
else:
if commit and (self._repo is not None):
version = self._repo.add(
self.obj, message=commit_message)
self.obj = self._repo.get(self._name, version=version)
result = {self._name: diff}
for v in self.__dict__.values():
if hasattr(v, 'modifications'):
tmp = v.modifications(commit, commit_message)
if tmp is not None:
result.update(tmp)
return result
def history(self, version = (repo_store.FIRST_VERSION,repo_store.LAST_VERSION),
repo_info = [RepoInfoKey.NAME, RepoInfoKey.AUTHOR, RepoInfoKey.COMMIT_DATE, RepoInfoKey.COMMIT_MESSAGE],
obj_data = []):
history = []
if self._repo is not None:
history = self._repo.get(self._name, version = version, throw_error_not_exist=False)
if not isinstance(history, list):
history = [history]
result = {}
tmp = []
for h in history:
r = {}
for r_info in repo_info:
r[str(r_info)] = h.repo_info[r_info]
for o_info in obj_data:
                r[o_info] = h.__dict__[o_info]
tmp.append(r)
result[self._name] = tmp
for v in self.__dict__.values():
if isinstance(v, _RepoObjectItem):
tmp2 = v.history(version, repo_info, obj_data)
if tmp2 is not None:
result.update(tmp2)
if len(result) > 0:
return result
def __call__(self, containing_str=None):
# if len(self.__dict__) == 1:
if containing_str is not None:
result = []
if containing_str in self._name:
result.append(self._name)
for v in self.__dict__.values():
if isinstance(v, _RepoObjectItem):
d = v(containing_str)
if isinstance(d, str):
result.append(d)
else:
result.extend(d)
return [x for x in result if containing_str in x]
else:
return self._name
return result
class _RawDataItem(_RepoObjectItem):
def __init__(self, name, ml_repo, repo_obj = None):
super(_RawDataItem,self).__init__(name, ml_repo, repo_obj)
def append(self, x_data, y_data = None):
"""Append data to a RawData object
        It appends data to the given RawData object and updates all training and test DataSets that are implicitly changed by this update.
        Args:
x_data (numpy matrix): the x_data to append
y_data (numpy matrix, optional): Defaults to None. The y_data to append
Raises:
Exception: If the data is not consistent to the RawData (e.g. different number of x-coordinates) it throws an exception.
"""
logger.info('Start appending ' + str(x_data.shape[0]) + ' datapoints to RawData' + self._name)
raw_data = self._repo.get(self._name)
if len(raw_data.x_coord_names) != x_data.shape[1]:
raise Exception('Number of columns of x_data of RawData object is not equal to number of columns of additional x_data.')
if raw_data.y_coord_names is None and y_data is not None:
raise Exception('RawData object does not contain y_data but y_data is given')
if raw_data.y_coord_names is not None:
if y_data is None:
raise Exception('RawData object has y_data but no y_data is given')
if y_data.shape[1] != len(raw_data.y_coord_names ):
raise Exception('Number of columns of y_data of RawData object is not equal to number of columns of additional y_data.')
numpy_dict = {'x_data' : x_data}
if raw_data.y_coord_names is not None:
numpy_dict['y_data'] = y_data
raw_data.n_data += x_data.shape[0]
old_version = raw_data.repo_info[RepoInfoKey.VERSION]
new_version = self._repo.add(raw_data)
self._repo._numpy_repo.append(self._name, old_version, new_version, numpy_dict)
# now find all datasets which are affected by the updated data
changed_data_sets = []
training_data = self._repo.get_training_data(full_object = False)
if isinstance(training_data, DataSet):
if training_data.raw_data == self._name and training_data.raw_data_version == repo_store.RepoStore.LAST_VERSION:
if training_data.end_index is None or training_data.end_index < 0:
training_data.raw_data_version = new_version
changed_data_sets.append(training_data)
test_data = self._repo.get_names(MLObjectType.TEST_DATA)
for d in test_data:
data = self._repo.get(d)
if isinstance(data, DataSet):
if data.raw_data == self._name and data.raw_data_version == repo_store.RepoStore.LAST_VERSION:
if data.end_index is None or data.end_index < 0:
data.raw_data_version = new_version
changed_data_sets.append(data)
self._repo.add(changed_data_sets, 'RawData ' + self._name + ' updated, add DataSets depending om the updated RawData.')
if hasattr(self, 'obj'):#update current object
self.obj = self._repo.get(self._name, version=new_version)
logger.info('Finished appending data to RawData' + self._name)
class _RawDataCollection(_RepoObjectItem):
@staticmethod
def __get_name_from_path(path):
return path.split('/')[-1]
def __init__(self, repo):
super(_RawDataCollection, self).__init__('raw_data', repo)
names = repo.get_names(MLObjectType.RAW_DATA)
for n in names:
setattr(self, _RawDataCollection.__get_name_from_path(n), _RawDataItem(n, repo))
def add(self, name, data, input_variables = None, target_variables = None):
"""Add raw data to the repository
Arguments:
            name {str} -- the name of the data added
data {pandas DataFrame} -- the data as pandas datatable
input_variables {str or iterable of str} -- column name or iterable of column names defining the input variables of the given data
target_variables {str or iterable of str} -- column name or iterable of column names defining the target variables of the given data
Keyword Arguments:
input_variables {list of strings} -- list of column names defining the input variables for the machine learning (default: {None}). If None, all variables are used as input
target_variables {list of strings} -- list of column names defining the target variables for the machine learning (default: {None}). If None, no target data is added from the table.
"""
path = 'raw_data/' + name
if input_variables is None:
input_variables = list(data)
if not target_variables is None:
[input_variables.remove(x) for x in target_variables]
else:
if isinstance(input_variables, str):
input_variables = [input_variables]
# check whether the input_variables are included in the data
if not [item for item in input_variables if item in list(data)] == list(input_variables):
raise Exception('RawData does not include at least one column included in input_variables')
if target_variables is not None:
if isinstance(target_variables, str):
target_variables = [target_variables]
# check if target variables are in list
if not [item for item in target_variables if item in list(data)] == list(target_variables):
raise Exception('RawData does not include at least one column included in target_variables')
raw_data = repo_objects.RawData(data.loc[:, input_variables].values, input_variables, data.loc[:, target_variables].values,
target_variables, repo_info = {RepoInfoKey.NAME: path})
else:
raw_data = repo_objects.RawData(data.loc[:, input_variables].values, input_variables, repo_info = {RepoInfoKey.NAME: path})
v = self._repo.add(raw_data, 'data ' + path + ' added to repository' , category = MLObjectType.RAW_DATA)
obj = self._repo.get(path, version=v, full_object = False)
setattr(self, name, _RawDataItem(path, self._repo, obj))
def add_from_numpy_file(self, name, filename_X, x_names, filename_Y=None, y_names = None):
path = name
X = load(filename_X)
Y = None
if filename_Y is not None:
Y = load(filename_Y)
raw_data = repo_objects.RawData(X, x_names, Y, y_names, repo_info = {RepoInfoKey.NAME: path})
v = self._repo.add(raw_data, 'data ' + path + ' added to repository' , category = MLObjectType.RAW_DATA)
obj = self._repo.get(path, version=v, full_object = False)
setattr(self, name, _RawDataItem(path, self._repo, obj))
class _TrainingDataCollection(_RepoObjectItem):
@staticmethod
def __get_name_from_path(path):
return path.split('/')[-1]
def __init__(self, repo):
super(_TrainingDataCollection, self).__init__('training_data', None)
        self.__repo = repo  # store ml_repo in __repo so that obj is not loaded by the generic base class
names = repo.get_names(MLObjectType.TRAINING_DATA)
for n in names:
setattr(self, _TrainingDataCollection.__get_name_from_path(n), _RepoObjectItem(n, repo))
def add(self, name, raw_data, start_index=0,
end_index=None, raw_data_version='last'):
#path = 'training_data/' + name
data_set = repo_objects.DataSet(raw_data, start_index, end_index,
raw_data_version, repo_info = {RepoInfoKey.NAME: name, RepoInfoKey.CATEGORY: MLObjectType.TRAINING_DATA})
v = self.__repo.add(data_set)
tmp = self.__repo.get(name, version=v)
item = _RepoObjectItem(name, self.__repo, tmp)
setattr(self, name, item)
class _TestDataCollection(_RepoObjectItem):
@staticmethod
def __get_name_from_path(path):
return path.split('/')[-1]
def __init__(self, repo):
super(_TestDataCollection, self).__init__('test_data', None)
        self.__repo = repo  # store ml_repo in __repo so that obj is not loaded by the generic base class
names = repo.get_names(MLObjectType.TEST_DATA)
for n in names:
setattr(self, _TestDataCollection.__get_name_from_path(n), _RepoObjectItem(n,repo))
def add(self, name, raw_data, start_index=0,
end_index=None, raw_data_version='last'):
data_set = repo_objects.DataSet(raw_data, start_index, end_index,
raw_data_version, repo_info = {RepoInfoKey.NAME: name, RepoInfoKey.CATEGORY: MLObjectType.TEST_DATA})
v = self.__repo.add(data_set)
tmp = self.__repo.get(name, version=v)
item = _RepoObjectItem(name, self.__repo, tmp)
setattr(self, name, item)
class _MeasureItem(_RepoObjectItem):
def __init__(self, name, ml_repo, repo_obj = None):
super(_MeasureItem, self).__init__(name, ml_repo, repo_obj)
class _JobItem(_RepoObjectItem):
def __init__(self, name, ml_repo, repo_obj = None):
super(_JobItem, self).__init__(name, ml_repo, repo_obj)
class _MeasureCollection(_RepoObjectItem):
def __init__(self, name, ml_repo):
super(_MeasureCollection, self).__init__('measures', None)
names = ml_repo.get_names(MLObjectType.MEASURE)
for n in names:
path = n.split('/')[2:]
items = [None] * len(path)
for i in range(len(items)-1):
items[i] = _RepoObjectItem(path[i], None)
items[-1] = _MeasureItem(n, ml_repo)
self._set(path, items)
#items[-2] = MeasuresOnDataItem
class _EvalCollection(_RepoObjectItem):
def __init__(self, name, ml_repo):
super(_EvalCollection, self).__init__('eval', None)
names = ml_repo.get_names(MLObjectType.EVAL_DATA)
for n in names:
path = n.split('/')[2:]
items = [None] * len(path)
for i in range(len(items)-1):
items[i] = _RepoObjectItem(path[i], None)
items[-1] = _MeasureItem(n, ml_repo)
self._set(path, items)
class _TestCollection(_RepoObjectItem):
def __init__(self, name, ml_repo):
super(_TestCollection, self).__init__('tests', None)
names = ml_repo.get_names(MLObjectType.TEST)
for n in names:
path = n.split('/')[2:]
items = [None] * len(path)
for i in range(len(items)-1):
items[i] = _RepoObjectItem(path[i], None)
items[-1] = _RepoObjectItem(n, ml_repo)
self._set(path, items)
class _JobCollection(_RepoObjectItem):
def __init__(self, name, ml_repo, model_name):
super(_JobCollection, self).__init__('jobs', None)
names = ml_repo.get_names(MLObjectType.JOB)
for n in names:
if model_name in n:
path = n.split('/')
path = path[path.index('jobs')+1:]
items = [None] * len(path)
for i in range(len(items)-1):
items[i] = _RepoObjectItem(path[i], None)
items[-1] = _JobItem(n, ml_repo)
self._set(path, items)
class _ModelItem(_RepoObjectItem):
def __init__(self, name, ml_repo, repo_obj = None):
super(_ModelItem,self).__init__(name, ml_repo, repo_obj)
self.model = _RepoObjectItem(name + '/model', ml_repo)
self.eval = _EvalCollection(name + '/eval', ml_repo)
self.model_param = _RepoObjectItem(name + '/model_param', ml_repo)
self.tests = _TestCollection(name + '/tests', ml_repo)
self.measures = _MeasureCollection(name+ '/measure', ml_repo)
self.jobs = _JobCollection(name+'/jobs', ml_repo, name)
if ml_repo._object_exists(name+'/training_stat'):
self.training_statistic = _RepoObjectItem(name+'/training_stat', ml_repo)
if ml_repo._object_exists(name+'/training_param'):
self.training_param = _RepoObjectItem(name + '/training_param', ml_repo)
def set_label(self, label_name, version = repo_store.RepoStore.LAST_VERSION, message=''):
self._repo.set_label(label_name, self._name+ '/model', version, message)
class _LabelCollection(_RepoObjectItem):
def __init__(self, repo):
super(_LabelCollection,self).__init__('labels', None)
names = repo.get_names(MLObjectType.LABEL)
for n in names:
#label = ml_repo.get()
setattr(self, n, _RepoObjectItem(n, repo))
class _ModelCollection(_RepoObjectItem):
@staticmethod
def __get_name_from_path(name):
return name
def __init__(self, repo):
super(_ModelCollection,self).__init__('models', None)
names = repo.get_names(MLObjectType.MODEL)
for n in names:
setattr(self, _ModelCollection.__get_name_from_path(n), _ModelItem(n, repo))
self.labels = _LabelCollection(repo)
def add(self, name):
setattr(self, name, _ModelItem(name,self._repo))
class _CacheDataCollection(_RepoObjectItem):
@staticmethod
def __get_name_from_path(path):
return path.split('/')[-1]
def __init__(self, repo):
super(_CacheDataCollection, self).__init__('cache', None)
self.__repo = repo # we store ml_repo in __repo so that obj is not loaded from the generic base class
names = repo.get_names(MLObjectType.CACHED_VALUE)
for n in names:
setattr(self, _CacheDataCollection.__get_name_from_path(n), _RepoObjectItem(n, repo))
#endregion
class MLTree:
@staticmethod
def add_tree(ml_repo):
"""Adds an MLTree to a repository.
Args:
ml_repo (MLRepo): the repository the tree is added to
"""
setattr(ml_repo, 'tree', MLTree(ml_repo))
ml_repo._add_triggers.append(ml_repo.tree.reload)
def __create(self):
self.raw_data = _RawDataCollection(self.__ml_repo)
self.training_data = _TrainingDataCollection(self.__ml_repo)
self.test_data = _TestDataCollection(self.__ml_repo)
self.models = _ModelCollection(self.__ml_repo)
self.cache = _CacheDataCollection(self.__ml_repo)
def __init__(self, ml_repo):
self.__ml_repo = ml_repo
self.__create()
def reload(self, **kwargs):
"""Method to reload the tree after objects have been added or deleted from the repository.
"""
self.__create() # todo make this more efficient by just updating collections and items which are affected by this
def modifications(self):
"""Return a dictionary of all objects that were modified but no yet
commited to the repository.
Returns:
dict: dictionary mapping object ids to dictionary of the modified attributes
"""
result = {}
tmp = self.raw_data.modifications()
if tmp is not None:
result.update(tmp)
tmp = self.training_data.modifications()
if tmp is not None:
result.update(tmp)
tmp = self.test_data.modifications()
if tmp is not None:
result.update(tmp)
tmp = self.models.modifications()
if tmp is not None:
result.update(tmp)
if len(result) == 0:
return None
return result
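# Illustrative usage sketch -- assumptions: `ml_repo` is an MLRepo instance from
# this package, `data_frame` is a pandas DataFrame with columns 'x0', 'x1', 'y',
# and the raw_data.add() signature matches the docstring above; all names here
# are placeholders, not part of the tested API.
def _example_ml_tree_usage(ml_repo, data_frame):
    MLTree.add_tree(ml_repo)  # attaches the tree as ml_repo.tree
    ml_repo.tree.raw_data.add('sample', data_frame,
                              input_variables=['x0', 'x1'],
                              target_variables=['y'])
    # modifications() returns None if nothing is pending, otherwise a dict
    # mapping object ids to their modified attributes.
    return ml_repo.tree.modifications()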
| [
[
[
1558,
1565
],
[
1938,
1945
]
],
[
[
1584,
1588
],
[
13341,
13345
],
[
13426,
13430
]
],
[
[
1610,
1618
],
[
4348,
4356
]
],
[
[
1651,
1663
],
[
9414,
9426
],
[
10427,
10439
],
[
13058,
13070
],
[
13636,
13648
],
[
14185,
14197
],
[
14655,
14667
],
[
15239,
15251
],
[
15660,
15672
],
[
16361,
16373
],
[
16884,
16896
],
[
17366,
17378
],
[
17855,
17867
],
[
19438,
19450
],
[
19820,
19832
],
[
20471,
20483
]
],
[
[
1665,
1671
]
],
[
[
1712,
1723
],
[
5206,
5217
],
[
5224,
5235
],
[
5244,
5255
],
[
5269,
5280
],
[
4253,
4264
],
[
4299,
4310
],
[
8661,
8672
],
[
12793,
12804
],
[
12943,
12954
],
[
13521,
13532
],
[
14609,
14620
],
[
14633,
14644
],
[
15614,
15625
],
[
15638,
15649
]
],
[
[
1725,
1732
],
[
9030,
9037
],
[
9535,
9542
]
],
[
[
1796,
1805
]
],
[
[
1838,
1877
],
[
2618,
2628
],
[
5125,
5135
],
[
5150,
5160
],
[
19144,
19154
],
[
9130,
9140
],
[
9621,
9631
]
],
[
[
1885,
1928
],
[
12632,
12644
],
[
12855,
12867
],
[
13463,
13475
],
[
14506,
14518
],
[
15511,
15523
]
],
[
[
1929,
1935
],
[
7594,
7600
],
[
10102,
10108
]
],
[
[
2006,
2021
],
[
6843,
6858
],
[
10191,
10206
],
[
13822,
13837
],
[
14885,
14900
],
[
15879,
15894
],
[
16038,
16053
],
[
16203,
16218
],
[
16733,
16748
],
[
17214,
17229
],
[
17693,
17708
],
[
18273,
18288
],
[
19297,
19312
],
[
19604,
19619
],
[
20119,
20134
],
[
5950,
5965
],
[
6483,
6498
],
[
14312,
14327
],
[
14784,
14799
],
[
15358,
15373
],
[
15785,
15800
],
[
16551,
16566
],
[
17076,
17091
],
[
17553,
17568
],
[
17608,
17623
],
[
18136,
18151
],
[
18433,
18448
],
[
18563,
18578
],
[
18904,
18919
],
[
19045,
19060
],
[
19546,
19561
],
[
20594,
20609
]
],
[
[
6830,
6842
],
[
6931,
6943
],
[
10544,
10556
],
[
13176,
13188
],
[
13754,
13766
]
],
[
[
10172,
10190
],
[
10343,
10361
],
[
10500,
10518
],
[
20991,
21009
]
],
[
[
13798,
13821
],
[
13979,
14002
],
[
14263,
14286
],
[
21055,
21078
]
],
[
[
14865,
14884
],
[
15041,
15060
],
[
15313,
15332
],
[
21120,
21139
]
],
[
[
15866,
15878
],
[
15967,
15979
],
[
16606,
16618
],
[
17131,
17143
]
],
[
[
16029,
16037
],
[
16126,
16134
],
[
18195,
18203
]
],
[
[
16184,
16202
],
[
16274,
16292
],
[
18698,
18716
]
],
[
[
16717,
16732
],
[
16804,
16819
],
[
18495,
18510
]
],
[
[
17198,
17213
],
[
17285,
17300
],
[
18632,
18647
]
],
[
[
17678,
17692
],
[
17776,
17790
],
[
18764,
18778
]
],
[
[
18262,
18272
],
[
18361,
18371
],
[
19932,
19942
],
[
20060,
20070
]
],
[
[
19280,
19296
],
[
19359,
19375
],
[
19975,
19991
]
],
[
[
19587,
19603
],
[
19741,
19757
],
[
19890,
19906
],
[
21178,
21194
]
],
[
[
20098,
20118
],
[
20276,
20296
],
[
20548,
20568
],
[
21232,
21252
]
],
[
[
20640,
20646
],
[
20859,
20865
]
]
] |
count = input('How many people will be in the dinner group? ')
count = int(count)
if count > 8:
print('You\'ll have to wait for a table.')
else:
print('The table is ready.')
| [
[
[
1,
6
],
[
76,
81
]
],
[
[
64,
69
],
[
87,
92
]
]
] |
#!/usr/bin/env python
from distutils.core import setup
from glob import glob
from os.path import basename, splitext
from setuptools import find_packages
setup(name='Fibonacci',
version='1.0',
description='Python Distribution Utilities',
author='Kevin Chen',
packages=find_packages('src'),
package_dir={'': 'src'},
py_modules=[splitext(basename(path))[0] for path in glob('src/*.py')],
) | [
[
[
51,
56
],
[
118,
123
]
],
[
[
74,
78
],
[
367,
371
]
],
[
[
103,
116
],
[
256,
269
]
]
] |
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Image caching and management.
"""
import os
from os_win import utilsfactory
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import units
import nova.conf
from nova import exception
from nova import utils
from nova.virt.hyperv import pathutils
from nova.virt import images
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class ImageCache(object):
def __init__(self):
self._pathutils = pathutils.PathUtils()
self._vhdutils = utilsfactory.get_vhdutils()
def _get_root_vhd_size_gb(self, instance):
if instance.old_flavor:
return instance.old_flavor.root_gb
else:
return instance.root_gb
def _resize_and_cache_vhd(self, instance, vhd_path):
vhd_size = self._vhdutils.get_vhd_size(vhd_path)['VirtualSize']
root_vhd_size_gb = self._get_root_vhd_size_gb(instance)
root_vhd_size = root_vhd_size_gb * units.Gi
root_vhd_internal_size = (
self._vhdutils.get_internal_vhd_size_by_file_size(
vhd_path, root_vhd_size))
if root_vhd_internal_size < vhd_size:
raise exception.FlavorDiskSmallerThanImage(
flavor_size=root_vhd_size, image_size=vhd_size)
if root_vhd_internal_size > vhd_size:
path_parts = os.path.splitext(vhd_path)
resized_vhd_path = '%s_%s%s' % (path_parts[0],
root_vhd_size_gb,
path_parts[1])
@utils.synchronized(resized_vhd_path)
def copy_and_resize_vhd():
if not self._pathutils.exists(resized_vhd_path):
try:
LOG.debug("Copying VHD %(vhd_path)s to "
"%(resized_vhd_path)s",
{'vhd_path': vhd_path,
'resized_vhd_path': resized_vhd_path})
self._pathutils.copyfile(vhd_path, resized_vhd_path)
LOG.debug("Resizing VHD %(resized_vhd_path)s to new "
"size %(root_vhd_size)s",
{'resized_vhd_path': resized_vhd_path,
'root_vhd_size': root_vhd_size})
self._vhdutils.resize_vhd(resized_vhd_path,
root_vhd_internal_size,
is_file_max_size=False)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(resized_vhd_path):
self._pathutils.remove(resized_vhd_path)
copy_and_resize_vhd()
return resized_vhd_path
def get_cached_image(self, context, instance):
image_id = instance.image_ref
base_vhd_dir = self._pathutils.get_base_vhd_dir()
base_vhd_path = os.path.join(base_vhd_dir, image_id)
@utils.synchronized(base_vhd_path)
def fetch_image_if_not_existing():
vhd_path = None
for format_ext in ['vhd', 'vhdx']:
test_path = base_vhd_path + '.' + format_ext
if self._pathutils.exists(test_path):
vhd_path = test_path
break
if not vhd_path:
try:
images.fetch(context, image_id, base_vhd_path)
format_ext = self._vhdutils.get_vhd_format(base_vhd_path)
vhd_path = base_vhd_path + '.' + format_ext.lower()
self._pathutils.rename(base_vhd_path, vhd_path)
except Exception:
with excutils.save_and_reraise_exception():
if self._pathutils.exists(base_vhd_path):
self._pathutils.remove(base_vhd_path)
return vhd_path
vhd_path = fetch_image_if_not_existing()
if CONF.use_cow_images and vhd_path.split('.')[-1].lower() == 'vhd':
# Resize the base VHD image as it's not possible to resize a
# differencing VHD. This does not apply to VHDX images.
resized_vhd_path = self._resize_and_cache_vhd(instance, vhd_path)
if resized_vhd_path:
return resized_vhd_path
return vhd_path
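# Illustrative usage sketch -- assumptions: `context` is the usual nova request
# context and `instance` a nova Instance with a valid image_ref; both names are
# placeholders, not part of this module.
def _example_fetch_base_image(context, instance):
    cache = ImageCache()
    # Fetches and caches the image on first use; for VHD images with CoW
    # enabled it returns a copy resized to the flavor's root disk size.
    return cache.get_cached_image(context, instance)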
| [
[
[
683,
685
],
[
1974,
1976
],
[
3683,
3685
]
],
[
[
706,
718
],
[
1135,
1147
]
],
[
[
740,
754
],
[
959,
966
]
],
[
[
778,
786
],
[
3254,
3262
],
[
4460,
4468
]
],
[
[
810,
815
],
[
1578,
1583
]
],
[
[
824,
833
],
[
995,
999
]
],
[
[
851,
860
],
[
1801,
1810
]
],
[
[
878,
883
],
[
2195,
2200
],
[
3730,
3735
]
],
[
[
913,
922
],
[
1088,
1097
]
],
[
[
945,
951
],
[
4135,
4141
]
],
[
[
953,
956
],
[
2385,
2388
],
[
2716,
2719
]
],
[
[
988,
992
],
[
4722,
4726
]
],
[
[
1018,
1028
]
]
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Bottle is a fast and simple micro-framework for small web applications. It
offers request dispatching (Routes) with url parameter support, templates,
a built-in HTTP Server and adapters for many third party WSGI/HTTP-server and
template engines - all in a single file and with no dependencies other than the
Python Standard Library.
Homepage and documentation: http://bottlepy.org/
Copyright (c) 2014, Marcel Hellkamp.
License: MIT (see LICENSE for details)
"""
from __future__ import with_statement
__author__ = 'Marcel Hellkamp'
__version__ = '0.13-dev'
__license__ = 'MIT'
# The gevent and eventlet server adapters need to patch some modules before
# they are imported. This is why we parse the commandline parameters here but
# handle them later
if __name__ == '__main__':
from optparse import OptionParser
_cmd_parser = OptionParser(usage="usage: %prog [options] package.module:app")
_opt = _cmd_parser.add_option
_opt("--version", action="store_true", help="show version number.")
_opt("-b", "--bind", metavar="ADDRESS", help="bind socket to ADDRESS.")
_opt("-s", "--server", default='wsgiref', help="use SERVER as backend.")
_opt("-p", "--plugin", action="append", help="install additional plugin/s.")
_opt("--debug", action="store_true", help="start server in debug mode.")
_opt("--reload", action="store_true", help="auto-reload on file changes.")
_cmd_options, _cmd_args = _cmd_parser.parse_args()
if _cmd_options.server:
if _cmd_options.server.startswith('gevent'):
import gevent.monkey; gevent.monkey.patch_all()
elif _cmd_options.server.startswith('eventlet'):
import eventlet; eventlet.monkey_patch()
import base64, cgi, email.utils, functools, hmac, imp, itertools, mimetypes,\
os, re, subprocess, sys, tempfile, threading, time, warnings
from datetime import date as datedate, datetime, timedelta
from tempfile import TemporaryFile
from traceback import format_exc, print_exc
from inspect import getargspec
from unicodedata import normalize
try: from simplejson import dumps as json_dumps, loads as json_lds
except ImportError: # pragma: no cover
try: from json import dumps as json_dumps, loads as json_lds
except ImportError:
try: from django.utils.simplejson import dumps as json_dumps, loads as json_lds
except ImportError:
def json_dumps(data):
raise ImportError("JSON support requires Python 2.6 or simplejson.")
json_lds = json_dumps
# We now try to fix 2.5/2.6/3.1/3.2 incompatibilities.
# It ain't pretty but it works... Sorry for the mess.
py = sys.version_info
py3k = py >= (3, 0, 0)
py25 = py < (2, 6, 0)
py31 = (3, 1, 0) <= py < (3, 2, 0)
# Workaround for the missing "as" keyword in py3k.
def _e(): return sys.exc_info()[1]
# Workaround for the "print is a keyword/function" Python 2/3 dilemma
# and a fallback for mod_wsgi (restricts stdout/err attribute access)
try:
_stdout, _stderr = sys.stdout.write, sys.stderr.write
except IOError:
_stdout = lambda x: sys.stdout.write(x)
_stderr = lambda x: sys.stderr.write(x)
# Lots of stdlib and builtin differences.
if py3k:
import http.client as httplib
import _thread as thread
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
from http.cookies import SimpleCookie
from collections import MutableMapping as DictMixin
import pickle
from io import BytesIO
from configparser import ConfigParser
basestring = str
unicode = str
json_loads = lambda s: json_lds(touni(s))
callable = lambda x: hasattr(x, '__call__')
imap = map
def _raise(*a): raise a[0](a[1]).with_traceback(a[2])
else: # 2.x
import httplib
import thread
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
from Cookie import SimpleCookie
from itertools import imap
import cPickle as pickle
from StringIO import StringIO as BytesIO
from ConfigParser import SafeConfigParser as ConfigParser
if py25:
msg = "Python 2.5 support may be dropped in future versions of Bottle."
warnings.warn(msg, DeprecationWarning)
from UserDict import DictMixin
def next(it): return it.next()
bytes = str
else: # 2.6, 2.7
from collections import MutableMapping as DictMixin
unicode = unicode
json_loads = json_lds
eval(compile('def _raise(*a): raise a[0], a[1], a[2]', '<py3fix>', 'exec'))
# Some helpers for string/byte handling
def tob(s, enc='utf8'):
return s.encode(enc) if isinstance(s, unicode) else bytes(s)
def touni(s, enc='utf8', err='strict'):
if isinstance(s, bytes):
return s.decode(enc, err)
else:
return unicode(s or ("" if s is None else s))
tonat = touni if py3k else tob
# 3.2 fixes cgi.FieldStorage to accept bytes (which makes a lot of sense).
# 3.1 needs a workaround.
if py31:
from io import TextIOWrapper
class NCTextIOWrapper(TextIOWrapper):
def close(self): pass # Keep wrapped buffer open.
# A bug in functools causes it to break if the wrapper is an instance method
def update_wrapper(wrapper, wrapped, *a, **ka):
try:
functools.update_wrapper(wrapper, wrapped, *a, **ka)
except AttributeError:
pass
# These helpers are used at module level and need to be defined first.
# And yes, I know PEP-8, but sometimes a lower-case classname makes more sense.
def depr(message, strict=False):
warnings.warn(message, DeprecationWarning, stacklevel=3)
def makelist(data): # This is just too handy
if isinstance(data, (tuple, list, set, dict)):
return list(data)
elif data:
return [data]
else:
return []
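# e.g. makelist('x') == ['x'], makelist(('a', 'b')) == ['a', 'b'], makelist(None) == []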
class DictProperty(object):
""" Property that maps to a key in a local dict-like attribute. """
def __init__(self, attr, key=None, read_only=False):
self.attr, self.key, self.read_only = attr, key, read_only
def __call__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter, self.key = func, self.key or func.__name__
return self
def __get__(self, obj, cls):
if obj is None: return self
key, storage = self.key, getattr(obj, self.attr)
if key not in storage: storage[key] = self.getter(obj)
return storage[key]
def __set__(self, obj, value):
if self.read_only: raise AttributeError("Read-Only property.")
getattr(obj, self.attr)[self.key] = value
def __delete__(self, obj):
if self.read_only: raise AttributeError("Read-Only property.")
del getattr(obj, self.attr)[self.key]
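# Usage note: BaseRequest below relies on this descriptor, e.g.
# ``@DictProperty('environ', 'bottle.app', read_only=True)`` caches the
# decorated getter's result under environ['bottle.app'] on first access.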
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None: return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
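# Usage note: ``Route.call`` below is a cached_property; the first access runs
# Route._make_callback() and stores the result on the instance, so later
# accesses bypass the plugin machinery entirely.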
class lazy_attribute(object):
""" A property that caches itself to the class object. """
def __init__(self, func):
functools.update_wrapper(self, func, updated=[])
self.getter = func
def __get__(self, obj, cls):
value = self.getter(cls)
setattr(cls, self.__name__, value)
return value
###############################################################################
# Exceptions and Events ########################################################
###############################################################################
class BottleException(Exception):
""" A base class for exceptions used by bottle. """
pass
###############################################################################
# Routing ######################################################################
###############################################################################
class RouteError(BottleException):
""" This is a base class for all routing related exceptions """
class RouteReset(BottleException):
""" If raised by a plugin or request handler, the route is reset and all
plugins are re-applied. """
class RouterUnknownModeError(RouteError): pass
class RouteSyntaxError(RouteError):
""" The route parser found something not supported by this router. """
class RouteBuildError(RouteError):
""" The route could not be built. """
def _re_flatten(p):
""" Turn all capturing groups in a regular expression pattern into
non-capturing groups. """
if '(' not in p:
return p
return re.sub(r'(\\*)(\(\?P<[^>]+>|\((?!\?))',
lambda m: m.group(0) if len(m.group(1)) % 2 else m.group(1) + '(?:', p)
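# e.g. _re_flatten(r'(a)(?P<x>b)') == r'(?:a)(?:b)'; escaped parentheses are left untouched.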
class Router(object):
""" A Router is an ordered collection of route->target pairs. It is used to
efficiently match WSGI requests against a number of routes and return
the first target that satisfies the request. The target may be anything,
usually a string, ID or callable object. A route consists of a path-rule
and a HTTP method.
The path-rule is either a static path (e.g. `/contact`) or a dynamic
path that contains wildcards (e.g. `/wiki/<page>`). The wildcard syntax
and details on the matching order are described in docs:`routing`.
"""
default_pattern = '[^/]+'
default_filter = 're'
#: The current CPython regexp implementation does not allow more
#: than 99 matching groups per regular expression.
_MAX_GROUPS_PER_PATTERN = 99
def __init__(self, strict=False):
self.rules = [] # All rules in order
self._groups = {} # index of regexes to find them in dyna_routes
self.builder = {} # Data structure for the url builder
self.static = {} # Search structure for static routes
self.dyna_routes = {}
self.dyna_regexes = {} # Search structure for dynamic routes
#: If true, static routes are no longer checked first.
self.strict_order = strict
self.filters = {
're': lambda conf:
(_re_flatten(conf or self.default_pattern), None, None),
'int': lambda conf: (r'-?\d+', int, lambda x: str(int(x))),
'float': lambda conf: (r'-?[\d.]+', float, lambda x: str(float(x))),
'path': lambda conf: (r'.+?', None, None)}
def add_filter(self, name, func):
""" Add a filter. The provided function is called with the configuration
string as parameter and must return a (regexp, to_python, to_url) tuple.
The first element is a string, the last two are callables or None. """
self.filters[name] = func
rule_syntax = re.compile('(\\\\*)'
'(?:(?::([a-zA-Z_][a-zA-Z_0-9]*)?()(?:#(.*?)#)?)'
'|(?:<([a-zA-Z_][a-zA-Z_0-9]*)?(?::([a-zA-Z_]*)'
'(?::((?:\\\\.|[^\\\\>]+)+)?)?)?>))')
def _itertokens(self, rule):
offset, prefix = 0, ''
for match in self.rule_syntax.finditer(rule):
prefix += rule[offset:match.start()]
g = match.groups()
if len(g[0])%2: # Escaped wildcard
prefix += match.group(0)[len(g[0]):]
offset = match.end()
continue
if prefix:
yield prefix, None, None
name, filtr, conf = g[4:7] if g[2] is None else g[1:4]
yield name, filtr or 'default', conf or None
offset, prefix = match.end(), ''
if offset <= len(rule) or prefix:
yield prefix+rule[offset:], None, None
def add(self, rule, method, target, name=None):
""" Add a new rule or replace the target for an existing rule. """
anons = 0 # Number of anonymous wildcards found
keys = [] # Names of keys
pattern = '' # Regular expression pattern with named groups
filters = [] # Lists of wildcard input filters
builder = [] # Data structure for the URL builder
is_static = True
for key, mode, conf in self._itertokens(rule):
if mode:
is_static = False
if mode == 'default': mode = self.default_filter
mask, in_filter, out_filter = self.filters[mode](conf)
if not key:
pattern += '(?:%s)' % mask
key = 'anon%d' % anons
anons += 1
else:
pattern += '(?P<%s>%s)' % (key, mask)
keys.append(key)
if in_filter: filters.append((key, in_filter))
builder.append((key, out_filter or str))
elif key:
pattern += re.escape(key)
builder.append((None, key))
self.builder[rule] = builder
if name: self.builder[name] = builder
if is_static and not self.strict_order:
self.static.setdefault(method, {})
self.static[method][self.build(rule)] = (target, None)
return
try:
re_pattern = re.compile('^(%s)$' % pattern)
re_match = re_pattern.match
except re.error:
raise RouteSyntaxError("Could not add Route: %s (%s)" % (rule, _e()))
if filters:
def getargs(path):
url_args = re_match(path).groupdict()
for name, wildcard_filter in filters:
try:
url_args[name] = wildcard_filter(url_args[name])
except ValueError:
raise HTTPError(400, 'Path has wrong format.')
return url_args
elif re_pattern.groupindex:
def getargs(path):
return re_match(path).groupdict()
else:
getargs = None
flatpat = _re_flatten(pattern)
whole_rule = (rule, flatpat, target, getargs)
if (flatpat, method) in self._groups:
if DEBUG:
msg = 'Route <%s %s> overwrites a previously defined route'
warnings.warn(msg % (method, rule), RuntimeWarning)
self.dyna_routes[method][self._groups[flatpat, method]] = whole_rule
else:
self.dyna_routes.setdefault(method, []).append(whole_rule)
self._groups[flatpat, method] = len(self.dyna_routes[method]) - 1
self._compile(method)
def _compile(self, method):
all_rules = self.dyna_routes[method]
comborules = self.dyna_regexes[method] = []
maxgroups = self._MAX_GROUPS_PER_PATTERN
for x in range(0, len(all_rules), maxgroups):
some = all_rules[x:x+maxgroups]
combined = (flatpat for (_, flatpat, _, _) in some)
combined = '|'.join('(^%s$)' % flatpat for flatpat in combined)
combined = re.compile(combined).match
rules = [(target, getargs) for (_, _, target, getargs) in some]
comborules.append((combined, rules))
def build(self, _name, *anons, **query):
""" Build an URL by filling the wildcards in a rule. """
builder = self.builder.get(_name)
if not builder: raise RouteBuildError("No route with that name.", _name)
try:
for i, value in enumerate(anons): query['anon%d'%i] = value
url = ''.join([f(query.pop(n)) if n else f for (n,f) in builder])
return url if not query else url+'?'+urlencode(query)
except KeyError:
raise RouteBuildError('Missing URL argument: %r' % _e().args[0])
def match(self, environ):
""" Return a (target, url_args) tuple or raise HTTPError(400/404/405). """
verb = environ['REQUEST_METHOD'].upper()
path = environ['PATH_INFO'] or '/'
if verb == 'HEAD':
methods = ['PROXY', verb, 'GET', 'ANY']
else:
methods = ['PROXY', verb, 'ANY']
for method in methods:
if method in self.static and path in self.static[method]:
target, getargs = self.static[method][path]
return target, getargs(path) if getargs else {}
elif method in self.dyna_regexes:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
target, getargs = rules[match.lastindex - 1]
return target, getargs(path) if getargs else {}
# No matching route found. Collect alternative methods for 405 response
allowed = set([])
nocheck = set(methods)
for method in set(self.static) - nocheck:
if path in self.static[method]:
allowed.add(verb)
for method in set(self.dyna_regexes) - allowed - nocheck:
for combined, rules in self.dyna_regexes[method]:
match = combined(path)
if match:
allowed.add(method)
if allowed:
allow_header = ",".join(sorted(allowed))
raise HTTPError(405, "Method not allowed.", Allow=allow_header)
# No matching route and no alternative method found. We give up
raise HTTPError(404, "Not found: " + repr(path))
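# Illustrative usage sketch -- the target may be any object; Bottle.add_route
# below registers Route instances, but a plain string works for demonstration.
def _example_router_usage():
    router = Router()
    router.add('/wiki/<page>', 'GET', 'wiki-handler')
    environ = {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/wiki/Home'}
    # -> ('wiki-handler', {'page': 'Home'})
    return router.match(environ)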
class Route(object):
""" This class wraps a route callback along with route specific metadata and
configuration and applies Plugins on demand. It is also responsible for
turning an URL path rule into a regular expression usable by the Router.
"""
def __init__(self, app, rule, method, callback, name=None,
plugins=None, skiplist=None, **config):
#: The application this route is installed to.
self.app = app
#: The path-rule string (e.g. ``/wiki/<page>``).
self.rule = rule
#: The HTTP method as a string (e.g. ``GET``).
self.method = method
#: The original callback with no plugins applied. Useful for introspection.
self.callback = callback
#: The name of the route (if specified) or ``None``.
self.name = name or None
#: A list of route-specific plugins (see :meth:`Bottle.route`).
self.plugins = plugins or []
#: A list of plugins to not apply to this route (see :meth:`Bottle.route`).
self.skiplist = skiplist or []
#: Additional keyword arguments passed to the :meth:`Bottle.route`
#: decorator are stored in this dictionary. Used for route-specific
#: plugin configuration and meta-data.
self.config = ConfigDict().load_dict(config)
@cached_property
def call(self):
""" The route callback with all plugins applied. This property is
created on demand and then cached to speed up subsequent requests."""
return self._make_callback()
def reset(self):
""" Forget any cached values. The next time :attr:`call` is accessed,
all plugins are re-applied. """
self.__dict__.pop('call', None)
def prepare(self):
""" Do all on-demand work immediately (useful for debugging)."""
self.call
def all_plugins(self):
""" Yield all Plugins affecting this route. """
unique = set()
for p in reversed(self.app.plugins + self.plugins):
if True in self.skiplist: break
name = getattr(p, 'name', False)
if name and (name in self.skiplist or name in unique): continue
if p in self.skiplist or type(p) in self.skiplist: continue
if name: unique.add(name)
yield p
def _make_callback(self):
callback = self.callback
for plugin in self.all_plugins():
try:
if hasattr(plugin, 'apply'):
callback = plugin.apply(callback, self)
else:
callback = plugin(callback)
except RouteReset: # Try again with changed configuration.
return self._make_callback()
if not callback is self.callback:
update_wrapper(callback, self.callback)
return callback
def get_undecorated_callback(self):
""" Return the callback. If the callback is a decorated function, try to
recover the original function. """
func = self.callback
func = getattr(func, '__func__' if py3k else 'im_func', func)
closure_attr = '__closure__' if py3k else 'func_closure'
while hasattr(func, closure_attr) and getattr(func, closure_attr):
func = getattr(func, closure_attr)[0].cell_contents
return func
def get_callback_args(self):
""" Return a list of argument names the callback (most likely) accepts
as keyword arguments. If the callback is a decorated function, try
to recover the original function before inspection. """
return getargspec(self.get_undecorated_callback())[0]
def get_config(self, key, default=None):
""" Lookup a config field and return its value, first checking the
route.config, then route.app.config."""
for conf in (self.config, self.app.config):
if key in conf: return conf[key]
return default
def __repr__(self):
cb = self.get_undecorated_callback()
return '<%s %r %r>' % (self.method, self.rule, cb)
###############################################################################
# Application Object ###########################################################
###############################################################################
class Bottle(object):
""" Each Bottle object represents a single, distinct web application and
consists of routes, callbacks, plugins, resources and configuration.
Instances are callable WSGI applications.
:param catchall: If true (default), handle all exceptions. Turn off to
let debugging middleware handle exceptions.
"""
def __init__(self, catchall=True, autojson=True):
#: A :class:`ConfigDict` for app specific configuration.
self.config = ConfigDict()
self.config._on_change = functools.partial(self.trigger_hook, 'config')
self.config.meta_set('autojson', 'validate', bool)
self.config.meta_set('catchall', 'validate', bool)
self.config['catchall'] = catchall
self.config['autojson'] = autojson
#: A :class:`ResourceManager` for application files
self.resources = ResourceManager()
self.routes = [] # List of installed :class:`Route` instances.
self.router = Router() # Maps requests to :class:`Route` instances.
self.error_handler = {}
# Core plugins
self.plugins = [] # List of installed plugins.
if self.config['autojson']:
self.install(JSONPlugin())
self.install(TemplatePlugin())
#: If true, most exceptions are caught and returned as :exc:`HTTPError`
catchall = DictProperty('config', 'catchall')
__hook_names = 'before_request', 'after_request', 'app_reset', 'config'
__hook_reversed = 'after_request'
@cached_property
def _hooks(self):
return dict((name, []) for name in self.__hook_names)
def add_hook(self, name, func):
""" Attach a callback to a hook. Three hooks are currently implemented:
before_request
Executed once before each request. The request context is
available, but no routing has happened yet.
after_request
Executed once after each request regardless of its outcome.
app_reset
Called whenever :meth:`Bottle.reset` is called.
"""
if name in self.__hook_reversed:
self._hooks[name].insert(0, func)
else:
self._hooks[name].append(func)
def remove_hook(self, name, func):
""" Remove a callback from a hook. """
if name in self._hooks and func in self._hooks[name]:
self._hooks[name].remove(func)
return True
def trigger_hook(self, __name, *args, **kwargs):
""" Trigger a hook and return a list of results. """
return [hook(*args, **kwargs) for hook in self._hooks[__name][:]]
def hook(self, name):
""" Return a decorator that attaches a callback to a hook. See
:meth:`add_hook` for details."""
def decorator(func):
self.add_hook(name, func)
return func
return decorator
def mount(self, prefix, app, **options):
""" Mount an application (:class:`Bottle` or plain WSGI) to a specific
URL prefix. Example::
root_app.mount('/admin/', admin_app)
:param prefix: path prefix or `mount-point`. If it ends in a slash,
that slash is mandatory.
:param app: an instance of :class:`Bottle` or a WSGI application.
All other parameters are passed to the underlying :meth:`route` call.
"""
segments = [p for p in prefix.split('/') if p]
if not segments: raise ValueError('Empty path prefix.')
path_depth = len(segments)
def mountpoint_wrapper():
try:
request.path_shift(path_depth)
rs = HTTPResponse([])
def start_response(status, headerlist, exc_info=None):
if exc_info:
_raise(*exc_info)
rs.status = status
for name, value in headerlist: rs.add_header(name, value)
return rs.body.append
body = app(request.environ, start_response)
if body and rs.body: body = itertools.chain(rs.body, body)
rs.body = body or rs.body
return rs
finally:
request.path_shift(-path_depth)
options.setdefault('skip', True)
options.setdefault('method', 'PROXY')
options.setdefault('mountpoint', {'prefix': prefix, 'target': app})
options['callback'] = mountpoint_wrapper
self.route('/%s/<:re:.*>' % '/'.join(segments), **options)
if not prefix.endswith('/'):
self.route('/' + '/'.join(segments), **options)
def merge(self, routes):
""" Merge the routes of another :class:`Bottle` application or a list of
:class:`Route` objects into this application. The routes keep their
'owner', meaning that the :data:`Route.app` attribute is not
changed. """
if isinstance(routes, Bottle):
routes = routes.routes
for route in routes:
self.add_route(route)
def install(self, plugin):
""" Add a plugin to the list of plugins and prepare it for being
applied to all routes of this application. A plugin may be a simple
decorator or an object that implements the :class:`Plugin` API.
"""
if hasattr(plugin, 'setup'): plugin.setup(self)
if not callable(plugin) and not hasattr(plugin, 'apply'):
raise TypeError("Plugins must be callable or implement .apply()")
self.plugins.append(plugin)
self.reset()
return plugin
def uninstall(self, plugin):
""" Uninstall plugins. Pass an instance to remove a specific plugin, a type
object to remove all plugins that match that type, a string to remove
all plugins with a matching ``name`` attribute or ``True`` to remove all
plugins. Return the list of removed plugins. """
removed, remove = [], plugin
for i, plugin in list(enumerate(self.plugins))[::-1]:
if remove is True or remove is plugin or remove is type(plugin) \
or getattr(plugin, 'name', True) == remove:
removed.append(plugin)
del self.plugins[i]
if hasattr(plugin, 'close'): plugin.close()
if removed: self.reset()
return removed
def reset(self, route=None):
""" Reset all routes (force plugins to be re-applied) and clear all
caches. If an ID or route object is given, only that specific route
is affected. """
if route is None: routes = self.routes
elif isinstance(route, Route): routes = [route]
else: routes = [self.routes[route]]
for route in routes: route.reset()
if DEBUG:
for route in routes: route.prepare()
self.trigger_hook('app_reset')
def close(self):
""" Close the application and all installed plugins. """
for plugin in self.plugins:
if hasattr(plugin, 'close'): plugin.close()
def run(self, **kwargs):
""" Calls :func:`run` with the same parameters. """
run(self, **kwargs)
def match(self, environ):
""" Search for a matching route and return a (:class:`Route` , urlargs)
tuple. The second value is a dictionary with parameters extracted
from the URL. Raise :exc:`HTTPError` (404/405) on a non-match."""
return self.router.match(environ)
def get_url(self, routename, **kargs):
""" Return a string that matches a named route """
scriptname = request.environ.get('SCRIPT_NAME', '').strip('/') + '/'
location = self.router.build(routename, **kargs).lstrip('/')
return urljoin(urljoin('/', scriptname), location)
def add_route(self, route):
""" Add a route object, but do not change the :data:`Route.app`
attribute."""
self.routes.append(route)
self.router.add(route.rule, route.method, route, name=route.name)
if DEBUG: route.prepare()
def route(self, path=None, method='GET', callback=None, name=None,
apply=None, skip=None, **config):
""" A decorator to bind a function to a request URL. Example::
@app.route('/hello/<name>')
def hello(name):
return 'Hello %s' % name
The ``:name`` part is a wildcard. See :class:`Router` for syntax
details.
:param path: Request path or a list of paths to listen to. If no
path is specified, it is automatically generated from the
signature of the function.
:param method: HTTP method (`GET`, `POST`, `PUT`, ...) or a list of
methods to listen to. (default: `GET`)
:param callback: An optional shortcut to avoid the decorator
syntax. ``route(..., callback=func)`` equals ``route(...)(func)``
:param name: The name for this route. (default: None)
:param apply: A decorator or plugin or a list of plugins. These are
applied to the route callback in addition to installed plugins.
:param skip: A list of plugins, plugin classes or names. Matching
plugins are not installed to this route. ``True`` skips all.
Any additional keyword arguments are stored as route-specific
configuration and passed to plugins (see :meth:`Plugin.apply`).
"""
if callable(path): path, callback = None, path
plugins = makelist(apply)
skiplist = makelist(skip)
def decorator(callback):
if isinstance(callback, basestring): callback = load(callback)
for rule in makelist(path) or yieldroutes(callback):
for verb in makelist(method):
verb = verb.upper()
route = Route(self, rule, verb, callback, name=name,
plugins=plugins, skiplist=skiplist, **config)
self.add_route(route)
return callback
return decorator(callback) if callback else decorator
def get(self, path=None, method='GET', **options):
""" Equals :meth:`route`. """
return self.route(path, method, **options)
def post(self, path=None, method='POST', **options):
""" Equals :meth:`route` with a ``POST`` method parameter. """
return self.route(path, method, **options)
def put(self, path=None, method='PUT', **options):
""" Equals :meth:`route` with a ``PUT`` method parameter. """
return self.route(path, method, **options)
def delete(self, path=None, method='DELETE', **options):
""" Equals :meth:`route` with a ``DELETE`` method parameter. """
return self.route(path, method, **options)
def patch(self, path=None, method='PATCH', **options):
""" Equals :meth:`route` with a ``PATCH`` method parameter. """
return self.route(path, method, **options)
def error(self, code=500):
""" Decorator: Register an output handler for a HTTP error code"""
def wrapper(handler):
self.error_handler[int(code)] = handler
return handler
return wrapper
def default_error_handler(self, res):
return tob(template(ERROR_PAGE_TEMPLATE, e=res))
def _handle(self, environ):
path = environ['bottle.raw_path'] = environ['PATH_INFO']
if py3k:
try:
environ['PATH_INFO'] = path.encode('latin1').decode('utf8')
except UnicodeError:
return HTTPError(400, 'Invalid path string. Expected UTF-8')
try:
environ['bottle.app'] = self
request.bind(environ)
response.bind()
try:
self.trigger_hook('before_request')
route, args = self.router.match(environ)
environ['route.handle'] = route
environ['bottle.route'] = route
environ['route.url_args'] = args
return route.call(**args)
finally:
self.trigger_hook('after_request')
except HTTPResponse:
return _e()
except RouteReset:
route.reset()
return self._handle(environ)
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except Exception:
if not self.catchall: raise
stacktrace = format_exc()
environ['wsgi.errors'].write(stacktrace)
return HTTPError(500, "Internal Server Error", _e(), stacktrace)
def _cast(self, out, peek=None):
""" Try to convert the parameter into something WSGI compatible and set
correct HTTP headers when possible.
Support: False, str, unicode, dict, HTTPResponse, HTTPError, file-like,
iterable of strings and iterable of unicodes
"""
# Empty output is done here
if not out:
if 'Content-Length' not in response:
response['Content-Length'] = 0
return []
# Join lists of byte or unicode strings. Mixed lists are NOT supported
if isinstance(out, (tuple, list))\
and isinstance(out[0], (bytes, unicode)):
out = out[0][0:0].join(out) # b'abc'[0:0] -> b''
# Encode unicode strings
if isinstance(out, unicode):
out = out.encode(response.charset)
# Byte Strings are just returned
if isinstance(out, bytes):
if 'Content-Length' not in response:
response['Content-Length'] = len(out)
return [out]
# HTTPError or HTTPException (recursive, because they may wrap anything)
# TODO: Handle these explicitly in handle() or make them iterable.
if isinstance(out, HTTPError):
out.apply(response)
out = self.error_handler.get(out.status_code, self.default_error_handler)(out)
return self._cast(out)
if isinstance(out, HTTPResponse):
out.apply(response)
return self._cast(out.body)
# File-like objects.
if hasattr(out, 'read'):
if 'wsgi.file_wrapper' in request.environ:
return request.environ['wsgi.file_wrapper'](out)
elif hasattr(out, 'close') or not hasattr(out, '__iter__'):
return WSGIFileWrapper(out)
# Handle Iterables. We peek into them to detect their inner type.
try:
iout = iter(out)
first = next(iout)
while not first:
first = next(iout)
except StopIteration:
return self._cast('')
except HTTPResponse:
first = _e()
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
first = HTTPError(500, 'Unhandled exception', _e(), format_exc())
# These are the inner types allowed in iterator or generator objects.
if isinstance(first, HTTPResponse):
return self._cast(first)
elif isinstance(first, bytes):
new_iter = itertools.chain([first], iout)
elif isinstance(first, unicode):
encoder = lambda x: x.encode(response.charset)
new_iter = imap(encoder, itertools.chain([first], iout))
else:
msg = 'Unsupported response type: %s' % type(first)
return self._cast(HTTPError(500, msg))
if hasattr(out, 'close'):
new_iter = _closeiter(new_iter, out.close)
return new_iter
def wsgi(self, environ, start_response):
""" The bottle WSGI-interface. """
try:
out = self._cast(self._handle(environ))
# rfc2616 section 4.3
if response._status_code in (100, 101, 204, 304)\
or environ['REQUEST_METHOD'] == 'HEAD':
if hasattr(out, 'close'): out.close()
out = []
start_response(response._status_line, response.headerlist)
return out
except (KeyboardInterrupt, SystemExit, MemoryError):
raise
except:
if not self.catchall: raise
err = '<h1>Critical error while processing request: %s</h1>' \
% html_escape(environ.get('PATH_INFO', '/'))
if DEBUG:
err += '<h2>Error:</h2>\n<pre>\n%s\n</pre>\n' \
'<h2>Traceback:</h2>\n<pre>\n%s\n</pre>\n' \
% (html_escape(repr(_e())), html_escape(format_exc()))
environ['wsgi.errors'].write(err)
headers = [('Content-Type', 'text/html; charset=UTF-8')]
start_response('500 INTERNAL SERVER ERROR', headers, sys.exc_info())
return [tob(err)]
def __call__(self, environ, start_response):
""" Each instance of :class:'Bottle' is a WSGI application. """
return self.wsgi(environ, start_response)
def __enter__(self):
""" Use this application as default for all module-level shortcuts. """
default_app.push(self)
return self
def __exit__(self, exc_type, exc_value, traceback):
default_app.pop()
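# Illustrative wiring sketch -- a Bottle instance is itself a WSGI callable
# (see __call__ above), so it can be handed to any WSGI server; the wsgiref
# lines below are only one example of such a server.
# app = Bottle()
# app.route('/ping', callback=lambda: 'pong')  # route(..., callback=func) == route(...)(func)
# from wsgiref.simple_server import make_server
# make_server('localhost', 8080, app).serve_forever()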
###############################################################################
# HTTP and WSGI Tools ##########################################################
###############################################################################
class BaseRequest(object):
""" A wrapper for WSGI environment dictionaries that adds a lot of
convenient access methods and properties. Most of them are read-only.
Adding new attributes to a request actually adds them to the environ
dictionary (as 'bottle.request.ext.<name>'). This is the recommended
way to store and access request-specific data.
"""
__slots__ = ('environ', )
#: Maximum size of memory buffer for :attr:`body` in bytes.
MEMFILE_MAX = 102400
def __init__(self, environ=None):
""" Wrap a WSGI environ dictionary. """
#: The wrapped WSGI environ dictionary. This is the only real attribute.
#: All other attributes actually are read-only properties.
self.environ = {} if environ is None else environ
self.environ['bottle.request'] = self
@DictProperty('environ', 'bottle.app', read_only=True)
def app(self):
""" Bottle application handling this request. """
raise RuntimeError('This request is not connected to an application.')
@DictProperty('environ', 'bottle.route', read_only=True)
def route(self):
""" The bottle :class:`Route` object that matches this request. """
raise RuntimeError('This request is not connected to a route.')
@DictProperty('environ', 'route.url_args', read_only=True)
def url_args(self):
""" The arguments extracted from the URL. """
raise RuntimeError('This request is not connected to a route.')
@property
def path(self):
""" The value of ``PATH_INFO`` with exactly one prefixed slash (to fix
broken clients and avoid the "empty path" edge case). """
return '/' + self.environ.get('PATH_INFO','').lstrip('/')
@property
def method(self):
""" The ``REQUEST_METHOD`` value as an uppercase string. """
return self.environ.get('REQUEST_METHOD', 'GET').upper()
@DictProperty('environ', 'bottle.request.headers', read_only=True)
def headers(self):
""" A :class:`WSGIHeaderDict` that provides case-insensitive access to
HTTP request headers. """
return WSGIHeaderDict(self.environ)
def get_header(self, name, default=None):
""" Return the value of a request header, or a given default value. """
return self.headers.get(name, default)
@DictProperty('environ', 'bottle.request.cookies', read_only=True)
def cookies(self):
""" Cookies parsed into a :class:`FormsDict`. Signed cookies are NOT
decoded. Use :meth:`get_cookie` if you expect signed cookies. """
cookies = SimpleCookie(self.environ.get('HTTP_COOKIE','')).values()
return FormsDict((c.key, c.value) for c in cookies)
def get_cookie(self, key, default=None, secret=None):
""" Return the content of a cookie. To read a `Signed Cookie`, the
`secret` must match the one used to create the cookie (see
:meth:`BaseResponse.set_cookie`). If anything goes wrong (missing
cookie or wrong signature), return a default value. """
value = self.cookies.get(key)
if secret and value:
dec = cookie_decode(value, secret) # (key, value) tuple or None
return dec[1] if dec and dec[0] == key else default
return value or default
@DictProperty('environ', 'bottle.request.query', read_only=True)
def query(self):
""" The :attr:`query_string` parsed into a :class:`FormsDict`. These
values are sometimes called "URL arguments" or "GET parameters", but
not to be confused with "URL wildcards" as they are provided by the
:class:`Router`. """
get = self.environ['bottle.get'] = FormsDict()
pairs = _parse_qsl(self.environ.get('QUERY_STRING', ''))
for key, value in pairs:
get[key] = value
return get
@DictProperty('environ', 'bottle.request.forms', read_only=True)
def forms(self):
""" Form values parsed from an `url-encoded` or `multipart/form-data`
encoded POST or PUT request body. The result is returned as a
:class:`FormsDict`. All keys and values are strings. File uploads
are stored separately in :attr:`files`. """
forms = FormsDict()
for name, item in self.POST.allitems():
if not isinstance(item, FileUpload):
forms[name] = item
return forms
@DictProperty('environ', 'bottle.request.params', read_only=True)
def params(self):
""" A :class:`FormsDict` with the combined values of :attr:`query` and
:attr:`forms`. File uploads are stored in :attr:`files`. """
params = FormsDict()
for key, value in self.query.allitems():
params[key] = value
for key, value in self.forms.allitems():
params[key] = value
return params
@DictProperty('environ', 'bottle.request.files', read_only=True)
def files(self):
""" File uploads parsed from `multipart/form-data` encoded POST or PUT
request body. The values are instances of :class:`FileUpload`.
"""
files = FormsDict()
for name, item in self.POST.allitems():
if isinstance(item, FileUpload):
files[name] = item
return files
@DictProperty('environ', 'bottle.request.json', read_only=True)
def json(self):
""" If the ``Content-Type`` header is ``application/json``, this
property holds the parsed content of the request body. Only requests
smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
exhaustion. """
ctype = self.environ.get('CONTENT_TYPE', '').lower().split(';')[0]
if ctype == 'application/json':
b = self._get_body_string()
if not b:
return None
return json_loads(b)
return None
def _iter_body(self, read, bufsize):
maxread = max(0, self.content_length)
while maxread:
part = read(min(maxread, bufsize))
if not part: break
yield part
maxread -= len(part)
@staticmethod
def _iter_chunked(read, bufsize):
err = HTTPError(400, 'Error while parsing chunked transfer body.')
rn, sem, bs = tob('\r\n'), tob(';'), tob('')
while True:
header = read(1)
while header[-2:] != rn:
c = read(1)
header += c
if not c: raise err
if len(header) > bufsize: raise err
size, _, _ = header.partition(sem)
try:
maxread = int(tonat(size.strip()), 16)
except ValueError:
raise err
if maxread == 0: break
buff = bs
while maxread > 0:
if not buff:
buff = read(min(maxread, bufsize))
part, buff = buff[:maxread], buff[maxread:]
if not part: raise err
yield part
maxread -= len(part)
if read(2) != rn:
raise err
@DictProperty('environ', 'bottle.request.body', read_only=True)
def _body(self):
body_iter = self._iter_chunked if self.chunked else self._iter_body
read_func = self.environ['wsgi.input'].read
body, body_size, is_temp_file = BytesIO(), 0, False
for part in body_iter(read_func, self.MEMFILE_MAX):
body.write(part)
body_size += len(part)
if not is_temp_file and body_size > self.MEMFILE_MAX:
body, tmp = TemporaryFile(mode='w+b'), body
body.write(tmp.getvalue())
del tmp
is_temp_file = True
self.environ['wsgi.input'] = body
body.seek(0)
return body
def _get_body_string(self):
""" read body until content-length or MEMFILE_MAX into a string. Raise
HTTPError(413) on requests that are too large. """
clen = self.content_length
if clen > self.MEMFILE_MAX:
raise HTTPError(413, 'Request too large')
if clen < 0: clen = self.MEMFILE_MAX + 1
data = self.body.read(clen)
if len(data) > self.MEMFILE_MAX: # Fail fast
raise HTTPError(413, 'Request too large')
return data
@property
def body(self):
""" The HTTP request body as a seek-able file-like object. Depending on
:attr:`MEMFILE_MAX`, this is either a temporary file or a
:class:`io.BytesIO` instance. Accessing this property for the first
time reads and replaces the ``wsgi.input`` environ variable.
Subsequent accesses just do a `seek(0)` on the file object. """
self._body.seek(0)
return self._body
@property
def chunked(self):
""" True if Chunked transfer encoding was. """
return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower()
#: An alias for :attr:`query`.
GET = query
@DictProperty('environ', 'bottle.request.post', read_only=True)
def POST(self):
""" The values of :attr:`forms` and :attr:`files` combined into a single
:class:`FormsDict`. Values are either strings (form values) or
instances of :class:`cgi.FieldStorage` (file uploads).
"""
post = FormsDict()
# We default to application/x-www-form-urlencoded for everything that
# is not multipart and take the fast path (also: 3.1 workaround)
if not self.content_type.startswith('multipart/'):
pairs = _parse_qsl(tonat(self._get_body_string(), 'latin1'))
for key, value in pairs:
post[key] = value
return post
safe_env = {'QUERY_STRING':''} # Build a safe environment for cgi
for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
if key in self.environ: safe_env[key] = self.environ[key]
args = dict(fp=self.body, environ=safe_env, keep_blank_values=True)
if py31:
args['fp'] = NCTextIOWrapper(args['fp'], encoding='utf8',
newline='\n')
elif py3k:
args['encoding'] = 'utf8'
data = cgi.FieldStorage(**args)
self['_cgi.FieldStorage'] = data #http://bugs.python.org/issue18394#msg207958
data = data.list or []
for item in data:
if item.filename:
post[item.name] = FileUpload(item.file, item.name,
item.filename, item.headers)
else:
post[item.name] = item.value
return post
@property
def url(self):
""" The full request URI including hostname and scheme. If your app
lives behind a reverse proxy or load balancer and you get confusing
results, make sure that the ``X-Forwarded-Host`` header is set
correctly. """
return self.urlparts.geturl()
@DictProperty('environ', 'bottle.request.urlparts', read_only=True)
def urlparts(self):
""" The :attr:`url` string as an :class:`urlparse.SplitResult` tuple.
The tuple contains (scheme, host, path, query_string and fragment),
but the fragment is always empty because it is not visible to the
server. """
env = self.environ
http = env.get('HTTP_X_FORWARDED_PROTO') or env.get('wsgi.url_scheme', 'http')
host = env.get('HTTP_X_FORWARDED_HOST') or env.get('HTTP_HOST')
if not host:
# HTTP 1.1 requires a Host-header. This is for HTTP/1.0 clients.
host = env.get('SERVER_NAME', '127.0.0.1')
port = env.get('SERVER_PORT')
if port and port != ('80' if http == 'http' else '443'):
host += ':' + port
path = urlquote(self.fullpath)
return UrlSplitResult(http, host, path, env.get('QUERY_STRING'), '')
@property
def fullpath(self):
""" Request path including :attr:`script_name` (if present). """
return urljoin(self.script_name, self.path.lstrip('/'))
@property
def query_string(self):
""" The raw :attr:`query` part of the URL (everything in between ``?``
and ``#``) as a string. """
return self.environ.get('QUERY_STRING', '')
@property
def script_name(self):
""" The initial portion of the URL's `path` that was removed by a higher
level (server or routing middleware) before the application was
called. This script path is returned with leading and trailing
slashes. """
script_name = self.environ.get('SCRIPT_NAME', '').strip('/')
return '/' + script_name + '/' if script_name else '/'
def path_shift(self, shift=1):
""" Shift path segments from :attr:`path` to :attr:`script_name` and
vice versa.
:param shift: The number of path segments to shift. May be negative
to change the shift direction. (default: 1)
"""
script = self.environ.get('SCRIPT_NAME','/')
self['SCRIPT_NAME'], self['PATH_INFO'] = path_shift(script, self.path, shift)
@property
def content_length(self):
""" The request body length as an integer. The client is responsible to
set this header. Otherwise, the real length of the body is unknown
and -1 is returned. In this case, :attr:`body` will be empty. """
return int(self.environ.get('CONTENT_LENGTH') or -1)
@property
def content_type(self):
""" The Content-Type header as a lowercase-string (default: empty). """
return self.environ.get('CONTENT_TYPE', '').lower()
@property
def is_xhr(self):
""" True if the request was triggered by a XMLHttpRequest. This only
works with JavaScript libraries that support the `X-Requested-With`
header (most of the popular libraries do). """
requested_with = self.environ.get('HTTP_X_REQUESTED_WITH','')
return requested_with.lower() == 'xmlhttprequest'
@property
def is_ajax(self):
""" Alias for :attr:`is_xhr`. "Ajax" is not the right term. """
return self.is_xhr
@property
def auth(self):
""" HTTP authentication data as a (user, password) tuple. This
implementation currently supports basic (not digest) authentication
only. If the authentication happened at a higher level (e.g. in the
front web-server or a middleware), the password field is None, but
the user field is looked up from the ``REMOTE_USER`` environ
variable. On any errors, None is returned. """
basic = parse_auth(self.environ.get('HTTP_AUTHORIZATION',''))
if basic: return basic
ruser = self.environ.get('REMOTE_USER')
if ruser: return (ruser, None)
return None
@property
def remote_route(self):
""" A list of all IPs that were involved in this request, starting with
the client IP and followed by zero or more proxies. This only works
if all proxies support the ``X-Forwarded-For`` header. Note
that this information can be forged by malicious clients. """
proxy = self.environ.get('HTTP_X_FORWARDED_FOR')
if proxy: return [ip.strip() for ip in proxy.split(',')]
remote = self.environ.get('REMOTE_ADDR')
return [remote] if remote else []
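# Illustrative sketch (not part of the library): behaviour of remote_route and
# the remote_addr property below for a request that passed through two proxies.
# The addresses are made-up example values.
#   environ['HTTP_X_FORWARDED_FOR'] = '203.0.113.7, 10.0.0.2, 10.0.0.3'
#   request.remote_route  -> ['203.0.113.7', '10.0.0.2', '10.0.0.3']
#   request.remote_addr   -> '203.0.113.7'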
@property
def remote_addr(self):
""" The client IP as a string. Note that this information can be forged
by malicious clients. """
route = self.remote_route
return route[0] if route else None
def copy(self):
""" Return a new :class:`Request` with a shallow :attr:`environ` copy. """
return Request(self.environ.copy())
def get(self, value, default=None): return self.environ.get(value, default)
def __getitem__(self, key): return self.environ[key]
def __delitem__(self, key): self[key] = ""; del(self.environ[key])
def __iter__(self): return iter(self.environ)
def __len__(self): return len(self.environ)
def keys(self): return self.environ.keys()
def __setitem__(self, key, value):
""" Change an environ value and clear all caches that depend on it. """
if self.environ.get('bottle.request.readonly'):
raise KeyError('The environ dictionary is read-only.')
self.environ[key] = value
todelete = ()
if key == 'wsgi.input':
todelete = ('body', 'forms', 'files', 'params', 'post', 'json')
elif key == 'QUERY_STRING':
todelete = ('query', 'params')
elif key.startswith('HTTP_'):
todelete = ('headers', 'cookies')
for key in todelete:
self.environ.pop('bottle.request.'+key, None)
def __repr__(self):
return '<%s: %s %s>' % (self.__class__.__name__, self.method, self.url)
def __getattr__(self, name):
""" Search in self.environ for additional user defined attributes. """
try:
var = self.environ['bottle.request.ext.%s'%name]
return var.__get__(self) if hasattr(var, '__get__') else var
except KeyError:
raise AttributeError('Attribute %r not defined.' % name)
def __setattr__(self, name, value):
if name == 'environ': return object.__setattr__(self, name, value)
self.environ['bottle.request.ext.%s'%name] = value
def _hkey(s):
return s.title().replace('_','-')
class HeaderProperty(object):
def __init__(self, name, reader=None, writer=str, default=''):
self.name, self.default = name, default
self.reader, self.writer = reader, writer
self.__doc__ = 'Current value of the %r header.' % name.title()
def __get__(self, obj, _):
if obj is None: return self
value = obj.headers.get(self.name, self.default)
return self.reader(value) if self.reader else value
def __set__(self, obj, value):
obj.headers[self.name] = self.writer(value)
def __delete__(self, obj):
del obj.headers[self.name]
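# Illustrative sketch (an assumption, not part of the library): HeaderProperty
# is an ordinary descriptor, so any class with a `headers` mapping can expose a
# typed view on a single header, as BaseResponse does below for Content-Type.
#   class MyResponse(BaseResponse):
#       retry_after = HeaderProperty('Retry-After', reader=int, writer=str)
#   resp = MyResponse()
#   resp.retry_after = 120            # stores the string '120' in the headers
#   assert resp.retry_after == 120    # read back through the reader callable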
class BaseResponse(object):
""" Storage class for a response body as well as headers and cookies.
This class does support dict-like case-insensitive item-access to
headers, but is NOT a dict. Most notably, iterating over a response
yields parts of the body and not the headers.
:param body: The response body as one of the supported types.
:param status: Either an HTTP status code (e.g. 200) or a status line
including the reason phrase (e.g. '200 OK').
:param headers: A dictionary or a list of name-value pairs.
Additional keyword arguments are added to the list of headers.
Underscores in the header name are replaced with dashes.
"""
default_status = 200
default_content_type = 'text/html; charset=UTF-8'
# Header blacklist for specific response codes
# (rfc2616 section 10.2.3 and 10.3.5)
bad_headers = {
204: set(('Content-Type',)),
304: set(('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Range', 'Content-Type',
'Content-Md5', 'Last-Modified'))}
def __init__(self, body='', status=None, headers=None, **more_headers):
self._cookies = None
self._headers = {}
self.body = body
self.status = status or self.default_status
if headers:
if isinstance(headers, dict):
headers = headers.items()
for name, value in headers:
self.add_header(name, value)
if more_headers:
for name, value in more_headers.items():
self.add_header(name, value)
def copy(self, cls=None):
""" Returns a copy of self. """
cls = cls or BaseResponse
assert issubclass(cls, BaseResponse)
copy = cls()
copy.status = self.status
copy._headers = dict((k, v[:]) for (k, v) in self._headers.items())
if self._cookies:
copy._cookies = SimpleCookie()
copy._cookies.load(self._cookies.output())
return copy
def __iter__(self):
return iter(self.body)
def close(self):
if hasattr(self.body, 'close'):
self.body.close()
@property
def status_line(self):
""" The HTTP status line as a string (e.g. ``404 Not Found``)."""
return self._status_line
@property
def status_code(self):
""" The HTTP status code as an integer (e.g. 404)."""
return self._status_code
def _set_status(self, status):
if isinstance(status, int):
code, status = status, _HTTP_STATUS_LINES.get(status)
elif ' ' in status:
status = status.strip()
code = int(status.split()[0])
else:
raise ValueError('String status line without a reason phrase.')
if not 100 <= code <= 999: raise ValueError('Status code out of range.')
self._status_code = code
self._status_line = str(status or ('%d Unknown' % code))
def _get_status(self):
return self._status_line
status = property(_get_status, _set_status, None,
''' A writeable property to change the HTTP response status. It accepts
either a numeric code (100-999) or a string with a custom reason
phrase (e.g. "404 Brain not found"). Both :data:`status_line` and
:data:`status_code` are updated accordingly. The return value is
always a status string. ''')
del _get_status, _set_status
@property
def headers(self):
""" An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. """
hdict = HeaderDict()
hdict.dict = self._headers
return hdict
def __contains__(self, name): return _hkey(name) in self._headers
def __delitem__(self, name): del self._headers[_hkey(name)]
def __getitem__(self, name): return self._headers[_hkey(name)][-1]
def __setitem__(self, name, value): self._headers[_hkey(name)] = [str(value)]
def get_header(self, name, default=None):
""" Return the value of a previously defined header. If there is no
header with that name, return a default value. """
return self._headers.get(_hkey(name), [default])[-1]
def set_header(self, name, value):
""" Create a new response header, replacing any previously defined
headers with the same name. """
self._headers[_hkey(name)] = [value if isinstance(value, unicode) else str(value)]
def add_header(self, name, value):
""" Add an additional response header, not removing duplicates. """
self._headers.setdefault(_hkey(name), []).append(str(value))
def iter_headers(self):
""" Yield (header, value) tuples, skipping headers that are not
allowed with the current response status code. """
return self.headerlist
@property
def headerlist(self):
""" WSGI conform list of (header, value) tuples. """
out = []
headers = list(self._headers.items())
if 'Content-Type' not in self._headers:
headers.append(('Content-Type', [self.default_content_type]))
if self._status_code in self.bad_headers:
bad_headers = self.bad_headers[self._status_code]
headers = [h for h in headers if h[0] not in bad_headers]
out += [(name, val) for (name, vals) in headers for val in vals]
if self._cookies:
for c in self._cookies.values():
out.append(('Set-Cookie', c.OutputString()))
if py3k:
out = [
(k, v.encode('utf8').decode('latin1')
if isinstance(v, unicode) else v) for (k, v) in out]
return out
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int)
expires = HeaderProperty('Expires',
reader=lambda x: datetime.utcfromtimestamp(parse_date(x)),
writer=lambda x: http_date(x))
@property
def charset(self, default='UTF-8'):
""" Return the charset specified in the content-type header (default: utf8). """
if 'charset=' in self.content_type:
return self.content_type.split('charset=')[-1].split(';')[0].strip()
return default
def set_cookie(self, name, value, secret=None, **options):
""" Create a new cookie or replace an old one. If the `secret` parameter is
set, create a `Signed Cookie` (described below).
:param name: the name of the cookie.
:param value: the value of the cookie.
:param secret: a signature key required for signed cookies.
Additionally, this method accepts all RFC 2109 attributes that are
supported by :class:`cookie.Morsel`, including:
:param max_age: maximum age in seconds. (default: None)
:param expires: a datetime object or UNIX timestamp. (default: None)
:param domain: the domain that is allowed to read the cookie.
(default: current domain)
:param path: limits the cookie to a given path (default: current path)
:param secure: limit the cookie to HTTPS connections (default: off).
:param httponly: prevents client-side JavaScript from reading this cookie
(default: off, requires Python 2.6 or newer).
If neither `expires` nor `max_age` is set (default), the cookie will
expire at the end of the browser session (as soon as the browser
window is closed).
Signed cookies may store any pickle-able object and are
cryptographically signed to prevent manipulation. Keep in mind that
cookies are limited to 4kb in most browsers.
Warning: Signed cookies are not encrypted (the client can still see
the content) and not copy-protected (the client can restore an old
cookie). The main intention is to make pickling and unpickling
safe, not to store secret information on the client side.
"""
if not self._cookies:
self._cookies = SimpleCookie()
if secret:
value = touni(cookie_encode((name, value), secret))
elif not isinstance(value, basestring):
raise TypeError('Secret key missing for non-string Cookie.')
if len(value) > 4096: raise ValueError('Cookie value too long.')
self._cookies[name] = value
for key, value in options.items():
if key == 'max_age':
if isinstance(value, timedelta):
value = value.seconds + value.days * 24 * 3600
if key == 'expires':
if isinstance(value, (datedate, datetime)):
value = value.timetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
self._cookies[name][key.replace('_', '-')] = value
def delete_cookie(self, key, **kwargs):
""" Delete a cookie. Be sure to use the same `domain` and `path`
settings as used to create the cookie. """
kwargs['max_age'] = -1
kwargs['expires'] = 0
self.set_cookie(key, '', **kwargs)
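# Illustrative usage sketch (hypothetical names and values):
#   response.set_cookie('visited', 'yes', max_age=3600, path='/')
#   response.set_cookie('account', {'id': 42}, secret='signing-key')  # signed cookie
#   response.delete_cookie('visited', path='/')  # same path/domain as set_cookie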
def __repr__(self):
out = ''
for name, value in self.headerlist:
out += '%s: %s\n' % (name.title(), value.strip())
return out
def _local_property():
ls = threading.local()
def fget(_):
try: return ls.var
except AttributeError:
raise RuntimeError("Request context not initialized.")
def fset(_, value): ls.var = value
def fdel(_): del ls.var
return property(fget, fset, fdel, 'Thread-local property')
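# A minimal sketch of what _local_property provides: the value lives on a
# threading.local(), so each thread sees its own copy through the same object.
# LocalRequest/LocalResponse below use this for the global request/response.
#   class PerThread(object):
#       value = _local_property()
#   shared = PerThread()
#   shared.value = 42     # visible only to the thread that set it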
class LocalRequest(BaseRequest):
""" A thread-local subclass of :class:`BaseRequest` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`request`). If accessed during a
request/response cycle, this instance always refers to the *current*
request (even on a multithreaded server). """
bind = BaseRequest.__init__
environ = _local_property()
class LocalResponse(BaseResponse):
""" A thread-local subclass of :class:`BaseResponse` with a different
set of attributes for each thread. There is usually only one global
instance of this class (:data:`response`). Its attributes are used
to build the HTTP response at the end of the request/response cycle.
"""
bind = BaseResponse.__init__
_status_line = _local_property()
_status_code = _local_property()
_cookies = _local_property()
_headers = _local_property()
body = _local_property()
Request = BaseRequest
Response = BaseResponse
class HTTPResponse(Response, BottleException):
def __init__(self, body='', status=None, headers=None, **more_headers):
super(HTTPResponse, self).__init__(body, status, headers, **more_headers)
def apply(self, other):
other._status_code = self._status_code
other._status_line = self._status_line
other._headers = self._headers
other._cookies = self._cookies
other.body = self.body
class HTTPError(HTTPResponse):
default_status = 500
def __init__(self, status=None, body=None, exception=None, traceback=None,
**options):
self.exception = exception
self.traceback = traceback
super(HTTPError, self).__init__(body, status, **options)
###############################################################################
# Plugins ######################################################################
###############################################################################
class PluginError(BottleException): pass
class JSONPlugin(object):
name = 'json'
api = 2
def __init__(self, json_dumps=json_dumps):
self.json_dumps = json_dumps
def apply(self, callback, _):
dumps = self.json_dumps
if not dumps: return callback
def wrapper(*a, **ka):
try:
rv = callback(*a, **ka)
except HTTPError:
rv = _e()
if isinstance(rv, dict):
#Attempt to serialize, raises exception on failure
json_response = dumps(rv)
#Set content type only if serialization successful
response.content_type = 'application/json'
return json_response
elif isinstance(rv, HTTPResponse) and isinstance(rv.body, dict):
rv.body = dumps(rv.body)
rv.content_type = 'application/json'
return rv
return wrapper
class TemplatePlugin(object):
""" This plugin applies the :func:`view` decorator to all routes with a
`template` config parameter. If the parameter is a tuple, the second
element must be a dict with additional options (e.g. `template_engine`)
or default variables for the template. """
name = 'template'
api = 2
def apply(self, callback, route):
conf = route.config.get('template')
if isinstance(conf, (tuple, list)) and len(conf) == 2:
return view(conf[0], **conf[1])(callback)
elif isinstance(conf, str):
return view(conf)(callback)
else:
return callback
#: Not a plugin, but part of the plugin API. TODO: Find a better place.
class _ImportRedirect(object):
def __init__(self, name, impmask):
""" Create a virtual package that redirects imports (see PEP 302). """
self.name = name
self.impmask = impmask
self.module = sys.modules.setdefault(name, imp.new_module(name))
self.module.__dict__.update({'__file__': __file__, '__path__': [],
'__all__': [], '__loader__': self})
sys.meta_path.append(self)
def find_module(self, fullname, path=None):
if '.' not in fullname: return
packname = fullname.rsplit('.', 1)[0]
if packname != self.name: return
return self
def load_module(self, fullname):
if fullname in sys.modules: return sys.modules[fullname]
modname = fullname.rsplit('.', 1)[1]
realname = self.impmask % modname
__import__(realname)
module = sys.modules[fullname] = sys.modules[realname]
setattr(self.module, modname, module)
module.__loader__ = self
return module
###############################################################################
# Common Utilities #############################################################
###############################################################################
class MultiDict(DictMixin):
""" This dict stores multiple values per key, but behaves exactly like a
normal dict in that it returns only the newest value for any given key.
There are special methods available to access the full list of values.
"""
def __init__(self, *a, **k):
self.dict = dict((k, [v]) for (k, v) in dict(*a, **k).items())
def __len__(self): return len(self.dict)
def __iter__(self): return iter(self.dict)
def __contains__(self, key): return key in self.dict
def __delitem__(self, key): del self.dict[key]
def __getitem__(self, key): return self.dict[key][-1]
def __setitem__(self, key, value): self.append(key, value)
def keys(self): return self.dict.keys()
if py3k:
def values(self): return (v[-1] for v in self.dict.values())
def items(self): return ((k, v[-1]) for k, v in self.dict.items())
def allitems(self):
return ((k, v) for k, vl in self.dict.items() for v in vl)
iterkeys = keys
itervalues = values
iteritems = items
iterallitems = allitems
else:
def values(self): return [v[-1] for v in self.dict.values()]
def items(self): return [(k, v[-1]) for k, v in self.dict.items()]
def iterkeys(self): return self.dict.iterkeys()
def itervalues(self): return (v[-1] for v in self.dict.itervalues())
def iteritems(self):
return ((k, v[-1]) for k, v in self.dict.iteritems())
def iterallitems(self):
return ((k, v) for k, vl in self.dict.iteritems() for v in vl)
def allitems(self):
return [(k, v) for k, vl in self.dict.iteritems() for v in vl]
def get(self, key, default=None, index=-1, type=None):
""" Return the most recent value for a key.
:param default: The default value to be returned if the key is not
present or the type conversion fails.
:param index: An index for the list of available values.
:param type: If defined, this callable is used to cast the value
into a specific type. Exceptions are suppressed and result in
the default value being returned.
"""
try:
val = self.dict[key][index]
return type(val) if type else val
except Exception:
pass
return default
def append(self, key, value):
""" Add a new value to the list of values for this key. """
self.dict.setdefault(key, []).append(value)
def replace(self, key, value):
""" Replace the list of values with a single value. """
self.dict[key] = [value]
def getall(self, key):
""" Return a (possibly empty) list of values for a key. """
return self.dict.get(key) or []
#: Aliases for WTForms to mimic other multi-dict APIs (Django)
getone = get
getlist = getall
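# Illustrative sketch of the MultiDict behaviour described above:
#   d = MultiDict(a=1)
#   d['a'] = 2                # __setitem__ appends; newest value wins
#   d['a']                    # -> 2
#   d.getall('a')             # -> [1, 2]
#   d.get('a', type=str)      # -> '2' (failed casts fall back to the default)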
class FormsDict(MultiDict):
""" This :class:`MultiDict` subclass is used to store request form data.
In addition to the normal dict-like item access methods (which return
unmodified data as native strings), this container also supports
attribute-like access to its values. Attributes are automatically de-
or recoded to match :attr:`input_encoding` (default: 'utf8'). Missing
attributes default to an empty string. """
#: Encoding used for attribute values.
input_encoding = 'utf8'
#: If true (default), unicode strings are first encoded with `latin1`
#: and then decoded to match :attr:`input_encoding`.
recode_unicode = True
def _fix(self, s, encoding=None):
if isinstance(s, unicode) and self.recode_unicode: # Python 3 WSGI
return s.encode('latin1').decode(encoding or self.input_encoding)
elif isinstance(s, bytes): # Python 2 WSGI
return s.decode(encoding or self.input_encoding)
else:
return s
def decode(self, encoding=None):
""" Returns a copy with all keys and values de- or recoded to match
:attr:`input_encoding`. Some libraries (e.g. WTForms) want a
unicode dictionary. """
copy = FormsDict()
enc = copy.input_encoding = encoding or self.input_encoding
copy.recode_unicode = False
for key, value in self.allitems():
copy.append(self._fix(key, enc), self._fix(value, enc))
return copy
def getunicode(self, name, default=None, encoding=None):
""" Return the value as a unicode string, or the default. """
try:
return self._fix(self[name], encoding)
except (UnicodeError, KeyError):
return default
def __getattr__(self, name, default=unicode()):
# Without this guard, pickle generates a cryptic TypeError:
if name.startswith('__') and name.endswith('__'):
return super(FormsDict, self).__getattr__(name)
return self.getunicode(name, default=default)
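# Illustrative sketch of FormsDict attribute access (hypothetical field names):
#   form = FormsDict(name='value')
#   form.name        # -> 'value', decoded to match input_encoding
#   form.missing     # -> ''      (missing attributes default to an empty string)
#   form.getunicode('name', default=None)   # explicit variant with a default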
class HeaderDict(MultiDict):
""" A case-insensitive version of :class:`MultiDict` that defaults to
replace the old value instead of appending it. """
def __init__(self, *a, **ka):
self.dict = {}
if a or ka: self.update(*a, **ka)
def __contains__(self, key): return _hkey(key) in self.dict
def __delitem__(self, key): del self.dict[_hkey(key)]
def __getitem__(self, key): return self.dict[_hkey(key)][-1]
def __setitem__(self, key, value): self.dict[_hkey(key)] = [str(value)]
def append(self, key, value):
self.dict.setdefault(_hkey(key), []).append(str(value))
def replace(self, key, value): self.dict[_hkey(key)] = [str(value)]
def getall(self, key): return self.dict.get(_hkey(key)) or []
def get(self, key, default=None, index=-1):
return MultiDict.get(self, _hkey(key), default, index)
def filter(self, names):
for name in [_hkey(n) for n in names]:
if name in self.dict:
del self.dict[name]
class WSGIHeaderDict(DictMixin):
""" This dict-like class wraps a WSGI environ dict and provides convenient
access to HTTP_* fields. Keys and values are native strings
(2.x bytes or 3.x unicode) and keys are case-insensitive. If the WSGI
environment contains non-native string values, these are de- or encoded
using a lossless 'latin1' character set.
The API will remain stable even on changes to the relevant PEPs.
Currently PEP 333, 444 and 3333 are supported. (PEP 444 is the only one
that uses non-native strings.)
"""
#: List of keys that do not have a ``HTTP_`` prefix.
cgikeys = ('CONTENT_TYPE', 'CONTENT_LENGTH')
def __init__(self, environ):
self.environ = environ
def _ekey(self, key):
""" Translate header field name to CGI/WSGI environ key. """
key = key.replace('-','_').upper()
if key in self.cgikeys:
return key
return 'HTTP_' + key
def raw(self, key, default=None):
""" Return the header value as is (may be bytes or unicode). """
return self.environ.get(self._ekey(key), default)
def __getitem__(self, key):
val = self.environ[self._ekey(key)]
if py3k:
if isinstance(val, unicode):
val = val.encode('latin1').decode('utf8')
else:
val = val.decode('utf8')
return val
def __setitem__(self, key, value):
raise TypeError("%s is read-only." % self.__class__)
def __delitem__(self, key):
raise TypeError("%s is read-only." % self.__class__)
def __iter__(self):
for key in self.environ:
if key[:5] == 'HTTP_':
yield _hkey(key[5:])
elif key in self.cgikeys:
yield _hkey(key)
def keys(self): return [x for x in self]
def __len__(self): return len(self.keys())
def __contains__(self, key): return self._ekey(key) in self.environ
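# Illustrative sketch (made-up environ values): header access through the WSGI
# environ wrapper defined above.
#   env = {'HTTP_USER_AGENT': 'curl/7.68.0', 'CONTENT_TYPE': 'text/plain'}
#   headers = WSGIHeaderDict(env)
#   headers['User-Agent']          # -> 'curl/7.68.0'  (HTTP_ prefix added)
#   headers['Content-Type']        # -> 'text/plain'   (cgikeys need no prefix)
#   'Content-Length' in headers    # -> False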
class ConfigDict(dict):
""" A dict-like configuration storage with additional support for
namespaces, validators, meta-data, on_change listeners and more.
"""
__slots__ = ('_meta', '_on_change')
def __init__(self):
self._meta = {}
self._on_change = lambda name, value: None
def load_config(self, filename):
""" Load values from an ``*.ini`` style config file.
If the config file contains sections, their names are used as
namespaces for the values within. The two special sections
``DEFAULT`` and ``bottle`` refer to the root namespace (no prefix).
"""
conf = ConfigParser()
conf.read(filename)
for section in conf.sections():
for key, value in conf.items(section):
if section not in ('DEFAULT', 'bottle'):
key = section + '.' + key
self[key] = value
return self
def load_dict(self, source, namespace=''):
""" Load values from a dictionary structure. Nesting can be used to
represent namespaces.
>>> c = ConfigDict()
>>> c.load_dict({'some': {'namespace': {'key': 'value'} } })
{'some.namespace.key': 'value'}
"""
for key, value in source.items():
if isinstance(key, str):
nskey = (namespace + '.' + key).strip('.')
if isinstance(value, dict):
self.load_dict(value, namespace=nskey)
else:
self[nskey] = value
else:
raise TypeError('Key has type %r (not a string)' % type(key))
return self
def update(self, *a, **ka):
""" If the first parameter is a string, all keys are prefixed with this
namespace. Apart from that it works just as the usual dict.update().
Example: ``update('some.namespace', key='value')`` """
prefix = ''
if a and isinstance(a[0], str):
prefix = a[0].strip('.') + '.'
a = a[1:]
for key, value in dict(*a, **ka).items():
self[prefix+key] = value
def setdefault(self, key, value):
if key not in self:
self[key] = value
return self[key]
def __setitem__(self, key, value):
if not isinstance(key, str):
raise TypeError('Key has type %r (not a string)' % type(key))
value = self.meta_get(key, 'filter', lambda x: x)(value)
if key in self and self[key] is value:
return
self._on_change(key, value)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
self._on_change(key, None)
dict.__delitem__(self, key)
def meta_get(self, key, metafield, default=None):
""" Return the value of a meta field for a key. """
return self._meta.get(key, {}).get(metafield, default)
def meta_set(self, key, metafield, value):
""" Set the meta field for a key to a new value. This triggers the
on-change handler for existing keys. """
self._meta.setdefault(key, {})[metafield] = value
if key in self:
self[key] = self[key]
def meta_list(self, key):
""" Return an iterable of meta field names defined for a key. """
return self._meta.get(key, {}).keys()
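# Illustrative sketch (hypothetical keys): namespaced values plus a 'filter'
# meta field that coerces assignments.
#   c = ConfigDict()
#   c.load_dict({'db': {'host': 'localhost', 'port': 5432}})
#   c['db.host']                          # -> 'localhost'
#   c.meta_set('db.port', 'filter', int)  # re-applies to the existing value
#   c['db.port'] = '5433'
#   c['db.port']                          # -> 5433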
class AppStack(list):
""" A stack-like list. Calling it returns the head of the stack. """
def __call__(self):
""" Return the current default application. """
return self[-1]
def push(self, value=None):
""" Add a new :class:`Bottle` instance to the stack """
if not isinstance(value, Bottle):
value = Bottle()
self.append(value)
return value
class WSGIFileWrapper(object):
def __init__(self, fp, buffer_size=1024*64):
self.fp, self.buffer_size = fp, buffer_size
for attr in ('fileno', 'close', 'read', 'readlines', 'tell', 'seek'):
if hasattr(fp, attr): setattr(self, attr, getattr(fp, attr))
def __iter__(self):
buff, read = self.buffer_size, self.read
while True:
part = read(buff)
if not part: return
yield part
class _closeiter(object):
""" This only exists to be able to attach a .close method to iterators that
do not support attribute assignment (most of itertools). """
def __init__(self, iterator, close=None):
self.iterator = iterator
self.close_callbacks = makelist(close)
def __iter__(self):
return iter(self.iterator)
def close(self):
for func in self.close_callbacks:
func()
class ResourceManager(object):
""" This class manages a list of search paths and helps to find and open
application-bound resources (files).
:param base: default value for :meth:`add_path` calls.
:param opener: callable used to open resources.
:param cachemode: controls which lookups are cached. One of 'all',
'found' or 'none'.
"""
def __init__(self, base='./', opener=open, cachemode='all'):
self.opener = opener
self.base = base
self.cachemode = cachemode
#: A list of search paths. See :meth:`add_path` for details.
self.path = []
#: A cache for resolved paths. ``res.cache.clear()`` clears the cache.
self.cache = {}
def add_path(self, path, base=None, index=None, create=False):
""" Add a new path to the list of search paths. Return False if the
path does not exist.
:param path: The new search path. Relative paths are turned into
an absolute and normalized form. If the path looks like a file
(not ending in `/`), the filename is stripped off.
:param base: Path used to absolutize relative search paths.
Defaults to :attr:`base` which defaults to ``os.getcwd()``.
:param index: Position within the list of search paths. Defaults
to last index (appends to the list).
The `base` parameter makes it easy to reference files installed
along with a python module or package::
res.add_path('./resources/', __file__)
"""
base = os.path.abspath(os.path.dirname(base or self.base))
path = os.path.abspath(os.path.join(base, os.path.dirname(path)))
path += os.sep
if path in self.path:
self.path.remove(path)
if create and not os.path.isdir(path):
os.makedirs(path)
if index is None:
self.path.append(path)
else:
self.path.insert(index, path)
self.cache.clear()
return os.path.exists(path)
def __iter__(self):
""" Iterate over all existing files in all registered paths. """
search = self.path[:]
while search:
path = search.pop()
if not os.path.isdir(path): continue
for name in os.listdir(path):
full = os.path.join(path, name)
if os.path.isdir(full): search.append(full)
else: yield full
def lookup(self, name):
""" Search for a resource and return an absolute file path, or `None`.
The :attr:`path` list is searched in order. The first match is
returned. Symlinks are followed. The result is cached to speed up
future lookups. """
if name not in self.cache or DEBUG:
for path in self.path:
fpath = os.path.join(path, name)
if os.path.isfile(fpath):
if self.cachemode in ('all', 'found'):
self.cache[name] = fpath
return fpath
if self.cachemode == 'all':
self.cache[name] = None
return self.cache[name]
def open(self, name, mode='r', *args, **kwargs):
""" Find a resource and return a file object, or raise IOError. """
fname = self.lookup(name)
if not fname: raise IOError("Resource %r not found." % name)
return self.opener(fname, mode=mode, *args, **kwargs)
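# Illustrative usage sketch ('./static/' and 'logo.png' are hypothetical):
#   res = ResourceManager()
#   res.add_path('./static/', base=__file__)   # resolved relative to this file
#   if res.lookup('logo.png'):                  # absolute path or None (default cachemode)
#       with res.open('logo.png', mode='rb') as fp:
#           data = fp.read()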
class FileUpload(object):
def __init__(self, fileobj, name, filename, headers=None):
""" Wrapper for file uploads. """
#: Open file(-like) object (BytesIO buffer or temporary file)
self.file = fileobj
#: Name of the upload form field
self.name = name
#: Raw filename as sent by the client (may contain unsafe characters)
self.raw_filename = filename
#: A :class:`HeaderDict` with additional headers (e.g. content-type)
self.headers = HeaderDict(headers) if headers else HeaderDict()
content_type = HeaderProperty('Content-Type')
content_length = HeaderProperty('Content-Length', reader=int, default=-1)
@cached_property
def filename(self):
""" Name of the file on the client file system, but normalized to ensure
file system compatibility. An empty filename is returned as 'empty'.
Only ASCII letters, digits, dashes, underscores and dots are
allowed in the final filename. Accents are removed, if possible.
Whitespace is replaced by a single dash. Leading or trailing dots
or dashes are removed. The filename is limited to 255 characters.
"""
fname = self.raw_filename
if not isinstance(fname, unicode):
fname = fname.decode('utf8', 'ignore')
fname = normalize('NFKD', fname).encode('ASCII', 'ignore').decode('ASCII')
fname = os.path.basename(fname.replace('\\', os.path.sep))
fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
def _copy_file(self, fp, chunk_size=2**16):
read, write, offset = self.file.read, fp.write, self.file.tell()
while 1:
buf = read(chunk_size)
if not buf: break
write(buf)
self.file.seek(offset)
def save(self, destination, overwrite=False, chunk_size=2**16):
""" Save file to disk or copy its content to an open file(-like) object.
If *destination* is a directory, :attr:`filename` is added to the
path. Existing files are not overwritten by default (IOError).
:param destination: File path, directory or file(-like) object.
:param overwrite: If True, replace existing files. (default: False)
:param chunk_size: Bytes to read at a time. (default: 64kb)
"""
if isinstance(destination, basestring): # Except file-likes here
if os.path.isdir(destination):
destination = os.path.join(destination, self.filename)
if not overwrite and os.path.exists(destination):
raise IOError('File exists.')
with open(destination, 'wb') as fp:
self._copy_file(fp, chunk_size)
else:
self._copy_file(destination, chunk_size)
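# Illustrative usage sketch inside a route callback, assuming the usual
# request.files accessor and a hypothetical 'datafile' form field:
#   upload = request.files.get('datafile')
#   if upload:
#       upload.save('/tmp/uploads', overwrite=False)  # directory: filename is appended
#       safe_name = upload.filename                   # normalized, ASCII-only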
###############################################################################
# Application Helper ###########################################################
###############################################################################
def abort(code=500, text='Unknown Error.'):
""" Aborts execution and causes a HTTP error. """
raise HTTPError(code, text)
def redirect(url, code=None):
""" Aborts execution and causes a 303 or 302 redirect, depending on
the HTTP protocol version. """
if not code:
code = 303 if request.get('SERVER_PROTOCOL') == "HTTP/1.1" else 302
res = response.copy(cls=HTTPResponse)
res.status = code
res.body = ""
res.set_header('Location', urljoin(request.url, url))
raise res
def _file_iter_range(fp, offset, bytes, maxread=1024*1024):
""" Yield chunks from a range in a file. No chunk is bigger than maxread."""
fp.seek(offset)
while bytes > 0:
part = fp.read(min(bytes, maxread))
if not part: break
bytes -= len(part)
yield part
def static_file(filename, root, mimetype='auto', download=False, charset='UTF-8'):
""" Open a file in a safe way and return :exc:`HTTPResponse` with status
code 200, 304, 403 or 404. The ``Content-Type``, ``Content-Encoding``,
``Content-Length`` and ``Last-Modified`` headers are set if possible.
Special support for ``If-Modified-Since``, ``Range`` and ``HEAD``
requests.
:param filename: Name or path of the file to send.
:param root: Root path for file lookups. Should be an absolute directory
path.
:param mimetype: Defines the content-type header (default: guess from
file extension)
:param download: If True, ask the browser to open a `Save as...` dialog
instead of opening the file with the associated program. You can
specify a custom filename as a string. If not specified, the
original filename is used (default: False).
:param charset: The charset to use for files with a ``text/*``
mime-type. (default: UTF-8)
"""
root = os.path.abspath(root) + os.sep
filename = os.path.abspath(os.path.join(root, filename.strip('/\\')))
headers = dict()
if not filename.startswith(root):
return HTTPError(403, "Access denied.")
if not os.path.exists(filename) or not os.path.isfile(filename):
return HTTPError(404, "File does not exist.")
if not os.access(filename, os.R_OK):
return HTTPError(403, "You do not have permission to access this file.")
if mimetype == 'auto':
mimetype, encoding = mimetypes.guess_type(filename)
if encoding: headers['Content-Encoding'] = encoding
if mimetype:
if mimetype[:5] == 'text/' and charset and 'charset' not in mimetype:
mimetype += '; charset=%s' % charset
headers['Content-Type'] = mimetype
if download:
download = os.path.basename(filename if download == True else download)
headers['Content-Disposition'] = 'attachment; filename="%s"' % download
stats = os.stat(filename)
headers['Content-Length'] = clen = stats.st_size
lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
headers['Last-Modified'] = lm
ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
if ims:
ims = parse_date(ims.split(";")[0].strip())
if ims is not None and ims >= int(stats.st_mtime):
headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
return HTTPResponse(status=304, **headers)
body = '' if request.method == 'HEAD' else open(filename, 'rb')
headers["Accept-Ranges"] = "bytes"
ranges = request.environ.get('HTTP_RANGE')
if 'HTTP_RANGE' in request.environ:
ranges = list(parse_range_header(request.environ['HTTP_RANGE'], clen))
if not ranges:
return HTTPError(416, "Requested Range Not Satisfiable")
offset, end = ranges[0]
headers["Content-Range"] = "bytes %d-%d/%d" % (offset, end-1, clen)
headers["Content-Length"] = str(end-offset)
if body: body = _file_iter_range(body, offset, end-offset)
return HTTPResponse(body, status=206, **headers)
return HTTPResponse(body, **headers)
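# Illustrative usage sketch (the root directory is a made-up example):
#   @route('/download/<name:path>')
#   def download(name):
#       return static_file(name, root='/var/www/files', download=True)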
###############################################################################
# HTTP Utilities and MISC (TODO) ###############################################
###############################################################################
def debug(mode=True):
""" Change the debug level.
There is only one debug level supported at the moment."""
global DEBUG
if mode: warnings.simplefilter('default')
DEBUG = bool(mode)
def http_date(value):
if isinstance(value, (datedate, datetime)):
value = value.utctimetuple()
elif isinstance(value, (int, float)):
value = time.gmtime(value)
if not isinstance(value, basestring):
value = time.strftime("%a, %d %b %Y %H:%M:%S GMT", value)
return value
def parse_date(ims):
""" Parse rfc1123, rfc850 and asctime timestamps and return UTC epoch. """
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
def parse_auth(header):
""" Parse rfc2617 HTTP authentication header string (basic) and return (user,pass) tuple or None"""
try:
method, data = header.split(None, 1)
if method.lower() == 'basic':
user, pwd = touni(base64.b64decode(tob(data))).split(':',1)
return user, pwd
except (KeyError, ValueError):
return None
def parse_range_header(header, maxlen=0):
""" Yield (start, end) ranges parsed from a HTTP Range header. Skip
unsatisfiable ranges. The end index is non-inclusive."""
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen-int(end)), maxlen
elif not end:   # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end)+1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
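# Illustrative sketch of the generator above (end indices are non-inclusive):
#   list(parse_range_header('bytes=0-99,500-', 1000))   # -> [(0, 100), (500, 1000)]
#   list(parse_range_header('bytes=-100', 1000))        # -> [(900, 1000)]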
def _parse_qsl(qs):
r = []
for pair in qs.replace(';','&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
def _lscmp(a, b):
""" Compares two strings in a cryptographically safe way:
Runtime is not affected by length of common prefix. """
return not sum(0 if x==y else 1 for x, y in zip(a, b)) and len(a) == len(b)
def cookie_encode(data, key):
""" Encode and sign a pickle-able object. Return a (byte) string """
msg = base64.b64encode(pickle.dumps(data, -1))
sig = base64.b64encode(hmac.new(tob(key), msg).digest())
return tob('!') + sig + tob('?') + msg
def cookie_decode(data, key):
""" Verify and decode an encoded string. Return an object or None."""
data = tob(data)
if cookie_is_encoded(data):
sig, msg = data.split(tob('?'), 1)
if _lscmp(sig[1:], base64.b64encode(hmac.new(tob(key), msg).digest())):
return pickle.loads(base64.b64decode(msg))
return None
def cookie_is_encoded(data):
""" Return True if the argument looks like a encoded cookie."""
return bool(data.startswith(tob('!')) and tob('?') in data)
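# Illustrative sketch of the signed-cookie helpers above ('secret-key' is made up):
#   token = cookie_encode(('user', 42), 'secret-key')
#   cookie_decode(token, 'secret-key')    # -> ('user', 42)
#   cookie_decode(token, 'wrong-key')     # -> None (signature check fails)
#   cookie_is_encoded(token)              # -> True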
def html_escape(string):
""" Escape HTML special characters ``&<>`` and quotes ``'"``. """
return string.replace('&','&amp;').replace('<','&lt;').replace('>','&gt;')\
.replace('"','&quot;').replace("'",'&#039;')
def html_quote(string):
""" Escape and quote a string to be used as an HTTP attribute."""
return '"%s"' % html_escape(string).replace('\n',' ')\
.replace('\r',' ').replace('\t','	')
def yieldroutes(func):
""" Return a generator for routes that match the signature (name, args)
of the func parameter. This may yield more than one route if the function
takes optional keyword arguments. The output is best described by example::
a() -> '/a'
b(x, y) -> '/b/<x>/<y>'
c(x, y=5) -> '/c/<x>' and '/c/<x>/<y>'
d(x=5, y=6) -> '/d' and '/d/<x>' and '/d/<x>/<y>'
"""
path = '/' + func.__name__.replace('__','/').lstrip('/')
spec = getargspec(func)
argc = len(spec[0]) - len(spec[3] or [])
path += ('/<%s>' * argc) % tuple(spec[0][:argc])
yield path
for arg in spec[0][argc:]:
path += '/<%s>' % arg
yield path
def path_shift(script_name, path_info, shift=1):
""" Shift path fragments from PATH_INFO to SCRIPT_NAME and vice versa.
:return: The modified paths.
:param script_name: The SCRIPT_NAME path.
:param path_info: The PATH_INFO path.
:param shift: The number of path fragments to shift. May be negative to
change the shift direction. (default: 1)
"""
if shift == 0: return script_name, path_info
pathlist = path_info.strip('/').split('/')
scriptlist = script_name.strip('/').split('/')
if pathlist and pathlist[0] == '': pathlist = []
if scriptlist and scriptlist[0] == '': scriptlist = []
if 0 < shift <= len(pathlist):
moved = pathlist[:shift]
scriptlist = scriptlist + moved
pathlist = pathlist[shift:]
elif 0 > shift >= -len(scriptlist):
moved = scriptlist[shift:]
pathlist = moved + pathlist
scriptlist = scriptlist[:shift]
else:
empty = 'SCRIPT_NAME' if shift < 0 else 'PATH_INFO'
raise AssertionError("Cannot shift. Nothing left from %s" % empty)
new_script_name = '/' + '/'.join(scriptlist)
new_path_info = '/' + '/'.join(pathlist)
if path_info.endswith('/') and pathlist: new_path_info += '/'
return new_script_name, new_path_info
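# Illustrative sketch of path_shift with concrete paths:
#   path_shift('/a', '/b/c/d', shift=1)    # -> ('/a/b', '/c/d')
#   path_shift('/a/b', '/c/d', shift=-1)   # -> ('/a', '/b/c/d')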
def auth_basic(check, realm="private", text="Access denied"):
""" Callback decorator to require HTTP auth (basic).
TODO: Add route(check_auth=...) parameter. """
def decorator(func):
@functools.wraps(func)
def wrapper(*a, **ka):
user, password = request.auth or (None, None)
if user is None or not check(user, password):
err = HTTPError(401, text)
err.add_header('WWW-Authenticate', 'Basic realm="%s"' % realm)
return err
return func(*a, **ka)
return wrapper
return decorator
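# Illustrative usage sketch (the credentials check is a made-up example):
#   def check(user, password):
#       return user == 'admin' and password == 'hunter2'
#
#   @route('/admin')
#   @auth_basic(check, realm='admin area')
#   def admin_panel():
#       return 'welcome'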
# Shortcuts for common Bottle methods.
# They all refer to the current default application.
def make_default_app_wrapper(name):
""" Return a callable that relays calls to the current default app. """
@functools.wraps(getattr(Bottle, name))
def wrapper(*a, **ka):
return getattr(app(), name)(*a, **ka)
return wrapper
route = make_default_app_wrapper('route')
get = make_default_app_wrapper('get')
post = make_default_app_wrapper('post')
put = make_default_app_wrapper('put')
delete = make_default_app_wrapper('delete')
patch = make_default_app_wrapper('patch')
error = make_default_app_wrapper('error')
mount = make_default_app_wrapper('mount')
hook = make_default_app_wrapper('hook')
install = make_default_app_wrapper('install')
uninstall = make_default_app_wrapper('uninstall')
url = make_default_app_wrapper('get_url')
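# Illustrative sketch of the module-level shortcuts defined above; they all
# act on the current default application:
#   @route('/hello/<name>')
#   def hello(name):
#       return 'Hello %s!' % name
#
#   @error(404)
#   def not_found(err):
#       return 'Nothing here, sorry.'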
###############################################################################
# Server Adapter ###############################################################
###############################################################################
class ServerAdapter(object):
quiet = False
def __init__(self, host='127.0.0.1', port=8080, **options):
self.options = options
self.host = host
self.port = int(port)
def run(self, handler): # pragma: no cover
pass
def __repr__(self):
args = ', '.join(['%s=%s'%(k,repr(v)) for k, v in self.options.items()])
return "%s(%s)" % (self.__class__.__name__, args)
class CGIServer(ServerAdapter):
quiet = True
def run(self, handler): # pragma: no cover
from wsgiref.handlers import CGIHandler
def fixed_environ(environ, start_response):
environ.setdefault('PATH_INFO', '')
return handler(environ, start_response)
CGIHandler().run(fixed_environ)
class FlupFCGIServer(ServerAdapter):
def run(self, handler): # pragma: no cover
import flup.server.fcgi
self.options.setdefault('bindAddress', (self.host, self.port))
flup.server.fcgi.WSGIServer(handler, **self.options).run()
class WSGIRefServer(ServerAdapter):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import make_server
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
self.srv = make_server(self.host, self.port, app, server_cls, handler_cls)
self.port = self.srv.server_port # update to the actual port (0 means a random free port)
try:
self.srv.serve_forever()
except KeyboardInterrupt:
self.srv.server_close() # Prevent ResourceWarning: unclosed socket
raise
class CherryPyServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from cherrypy import wsgiserver
self.options['bind_addr'] = (self.host, self.port)
self.options['wsgi_app'] = handler
certfile = self.options.get('certfile')
if certfile:
del self.options['certfile']
keyfile = self.options.get('keyfile')
if keyfile:
del self.options['keyfile']
server = wsgiserver.CherryPyWSGIServer(**self.options)
if certfile:
server.ssl_certificate = certfile
if keyfile:
server.ssl_private_key = keyfile
try:
server.start()
finally:
server.stop()
class WaitressServer(ServerAdapter):
def run(self, handler):
from waitress import serve
serve(handler, host=self.host, port=self.port)
class PasteServer(ServerAdapter):
def run(self, handler): # pragma: no cover
from paste import httpserver
from paste.translogger import TransLogger
handler = TransLogger(handler, setup_console_handler=(not self.quiet))
httpserver.serve(handler, host=self.host, port=str(self.port),
**self.options)
class MeinheldServer(ServerAdapter):
def run(self, handler):
from meinheld import server
server.listen((self.host, self.port))
server.run(handler)
class FapwsServer(ServerAdapter):
""" Extremely fast webserver using libev. See http://www.fapws.org/ """
def run(self, handler): # pragma: no cover
import fapws._evwsgi as evwsgi
from fapws import base, config
port = self.port
if float(config.SERVER_IDENT[-2:]) > 0.4:
# fapws3 silently changed its API in 0.5
port = str(port)
evwsgi.start(self.host, port)
# fapws3 never releases the GIL. Complain upstream. I tried. No luck.
if 'BOTTLE_CHILD' in os.environ and not self.quiet:
_stderr("WARNING: Auto-reloading does not work with Fapws3.\n")
_stderr(" (Fapws3 breaks python thread support)\n")
evwsgi.set_base_module(base)
def app(environ, start_response):
environ['wsgi.multiprocess'] = False
return handler(environ, start_response)
evwsgi.wsgi_cb(('', app))
evwsgi.run()
class TornadoServer(ServerAdapter):
""" The super hyped asynchronous server by facebook. Untested. """
def run(self, handler): # pragma: no cover
import tornado.wsgi, tornado.httpserver, tornado.ioloop
container = tornado.wsgi.WSGIContainer(handler)
server = tornado.httpserver.HTTPServer(container)
server.listen(port=self.port,address=self.host)
tornado.ioloop.IOLoop.instance().start()
class AppEngineServer(ServerAdapter):
""" Adapter for Google App Engine. """
quiet = True
def run(self, handler):
from google.appengine.ext.webapp import util
# A main() function in the handler script enables 'App Caching'.
# Let's make sure it is there. This _really_ improves performance.
module = sys.modules.get('__main__')
if module and not hasattr(module, 'main'):
module.main = lambda: util.run_wsgi_app(handler)
util.run_wsgi_app(handler)
class TwistedServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from twisted.web import server, wsgi
from twisted.python.threadpool import ThreadPool
from twisted.internet import reactor
thread_pool = ThreadPool()
thread_pool.start()
reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop)
factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, handler))
reactor.listenTCP(self.port, factory, interface=self.host)
if not reactor.running:
reactor.run()
class DieselServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from diesel.protocols.wsgi import WSGIApplication
app = WSGIApplication(handler, port=self.port)
app.run()
class GeventServer(ServerAdapter):
""" Untested. Options:
* `fast` (default: False) uses libevent's http server, but has some
issues: No streaming, no pipelining, no SSL.
* See gevent.wsgi.WSGIServer() documentation for more options.
"""
def run(self, handler):
from gevent import wsgi, pywsgi, local
if not isinstance(threading.local(), local.local):
msg = "Bottle requires gevent.monkey.patch_all() (before import)"
raise RuntimeError(msg)
if not self.options.pop('fast', None): wsgi = pywsgi
self.options['log'] = None if self.quiet else 'default'
address = (self.host, self.port)
server = wsgi.WSGIServer(address, handler, **self.options)
if 'BOTTLE_CHILD' in os.environ:
import signal
signal.signal(signal.SIGINT, lambda s, f: server.stop())
server.serve_forever()
class GeventSocketIOServer(ServerAdapter):
def run(self,handler):
from socketio import server
address = (self.host, self.port)
server.SocketIOServer(address, handler, **self.options).serve_forever()
class GunicornServer(ServerAdapter):
""" Untested. See http://gunicorn.org/configure.html for options. """
def run(self, handler):
from gunicorn.app.base import Application
config = {'bind': "%s:%d" % (self.host, int(self.port))}
config.update(self.options)
class GunicornApplication(Application):
def init(self, parser, opts, args):
return config
def load(self):
return handler
GunicornApplication().run()
class EventletServer(ServerAdapter):
""" Untested. Options:
* `backlog` adjust the eventlet backlog parameter which is the maximum
number of queued connections. Should be at least 1; the maximum
value is system-dependent.
* `family`: (default is 2) socket family, optional. See socket
documentation for available families.
"""
def run(self, handler):
from eventlet import wsgi, listen, patcher
if not patcher.is_monkey_patched(os):
msg = "Bottle requires eventlet.monkey_patch() (before import)"
raise RuntimeError(msg)
socket_args = {}
for arg in ('backlog', 'family'):
try:
socket_args[arg] = self.options.pop(arg)
except KeyError:
pass
address = (self.host, self.port)
try:
wsgi.server(listen(address, **socket_args), handler,
log_output=(not self.quiet))
except TypeError:
# Fallback, if we have old version of eventlet
wsgi.server(listen(address), handler)
class RocketServer(ServerAdapter):
""" Untested. """
def run(self, handler):
from rocket import Rocket
server = Rocket((self.host, self.port), 'wsgi', { 'wsgi_app' : handler })
server.start()
class BjoernServer(ServerAdapter):
""" Fast server written in C: https://github.com/jonashaag/bjoern """
def run(self, handler):
from bjoern import run
run(handler, self.host, self.port)
class AutoServer(ServerAdapter):
""" Untested. """
adapters = [WaitressServer, PasteServer, TwistedServer, CherryPyServer, WSGIRefServer]
def run(self, handler):
for sa in self.adapters:
try:
return sa(self.host, self.port, **self.options).run(handler)
except ImportError:
pass
server_names = {
'cgi': CGIServer,
'flup': FlupFCGIServer,
'wsgiref': WSGIRefServer,
'waitress': WaitressServer,
'cherrypy': CherryPyServer,
'paste': PasteServer,
'fapws3': FapwsServer,
'tornado': TornadoServer,
'gae': AppEngineServer,
'twisted': TwistedServer,
'diesel': DieselServer,
'meinheld': MeinheldServer,
'gunicorn': GunicornServer,
'eventlet': EventletServer,
'gevent': GeventServer,
'geventSocketIO':GeventSocketIOServer,
'rocket': RocketServer,
'bjoern' : BjoernServer,
'auto': AutoServer,
}
###############################################################################
# Application Control ##########################################################
###############################################################################
def load(target, **namespace):
""" Import a module or fetch an object from a module.
* ``package.module`` returns `module` as a module object.
* ``pack.mod:name`` returns the module variable `name` from `pack.mod`.
* ``pack.mod:func()`` calls `pack.mod.func()` and returns the result.
The last form accepts not only function calls, but any type of
expression. Keyword arguments passed to this function are available as
local variables. Example: ``load('re:compile(x)', x='[a-z]')``
"""
module, target = target.split(":", 1) if ':' in target else (target, None)
if module not in sys.modules: __import__(module)
if not target: return sys.modules[module]
if target.isalnum(): return getattr(sys.modules[module], target)
package_name = module.split('.')[0]
namespace[package_name] = sys.modules[package_name]
return eval('%s.%s' % (module, target), namespace)
def load_app(target):
""" Load a bottle application from a module and make sure that the import
does not affect the current default application, but returns a separate
application object. See :func:`load` for the target parameter. """
global NORUN; NORUN, nr_old = True, NORUN
tmp = default_app.push() # Create a new "default application"
try:
rv = load(target) # Import the target module
return rv if callable(rv) else tmp
finally:
default_app.remove(tmp) # Remove the temporary added default application
NORUN = nr_old
_debug = debug
def run(app=None, server='wsgiref', host='127.0.0.1', port=8080,
interval=1, reloader=False, quiet=False, plugins=None,
debug=None, **kargs):
""" Start a server instance. This method blocks until the server terminates.
:param app: WSGI application or target string supported by
:func:`load_app`. (default: :func:`default_app`)
:param server: Server adapter to use. See :data:`server_names` keys
for valid names or pass a :class:`ServerAdapter` subclass.
(default: `wsgiref`)
:param host: Server address to bind to. Pass ``0.0.0.0`` to listen on
all interfaces including the external one. (default: 127.0.0.1)
:param port: Server port to bind to. Values below 1024 require root
privileges. (default: 8080)
:param reloader: Start auto-reloading server? (default: False)
:param interval: Auto-reloader interval in seconds (default: 1)
:param quiet: Suppress output to stdout and stderr? (default: False)
:param options: Options passed to the server adapter.
"""
if NORUN: return
if reloader and not os.environ.get('BOTTLE_CHILD'):
lockfile = None
try:
fd, lockfile = tempfile.mkstemp(prefix='bottle.', suffix='.lock')
os.close(fd) # We only need this file to exist. We never write to it
while os.path.exists(lockfile):
args = [sys.executable] + sys.argv
environ = os.environ.copy()
environ['BOTTLE_CHILD'] = 'true'
environ['BOTTLE_LOCKFILE'] = lockfile
p = subprocess.Popen(args, env=environ)
while p.poll() is None: # Busy wait...
os.utime(lockfile, None) # I am alive!
time.sleep(interval)
if p.poll() != 3:
if os.path.exists(lockfile): os.unlink(lockfile)
sys.exit(p.poll())
except KeyboardInterrupt:
pass
finally:
if os.path.exists(lockfile):
os.unlink(lockfile)
return
try:
if debug is not None: _debug(debug)
app = app or default_app()
if isinstance(app, basestring):
app = load_app(app)
if not callable(app):
raise ValueError("Application is not callable: %r" % app)
for plugin in plugins or []:
if isinstance(plugin, basestring):
plugin = load(plugin)
app.install(plugin)
if server in server_names:
server = server_names.get(server)
if isinstance(server, basestring):
server = load(server)
if isinstance(server, type):
server = server(host=host, port=port, **kargs)
if not isinstance(server, ServerAdapter):
raise ValueError("Unknown or unsupported server: %r" % server)
server.quiet = server.quiet or quiet
if not server.quiet:
_stderr("Bottle v%s server starting up (using %s)...\n" % (__version__, repr(server)))
_stderr("Listening on http://%s:%d/\n" % (server.host, server.port))
_stderr("Hit Ctrl-C to quit.\n\n")
if reloader:
lockfile = os.environ.get('BOTTLE_LOCKFILE')
bgcheck = FileCheckerThread(lockfile, interval)
with bgcheck:
server.run(app)
if bgcheck.status == 'reload':
sys.exit(3)
else:
server.run(app)
except KeyboardInterrupt:
pass
except (SystemExit, MemoryError):
raise
except:
if not reloader: raise
if not getattr(server, 'quiet', quiet):
print_exc()
time.sleep(interval)
sys.exit(3)
class FileCheckerThread(threading.Thread):
""" Interrupt main-thread as soon as a changed module file is detected,
the lockfile gets deleted or gets too old. """
def __init__(self, lockfile, interval):
threading.Thread.__init__(self)
self.daemon = True
self.lockfile, self.interval = lockfile, interval
#: Is one of 'reload', 'error' or 'exit'
self.status = None
def run(self):
exists = os.path.exists
mtime = lambda p: os.stat(p).st_mtime
files = dict()
for module in list(sys.modules.values()):
path = getattr(module, '__file__', '')
if path[-4:] in ('.pyo', '.pyc'): path = path[:-1]
if path and exists(path): files[path] = mtime(path)
while not self.status:
if not exists(self.lockfile)\
or mtime(self.lockfile) < time.time() - self.interval - 5:
self.status = 'error'
thread.interrupt_main()
for path, lmtime in list(files.items()):
if not exists(path) or mtime(path) > lmtime:
self.status = 'reload'
thread.interrupt_main()
break
time.sleep(self.interval)
def __enter__(self):
self.start()
def __exit__(self, exc_type, *_):
if not self.status: self.status = 'exit' # silent exit
self.join()
return exc_type is not None and issubclass(exc_type, KeyboardInterrupt)
###############################################################################
# Template Adapters ############################################################
###############################################################################
class TemplateError(HTTPError):
def __init__(self, message):
HTTPError.__init__(self, 500, message)
class BaseTemplate(object):
""" Base class and minimal API for template adapters """
extensions = ['tpl','html','thtml','stpl']
settings = {} #used in prepare()
defaults = {} #used in render()
def __init__(self, source=None, name=None, lookup=None, encoding='utf8', **settings):
""" Create a new template.
If the source parameter (str or buffer) is missing, the name argument
is used to guess a template filename. Subclasses can assume that
self.source and/or self.filename are set. Both are strings.
The lookup, encoding and settings parameters are stored as instance
variables.
The lookup parameter stores a list containing directory paths.
The encoding parameter should be used to decode byte strings or files.
The settings parameter contains a dict for engine-specific settings.
"""
self.name = name
self.source = source.read() if hasattr(source, 'read') else source
self.filename = source.filename if hasattr(source, 'filename') else None
self.lookup = [os.path.abspath(x) for x in lookup] if lookup else []
self.encoding = encoding
self.settings = self.settings.copy() # Copy from class variable
self.settings.update(settings) # Apply
if not self.source and self.name:
self.filename = self.search(self.name, self.lookup)
if not self.filename:
raise TemplateError('Template %s not found.' % repr(name))
if not self.source and not self.filename:
raise TemplateError('No template specified.')
self.prepare(**self.settings)
@classmethod
def search(cls, name, lookup=None):
""" Search name in all directories specified in lookup.
First without, then with common extensions. Return first hit. """
if not lookup:
depr('The template lookup path list should not be empty.', True) #0.12
lookup = ['.']
if os.path.isabs(name) and os.path.isfile(name):
depr('Absolute template path names are deprecated.', True) #0.12
return os.path.abspath(name)
for spath in lookup:
spath = os.path.abspath(spath) + os.sep
fname = os.path.abspath(os.path.join(spath, name))
if not fname.startswith(spath): continue
if os.path.isfile(fname): return fname
for ext in cls.extensions:
if os.path.isfile('%s.%s' % (fname, ext)):
return '%s.%s' % (fname, ext)
@classmethod
def global_config(cls, key, *args):
""" This reads or sets the global settings stored in class.settings. """
if args:
cls.settings = cls.settings.copy() # Make settings local to class
cls.settings[key] = args[0]
else:
return cls.settings[key]
def prepare(self, **options):
""" Run preparations (parsing, caching, ...).
It should be possible to call this again to refresh a template or to
update settings.
"""
raise NotImplementedError
def render(self, *args, **kwargs):
""" Render the template with the specified local variables and return
a single byte or unicode string. If it is a byte string, the encoding
must match self.encoding. This method must be thread-safe!
Local variables may be provided in dictionaries (args)
or directly, as keywords (kwargs).
"""
raise NotImplementedError
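# Illustrative sketch (not part of the original source): a minimal template
# adapter only needs to override prepare() and render(); the concrete adapters
# below show real implementations. The `EchoTemplate` name is hypothetical and
# the example is commented out so module behaviour is unchanged.
#
# class EchoTemplate(BaseTemplate):
#     def prepare(self, **options):
#         # keep the raw template text; no compilation step
#         self.tpl = self.source or open(self.filename, 'rb').read().decode(self.encoding)
#     def render(self, *args, **kwargs):
#         return self.tpl  # return the template text unchanged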
class MakoTemplate(BaseTemplate):
def prepare(self, **options):
from mako.template import Template
from mako.lookup import TemplateLookup
options.update({'input_encoding':self.encoding})
options.setdefault('format_exceptions', bool(DEBUG))
lookup = TemplateLookup(directories=self.lookup, **options)
if self.source:
self.tpl = Template(self.source, lookup=lookup, **options)
else:
self.tpl = Template(uri=self.name, filename=self.filename, lookup=lookup, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
class CheetahTemplate(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
class Jinja2Template(BaseTemplate):
def prepare(self, filters=None, tests=None, globals={}, **kwargs):
from jinja2 import Environment, FunctionLoader
self.env = Environment(loader=FunctionLoader(self.loader), **kwargs)
if filters: self.env.filters.update(filters)
if tests: self.env.tests.update(tests)
if globals: self.env.globals.update(globals)
if self.source:
self.tpl = self.env.from_string(self.source)
else:
self.tpl = self.env.get_template(self.filename)
def render(self, *args, **kwargs):
for dictarg in args: kwargs.update(dictarg)
_defaults = self.defaults.copy()
_defaults.update(kwargs)
return self.tpl.render(**_defaults)
def loader(self, name):
fname = self.search(name, self.lookup)
if not fname: return
with open(fname, "rb") as f:
return f.read().decode(self.encoding)
class SimpleTemplate(BaseTemplate):
def prepare(self, escape_func=html_escape, noescape=False, syntax=None, **ka):
self.cache = {}
enc = self.encoding
self._str = lambda x: touni(x, enc)
self._escape = lambda x: escape_func(touni(x, enc))
self.syntax = syntax
if noescape:
self._str, self._escape = self._escape, self._str
@cached_property
def co(self):
return compile(self.code, self.filename or '<string>', 'exec')
@cached_property
def code(self):
source = self.source
if not source:
with open(self.filename, 'rb') as f:
source = f.read()
try:
source, encoding = touni(source), 'utf8'
except UnicodeError:
depr('Template encodings other than utf8 are no longer supported.') #0.11
source, encoding = touni(source, 'latin1'), 'latin1'
parser = StplParser(source, encoding=encoding, syntax=self.syntax)
code = parser.translate()
self.encoding = parser.encoding
return code
def _rebase(self, _env, _name=None, **kwargs):
_env['_rebase'] = (_name, kwargs)
def _include(self, _env, _name=None, **kwargs):
env = _env.copy()
env.update(kwargs)
if _name not in self.cache:
self.cache[_name] = self.__class__(name=_name, lookup=self.lookup)
return self.cache[_name].execute(env['_stdout'], env)
def execute(self, _stdout, kwargs):
env = self.defaults.copy()
env.update(kwargs)
env.update({'_stdout': _stdout, '_printlist': _stdout.extend,
'include': functools.partial(self._include, env),
'rebase': functools.partial(self._rebase, env), '_rebase': None,
'_str': self._str, '_escape': self._escape, 'get': env.get,
'setdefault': env.setdefault, 'defined': env.__contains__ })
eval(self.co, env)
if env.get('_rebase'):
subtpl, rargs = env.pop('_rebase')
rargs['base'] = ''.join(_stdout) #copy stdout
del _stdout[:] # clear stdout
return self._include(env, subtpl, **rargs)
return env
def render(self, *args, **kwargs):
""" Render the template using keyword arguments as local variables. """
env = {}; stdout = []
for dictarg in args: env.update(dictarg)
env.update(kwargs)
self.execute(stdout, env)
return ''.join(stdout)
class StplSyntaxError(TemplateError): pass
class StplParser(object):
""" Parser for stpl templates. """
_re_cache = {} #: Cache for compiled re patterns
# This huge pile of voodoo magic splits python code into 8 different tokens.
# 1: All kinds of python strings (trust me, it works)
_re_tok = '((?m)[urbURB]?(?:\'\'(?!\')|""(?!")|\'{6}|"{6}' \
'|\'(?:[^\\\\\']|\\\\.)+?\'|"(?:[^\\\\"]|\\\\.)+?"' \
'|\'{3}(?:[^\\\\]|\\\\.|\\n)+?\'{3}' \
'|"{3}(?:[^\\\\]|\\\\.|\\n)+?"{3}))'
_re_inl = _re_tok.replace('|\\n','') # We re-use this string pattern later
# 2: Comments (until end of line, but not the newline itself)
_re_tok += '|(#.*)'
# 3,4: Keywords that start or continue a python block (only start of line)
_re_tok += '|^([ \\t]*(?:if|for|while|with|try|def|class)\\b)' \
'|^([ \\t]*(?:elif|else|except|finally)\\b)'
# 5: Our special 'end' keyword (but only if it stands alone)
_re_tok += '|((?:^|;)[ \\t]*end[ \\t]*(?=(?:%(block_close)s[ \\t]*)?\\r?$|;|#))'
# 6: A customizable end-of-code-block template token (only end of line)
_re_tok += '|(%(block_close)s[ \\t]*(?=$))'
# 7: And finally, a single newline. The 8th token is 'everything else'
_re_tok += '|(\\r?\\n)'
# Match the start tokens of code areas in a template
_re_split = '(?m)^[ \t]*(\\\\?)((%(line_start)s)|(%(block_start)s))'
# Match inline statements (may contain python strings)
_re_inl = '%%(inline_start)s((?:%s|[^\'"\n]+?)*?)%%(inline_end)s' % _re_inl
default_syntax = '<% %> % {{ }}'
def __init__(self, source, syntax=None, encoding='utf8'):
self.source, self.encoding = touni(source, encoding), encoding
self.set_syntax(syntax or self.default_syntax)
self.code_buffer, self.text_buffer = [], []
self.lineno, self.offset = 1, 0
self.indent, self.indent_mod = 0, 0
def get_syntax(self):
""" Tokens as a space separated string (default: <% %> % {{ }}) """
return self._syntax
def set_syntax(self, syntax):
self._syntax = syntax
self._tokens = syntax.split()
if not syntax in self._re_cache:
names = 'block_start block_close line_start inline_start inline_end'
etokens = map(re.escape, self._tokens)
pattern_vars = dict(zip(names.split(), etokens))
patterns = (self._re_split, self._re_tok, self._re_inl)
patterns = [re.compile(p%pattern_vars) for p in patterns]
self._re_cache[syntax] = patterns
self.re_split, self.re_tok, self.re_inl = self._re_cache[syntax]
syntax = property(get_syntax, set_syntax)
def translate(self):
if self.offset: raise RuntimeError('Parser is a one time instance.')
while True:
m = self.re_split.search(self.source[self.offset:])
if m:
text = self.source[self.offset:self.offset+m.start()]
self.text_buffer.append(text)
offs = self.offset
self.offset += m.end()
if m.group(1): # Escape syntax
line, sep, _ = self.source[self.offset:].partition('\n')
self.text_buffer.append(self.source[offs+m.start():offs+m.start(1)]+m.group(2)+line+sep)
self.offset += len(line+sep)
continue
self.flush_text()
self.read_code(multiline=bool(m.group(4)))
else: break
self.text_buffer.append(self.source[self.offset:])
self.flush_text()
return ''.join(self.code_buffer)
def read_code(self, multiline):
code_line, comment = '', ''
while True:
m = self.re_tok.search(self.source[self.offset:])
if not m:
code_line += self.source[self.offset:]
self.offset = len(self.source)
self.write_code(code_line.strip(), comment)
return
code_line += self.source[self.offset:self.offset+m.start()]
self.offset += m.end()
_str, _com, _blk1, _blk2, _end, _cend, _nl = m.groups()
if code_line and (_blk1 or _blk2): # a if b else c
code_line += _blk1 or _blk2
continue
if _str: # Python string
code_line += _str
elif _com: # Python comment (up to EOL)
comment = _com
if multiline and _com.strip().endswith(self._tokens[1]):
multiline = False # Allow end-of-block in comments
elif _blk1: # Start-block keyword (if/for/while/def/try/...)
code_line, self.indent_mod = _blk1, -1
self.indent += 1
elif _blk2: # Continue-block keyword (else/elif/except/...)
code_line, self.indent_mod = _blk2, -1
elif _end: # The non-standard 'end'-keyword (ends a block)
self.indent -= 1
elif _cend: # The end-code-block template token (usually '%>')
if multiline: multiline = False
else: code_line += _cend
else: # \n
self.write_code(code_line.strip(), comment)
self.lineno += 1
code_line, comment, self.indent_mod = '', '', 0
if not multiline:
break
def flush_text(self):
text = ''.join(self.text_buffer)
del self.text_buffer[:]
if not text: return
parts, pos, nl = [], 0, '\\\n'+' '*self.indent
for m in self.re_inl.finditer(text):
prefix, pos = text[pos:m.start()], m.end()
if prefix:
parts.append(nl.join(map(repr, prefix.splitlines(True))))
if prefix.endswith('\n'): parts[-1] += nl
parts.append(self.process_inline(m.group(1).strip()))
if pos < len(text):
prefix = text[pos:]
lines = prefix.splitlines(True)
if lines[-1].endswith('\\\\\n'): lines[-1] = lines[-1][:-3]
elif lines[-1].endswith('\\\\\r\n'): lines[-1] = lines[-1][:-4]
parts.append(nl.join(map(repr, lines)))
code = '_printlist((%s,))' % ', '.join(parts)
self.lineno += code.count('\n')+1
self.write_code(code)
@staticmethod
def process_inline(chunk):
if chunk[0] == '!': return '_str(%s)' % chunk[1:]
return '_escape(%s)' % chunk
def write_code(self, line, comment=''):
code = ' ' * (self.indent+self.indent_mod)
code += line.lstrip() + comment + '\n'
self.code_buffer.append(code)
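# Illustrative sketch (not part of the original source): the default stpl
# syntax parsed above is '<% %> % {{ }}', i.e. '%' starts a code line,
# '<% ... %>' delimits a code block and '{{ ... }}' embeds an (escaped)
# expression. A tiny commented-out example template:
#
# source = """
# % if items:
#   <ul>
#   % for item in items:
#     <li>{{item}}</li>
#   % end
#   </ul>
# % end
# """
# # SimpleTemplate(source).render(items=['a', 'b']) would return the HTML list.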
def template(*args, **kwargs):
"""
Get a rendered template as a string iterator.
You can use a name, a filename or a template string as first parameter.
Template rendering arguments can be passed as dictionaries
or directly (as keyword arguments).
"""
tpl = args[0] if args else None
adapter = kwargs.pop('template_adapter', SimpleTemplate)
lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
tplid = (id(lookup), tpl)
if tplid not in TEMPLATES or DEBUG:
settings = kwargs.pop('template_settings', {})
if isinstance(tpl, adapter):
TEMPLATES[tplid] = tpl
if settings: TEMPLATES[tplid].prepare(**settings)
elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
TEMPLATES[tplid] = adapter(source=tpl, lookup=lookup, **settings)
else:
TEMPLATES[tplid] = adapter(name=tpl, lookup=lookup, **settings)
if not TEMPLATES[tplid]:
abort(500, 'Template (%s) not found' % tpl)
for dictarg in args[1:]: kwargs.update(dictarg)
return TEMPLATES[tplid].render(kwargs)
mako_template = functools.partial(template, template_adapter=MakoTemplate)
cheetah_template = functools.partial(template, template_adapter=CheetahTemplate)
jinja2_template = functools.partial(template, template_adapter=Jinja2Template)
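# Illustrative sketch (not part of the original source): template() accepts a
# template name, a file name or a template string, plus keyword arguments that
# become template variables. The template names below are hypothetical, shown
# commented out:
#
# html = template('hello_template', name='World')    # looks up hello_template.tpl on TEMPLATE_PATH
# html = template('Hello {{name}}!', name='World')   # renders an inline template string
# html = mako_template('hello_mako', name='World')   # same call, routed through the Mako adapter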
def view(tpl_name, **defaults):
""" Decorator: renders a template for a handler.
The handler can control its behavior like that:
- return a dict of template vars to fill out the template
- return something other than a dict and the view decorator will not
process the template, but return the handler result as is.
This includes returning a HTTPResponse(dict) to get,
for instance, JSON with autojson or other castfilters.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if isinstance(result, (dict, DictMixin)):
tplvars = defaults.copy()
tplvars.update(result)
return template(tpl_name, **tplvars)
elif result is None:
return template(tpl_name, defaults)
return result
return wrapper
return decorator
mako_view = functools.partial(view, template_adapter=MakoTemplate)
cheetah_view = functools.partial(view, template_adapter=CheetahTemplate)
jinja2_view = functools.partial(view, template_adapter=Jinja2Template)
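# Illustrative sketch (not part of the original source): view() wraps a route
# handler and feeds the returned dict into the named template. The route and
# template names below are hypothetical, shown commented out:
#
# @route('/hello/<name>')
# @view('hello_template')
# def hello(name):
#     return dict(name=name)   # rendered with hello_template.tpl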
###############################################################################
# Constants and Globals ########################################################
###############################################################################
TEMPLATE_PATH = ['./', './views/']
TEMPLATES = {}
DEBUG = False
NORUN = False # If set, run() does nothing. Used by load_app()
#: A dict to map HTTP status codes (e.g. 404) to phrases (e.g. 'Not Found')
HTTP_CODES = httplib.responses
HTTP_CODES[418] = "I'm a teapot" # RFC 2324
HTTP_CODES[428] = "Precondition Required"
HTTP_CODES[429] = "Too Many Requests"
HTTP_CODES[431] = "Request Header Fields Too Large"
HTTP_CODES[511] = "Network Authentication Required"
_HTTP_STATUS_LINES = dict((k, '%d %s'%(k,v)) for (k,v) in HTTP_CODES.items())
#: The default template used for error pages. Override with @error()
ERROR_PAGE_TEMPLATE = """
%%try:
%%from %s import DEBUG, request
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html>
<head>
<title>Error: {{e.status}}</title>
<style type="text/css">
html {background-color: #eee; font-family: sans-serif;}
body {background-color: #fff; border: 1px solid #ddd;
padding: 15px; margin: 15px;}
pre {background-color: #eee; border: 1px solid #ddd; padding: 5px;}
</style>
</head>
<body>
<h1>Error: {{e.status}}</h1>
<p>Sorry, the requested URL <tt>{{repr(request.url)}}</tt>
caused an error:</p>
<pre>{{e.body}}</pre>
%%if DEBUG and e.exception:
<h2>Exception:</h2>
<pre>{{repr(e.exception)}}</pre>
%%end
%%if DEBUG and e.traceback:
<h2>Traceback:</h2>
<pre>{{e.traceback}}</pre>
%%end
</body>
</html>
%%except ImportError:
<b>ImportError:</b> Could not generate the error page. Please add bottle to
the import path.
%%end
""" % __name__
#: A thread-safe instance of :class:`LocalRequest`. If accessed from within a
#: request callback, this instance always refers to the *current* request
#: (even on a multithreaded server).
request = LocalRequest()
#: A thread-safe instance of :class:`LocalResponse`. It is used to change the
#: HTTP response for the *current* request.
response = LocalResponse()
#: A thread-safe namespace. Not used by Bottle.
local = threading.local()
# Initialize app stack (create first empty Bottle app)
# BC: 0.6.4 and needed for run()
app = default_app = AppStack()
app.push()
#: A virtual package that redirects import statements.
#: Example: ``import bottle.ext.sqlite`` actually imports `bottle_sqlite`.
ext = _ImportRedirect('bottle.ext' if __name__ == '__main__' else __name__+".ext", 'bottle_%s').module
if __name__ == '__main__':
opt, args, parser = _cmd_options, _cmd_args, _cmd_parser
if opt.version:
_stdout('Bottle %s\n'%__version__)
sys.exit(0)
if not args:
parser.print_help()
_stderr('\nError: No application entry point specified.\n')
sys.exit(1)
sys.path.insert(0, '.')
sys.modules.setdefault('bottle', sys.modules['__main__'])
host, port = (opt.bind or 'localhost'), 8080
if ':' in host and host.rfind(']') < host.rfind(':'):
host, port = host.rsplit(':', 1)
host = host.strip('[]')
run(args[0], host=host, port=int(port), server=opt.server,
reloader=opt.reload, plugins=opt.plugin, debug=opt.debug)
# THE END
| [
[
[
536,
550
]
],
[
[
552,
562
]
],
[
[
583,
594
],
[
142911,
142922
],
[
119702,
119713
]
],
[
[
608,
619
]
],
[
[
855,
867
],
[
886,
898
]
],
[
[
872,
883
],
[
961,
972
],
[
1476,
1487
],
[
142849,
142860
]
],
[
[
954,
958
],
[
988,
992
],
[
1060,
1064
],
[
1136,
1140
],
[
1213,
1217
],
[
1294,
1298
],
[
1371,
1375
]
],
[
[
1450,
1462
],
[
1508,
1520
],
[
1540,
1552
],
[
1655,
1667
],
[
142824,
142836
]
],
[
[
1464,
1473
],
[
142838,
142847
]
],
[
[
1601,
1614
],
[
1616,
1622
]
],
[
[
1718,
1726
],
[
1728,
1736
]
],
[
[
1760,
1766
],
[
97657,
97663
],
[
99299,
99305
],
[
99350,
99356
],
[
99673,
99679
],
[
99758,
99764
]
],
[
[
1768,
1771
],
[
49925,
49928
]
],
[
[
1773,
1784
],
[
97220,
97225
]
],
[
[
1786,
1795
],
[
3463,
3472
],
[
138514,
138523
],
[
138592,
138601
],
[
138672,
138681
],
[
139727,
139736
],
[
139797,
139806
],
[
139869,
139878
],
[
5463,
5472
],
[
6257,
6266
],
[
7517,
7526
],
[
22550,
22559
],
[
103261,
103270
],
[
129884,
129893
],
[
129945,
129954
],
[
102653,
102662
],
[
139268,
139277
]
],
[
[
1797,
1801
],
[
99367,
99371
],
[
99690,
99694
]
],
[
[
1803,
1806
],
[
71788,
71791
]
],
[
[
1808,
1817
],
[
37264,
37273
],
[
37432,
37441
],
[
26118,
26127
]
],
[
[
1819,
1828
],
[
94677,
94686
]
],
[
[
1839,
1841
],
[
87191,
87193
],
[
87207,
87209
],
[
87258,
87260
],
[
87274,
87276
],
[
87293,
87295
],
[
87333,
87335
],
[
87431,
87433
],
[
87464,
87466
],
[
87641,
87643
],
[
87863,
87865
],
[
87917,
87919
],
[
87958,
87960
],
[
88002,
88004
],
[
88472,
88474
],
[
88516,
88518
],
[
90530,
90532
],
[
90567,
90569
],
[
91630,
91632
],
[
91688,
91690
],
[
91762,
91764
],
[
94162,
94164
],
[
94186,
94188
],
[
94208,
94210
],
[
94224,
94226
],
[
94386,
94388
],
[
94418,
94420
],
[
94509,
94511
],
[
94529,
94531
],
[
94993,
94995
],
[
95147,
95149
],
[
108469,
108471
],
[
111425,
111427
],
[
112812,
112814
],
[
117777,
117779
],
[
117936,
117938
],
[
118023,
118025
],
[
118126,
118128
],
[
118378,
118380
],
[
118515,
118517
],
[
118541,
118543
],
[
118683,
118685
],
[
118725,
118727
],
[
119903,
119905
],
[
120886,
120888
],
[
123390,
123392
],
[
124298,
124300
],
[
124322,
124324
],
[
124440,
124442
],
[
124512,
124514
],
[
124537,
124539
],
[
124564,
124566
],
[
124580,
124582
],
[
124675,
124677
],
[
124769,
124771
],
[
120927,
120929
]
],
[
[
1843,
1845
],
[
11102,
11104
],
[
8990,
8992
],
[
13109,
13111
],
[
13473,
13475
],
[
13559,
13561
],
[
15231,
15233
],
[
90597,
90599
],
[
90661,
90663
],
[
133027,
133029
],
[
133205,
133207
]
],
[
[
1847,
1857
],
[
118267,
118277
]
],
[
[
1859,
1862
],
[
2690,
2693
],
[
3043,
3046
],
[
3061,
3064
],
[
142932,
142935
],
[
143065,
143068
],
[
143082,
143085
],
[
143110,
143113
],
[
143143,
143146
],
[
2857,
2860
],
[
3118,
3121
],
[
3162,
3165
],
[
38866,
38869
],
[
71759,
71762
],
[
71965,
71968
],
[
72248,
72251
],
[
72268,
72271
],
[
72447,
72450
],
[
72423,
72426
],
[
109668,
109671
],
[
115703,
115706
],
[
115761,
115764
],
[
115821,
115824
],
[
115920,
115923
],
[
118073,
118076
],
[
118091,
118094
],
[
118581,
118584
],
[
120114,
120117
],
[
120415,
120418
],
[
120998,
121001
]
],
[
[
1864,
1872
],
[
117873,
117881
]
],
[
[
1874,
1883
],
[
120454,
120463
],
[
142389,
142398
],
[
67488,
67497
],
[
111016,
111025
],
[
120656,
120665
],
[
126740,
126749
]
],
[
[
1885,
1889
],
[
66854,
66858
],
[
66897,
66901
],
[
95227,
95231
],
[
95270,
95274
],
[
95535,
95539
],
[
95578,
95582
],
[
96953,
96957
],
[
97030,
97034
],
[
97265,
97269
],
[
97309,
97313
],
[
118437,
118441
],
[
120386,
120390
],
[
121311,
121315
],
[
121661,
121665
]
],
[
[
1891,
1899
],
[
4396,
4404
],
[
5747,
5755
],
[
14464,
14472
],
[
96731,
96739
]
],
[
[
1922,
1938
],
[
66704,
66712
],
[
96836,
96844
]
],
[
[
1940,
1948
],
[
63886,
63894
],
[
66714,
66722
],
[
96846,
96854
]
],
[
[
1950,
1959
],
[
66554,
66563
]
],
[
[
1981,
1994
],
[
47264,
47277
]
],
[
[
2017,
2027
],
[
34554,
34564
],
[
37028,
37038
],
[
38671,
38681
]
],
[
[
2029,
2038
],
[
120366,
120375
]
],
[
[
2059,
2069
],
[
21261,
21271
],
[
100932,
100942
]
],
[
[
2094,
2103
],
[
90447,
90456
]
],
[
[
2134,
2153
],
[
69963,
69973
]
],
[
[
2155,
2172
],
[
4653,
4661
],
[
3763,
3771
]
],
[
[
2238,
2257
],
[
69963,
69973
]
],
[
[
2259,
2276
],
[
4653,
4661
],
[
3763,
3771
]
],
[
[
2350,
2369
],
[
69963,
69973
]
],
[
[
2371,
2388
],
[
4653,
4661
],
[
3763,
3771
]
],
[
[
2433,
2443
],
[
2559,
2569
],
[
69963,
69973
]
],
[
[
2548,
2556
],
[
4653,
4661
],
[
3763,
3771
]
],
[
[
2683,
2685
],
[
2714,
2716
],
[
2737,
2739
],
[
2773,
2775
]
],
[
[
2707,
2711
],
[
3228,
3232
],
[
5060,
5064
],
[
73570,
73574
],
[
20735,
20739
],
[
20802,
20806
],
[
33526,
33530
],
[
49866,
49870
],
[
63536,
63540
],
[
80096,
80100
]
],
[
[
2730,
2734
],
[
4301,
4305
]
],
[
[
2753,
2757
],
[
5179,
5183
],
[
49722,
49726
]
],
[
[
2844,
2846
],
[
13644,
13646
],
[
15934,
15936
],
[
34285,
34287
],
[
34679,
34681
],
[
36824,
36826
],
[
37022,
37024
],
[
38651,
38653
],
[
70257,
70259
]
],
[
[
3024,
3031
],
[
142889,
142896
]
],
[
[
3033,
3040
],
[
142997,
143004
],
[
108512,
108519
],
[
108588,
108595
],
[
119643,
119650
],
[
119742,
119749
],
[
119823,
119830
]
],
[
[
3098,
3105
],
[
142889,
142896
]
],
[
[
3142,
3149
],
[
142997,
143004
],
[
108512,
108519
],
[
108588,
108595
],
[
119643,
119650
],
[
119742,
119749
],
[
119823,
119830
]
],
[
[
3245,
3267
],
[
140392,
140399
]
],
[
[
3279,
3296
],
[
121398,
121404
],
[
121599,
121605
]
],
[
[
3326,
3333
],
[
29797,
29804
],
[
29805,
29812
],
[
51762,
51769
],
[
92728,
92735
]
],
[
[
3335,
3364
],
[
51573,
51587
]
],
[
[
3394,
3403
],
[
15829,
15838
]
],
[
[
3405,
3422
],
[
51534,
51542
]
],
[
[
3424,
3445
],
[
3481,
3491
]
],
[
[
3450,
3460
],
[
98827,
98837
],
[
98879,
98889
]
],
[
[
3541,
3553
],
[
42204,
42216
],
[
59897,
59909
],
[
66111,
66123
]
],
[
[
3582,
3609
],
[
72835,
72844
],
[
78878,
78887
],
[
139412,
139421
]
],
[
[
3621,
3627
],
[
99316,
99322
],
[
99745,
99751
]
],
[
[
3647,
3654
],
[
47026,
47033
]
],
[
[
3684,
3696
],
[
81510,
81522
]
],
[
[
3701,
3711
],
[
66245,
66255
],
[
91577,
91587
],
[
97001,
97011
],
[
118876,
118886
],
[
119093,
119103
],
[
119288,
119298
],
[
31732,
31742
]
],
[
[
3722,
3729
],
[
77582,
77589
],
[
4850,
4857
],
[
5003,
5010
],
[
35340,
35347
],
[
35472,
35479
],
[
37326,
37333
],
[
62445,
62452
],
[
63649,
63656
],
[
76517,
76524
],
[
80133,
80140
],
[
90370,
90377
]
],
[
[
3740,
3750
],
[
45495,
45505
]
],
[
[
3786,
3794
],
[
27434,
27442
],
[
31551,
31559
],
[
116453,
116461
],
[
118936,
118944
]
],
[
[
3834,
3838
],
[
37418,
37422
]
],
[
[
3853,
3859
],
[
25837,
25843
]
],
[
[
3926,
3933
],
[
140392,
140399
]
],
[
[
3945,
3951
],
[
121398,
121404
],
[
121599,
121605
]
],
[
[
3977,
3984
],
[
29797,
29804
],
[
29805,
29812
],
[
51762,
51769
],
[
92728,
92735
]
],
[
[
3986,
4015
],
[
51573,
51587
]
],
[
[
4039,
4048
],
[
15829,
15838
]
],
[
[
4050,
4067
],
[
51534,
51542
]
],
[
[
4069,
4090
],
[
98827,
98837
],
[
98879,
98889
]
],
[
[
4114,
4126
],
[
42204,
42216
],
[
59897,
59909
],
[
66111,
66123
]
],
[
[
4153,
4157
],
[
37418,
37422
]
],
[
[
4169,
4186
],
[
99316,
99322
],
[
99745,
99751
]
],
[
[
4212,
4231
],
[
47026,
47033
]
],
[
[
4261,
4293
],
[
81510,
81522
]
],
[
[
4315,
4318
],
[
4410,
4413
]
],
[
[
4464,
4473
],
[
72835,
72844
],
[
78878,
78887
],
[
139412,
139421
]
],
[
[
4486,
4490
],
[
36636,
36640
],
[
36700,
36704
]
],
[
[
4521,
4526
],
[
4864,
4869
],
[
4936,
4941
],
[
35333,
35338
],
[
35597,
35602
],
[
37233,
37238
],
[
76672,
76677
]
],
[
[
4586,
4613
],
[
72835,
72844
],
[
78878,
78887
],
[
139412,
139421
]
],
[
[
4618,
4625
],
[
77582,
77589
],
[
4850,
4857
],
[
5003,
5010
],
[
35340,
35347
],
[
35472,
35479
],
[
37326,
37333
],
[
62445,
62452
],
[
63649,
63656
],
[
76517,
76524
],
[
80133,
80140
],
[
90370,
90377
]
],
[
[
4640,
4650
],
[
45495,
45505
]
],
[
[
4788,
4791
],
[
5070,
5073
],
[
33375,
33378
],
[
38902,
38905
],
[
45928,
45931
],
[
45941,
45944
],
[
45951,
45954
],
[
97674,
97677
],
[
99376,
99379
],
[
99412,
99415
],
[
99429,
99432
],
[
99561,
99564
],
[
99633,
99636
],
[
99699,
99702
],
[
99928,
99931
],
[
99942,
99945
]
],
[
[
4879,
4884
],
[
5051,
5056
],
[
3772,
3777
],
[
66166,
66171
],
[
97651,
97656
],
[
128940,
128945
],
[
129108,
129113
],
[
132420,
132425
],
[
128422,
128427
],
[
128481,
128486
]
],
[
[
5043,
5048
],
[
46283,
46288
],
[
49280,
49285
]
],
[
[
5204,
5217
],
[
5245,
5258
]
],
[
[
5229,
5244
],
[
49753,
49768
]
],
[
[
5402,
5416
],
[
20430,
20444
]
],
[
[
5714,
5718
],
[
124188,
124192
],
[
124356,
124360
],
[
129003,
129007
]
],
[
[
5809,
5817
],
[
31613,
31621
],
[
31648,
31656
],
[
85392,
85400
],
[
31795,
31803
],
[
31864,
31872
]
],
[
[
6000,
6012
],
[
23370,
23382
],
[
40430,
40442
],
[
40646,
40658
],
[
40877,
40889
],
[
41512,
41524
],
[
41942,
41954
],
[
42918,
42930
],
[
43481,
43493
],
[
44039,
44051
],
[
44497,
44509
],
[
44931,
44943
],
[
46774,
46786
],
[
48694,
48706
],
[
50683,
50695
]
],
[
[
6922,
6937
],
[
18964,
18979
],
[
23526,
23541
],
[
89784,
89799
],
[
128614,
128629
],
[
128725,
128740
]
],
[
[
7392,
7406
]
],
[
[
7979,
7994
],
[
8338,
8353
],
[
8443,
8458
],
[
68872,
68887
],
[
69846,
69861
]
],
[
[
8327,
8337
],
[
8604,
8614
],
[
8647,
8657
],
[
8759,
8769
]
],
[
[
8432,
8442
],
[
20271,
20281
],
[
34305,
34315
]
],
[
[
8581,
8603
]
],
[
[
8630,
8646
],
[
13587,
13603
]
],
[
[
8743,
8758
],
[
15566,
15581
],
[
15889,
15904
]
],
[
[
8820,
8831
],
[
14228,
14239
],
[
10502,
10513
]
],
[
[
9118,
9124
],
[
22999,
23005
]
],
[
[
17635,
17640
],
[
28709,
28714
],
[
31950,
31955
]
],
[
[
21985,
21991
],
[
26983,
26989
],
[
84553,
84559
],
[
84582,
84588
],
[
103285,
103291
]
],
[
[
39577,
39588
],
[
67799,
67810
],
[
68175,
68186
],
[
68805,
68816
]
],
[
[
57228,
57233
],
[
61726,
61731
],
[
61807,
61812
],
[
61875,
61880
],
[
61946,
61951
],
[
62193,
62198
],
[
62402,
62407
],
[
62620,
62625
],
[
78139,
78144
],
[
78209,
78214
],
[
78270,
78275
],
[
78335,
78340
],
[
78425,
78430
],
[
78505,
78510
],
[
78580,
78585
],
[
78681,
78686
],
[
78759,
78764
],
[
80589,
80594
],
[
80664,
80669
]
],
[
[
57284,
57298
],
[
63724,
63738
],
[
63776,
63790
],
[
63835,
63849
],
[
89669,
89683
],
[
89721,
89735
]
],
[
[
57893,
57905
],
[
68250,
68262
],
[
68586,
68598
],
[
68828,
68840
],
[
59654,
59666
],
[
59698,
59710
]
],
[
[
67460,
67475
],
[
68210,
68225
],
[
68627,
68642
],
[
68664,
68679
],
[
68701,
68716
],
[
68738,
68753
],
[
68775,
68790
]
],
[
[
67786,
67798
],
[
142167,
142179
]
],
[
[
68236,
68249
],
[
142316,
142329
]
],
[
[
68795,
68802
],
[
55543,
55550
]
],
[
[
68817,
68825
],
[
68862,
68870
]
],
[
[
68849,
68861
],
[
69298,
69310
],
[
34252,
34264
],
[
36113,
36125
],
[
36790,
36802
],
[
37150,
37162
],
[
68980,
68992
],
[
92643,
92655
],
[
95608,
95620
],
[
96253,
96265
],
[
96306,
96318
],
[
25692,
25704
],
[
70604,
70616
]
],
[
[
69288,
69297
],
[
122204,
122213
],
[
17435,
17444
],
[
17580,
17589
],
[
33681,
33690
],
[
34639,
34648
],
[
35916,
35925
],
[
36984,
36993
],
[
37572,
37581
],
[
45845,
45854
],
[
47745,
47754
],
[
47937,
47946
],
[
69530,
69539
],
[
92357,
92366
],
[
94342,
94351
],
[
94459,
94468
],
[
94554,
94563
],
[
95961,
95970
],
[
122257,
122266
],
[
13978,
13987
],
[
70225,
70234
],
[
102844,
102853
]
],
[
[
69834,
69845
]
],
[
[
69877,
69887
],
[
23225,
23235
]
],
[
[
70797,
70811
],
[
23260,
23274
]
],
[
[
71538,
71553
],
[
142675,
142690
]
],
[
[
72825,
72834
],
[
75776,
75785
],
[
77853,
77862
],
[
78661,
78670
]
],
[
[
75766,
75775
],
[
42277,
42286
],
[
43317,
43326
],
[
43868,
43877
],
[
44295,
44304
],
[
44764,
44773
],
[
49027,
49036
],
[
77030,
77039
],
[
77745,
77754
]
],
[
[
77842,
77852
],
[
61615,
61625
],
[
89600,
89610
],
[
89636,
89646
]
],
[
[
78863,
78877
],
[
41733,
41747
]
],
[
[
80849,
80859
],
[
18927,
18937
],
[
22504,
22514
]
],
[
[
84229,
84237
],
[
142516,
142524
]
],
[
[
84647,
84662
],
[
36478,
36493
]
],
[
[
85112,
85122
],
[
37650,
37660
]
],
[
[
85559,
85574
],
[
22887,
22902
]
],
[
[
89095,
89105
],
[
43964,
43974
],
[
44856,
44866
],
[
50157,
50167
]
],
[
[
92253,
92258
],
[
138358,
138363
]
],
[
[
92385,
92393
]
],
[
[
92775,
92791
],
[
96195,
96211
]
],
[
[
93076,
93087
]
],
[
[
96589,
96594
],
[
116602,
116607
]
],
[
[
96792,
96801
],
[
63953,
63962
]
],
[
[
97102,
97112
],
[
63912,
63922
],
[
95416,
95426
]
],
[
[
97411,
97421
],
[
54428,
54438
]
],
[
[
97788,
97806
],
[
95862,
95880
]
],
[
[
98637,
98647
],
[
43345,
43355
],
[
49269,
49279
]
],
[
[
98964,
98970
],
[
99657,
99663
]
],
[
[
99190,
99203
],
[
66172,
66185
]
],
[
[
99450,
99463
],
[
42758,
42771
]
],
[
[
99803,
99820
],
[
99578,
99595
]
],
[
[
99966,
99977
],
[
128291,
128302
],
[
38411,
38422
],
[
38634,
38645
],
[
38659,
38670
],
[
100315,
100326
]
],
[
[
100205,
100215
]
],
[
[
100428,
100439
],
[
31813,
31824
]
],
[
[
101148,
101158
],
[
52854,
52864
]
],
[
[
102449,
102459
]
],
[
[
103148,
103172
],
[
103405,
103429
],
[
103451,
103475
],
[
103495,
103519
],
[
103540,
103564
],
[
103584,
103608
],
[
103631,
103655
],
[
103677,
103701
],
[
103723,
103747
],
[
103769,
103793
],
[
103814,
103838
],
[
103862,
103886
],
[
103912,
103936
]
],
[
[
103393,
103398
]
],
[
[
103439,
103442
]
],
[
[
103483,
103487
]
],
[
[
103528,
103531
]
],
[
[
103572,
103578
]
],
[
[
103619,
103624
]
],
[
[
103665,
103670
]
],
[
[
103711,
103716
]
],
[
[
103757,
103761
]
],
[
[
103802,
103809
]
],
[
[
103850,
103859
]
],
[
[
103900,
103903
]
],
[
[
104204,
104217
],
[
104638,
104651
],
[
104981,
104994
],
[
105236,
105249
],
[
106509,
106522
],
[
107258,
107271
],
[
107412,
107425
],
[
107776,
107789
],
[
107950,
107963
],
[
108905,
108918
],
[
109346,
109359
],
[
109865,
109878
],
[
110444,
110457
],
[
110662,
110675
],
[
111592,
111605
],
[
111815,
111828
],
[
112332,
112345
],
[
113448,
113461
],
[
113674,
113687
],
[
113885,
113898
],
[
119465,
119478
]
],
[
[
104628,
104637
],
[
114251,
114260
]
],
[
[
104966,
104980
],
[
114274,
114288
]
],
[
[
105222,
105235
],
[
113999,
114012
],
[
114305,
114318
]
],
[
[
106494,
106508
],
[
113983,
113997
],
[
114368,
114382
]
],
[
[
107243,
107257
],
[
113939,
113953
],
[
114336,
114350
]
],
[
[
107400,
107411
],
[
113955,
113966
],
[
114397,
114408
]
],
[
[
107761,
107775
],
[
114569,
114583
]
],
[
[
107938,
107949
],
[
114424,
114435
]
],
[
[
108891,
108904
],
[
114452,
114465
]
],
[
[
109330,
109345
],
[
114478,
114493
]
],
[
[
109851,
109864
],
[
113968,
113981
],
[
114510,
114523
]
],
[
[
110431,
110443
],
[
114539,
114551
]
],
[
[
110649,
110661
],
[
114663,
114675
]
],
[
[
111571,
111591
],
[
114698,
114718
]
],
[
[
111800,
111814
],
[
114601,
114615
]
],
[
[
112317,
112331
],
[
114633,
114647
]
],
[
[
113435,
113447
],
[
114734,
114746
]
],
[
[
113661,
113673
],
[
114763,
114775
]
],
[
[
113874,
113884
],
[
114789,
114799
]
],
[
[
114223,
114235
],
[
119198,
119210
],
[
119233,
119245
]
],
[
[
115056,
115060
],
[
116392,
116396
],
[
119131,
119135
],
[
119322,
119326
],
[
31756,
31760
]
],
[
[
116007,
116015
],
[
118907,
118915
]
],
[
[
116593,
116599
],
[
118800,
118806
]
],
[
[
116612,
116615
],
[
143350,
143353
],
[
29204,
29207
]
],
[
[
120436,
120453
],
[
119959,
119976
]
],
[
[
122190,
122203
],
[
130739,
130752
],
[
123758,
123771
],
[
123879,
123892
]
],
[
[
122304,
122316
],
[
125857,
125869
],
[
126622,
126634
],
[
127290,
127302
],
[
128241,
128253
]
],
[
[
125844,
125856
],
[
138559,
138571
],
[
139768,
139780
]
],
[
[
126606,
126621
],
[
138637,
138652
],
[
139838,
139853
]
],
[
[
127275,
127289
],
[
138717,
138731
],
[
139910,
139924
]
],
[
[
128226,
128240
],
[
137752,
137766
]
],
[
[
130723,
130738
]
],
[
[
130768,
130778
],
[
129159,
129169
]
],
[
[
137399,
137407
],
[
138532,
138540
],
[
138610,
138618
],
[
138690,
138698
],
[
33379,
33387
],
[
139529,
139537
],
[
139615,
139623
]
],
[
[
138498,
138511
]
],
[
[
138573,
138589
]
],
[
[
138654,
138669
]
],
[
[
138739,
138743
],
[
139745,
139749
],
[
139815,
139819
],
[
139887,
139891
],
[
71305,
71309
],
[
71395,
71399
]
],
[
[
139715,
139724
]
],
[
[
139782,
139794
]
],
[
[
139855,
139866
]
],
[
[
140175,
140188
],
[
137811,
137824
]
],
[
[
140210,
140219
],
[
137876,
137885
],
[
138000,
138009
],
[
138048,
138057
],
[
138165,
138174
],
[
138257,
138266
],
[
138332,
138341
],
[
138465,
138474
]
],
[
[
140225,
140230
],
[
14365,
14370
],
[
28832,
28837
],
[
30091,
30096
],
[
38469,
38474
],
[
88406,
88411
],
[
126106,
126111
],
[
137889,
137894
]
],
[
[
140239,
140244
],
[
116298,
116303
],
[
117739,
117744
]
],
[
[
140379,
140389
],
[
140410,
140420
],
[
140454,
140464
],
[
140496,
140506
],
[
140534,
140544
],
[
140586,
140596
],
[
140696,
140706
]
],
[
[
140638,
140656
],
[
60528,
60546
]
],
[
[
140786,
140805
],
[
33388,
33407
]
],
[
[
142157,
142164
],
[
29657,
29664
],
[
33802,
33809
],
[
36301,
36308
],
[
36341,
36348
],
[
92561,
92568
],
[
92736,
92743
],
[
95344,
95351
],
[
95662,
95669
],
[
95766,
95773
],
[
95823,
95830
],
[
95881,
95888
],
[
25640,
25647
],
[
26041,
26048
],
[
26254,
26261
],
[
102735,
102742
]
],
[
[
142305,
142313
],
[
33836,
33844
],
[
35100,
35108
],
[
35126,
35134
],
[
35511,
35519
],
[
35644,
35652
],
[
35670,
35678
],
[
35950,
35958
],
[
36150,
36158
],
[
37909,
37917
],
[
38114,
38122
],
[
38137,
38145
],
[
92625,
92633
],
[
37377,
37385
],
[
70492,
70500
]
],
[
[
142381,
142386
]
],
[
[
142496,
142499
],
[
142527,
142530
],
[
103350,
103353
]
],
[
[
142502,
142513
],
[
39198,
39209
],
[
39306,
39317
],
[
116314,
116325
],
[
116496,
116507
],
[
118835,
118846
]
],
[
[
142669,
142672
]
],
[
[
142804,
142807
],
[
142868,
142871
],
[
143187,
143190
],
[
143397,
143400
],
[
143426,
143429
],
[
143446,
143449
],
[
143464,
143467
]
],
[
[
142809,
142813
],
[
142955,
142959
],
[
143354,
143358
]
],
[
[
142815,
142821
],
[
142969,
142975
]
],
[
[
143173,
143177
],
[
143232,
143236
],
[
143241,
143245
],
[
143259,
143263
],
[
143297,
143301
],
[
143328,
143332
]
],
[
[
143179,
143183
],
[
143383,
143387
]
],
[
[
143284,
143288
],
[
143328,
143332
]
],
[
[
143290,
143294
],
[
143383,
143387
]
],
[
[
143321,
143325
],
[
143368,
143372
]
],
[
[
96768,
96773
]
],
[
[
116276,
116281
]
],
[
[
116577,
116582
]
]
] |
"""basic ding-dong bot for the wechaty plugin"""
from typing import Union
from wechaty import Message, Contact, Room, FileBox
from wechaty.plugin import WechatyPlugin
class DingDongPlugin(WechatyPlugin):
"""basic ding-dong plugin"""
@property
def name(self):
"""name of the plugin"""
return 'ding-dong'
async def on_message(self, msg: Message):
"""listen message event"""
from_contact = msg.talker()
text = msg.text()
room = msg.room()
if text == '#ding':
conversation: Union[
Room, Contact] = from_contact if room is None else room
await conversation.ready()
await conversation.say('dong')
file_box = FileBox.from_url(
'https://ss3.bdstatic.com/70cFv8Sh_Q1YnxGkpoWK1HF6hhy/it/'
'u=1116676390,2305043183&fm=26&gp=0.jpg',
name='ding-dong.jpg')
await conversation.say(file_box)
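# Illustrative sketch (not part of this plugin file): a plugin like the one
# above is typically registered on a Wechaty bot via use(). Token/puppet
# configuration is assumed to come from environment variables; shown
# commented out:
#
# import asyncio
# from wechaty import Wechaty
#
# async def main():
#     bot = Wechaty()
#     bot.use(DingDongPlugin())
#     await bot.start()
#
# asyncio.run(main())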
| [
[
[
68,
73
],
[
558,
563
]
],
[
[
95,
102
],
[
371,
378
]
],
[
[
104,
111
],
[
587,
594
]
],
[
[
113,
117
],
[
581,
585
]
],
[
[
119,
126
],
[
742,
749
]
],
[
[
154,
167
],
[
191,
204
]
],
[
[
176,
190
]
]
] |
"""
_HeartbeatTest_
Unit tests for the HeartbeatAPI class.
"""
from __future__ import print_function
import time
import unittest
from WMCore.Agent.HeartbeatAPI import HeartbeatAPI
from WMQuality.TestInit import TestInit
class HeartbeatTest(unittest.TestCase):
def setUp(self):
"""
_setUp_
        Set up the database and logging connection. Try to create all of the
Heartbeat tables. Also add some dummy locations.
"""
self.testInit = TestInit(__file__)
self.testInit.setLogging() # logLevel = logging.SQLDEBUG
self.testInit.setDatabaseConnection()
self.testInit.setSchema(customModules=["WMCore.Agent.Database"],
useDefault=False)
def tearDown(self):
"""
_tearDown_
Drop all the Heartbeat tables.
"""
self.testInit.clearDatabase()
def testAddComponent(self):
"""
_testAddComponent_
Test creation of components and worker threads as well as the
get heartbeat DAOs
"""
comp1 = HeartbeatAPI("testComponent1", pollInterval=60, heartbeatTimeout=600)
comp1.registerComponent()
self.assertEqual(comp1.getHeartbeatInfo(), []) # no worker thread yet
comp1.registerWorker("testWorker1")
self.assertEqual(len(comp1.getHeartbeatInfo()), 1)
comp1.registerWorker("testWorker2")
self.assertEqual(len(comp1.getHeartbeatInfo()), 2)
comp2 = HeartbeatAPI("testComponent2", pollInterval=30, heartbeatTimeout=300)
comp2.registerComponent()
self.assertEqual(comp2.getHeartbeatInfo(), []) # no worker thread yet
self.assertEqual(len(comp2.getAllHeartbeatInfo()), 2)
comp2.registerWorker("testWorker21")
self.assertEqual(len(comp2.getHeartbeatInfo()), 1)
self.assertEqual(len(comp2.getAllHeartbeatInfo()), 3)
comp1.updateWorkerHeartbeat("testWorker1", "Running")
comp1.updateWorkerHeartbeat("testWorker2", "Running")
comp2.updateWorkerHeartbeat("testWorker21", "Running")
self.assertEqual(len(comp1.getAllHeartbeatInfo()), 3)
self.assertEqual(len(comp2.getAllHeartbeatInfo()), 3)
comp1Res = comp1.getHeartbeatInfo()
comp2Res = comp2.getHeartbeatInfo()
self.assertEqual(len(comp1Res), 2)
self.assertEqual(len(comp2Res), 1)
self.assertItemsEqual([item["name"] for item in comp1Res], ["testComponent1", "testComponent1"])
self.assertItemsEqual([item["worker_name"] for item in comp1Res], ["testWorker1", "testWorker2"])
self.assertItemsEqual([item["state"] for item in comp1Res], ["Running", "Running"])
self.assertItemsEqual([item["poll_interval"] for item in comp1Res], [60, 60])
self.assertItemsEqual([item["update_threshold"] for item in comp1Res], [600, 600])
self.assertItemsEqual([item["name"] for item in comp2Res], ["testComponent2"])
self.assertItemsEqual([item["worker_name"] for item in comp2Res], ["testWorker21"])
self.assertItemsEqual([item["state"] for item in comp2Res], ["Running"])
self.assertItemsEqual([item["poll_interval"] for item in comp2Res], [30])
self.assertItemsEqual([item["update_threshold"] for item in comp2Res], [300])
def testUpdateWorkers(self):
"""
_testUpdateWorkers_
Create a couple of components and workers and test the update methods
"""
comp1 = HeartbeatAPI("testComponent1", pollInterval=60, heartbeatTimeout=600)
comp1.registerComponent()
comp1.registerWorker("testWorker1")
comp1.registerWorker("testWorker2")
comp2 = HeartbeatAPI("testComponent2", pollInterval=30, heartbeatTimeout=300)
comp2.registerComponent()
comp2.registerWorker("testWorker21")
comp1.updateWorkerCycle("testWorker1", 1.001, None)
comp2.updateWorkerCycle("testWorker21", 1234.1, 100)
hb1 = comp1.getHeartbeatInfo()
hb2 = comp2.getHeartbeatInfo()
for worker in hb1:
if worker['worker_name'] == 'testWorker1':
self.assertTrue(worker["cycle_time"] > 1.0)
else:
self.assertEqual(worker["cycle_time"], 0)
self.assertItemsEqual([item["outcome"] for item in hb1], [None, None])
self.assertItemsEqual([item["error_message"] for item in hb1], [None, None])
self.assertEqual(round(hb2[0]["cycle_time"], 1), 1234.1)
self.assertEqual(hb2[0]["outcome"], '100')
self.assertEqual(hb2[0]["error_message"], None)
# time to update workers with an error
comp1.updateWorkerError("testWorker2", "BAD JOB!!!")
hb1 = comp1.getHeartbeatInfo()
for worker in hb1:
if worker['worker_name'] == 'testWorker2':
self.assertTrue(worker["last_error"] > int(time.time() - 10))
self.assertEqual(worker["state"], "Error")
self.assertEqual(worker["error_message"], "BAD JOB!!!")
if __name__ == "__main__":
unittest.main()
| [
[
[
89,
103
]
],
[
[
112,
116
],
[
4889,
4893
]
],
[
[
124,
132
],
[
247,
255
],
[
5072,
5080
]
],
[
[
172,
184
],
[
1088,
1100
],
[
1496,
1508
],
[
3486,
3498
],
[
3695,
3707
]
],
[
[
216,
224
],
[
489,
497
]
],
[
[
233,
246
]
]
] |
# Generated by Django 2.2.10 on 2020-03-20 13:00
import wagtail.core.blocks
import wagtail.core.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("budgetportal", "0053_custompage"),
]
operations = [
migrations.AddField(
model_name="custompage",
name="body",
field=wagtail.core.fields.StreamField(
[
(
"section",
wagtail.core.blocks.StructBlock(
[
(
"presentation_class",
wagtail.core.blocks.ChoiceBlock(
choices=[
("is-default", "Default"),
("is-invisible", "No background/border"),
("is-bevel", "Bevel"),
]
),
),
("heading", wagtail.core.blocks.CharBlock()),
("content", wagtail.core.blocks.RichTextBlock()),
]
),
),
("html", wagtail.core.blocks.RawHTMLBlock()),
],
default=None,
),
preserve_default=False,
),
]
| [
[
[
57,
76
]
],
[
[
84,
103
],
[
380,
387
],
[
512,
519
],
[
703,
710
],
[
1170,
1177
],
[
1248,
1255
],
[
1395,
1402
]
],
[
[
126,
136
],
[
155,
165
],
[
279,
289
]
],
[
[
145,
154
]
]
] |
import torch
from torch import nn
from torchvision.models.vgg import vgg16
class GeneratorLoss_NEW(nn.Module):
def __init__(self):
super(GeneratorLoss_NEW, self).__init__()
vgg = vgg16(pretrained=True)
# loss_network = nn.Sequential(*list(vgg.features)[:31]).eval()
loss_network = nn.Sequential(*list(vgg.features)[:35]).eval()
for param in loss_network.parameters():
param.requires_grad = False
self.loss_network = loss_network
self.mse_loss = nn.MSELoss()
self.tv_loss = TVLoss()
self.charbonnier_loss = L1_Charbonnier_loss()
def forward(self, out_labels, out_images, target_images):
# Adversarial Loss
adversarial_loss = torch.mean(1 - out_labels)
# Perception Loss
# perception_loss = self.mse_loss(self.loss_network(out_images), self.loss_network(target_images))
perception_loss = self.charbonnier_loss(self.loss_network(out_images), self.loss_network(target_images))
# Image Loss
# image_loss = self.mse_loss(out_images, target_images)
image_loss = self.charbonnier_loss(out_images, target_images)
# TV Loss
tv_loss = self.tv_loss(out_images)
return image_loss + 0.001 * adversarial_loss + 0.006 * perception_loss + 2e-8 * tv_loss
class TVLoss(nn.Module):
def __init__(self, tv_loss_weight=1):
super(TVLoss, self).__init__()
self.tv_loss_weight = tv_loss_weight
def forward(self, x):
batch_size = x.size()[0]
h_x = x.size()[2]
w_x = x.size()[3]
count_h = self.tensor_size(x[:, :, 1:, :])
count_w = self.tensor_size(x[:, :, :, 1:])
h_tv = torch.pow((x[:, :, 1:, :] - x[:, :, :h_x - 1, :]), 2).sum()
w_tv = torch.pow((x[:, :, :, 1:] - x[:, :, :, :w_x - 1]), 2).sum()
return self.tv_loss_weight * 2 * (h_tv / count_h + w_tv / count_w) / batch_size
@staticmethod
def tensor_size(t):
return t.size()[1] * t.size()[2] * t.size()[3]
class L1_Charbonnier_loss(torch.nn.Module):
"""L1 Charbonnierloss."""
def __init__(self):
super(L1_Charbonnier_loss, self).__init__()
self.eps = 1e-6
def forward(self, X, Y):
diff = torch.add(X, -Y)
error = torch.sqrt(diff * diff + self.eps)
loss = torch.mean(error)
return loss
if __name__ == "__main__":
g_loss = GeneratorLoss_NEW()
print(g_loss)
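# Illustrative sketch (not part of the original file): the generator loss above
# combines image (Charbonnier), adversarial, perceptual and TV terms with the
# weights 1 : 0.001 : 0.006 : 2e-8. The shapes below are hypothetical and the
# call needs the pretrained vgg16 download, so it is shown commented out:
#
# criterion = GeneratorLoss_NEW()
# fake_labels = torch.rand(4)             # discriminator scores for the fake batch
# sr_images = torch.rand(4, 3, 96, 96)    # generated (super-resolved) images
# hr_images = torch.rand(4, 3, 96, 96)    # ground-truth high-resolution images
# g_loss = criterion(fake_labels, sr_images, hr_images)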
| [
[
[
7,
12
],
[
2051,
2056
],
[
735,
740
],
[
1702,
1707
],
[
1777,
1782
],
[
2244,
2249
],
[
2277,
2282
],
[
2327,
2332
]
],
[
[
31,
33
],
[
101,
103
],
[
1335,
1337
],
[
319,
321
],
[
519,
521
]
],
[
[
69,
74
],
[
201,
206
]
],
[
[
83,
100
],
[
2407,
2424
],
[
151,
168
]
],
[
[
1328,
1334
],
[
555,
561
],
[
1403,
1409
]
],
[
[
2031,
2050
],
[
596,
615
],
[
2137,
2156
]
],
[
[
2398,
2404
],
[
2437,
2443
]
]
] |
# -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to create a Pipeline for the Data Pipelines API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.datapipelines import util
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.datapipelines import flags
_DETAILED_HELP = {
'DESCRIPTION':
'{description}',
'EXAMPLES':
""" \
To create a BATCH Data Pipeline ``PIPELINE_NAME'' in project ``example'' in region ``us-central1'', run:
$ {command} PIPELINE_NAME --project=example --region=us-central1
--pipeline-type=BATCH
--template-file-gcs-location='gs://path_to_template_file'
--parameters=inputFile="gs://path_to_input_file",output="gs://path_to_output_file"
--schedule="0 * * * *" --temp-location="gs://path_to_temp_location"
""",
}
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class Create(base.CreateCommand):
"""Creates Data Pipelines Pipeline."""
detailed_help = _DETAILED_HELP
@staticmethod
def Args(parser):
flags.AddCreatePipelineFlags(parser)
flags.GetDisplayNameArg('Data Pipelines pipeline').AddToParser(parser)
flags.GetPipelineTypeArg(required=True).AddToParser(parser)
flags.GetTemplateTypeArg(required=False).AddToParser(parser)
flags.GetScheduleArg(required=False).AddToParser(parser)
flags.GetTimeZoneArg(required=False).AddToParser(parser)
flags.GetTemplateFileGcsLocationArg(required=False).AddToParser(parser)
flags.GetParametersArg(required=False).AddToParser(parser)
flags.GetMaxWorkersArg(required=False).AddToParser(parser)
flags.GetNumWorkersArg(required=False).AddToParser(parser)
flags.GetNetworkArg(required=False).AddToParser(parser)
flags.GetSubnetworkArg(required=False).AddToParser(parser)
flags.GetWorkerMachineTypeArg(required=False).AddToParser(parser)
flags.GetTempLocationArg(required=False).AddToParser(parser)
flags.GetDataflowKmsKeyArg(required=False).AddToParser(parser)
flags.GetDisablePublicIpsArg(required=False).AddToParser(parser)
flags.GetDataflowServiceAccountEmailArg(required=False).AddToParser(parser)
flags.GetEnableStreamingEngineArg(required=False).AddToParser(parser)
flags.GetAdditionalExperimentsArg(required=False).AddToParser(parser)
flags.GetAdditionalUserLabelsArg(required=False).AddToParser(parser)
flags.GetWorkerRegionArgs(required=False).AddToParser(parser)
flags.GetFlexRsGoalArg(required=False).AddToParser(parser)
flags.GetStreamingUpdateArgs(required=False).AddToParser(parser)
def Run(self, args):
"""Run the create command."""
client = util.PipelinesClient()
pipelines_ref = args.CONCEPTS.pipeline.Parse()
region_ref = pipelines_ref.Parent()
return client.Create(
pipeline=pipelines_ref.RelativeName(),
parent=region_ref.RelativeName(),
args=args)
| [
[
[
708,
723
]
],
[
[
747,
755
]
],
[
[
779,
795
]
],
[
[
846,
850
],
[
3324,
3328
]
],
[
[
887,
891
],
[
1595,
1599
],
[
1539,
1543
],
[
1558,
1562
]
],
[
[
945,
950
],
[
1732,
1737
],
[
1773,
1778
],
[
1848,
1853
],
[
1912,
1917
],
[
1977,
1982
],
[
2038,
2043
],
[
2099,
2104
],
[
2175,
2180
],
[
2238,
2243
],
[
2301,
2306
],
[
2364,
2369
],
[
2424,
2429
],
[
2487,
2492
],
[
2557,
2562
],
[
2622,
2627
],
[
2689,
2694
],
[
2758,
2763
],
[
2838,
2843
],
[
2912,
2917
],
[
2986,
2991
],
[
3059,
3064
],
[
3125,
3130
],
[
3188,
3193
]
],
[
[
952,
966
],
[
1676,
1690
]
],
[
[
1588,
1594
]
]
] |
from kivy.graphics import Color
from .navigation import Navigation
class Colors:
WHITE = Color(1, 1, 1, 1)
BLACK = Color(0, 0, 0, 1)
GREY = Color(.8, .8, .8, 1)
RED = Color(1, 0, 0, 1)
GREEN = Color(0, 1, 0, 1)
BLUE = Color(0, 0, 1, 1)
@staticmethod
def lerp(value, *args):
if value <= 0:
return args[0]
elif value >= 1:
return args[-1]
a = None
b = None
pos = 2
neg = -2
slice = 1 / (len(args) - 1)
for i in range(len(args)):
v = i * slice
diff = value - v
if diff == 0:
return args[i]
elif diff > 0:
if diff < pos:
b = args[i]
pos = diff
else:
if diff > neg:
a = args[i]
neg = diff
pvalue = pos / slice
nvalue = 1 - pvalue
return Color(
a.r * pvalue + b.r * nvalue,
a.g * pvalue + b.g * nvalue,
a.b * pvalue + b.b * nvalue,
1
)
| [
[
[
26,
31
],
[
95,
100
],
[
125,
130
],
[
155,
160
],
[
187,
192
],
[
217,
222
],
[
246,
251
],
[
975,
980
]
],
[
[
57,
67
]
],
[
[
75,
81
]
]
] |
from core.plugins.openstack import (
OpenstackChecksBase,
)
FEATURES = {'neutron': {'main': [
'availability_zone'],
'openvswitch-agent': [
'l2_population',
'firewall_driver'],
'l3-agent': [
'agent_mode',
'ovs_use_veth'],
'dhcp-agent': [
'enable_metadata_network',
'enable_isolated_metadata',
'ovs_use_veth']},
'nova': {'main': [
'vcpu_pin_set',
'cpu_shared_set',
'cpu_dedicated_set',
'live_migration_permit_auto_converge',
'live_migration_permit_post_copy',
]}}
# checked against neutron
DEFAULTS = {'neutron': {'dhcp-agent': {
'enable_metadata_network': False,
'enable_isolated_metadata': False}},
'nova': {'main': {'live_migration_permit_auto_converge': False,
'live_migration_permit_post_copy': False}}}
YAML_PRIORITY = 5
class ServiceFeatureChecks(OpenstackChecksBase):
@property
def output(self):
if self._output:
return {"features": self._output}
def get_service_features(self):
"""
This is used to display whether or not specific features are enabled.
"""
for service in FEATURES:
for module in FEATURES[service]:
module_features = {}
cfg = self.ost_projects.all[service].config[module]
if not cfg.exists:
continue
for key in FEATURES[service][module]:
val = cfg.get(key)
if val is not None:
module_features[key] = val
if key not in module_features:
if key in DEFAULTS.get(service, {}).get(module, {}):
default = DEFAULTS[service][module][key]
module_features[key] = default
# TODO: only include modules for which there is an actual agent
# installed since otherwise their config is irrelevant.
if module_features:
if service not in self._output:
self._output[service] = {}
self._output[service][module] = module_features
def __call__(self):
# Only run if we think Openstack is installed.
if not self.openstack_installed:
return
self.get_service_features()
| [
[
[
41,
60
],
[
1310,
1329
]
],
[
[
65,
73
],
[
1602,
1610
],
[
1638,
1646
],
[
1854,
1862
]
],
[
[
946,
954
],
[
2097,
2105
],
[
2178,
2186
]
],
[
[
1263,
1276
]
],
[
[
1289,
1309
]
]
] |
from rest_framework import serializers
from wallet.models import UserWallet, PaymentMethod, DriverWallet
class UserWalletSerializer(serializers.ModelSerializer):
class Meta:
model = UserWallet
fields = "__all__"
class DriverWalletSerializer(serializers.ModelSerializer):
class Meta:
model = DriverWallet
fields = "__all__"
class PaymentMethodSerializer(serializers.ModelSerializer):
class Meta:
model = PaymentMethod
fields = "__all__"
| [
[
[
27,
38
],
[
134,
145
],
[
265,
276
],
[
399,
410
]
],
[
[
65,
75
],
[
196,
206
]
],
[
[
77,
90
],
[
461,
474
]
],
[
[
92,
104
],
[
327,
339
]
],
[
[
113,
133
]
],
[
[
242,
264
]
],
[
[
375,
398
]
]
] |
from typing import List, Callable
from autumn.curve import scale_up_function
def get_importation_rate_func_as_birth_rates(
importation_times: List[float],
importation_n_cases: List[float],
detect_prop_func,
starting_pops: list,
):
"""
    When imported cases are explicitly simulated as part of the modelled population, they enter the late_infectious
    compartment through a birth process.
"""
    # inflate importation numbers to account for undetected cases (assumed to be asymptomatic or symptomatic non-hospitalised)
for i, time in enumerate(importation_times):
importation_n_cases[i] /= detect_prop_func(time)
# scale-up curve for importation numbers
importation_numbers_scale_up = scale_up_function(
importation_times, importation_n_cases, method=4, smoothness=5.0, bound_low=0.0
)
def recruitment_rate(t):
return importation_numbers_scale_up(t) / sum(starting_pops)
return recruitment_rate
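# Illustrative sketch (not part of the original module): the factory above is
# called with case counts per time point, a detection-proportion function and
# the starting population sizes. The numbers and the constant detection
# proportion below are hypothetical, and the call is shown commented out since
# it needs the autumn package:
#
# rate_func = get_importation_rate_func_as_birth_rates(
#     importation_times=[0.0, 10.0, 20.0],
#     importation_n_cases=[5.0, 10.0, 2.0],
#     detect_prop_func=lambda t: 0.6,   # assume 60% of imported cases are detected
#     starting_pops=[1_000_000],
# )
# birth_rate_at_day_15 = rate_func(15.0)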
# dummy proportions for now:
# FIXME: These are parameters!
IMPORTATION_PROPS_BY_AGE = {
"0": 0.04,
"5": 0.04,
"10": 0.04,
"15": 0.04,
"20": 0.08,
"25": 0.09,
"30": 0.09,
"35": 0.09,
"40": 0.09,
"45": 0.08,
"50": 0.08,
"55": 0.08,
"60": 0.04,
"65": 0.04,
"70": 0.04,
"75": 0.04,
}
| [
[
[
19,
23
],
[
148,
152
],
[
186,
190
]
],
[
[
25,
33
]
],
[
[
59,
76
],
[
724,
741
]
],
[
[
83,
123
]
],
[
[
1026,
1050
]
]
] |
class Mahasiswa:
def __init__(self, nama, nilai):
self.nama = nama
self.nilai = nilai
def hitung_nilai(self):
return sum(self.nilai)/len(self.nilai)
mahasiswa = Mahasiswa("Fazlur", (90,70,70,70))
print("Nama :", mahasiswa.nama)
print("Total Nilai :", mahasiswa.hitung_nilai()) | [
[
[
6,
15
],
[
199,
208
]
],
[
[
187,
196
],
[
251,
260
],
[
291,
300
]
]
] |
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import pkgutil
import importlib.util
import time
import threading
import sys
from typing import NamedTuple, Any, Union, TYPE_CHECKING, Optional
from .i18n import _
from .util import (profiler, DaemonThread, UserCancelled, ThreadJob, UserFacingException)
from . import bip32
from . import plugins
from .simple_config import SimpleConfig
from .logging import get_logger, Logger
if TYPE_CHECKING:
from .plugins.hw_wallet import HW_PluginBase
_logger = get_logger(__name__)
plugin_loaders = {}
hook_names = set()
hooks = {}
class Plugins(DaemonThread):
LOGGING_SHORTCUT = 'p'
@profiler
def __init__(self, config: SimpleConfig, gui_name):
DaemonThread.__init__(self)
self.setName('Plugins')
self.pkgpath = os.path.dirname(plugins.__file__)
self.config = config
self.hw_wallets = {}
self.plugins = {}
self.gui_name = gui_name
self.descriptions = {}
self.device_manager = DeviceMgr(config)
self.load_plugins()
self.add_jobs(self.device_manager.thread_jobs())
self.start()
def load_plugins(self):
for loader, name, ispkg in pkgutil.iter_modules([self.pkgpath]):
full_name = f'electrum_mona.plugins.{name}'
spec = importlib.util.find_spec(full_name)
if spec is None: # pkgutil found it but importlib can't ?!
raise Exception(f"Error pre-loading {full_name}: no spec")
try:
module = importlib.util.module_from_spec(spec)
# sys.modules needs to be modified for relative imports to work
# see https://stackoverflow.com/a/50395128
sys.modules[spec.name] = module
spec.loader.exec_module(module)
except Exception as e:
raise Exception(f"Error pre-loading {full_name}: {repr(e)}") from e
d = module.__dict__
gui_good = self.gui_name in d.get('available_for', [])
if not gui_good:
continue
details = d.get('registers_wallet_type')
if details:
self.register_wallet_type(name, gui_good, details)
details = d.get('registers_keystore')
if details:
self.register_keystore(name, gui_good, details)
self.descriptions[name] = d
if not d.get('requires_wallet_type') and self.config.get('use_' + name):
try:
self.load_plugin(name)
except BaseException as e:
self.logger.exception(f"cannot initialize plugin {name}: {e}")
def get(self, name):
return self.plugins.get(name)
def count(self):
return len(self.plugins)
def load_plugin(self, name):
if name in self.plugins:
return self.plugins[name]
full_name = f'electrum_mona.plugins.{name}.{self.gui_name}'
spec = importlib.util.find_spec(full_name)
if spec is None:
raise RuntimeError("%s implementation for %s plugin not found"
% (self.gui_name, name))
try:
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
plugin = module.Plugin(self, self.config, name)
except Exception as e:
raise Exception(f"Error loading {name} plugin: {repr(e)}") from e
self.add_jobs(plugin.thread_jobs())
self.plugins[name] = plugin
self.logger.info(f"loaded {name}")
return plugin
def close_plugin(self, plugin):
self.remove_jobs(plugin.thread_jobs())
def enable(self, name):
self.config.set_key('use_' + name, True, True)
p = self.get(name)
if p:
return p
return self.load_plugin(name)
def disable(self, name):
self.config.set_key('use_' + name, False, True)
p = self.get(name)
if not p:
return
self.plugins.pop(name)
p.close()
self.logger.info(f"closed {name}")
def toggle(self, name):
p = self.get(name)
return self.disable(name) if p else self.enable(name)
def is_available(self, name, w):
d = self.descriptions.get(name)
if not d:
return False
deps = d.get('requires', [])
for dep, s in deps:
try:
__import__(dep)
except ImportError as e:
self.logger.warning(f'Plugin {name} unavailable: {repr(e)}')
return False
requires = d.get('requires_wallet_type', [])
return not requires or w.wallet_type in requires
def get_hardware_support(self):
out = []
for name, (gui_good, details) in self.hw_wallets.items():
if gui_good:
try:
p = self.get_plugin(name)
if p.is_enabled():
out.append(HardwarePluginToScan(name=name,
description=details[2],
plugin=p,
exception=None))
except Exception as e:
self.logger.exception(f"cannot load plugin for: {name}")
out.append(HardwarePluginToScan(name=name,
description=details[2],
plugin=None,
exception=e))
return out
def register_wallet_type(self, name, gui_good, wallet_type):
from .wallet import register_wallet_type, register_constructor
self.logger.info(f"registering wallet type {(wallet_type, name)}")
def loader():
plugin = self.get_plugin(name)
register_constructor(wallet_type, plugin.wallet_class)
register_wallet_type(wallet_type)
plugin_loaders[wallet_type] = loader
def register_keystore(self, name, gui_good, details):
from .keystore import register_keystore
def dynamic_constructor(d):
return self.get_plugin(name).keystore_class(d)
if details[0] == 'hardware':
self.hw_wallets[name] = (gui_good, details)
self.logger.info(f"registering hardware {name}: {details}")
register_keystore(details[1], dynamic_constructor)
def get_plugin(self, name):
if not name in self.plugins:
self.load_plugin(name)
return self.plugins[name]
def run(self):
while self.is_running():
time.sleep(0.1)
self.run_jobs()
self.on_stop()
def hook(func):
hook_names.add(func.__name__)
return func
def run_hook(name, *args):
results = []
f_list = hooks.get(name, [])
for p, f in f_list:
if p.is_enabled():
try:
r = f(*args)
except Exception:
_logger.exception(f"Plugin error. plugin: {p}, hook: {name}")
r = False
if r:
results.append(r)
if results:
assert len(results) == 1, results
return results[0]
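# --- Illustrative sketch (not part of Electrum; the names below are hypothetical
# examples). It shows how @hook and run_hook() cooperate: a plugin decorates a
# method with @hook, BasePlugin.__init__ registers it in `hooks`, and application
# code fires it with run_hook('<hook name>', *args).
def _hook_mechanism_example():
    class ExamplePlugin(BasePlugin):
        @hook
        def on_example_event(self, payload):
            # Called via run_hook('on_example_event', payload) for every enabled
            # plugin defining this hook; at most one truthy result is returned.
            return f"handled {payload}"
    return ExamplePlugin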
class BasePlugin(Logger):
def __init__(self, parent, config, name):
self.parent = parent # The plugins object
self.name = name
self.config = config
self.wallet = None
Logger.__init__(self)
# add self to hooks
for k in dir(self):
if k in hook_names:
l = hooks.get(k, [])
l.append((self, getattr(self, k)))
hooks[k] = l
def __str__(self):
return self.name
def close(self):
# remove self from hooks
for attr_name in dir(self):
if attr_name in hook_names:
# found attribute in self that is also the name of a hook
l = hooks.get(attr_name, [])
try:
l.remove((self, getattr(self, attr_name)))
except ValueError:
# maybe attr name just collided with hook name and was not hook
continue
hooks[attr_name] = l
self.parent.close_plugin(self)
self.on_close()
def on_close(self):
pass
def requires_settings(self):
return False
def thread_jobs(self):
return []
def is_enabled(self):
return self.is_available() and self.config.get('use_'+self.name) is True
def is_available(self):
return True
def can_user_disable(self):
return True
def settings_dialog(self):
pass
class DeviceUnpairableError(UserFacingException): pass
class HardwarePluginLibraryUnavailable(Exception): pass
class Device(NamedTuple):
path: Union[str, bytes]
interface_number: int
id_: str
product_key: Any # when using hid, often Tuple[int, int]
usage_page: int
transport_ui_string: str
class DeviceInfo(NamedTuple):
device: Device
label: Optional[str] = None
initialized: Optional[bool] = None
exception: Optional[Exception] = None
class HardwarePluginToScan(NamedTuple):
name: str
description: str
plugin: Optional['HW_PluginBase']
exception: Optional[Exception]
class DeviceMgr(ThreadJob):
'''Manages hardware clients. A client communicates over a hardware
channel with the device.
In addition to tracking device HID IDs, the device manager tracks
hardware wallets and manages wallet pairing. A HID ID may be
paired with a wallet when it is confirmed that the hardware device
matches the wallet, i.e. they have the same master public key. A
HID ID can be unpaired if e.g. it is wiped.
Because of hotplugging, a wallet must request its client
dynamically each time it is required, rather than caching it
itself.
The device manager is shared across plugins, so just one place
does hardware scans when needed. By tracking HID IDs, if a device
is plugged into a different port the wallet is automatically
re-paired.
    Wallets are informed on connect / disconnect events. They must
    implement the connected() and disconnected() callbacks. Being connected
implies a pairing. Callbacks can happen in any thread context,
and we do them without holding the lock.
Confusingly, the HID ID (serial number) reported by the HID system
doesn't match the device ID reported by the device itself. We use
the HID IDs.
This plugin is thread-safe. Currently only devices supported by
hidapi are implemented.'''
def __init__(self, config):
ThreadJob.__init__(self)
        # Keyed by xpub. The value is the device id if a device
        # has been paired, and None otherwise.
self.xpub_ids = {}
# A list of clients. The key is the client, the value is
# a (path, id_) pair.
self.clients = {}
# What we recognise. Each entry is a (vendor_id, product_id)
# pair.
self.recognised_hardware = set()
# Custom enumerate functions for devices we don't know about.
self.enumerate_func = set()
# For synchronization
self.lock = threading.RLock()
self.hid_lock = threading.RLock()
self.config = config
def thread_jobs(self):
# Thread job to handle device timeouts
return [self]
def run(self):
'''Handle device timeouts. Runs in the context of the Plugins
thread.'''
with self.lock:
clients = list(self.clients.keys())
cutoff = time.time() - self.config.get_session_timeout()
for client in clients:
client.timeout(cutoff)
def register_devices(self, device_pairs):
for pair in device_pairs:
self.recognised_hardware.add(pair)
def register_enumerate_func(self, func):
self.enumerate_func.add(func)
def create_client(self, device, handler, plugin):
# Get from cache first
client = self.client_lookup(device.id_)
if client:
return client
client = plugin.create_client(device, handler)
if client:
self.logger.info(f"Registering {client}")
with self.lock:
self.clients[client] = (device.path, device.id_)
return client
def xpub_id(self, xpub):
with self.lock:
return self.xpub_ids.get(xpub)
def xpub_by_id(self, id_):
with self.lock:
for xpub, xpub_id in self.xpub_ids.items():
if xpub_id == id_:
return xpub
return None
def unpair_xpub(self, xpub):
with self.lock:
if xpub not in self.xpub_ids:
return
_id = self.xpub_ids.pop(xpub)
self._close_client(_id)
def unpair_id(self, id_):
xpub = self.xpub_by_id(id_)
if xpub:
self.unpair_xpub(xpub)
else:
self._close_client(id_)
def _close_client(self, id_):
client = self.client_lookup(id_)
self.clients.pop(client, None)
if client:
client.close()
def pair_xpub(self, xpub, id_):
with self.lock:
self.xpub_ids[xpub] = id_
def client_lookup(self, id_):
with self.lock:
for client, (path, client_id) in self.clients.items():
if client_id == id_:
return client
return None
def client_by_id(self, id_):
'''Returns a client for the device ID if one is registered. If
a device is wiped or in bootloader mode pairing is impossible;
in such cases we communicate by device ID and not wallet.'''
self.scan_devices()
return self.client_lookup(id_)
def client_for_keystore(self, plugin, handler, keystore, force_pair):
self.logger.info("getting client for keystore")
if handler is None:
raise Exception(_("Handler not found for") + ' ' + plugin.name + '\n' + _("A library is probably missing."))
handler.update_status(False)
devices = self.scan_devices()
xpub = keystore.xpub
derivation = keystore.get_derivation()
client = self.client_by_xpub(plugin, xpub, handler, devices)
if client is None and force_pair:
info = self.select_device(plugin, handler, keystore, devices)
client = self.force_pair_xpub(plugin, handler, info, xpub, derivation, devices)
if client:
handler.update_status(True)
self.logger.info("end client for keystore")
return client
def client_by_xpub(self, plugin, xpub, handler, devices):
_id = self.xpub_id(xpub)
client = self.client_lookup(_id)
if client:
# An unpaired client might have another wallet's handler
# from a prior scan. Replace to fix dialog parenting.
client.handler = handler
return client
for device in devices:
if device.id_ == _id:
return self.create_client(device, handler, plugin)
def force_pair_xpub(self, plugin, handler, info, xpub, derivation, devices):
# The wallet has not been previously paired, so let the user
# choose an unpaired device and compare its first address.
xtype = bip32.xpub_type(xpub)
client = self.client_lookup(info.device.id_)
if client and client.is_pairable():
# See comment above for same code
client.handler = handler
# This will trigger a PIN/passphrase entry request
try:
client_xpub = client.get_xpub(derivation, xtype)
except (UserCancelled, RuntimeError):
# Bad / cancelled PIN / passphrase
client_xpub = None
if client_xpub == xpub:
self.pair_xpub(xpub, info.device.id_)
return client
# The user input has wrong PIN or passphrase, or cancelled input,
# or it is not pairable
raise DeviceUnpairableError(
_('Electrum cannot pair with your {}.\n\n'
'Before you request bitcoins to be sent to addresses in this '
'wallet, ensure you can pair with your device, or that you have '
'its seed (and passphrase, if any). Otherwise all bitcoins you '
'receive will be unspendable.').format(plugin.device))
def unpaired_device_infos(self, handler, plugin: 'HW_PluginBase', devices=None,
include_failing_clients=False):
'''Returns a list of DeviceInfo objects: one for each connected,
unpaired device accepted by the plugin.'''
if not plugin.libraries_available:
message = plugin.get_library_not_available_message()
raise HardwarePluginLibraryUnavailable(message)
if devices is None:
devices = self.scan_devices()
devices = [dev for dev in devices if not self.xpub_by_id(dev.id_)]
infos = []
for device in devices:
if device.product_key not in plugin.DEVICE_IDS:
continue
try:
client = self.create_client(device, handler, plugin)
except Exception as e:
self.logger.error(f'failed to create client for {plugin.name} at {device.path}: {repr(e)}')
if include_failing_clients:
infos.append(DeviceInfo(device=device, exception=e))
continue
if not client:
continue
infos.append(DeviceInfo(device=device,
label=client.label(),
initialized=client.is_initialized()))
return infos
def select_device(self, plugin, handler, keystore, devices=None):
'''Ask the user to select a device to use if there is more than one,
and return the DeviceInfo for the device.'''
while True:
infos = self.unpaired_device_infos(handler, plugin, devices)
if infos:
break
msg = _('Please insert your {}').format(plugin.device)
if keystore.label:
msg += ' ({})'.format(keystore.label)
msg += '. {}\n\n{}'.format(
_('Verify the cable is connected and that '
'no other application is using it.'),
_('Try to connect again?')
)
if not handler.yes_no_question(msg):
raise UserCancelled()
devices = None
if len(infos) == 1:
return infos[0]
# select device by label
for info in infos:
if info.label == keystore.label:
return info
msg = _("Please select which {} device to use:").format(plugin.device)
descriptions = [str(info.label) + ' (%s)'%(_("initialized") if info.initialized else _("wiped")) for info in infos]
c = handler.query_choice(msg, descriptions)
if c is None:
raise UserCancelled()
info = infos[c]
# save new label
keystore.set_label(info.label)
if handler.win.wallet is not None:
handler.win.wallet.save_keystore()
return info
def _scan_devices_with_hid(self):
try:
import hid
except ImportError:
return []
with self.hid_lock:
hid_list = hid.enumerate(0, 0)
devices = []
for d in hid_list:
product_key = (d['vendor_id'], d['product_id'])
if product_key in self.recognised_hardware:
# Older versions of hid don't provide interface_number
interface_number = d.get('interface_number', -1)
usage_page = d['usage_page']
id_ = d['serial_number']
if len(id_) == 0:
id_ = str(d['path'])
id_ += str(interface_number) + str(usage_page)
devices.append(Device(path=d['path'],
interface_number=interface_number,
id_=id_,
product_key=product_key,
usage_page=usage_page,
transport_ui_string='hid'))
return devices
def scan_devices(self):
self.logger.info("scanning devices...")
# First see what's connected that we know about
devices = self._scan_devices_with_hid()
# Let plugin handlers enumerate devices we don't know about
for f in self.enumerate_func:
try:
new_devices = f()
except BaseException as e:
self.logger.error('custom device enum failed. func {}, error {}'
.format(str(f), repr(e)))
else:
devices.extend(new_devices)
# find out what was disconnected
pairs = [(dev.path, dev.id_) for dev in devices]
disconnected_ids = []
with self.lock:
connected = {}
for client, pair in self.clients.items():
if pair in pairs and client.has_usable_connection_with_device():
connected[client] = pair
else:
disconnected_ids.append(pair[1])
self.clients = connected
# Unpair disconnected devices
for id_ in disconnected_ids:
self.unpair_id(id_)
return devices
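# --- Illustrative sketch (not part of Electrum; plugin/handler/keystore here are
# hypothetical stand-ins). Rough shape of the pairing flow described in the
# DeviceMgr docstring: register the recognised hardware, then ask for a client
# for a keystore; force_pair=True lets the manager pair by comparing xpubs.
def _device_manager_usage_example(plugin, handler, keystore):
    devmgr = DeviceMgr(config=SimpleConfig())
    devmgr.register_devices(plugin.DEVICE_IDS)  # iterable of (vendor_id, product_id)
    client = devmgr.client_for_keystore(plugin, handler, keystore, force_pair=True)
    return client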
| [
[
[
1169,
1171
],
[
1921,
1923
]
],
[
[
1179,
1186
],
[
2321,
2328
]
],
[
[
1194,
1208
],
[
2434,
2443
],
[
2659,
2668
],
[
4108,
4117
],
[
4334,
4343
]
],
[
[
1216,
1220
],
[
7853,
7857
],
[
12844,
12848
]
],
[
[
1228,
1237
],
[
12459,
12468
],
[
12501,
12510
]
],
[
[
1245,
1248
],
[
2852,
2855
]
],
[
[
1268,
1278
],
[
10029,
10039
],
[
10240,
10250
],
[
10414,
10424
]
],
[
[
1280,
1283
],
[
10126,
10129
]
],
[
[
1285,
1290
],
[
10052,
10057
]
],
[
[
1292,
1305
],
[
1553,
1566
]
],
[
[
1307,
1315
],
[
10283,
10291
],
[
10321,
10329
],
[
10358,
10366
],
[
10474,
10482
],
[
10515,
10523
]
],
[
[
1335,
1336
],
[
15239,
15240
],
[
15295,
15296
],
[
17376,
17377
],
[
19428,
19429
],
[
19618,
19619
],
[
19734,
19735
],
[
20092,
20093
],
[
20208,
20209
],
[
20250,
20251
]
],
[
[
1356,
1364
],
[
1765,
1773
]
],
[
[
1366,
1378
],
[
1716,
1728
],
[
1838,
1850
]
],
[
[
1380,
1393
],
[
16983,
16996
],
[
19846,
19859
],
[
20373,
20386
]
],
[
[
1395,
1404
],
[
10553,
10562
],
[
11902,
11911
]
],
[
[
1406,
1425
],
[
9931,
9950
]
],
[
[
1441,
1446
],
[
16616,
16621
]
],
[
[
1461,
1468
],
[
1937,
1944
]
],
[
[
1496,
1508
],
[
1805,
1817
]
],
[
[
1530,
1540
],
[
1629,
1639
]
],
[
[
1542,
1548
],
[
8453,
8459
],
[
8649,
8655
]
],
[
[
1603,
1616
]
],
[
[
1619,
1626
],
[
8209,
8216
]
],
[
[
1650,
1664
],
[
7182,
7196
]
],
[
[
1670,
1680
],
[
7942,
7952
],
[
8747,
8757
],
[
9044,
9054
]
],
[
[
1689,
1694
],
[
8046,
8051
],
[
8779,
8784
],
[
8863,
8868
],
[
9150,
9155
],
[
9423,
9428
]
],
[
[
1708,
1715
]
],
[
[
7926,
7930
]
],
[
[
7993,
8001
]
],
[
[
8442,
8452
]
],
[
[
9909,
9930
],
[
17341,
17362
]
],
[
[
9964,
9996
],
[
18122,
18154
]
],
[
[
10022,
10028
],
[
10265,
10271
],
[
21340,
21346
]
],
[
[
10229,
10239
],
[
18750,
18760
],
[
18892,
18902
]
],
[
[
10393,
10413
],
[
6132,
6152
],
[
6530,
6550
]
],
[
[
10543,
10552
],
[
2133,
2142
]
]
] |
"""
Contains classes related to Roblox group data and parsing.
"""
from typing import Optional, Tuple
from .bases.basegroup import BaseGroup
from .partials.partialuser import PartialUser
from .shout import Shout
from .utilities.shared import ClientSharedObject
class Group(BaseGroup):
"""
    Represents a Roblox group.
Attributes:
_shared: The shared object, which is passed to all objects this client generates.
id: the id of the group.
name: name of the group.
description: description of the group.
owner: player who owns the group.
shout: the current group shout.
        member_count: the number of members in the group.
        is_builders_club_only: whether only Builders Club members can join the group.
        public_entry_allowed: whether you can join without your join request being accepted.
is_locked: Is the group locked?
"""
def __init__(self, shared: ClientSharedObject, data: dict):
"""
Arguments:
data: The data we get back from the endpoint.
shared: The shared object, which is passed to all objects this client generates.
"""
super().__init__(shared, data["id"])
self._shared: ClientSharedObject = shared
self.id: int = data["id"]
self.name: str = data["name"]
self.description: str = data["description"]
self.owner: PartialUser = PartialUser(shared=shared, data=data["owner"])
self.shout: Optional[Shout] = data["shout"] and Shout(
shared=self._shared,
data=data["shout"]
) or None
self.member_count: int = data["memberCount"]
self.is_builders_club_only: bool = data["isBuildersClubOnly"]
self.public_entry_allowed: bool = data["publicEntryAllowed"]
self.is_locked: bool = data.get("isLocked") or False
def __repr__(self):
return f"<{self.__class__.__name__} id={self.id} name={self.name!r} owner={self.owner}>"
async def update_shout(self, message: str, update_self: bool = True) -> Tuple[Optional[Shout], Optional[Shout]]:
"""
Updates the shout.
Arguments:
message: The new shout message.
update_self: Whether to update self.shout automatically.
"""
shout_response = await self._requests.patch(
url=self._shared.url_generator.get_url("groups", f"v1/groups/{self.id}/status"),
json={
"message": message
}
)
shout_data = shout_response.json()
old_shout: Optional[Shout] = self.shout
new_shout: Optional[Shout] = shout_data and Shout(
shared=self._shared,
data=shout_data
) or None
if update_self:
self.shout = new_shout
return old_shout, new_shout
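# --- Illustrative sketch (hypothetical payload; a real ClientSharedObject comes
# from an authenticated client, and the exact owner keys depend on the Roblox API
# response). Shows the shape of the data dict this class parses.
_EXAMPLE_GROUP_DATA = {
    "id": 1,
    "name": "Example Group",
    "description": "A hypothetical group used for illustration.",
    "owner": {"userId": 2, "username": "ExampleOwner", "displayName": "ExampleOwner"},
    "shout": None,
    "memberCount": 3,
    "isBuildersClubOnly": False,
    "publicEntryAllowed": True,
}
# With a shared object in hand, parsing and updating the shout would look like:
#     group = Group(shared=shared, data=_EXAMPLE_GROUP_DATA)
#     old_shout, new_shout = await group.update_shout("Hello, world!")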
| [
[
[
89,
97
],
[
1476,
1484
],
[
2059,
2067
],
[
2076,
2084
],
[
2566,
2574
],
[
2614,
2622
]
],
[
[
99,
104
],
[
2053,
2058
]
],
[
[
135,
144
],
[
279,
288
]
],
[
[
179,
190
],
[
1409,
1420
],
[
1395,
1406
]
],
[
[
210,
215
],
[
1512,
1517
],
[
1485,
1490
],
[
2068,
2073
],
[
2085,
2090
],
[
2575,
2580
],
[
2647,
2652
],
[
2623,
2628
]
],
[
[
246,
264
],
[
927,
945
],
[
1222,
1240
]
],
[
[
273,
278
]
]
] |
# noqa
from typing import Any, BinaryIO
class CustomSerializer:
"""Custom serializer implementation to test the injection of different serialization strategies to an input."""
@property
def extension(self) -> str: # noqa
return "ext"
def serialize(self, value: Any, writer: BinaryIO): # noqa
raise NotImplementedError()
def deserialize(self, reader: BinaryIO) -> Any: # noqa
raise NotImplementedError()
def __repr__(self) -> str: # noqa
return "CustomSerializerInstance"
| [
[
[
26,
29
],
[
290,
293
],
[
406,
409
]
],
[
[
31,
39
],
[
303,
311
],
[
393,
401
]
],
[
[
48,
64
]
]
] |
import io
import os
import re
import struct
from xml.etree import ElementTree
_UNIT_KM = -3
_UNIT_100M = -2
_UNIT_10M = -1
_UNIT_1M = 0
_UNIT_10CM = 1
_UNIT_CM = 2
_UNIT_MM = 3
_UNIT_0_1MM = 4
_UNIT_0_01MM = 5
_UNIT_UM = 6
_UNIT_INCH = 6
_TIFF_TYPE_SIZES = {
1: 1,
2: 1,
3: 2,
4: 4,
5: 8,
6: 1,
7: 1,
8: 2,
9: 4,
10: 8,
11: 4,
12: 8,
}
def _convertToDPI(density, unit):
if unit == _UNIT_KM:
return int(density * 0.0000254 + 0.5)
elif unit == _UNIT_100M:
return int(density * 0.000254 + 0.5)
elif unit == _UNIT_10M:
return int(density * 0.00254 + 0.5)
elif unit == _UNIT_1M:
return int(density * 0.0254 + 0.5)
elif unit == _UNIT_10CM:
return int(density * 0.254 + 0.5)
elif unit == _UNIT_CM:
return int(density * 2.54 + 0.5)
elif unit == _UNIT_MM:
return int(density * 25.4 + 0.5)
elif unit == _UNIT_0_1MM:
return density * 254
elif unit == _UNIT_0_01MM:
return density * 2540
elif unit == _UNIT_UM:
return density * 25400
return density
def _convertToPx(value):
matched = re.match(r"(\d+(?:\.\d+)?)?([a-z]*)$", value)
if not matched:
raise ValueError("unknown length value: %s" % value)
length, unit = matched.groups()
if unit == "":
return float(length)
elif unit == "cm":
return float(length) * 96 / 2.54
elif unit == "mm":
return float(length) * 96 / 2.54 / 10
elif unit == "in":
return float(length) * 96
elif unit == "pc":
return float(length) * 96 / 6
elif unit == "pt":
        return float(length) * 96 / 72  # 1pt = 1/72in = 4/3 px
elif unit == "px":
return float(length)
raise ValueError("unknown unit type: %s" % unit)
def get(filepath):
"""
Return (width, height) for a given img file content
no requirements
:type filepath: Union[bytes, str, pathlib.Path]
:rtype Tuple[int, int]
"""
height = -1
width = -1
if isinstance(filepath, io.BytesIO): # file-like object
fhandle = filepath
else:
fhandle = open(filepath, 'rb')
try:
head = fhandle.read(24)
size = len(head)
# handle GIFs
if size >= 10 and head[:6] in (b'GIF87a', b'GIF89a'):
# Check to see if content_type is correct
try:
width, height = struct.unpack("<hh", head[6:10])
except struct.error:
raise ValueError("Invalid GIF file")
        # handle PNGs: after the 8-byte signature come the chunk length, the IHDR chunk type, then width and height
elif size >= 24 and head.startswith(b'\211PNG\r\n\032\n') and head[12:16] == b'IHDR':
try:
width, height = struct.unpack(">LL", head[16:24])
except struct.error:
raise ValueError("Invalid PNG file")
# Maybe this is for an older PNG version.
elif size >= 16 and head.startswith(b'\211PNG\r\n\032\n'):
# Check to see if we have the right content type
try:
width, height = struct.unpack(">LL", head[8:16])
except struct.error:
raise ValueError("Invalid PNG file")
# handle JPEGs
elif size >= 2 and head.startswith(b'\377\330'):
try:
fhandle.seek(0) # Read 0xff next
size = 2
ftype = 0
while not 0xc0 <= ftype <= 0xcf or ftype in [0xc4, 0xc8, 0xcc]:
fhandle.seek(size, 1)
byte = fhandle.read(1)
while ord(byte) == 0xff:
byte = fhandle.read(1)
ftype = ord(byte)
size = struct.unpack('>H', fhandle.read(2))[0] - 2
# We are at a SOFn block
fhandle.seek(1, 1) # Skip `precision' byte.
height, width = struct.unpack('>HH', fhandle.read(4))
except (struct.error, TypeError):
raise ValueError("Invalid JPEG file")
# handle JPEG2000s
elif size >= 12 and head.startswith(b'\x00\x00\x00\x0cjP \r\n\x87\n'):
fhandle.seek(48)
try:
height, width = struct.unpack('>LL', fhandle.read(8))
except struct.error:
raise ValueError("Invalid JPEG2000 file")
# handle big endian TIFF
elif size >= 8 and head.startswith(b"\x4d\x4d\x00\x2a"):
offset = struct.unpack('>L', head[4:8])[0]
fhandle.seek(offset)
ifdsize = struct.unpack(">H", fhandle.read(2))[0]
for i in range(ifdsize):
tag, datatype, count, data = struct.unpack(">HHLL", fhandle.read(12))
if tag == 256:
if datatype == 3:
width = int(data / 65536)
elif datatype == 4:
width = data
else:
raise ValueError("Invalid TIFF file: width column data type should be SHORT/LONG.")
elif tag == 257:
if datatype == 3:
height = int(data / 65536)
elif datatype == 4:
height = data
else:
raise ValueError("Invalid TIFF file: height column data type should be SHORT/LONG.")
if width != -1 and height != -1:
break
if width == -1 or height == -1:
raise ValueError("Invalid TIFF file: width and/or height IDS entries are missing.")
elif size >= 8 and head.startswith(b"\x49\x49\x2a\x00"):
offset = struct.unpack('<L', head[4:8])[0]
fhandle.seek(offset)
ifdsize = struct.unpack("<H", fhandle.read(2))[0]
for i in range(ifdsize):
tag, datatype, count, data = struct.unpack("<HHLL", fhandle.read(12))
if tag == 256:
width = data
elif tag == 257:
height = data
if width != -1 and height != -1:
break
if width == -1 or height == -1:
raise ValueError("Invalid TIFF file: width and/or height IDS entries are missing.")
# handle little endian BigTiff
elif size >= 8 and head.startswith(b"\x49\x49\x2b\x00"):
bytesize_offset = struct.unpack('<L', head[4:8])[0]
if bytesize_offset != 8:
                raise ValueError('Invalid BigTIFF file: Expected offset to be 8, found {} instead.'.format(bytesize_offset))
offset = struct.unpack('<Q', head[8:16])[0]
fhandle.seek(offset)
ifdsize = struct.unpack("<Q", fhandle.read(8))[0]
for i in range(ifdsize):
tag, datatype, count, data = struct.unpack("<HHQQ", fhandle.read(20))
if tag == 256:
width = data
elif tag == 257:
height = data
if width != -1 and height != -1:
break
if width == -1 or height == -1:
raise ValueError("Invalid BigTIFF file: width and/or height IDS entries are missing.")
# handle SVGs
elif size >= 5 and (head.startswith(b'<?xml') or head.startswith(b'<svg')):
fhandle.seek(0)
data = fhandle.read(1024)
try:
data = data.decode('utf-8')
width = re.search(r'[^-]width="(.*?)"', data).group(1)
height = re.search(r'[^-]height="(.*?)"', data).group(1)
except Exception:
raise ValueError("Invalid SVG file")
width = _convertToPx(width)
height = _convertToPx(height)
# handle Netpbm
elif head[:1] == b"P" and head[1:2] in b"123456":
fhandle.seek(2)
sizes = []
while True:
next_chr = fhandle.read(1)
if next_chr.isspace():
continue
if next_chr == b"":
raise ValueError("Invalid Netpbm file")
if next_chr == b"#":
fhandle.readline()
continue
if not next_chr.isdigit():
raise ValueError("Invalid character found on Netpbm file")
size = next_chr
next_chr = fhandle.read(1)
while next_chr.isdigit():
size += next_chr
next_chr = fhandle.read(1)
sizes.append(int(size))
if len(sizes) == 2:
break
fhandle.seek(-1, os.SEEK_CUR)
width, height = sizes
finally:
fhandle.close()
return width, height
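# --- Illustrative sketch (the file name is a hypothetical example): typical use of
# get(); it accepts bytes, str or pathlib.Path as well as a file-like io.BytesIO.
def _example_get_usage():
    width, height = get("photo.jpg")
    orientation = "landscape" if width >= height else "portrait"
    return width, height, orientation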
def getDPI(filepath):
"""
Return (x DPI, y DPI) for a given img file content
no requirements
:type filepath: Union[bytes, str, pathlib.Path]
:rtype Tuple[int, int]
"""
xDPI = -1
yDPI = -1
if not isinstance(filepath, bytes):
filepath = str(filepath)
with open(filepath, 'rb') as fhandle:
head = fhandle.read(24)
size = len(head)
# handle GIFs
# GIFs doesn't have density
if size >= 10 and head[:6] in (b'GIF87a', b'GIF89a'):
pass
        # handle PNGs: after the 8-byte signature, walk the chunks looking for a pHYs chunk to read the density
elif size >= 24 and head.startswith(b'\211PNG\r\n\032\n'):
chunkOffset = 8
chunk = head[8:]
while True:
chunkType = chunk[4:8]
if chunkType == b'pHYs':
try:
xDensity, yDensity, unit = struct.unpack(">LLB", chunk[8:])
except struct.error:
raise ValueError("Invalid PNG file")
if unit:
xDPI = _convertToDPI(xDensity, _UNIT_1M)
yDPI = _convertToDPI(yDensity, _UNIT_1M)
else: # no unit
xDPI = xDensity
yDPI = yDensity
break
elif chunkType == b'IDAT':
break
else:
try:
dataSize, = struct.unpack(">L", chunk[0:4])
except struct.error:
raise ValueError("Invalid PNG file")
chunkOffset += dataSize + 12
fhandle.seek(chunkOffset)
chunk = fhandle.read(17)
# handle JPEGs
elif size >= 2 and head.startswith(b'\377\330'):
try:
fhandle.seek(0) # Read 0xff next
size = 2
ftype = 0
while not 0xc0 <= ftype <= 0xcf:
if ftype == 0xe0: # APP0 marker
fhandle.seek(7, 1)
unit, xDensity, yDensity = struct.unpack(">BHH", fhandle.read(5))
if unit == 1 or unit == 0:
xDPI = xDensity
yDPI = yDensity
elif unit == 2:
xDPI = _convertToDPI(xDensity, _UNIT_CM)
yDPI = _convertToDPI(yDensity, _UNIT_CM)
break
fhandle.seek(size, 1)
byte = fhandle.read(1)
while ord(byte) == 0xff:
byte = fhandle.read(1)
ftype = ord(byte)
size = struct.unpack('>H', fhandle.read(2))[0] - 2
except struct.error:
raise ValueError("Invalid JPEG file")
# handle JPEG2000s
elif size >= 12 and head.startswith(b'\x00\x00\x00\x0cjP \r\n\x87\n'):
fhandle.seek(32)
# skip JP2 image header box
headerSize = struct.unpack('>L', fhandle.read(4))[0] - 8
fhandle.seek(4, 1)
foundResBox = False
try:
while headerSize > 0:
boxHeader = fhandle.read(8)
boxType = boxHeader[4:]
if boxType == b'res ': # find resolution super box
foundResBox = True
headerSize -= 8
break
boxSize, = struct.unpack('>L', boxHeader[:4])
fhandle.seek(boxSize - 8, 1)
headerSize -= boxSize
if foundResBox:
while headerSize > 0:
boxHeader = fhandle.read(8)
boxType = boxHeader[4:]
if boxType == b'resd': # Display resolution box
yDensity, xDensity, yUnit, xUnit = struct.unpack(">HHBB", fhandle.read(10))
xDPI = _convertToDPI(xDensity, xUnit)
yDPI = _convertToDPI(yDensity, yUnit)
break
boxSize, = struct.unpack('>L', boxHeader[:4])
fhandle.seek(boxSize - 8, 1)
headerSize -= boxSize
except struct.error as e:
raise ValueError("Invalid JPEG2000 file")
return xDPI, yDPI
| [
[
[
7,
9
],
[
2012,
2014
]
],
[
[
17,
19
],
[
8706,
8708
]
],
[
[
27,
29
],
[
1130,
1132
],
[
7483,
7485
],
[
7555,
7557
]
],
[
[
37,
43
],
[
2375,
2381
],
[
2427,
2433
],
[
2718,
2724
],
[
2771,
2777
],
[
3065,
3071
],
[
3117,
3123
],
[
3704,
3710
],
[
3882,
3888
],
[
3940,
3946
],
[
4205,
4211
],
[
4262,
4268
],
[
4453,
4459
],
[
4542,
4548
],
[
4664,
4670
],
[
5675,
5681
],
[
5764,
5770
],
[
5886,
5892
],
[
6411,
6417
],
[
6619,
6625
],
[
6709,
6715
],
[
6831,
6837
],
[
9735,
9741
],
[
9795,
9801
],
[
10324,
10330
],
[
10383,
10389
],
[
10992,
10998
],
[
11620,
11626
],
[
11683,
11689
],
[
11952,
11958
],
[
12422,
12428
],
[
12858,
12864
],
[
13100,
13106
],
[
13253,
13259
]
],
[
[
66,
77
]
],
[
[
79,
87
],
[
413,
421
]
],
[
[
93,
103
],
[
486,
496
]
],
[
[
109,
118
],
[
560,
569
]
],
[
[
124,
132
],
[
632,
640
],
[
9954,
9962
],
[
10019,
10027
]
],
[
[
137,
147
],
[
702,
712
]
],
[
[
152,
160
],
[
773,
781
],
[
11269,
11277
],
[
11338,
11346
]
],
[
[
165,
173
],
[
841,
849
]
],
[
[
178,
189
],
[
909,
920
]
],
[
[
194,
206
],
[
968,
980
]
],
[
[
211,
219
],
[
1029,
1037
]
],
[
[
224,
234
]
],
[
[
240,
256
]
],
[
[
368,
381
],
[
9930,
9943
],
[
9995,
10008
],
[
11245,
11258
],
[
11314,
11327
],
[
12934,
12947
],
[
13000,
13013
]
],
[
[
1095,
1107
],
[
7706,
7718
],
[
7747,
7759
]
],
[
[
1766,
1769
]
],
[
[
8823,
8829
]
]
] |
def digest_target(target):
from .element import digest_element
return digest_element(target)
| [
[
[
5,
18
]
]
] |
# Copyright 2017 The TensorFlow Authors and modified by Emilien Garreau. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Method to subsample minibatches by balancing positives and negatives.
Subsamples minibatches based on a pre-specified positive fraction in range
[0,1]. The class presumes there are many more negatives than positive examples:
if the desired sample_size cannot be achieved with the pre-specified positive
fraction, it fills the rest with negative examples. If this is not sufficient
for obtaining the desired sample_size, it returns fewer examples.
The main function to call is Subsample(self, indicator, labels). For convenience
one can also call SubsampleWeights(self, weights, labels) which is defined in
the minibatch_sampler base class.
When is_static is True, it implements a method that guarantees static shapes.
It also ensures that the length of the subsample output is always sample_size, even
when the number of examples set to True in the indicator is less than sample_size.
"""
import tensorflow as tf
from kerod.utils import ops
def subsample_indicator(indicator, num_samples):
"""Subsample indicator vector.
Given a boolean indicator vector with M elements set to `True`, the function
assigns all but `num_samples` of these previously `True` elements to
`False`. If `num_samples` is greater than M, the original indicator vector
is returned.
Arguments:
- *indicator*: a 1-dimensional boolean tensor indicating which elements
are allowed to be sampled and which are not.
- *num_samples*: int32 scalar tensor
Returns:
A boolean tensor with the same shape as input (indicator) tensor
"""
indices = tf.where(indicator)
indices = tf.random.shuffle(indices)
indices = tf.reshape(indices, [-1])
num_samples = tf.minimum(tf.size(indices), num_samples)
selected_indices = tf.slice(indices, [0], tf.reshape(num_samples, [1]))
selected_indicator = ops.indices_to_dense_vector(selected_indices, tf.shape(indicator)[0])
return tf.equal(selected_indicator, 1)
def sample_balanced_positive_negative(indicator, sample_size, labels, positive_fraction=0.5):
"""Subsamples minibatches to a desired balance of positives and negatives.
Arguments:
- *indicator*: boolean tensor of shape [N] whose True entries can be sampled.
- *sample_size*: desired batch size. If None, keeps all positive samples and
randomly selects negative samples so that the positive sample fraction
matches positive_fraction.
- *labels*: boolean tensor of shape [N] denoting positive(=True) and negative
(=False) examples.
- *positive_fraction*: desired fraction of positive examples (scalar in [0,1])
in the batch.
Returns:
*sampled_idx_indicator*: boolean tensor of shape [N], True for entries which are sampled.
"""
negative_idx = tf.logical_not(labels)
positive_idx = tf.logical_and(labels, indicator)
negative_idx = tf.logical_and(negative_idx, indicator)
# Sample positive and negative samples separately
if sample_size is None:
max_num_pos = tf.reduce_sum(tf.cast(positive_idx, dtype=tf.int32))
else:
max_num_pos = int(positive_fraction * sample_size)
sampled_pos_idx = subsample_indicator(positive_idx, max_num_pos)
num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32))
if sample_size is None:
negative_positive_ratio = (1 - positive_fraction) / positive_fraction
max_num_neg = tf.cast(negative_positive_ratio * tf.cast(num_sampled_pos, dtype=tf.float32),
dtype=tf.int32)
else:
max_num_neg = sample_size - num_sampled_pos
sampled_neg_idx = subsample_indicator(negative_idx, max_num_neg)
return tf.logical_or(sampled_pos_idx, sampled_neg_idx)
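# --- Illustrative sketch (toy tensors, assuming TF2 eager execution; not part of
# the kerod codebase). Requests 4 samples with a 0.5 positive fraction, so the
# returned boolean mask of shape [6] marks at most 2 positives and 2 negatives.
def _example_balanced_sampling():
    indicator = tf.constant([True, True, True, True, True, True])
    labels = tf.constant([True, False, False, True, False, False])
    return sample_balanced_positive_negative(indicator, 4, labels, positive_fraction=0.5)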
def batch_sample_balanced_positive_negative(indicators,
sample_size,
labels,
positive_fraction=0.5,
dtype=tf.float32):
"""Subsamples minibatches to a desired balance of positives and negatives.
Arguments:
    - *indicators*: boolean tensor of shape [batch_size, N] whose True entries can be sampled.
- *sample_size*: desired batch size. If None, keeps all positive samples and
randomly selects negative samples so that the positive sample fraction
matches positive_fraction.
- *labels*: boolean tensor of shape [batch_size, N] denoting positive(=True) and negative
(=False) examples.
- *positive_fraction*: desired fraction of positive examples (scalar in [0,1])
in the batch.
Returns:
A boolean tensor of shape [M, N], True for entries which are sampled.
"""
def _minibatch_subsample_fn(inputs):
indicators, targets = inputs
return sample_balanced_positive_negative(tf.cast(indicators, tf.bool),
sample_size,
tf.cast(targets, tf.bool),
positive_fraction=positive_fraction)
return tf.cast(tf.map_fn(_minibatch_subsample_fn, [indicators, labels],
dtype=tf.bool,
parallel_iterations=16,
back_prop=True),
dtype=dtype)
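# --- Illustrative sketch (toy batch, an assumption for demonstration): the batched
# helper applies the same per-row balancing via tf.map_fn and returns a float mask
# of shape [batch_size, N] by default.
def _example_batched_sampling():
    indicators = tf.constant([[1, 1, 1, 1], [1, 1, 0, 1]])
    labels = tf.constant([[1, 0, 0, 1], [0, 1, 0, 0]])
    return batch_sample_balanced_positive_negative(indicators, 2, labels,
                                                   positive_fraction=0.5)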
| [
[
[
1614,
1630
],
[
4718,
4720
],
[
2290,
2292
],
[
2324,
2326
],
[
2365,
2367
],
[
2410,
2412
],
[
2421,
2423
],
[
2475,
2477
],
[
2498,
2500
],
[
2600,
2602
],
[
2636,
2638
],
[
3488,
3490
],
[
3530,
3532
],
[
3583,
3585
],
[
3728,
3730
],
[
3742,
3744
],
[
3770,
3772
],
[
3941,
3943
],
[
3955,
3957
],
[
3980,
3982
],
[
4119,
4121
],
[
4153,
4155
],
[
4184,
4186
],
[
4233,
4235
],
[
4386,
4388
],
[
5833,
5835
],
[
5841,
5843
],
[
5933,
5935
],
[
5567,
5569
],
[
5587,
5589
],
[
5708,
5710
],
[
5725,
5727
]
],
[
[
1656,
1659
],
[
2554,
2557
]
],
[
[
1666,
1685
],
[
3872,
3891
],
[
4327,
4346
]
],
[
[
2674,
2707
],
[
5533,
5566
]
],
[
[
4440,
4479
]
]
] |
# -*- coding: utf-8 -*-
import os
import re
import json
import os.path
import unittest
reg_cmnt = re.compile(r"/\*.*?\*/", re.DOTALL)
class Config:
"Работа с конфигурационным файлом"
def __init__(self, main_path=None, user_path=None):
if main_path is None:
self._main_path = "config.json5"
else:
self._main_path = main_path
if user_path is None:
self._user_path = "config_user.json5"
else:
self._user_path = user_path
self._cfg_dict = {}
def __getitem__(self, key):
return self._cfg_dict[key]
def __len__(self):
return len(self._cfg_dict)
def _load_json(self, path):
data = {}
if os.path.exists(path):
txt = open(path).read()
txt = reg_cmnt.sub("", txt) # remove comments
data = json.loads(txt)
return data
def _set_default(self, cfg):
cfg["path_to_dict"] = cfg.get("path_to_dict", "dict.json")
cfg["path_to_stat"] = cfg.get("path_to_stat", "statistic.json")
cfg["words_per_lesson"] = int(cfg.get("words_per_lesson", 5))
cfg["CntStudyWords"] = int(cfg.get("CntStudyWords", 50))
cfg["MinPercent"] = float(cfg.get("MinPercent", 97.0))
cfg["MinSuccessCnt"] = int(cfg.get("MinSuccessCnt", 10))
cfg["retry_time"] = int(cfg.get("retry_time", 1800))
cfg["hide_transcription"] = cfg.get("hide_transcription", "no")
cfg["start_time_delay"] = int(cfg.get("start_time_delay", 1))
cfg["stat_count_row"] = int(cfg.get("stat_count_row", 200))
cfg["right_answer_percent"] = float(cfg.get("right_answer_percent", 10.0))
cfg["wrong_answer_percent"] = float(cfg.get("wrong_answer_percent", 40.0))
cfg["empty_answer_is_error"] = cfg.get("empty_answer_is_error", "no")
cfg["internet_dictionary_url"] = cfg.get("internet_dictionary_url",
{"EN_RU": "http://slovari.yandex.ru/{word}/en-ru/#lingvo/",
"RU_EN": "http://slovari.yandex.ru/{word}/en/#lingvo/"})
def create_default_user_config(self):
if not os.path.isfile(self._user_path):
txt = "{\n /*\n User config\n */\n\n}"
open(self._user_path, "wt").write(txt)
def reload(self):
self._cfg_dict = {}
self._cfg_dict.update(self._load_json(self._main_path))
self._cfg_dict.update(self._load_json(self._user_path))
self._set_default(self._cfg_dict)
return self._cfg_dict
def get_dict(self):
return self._cfg_dict
class ConfigTestCase(unittest.TestCase):
"Набор тестов для класса Config"
def setUp(self):
if os.path.isfile("test_config_user.json"):
os.remove("test_config_user.json")
def tearDown(self):
if os.path.isfile("test_config_user.json"):
os.remove("test_config_user.json")
def equal_cfg(self, cfg, test_dict):
for key, val in test_dict.items():
self.assertEqual(cfg[key], val)
self.assertEqual(len(cfg), 14)
def test_main(self):
"Тестирование загрузки основного файла с конфигурацией"
test_dict = {
"path_to_dict": "dict.json",
"path_to_stat": "statistic.json",
"words_per_lesson": 5,
"CntStudyWords": 50,
"MinPercent": 97.0,
"MinSuccessCnt": 10,
"retry_time": 1800,
"hide_transcription": "no",
"start_time_delay": 1,
"stat_count_row": 200,
"right_answer_percent": 10.0,
"wrong_answer_percent": 40.0,
"empty_answer_is_error": "no",
"internet_dictionary_url": {"EN_RU": "http://slovari.yandex.ru/{word}/en-ru/#lingvo/",
"RU_EN": "http://slovari.yandex.ru/{word}/en/#lingvo/"}}
cfg = Config("config.json5", "fake_config_user.json")
cfg.reload()
self.equal_cfg(cfg, test_dict)
def test_user(self):
"Тестирование загрузки пользовательского файла с конфигурацией"
test_dict = {
"path_to_dict": "dict1.json",
"path_to_stat": "statistic1.json",
"words_per_lesson": 6,
"CntStudyWords": 60,
"MinPercent": 98.0,
"MinSuccessCnt": 11,
"retry_time": 1801,
"hide_transcription": "yes",
"start_time_delay": 2,
"stat_count_row": 300,
"right_answer_percent": 20.0,
"wrong_answer_percent": 50.0,
"empty_answer_is_error": "yes",
"internet_dictionary_url": {"EN_RU": "http1://slovari.yandex.ru/{word}/en-ru/#lingvo/",
"RU_EN": "http1://slovari.yandex.ru/{word}/en/#lingvo/"}}
json.dump(test_dict, open("test_config_user.json", "w"))
cfg = Config("config.json5", "test_config_user.json")
cfg.reload()
self.equal_cfg(cfg, test_dict)
def test_user_part(self):
"Тестирование загрузки пользовательского файла с конфигурацией, который перекрывает только часть настроек"
test_dict = {
"path_to_dict": "dict1.json",
"path_to_stat": "statistic1.json",
"words_per_lesson": 6,
"CntStudyWords": 60,
"MinPercent": 98.0,
"MinSuccessCnt": 11}
json.dump(test_dict, open("test_config_user.json", "w"))
test_dict.update({
"retry_time": 1800,
"hide_transcription": "no",
"start_time_delay": 1,
"stat_count_row": 200,
"right_answer_percent": 10.0,
"wrong_answer_percent": 40.0,
"empty_answer_is_error": "no"})
cfg = Config("config.json5", "test_config_user.json")
cfg.reload()
self.equal_cfg(cfg, test_dict)
def test_not_exists(self):
"Тестирование выставления дефолтных настроек"
test_dict = {
"path_to_dict": "dict.json",
"path_to_stat": "statistic.json",
"words_per_lesson": 5,
"CntStudyWords": 50,
"MinPercent": 97.0,
"MinSuccessCnt": 10,
"retry_time": 1800,
"hide_transcription": "no",
"start_time_delay": 1,
"stat_count_row": 200,
"right_answer_percent": 10.0,
"wrong_answer_percent": 40.0,
"empty_answer_is_error": "no",
"internet_dictionary_url": {"EN_RU": "http://slovari.yandex.ru/{word}/en-ru/#lingvo/",
"RU_EN": "http://slovari.yandex.ru/{word}/en/#lingvo/"}}
cfg = Config("config.json5", "fake_config_user.json")
cfg.reload()
self.equal_cfg(cfg, test_dict)
cfg = Config("fake_config.json", "fake_config_user.json")
cfg.reload()
if __name__ == "__main__":
os.chdir(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
suite = unittest.TestLoader().loadTestsFromTestCase(ConfigTestCase)
unittest.TextTestRunner(verbosity=2).run(suite)
| [
[
[
32,
34
]
],
[
[
42,
44
],
[
100,
102
],
[
125,
127
]
],
[
[
52,
56
],
[
864,
868
],
[
4890,
4894
],
[
5469,
5473
]
],
[
[
64,
71
],
[
6984,
6986
],
[
6993,
6995
],
[
7009,
7011
],
[
7025,
7027
],
[
728,
730
],
[
2201,
2203
],
[
2768,
2770
],
[
2821,
2823
],
[
2892,
2894
],
[
2945,
2947
]
],
[
[
79,
87
],
[
2678,
2686
],
[
7066,
7074
],
[
7130,
7138
]
],
[
[
89,
97
],
[
804,
812
]
],
[
[
144,
150
],
[
3961,
3967
],
[
4961,
4967
],
[
5839,
5845
],
[
6756,
6762
],
[
6879,
6885
]
],
[
[
2663,
2677
],
[
7110,
7124
]
],
[
[
7058,
7063
],
[
7171,
7176
]
]
] |
import pytest
import copy
from pathlib import Path
import sys
sys.path.append(str(Path(__file__).absolute().parent.parent))
from swimmer_abm.model import Model
def test_init():
model = Model(nswimmers=3)
assert len(model.swimmers) == 3
def test_step():
model = Model(nswimmers=1)
swimmer = copy.deepcopy(model.swimmers[0])
dt = 1
swimmer.swim(dt)
model.step(dt)
assert swimmer.pos == model.swimmers[0].pos
def test_repr():
model = Model(nswimmers=1)
assert isinstance(str(model), str)
| [
[
[
7,
13
]
],
[
[
21,
25
],
[
312,
316
]
],
[
[
46,
50
],
[
82,
86
]
],
[
[
58,
61
],
[
62,
65
]
],
[
[
154,
159
],
[
190,
195
],
[
279,
284
],
[
474,
479
]
],
[
[
165,
174
]
],
[
[
254,
263
]
],
[
[
449,
458
]
]
] |
#!/usr/bin/python3
class Evaluator:
def __init__(self, lexer):
self.__lexer = lexer
def evaluate(self, line):
return int(next(self.__lexer.tokenize(line)).raw_value)
class REPL:
def __init__(self, read, print, evaluate):
self.__read = read
self.__eval = evaluate
self.__print = print
def loop(self):
while True:
try:
line = self.__read('mm-i> ')
result = self.__eval(line)
self.__print(result)
except KeyboardInterrupt:
break
if __name__ == '__main__':
from lexer import Lexer
REPL(input, print, Evaluator(Lexer()).evaluate).loop()
| [
[
[
26,
35
],
[
660,
669
]
],
[
[
198,
202
],
[
641,
645
]
],
[
[
631,
636
],
[
670,
675
]
]
] |
import os
from time import sleep
import pytest
from tlz import frequencies
from distributed import get_task_stream
from distributed.client import wait
from distributed.diagnostics.task_stream import TaskStreamPlugin
from distributed.metrics import time
from distributed.utils_test import div, gen_cluster, inc, slowinc
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_TaskStreamPlugin(c, s, *workers):
es = TaskStreamPlugin(s)
s.add_plugin(es)
assert not es.buffer
futures = c.map(div, [1] * 10, range(10))
total = c.submit(sum, futures[1:])
await wait(total)
assert len(es.buffer) == 11
workers = dict()
rects = es.rectangles(0, 10, workers)
assert workers
assert all(n == "div" for n in rects["name"])
assert all(d > 0 for d in rects["duration"])
counts = frequencies(rects["color"])
assert counts["black"] == 1
assert set(counts.values()) == {9, 1}
assert len(set(rects["y"])) == 3
rects = es.rectangles(2, 5, workers)
assert all(len(L) == 3 for L in rects.values())
starts = sorted(rects["start"])
rects = es.rectangles(
2, 5, workers=workers, start_boundary=(starts[0] + starts[1]) / 2000
)
assert set(rects["start"]).issubset(set(starts[1:]))
@gen_cluster(client=True)
async def test_maxlen(c, s, a, b):
tasks = TaskStreamPlugin(s, maxlen=5)
s.add_plugin(tasks)
futures = c.map(inc, range(10))
await wait(futures)
assert len(tasks.buffer) == 5
@gen_cluster(client=True)
async def test_collect(c, s, a, b):
tasks = TaskStreamPlugin(s)
s.add_plugin(tasks)
start = time()
futures = c.map(slowinc, range(10), delay=0.1)
await wait(futures)
L = tasks.collect()
assert len(L) == len(futures)
L = tasks.collect(start=start)
assert len(L) == len(futures)
L = tasks.collect(start=start + 0.2)
assert 4 <= len(L) <= len(futures)
L = tasks.collect(start="20 s")
assert len(L) == len(futures)
L = tasks.collect(start="500ms")
assert 0 < len(L) <= len(futures)
L = tasks.collect(count=3)
assert len(L) == 3
assert L == list(tasks.buffer)[-3:]
assert tasks.collect(stop=start + 100, count=3) == tasks.collect(count=3)
assert tasks.collect(start=start, count=3) == list(tasks.buffer)[:3]
@gen_cluster(client=True)
async def test_no_startstops(c, s, a, b):
tasks = TaskStreamPlugin(s)
s.add_plugin(tasks)
# just to create the key on the scheduler
future = c.submit(inc, 1)
await wait(future)
assert len(tasks.buffer) == 1
tasks.transition(future.key, "processing", "erred")
# Transition was not recorded because it didn't contain `startstops`
assert len(tasks.buffer) == 1
tasks.transition(future.key, "processing", "erred", startstops=[])
# Transition was not recorded because `startstops` was empty
assert len(tasks.buffer) == 1
tasks.transition(
future.key, "processing", "erred", startstops=[dict(start=time(), stop=time())]
)
assert len(tasks.buffer) == 2
@gen_cluster(client=True)
async def test_client(c, s, a, b):
L = await c.get_task_stream()
assert L == ()
futures = c.map(slowinc, range(10), delay=0.1)
await wait(futures)
tasks = s.plugins[TaskStreamPlugin.name]
L = await c.get_task_stream()
assert L == tuple(tasks.buffer)
def test_client_sync(client):
with get_task_stream(client=client) as ts:
sleep(0.1) # to smooth over time differences on the scheduler
futures = client.map(inc, range(10))
wait(futures)
assert len(ts.data) == 10
@gen_cluster(client=True)
async def test_get_task_stream_plot(c, s, a, b):
bokeh = pytest.importorskip("bokeh")
await c.get_task_stream()
futures = c.map(slowinc, range(10), delay=0.1)
await wait(futures)
data, figure = await c.get_task_stream(plot=True)
assert isinstance(figure, bokeh.plotting.Figure)
def test_get_task_stream_save(client, tmpdir):
bokeh = pytest.importorskip("bokeh")
tmpdir = str(tmpdir)
fn = os.path.join(tmpdir, "foo.html")
with get_task_stream(plot="save", filename=fn) as ts:
wait(client.map(inc, range(10)))
with open(fn) as f:
data = f.read()
assert "inc" in data
assert "bokeh" in data
assert isinstance(ts.figure, bokeh.plotting.Figure)
| [
[
[
7,
9
],
[
4136,
4138
]
],
[
[
27,
32
],
[
3460,
3465
]
],
[
[
41,
47
],
[
3769,
3775
],
[
4073,
4079
]
],
[
[
64,
75
],
[
843,
854
]
],
[
[
101,
116
],
[
3414,
3429
],
[
4179,
4194
]
],
[
[
148,
152
],
[
602,
606
],
[
1455,
1459
],
[
1703,
1707
],
[
2532,
2536
],
[
3243,
3247
],
[
3635,
3639
],
[
3890,
3894
],
[
4236,
4240
]
],
[
[
201,
217
],
[
440,
456
],
[
1355,
1371
],
[
1579,
1595
],
[
2402,
2418
],
[
3280,
3296
]
],
[
[
250,
254
],
[
1635,
1639
],
[
3003,
3007
],
[
3016,
3020
]
],
[
[
290,
293
],
[
527,
530
]
],
[
[
295,
306
],
[
324,
335
],
[
1283,
1294
],
[
1506,
1517
],
[
2323,
2334
],
[
3068,
3079
],
[
3683,
3694
]
],
[
[
308,
311
],
[
1429,
1432
],
[
2514,
2517
],
[
3611,
3614
],
[
4252,
4255
]
],
[
[
313,
320
],
[
1662,
1669
],
[
3202,
3209
],
[
3849,
3856
]
],
[
[
382,
1279
]
],
[
[
1308,
1502
]
],
[
[
1531,
2319
]
],
[
[
2348,
3064
]
],
[
[
3093,
3372
]
],
[
[
3379,
3395
]
],
[
[
3708,
4011
]
],
[
[
4018,
4043
]
]
] |
"""This module implements the dataset item entity"""
# Copyright (C) 2021-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import abc
import copy
import itertools
import logging
from threading import Lock
from typing import List, Optional, Sequence
import numpy as np
from ote_sdk.entities.annotation import Annotation, AnnotationSceneEntity
from ote_sdk.entities.label import LabelEntity
from ote_sdk.entities.media import IMedia2DEntity
from ote_sdk.entities.metadata import IMetadata, MetadataItemEntity
from ote_sdk.entities.model import ModelEntity
from ote_sdk.entities.scored_label import ScoredLabel
from ote_sdk.entities.shapes.rectangle import Rectangle
from ote_sdk.entities.subset import Subset
from ote_sdk.utils.shape_factory import ShapeFactory
logger = logging.getLogger(__name__)
class DatasetItemEntity(metaclass=abc.ABCMeta):
"""
DatasetItemEntity represents an item in the DatasetEntity. It holds a media item, annotation and an ROI.
The ROI determines the region of interest for the dataset item, and is described by a shape entity.
Dataset items hold five fundamental properties:
- A 2d media entity (e.g. Image)
- A 2d annotation entity for the full resolution media entity
- An ROI, describing the region of interest.
- The subset it belongs to
- Metadata for the media entity (e.g. saliency map or active score)
.. rubric:: Getting data from dataset item
The first step is to fetch the input data for the network.
>>> dataset_item = DatasetItemEntity()
>>> media_numpy = dataset_item.numpy # RGB media data (Height, Width, Channels)
This returns the numpy data for the assigned ROI. But it is possible to extract any arbitrary region.
>>> from ote_sdk.entities.shapes.rectangle import Rectangle
>>> top_left_quart_roi = Annotation(Rectangle(x1=0.0, y1=0.0, x2=0.5, y2=0.5), labels=[])
>>> top_left_quart_numpy = dataset_item.roi_numpy(roi=top_left_quart_roi)
Get the subset of labels for the item ROI:
>>> labels = dataset_item.get_roi_labels(labels=...)
Get the annotations __visible__ in the ROI:
>>> dataset_item.get_annotations()
.. rubric:: Adding output data to dataset item
It is possible to add shapes or just labels for the ROI.
Add shapes to dataset item:
>>> box = Rectangle(x1=0.2, y1=0.3, x2=0.6, y2=0.5)
>>> dataset_item.append_annotations(annotations=[Annotation(box, labels=[...])])
Add labels to ROI:
>>> dataset_item.append_labels(labels=[...])
:param media: Media item
:param annotation_scene: Annotation scene
:param roi: Region Of Interest
:param metadata: Metadata attached to dataset item
:param subset: `Subset` for item. E.g. `Subset.VALIDATION`
"""
# pylint: disable=too-many-arguments
def __init__(
self,
media: IMedia2DEntity,
annotation_scene: AnnotationSceneEntity,
roi: Optional[Annotation] = None,
metadata: Optional[Sequence[MetadataItemEntity]] = None,
subset: Subset = Subset.NONE,
):
self.__media: IMedia2DEntity = media
self.__annotation_scene: AnnotationSceneEntity = annotation_scene
self.__subset: Subset = subset
self.__roi_lock = Lock()
# set ROI
if roi is None:
for annotation in annotation_scene.annotations:
# if there is a full box in annotation.shapes, set it as ROI
if Rectangle.is_full_box(annotation.shape):
roi = annotation
break
self.__roi = roi
self.__metadata: List[MetadataItemEntity] = []
if metadata is not None:
self.__metadata = list(metadata)
@property
def metadata(self) -> Sequence[MetadataItemEntity]:
"""Provides access to metadata."""
return self.__metadata
def __repr__(self):
return (
f"{self.__class__.__name__}("
f"media={self.media}, "
f"annotation_scene={self.annotation_scene}, "
f"roi={self.roi}, "
f"subset={self.subset})"
)
@property
def roi(self) -> Annotation:
"""Region Of Interest."""
with self.__roi_lock:
if self.__roi is None:
requested_roi = Annotation(Rectangle.generate_full_box(), labels=[])
self.__roi = requested_roi
else:
requested_roi = self.__roi
return requested_roi
@roi.setter
def roi(self, roi: Optional[Annotation]):
with self.__roi_lock:
self.__roi = roi
@property
def subset(self) -> Subset:
"""
Returns the subset that the IDatasetItem belongs to. e.g. Subset.TRAINING.
"""
return self.__subset
@subset.setter
def subset(self, value: Subset):
self.__subset = value
@property
def media(self) -> IMedia2DEntity:
"""Media."""
return self.__media
def roi_numpy(self, roi: Optional[Annotation] = None) -> np.ndarray:
"""
Gives the numpy data for the media, given an ROI.
This function allows to take a crop of any arbitrary region of the media in the Dataset entity.
If the ROI is not given, the ROI assigned to the DatasetItem will be used as default.
:param roi: Shape entity. The shape will be converted if needed, to extract the ROI numpy.
:return: Numpy array with media data
"""
if roi is None:
roi = self.roi
if roi is not None:
roi.shape = ShapeFactory.shape_as_rectangle(roi.shape)
return self.media.roi_numpy(roi=roi)
@property
def numpy(self) -> np.ndarray:
"""
Returns the numpy data for the media, taking ROI into account.
:return: Numpy array. RGB array of shape (Height, Width, Channels)
"""
return self.roi_numpy()
@property
def width(self) -> int:
"""
The width of the dataset item, taking into account the ROI.
"""
roi_shape_as_box = ShapeFactory.shape_as_rectangle(self.roi.shape)
roi_shape_as_box = roi_shape_as_box.clip_to_visible_region()
width = self.media.width
# Note that we cannot directly use roi_shape_as_box.width due to the rounding
# because round(x2 - x1) is not always equal to round(x2) - round(x1)
x1 = int(round(roi_shape_as_box.x1 * width))
x2 = int(round(roi_shape_as_box.x2 * width))
return x2 - x1
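    # Worked example of the rounding note above (illustrative numbers, not library
    # code): with media width 10, x1=0.25 and x2=0.75 the crop spans
    # round(7.5) - round(2.5) = 8 - 2 = 6 pixels, whereas round((0.75 - 0.25) * 10)
    # would give 5.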
@property
def height(self) -> int:
"""
The height of the dataset item, taking into account the ROI.
"""
roi_shape_as_box = ShapeFactory.shape_as_rectangle(self.roi.shape)
roi_shape_as_box = roi_shape_as_box.clip_to_visible_region()
height = self.media.height
# Note that we cannot directly use roi_shape_as_box.height due to the rounding
# because round(y2 - y1) is not always equal to round(y2) - round(y1)
y1 = int(round(roi_shape_as_box.y1 * height))
y2 = int(round(roi_shape_as_box.y2 * height))
return y2 - y1
@property
def annotation_scene(self) -> AnnotationSceneEntity:
"""Access to annotation scene."""
return self.__annotation_scene
@annotation_scene.setter
def annotation_scene(self, value: AnnotationSceneEntity):
self.__annotation_scene = value
def get_annotations(
self,
labels: Optional[List[LabelEntity]] = None,
include_empty: bool = False,
) -> List[Annotation]:
"""
Returns a list of annotations that exist in the dataset item (wrt. ROI). This is done by checking that the
center of the annotation is located in the ROI.
:param labels: Subset of input labels to filter with; if ``None``, all the shapes within the ROI are returned
:param include_empty: if True, returns both empty and non-empty labels
:return: The intersection of the input label set and those present within the ROI
"""
is_full_box = Rectangle.is_full_box(self.roi.shape)
annotations = []
if is_full_box and labels is None and not include_empty:
# Fast path for the case where we do not need to change the shapes
# todo: this line is incorrect. CVS-75919
annotations = self.annotation_scene.annotations
else:
# Todo: improve speed. This is O(n) for n shapes.
roi_as_box = ShapeFactory.shape_as_rectangle(self.roi.shape)
            labels_set = {label.name for label in labels} if labels is not None else set()
for annotation in self.annotation_scene.annotations:
if not is_full_box and not self.roi.shape.contains_center(
annotation.shape
):
continue
shape_labels = annotation.get_labels(include_empty)
if labels is not None:
shape_labels = [
label for label in shape_labels if label.name in labels_set
]
if len(shape_labels) == 0:
continue
if not is_full_box:
# Create a denormalized copy of the shape.
shape = annotation.shape.denormalize_wrt_roi_shape(roi_as_box)
else:
# Also create a copy of the shape, so that we can safely modify the labels
# without tampering with the original shape.
shape = copy.deepcopy(annotation.shape)
annotations.append(Annotation(shape=shape, labels=shape_labels))
return annotations
def append_annotations(self, annotations: Sequence[Annotation]):
"""
Adds a list of shapes to the annotation
"""
roi_as_box = ShapeFactory.shape_as_rectangle(self.roi.shape)
validated_annotations = [
Annotation(
shape=annotation.shape.normalize_wrt_roi_shape(roi_as_box),
labels=annotation.get_labels(),
)
for annotation in annotations
if ShapeFactory().shape_produces_valid_crop(
shape=annotation.shape,
media_width=self.media.width,
media_height=self.media.height,
)
]
n_invalid_shapes = len(annotations) - len(validated_annotations)
if n_invalid_shapes > 0:
logger.info(
"%d shapes will not be added to the dataset item as they "
"would produce invalid crops (this is expected for some tasks, "
"such as segmentation).",
n_invalid_shapes,
)
self.annotation_scene.append_annotations(validated_annotations)
def get_roi_labels(
self, labels: Optional[List[LabelEntity]] = None, include_empty: bool = False
) -> List[LabelEntity]:
"""
Return the subset of the input labels which exist in the dataset item (wrt. ROI).
:param labels: Subset of input labels to filter with; if ``None``, all the labels within the ROI are returned
:param include_empty: if True, returns both empty and non-empty labels
:return: The intersection of the input label set and those present within the ROI
"""
filtered_labels = set()
for label in self.roi.get_labels(include_empty):
if labels is None or label.get_label() in labels:
filtered_labels.add(label.get_label())
return sorted(list(filtered_labels), key=lambda x: x.name)
def get_shapes_labels(
self, labels: Optional[List[LabelEntity]] = None, include_empty: bool = False
) -> List[LabelEntity]:
"""
        Get the labels of the shapes present in this dataset item. If a label list is supplied, only labels present
        within that list are returned. If include_empty is True, empty labels are returned as well.
:param labels: if supplied only labels present in this list are returned
:param include_empty: if True, returns both empty and non-empty labels
:return: a list of labels from the shapes within the roi of this dataset item
"""
annotations = self.get_annotations()
scored_label_set = set(
itertools.chain(
*[annotation.get_labels(include_empty) for annotation in annotations]
)
)
label_set = {scored_label.get_label() for scored_label in scored_label_set}
if labels is None:
return list(label_set)
return [label for label in label_set if label in labels]
def append_labels(self, labels: List[ScoredLabel]):
"""
        Appends labels to the DatasetItem and adds them to the ROI annotation in the annotation scene if they are not there yet
:param labels: list of labels to be appended
"""
if len(labels) == 0:
return
roi_annotation = None
for annotation in self.annotation_scene.annotations:
if annotation == self.roi:
roi_annotation = annotation
break
if roi_annotation is None: # no annotation found with shape
roi_annotation = self.roi
self.annotation_scene.append_annotation(roi_annotation)
for label in labels:
if label not in self.roi.get_labels(include_empty=True):
self.roi.append_label(label)
if label not in roi_annotation.get_labels(include_empty=True):
roi_annotation.append_label(label)
def __eq__(self, other):
if isinstance(other, DatasetItemEntity):
return (
self.media == other.media
and self.annotation_scene == other.annotation_scene
and self.roi == other.roi
and self.subset == other.subset
)
return False
def __deepcopy__(self, memo):
"""
        When we deepcopy this object, the lock cannot be deep copied,
        so a new lock is created for the copy instead.
"""
# Call ROI getter to ensure original object has an ROI.
_ = self.roi
clone = copy.copy(self)
for name, value in vars(self).items():
if "__roi_lock" in name:
setattr(clone, name, Lock())
else:
setattr(clone, name, copy.deepcopy(value, memo))
return clone
def append_metadata_item(
self, data: IMetadata, model: Optional[ModelEntity] = None
):
"""
Appends metadata produced by some model to the dataset item.
.. rubric:: Adding visualization heatmap (ResultMediaEntity) to DatasetItemEntity
>>> from ote_sdk.entities.image import Image
>>> from ote_sdk.entities.result_media import ResultMediaEntity
>>> media = Image(file_path='image.jpeg')
>>> annotation = NullAnnotationSceneEntity()
>>> dataset_item = DatasetItem(media=media, annotation_scene=annotation)
>>> data = np.ones((120, 120, 3)).astype(np.uint8) * 255 # Saliency numpy
>>> result_media = ResultMediaEntity(name="Gradcam++",
... type="Gradcam++",
... annotation_scene=annotation,
... numpy=data)
>>> dataset_item.append_metadata_item(result_media)
.. rubric:: Representation vector for active learning
>>> from ote_sdk.entities.tensor import TensorEntity
>>> tensor = TensorEntity(name="representation_vector", numpy=data)
>>> dataset_item.append_metadata_item(data=tensor, model=model)
:param data: any object of a class inherited from IMetadata. (e.g., FloatMetadata, Tensor)
        :param model: model that was used to generate the metadata
"""
self.__metadata.append(MetadataItemEntity(data=data, model=model))
def get_metadata_by_name_and_model(
self, name: str, model: Optional[ModelEntity]
) -> Sequence[MetadataItemEntity]:
"""
        Returns the metadata items that have the given `name` and were generated by `model`.
:param name: the name of the metadata
:param model: the model which was used to generate the metadata.
        :return: a list of MetadataItemEntity objects matching both criteria
"""
return [
meta
for meta in self.metadata
if meta.data.name == name and meta.model == model
]
| [
[
[
146,
149
],
[
852,
855
]
],
[
[
157,
161
],
[
9623,
9627
],
[
14324,
14328
],
[
14525,
14529
]
],
[
[
169,
178
],
[
12420,
12429
]
],
[
[
186,
193
],
[
788,
795
]
],
[
[
216,
220
],
[
3266,
3270
],
[
14462,
14466
]
],
[
[
240,
244
],
[
3627,
3631
],
[
7589,
7593
],
[
7516,
7520
],
[
11000,
11004
],
[
10936,
10940
],
[
11817,
11821
],
[
11753,
11757
],
[
12796,
12800
]
],
[
[
246,
254
],
[
2943,
2951
],
[
2990,
2998
],
[
4546,
4554
],
[
5031,
5039
],
[
7507,
7515
],
[
10927,
10935
],
[
11744,
11752
],
[
14643,
14651
],
[
16157,
16165
]
],
[
[
256,
264
],
[
2999,
3007
],
[
3776,
3784
],
[
9811,
9819
],
[
16188,
16196
]
],
[
[
273,
284
],
[
5063,
5065
],
[
5732,
5734
]
],
[
[
326,
336
],
[
2952,
2962
],
[
4173,
4183
],
[
4316,
4326
],
[
4555,
4565
],
[
5040,
5050
],
[
7594,
7604
],
[
9691,
9701
],
[
9820,
9830
],
[
10022,
10032
]
],
[
[
338,
359
],
[
2907,
2928
],
[
3160,
3181
],
[
7215,
7236
],
[
7387,
7408
]
],
[
[
395,
406
],
[
7521,
7532
],
[
11005,
11016
],
[
10941,
10952
],
[
11822,
11833
],
[
11758,
11769
]
],
[
[
442,
456
],
[
2865,
2879
],
[
3104,
3118
],
[
4936,
4950
]
],
[
[
495,
504
],
[
14625,
14634
]
],
[
[
506,
524
],
[
3008,
3026
],
[
3632,
3650
],
[
3785,
3803
],
[
16040,
16058
],
[
16197,
16215
]
],
[
[
560,
571
],
[
14652,
14663
],
[
16166,
16177
]
],
[
[
614,
625
],
[
12801,
12812
]
],
[
[
672,
681
],
[
3472,
3481
],
[
4327,
4336
],
[
8112,
8121
]
],
[
[
718,
724
],
[
3062,
3068
],
[
3053,
3059
],
[
3224,
3230
],
[
4667,
4673
],
[
4859,
4865
]
],
[
[
765,
777
],
[
5605,
5617
],
[
6109,
6121
],
[
6717,
6729
],
[
8534,
8546
],
[
9927,
9939
],
[
10229,
10241
]
],
[
[
779,
785
],
[
10548,
10554
]
],
[
[
824,
841
],
[
13756,
13773
]
]
] |
from sys import argv, exit
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QMainWindow, QWidget
class MainWindow(QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent)
self.setCentralWidget(CustomWidget(self))
self.show()
class CustomWidget(QWidget):
def __init__(self, parent=None):
super(CustomWidget, self).__init__(parent)
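        # Qt.StrongFocus lets the widget accept keyboard focus via both Tab and
        # mouse click, so keyPressEvent below actually receives key events.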
self.setFocusPolicy(Qt.StrongFocus)
pass
def mousePressEvent(self, event):
print(event)
def keyPressEvent(self, event):
print(event)
if __name__ == "__main__":
app = QApplication(argv)
ex = MainWindow()
exit(app.exec_())
| [
[
[
16,
20
],
[
656,
660
]
],
[
[
22,
26
],
[
688,
692
]
],
[
[
53,
55
],
[
456,
458
]
],
[
[
84,
96
],
[
643,
655
]
],
[
[
98,
109
],
[
138,
149
]
],
[
[
111,
118
],
[
330,
337
]
],
[
[
127,
137
],
[
671,
681
],
[
204,
214
]
],
[
[
317,
329
],
[
269,
281
],
[
391,
403
]
],
[
[
637,
640
],
[
693,
696
]
],
[
[
666,
668
]
]
] |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import errno
import logging
import multiprocessing
import os
import random
import shlex
import signal
from eventlet.green import subprocess
from eventlet import greenthread
from oslo.utils import strutils
import six
from neutron_vpnaas.openstack.common._i18n import _
LOG = logging.getLogger(__name__)
class InvalidArgumentError(Exception):
def __init__(self, message=None):
super(InvalidArgumentError, self).__init__(message)
class UnknownArgumentError(Exception):
def __init__(self, message=None):
super(UnknownArgumentError, self).__init__(message)
class ProcessExecutionError(Exception):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
self.exit_code = exit_code
self.stderr = stderr
self.stdout = stdout
self.cmd = cmd
self.description = description
if description is None:
description = _("Unexpected error while running command.")
if exit_code is None:
exit_code = '-'
message = _('%(description)s\n'
'Command: %(cmd)s\n'
'Exit code: %(exit_code)s\n'
'Stdout: %(stdout)r\n'
'Stderr: %(stderr)r') % {'description': description,
'cmd': cmd,
'exit_code': exit_code,
'stdout': stdout,
'stderr': stderr}
super(ProcessExecutionError, self).__init__(message)
class NoRootWrapSpecified(Exception):
def __init__(self, message=None):
super(NoRootWrapSpecified, self).__init__(message)
def _subprocess_setup():
# Python installs a SIGPIPE handler by default. This is usually not what
# non-Python subprocesses expect.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
"""Helper method to shell out and execute a command through subprocess.
Allows optional retry.
:param cmd: Passed to subprocess.Popen.
:type cmd: string
:param process_input: Send to opened process.
:type process_input: string
:param env_variables: Environment variables and their values that
will be set for the process.
:type env_variables: dict
:param check_exit_code: Single bool, int, or list of allowed exit
codes. Defaults to [0]. Raise
:class:`ProcessExecutionError` unless
                            program exits with one of these codes.
:type check_exit_code: boolean, int, or [int]
:param delay_on_retry: True | False. Defaults to True. If set to True,
wait a short amount of time before retrying.
:type delay_on_retry: boolean
:param attempts: How many times to retry cmd.
:type attempts: int
:param run_as_root: True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
in the root_helper kwarg.
:type run_as_root: boolean
:param root_helper: command to prefix to commands called with
run_as_root=True
:type root_helper: string
:param shell: whether or not there should be a shell used to
execute this command. Defaults to false.
:type shell: boolean
:param loglevel: log level for execute commands.
:type loglevel: int. (Should be logging.DEBUG or logging.INFO)
:returns: (stdout, stderr) from process execution
:raises: :class:`UnknownArgumentError` on
receiving unknown arguments
:raises: :class:`ProcessExecutionError`
"""
process_input = kwargs.pop('process_input', None)
env_variables = kwargs.pop('env_variables', None)
check_exit_code = kwargs.pop('check_exit_code', [0])
ignore_exit_code = False
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '')
shell = kwargs.pop('shell', False)
loglevel = kwargs.pop('loglevel', logging.DEBUG)
if isinstance(check_exit_code, bool):
ignore_exit_code = not check_exit_code
check_exit_code = [0]
elif isinstance(check_exit_code, int):
check_exit_code = [check_exit_code]
if kwargs:
raise UnknownArgumentError(_('Got unknown keyword args: %r') % kwargs)
if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
if not root_helper:
raise NoRootWrapSpecified(
message=_('Command requested root, but did not '
'specify a root helper.'))
cmd = shlex.split(root_helper) + list(cmd)
    cmd = list(map(str, cmd))  # materialize the list so it survives both logging and Popen
sanitized_cmd = strutils.mask_password(' '.join(cmd))
while attempts > 0:
attempts -= 1
try:
LOG.log(loglevel, _('Running cmd (subprocess): %s'), sanitized_cmd)
_PIPE = subprocess.PIPE # pylint: disable=E1101
if os.name == 'nt':
preexec_fn = None
close_fds = False
else:
preexec_fn = _subprocess_setup
close_fds = True
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
close_fds=close_fds,
preexec_fn=preexec_fn,
shell=shell,
env=env_variables)
result = None
for _i in six.moves.range(20):
# NOTE(russellb) 20 is an arbitrary number of retries to
# prevent any chance of looping forever here.
try:
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
except OSError as e:
if e.errno in (errno.EAGAIN, errno.EINTR):
continue
raise
break
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
LOG.log(loglevel, 'Result was %s' % _returncode)
if not ignore_exit_code and _returncode not in check_exit_code:
(stdout, stderr) = result
sanitized_stdout = strutils.mask_password(stdout)
sanitized_stderr = strutils.mask_password(stderr)
raise ProcessExecutionError(exit_code=_returncode,
stdout=sanitized_stdout,
stderr=sanitized_stderr,
cmd=sanitized_cmd)
return result
except ProcessExecutionError:
if not attempts:
raise
else:
LOG.log(loglevel, _('%r failed. Retrying.'), sanitized_cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls, without
# it two execute calls in a row hangs the second one
greenthread.sleep(0)
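# Illustrative sketch only (not part of the original module): one way the
# kwargs documented in execute() combine. The 'ip' command and the sudo-based
# root_helper below are assumptions for the example, not project defaults.
def _example_execute():
    # Raises ProcessExecutionError on a non-zero exit because check_exit_code
    # defaults to [0]; retries up to three times with a short delay in between.
    return execute('ip', 'link', 'show',
                   run_as_root=True, root_helper='sudo',
                   attempts=3, delay_on_retry=True)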
def trycmd(*args, **kwargs):
"""A wrapper around execute() to more easily handle warnings and errors.
Returns an (out, err) tuple of strings containing the output of
the command's stdout and stderr. If 'err' is not empty then the
command can be considered to have failed.
    :param discard_warnings: True | False. Defaults to False. If set to True,
                             then for succeeding commands, stderr is cleared.
"""
discard_warnings = kwargs.pop('discard_warnings', False)
try:
out, err = execute(*args, **kwargs)
failed = False
except ProcessExecutionError as exn:
out, err = '', six.text_type(exn)
failed = True
if not failed and discard_warnings and err:
# Handle commands that output to stderr but otherwise succeed
err = ''
return out, err
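# Illustrative sketch (not part of the original module): trycmd() never raises
# on command failure; callers check whether 'err' is non-empty instead. The
# 'ls' invocation below is an assumption for the example.
def _example_trycmd():
    out, err = trycmd('ls', '/nonexistent/path', discard_warnings=True)
    return out if not err else None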
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
sanitized_cmd = strutils.mask_password(cmd)
LOG.debug('Running cmd (SSH): %s', sanitized_cmd)
if addl_env:
raise InvalidArgumentError(_('Environment not supported over SSH'))
if process_input:
# This is (probably) fixable if we need it...
raise InvalidArgumentError(_('process_input not supported over SSH'))
stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
channel = stdout_stream.channel
# NOTE(justinsb): This seems suspicious...
# ...other SSH clients have buffering issues with this approach
stdout = stdout_stream.read()
sanitized_stdout = strutils.mask_password(stdout)
stderr = stderr_stream.read()
sanitized_stderr = strutils.mask_password(stderr)
stdin_stream.close()
exit_status = channel.recv_exit_status()
# exit_status == -1 if no exit code was returned
if exit_status != -1:
LOG.debug('Result was %s' % exit_status)
if check_exit_code and exit_status != 0:
raise ProcessExecutionError(exit_code=exit_status,
stdout=sanitized_stdout,
stderr=sanitized_stderr,
cmd=sanitized_cmd)
return (sanitized_stdout, sanitized_stderr)
def get_worker_count():
"""Utility to get the default worker count.
@return: The number of CPUs if that can be determined, else a default
worker count of 1 is returned.
"""
try:
return multiprocessing.cpu_count()
except NotImplementedError:
return 1
| [
[
[
698,
703
],
[
7123,
7128
],
[
7137,
7142
]
],
[
[
711,
718
],
[
969,
976
],
[
5126,
5133
]
],
[
[
726,
741
],
[
11008,
11023
]
],
[
[
749,
751
],
[
5475,
5477
],
[
5494,
5496
],
[
6049,
6051
]
],
[
[
759,
765
],
[
8214,
8220
]
],
[
[
773,
778
],
[
5712,
5717
]
],
[
[
786,
792
],
[
2587,
2593
],
[
2601,
2607
],
[
2617,
2623
]
],
[
[
821,
831
],
[
5992,
6002
],
[
6251,
6261
]
],
[
[
853,
864
],
[
8196,
8207
],
[
8512,
8523
]
],
[
[
888,
896
],
[
5794,
5802
],
[
7567,
7575
],
[
7633,
7641
],
[
9502,
9510
],
[
10113,
10121
],
[
10201,
10209
]
],
[
[
904,
907
],
[
6683,
6686
],
[
9182,
9185
]
],
[
[
959,
960
],
[
1642,
1643
],
[
1763,
1764
],
[
5399,
5400
],
[
5604,
5605
],
[
5922,
5923
],
[
8099,
8100
],
[
9636,
9637
],
[
9789,
9790
]
],
[
[
963,
966
],
[
5904,
5907
],
[
7365,
7368
],
[
8081,
8084
],
[
9534,
9537
],
[
10392,
10395
]
],
[
[
1005,
1025
],
[
1090,
1110
],
[
9615,
9635
],
[
9768,
9788
]
],
[
[
1144,
1164
],
[
1229,
1249
],
[
5378,
5398
]
],
[
[
1283,
1304
],
[
2257,
2278
],
[
7686,
7707
],
[
7973,
7994
],
[
9129,
9150
],
[
10500,
10521
]
],
[
[
2312,
2331
],
[
2396,
2415
],
[
5559,
5578
]
],
[
[
2447,
2464
],
[
6181,
6198
]
],
[
[
2639,
2646
],
[
9070,
9077
]
],
[
[
8539,
8545
]
],
[
[
9386,
9397
]
],
[
[
10789,
10805
]
]
] |
# -*- coding:utf-8; -*-
class SolutionV1:
def combinationSum(self, candidates, target):
        # 1. Define the container that stores the resulting combinations
        result = set()
        # 2. Define the recursive helper; i was first meant to be the recursion
        #    depth, but its exact meaning is pinned down below
        def helper(nums, candidates, target):
            # 4. Follow the recursion template
            # 1) Define the termination condition:
            #    return once the numbers picked from candidates sum to target,
            #    so the recursion state is really the list of picked candidates.
            #    Rename the first parameter from i to nums: a list of picked values.
            if sum(nums) == target:
                result.add(tuple(nums))  # result is a set, so add() rather than append()
                return
            # 5. Likewise stop when sum(nums) > target, because all
            #    candidates are positive integers
            if sum(nums) > target:
                return
            # 2) Process the current level
            # 6. If sum(nums) < target, the next element appended to nums
            #    can be any element of candidates
            newNums = [nums + [i] for i in candidates]
            # 3) Drill down
            # 7. Recurse into the next level
            for nums in newNums:
                helper(nums, candidates, target)
            # 4) Clean up the current level: nothing to clean up here
        # 3. First call of the recursive helper
helper([], candidates, target)
return [list(nums) for nums in result]
class Solution:
""" 递归代码优化,语言层面优化代码
"""
def combinationSum(self, candidates, target):
result = set()
def helper(nums, candidates, target):
if sum(nums) == target:
result.add(tuple(sorted(nums)))
return
if sum(nums) > target:
return
for i in candidates:
helper(nums + [i], candidates, target)
helper([], candidates, target)
return [list(nums) for nums in result]
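if __name__ == "__main__":
    # Illustrative check (not part of the original file): for candidates
    # [2, 3, 6, 7] and target 7 the only combinations are [2, 2, 3] and [7].
    print(Solution().combinationSum([2, 3, 6, 7], 7))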
| [
[
[
32,
42
]
],
[
[
1045,
1053
]
]
] |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from datetime import datetime
from django.utils import timezone
from sentry.models import Commit, CommitAuthor, Integration, PullRequest, Repository
from sentry.testutils import APITestCase
from uuid import uuid4
from .testutils import (
PUSH_EVENT_EXAMPLE_INSTALLATION,
PULL_REQUEST_OPENED_EVENT_EXAMPLE,
PULL_REQUEST_EDITED_EVENT_EXAMPLE,
PULL_REQUEST_CLOSED_EVENT_EXAMPLE,
)
from sentry.utils.compat.mock import patch
class WebhookTest(APITestCase):
def test_get(self):
url = "/extensions/github-enterprise/webhook/"
response = self.client.get(url)
assert response.status_code == 405
def test_unknown_host_event(self):
# No integration defined in the database, so event should be rejected
# because we can't find metadata and secret for it
url = "/extensions/github-enterprise/webhook/"
response = self.client.post(
path=url,
data=PUSH_EVENT_EXAMPLE_INSTALLATION,
content_type="application/json",
HTTP_X_GITHUB_EVENT="push",
HTTP_X_GITHUB_ENTERPRISE_HOST="99.99.99.99",
HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()),
)
assert response.status_code == 400
def test_unregistered_event(self):
project = self.project # force creation
url = u"/extensions/github-enterprise/webhook/".format(project.organization.id)
response = self.client.post(
path=url,
data=PUSH_EVENT_EXAMPLE_INSTALLATION,
content_type="application/json",
HTTP_X_GITHUB_EVENT="UnregisteredEvent",
HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196",
HTTP_X_HUB_SIGNATURE="sha1=56a3df597e02adbc17fb617502c70e19d96a6136",
HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()),
)
assert response.status_code == 204
@patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata")
def test_invalid_signature_event(self, mock_installation):
mock_installation.return_value = {
"url": "35.232.149.196",
"id": "2",
"name": "test-app",
"webhook_secret": "b3002c3e321d4b7880360d397db2ccfd",
"private_key": "private_key",
"verify_ssl": True,
}
url = "/extensions/github-enterprise/webhook/"
response = self.client.post(
path=url,
data=PUSH_EVENT_EXAMPLE_INSTALLATION,
content_type="application/json",
HTTP_X_GITHUB_EVENT="push",
HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196",
HTTP_X_HUB_SIGNATURE="sha1=33521abeaaf9a57c2abf486e0ccd54d23cf36fec",
HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()),
)
assert response.status_code == 401
@patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata")
def test_missing_signature_ok(self, mock_installation):
        # Old GitHub Enterprise versions don't send a signature, so we have to accept that.
mock_installation.return_value = {
"url": "35.232.149.196",
"id": "2",
"name": "test-app",
"webhook_secret": "b3002c3e321d4b7880360d397db2ccfd",
"private_key": "private_key",
"verify_ssl": True,
}
url = "/extensions/github-enterprise/webhook/"
response = self.client.post(
path=url,
data=PUSH_EVENT_EXAMPLE_INSTALLATION,
content_type="application/json",
HTTP_X_GITHUB_EVENT="push",
HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196",
HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()),
)
assert response.status_code == 204
class PushEventWebhookTest(APITestCase):
@patch("sentry.integrations.github_enterprise.client.get_jwt")
@patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata")
def test_simple(self, mock_get_installation_metadata, mock_get_jwt):
mock_get_jwt.return_value = ""
project = self.project # force creation
url = "/extensions/github-enterprise/webhook/"
mock_get_installation_metadata.return_value = {
"url": "35.232.149.196",
"id": "2",
"name": "test-app",
"webhook_secret": "b3002c3e321d4b7880360d397db2ccfd",
"private_key": "private_key",
"verify_ssl": True,
}
Repository.objects.create(
organization_id=project.organization.id,
external_id="35129377",
provider="integrations:github_enterprise",
name="baxterthehacker/public-repo",
)
integration = Integration.objects.create(
external_id="35.232.149.196:12345",
provider="github_enterprise",
metadata={
"domain_name": "35.232.149.196/baxterthehacker",
"installation_id": "12345",
"installation": {"id": "2", "private_key": "private_key", "verify_ssl": True},
},
)
integration.add_organization(project.organization, self.user)
response = self.client.post(
path=url,
data=PUSH_EVENT_EXAMPLE_INSTALLATION,
content_type="application/json",
HTTP_X_GITHUB_EVENT="push",
HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196",
HTTP_X_HUB_SIGNATURE="sha1=2a0586cc46490b17441834e1e143ec3d8c1fe032",
HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()),
)
assert response.status_code == 204
commit_list = list(
Commit.objects.filter(
# organization_id=project.organization_id,
)
.select_related("author")
.order_by("-date_added")
)
assert len(commit_list) == 2
commit = commit_list[0]
assert commit.key == "133d60480286590a610a0eb7352ff6e02b9674c4"
assert commit.message == u"Update README.md (àgain)"
assert commit.author.name == u"bàxterthehacker"
assert commit.author.email == "[email protected]"
assert commit.author.external_id is None
assert commit.date_added == datetime(2015, 5, 5, 23, 45, 15, tzinfo=timezone.utc)
commit = commit_list[1]
assert commit.key == "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c"
assert commit.message == "Update README.md"
assert commit.author.name == u"bàxterthehacker"
assert commit.author.email == "[email protected]"
assert commit.author.external_id is None
assert commit.date_added == datetime(2015, 5, 5, 23, 40, 15, tzinfo=timezone.utc)
@patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata")
def test_anonymous_lookup(self, mock_get_installation_metadata):
project = self.project # force creation
url = "/extensions/github-enterprise/webhook/"
mock_get_installation_metadata.return_value = {
"url": "35.232.149.196",
"id": "2",
"name": "test-app",
"webhook_secret": "b3002c3e321d4b7880360d397db2ccfd",
"private_key": "private_key",
"verify_ssl": True,
}
integration = Integration.objects.create(
provider="github_enterprise",
external_id="35.232.149.196:12345",
name="octocat",
metadata={
"domain_name": "35.232.149.196/baxterthehacker",
"installation": {"id": "2", "private_key": "private_key", "verify_ssl": True},
},
)
integration.add_organization(project.organization, self.user)
Repository.objects.create(
organization_id=project.organization.id,
external_id="35129377",
provider="integrations:github_enterprise",
name="baxterthehacker/public-repo",
)
CommitAuthor.objects.create(
external_id="github_enterprise:baxterthehacker",
organization_id=project.organization_id,
email="[email protected]",
name=u"bàxterthehacker",
)
response = self.client.post(
path=url,
data=PUSH_EVENT_EXAMPLE_INSTALLATION,
content_type="application/json",
HTTP_X_GITHUB_EVENT="push",
HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196",
HTTP_X_HUB_SIGNATURE="sha1=2a0586cc46490b17441834e1e143ec3d8c1fe032",
HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()),
)
assert response.status_code == 204
commit_list = list(
Commit.objects.filter(organization_id=project.organization_id)
.select_related("author")
.order_by("-date_added")
)
# should be skipping the #skipsentry commit
assert len(commit_list) == 2
commit = commit_list[0]
assert commit.key == "133d60480286590a610a0eb7352ff6e02b9674c4"
assert commit.message == u"Update README.md (àgain)"
assert commit.author.name == u"bàxterthehacker"
assert commit.author.email == "[email protected]"
assert commit.date_added == datetime(2015, 5, 5, 23, 45, 15, tzinfo=timezone.utc)
commit = commit_list[1]
assert commit.key == "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c"
assert commit.message == "Update README.md"
assert commit.author.name == u"bàxterthehacker"
assert commit.author.email == "[email protected]"
assert commit.date_added == datetime(2015, 5, 5, 23, 40, 15, tzinfo=timezone.utc)
@patch("sentry.integrations.github_enterprise.client.get_jwt")
@patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata")
def test_multiple_orgs(self, mock_get_installation_metadata, mock_get_jwt):
mock_get_jwt.return_value = ""
project = self.project # force creation
url = "/extensions/github-enterprise/webhook/"
mock_get_installation_metadata.return_value = {
"url": "35.232.149.196",
"id": "2",
"name": "test-app",
"webhook_secret": "b3002c3e321d4b7880360d397db2ccfd",
"private_key": "private_key",
"verify_ssl": True,
}
Repository.objects.create(
organization_id=project.organization.id,
external_id="35129377",
provider="integrations:github_enterprise",
name="baxterthehacker/public-repo",
)
integration = Integration.objects.create(
external_id="35.232.149.196:12345",
provider="github_enterprise",
metadata={
"domain_name": "35.232.149.196/baxterthehacker",
"installation_id": "12345",
"installation": {"id": "2", "private_key": "private_key", "verify_ssl": True},
},
)
integration.add_organization(project.organization, self.user)
org2 = self.create_organization()
project2 = self.create_project(organization=org2, name="bar")
Repository.objects.create(
organization_id=project2.organization.id,
external_id="77",
provider="integrations:github_enterprise",
name="another/repo",
)
integration = Integration.objects.create(
external_id="35.232.149.196:99",
provider="github_enterprise",
metadata={
"domain_name": "35.232.149.196/another",
"installation": {
"installation_id": "99",
"id": "2",
"private_key": "private_key",
"verify_ssl": True,
},
},
)
integration.add_organization(org2, self.user)
response = self.client.post(
path=url,
data=PUSH_EVENT_EXAMPLE_INSTALLATION,
content_type="application/json",
HTTP_X_GITHUB_EVENT="push",
HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196",
HTTP_X_HUB_SIGNATURE="sha1=2a0586cc46490b17441834e1e143ec3d8c1fe032",
HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()),
)
assert response.status_code == 204
commit_list = list(
Commit.objects.filter(organization_id=project.organization_id)
.select_related("author")
.order_by("-date_added")
)
assert len(commit_list) == 2
commit_list = list(
Commit.objects.filter(organization_id=org2.id)
.select_related("author")
.order_by("-date_added")
)
assert len(commit_list) == 0
class PullRequestEventWebhook(APITestCase):
@patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata")
def test_opened(self, mock_get_installation_metadata):
project = self.project # force creation
url = "/extensions/github-enterprise/webhook/"
mock_get_installation_metadata.return_value = {
"url": "35.232.149.196",
"id": "2",
"name": "test-app",
"webhook_secret": "b3002c3e321d4b7880360d397db2ccfd",
"private_key": "private_key",
"verify_ssl": True,
}
integration = Integration.objects.create(
provider="github_enterprise",
external_id="35.232.149.196:234",
name="octocat",
metadata={
"domain_name": "35.232.149.196/baxterthehacker",
"installation": {"id": "2", "private_key": "private_key", "verify_ssl": True},
},
)
integration.add_organization(project.organization, self.user)
repo = Repository.objects.create(
organization_id=project.organization.id,
external_id="35129377",
provider="integrations:github_enterprise",
name="baxterthehacker/public-repo",
)
response = self.client.post(
path=url,
data=PULL_REQUEST_OPENED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="pull_request",
HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196",
HTTP_X_HUB_SIGNATURE="sha1=aa5b11bc52b9fac082cb59f9ee8667cb222c3aff",
HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()),
)
assert response.status_code == 204
prs = PullRequest.objects.filter(
repository_id=repo.id, organization_id=project.organization.id
)
assert len(prs) == 1
pr = prs[0]
assert pr.key == "1"
assert pr.message == u"This is a pretty simple change that we need to pull into master."
assert pr.title == u"Update the README with new information"
assert pr.author.name == u"baxterthehacker"
@patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata")
def test_edited(self, mock_get_installation_metadata):
project = self.project # force creation
url = "/extensions/github-enterprise/webhook/"
mock_get_installation_metadata.return_value = {
"url": "35.232.149.196",
"id": "2",
"name": "test-app",
"webhook_secret": "b3002c3e321d4b7880360d397db2ccfd",
"private_key": "private_key",
"verify_ssl": True,
}
integration = Integration.objects.create(
provider="github_enterprise",
external_id="35.232.149.196:234",
name="octocat",
metadata={
"domain_name": "35.232.149.196/baxterthehacker",
"installation": {"id": "2", "private_key": "private_key", "verify_ssl": True},
},
)
integration.add_organization(project.organization, self.user)
repo = Repository.objects.create(
organization_id=project.organization.id,
external_id="35129377",
provider="integrations:github_enterprise",
name="baxterthehacker/public-repo",
)
pr = PullRequest.objects.create(
key="1", repository_id=repo.id, organization_id=project.organization.id
)
response = self.client.post(
path=url,
data=PULL_REQUEST_EDITED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="pull_request",
HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196",
HTTP_X_HUB_SIGNATURE="sha1=b50a13afd33b514e8e62e603827ea62530f0690e",
HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()),
)
assert response.status_code == 204
pr = PullRequest.objects.get(id=pr.id)
assert pr.key == "1"
assert pr.message == u"new edited body"
assert pr.title == u"new edited title"
assert pr.author.name == u"baxterthehacker"
@patch("sentry.integrations.github_enterprise.webhook.get_installation_metadata")
def test_closed(self, mock_get_installation_metadata):
project = self.project # force creation
url = "/extensions/github-enterprise/webhook/"
mock_get_installation_metadata.return_value = {
"url": "35.232.149.196",
"id": "2",
"name": "test-app",
"webhook_secret": "b3002c3e321d4b7880360d397db2ccfd",
"private_key": "private_key",
"verify_ssl": True,
}
integration = Integration.objects.create(
provider="github_enterprise",
external_id="35.232.149.196:234",
name="octocat",
metadata={
"domain_name": "35.232.149.196/baxterthehacker",
"installation": {"id": "2", "private_key": "private_key", "verify_ssl": True},
},
)
integration.add_organization(project.organization, self.user)
repo = Repository.objects.create(
organization_id=project.organization.id,
external_id="35129377",
provider="integrations:github_enterprise",
name="baxterthehacker/public-repo",
)
response = self.client.post(
path=url,
data=PULL_REQUEST_CLOSED_EVENT_EXAMPLE,
content_type="application/json",
HTTP_X_GITHUB_EVENT="pull_request",
HTTP_X_GITHUB_ENTERPRISE_HOST="35.232.149.196",
HTTP_X_HUB_SIGNATURE="sha1=dff1c803cf1e48c1b9aefe4a17952ea132758806",
HTTP_X_GITHUB_DELIVERY=six.text_type(uuid4()),
)
assert response.status_code == 204
prs = PullRequest.objects.filter(
repository_id=repo.id, organization_id=project.organization.id
)
assert len(prs) == 1
pr = prs[0]
assert pr.key == "1"
assert pr.message == u"new closed body"
assert pr.title == u"new closed title"
assert pr.author.name == u"baxterthehacker"
assert pr.merge_commit_sha == "0d1a26e67d8f5eaf1f6ba5c57fc3c7d91ac0fd1c"
| [
[
[
47,
62
]
],
[
[
71,
74
],
[
1231,
1234
],
[
1870,
1873
],
[
2809,
2812
],
[
3739,
3742
],
[
5600,
5603
],
[
8679,
8682
],
[
12384,
12387
],
[
14565,
14568
],
[
16827,
16830
],
[
18751,
18754
]
],
[
[
97,
105
],
[
6327,
6335
],
[
6761,
6769
],
[
9363,
9371
],
[
9735,
9743
]
],
[
[
131,
139
],
[
6367,
6375
],
[
6801,
6809
],
[
9403,
9411
],
[
9775,
9783
]
],
[
[
166,
172
],
[
5719,
5725
],
[
8798,
8804
],
[
12503,
12509
],
[
12730,
12736
]
],
[
[
174,
186
],
[
8068,
8080
]
],
[
[
188,
199
],
[
4788,
4799
],
[
7397,
7408
],
[
10726,
10737
],
[
11519,
11530
],
[
13516,
13527
],
[
15642,
15653
],
[
17702,
17713
]
],
[
[
201,
212
],
[
14658,
14669
],
[
16323,
16334
],
[
16919,
16930
],
[
18844,
18855
]
],
[
[
214,
224
],
[
4537,
4547
],
[
7830,
7840
],
[
10475,
10485
],
[
11288,
11298
],
[
13954,
13964
],
[
16080,
16090
],
[
18140,
18150
]
],
[
[
254,
265
],
[
535,
546
],
[
3845,
3856
],
[
12931,
12942
]
],
[
[
283,
288
],
[
1245,
1250
],
[
1884,
1889
],
[
2823,
2828
],
[
3753,
3758
],
[
5614,
5619
],
[
8693,
8698
],
[
12398,
12403
],
[
14579,
14584
],
[
16841,
16846
],
[
18765,
18770
]
],
[
[
319,
350
],
[
1021,
1052
],
[
1562,
1593
],
[
2514,
2545
],
[
3526,
3557
],
[
5305,
5336
],
[
8384,
8415
],
[
12089,
12120
]
],
[
[
356,
389
],
[
14260,
14293
]
],
[
[
395,
428
],
[
16522,
16555
]
],
[
[
434,
467
],
[
18446,
18479
]
],
[
[
509,
514
],
[
1953,
1958
],
[
2892,
2897
],
[
3864,
3869
],
[
3931,
3936
],
[
6821,
6826
],
[
9795,
9800
],
[
9862,
9867
],
[
12950,
12955
],
[
15076,
15081
],
[
17136,
17141
]
],
[
[
523,
534
]
],
[
[
3824,
3844
]
],
[
[
12907,
12930
]
]
] |
"""
Calls the Turbomole executable.
"""
import os
import re
from decimal import Decimal
from pathlib import Path
from typing import Any, Dict, Optional, Tuple
from qcelemental.models import AtomicResult, Provenance, BasisSet
from qcelemental.util import safe_version, which
from ...exceptions import InputError
from ..model import ProgramHarness
from ..qcvar_identities_resources import build_atomicproperties, build_out
from ...util import execute, temporary_directory
from .define import execute_define, prepare_stdin
from .harvester import harvest
from .methods import KEYWORDS, METHODS
class TurbomoleHarness(ProgramHarness):
_defaults = {
"name": "Turbomole",
"scratch": True,
"thread_safe": False,
"thread_parallel": False,
"node_parallel": True,
"managed_memory": True,
}
version_cache: Dict[str, str] = {}
@staticmethod
def found(raise_error: bool = False) -> bool:
return which(
"define",
return_bool=True,
raise_error=raise_error,
raise_msg="Please install via http://www.cosmologic.de/turbomole/home.html",
)
def get_version(self) -> str:
which_prog = which("define")
if which_prog not in self.version_cache:
            # We use basically a dummy stdin as we don't want to pipe any real
# input into define. We only want to parse the version number from
# the string.
with temporary_directory(suffix="_define_scratch") as tmpdir:
tmpdir = Path(tmpdir)
stdout = execute_define("\n", cwd=tmpdir)
# Tested with V7.3 and V7.4.0
                version_re = re.compile(r"TURBOMOLE (?:rev\. )?(V.+?)\s+")
mobj = version_re.search(stdout)
version = mobj[1]
self.version_cache[which_prog] = safe_version(version)
return self.version_cache[which_prog]
def compute(self, input_model: "AtomicInput", config: "TaskConfig") -> "AtomicResult":
self.found(raise_error=True)
job_inputs = self.build_input(input_model, config)
success, dexe = self.execute(job_inputs)
# TODO: handle input errors?! But then define probably already crashed...
# if 'There is an error in the input file' in dexe["stdout"]:
# raise InputError(dexe["stdout"])
if success:
dexe["outfiles"]["stdout"] = dexe["stdout"]
dexe["outfiles"]["stderr"] = dexe["stderr"]
return self.parse_output(dexe["outfiles"], input_model)
def sub_control(self, control, pattern, repl, **kwargs):
control_subbed = re.sub(pattern, repl, control, **kwargs)
return control_subbed
def append_control(self, control, to_append):
        return self.sub_control(control, r"\$end", f"{to_append}\n$end")
def build_input(
self, input_model: "AtomicInput", config: "TaskConfig", template: Optional[str] = None
) -> Dict[str, Any]:
# The 'define' wrapper can only handle normal string basis set input. If
# a QCSchema basis set is given we break early, because this is not handled
# right now.
if isinstance(input_model.model.basis, BasisSet):
raise InputError("QCSchema BasisSet for model.basis not implemented. Use string basis name.")
turbomolerec = {
"infiles": {},
"outfiles": {"control": "control"},
"scratch_directory": config.scratch_directory,
}
# Handle molecule
# TODO: what's up with moldata? Do I need it?
coord_str, moldata = input_model.molecule.to_string(dtype="turbomole", return_data=True)
# Prepare stdin for define call
model = input_model.model
        # geoopt will hold the state for which to calculate the gradient.
# 'x' corresponds to the ground state, 'a 1' would be the GS too.
# 'a1 2' would be the 1st excited state of the irreducible group A1.
# Right now only GS are supported, so this is hardcoded as 'x'.
geoopt = "x" if input_model.driver.derivative_int() > 0 else ""
stdin, subs = prepare_stdin(
model.method,
model.basis,
input_model.keywords,
input_model.molecule.molecular_charge,
input_model.molecule.molecular_multiplicity,
geoopt,
)
with temporary_directory(suffix="_define_scratch") as tmpdir:
tmpdir = Path(tmpdir)
with open(tmpdir / "coord", "w") as handle:
handle.write(coord_str)
stdout = execute_define(stdin, cwd=tmpdir)
# The define scratch will be populated by some files that we want to keep
to_keep = "basis auxbasis coord control alpha beta mos".split()
for fn in to_keep:
full_fn = tmpdir / fn
if not full_fn.exists():
continue
with open(full_fn) as handle:
turbomolerec["infiles"][fn] = handle.read()
env = os.environ.copy()
env["PARA_ARCH"] = "SMP"
env["PARNODES"] = str(config.ncores)
env["SMPCPUS"] = str(config.ncores)
turbomolerec["environment"] = env
# Memory is set in the control file
keywords = input_model.keywords
########################
# DETERMINE SOME FLAGS #
########################
ri_calculation = any([keywords.get(ri_kw, False) for ri_kw in KEYWORDS["ri"]])
ricc2_calculation = model.method in METHODS["ricc2"]
###################
# MEMORY HANDLING #
###################
# Central file that controls Turbomole. We assign it here to the "control"
# variable as we may need to modify it, e.g. for a Hessian calculation.
control = turbomolerec["infiles"]["control"]
# Calculate total available memory in MB
mem_mb = config.memory * (1024 ** 3) / 1e6
ri_fraction = 0.25
# Total amount of memory allocated to ricore
ricore = 0
if ri_calculation:
# This is the default given by Turbomole
ricore = mem_mb * ri_fraction
ri_per_core = int(ricore / config.ncores)
# Update $ricore entry in the control file
            control = self.sub_control(control, r"\$ricore\s+(\d+)", f"$ricore {ri_per_core} MiB per_core")
# Calculate remaining memory
maxcor = mem_mb - ricore
        assert maxcor > 0, f"Not enough memory for maxcor! Need {-maxcor} MB more!"
# maxcore per_core
per_core = int(maxcor / config.ncores)
# Update $maxcor entry in the control file
        control = self.sub_control(control, r"\$maxcor\s+(\d+)\s+MiB\s+per_core", f"$maxcor {per_core} MiB per_core")
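        # Worked example (illustrative numbers, not from the original source):
        # with config.memory = 8 GB, config.ncores = 4 and an RI calculation,
        # mem_mb is roughly 8590 MB, so ricore is roughly 2147 MB
        # ($ricore 536 MiB per_core) and maxcor roughly 6442 MB
        # ($maxcor 1610 MiB per_core).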
############################
# DETERMINE SHELL COMMANDS #
############################
# ----------------------#
# | Energy calculations |
# ----------------------#
# Set appropriate commands. We always need a reference wavefunction
# so the first command will be dscf or ridft to converge the SCF.
commands = ["ridft"] if ri_calculation else ["dscf"]
# ------------------------#
# | Gradient calculations |
# ------------------------#
# Keep the gradient file for parsing
if input_model.driver.derivative_int() == 1:
turbomolerec["outfiles"]["gradient"] = "gradient"
# ricc2 will also calculate the gradient. But this requires setting
# 'geoopt (state)' in the control file. This is currently handled in the
# 'define' call.
if ricc2_calculation:
commands.append("ricc2")
# Gradient calculation for DFT/HF
elif input_model.driver.derivative_int() == 1:
grad_command = "rdgrad" if ri_calculation else "grad"
commands.append(grad_command)
# -----------------------#
# | Hessian calculations |
# -----------------------#
if input_model.driver.derivative_int() == 2:
freq_command = "NumForce -level cc2" if ricc2_calculation else "aoforce"
# NumForce seems to ignore the nprhessian command and will always
# write to hessian
hessian_outfile = "hessian" if ricc2_calculation else "nprhessian"
commands.append(freq_command)
# Add some keywords to the control file
# noproj: Don't project out translation and rotation
# nprhessian: Set filename of un-projected hessian
control = self.append_control(control, "$noproj\n$nprhessian file=nprhessian")
turbomolerec["outfiles"][hessian_outfile] = None
# Build the full shell command and set it
command = ["; ".join(commands)]
turbomolerec["command"] = command
# Re-assign the potentially modified control file, e.g. for a Hessian calculation
turbomolerec["infiles"]["control"] = control
# TODO: check if the chosen commands are available with which()?
return turbomolerec
def execute(
self, inputs: Dict[str, Any], *, extra_outfiles=None, extra_commands=None, scratch_name=None, timeout=None
) -> Tuple[bool, Dict]:
success, dexe = execute(
inputs["command"],
inputs["infiles"],
inputs["outfiles"],
shell=True,
# TODO: scratch_messy?
# scratch_messy=False,
)
return success, dexe
def parse_output(
self, outfiles: Dict[str, str], input_model: "AtomicInput"
) -> "AtomicResult": # lgtm: [py/similar-function]
stdout = outfiles.pop("stdout")
qcvars, gradient, hessian = harvest(input_model.molecule, stdout, **outfiles)
if gradient is not None:
qcvars["CURRENT GRADIENT"] = gradient
if hessian is not None:
qcvars["CURRENT HESSIAN"] = hessian
retres = qcvars[f"CURRENT {input_model.driver.upper()}"]
if isinstance(retres, Decimal):
retres = float(retres)
build_out(qcvars)
atprop = build_atomicproperties(qcvars)
output_data = input_model.dict()
output_data["extras"]["outfiles"] = outfiles
output_data["properties"] = atprop
output_data["provenance"] = Provenance(creator="Turbomole", version=self.get_version(), routine="turbomole")
output_data["return_result"] = retres
output_data["stdout"] = stdout
output_data["success"] = True
return AtomicResult(**output_data)
| [
[
[
47,
49
],
[
5070,
5072
]
],
[
[
57,
59
],
[
1700,
1702
],
[
2655,
2657
]
],
[
[
80,
87
],
[
10121,
10128
]
],
[
[
108,
112
],
[
1562,
1566
],
[
4479,
4483
]
],
[
[
132,
135
],
[
2985,
2988
],
[
9214,
9217
]
],
[
[
137,
141
],
[
860,
864
],
[
2975,
2979
],
[
9318,
9322
],
[
9204,
9208
],
[
9633,
9637
]
],
[
[
143,
151
],
[
2945,
2953
]
],
[
[
153,
158
],
[
9306,
9311
]
],
[
[
191,
203
],
[
10635,
10647
]
],
[
[
205,
215
],
[
10415,
10425
]
],
[
[
217,
225
],
[
3225,
3233
]
],
[
[
255,
267
],
[
1865,
1877
]
],
[
[
269,
274
],
[
964,
969
],
[
1215,
1220
]
],
[
[
302,
312
],
[
3254,
3264
]
],
[
[
333,
347
],
[
617,
631
]
],
[
[
389,
411
],
[
10210,
10232
]
],
[
[
413,
422
],
[
10175,
10184
]
],
[
[
443,
450
],
[
9350,
9357
]
],
[
[
452,
471
],
[
1480,
1499
],
[
4401,
4420
]
],
[
[
492,
506
],
[
1600,
1614
],
[
4609,
4623
]
],
[
[
508,
521
],
[
4150,
4163
]
],
[
[
545,
552
],
[
9810,
9817
]
],
[
[
574,
582
],
[
5508,
5516
]
],
[
[
584,
591
],
[
5569,
5576
]
],
[
[
600,
616
]
]
] |
from datetime import datetime
from functools import partial
from typing import Callable, List, Union
from symbiotic.schedule import Schedule
class Action(object):
def __init__(self, callback: Callable, *args, **kwargs):
self._callback: partial = partial(callback, *args, **kwargs)
self._schedule: Union[Schedule, None] = None
self._next_execution: Union[datetime, None] = None
def __repr__(self):
rep = f'{self.__class__.__qualname__}:'
rep += f' {self._callback.func.__name__},'
rep += f' args: {self._callback.args},'
rep += f' kwargs: {self._callback.keywords}'
return rep
def __call__(self):
return self._callback()
def set_schedule(self, schedule: Schedule) -> None:
self._schedule = schedule
self.schedule_next_execution()
def should_execute(self):
return datetime.now() > self._next_execution
def schedule_next_execution(self):
datetimes = [instant.next_datetime() for instant in self._schedule.instants()]
self._next_execution = min(datetimes) # get the earliest execution datetime
class ActionScheduler(object):
def __init__(self):
self.actions: List[Action] = []
self._schedule: Union[Schedule, None] = None
def start_session(self, schedule: Schedule):
self._schedule = schedule
def add(self, callback: Callable, *args, **kwargs):
        action = Action(callback, *args, **kwargs)
action.set_schedule(self._schedule)
self.actions.append(action)
return action
def end_session(self):
self._schedule = None
def run(self):
for action in self.actions[:]:
if action.should_execute():
action()
action.schedule_next_execution()
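# Minimal usage sketch (illustrative, not part of the original module); how the
# Schedule instance itself is constructed is omitted because that API lives in
# symbiotic.schedule.
# scheduler = ActionScheduler()
# scheduler.start_session(schedule)   # schedule: a symbiotic Schedule
# scheduler.add(print, 'lights on')   # wraps the callback in a scheduled Action
# scheduler.end_session()
# ...later, e.g. inside an event loop:
# scheduler.run()                     # runs due actions and reschedules them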
| [
[
[
21,
29
],
[
386,
394
],
[
886,
894
]
],
[
[
52,
59
],
[
262,
269
],
[
252,
259
]
],
[
[
79,
87
],
[
200,
208
],
[
1400,
1408
]
],
[
[
89,
93
],
[
1216,
1220
]
],
[
[
95,
100
],
[
321,
326
],
[
380,
385
],
[
1258,
1263
]
],
[
[
133,
141
],
[
327,
335
],
[
748,
756
],
[
1264,
1272
],
[
1326,
1334
]
],
[
[
150,
156
],
[
1221,
1227
],
[
1445,
1451
]
],
[
[
1144,
1159
]
]
] |
import os
import json
def combine_schema(borough_name):
    borough_name = borough_name.lower()
    # Load the borough schema and the neighborhood schema once, copy every
    # zip-code entry across, then write the combined schema back out.
    with open('../scraped_data/borough_schema/' + borough_name + ".json", 'r', encoding='utf-8') as json_file:
        data = json.load(json_file)
    with open('../scraped_data/neighborhood_schema/' + borough_name + ".json", 'r', encoding='utf-8') as zipcode_file:
        neighborhood_data = json.load(zipcode_file)
    for i in range(len(data[borough_name])):
        neighborhood_data[borough_name][i]["zipCodes"] = data[borough_name][i]["zipCodes"]
    print(neighborhood_data)
    with open('../scraped_data/neighborhood_schema/' + borough_name + ".json", 'w', encoding='utf-8') as combined_file:
        json.dump(neighborhood_data, combined_file, sort_keys=True, indent='\t', separators=(',', ': '))
def main():
borough_files = os.listdir("./boroughs")
for borough in borough_files:
name = borough.split(".")[0].replace("-", " ").title()
parse_borough = input(name + " => ")
if parse_borough != "skip":
convert_to_json = input("Convert " + name + " data to json format? (yes/no) => ")
if convert_to_json == "yes":
print("Writing to file ...")
combine_schema(name)
else:
print("Will not convert data json ...")
else:
print("Skipping borough: " + name + " ... ")
if __name__ == '__main__':
main() | [
[
[
7,
9
],
[
953,
955
]
],
[
[
17,
21
],
[
251,
255
],
[
492,
496
],
[
822,
826
]
],
[
[
28,
42
],
[
1352,
1366
]
],
[
[
925,
929
],
[
1551,
1555
]
]
] |
"""Implementations of metrics for 3D semantic segmentation."""
import tensorflow as tf
def average_volume_difference():
raise NotImplementedError()
def dice(y_true, y_pred, axis=(1, 2, 3, 4)):
"""Calculate Dice similarity between labels and predictions.
Dice similarity is in [0, 1], where 1 is perfect overlap and 0 is no
overlap. If both labels and predictions are empty (e.g., all background),
then Dice similarity is 1.
If we assume the inputs are rank 5 [`(batch, x, y, z, classes)`], then an
axis parameter of `(1, 2, 3)` will result in a tensor that contains a Dice
score for every class in every item in the batch. The shape of this tensor
will be `(batch, classes)`. If the inputs only have one class (e.g., binary
segmentation), then an axis parameter of `(1, 2, 3, 4)` should be used.
This will result in a tensor of shape `(batch,)`, where every value is the
Dice similarity for that prediction.
Implemented according to https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4533825/#Equ6
Returns
-------
Tensor of Dice similarities.
Citations
---------
Taha AA, Hanbury A. Metrics for evaluating 3D medical image segmentation:
analysis, selection, and tool. BMC Med Imaging. 2015;15:29. Published 2015
Aug 12. doi:10.1186/s12880-015-0068-x
"""
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.cast(y_true, y_pred.dtype)
eps = tf.keras.backend.epsilon()
intersection = tf.reduce_sum(y_true * y_pred, axis=axis)
summation = tf.reduce_sum(y_true, axis=axis) + tf.reduce_sum(y_pred, axis=axis)
return (2 * intersection + eps) / (summation + eps)
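# A minimal sketch (not part of the original module), assuming rank-5 one-hot
# inputs of shape (batch, x, y, z, classes): reducing over the spatial axes
# only keeps the class axis, so the result has shape (batch, classes).
def dice_per_class(y_true, y_pred):
    return dice(y_true, y_pred, axis=(1, 2, 3))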
def generalized_dice(y_true, y_pred, axis=(1, 2, 3)):
"""Calculate Generalized Dice similarity. This is useful for multi-class
predictions.
If we assume the inputs are rank 5 [`(batch, x, y, z, classes)`], then an
axis parameter of `(1, 2, 3)` should be used. This will result in a tensor
of shape `(batch,)`, where every value is the Generalized Dice similarity
for that prediction, across all classes.
Returns
-------
Tensor of Generalized Dice similarities.
"""
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.cast(y_true, y_pred.dtype)
if y_true.get_shape().ndims < 2 or y_pred.get_shape().ndims < 2:
raise ValueError("y_true and y_pred must be at least rank 2.")
epsilon = tf.keras.backend.epsilon()
w = tf.math.reciprocal(tf.square(tf.reduce_sum(y_true, axis=axis)))
w = tf.where(tf.math.is_finite(w), w, epsilon)
    num = 2 * tf.reduce_sum(w * tf.reduce_sum(y_true * y_pred, axis=axis), axis=-1)
    den = tf.reduce_sum(w * tf.reduce_sum(y_true + y_pred, axis=axis), axis=-1)
    gdice = num / den
gdice = tf.where(tf.math.is_finite(gdice), gdice, tf.zeros_like(gdice))
return gdice
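# Reference note (added, not from the original docstring): with per-class
# weights w_c = 1 / (sum_i y_true[i, c]) ** 2, the function above computes
#   2 * sum_c(w_c * sum_i(y_true * y_pred)) / sum_c(w_c * sum_i(y_true + y_pred))
# which reduces to plain Dice when there is a single class.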
def hamming(y_true, y_pred, axis=(1, 2, 3)):
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.cast(y_true, y_pred.dtype)
    return tf.reduce_mean(tf.cast(tf.not_equal(y_pred, y_true), y_pred.dtype), axis=axis)
def haussdorf():
    raise NotImplementedError()
def jaccard(y_true, y_pred, axis=(1, 2, 3, 4)):
"""Calculate Jaccard similarity between labels and predictions.
Jaccard similarity is in [0, 1], where 1 is perfect overlap and 0 is no
overlap. If both labels and predictions are empty (e.g., all background),
then Jaccard similarity is 1.
If we assume the inputs are rank 5 [`(batch, x, y, z, classes)`], then an
axis parameter of `(1, 2, 3)` will result in a tensor that contains a Jaccard
score for every class in every item in the batch. The shape of this tensor
will be `(batch, classes)`. If the inputs only have one class (e.g., binary
segmentation), then an axis parameter of `(1, 2, 3, 4)` should be used.
This will result in a tensor of shape `(batch,)`, where every value is the
Jaccard similarity for that prediction.
Implemented according to https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4533825/#Equ7
Returns
-------
Tensor of Jaccard similarities.
Citations
---------
Taha AA, Hanbury A. Metrics for evaluating 3D medical image segmentation:
analysis, selection, and tool. BMC Med Imaging. 2015;15:29. Published 2015
Aug 12. doi:10.1186/s12880-015-0068-x
"""
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.cast(y_true, y_pred.dtype)
eps = tf.keras.backend.epsilon()
intersection = tf.reduce_sum(y_true * y_pred, axis=axis)
union = tf.reduce_sum(y_true, axis=axis) + tf.reduce_sum(y_pred, axis=axis)
return (intersection + eps) / (union - intersection + eps)
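# Illustrative helper (not part of the original module): Jaccard and Dice are
# related by J = D / (2 - D), which is handy for cross-checking the two
# implementations on the same inputs (up to the epsilon smoothing).
def jaccard_from_dice(y_true, y_pred, axis=(1, 2, 3, 4)):
    d = dice(y_true, y_pred, axis=axis)
    return d / (2.0 - d)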
def tversky(y_true, y_pred, axis=(1, 2, 3), alpha=0.3, beta=0.7):
y_pred = tf.convert_to_tensor(y_pred)
y_true = tf.cast(y_true, y_pred.dtype)
if y_true.get_shape().ndims < 2 or y_pred.get_shape().ndims < 2:
raise ValueError("y_true and y_pred must be at least rank 2.")
eps = tf.keras.backend.epsilon()
num = tf.reduce_sum(y_pred * y_true, axis=axis)
den = (
num
+ alpha * tf.reduce_sum(y_pred * (1 - y_true), axis=axis)
+ beta * tf.reduce_sum((1 - y_pred) * y_true, axis=axis)
)
# Sum over classes.
return tf.reduce_sum((num + eps) / (den + eps), axis=-1)
def dice_coef_multilabel(y_true, y_pred):
    n_classes = tf.shape(y_pred)[-1]
    dice_coeff = 0
for index in range(n_classes):
dice_coeff -= dice(y_true[:,:,:,:,index], y_pred[:,:,:,:,index])
return dice_coeff
| [
[
[
71,
87
],
[
1367,
1369
],
[
1409,
1411
],
[
1449,
1451
],
[
1496,
1498
],
[
1554,
1556
],
[
1589,
1591
],
[
2200,
2202
],
[
2242,
2244
],
[
2428,
2430
],
[
2468,
2470
],
[
2487,
2489
],
[
2497,
2499
],
[
2540,
2542
],
[
2549,
2551
],
[
2597,
2599
],
[
2615,
2617
],
[
2678,
2680
],
[
2696,
2698
],
[
2781,
2783
],
[
2790,
2792
],
[
2823,
2825
],
[
2922,
2924
],
[
2964,
2966
],
[
3005,
3007
],
[
3020,
3022
],
[
4344,
4346
],
[
4386,
4388
],
[
4426,
4428
],
[
4473,
4475
],
[
4527,
4529
],
[
4562,
4564
],
[
4739,
4741
],
[
4781,
4783
],
[
4963,
4965
],
[
5001,
5003
],
[
5085,
5087
],
[
5150,
5152
],
[
5239,
5241
],
[
5347,
5349
]
],
[
[
94,
119
]
],
[
[
161,
165
],
[
5442,
5446
]
],
[
[
1684,
1700
]
],
[
[
2868,
2875
]
],
[
[
3067,
3076
]
],
[
[
3117,
3124
]
],
[
[
4664,
4671
]
],
[
[
5294,
5314
]
]
] |
# Copyright 2016 Bridgewater Associates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.tests.watchers.vpc.test_peering
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Bridgewater OSS <[email protected]>
"""
from security_monkey.tests.watchers import SecurityMonkeyWatcherTestCase
from security_monkey.watchers.vpc.peering import Peering
import boto
from moto import mock_sts, mock_ec2
from freezegun import freeze_time
class PeeringWatcherTestCase(SecurityMonkeyWatcherTestCase):
@freeze_time("2016-07-18 12:00:00")
@mock_sts
@mock_ec2
def test_slurp(self):
conn = boto.connect_vpc('the_key', 'the secret')
vpc = conn.create_vpc("10.0.0.0/16")
peer_vpc = conn.create_vpc("10.0.0.0/16")
conn.create_vpc_peering_connection(vpc.id, peer_vpc.id)
watcher = Peering(accounts=[self.account.name])
item_list, exception_map = watcher.slurp()
self.assertIs(
expr1=len(item_list),
expr2=1,
msg="Watcher should have 1 item but has {}".format(len(item_list)))
| [
[
[
842,
871
],
[
1043,
1072
]
],
[
[
921,
928
],
[
1406,
1413
]
],
[
[
937,
941
],
[
1185,
1189
]
],
[
[
959,
967
],
[
1121,
1129
]
],
[
[
969,
977
],
[
1135,
1143
]
],
[
[
1000,
1011
],
[
1081,
1092
]
],
[
[
1020,
1042
]
]
] |
import numpy as np
from sklearn import metrics
import math
from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer
from typing import *
# fastai utility
def listify(o):
if o is None: return []
if isinstance(o, list): return o
if isinstance(o, str): return [o]
if isinstance(o, Iterable): return list(o)
return [o]
def compose(x, funcs, *args, **kwargs):
for f in listify(funcs):
x = f(x, **kwargs)
return x
class Onehotify():
def __init__(self, vocab_size):
self.vocab_size = vocab_size
self.tokenizer = Tokenizer(num_words=vocab_size)
def __call__(self, item):
return self.tokenizer.sequences_to_matrix([item], mode='binary')
class Padify():
def __init__(self, maxlen):
self.maxlen = maxlen
def __call__(self, item):
return sequence.pad_sequences([item], maxlen=self.maxlen)
class YOnehotify():
def __init__(self, num_classes):
self.num_classes = num_classes
def __call__(self, item):
categorical = np.zeros((1, self.num_classes))
categorical[0, item] = 1
return categorical
class Dataset():
def __init__(self, x, y, tfms_x, tfms_y):
self.x, self.y = x, y
self.x_tfms, self.y_tfms = tfms_x, tfms_y
def __len__(self):
return len(self.x)
def _get_transform(self, i, tfms):
return compose(i, tfms)
def __getitem__(self, i):
batch_x, batch_y = self.x[i], self.y[i]
return_x, return_y = [], []
if isinstance(i, slice):
return_x = [self._get_transform(o, self.x_tfms) for o in batch_x]
if isinstance(i, slice):
return_y = [self._get_transform(o, self.y_tfms) for o in batch_y]
return np.vstack(return_x), np.vstack(return_y)
class DataLoader():
def __init__(self, ds, bs, drop_last=True): self.ds, self.bs, self.drop_last = ds, bs, drop_last
def __iter__(self):
length = len(self.ds) // self.bs if self.drop_last else math.ceil(len(self.ds) / self.bs)
for i in range(0, length, 1):
yield self.ds[(i*self.bs):(i*self.bs)+self.bs] | [
[
[
7,
18
],
[
1054,
1056
],
[
1766,
1768
],
[
1787,
1789
]
],
[
[
39,
46
]
],
[
[
54,
58
],
[
2017,
2021
]
],
[
[
91,
99
],
[
854,
862
]
],
[
[
137,
146
],
[
596,
605
]
],
[
[
166,
167
],
[
326,
334
]
],
[
[
190,
197
],
[
421,
428
]
],
[
[
372,
379
],
[
1396,
1403
]
],
[
[
485,
494
]
],
[
[
738,
744
]
],
[
[
912,
922
]
],
[
[
1153,
1160
]
],
[
[
1814,
1824
]
]
] |
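A brief usage sketch of the pieces in the sample above; `x_train` and `y_train` are placeholders for a list of integer token sequences and a list of integer class labels, and the sizes are illustrative.

# Usage sketch (assumed inputs): x_train is a list of token-id lists, y_train a list of int labels.
vocab_size, num_classes, bs = 10000, 2, 64
x_tfms = [Onehotify(vocab_size)]      # each item -> (1, vocab_size) binary matrix
y_tfms = [YOnehotify(num_classes)]    # each label -> (1, num_classes) one-hot row
train_ds = Dataset(x_train, y_train, x_tfms, y_tfms)
train_dl = DataLoader(train_ds, bs)
for xb, yb in train_dl:               # xb: (bs, vocab_size), yb: (bs, num_classes)
    pass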
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 30, transform = "RelativeDifference", sigma = 0.0, exog_count = 20, ar_order = 12); | [
[
[
7,
37
]
],
[
[
45,
100
],
[
105,
108
]
]
] |
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['BoxCox'] , ['ConstantTrend'] , ['Seasonal_DayOfMonth'] , ['MLP'] ); | [
[
[
7,
75
],
[
78,
85
]
]
] |
from PIL import Image
import numpy as np
import colorsys
import os, sys
import argparse
import matplotlib.pyplot as plt
rgb_to_hsv = np.vectorize(colorsys.rgb_to_hsv)
hsv_to_rgb = np.vectorize(colorsys.hsv_to_rgb)
def crop(image, box=None):
if box:
imageBox = box
else:
imageBox = image.getbbox()
return image.crop(imageBox)
def hue_shift(image, value):
im = image.convert('RGBA')
arr = np.array(np.asarray(im).astype(float))
r,g,b,a = np.rollaxis(arr, axis=-1)
# print(np.max(r))
h,s,v = rgb_to_hsv(r, g, b)
r, g, b = hsv_to_rgb((h + value/360.0) % 1.0, s, v)
arr = np.dstack((r, g, b, a))
# print(np.max(r))
# plt.imshow(arr.astype(int), aspect='auto')
# plt.show()
return Image.fromarray(arr.astype('uint8'), 'RGBA')
parser = argparse.ArgumentParser(description='Rainbow an image batch')
parser.add_argument('--filename', dest='filename', type=str)
parser.add_argument('--step', dest='step', type=float, default=5.0)
parser.add_argument('--max_step', dest='max_step', type=float, default=360.0)
args = parser.parse_args()
color_image = Image.open(args.filename)
basename = os.path.basename(args.filename)
base, ext = os.path.splitext(basename)
if not os.path.exists('anim'):
os.mkdir('anim')
for n in range(0, int(args.max_step/args.step)):
dtheta = n*args.step
print('Writing out', dtheta)
cropped = crop(color_image, (1620, 780, 2220, 1380))
new_im = hue_shift(cropped, dtheta)
new_fn = os.path.join('anim','{0}_{1}{2}'.format(base, n, ext))
n += 1
new_im.save(new_fn) | [
[
[
16,
21
],
[
1130,
1135
],
[
754,
759
]
],
[
[
29,
40
],
[
136,
138
],
[
183,
185
],
[
428,
430
],
[
437,
439
],
[
481,
483
],
[
628,
630
]
],
[
[
48,
56
],
[
149,
157
],
[
196,
204
]
],
[
[
64,
66
],
[
1168,
1170
],
[
1212,
1214
],
[
1247,
1249
],
[
1275,
1277
],
[
1510,
1512
]
],
[
[
68,
71
]
],
[
[
79,
87
],
[
809,
817
]
],
[
[
95,
119
]
],
[
[
123,
133
],
[
542,
552
]
],
[
[
170,
180
],
[
576,
586
]
],
[
[
222,
226
],
[
1414,
1418
]
],
[
[
362,
371
],
[
1470,
1479
]
],
[
[
800,
806
],
[
871,
877
],
[
932,
938
],
[
1009,
1015
],
[
1095,
1101
]
],
[
[
1088,
1092
],
[
1141,
1145
],
[
1185,
1189
],
[
1315,
1319
],
[
1329,
1333
],
[
1357,
1361
]
],
[
[
1116,
1127
],
[
1419,
1430
]
],
[
[
1157,
1165
],
[
1229,
1237
]
],
[
[
1200,
1204
],
[
1550,
1554
]
],
[
[
1206,
1209
],
[
1559,
1562
]
],
[
[
1297,
1298
],
[
1355,
1356
],
[
1556,
1557
],
[
1569,
1570
]
],
[
[
1346,
1352
],
[
1392,
1398
],
[
1489,
1495
]
],
[
[
1404,
1411
],
[
1480,
1487
]
],
[
[
1461,
1467
],
[
1580,
1586
]
],
[
[
1501,
1507
],
[
1592,
1598
]
]
] |
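A hypothetical programmatic use of the `crop` and `hue_shift` helpers above, outside the argparse-driven loop; the file names are placeholders.

im = Image.open('input.png')                 # placeholder input file
shifted = hue_shift(crop(im), 120.0)         # rotate hue by 120 degrees
shifted.save('input_shifted.png')            # RGBA result saved as PNG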
# This is a simple application for alert system
from tkinter import *
from tkinter import messagebox
root = Tk()
root.geometry("200x200")
def message():
messagebox.showwarning("Alert Box", "Stop virus found")
but = Button(root, text="ok", command=message)
but.place(x=100, y=100)
root.mainloop() | [
[
[
69,
70
],
[
110,
112
],
[
221,
227
],
[
253,
260
]
],
[
[
91,
101
],
[
159,
169
]
],
[
[
103,
107
],
[
115,
119
],
[
228,
232
],
[
286,
290
]
],
[
[
144,
151
]
],
[
[
215,
218
],
[
262,
265
]
]
] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras regularizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import regularizers
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
DATA_DIM = 5
NUM_CLASSES = 2
class KerasRegularizersTest(keras_parameterized.TestCase,
parameterized.TestCase):
def create_model(self, kernel_regularizer=None, activity_regularizer=None):
model = keras.models.Sequential()
model.add(keras.layers.Dense(NUM_CLASSES,
kernel_regularizer=kernel_regularizer,
activity_regularizer=activity_regularizer,
input_shape=(DATA_DIM,)))
return model
def get_data(self):
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
train_samples=10,
test_samples=10,
input_shape=(DATA_DIM,),
num_classes=NUM_CLASSES)
y_train = np_utils.to_categorical(y_train, NUM_CLASSES)
y_test = np_utils.to_categorical(y_test, NUM_CLASSES)
return (x_train, y_train), (x_test, y_test)
def create_multi_input_model_from(self, layer1, layer2):
input_1 = keras.layers.Input(shape=(DATA_DIM,))
input_2 = keras.layers.Input(shape=(DATA_DIM,))
out1 = layer1(input_1)
out2 = layer2(input_2)
out = keras.layers.Average()([out1, out2])
model = keras.models.Model([input_1, input_2], out)
model.add_loss(keras.backend.mean(out2))
model.add_loss(math_ops.reduce_sum(input_1))
return model
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
('l1', regularizers.l1()),
('l2', regularizers.l2()),
('l1_l2', regularizers.l1_l2()),
])
def test_kernel_regularization(self, regularizer):
(x_train, y_train), _ = self.get_data()
model = self.create_model(kernel_regularizer=regularizer)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(len(model.losses), 1)
model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
('l1', regularizers.l1()),
('l2', regularizers.l2()),
('l1_l2', regularizers.l1_l2()),
('l2_zero', keras.regularizers.l2(0.)),
])
def test_activity_regularization(self, regularizer):
(x_train, y_train), _ = self.get_data()
model = self.create_model(activity_regularizer=regularizer)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertEqual(len(model.losses), 1 if context.executing_eagerly() else 1)
model.fit(x_train, y_train, batch_size=10, epochs=1, verbose=0)
@keras_parameterized.run_all_keras_modes
@keras_parameterized.run_with_all_model_types
def test_zero_regularization(self):
# Verifies that training with zero regularization works.
x, y = np.ones((10, 10)), np.ones((10, 3))
model = testing_utils.get_model_from_layers(
[keras.layers.Dense(3, kernel_regularizer=keras.regularizers.l2(0))],
input_shape=(10,))
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
model.fit(x, y, batch_size=5, epochs=1)
def test_custom_regularizer_saving(self):
def my_regularizer(weights):
return math_ops.reduce_sum(math_ops.abs(weights))
inputs = keras.Input((10,))
outputs = keras.layers.Dense(1, kernel_regularizer=my_regularizer)(inputs)
model = keras.Model(inputs, outputs)
model2 = model.from_config(
model.get_config(), custom_objects={'my_regularizer': my_regularizer})
self.assertEqual(model2.layers[1].kernel_regularizer, my_regularizer)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
('l1', regularizers.l1()),
('l2', regularizers.l2()),
('l1_l2', regularizers.l1_l2()),
])
def test_regularization_shared_layer(self, regularizer):
dense_layer = keras.layers.Dense(
NUM_CLASSES,
kernel_regularizer=regularizer,
activity_regularizer=regularizer)
model = self.create_multi_input_model_from(dense_layer, dense_layer)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertLen(model.losses, 5)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
('l1', regularizers.l1()),
('l2', regularizers.l2()),
('l1_l2', regularizers.l1_l2()),
])
def test_regularization_shared_model(self, regularizer):
dense_layer = keras.layers.Dense(
NUM_CLASSES,
kernel_regularizer=regularizer,
activity_regularizer=regularizer)
input_tensor = keras.layers.Input(shape=(DATA_DIM,))
dummy_model = keras.models.Model(input_tensor, dense_layer(input_tensor))
model = self.create_multi_input_model_from(dummy_model, dummy_model)
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
self.assertLen(model.losses, 6)
@keras_parameterized.run_all_keras_modes
@parameterized.named_parameters([
('l1', regularizers.l1()),
('l2', regularizers.l2()),
('l1_l2', regularizers.l1_l2()),
])
def test_regularization_shared_layer_in_different_models(self, regularizer):
shared_dense = keras.layers.Dense(
NUM_CLASSES,
kernel_regularizer=regularizer,
activity_regularizer=regularizer)
models = []
for _ in range(2):
input_tensor = keras.layers.Input(shape=(DATA_DIM,))
unshared_dense = keras.layers.Dense(
NUM_CLASSES, kernel_regularizer=regularizer)
out = unshared_dense(shared_dense(input_tensor))
models.append(keras.models.Model(input_tensor, out))
model = self.create_multi_input_model_from(
layer1=models[0], layer2=models[1])
model.compile(
loss='categorical_crossentropy',
optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly(),
experimental_run_tf_function=testing_utils.should_run_tf_function())
# We expect to see 9 losses on the model:
# - 2 from the 2 add_loss calls on the outer model.
# - 3 from the weight regularizers on the shared_dense layer, unshared_dense
# in inner model 1, unshared_dense in inner model 2.
# - 4 from activity regularizers on the shared_dense layer.
self.assertLen(model.losses, 9)
if __name__ == '__main__':
test.main()
| [
[
[
749,
764
]
],
[
[
788,
796
]
],
[
[
820,
834
]
],
[
[
861,
874
],
[
1387,
1400
],
[
2656,
2669
],
[
3334,
3347
],
[
5191,
5204
],
[
5908,
5921
],
[
6762,
6775
]
],
[
[
882,
893
],
[
4255,
4257
],
[
4274,
4276
]
],
[
[
925,
930
],
[
3490,
3495
],
[
1503,
1508
],
[
1543,
1548
],
[
2251,
2256
],
[
2303,
2308
],
[
2405,
2410
],
[
2454,
2459
],
[
2517,
2522
],
[
4349,
4354
],
[
4390,
4395
],
[
4820,
4825
],
[
4853,
4858
],
[
4930,
4935
],
[
5411,
5416
],
[
6128,
6133
],
[
6271,
6276
],
[
6327,
6332
],
[
7003,
7008
],
[
7186,
7191
],
[
7247,
7252
],
[
7397,
7402
]
],
[
[
967,
974
],
[
3949,
3956
]
],
[
[
1011,
1030
],
[
1329,
1348
],
[
2613,
2632
],
[
3291,
3310
],
[
4057,
4076
],
[
4100,
4119
],
[
5148,
5167
],
[
5865,
5884
],
[
6719,
6738
]
],
[
[
1067,
1079
],
[
2702,
2714
],
[
2735,
2747
],
[
2771,
2783
],
[
3380,
3392
],
[
3413,
3425
],
[
3449,
3461
],
[
5237,
5249
],
[
5270,
5282
],
[
5306,
5318
],
[
5954,
5966
],
[
5987,
5999
],
[
6023,
6035
],
[
6808,
6820
],
[
6841,
6853
],
[
6877,
6889
]
],
[
[
1116,
1129
],
[
1865,
1878
],
[
3063,
3076
],
[
3136,
3149
],
[
3791,
3804
],
[
3864,
3877
],
[
4303,
4316
],
[
4514,
4527
],
[
4587,
4600
],
[
5712,
5725
],
[
5785,
5798
],
[
6566,
6579
],
[
6639,
6652
],
[
7634,
7647
],
[
7707,
7720
]
],
[
[
1172,
1180
],
[
2025,
2033
],
[
2084,
2092
]
],
[
[
1215,
1223
],
[
2562,
2570
],
[
4763,
4771
],
[
4783,
4791
]
],
[
[
1263,
1267
],
[
8119,
8123
]
],
[
[
1270,
1278
],
[
1769,
1777
],
[
1966,
1974
],
[
2277,
2285
],
[
2329,
2337
],
[
6297,
6305
],
[
7212,
7220
]
],
[
[
1283,
1294
],
[
1562,
1573
],
[
1998,
2009
],
[
2058,
2069
],
[
2116,
2127
],
[
5439,
5450
],
[
6156,
6167
],
[
7031,
7042
],
[
7277,
7288
]
],
[
[
1307,
1328
]
]
] |
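A minimal sketch, using the same names as the tests above, of attaching built-in regularizers to a layer; the 0.01 penalty values are illustrative.

regularized = keras.layers.Dense(
    NUM_CLASSES,
    kernel_regularizer=regularizers.l2(0.01),
    activity_regularizer=regularizers.l1(0.01),
    input_shape=(DATA_DIM,))
model = keras.models.Sequential([regularized])   # model.losses collects the regularization penalties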
"""WSGI File that enables Apache/GUnicorn to run Django"""
# pylint: disable=C0103
import os
import sys
from django.core.wsgi import get_wsgi_application
sys.path.insert(0, os.path.abspath(os.path.join(os.path.abspath(os.pardir), os.pardir)))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.abspath(os.path.dirname(__file__)))))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", 'termsandconditions_demo.settings')
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
| [
[
[
92,
94
],
[
176,
178
],
[
192,
194
],
[
205,
207
],
[
221,
223
],
[
233,
235
],
[
265,
267
],
[
281,
283
],
[
294,
296
],
[
310,
312
],
[
341,
343
]
],
[
[
102,
105
],
[
157,
160
],
[
246,
249
]
],
[
[
135,
155
],
[
615,
635
]
],
[
[
601,
612
]
]
] |
from concurrent import futures
import time
import grpc
import app.helloworld_pb2 as helloworld_pb2
import app.helloworld_pb2_grpc as helloworld_pb2_grpc
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class Greeter(helloworld_pb2_grpc.GreeterServicer):
def Greet(self, request, context):
print('Saying `hello` to %s' % request.name)
return helloworld_pb2.GreetResponse(message='Hello, {}!'.format(request.name))
def serve():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
helloworld_pb2_grpc.add_GreeterServicer_to_server(Greeter(), server)
server.add_insecure_port('[::]:50051')
server.start()
try:
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
server.stop(0)
if __name__ == '__main__':
serve()
| [
[
[
23,
30
],
[
465,
472
]
],
[
[
38,
42
],
[
685,
689
]
],
[
[
51,
55
],
[
453,
457
]
],
[
[
64,
100
],
[
353,
367
]
],
[
[
108,
154
],
[
207,
226
],
[
513,
532
]
],
[
[
156,
175
],
[
696,
715
]
],
[
[
199,
206
],
[
563,
570
]
],
[
[
431,
436
],
[
803,
808
]
]
] |
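A minimal client sketch for the Greeter service above. `GreeterStub` and `GreetRequest` follow the usual grpcio code-generation conventions and are assumptions about the generated modules.

import grpc
import app.helloworld_pb2 as helloworld_pb2
import app.helloworld_pb2_grpc as helloworld_pb2_grpc

def greet(name='world'):
    # GreeterStub / GreetRequest are assumed names from the generated proto code.
    with grpc.insecure_channel('localhost:50051') as channel:
        stub = helloworld_pb2_grpc.GreeterStub(channel)
        response = stub.Greet(helloworld_pb2.GreetRequest(name=name))
        return response.message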
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import logging
import functools
import paddle.distributed as dist
logger_initialized = {}
@functools.lru_cache()
def get_logger(name='srnet', log_file=None, log_level=logging.INFO):
"""Initialize and get a logger by name.
If the logger has not been initialized, this method will initialize the
logger by adding one or two handlers, otherwise the initialized logger will
be directly returned. During initialization, a StreamHandler will always be
added. If `log_file` is specified a FileHandler will also be added.
Args:
name (str): Logger name.
log_file (str | None): The log filename. If specified, a FileHandler
will be added to the logger.
log_level (int): The logger level. Note that only the process of
rank 0 is affected, and other processes will set the level to
"Error" thus be silent most of the time.
Returns:
logging.Logger: The expected logger.
"""
logger = logging.getLogger(name)
if name in logger_initialized:
return logger
for logger_name in logger_initialized:
if name.startswith(logger_name):
return logger
formatter = logging.Formatter(
'[%(asctime)s] %(name)s %(levelname)s: %(message)s',
datefmt="%Y/%m/%d %H:%M:%S")
stream_handler = logging.StreamHandler(stream=sys.stdout)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
if log_file is not None and dist.get_rank() == 0:
log_file_folder = os.path.split(log_file)[0]
os.makedirs(log_file_folder, exist_ok=True)
file_handler = logging.FileHandler(log_file, 'a')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
if dist.get_rank() == 0:
logger.setLevel(log_level)
else:
logger.setLevel(logging.ERROR)
logger_initialized[name] = True
return logger
| [
[
[
615,
617
],
[
2155,
2157
],
[
2190,
2192
]
],
[
[
625,
628
],
[
1982,
1985
]
],
[
[
636,
643
],
[
799,
806
],
[
1606,
1613
],
[
1814,
1821
],
[
1953,
1960
],
[
2257,
2264
],
[
2475,
2482
]
],
[
[
651,
660
],
[
723,
732
]
],
[
[
668,
694
],
[
2107,
2111
],
[
2384,
2388
]
],
[
[
696,
714
],
[
1645,
1663
],
[
1710,
1728
],
[
2494,
2512
]
],
[
[
749,
759
]
]
] |
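A short usage sketch for the get_logger() helper above; 'train.log' is a placeholder path.

logger = get_logger(name='srnet', log_file='train.log', log_level=logging.INFO)
logger.info('logger initialized')     # rank 0 logs at INFO, other ranks at ERROR
same_logger = get_logger('srnet')     # already-initialized loggers are returned as-is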
from django.core.validators import RegexValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
from taggit.managers import TaggableManager
from server.connective_tags.models import ConnectiveTaggedItem
from server.schools.models import School
from server.utils.db_utils import get_base_model
from server.utils.model_fields import random_slug
class SchoolActivityGroupManager(models.Manager):
def get_activity_container_only_group(self, activity_group):
container_only_groups = self.filter(
activity_order=activity_group.activity_order,
group_type=SchoolActivityGroup.GroupTypes.CONTAINER_ONLY,
)
if container_only_groups.exists():
return container_only_groups[0]
class ImportedOrganization(get_base_model()):
slug = models.CharField(max_length=40, default=random_slug, unique=True)
organization_number = models.CharField(max_length=10, unique=True)
email = models.EmailField(null=True, blank=True)
description = models.CharField(max_length=4096, null=True, blank=True)
website_url = models.URLField(null=True, blank=True)
name = models.CharField(max_length=256, null=True, blank=True)
goal = models.CharField(max_length=4096, null=True, blank=True)
year_founded = models.CharField(max_length=128, null=True, blank=True)
status = models.CharField(max_length=50, null=True, blank=True)
target_audience = models.JSONField(null=True, blank=True)
number_of_employees = models.PositiveIntegerField(null=True, blank=True)
number_of_members = models.PositiveIntegerField(null=True, blank=True)
number_of_volunteers = models.PositiveIntegerField(null=True, blank=True)
location_lon = models.DecimalField(
max_digits=9,
decimal_places=6,
null=True,
blank=True,
)
location_lat = models.DecimalField(
max_digits=9,
decimal_places=6,
null=True,
blank=True,
)
address_city = models.CharField(max_length=256, null=True, blank=True)
address_street = models.CharField(max_length=256, null=True, blank=True)
address_house_num = models.CharField(max_length=30, null=True, blank=True)
address_zipcode = models.CharField(max_length=9, null=True, blank=True)
cities = models.JSONField(null=True, blank=True)
districts = models.JSONField(null=True, blank=True)
union_type = models.CharField(max_length=50, null=True, blank=True)
def __str__(self):
return f"{self.name} | {self.organization_number} | {self.slug}"
class Organization(get_base_model()):
slug = models.CharField(max_length=40, default=random_slug, unique=True)
organization_number = models.CharField(max_length=10, unique=True, null=True)
email = models.EmailField()
description = models.CharField(max_length=300)
website_url = models.URLField(null=True, blank=True)
name = models.CharField(max_length=100)
goal = models.CharField(max_length=300, null=True, blank=True)
year_founded = models.CharField(max_length=4, null=True, blank=True)
status = models.CharField(max_length=50, null=True, blank=True)
target_audience = models.JSONField(null=True, blank=True)
number_of_employees = models.PositiveIntegerField(null=True, blank=True)
number_of_members = models.PositiveIntegerField(null=True, blank=True)
number_of_volunteers = models.PositiveIntegerField(null=True, blank=True)
location_lon = models.DecimalField(
max_digits=9,
decimal_places=6,
null=True,
blank=True,
)
location_lat = models.DecimalField(
max_digits=9,
decimal_places=6,
null=True,
blank=True,
)
address_city = models.CharField(max_length=150, null=True, blank=True)
address_street = models.CharField(max_length=150, null=True, blank=True)
address_house_num = models.CharField(max_length=20, null=True, blank=True)
address_zipcode = models.CharField(max_length=9, null=True, blank=True)
cities = models.JSONField(null=True, blank=True)
districts = models.JSONField(null=True, blank=True)
union_type = models.CharField(max_length=50, null=True, blank=True)
def __str__(self):
return f"{self.name} | {self.organization_number} | {self.slug}"
class Activity(get_base_model()):
class Domain(models.TextChoices):
SCIENCE_AND_TECH = "SCIENCE_AND_TECH", "Science And Tech"
EXTREME_SPORTS = "EXTREME_SPORTS", "Extreme Sports"
FIELD = "FIELD", "Field"
OTHER = "OTHER", "Other"
tags = TaggableManager(blank=True, through=ConnectiveTaggedItem)
slug = models.CharField(max_length=40, default=random_slug, unique=True)
name = models.CharField(max_length=35)
target_audience = models.JSONField()
domain = models.CharField(max_length=55, null=True, choices=Domain.choices)
originization = models.ForeignKey(
Organization,
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="activities",
)
activity_website_url = models.URLField(max_length=750, null=True, blank=True)
activity_email = models.EmailField(null=True, blank=True)
description = models.CharField(max_length=550, default="")
contact_name = models.CharField(max_length=60, default="")
logo = models.ImageField(blank=True, null=True)
phone_number = models.CharField(
blank=True,
max_length=15,
validators=[
RegexValidator(
regex=r"^\d{9,15}$",
message=_("phone number must be between 9-15 digits"),
)
],
)
def __str__(self):
try:
return f"{self.name} | {self.slug} | {self.originization.name}"
except AttributeError:
return f"{self.name} | {self.slug}"
class ImportedActivity(get_base_model()):
slug = models.CharField(max_length=40, default=random_slug, unique=True)
activity_code = models.IntegerField()
name = models.CharField(max_length=550)
raw_name = models.CharField(max_length=550)
target_audience = models.JSONField()
organization_number = models.IntegerField()
organization_name = models.CharField(max_length=1550, default="")
target_gender = models.JSONField()
target_population = models.JSONField()
target_time = models.JSONField()
target_size = models.JSONField()
target_migzar = models.JSONField()
target_pikuah = models.JSONField()
profession = models.JSONField()
goal = models.CharField(max_length=1550, default="")
is_active = models.BooleanField()
activity_website_url = models.URLField(max_length=750, null=True, blank=True)
activity_email = models.EmailField(null=True, blank=True)
description = models.CharField(max_length=1550, default="")
contact_name = models.CharField(max_length=100, default="")
phone_number = models.CharField(
blank=True,
max_length=15,
validators=[
RegexValidator(
regex=r"^\d{9,15}$",
message=_("phone number must be between 9-15 digits"),
)
],
)
def __str__(self):
return f"{self.name} | {self.slug} | {self.activity_code}"
class ActivityMedia(get_base_model()):
slug = models.CharField(max_length=40, default=random_slug, unique=True)
name = models.CharField(max_length=40, null=True, blank=True)
image_url = models.ImageField(blank=True, null=True)
video_url = models.URLField(blank=True, null=True)
activity = models.ForeignKey(
Activity,
on_delete=models.CASCADE,
related_name="rich_media",
)
def __str__(self):
return f"{self.name} | {self.slug} | {self.activity.name}"
class OrganizationMember(get_base_model()):
user = models.OneToOneField(
"users.User", on_delete=models.CASCADE, related_name="organization_member"
)
organization = models.ForeignKey(
Organization,
on_delete=models.CASCADE,
related_name="organization_member",
)
def __str__(self):
return f"{self.user.email} | {self.organization.name}"
class SchoolActivityOrder(get_base_model()):
class Meta:
constraints = [
models.UniqueConstraint(fields=["school", "activity"], name="unique_order")
]
class Status(models.TextChoices):
CANCELLED = "CANCELLED", "Cancelled"
PENDING_ADMIN_APPROVAL = "PENDING_ADMIN_APPROVAL", "Pending Admin Approval"
APPROVED = "APPROVED", "Approved"
DENIED = "DENIED", "Denied"
base_status = Status.PENDING_ADMIN_APPROVAL
slug = models.CharField(max_length=40, default=random_slug, unique=True)
requested_by = models.ForeignKey(
"users.User",
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="requested_orders",
)
last_updated_by = models.ForeignKey(
"users.User",
on_delete=models.SET_NULL,
null=True,
blank=True,
related_name="last_updated_by_me_orders",
)
school = models.ForeignKey(
School, on_delete=models.CASCADE, related_name="school_activity_orders"
)
activity = models.ForeignKey(
Activity, on_delete=models.CASCADE, related_name="school_activity_orders"
)
status = models.CharField(
_("status"), max_length=50, choices=Status.choices, default=base_status
)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
status_reason = models.CharField(
max_length=250,
blank=True,
)
def __str__(self):
return f"{self.activity} | {self.school} | {self.status} | {self.pk}"
class SchoolActivityGroup(get_base_model()):
class GroupTypes(models.TextChoices):
CONTAINER_ONLY = "CONTAINER_ONLY", "Container Only"
DISABLED_CONSUMERS = "DISABLED_CONSUMERS", "Disabled Consumers"
NO_REGISTRATION = "NO_REGISTRATION", "No Registration"
DEFAULT = "DEFAULT", "Default"
objects = SchoolActivityGroupManager()
slug = models.CharField(max_length=40, default=random_slug, unique=True)
activity_order = models.ForeignKey(
SchoolActivityOrder, on_delete=models.CASCADE, related_name="activity_groups"
)
name = models.CharField(_("name"), max_length=50)
description = models.CharField(_("description"), max_length=550)
consumers = models.ManyToManyField(
"users.Consumer",
related_name="activity_groups",
blank=True,
)
group_type = models.CharField(
_("group type"),
max_length=50,
choices=GroupTypes.choices,
default=GroupTypes.DEFAULT,
)
instructor = models.ForeignKey(
"users.Instructor",
on_delete=models.SET_NULL,
related_name="managed_activity_groups",
null=True,
blank=True,
)
def __str__(self):
return f"""
{self.name} : {self.group_type} : {self.slug} :
{self.activity_order.activity.name} : {self.activity_order.school.name}
"""
| [
[
[
35,
49
],
[
5496,
5510
],
[
7046,
7060
]
],
[
[
72,
78
],
[
417,
423
],
[
828,
834
],
[
920,
926
],
[
977,
983
],
[
1036,
1042
],
[
1111,
1117
],
[
1161,
1167
],
[
1228,
1234
],
[
1304,
1310
],
[
1373,
1379
],
[
1450,
1456
],
[
1516,
1522
],
[
1591,
1597
],
[
1669,
1675
],
[
1739,
1745
],
[
1872,
1878
],
[
2006,
2012
],
[
2083,
2089
],
[
2163,
2169
],
[
2240,
2246
],
[
2307,
2313
],
[
2363,
2369
],
[
2420,
2426
],
[
2623,
2629
],
[
2715,
2721
],
[
2783,
2789
],
[
2821,
2827
],
[
2872,
2878
],
[
2922,
2928
],
[
2966,
2972
],
[
3041,
3047
],
[
3108,
3114
],
[
3185,
3191
],
[
3251,
3257
],
[
3326,
3332
],
[
3404,
3410
],
[
3474,
3480
],
[
3607,
3613
],
[
3741,
3747
],
[
3818,
3824
],
[
3898,
3904
],
[
3975,
3981
],
[
4042,
4048
],
[
4098,
4104
],
[
4155,
4161
],
[
4360,
4366
],
[
4655,
4661
],
[
4732,
4738
],
[
4786,
4792
],
[
4818,
4824
],
[
4905,
4911
],
[
4964,
4970
],
[
5088,
5094
],
[
5164,
5170
],
[
5223,
5229
],
[
5287,
5293
],
[
5342,
5348
],
[
5402,
5408
],
[
5898,
5904
],
[
5984,
5990
],
[
6017,
6023
],
[
6065,
6071
],
[
6120,
6126
],
[
6165,
6171
],
[
6211,
6217
],
[
6277,
6283
],
[
6316,
6322
],
[
6359,
6365
],
[
6396,
6402
],
[
6433,
6439
],
[
6472,
6478
],
[
6511,
6517
],
[
6547,
6553
],
[
6577,
6583
],
[
6639,
6645
],
[
6688,
6694
],
[
6764,
6770
],
[
6823,
6829
],
[
6888,
6894
],
[
6952,
6958
],
[
7344,
7350
],
[
7421,
7427
],
[
7492,
7498
],
[
7549,
7555
],
[
7603,
7609
],
[
7658,
7664
],
[
7863,
7869
],
[
7917,
7923
],
[
7993,
7999
],
[
8052,
8058
],
[
8304,
8310
],
[
8408,
8414
],
[
8697,
8703
],
[
8782,
8788
],
[
8841,
8847
],
[
8966,
8972
],
[
9025,
9031
],
[
9150,
9156
],
[
9195,
9201
],
[
9270,
9276
],
[
9317,
9323
],
[
9390,
9396
],
[
9511,
9517
],
[
9568,
9574
],
[
9624,
9630
],
[
9862,
9868
],
[
10173,
10179
],
[
10260,
10266
],
[
10318,
10324
],
[
10382,
10388
],
[
10443,
10449
],
[
10510,
10516
],
[
10643,
10649
],
[
10804,
10810
],
[
10869,
10875
]
],
[
[
116,
133
],
[
5573,
5574
],
[
7123,
7124
],
[
9416,
9417
],
[
10399,
10400
],
[
10460,
10461
],
[
10669,
10670
]
],
[
[
162,
177
],
[
4585,
4600
]
],
[
[
221,
241
],
[
4621,
4641
]
],
[
[
276,
282
],
[
9177,
9183
]
],
[
[
317,
331
],
[
798,
812
],
[
2593,
2607
],
[
4324,
4338
],
[
5868,
5882
],
[
7314,
7328
],
[
7833,
7847
],
[
8233,
8247
],
[
9822,
9836
]
],
[
[
370,
381
],
[
868,
879
],
[
2663,
2674
],
[
4695,
4706
],
[
5938,
5949
],
[
7384,
7395
],
[
8737,
8748
],
[
10213,
10224
]
],
[
[
390,
416
],
[
10132,
10158
]
],
[
[
777,
797
]
],
[
[
2580,
2592
],
[
4932,
4944
],
[
8020,
8032
]
],
[
[
4315,
4323
],
[
7630,
7638
],
[
9297,
9305
]
],
[
[
5851,
5867
]
],
[
[
7300,
7313
]
],
[
[
7814,
7832
]
],
[
[
8213,
8232
],
[
10287,
10306
]
],
[
[
9802,
9821
],
[
625,
644
]
]
] |
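A hedged sketch of the custom manager defined at the top of the sample above; `group` stands in for a SchoolActivityGroup instance fetched elsewhere.

container = SchoolActivityGroup.objects.get_activity_container_only_group(group)
if container is not None:
    print(container.slug, container.group_type)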
#!/usr/bin/env python
#
# Copyright 2015-2016 Flavio Garcia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import (absolute_import, division, print_function,
with_statement)
import unittest
from tornado import httpclient
import logging
from tornado.escape import json_decode, json_encode
from ..v1 import api_url_v1
logger = logging.getLogger(__name__)
class UserApiV1FunctionalTestCase(unittest.TestCase):
""" Case that covers the account service.
"""
def test_login_empty_body(self):
http_client = httpclient.HTTPClient()
login_url = "%s/user/login" % api_url_v1
response_body = None
error_code = 0
body_error_code = 0
try:
http_client.fetch(httpclient.HTTPRequest(
url=login_url, method='POST', body=''))
except httpclient.HTTPError as e:
# HTTPError is raised for non-200 responses; the response
# can be found in e.response.
response_body = json_decode(e.response.body)
error_code = e.code
body_error_code = int(response_body['status'])
print(e.response.error)
except Exception as e:
# Other errors are possible, such as IOError.
logger.error("Error: %s" % str(e))
http_client.close()
# Bad Request http error
self.assertEquals(error_code, 500)
self.assertEquals(body_error_code, 500)
# Has 1 error
self.assertEquals(len(response_body['errors']), 1)
# Username message
self.assertEquals(response_body['errors']['schema'][0],
"Invalid json body content.")
def test_login_invalid_json(self):
http_client = httpclient.HTTPClient()
login_url = "%s/user/login" % api_url_v1
response_body = None
data = "invalid json string"
error_code = 0
body_error_code = 0
try:
http_client.fetch(httpclient.HTTPRequest(
                url=login_url, method='POST', body=data))
except httpclient.HTTPError as e:
# HTTPError is raised for non-200 responses; the response
# can be found in e.response.
response_body = json_decode(e.response.body)
error_code = e.code
body_error_code = int(response_body['status'])
print(e.response.error)
except Exception as e:
# Other errors are possible, such as IOError.
logger.error("Error: %s" % str(e))
http_client.close()
# Bad Request http error
self.assertEquals(error_code, 500)
self.assertEquals(body_error_code, 500)
# Has 1 error
self.assertEquals(len(response_body['errors']), 1)
# Username message
self.assertEquals(response_body['errors']['schema'][0],
"Invalid json body content.")
def test_login_without_username(self):
http_client = httpclient.HTTPClient()
login_url = "%s/user/login" % api_url_v1
response_body = None
error_code = 0
data = {
'payload': {
'password': "",
}
}
try:
response = http_client.fetch(httpclient.HTTPRequest(
url=login_url, method='POST', body=json_encode(data)))
except httpclient.HTTPError as e:
# HTTPError is raised for non-200 responses; the response
# can be found in e.response.
logger.error("Error: %s" % str(e))
error_code = e.code
response_body = json_decode(e.response.body)
except Exception as e:
# Other errors are possible, such as IOError.
logger.error("Error: %s" % str(e))
http_client.close()
# Unauthorized http error
self.assertEquals(error_code, 400)
# Has 2 errors
self.assertEquals(len(response_body['errors']), 1)
# Username message
self.assertEquals(response_body['errors']['schema'],
"'username' is a required property")
def test_login_without_password(self):
http_client = httpclient.HTTPClient()
login_url = "%s/user/login" % api_url_v1
response_body = None
error_code = 0
data = {
'payload': {
'username': "",
}
}
try:
response = http_client.fetch(httpclient.HTTPRequest(
url=login_url, method='POST', body=json_encode(data)))
except httpclient.HTTPError as e:
# HTTPError is raised for non-200 responses; the response
# can be found in e.response.
logger.error("Error: %s" % str(e))
error_code = e.code
response_body = json_decode(e.response.body)
except Exception as e:
# Other errors are possible, such as IOError.
logger.error("Error: %s" % str(e))
http_client.close()
# Unauthorized http error
self.assertEquals(error_code, 400)
# Has 2 errors
self.assertEquals(len(response_body['errors']), 1)
# Username message
print(response_body['errors']['schema'])
self.assertEquals(response_body['errors']['schema'],
"'password' is a required property")
def test_valid_login(self):
http_client = httpclient.HTTPClient()
login_url = "%s/user/login" % api_url_v1
response_body = None
code = 0
data = {
'payload': {
'username': "test",
'password': "test",
}
}
try:
response = http_client.fetch(httpclient.HTTPRequest(
url=login_url, method='POST', body=json_encode(data)))
code = response.code
response_body = json_decode(response.body)
except httpclient.HTTPError as e:
# HTTPError is raised for non-200 responses; the response
# can be found in e.response.
logger.error("Error: %s" % str(e))
except Exception as e:
# Other errors are possible, such as IOError.
logger.error("Error: %s" % str(e))
http_client.close()
# Unauthorized http error
self.assertEquals(code, 200)
# Username message
self.assertEquals(response_body['userid'], 1)
| [
[
[
630,
645
]
],
[
[
647,
655
]
],
[
[
657,
671
]
],
[
[
697,
711
]
],
[
[
721,
729
],
[
930,
938
]
],
[
[
750,
760
],
[
1064,
1074
],
[
1260,
1270
],
[
1359,
1369
],
[
2260,
2270
],
[
2493,
2503
],
[
2592,
2602
],
[
3497,
3507
],
[
3774,
3784
],
[
3888,
3898
],
[
4703,
4713
],
[
4980,
4990
],
[
5094,
5104
],
[
5947,
5957
],
[
6258,
6268
],
[
6460,
6470
]
],
[
[
768,
775
],
[
866,
873
]
],
[
[
803,
814
],
[
1526,
1537
],
[
2759,
2770
],
[
4134,
4145
],
[
5340,
5351
],
[
6418,
6429
]
],
[
[
816,
827
],
[
3853,
3864
],
[
5059,
5070
],
[
6337,
6348
]
],
[
[
845,
855
],
[
1126,
1136
],
[
2322,
2332
],
[
3559,
3569
],
[
4765,
4775
],
[
6009,
6019
]
],
[
[
857,
863
],
[
1783,
1789
],
[
3016,
3022
],
[
4039,
4045
],
[
4264,
4270
],
[
5245,
5251
],
[
5470,
5476
],
[
6611,
6617
],
[
6747,
6753
]
],
[
[
902,
929
]
]
] |
from tqdm import tqdm
import pandas as pd
from __init__ import FILE
df = pd.read_csv(FILE)
smiles = list(df["Smiles"])
with open("_chemprop.csv", "w") as f:
f.write("smiles\n")
for smi in smiles:
f.write("{0}\n".format(smi))
| [
[
[
17,
21
]
],
[
[
29,
41
],
[
74,
76
]
],
[
[
63,
67
],
[
86,
90
]
],
[
[
69,
71
],
[
106,
108
]
],
[
[
92,
98
],
[
198,
204
]
],
[
[
156,
157
],
[
163,
164
],
[
214,
215
]
],
[
[
191,
194
],
[
237,
240
]
]
] |
# coding=utf-8
import copy
import functools
from typing import List
import torch
import torch.distributed._shard.sharding_spec as shard_spec
from .api import (
_register_sharded_op,
Shard,
ShardedTensor,
ShardedTensorMetadata,
TensorProperties,
)
from .metadata import ShardMetadata # noqa: F401
from .partial_tensor import _PartialTensor
def empty(sharding_spec: shard_spec.ShardingSpec,
*size,
dtype=None,
layout=torch.strided,
requires_grad=False,
pin_memory=False,
memory_format=torch.contiguous_format,
process_group=None,
init_rrefs=False) -> ShardedTensor:
"""
Returns a :class:`ShardedTensor` filled with uninitialized data.
Needs to be called on all ranks in an SPMD fashion.
Args:
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
describing how to shard the Tensor.
size (int...): a sequence of integers defining the shape of the output
tensor. Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
memory_format (:class:`torch.memory_format`, optional): the desired memory format of
returned Tensor. Default: ``torch.contiguous_format``.
process_group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
init_rrefs (bool, optional): Whether or not to initialize
:class:`torch.distributed.rpc.RRef`s pointing to remote shards.
Need to initialize the RPC Framework if specified as ``True``.
Default: ``False``.
Returns:
A :class:`ShardedTensor` object on each rank
"""
return ShardedTensor(
sharding_spec,
*size,
dtype=dtype,
layout=layout,
requires_grad=requires_grad,
pin_memory=pin_memory,
memory_format=memory_format,
process_group=process_group,
init_rrefs=init_rrefs,
)
def ones(sharding_spec: shard_spec.ShardingSpec,
*size,
dtype=None,
layout=torch.strided,
requires_grad=False,
pin_memory=False,
memory_format=torch.contiguous_format,
process_group=None,
init_rrefs=False) -> ShardedTensor:
"""
Returns a :class:`ShardedTensor` with the scalar value 1.
Needs to be called on all ranks in an SPMD fashion.
Args:
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
describing how to shard the Tensor.
size (int...): a sequence of integers defining the shape of the output
tensor. Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
process_group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
init_rrefs (bool, optional): Whether or not to initialize
:class:`torch.distributed.rpc.RRef`s pointing to remote shards.
Need to initialize the RPC Framework if specified as ``True``.
Default: ``False``.
Returns:
A :class:`ShardedTensor` object on each rank
"""
return full(
sharding_spec,
size,
fill_value=1,
dtype=dtype,
layout=layout,
requires_grad=requires_grad,
pin_memory=pin_memory,
memory_format=memory_format,
process_group=process_group,
init_rrefs=init_rrefs
)
def zeros(sharding_spec: shard_spec.ShardingSpec,
*size,
dtype=None,
layout=torch.strided,
requires_grad=False,
pin_memory=False,
memory_format=torch.contiguous_format,
process_group=None,
init_rrefs=False) -> ShardedTensor:
"""
Returns a :class:`ShardedTensor` filled with the scalar value 0.
Needs to be called on all ranks in an SPMD fashion.
Args:
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
describing how to shard the Tensor.
size (int...): a sequence of integers defining the shape of the output
tensor. Can be a variable number of arguments or a collection like a list or tuple.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
process_group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
init_rrefs (bool, optional): Whether or not to initialize
:class:`torch.distributed.rpc.RRef`s pointing to remote shards.
Need to initialize the RPC Framework if specified as ``True``.
Default: ``False``.
Returns:
A :class:`ShardedTensor` object on each rank
"""
return full(
sharding_spec,
size,
fill_value=0,
dtype=dtype,
layout=layout,
requires_grad=requires_grad,
pin_memory=pin_memory,
memory_format=memory_format,
process_group=process_group,
init_rrefs=init_rrefs
)
def full(sharding_spec: shard_spec.ShardingSpec,
size,
fill_value=torch.types.Number,
dtype=None,
layout=torch.strided,
requires_grad=False,
pin_memory=False,
memory_format=torch.contiguous_format,
process_group=None,
init_rrefs=False) -> ShardedTensor:
"""
Creates a :class:`ShardedTensor` filled with fill_value. The tensor’s dtype
is inferred from fill_value. If dtype is specified, it will override the
inferred type from fill_value. Needs to be called on all ranks in an SPMD fashion.
Args:
sharding_spec (:class:`torch.distributed._sharding_spec.ShardingSpec`): The specification
describing how to shard the Tensor.
size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the
output tensor.
fill_value (Scalar) – the value to fill the output tensor with.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
process_group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
init_rrefs (bool, optional): Whether or not to initialize
:class:`torch.distributed.rpc.RRef`s pointing to remote shards.
Need to initialize the RPC Framework if specified as ``True``.
Default: ``False``.
Returns:
A :class:`ShardedTensor` object on each rank
"""
sharded_tensor = ShardedTensor(
sharding_spec,
*size,
dtype=dtype,
layout=layout,
requires_grad=requires_grad,
pin_memory=pin_memory,
memory_format=memory_format,
process_group=process_group,
init_rrefs=init_rrefs,
)
torch.nn.init.constant_(sharded_tensor, fill_value) # type: ignore[arg-type]
return sharded_tensor
def rand(sharding_spec: shard_spec.ShardingSpec,
*size,
dtype=None,
layout=torch.strided,
requires_grad=False,
pin_memory=False,
memory_format=torch.contiguous_format,
process_group=None,
init_rrefs=False) -> ShardedTensor:
"""
    Returns a :class:`ShardedTensor` filled with random numbers from a uniform
    distribution on the interval ``[0, 1)``. The shape of the tensor is defined
    by the variable argument `size`. Needs to be called on all ranks in an SPMD fashion.
Args:
sharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`): The specification
describing how to shard the Tensor.
size (int...): a list, tuple, or `torch.Size` of integers defining the shape of the
output tensor.
Keyword args:
dtype (:class:`torch.dtype`, optional): the desired data type of returned tensor.
Default: if ``None``, uses a global default (see :func:`torch.set_default_tensor_type`).
layout (:class:`torch.layout`, optional): the desired layout of returned Tensor.
Default: ``torch.strided``.
requires_grad (bool, optional): If autograd should record operations on the
returned tensor. Default: ``False``.
pin_memory (bool, optional): If set, returned tensor would be allocated in
the pinned memory. Works only for CPU tensors. Default: ``False``.
process_group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
init_rrefs (bool, optional): Whether or not to initialize
:class:`torch.distributed.rpc.RRef`s pointing to remote shards.
Need to initialize the RPC Framework if specified as ``True``.
Default: ``False``.
Returns:
A :class:`ShardedTensor` object on each rank
"""
sharded_tensor = ShardedTensor(
sharding_spec,
*size,
dtype=dtype,
layout=layout,
requires_grad=requires_grad,
pin_memory=pin_memory,
memory_format=memory_format,
process_group=process_group,
init_rrefs=init_rrefs,
)
torch.nn.init.uniform_(sharded_tensor, 0, 1) # type: ignore[arg-type]
return sharded_tensor
def init_from_local_shards(
local_shards: List[Shard],
*global_size,
process_group=None,
init_rrefs=False) -> ShardedTensor:
"""
Creates an :class:`ShardedTensor` from local shards and the global metadata.
Needs to be called on all ranks in an SPMD fashion.
Args:
local_shards (List[:class `torch.distributed._shard.sharded_tensor.Shard`]): A list
of shards that represent the local shards on this rank.
global_size (int...): a list, tuple, or `torch.Size` of integers defining the
shape of the overall sharded tensor.
Keyword args:
process_group (ProcessGroup, optional): The process group to work on. If None,
the default process group will be used.
init_rrefs (bool, optional): Whether or not to initialize
:class:`torch.distributed.rpc.RRef`s pointing to remote shards.
Need to initialize the RPC Framework if specified as ``True``.
Default: ``False``.
Returns:
A :class:`ShardedTensor` object handle on this rank
Examples:
        Suppose we want to construct a sharded tensor on two ranks, with global size = (10, 5),
        where each shard holds a (5, 5) local tensor; we can do it like below:
on rank 0:
>>> local_shard_metadata = ShardMetadata(
>>> shard_offsets=[0, 0]
>>> shard_lengths=[5, 5]
>>> placement="rank:0/cuda:0"
>>> )
>>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)]
>>> sharded_tensor = init_from_local_shards(local_shards, [10, 5])
on rank 1:
>>> local_shard_metadata = ShardMetadata(
>>> shard_offsets=[5, 0]
>>> shard_lengths=[5, 5]
>>> placement="rank:1/cuda:1"
>>> )
>>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)]
>>> sharded_tensor = init_from_local_shards(local_shards, [10, 5])
"""
return ShardedTensor._init_from_local_shards(
local_shards,
*global_size,
process_group=process_group,
init_rrefs=init_rrefs
)
def state_dict_hook(module, destination, prefix, local_metadata):
"""
Hook to add ShardedTensor to Module's ``state_dict``. Needs to be
registered to the Module using
:meth:`torch.nn.Module._register_state_dict_hook`.
"""
for submodule_name, submodule in module.named_modules():
for attr_name, attr in submodule.__dict__.items():
if isinstance(attr, ShardedTensor):
destination[prefix + submodule_name + '.' + attr_name] = attr
def pre_load_state_dict_hook(module, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs):
"""
Pre-load state dict hook to add ShardedTensor to the module.
"""
for submodule_name, submodule in module.named_modules():
for attr_name, attr in submodule.__dict__.items():
key = prefix + submodule_name + '.' + attr_name
if key in state_dict:
if isinstance(state_dict[key], ShardedTensor):
setattr(submodule, attr_name, state_dict[key])
def sharded_op_impl(func):
"""
Provides a way for users to write their own custom sharded operator. This
can be used to override existing ShardedTensor operators or write a new
one not supported by ShardedTensor. If the operator in question is covered
by ``__torch_function__`` dispatch and has a ShardedTensor as any of its
parameters, the function provided will be invoked for that operator.
Example::
>>> @sharded_op_impl(torch.nn.functional.linear)
>>> def my_custom_sharded_linear(types, args, kwargs, process_group):
>>> ....
>>>
>>> input = torch.rand(10, 32)
>>> weight = sharded_tensor.rand(32, 16)
>>> bias = torch.rand(16)
>>> # This will call 'my_custom_sharded_linear'
>>> torch.nn.functional.linear(input, weight, bias)
The types, args and kwargs parameters are the same parameters that are
passed to ``__torch_function__`` dispatch API
(https://pytorch.org/docs/stable/notes/extending.html#extending-torch).
There is an additional ``process_group`` parameter which is the
process_group used for the ShardedTensor and can be used by
implementations for communications within a sharded implementation.
Args:
func(Callable): Torch function for which we want to provide a sharded
implementation (ex: torch.nn.functional.linear)
"""
def decorator_sharded_func(wrapped_func):
_register_sharded_op(func, wrapped_func)
@functools.wraps(wrapped_func)
def wrapper(*args, **kwargs):
return wrapped_func(*args, **kwargs)
return wrapper
return decorator_sharded_func
# Import all builtin sharded ops
from ._ops import * # noqa: F403
def _reshard_output(
module: torch.nn.Module,
resharding_spec: shard_spec.ShardingSpec) -> torch.nn.Module:
"""
Hook a module with local shards collection in the forward pass according
to the given ``resharding_spec``.
Args:
module (:class:`torch.nn.Module`): Module whose output needs to be resharded.
resharding_spec (:class:`torch.distributed._shard.sharding_spec.ShardingSpec`):
The specification describing how the output of the module will be resharded.
Returns:
A :class:`torch.nn.Module` object with collection API hooked.
"""
def hook_func(_module, _input, output):
if isinstance(output, ShardedTensor) or isinstance(output, _PartialTensor):
return output.reshard(resharding_spec)
return output
module.register_forward_hook(hook_func)
return module
def _collect_local_shard(module: torch.nn.Module) -> torch.nn.Module:
"""
Hook a module with local shards collection in the forward pass.
This API is typically used to convert a sharded representation back to data parallel
representation. In particular, it returns the local tensor for this Shard. If the
size along the sharding dimension for the local tensor is 1, this dimension is removed
from the final result. For example a [4, 16] ShardedTensor across 4 ranks is typically
a local Tensor of size [16] across each rank and not [1, 16] across each rank.
Args:
module (:class:`torch.nn.Module`): Module whose output needs to be resharded.
Returns:
A :class:`torch.nn.Module` object with collection API hooked.
"""
def hook_func(_module, _input, output):
if isinstance(output, ShardedTensor):
local_tensor = output.local_tensor()
# Squeeze the # of dimensions manually.
if local_tensor.size(output._sharding_spec.dim) == 1: # type: ignore[attr-defined]
local_tensor = local_tensor.squeeze(
output._sharding_spec.dim # type: ignore[attr-defined]
)
return local_tensor
module.register_forward_hook(hook_func)
return module
| [
[
[
23,
27
]
],
[
[
35,
44
],
[
16558,
16567
]
],
[
[
64,
68
],
[
11927,
11931
]
],
[
[
77,
82
],
[
471,
476
],
[
569,
574
],
[
2792,
2797
],
[
2887,
2892
],
[
4956,
4961
],
[
5054,
5059
],
[
7110,
7115
],
[
7167,
7172
],
[
7262,
7267
],
[
9572,
9577
],
[
9667,
9672
],
[
9365,
9370
],
[
11779,
11784
],
[
16908,
16913
],
[
16838,
16843
],
[
17732,
17737
],
[
17712,
17717
]
],
[
[
90,
142
],
[
390,
400
],
[
2714,
2724
],
[
4875,
4885
],
[
7050,
7060
],
[
9494,
9504
],
[
16880,
16890
]
],
[
[
167,
187
],
[
16507,
16527
]
],
[
[
193,
198
],
[
11932,
11937
]
],
[
[
204,
217
],
[
655,
668
],
[
2413,
2426
],
[
2971,
2984
],
[
5140,
5153
],
[
7346,
7359
],
[
9085,
9098
],
[
9751,
9764
],
[
11499,
11512
],
[
12019,
12032
],
[
13856,
13869
],
[
14407,
14420
],
[
14969,
14982
],
[
17488,
17501
],
[
18530,
18543
]
],
[
[
223,
244
]
],
[
[
250,
266
]
],
[
[
292,
305
]
],
[
[
348,
362
],
[
17525,
17539
]
],
[
[
369,
374
]
],
[
[
2694,
2698
]
],
[
[
4854,
4859
]
],
[
[
7030,
7034
],
[
4562,
4566
],
[
6738,
6742
]
],
[
[
9474,
9478
]
],
[
[
11881,
11903
]
],
[
[
14017,
14032
]
],
[
[
14506,
14530
]
],
[
[
15057,
15072
]
],
[
[
16784,
16785
]
],
[
[
16805,
16820
]
],
[
[
17683,
17703
]
]
] |
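A sketch of the factory functions defined above. `ChunkShardingSpec` and the placement strings are assumptions about the `torch.distributed._shard.sharding_spec` API, and the snippet presumes an initialized two-rank process group.

spec = shard_spec.ChunkShardingSpec(
    dim=0,
    placements=["rank:0/cuda:0", "rank:1/cuda:1"],
)
st_ones = ones(spec, 10, 5)                    # (10, 5) ShardedTensor of ones
st_full = full(spec, (10, 5), fill_value=3.0)  # (10, 5) ShardedTensor of threes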
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import math
import operator
import os
import queue
import time
from threading import Thread
import numpy as np
import torch
from fairseq.data import data_utils
logger = logging.getLogger(__name__)
# Object used by _background_consumer to signal the source is exhausted
# to the main thread.
_sentinel = object()
class CountingIterator(object):
"""Wrapper around an iterable that maintains the iteration count.
Args:
iterable (iterable): iterable to wrap
start (int): starting iteration count. Note that this doesn't
actually advance the iterator.
total (int): override the iterator length returned by
``__len__``. This can be used to truncate *iterator*.
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, start=None, total=None):
self.iterable = iterable
self.itr = iter(self)
if start is None:
self.n = getattr(iterable, 'n', 0)
else:
self.n = start
if total is None:
self.total = self.n + len(iterable)
else:
self.total = total
def __len__(self):
return self.total
def __iter__(self):
for x in self.iterable:
if self.n >= self.total:
raise RuntimeError(
'Mismatch between actual and expected iterable length. '
'Please report this to the fairseq developers.'
)
self.n += 1
yield x
def __next__(self):
return next(self.itr)
def has_next(self):
"""Whether the iterator has been exhausted."""
return self.n < len(self)
def skip(self, num_to_skip):
"""Fast-forward the iterator by skipping *num_to_skip* elements."""
next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)
return self
def take(self, n):
"""
Truncates the iterator to n elements at most.
"""
self.total = min(self.total, n)
# Propagate this change to the underlying iterator
# Only take after what we have already consumed (i.e. after restarting
# from checkpoint mid epoch, we have to subtract self.n which is the
# starting point)
#
# This to maintain the invariant self.total = self.n + len(iterable),
# before calling __next__ or __iter__
propagated_take = max(n - self.n, 0)
if hasattr(self.iterable, "take"):
self.iterable.take(propagated_take)
else:
self.iterable = itertools.islice(self.iterable, propagated_take)
class EpochBatchIterating(object):
def __len__(self) -> int:
raise NotImplementedError
@property
def next_epoch_idx(self):
raise NotImplementedError
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
"""Return a new iterator over the dataset.
Args:
shuffle (bool, optional): shuffle batches before returning the
iterator (default: True).
fix_batches_to_gpus: ensure that batches are always
allocated to the same shards across epochs. Requires
that :attr:`dataset` supports prefetching (default: False).
"""
raise NotImplementedError
def end_of_epoch(self) -> bool:
"""Returns whether the most recent epoch iterator has been exhausted"""
raise NotImplementedError
@property
def iterations_in_epoch(self) -> int:
"""The number of consumed batches in the current epoch."""
raise NotImplementedError
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
raise NotImplementedError
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
raise NotImplementedError
class StreamingEpochBatchIterator(EpochBatchIterating):
def __init__(
self, dataset, epoch=1, num_shards=1, shard_id=0,
):
assert isinstance(dataset, torch.utils.data.IterableDataset)
self.dataset = dataset
self.epoch = max(epoch, 1) # we use 1-based indexing for epochs
self._current_epoch_iterator = None
self.num_shards = num_shards
self.shard_id = shard_id
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called."""
if self._current_epoch_iterator is not None and self.end_of_epoch():
return self.epoch + 1
else:
return self.epoch
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
self.epoch = self.next_epoch_idx
self.dataset.set_epoch(self.epoch)
self._current_epoch_iterator = CountingIterator(
iterable=ShardedIterator(
iterable=self.dataset,
num_shards=self.num_shards,
shard_id=self.shard_id,
),
)
return self._current_epoch_iterator
def end_of_epoch(self) -> bool:
return not self._current_epoch_iterator.has_next()
@property
def iterations_in_epoch(self) -> int:
if self._current_epoch_iterator is not None:
return self._current_epoch_iterator.n
return 0
def state_dict(self):
return {
'epoch': self.epoch,
}
def load_state_dict(self, state_dict):
self.epoch = state_dict['epoch']
class EpochBatchIterator(EpochBatchIterating):
"""A multi-epoch iterator over a :class:`torch.utils.data.Dataset`.
Compared to :class:`torch.utils.data.DataLoader`, this iterator:
- can be reused across multiple epochs with the :func:`next_epoch_itr`
method (optionally shuffled between epochs)
- can be serialized/deserialized with the :func:`state_dict` and
:func:`load_state_dict` methods
- supports sharding with the *num_shards* and *shard_id* arguments
Args:
dataset (~torch.utils.data.Dataset): dataset from which to load the data
collate_fn (callable): merges a list of samples to form a mini-batch
batch_sampler (~torch.utils.data.Sampler or a callable): an iterator over batches of
indices, or a callable to create such an iterator (~torch.utils.data.Sampler).
A callable batch_sampler will be called for each epoch to enable per epoch dynamic
batch iterators defined by this callable batch_sampler.
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
buffer_size (int, optional): the number of batches to keep ready in the
queue. Helps speeding up dataloading. When buffer_size is zero, the
default torch.utils.data.DataLoader preloading is used.
timeout (int, optional): if positive, the timeout value for collecting a batch
from workers. Should always be non-negative. (default: ``0``)
"""
def __init__(
self, dataset, collate_fn, batch_sampler, seed=1, num_shards=1, shard_id=0,
num_workers=0, epoch=1, buffer_size=0, timeout=0,
):
assert isinstance(dataset, torch.utils.data.Dataset)
self.dataset = dataset
self.collate_fn = collate_fn
self.batch_sampler = batch_sampler
self._frozen_batches = tuple(batch_sampler) if not callable(batch_sampler) else None
self.seed = seed
self.num_shards = num_shards
self.shard_id = shard_id
self.num_workers = num_workers
# This upper limit here is to prevent people from abusing this feature
# in a shared computing environment.
self.buffer_size = min(buffer_size, 20)
self.timeout = timeout
self.epoch = max(epoch, 1) # we use 1-based indexing for epochs
self.shuffle = True
self._cur_epoch_itr = None
self._next_epoch_itr = None
self._supports_prefetch = getattr(dataset, 'supports_prefetch', False)
@property
def frozen_batches(self):
if self._frozen_batches is None:
self._frozen_batches = tuple(self.batch_sampler(self.dataset, self.epoch))
return self._frozen_batches
def __len__(self):
return int(math.ceil(len(self.frozen_batches) / float(self.num_shards)))
@property
def n(self):
return self.iterations_in_epoch
@property
def next_epoch_idx(self):
"""Return the epoch index after *next_epoch_itr* is called."""
if self._next_epoch_itr is not None:
return self.epoch
elif self._cur_epoch_itr is not None and self.end_of_epoch():
return self.epoch + 1
else:
return self.epoch
def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False):
"""Return a new iterator over the dataset.
Args:
shuffle (bool, optional): shuffle batches before returning the
iterator (default: True).
fix_batches_to_gpus: ensure that batches are always
allocated to the same shards across epochs. Requires
that :attr:`dataset` supports prefetching (default: False).
"""
self.epoch = self.next_epoch_idx
self.dataset.set_epoch(self.epoch)
if self._next_epoch_itr is not None:
self._cur_epoch_itr = self._next_epoch_itr
self._next_epoch_itr = None
else:
if callable(self.batch_sampler):
# reset _frozen_batches to refresh the next epoch
self._frozen_batches = None
self._cur_epoch_itr = self._get_iterator_for_epoch(
self.epoch, shuffle, fix_batches_to_gpus=fix_batches_to_gpus,
)
self.shuffle = shuffle
return self._cur_epoch_itr
def end_of_epoch(self) -> bool:
"""Returns whether the most recent epoch iterator has been exhausted"""
return not self._cur_epoch_itr.has_next()
@property
def iterations_in_epoch(self):
"""The number of consumed batches in the current epoch."""
if self._cur_epoch_itr is not None:
return self._cur_epoch_itr.n
elif self._next_epoch_itr is not None:
return self._next_epoch_itr.n
return 0
def state_dict(self):
"""Returns a dictionary containing a whole state of the iterator."""
if self.end_of_epoch():
epoch = self.epoch + 1
iter_in_epoch = 0
else:
epoch = self.epoch
iter_in_epoch = self.iterations_in_epoch
return {
'version': 2,
'epoch': epoch,
'iterations_in_epoch': iter_in_epoch,
'shuffle': self.shuffle,
}
def load_state_dict(self, state_dict):
"""Copies the state of the iterator from the given *state_dict*."""
self.epoch = state_dict['epoch']
itr_pos = state_dict.get('iterations_in_epoch', 0)
version = state_dict.get('version', 1)
if itr_pos > 0:
# fast-forward epoch iterator
self._next_epoch_itr = self._get_iterator_for_epoch(
self.epoch,
shuffle=state_dict.get('shuffle', True),
offset=itr_pos,
)
if self._next_epoch_itr is None:
if version == 1:
# legacy behavior: we finished the epoch, increment epoch counter
self.epoch += 1
else:
raise RuntimeError(
'Cannot resume training due to dataloader mismatch, please '
'report this to the fairseq developers. You can relaunch '
'training with `--reset-dataloader` and it should work.'
)
else:
self._next_epoch_itr = None
def _get_iterator_for_epoch(self, epoch, shuffle, fix_batches_to_gpus=False, offset=0):
def shuffle_batches(batches, seed):
with data_utils.numpy_seed(seed):
np.random.shuffle(batches)
return batches
if self._supports_prefetch:
batches = self.frozen_batches
if shuffle and not fix_batches_to_gpus:
batches = shuffle_batches(list(batches), self.seed + epoch)
batches = list(ShardedIterator(
batches, self.num_shards, self.shard_id, fill_value=[]
))
self.dataset.prefetch([i for s in batches for i in s])
if shuffle and fix_batches_to_gpus:
batches = shuffle_batches(batches, self.seed + epoch + self.shard_id)
else:
if shuffle:
batches = shuffle_batches(list(self.frozen_batches), self.seed + epoch)
else:
batches = self.frozen_batches
batches = list(ShardedIterator(
batches, self.num_shards, self.shard_id, fill_value=[]
))
if offset > 0 and offset >= len(batches):
return None
if self.num_workers > 0:
os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
# Create data loader
itr = torch.utils.data.DataLoader(
self.dataset,
collate_fn=self.collate_fn,
batch_sampler=batches[offset:],
num_workers=self.num_workers,
timeout=self.timeout,
)
# Wrap with a BufferedIterator if needed
if self.buffer_size > 0:
itr = BufferedIterator(self.buffer_size, itr)
        # Wrap with CountingIterator so callers can track how many batches were consumed
itr = CountingIterator(itr, start=offset)
return itr
class GroupedIterator(CountingIterator):
"""Wrapper around an iterable that returns groups (chunks) of items.
Args:
iterable (iterable): iterable to wrap
chunk_size (int): size of each chunk
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, chunk_size):
itr = _chunk_iterator(iterable, chunk_size)
super().__init__(
itr,
start=int(math.ceil(getattr(iterable, 'n', 0) / float(chunk_size))),
total=int(math.ceil(len(iterable) / float(chunk_size))),
)
self.chunk_size = chunk_size
def _chunk_iterator(itr, chunk_size):
chunk = []
for x in itr:
chunk.append(x)
if len(chunk) == chunk_size:
yield chunk
chunk = []
if len(chunk) > 0:
yield chunk
class ShardedIterator(CountingIterator):
"""A sharded wrapper around an iterable, padded to length.
Args:
iterable (iterable): iterable to wrap
num_shards (int): number of shards to split the iterable into
        shard_id (int): which shard to iterate over
fill_value (Any, optional): padding value when the iterable doesn't
evenly divide *num_shards* (default: None).
Attributes:
n (int): number of elements consumed from this iterator
"""
def __init__(self, iterable, num_shards, shard_id, fill_value=None):
if shard_id < 0 or shard_id >= num_shards:
raise ValueError('shard_id must be between 0 and num_shards')
sharded_len = int(math.ceil(len(iterable) / float(num_shards)))
batch_size = len(list(iterable)[0])
        last = max(list(map(max, *list(iterable))))
# This function receives a list [1,2,3,...., last] where each number represents one of the input subsequences
# In the unmodified fairseq, if you have 4 GPUS, fairseq will give the first GPU subsequences [1,5,9,13,...],
# the second GPU will get [2,6,10,14,..], the third GPU will get [3,7,11,15] and so on...
# If we want to do caching, we can't use that. We need each GPU to get a continuous list of input subsequences (like [1,2,3,4,5,...]).
# So what the following code does, is it splits the input into *continuous* chunks of subsequences. For example, if we have
# 4 GPUs and 100,000 input subsequences, the first GPU will get [1,2,3,...,25000], the second GPU will get [25001,25002,25003,...],
# and so on.
# The above description was written with the assumption that batch_size is 1. This function also works when batch_size is greater than 1.
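        # Toy illustration of the remapping (hypothetical numbers): with num_shards=2,
        # batch_size=1 and 8 subsequences, stock fairseq would hand shard 0 the strided
        # set [1, 3, 5, 7] and shard 1 the set [2, 4, 6, 8]; the code below instead hands
        # shard 0 the continuous block [1, 2, 3, 4] and shard 1 the block [5, 6, 7, 8],
        # which is what the caching scheme described above requires.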
iterable = range(0, last)
all_itrs = []
for i in range(shard_id*batch_size, (shard_id+1)*batch_size):
            itr = list(itertools.islice(iterable, i * sharded_len,
                                        (i + 1) * sharded_len))
all_itrs.append(itr)
itr = [x for x in itertools.chain(*itertools.zip_longest(*all_itrs)) if x is not None]
itr = [itr[i:i+batch_size] for i in range(0, len(itr), batch_size)] #split to batches
        if len(itr) != sharded_len:  # this makes sure that we don't miss any input subsequences
to_add = sharded_len - len(itr)
to_add = [[e] for e in range(sharded_len-to_add, sharded_len)]
itr = itr + to_add
super().__init__(
itr,
start=int(math.ceil(getattr(iterable, 'n', 0) / float(num_shards))),
total=sharded_len,
)
class BackgroundConsumer(Thread):
def __init__(self, queue, source, max_len):
Thread.__init__(self)
self._queue = queue
self._source = source
self._max_len = max_len
self.count = 0
def run(self):
try:
for item in self._source:
self._queue.put(item)
# Stop if we reached the maximum length
self.count += 1
if self._max_len is not None and self.count >= self._max_len:
break
# Signal the consumer we are done.
self._queue.put(_sentinel)
except Exception as e:
self._queue.put(e)
class BufferedIterator(object):
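    """Wrap an iterable and prefetch its elements into a bounded queue from a
    background thread (see ``BackgroundConsumer`` above), so that data loading
    overlaps with consumption. A debug message is emitted if the buffer stays
    nearly empty for a long time, which usually indicates a data loading
    bottleneck.
    """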
def __init__(self, size, iterable):
self._queue = queue.Queue(size)
self._iterable = iterable
self._consumer = None
self.start_time = time.time()
self.warning_time = None
self.total = len(iterable)
def _create_consumer(self):
self._consumer = BackgroundConsumer(
self._queue,
self._iterable,
self.total,
)
self._consumer.daemon = True
self._consumer.start()
def __iter__(self):
return self
def __len__(self):
return self.total
def take(self, n):
self.total = min(self.total, n)
# Propagate this change to the underlying iterator
if hasattr(self._iterable, "take"):
self._iterable.take(n)
else:
self._iterable = itertools.islice(self._iterable, n)
def __next__(self):
# Create consumer if not created yet
if self._consumer is None:
self._create_consumer()
# Notify the user if there is a data loading bottleneck
if self._queue.qsize() < min(2, max(1, self._queue.maxsize // 2)):
if time.time() - self.start_time > 5 * 60:
if self.warning_time is None or time.time() - self.warning_time > 15 * 60:
logger.debug(
"Data loading buffer is empty or nearly empty. This may "
"indicate a data loading bottleneck, and increasing the "
"number of workers (--num-workers) may help."
)
self.warning_time = time.time()
# Get next example
item = self._queue.get(True)
if isinstance(item, Exception):
raise item
if item is _sentinel:
raise StopIteration()
return item
| [
[
[
185,
194
],
[
2054,
2063
],
[
2830,
2839
],
[
17276,
17285
],
[
17449,
17458
],
[
17466,
17475
],
[
19568,
19577
]
],
[
[
202,
209
],
[
383,
390
]
],
[
[
217,
221
],
[
9019,
9023
],
[
14934,
14938
],
[
15015,
15019
],
[
16065,
16069
],
[
17929,
17933
]
],
[
[
229,
237
]
],
[
[
245,
247
],
[
13877,
13879
]
],
[
[
255,
260
],
[
18804,
18809
]
],
[
[
268,
272
],
[
18913,
18917
],
[
19900,
19904
],
[
19988,
19992
],
[
20361,
20365
]
],
[
[
295,
301
],
[
18056,
18062
],
[
18121,
18127
]
],
[
[
310,
321
],
[
12841,
12843
]
],
[
[
329,
334
],
[
4346,
4351
],
[
7948,
7953
],
[
13991,
13996
]
],
[
[
361,
371
],
[
12796,
12806
]
],
[
[
374,
380
],
[
20051,
20057
]
],
[
[
506,
515
],
[
18635,
18644
],
[
20520,
20529
]
],
[
[
535,
551
],
[
14487,
14503
],
[
15357,
15373
],
[
5064,
5080
],
[
14408,
14424
]
],
[
[
2887,
2906
],
[
4206,
4225
],
[
5784,
5803
]
],
[
[
4178,
4205
]
],
[
[
5765,
5783
]
],
[
[
14471,
14486
]
],
[
[
15115,
15130
],
[
14831,
14846
]
],
[
[
15341,
15356
],
[
5103,
5118
],
[
13131,
13146
],
[
13653,
13668
]
],
[
[
18037,
18055
],
[
19052,
19070
]
],
[
[
18716,
18732
],
[
14317,
14333
]
]
] |
#!/usr/bin/env python3
from parse_topology_for_hydrogens import parse_top_for_h
def gen_h_ndx(orig_ndx, topology, out_name='h_prot.ndx'):
ndx_ind = list()
with open(orig_ndx, 'r') as f:
line = f.readline()
while '[ Protein ]' not in line:
line = f.readline()
line = f.readline()
while ';' == line[0]:
line = f.readline()
line = line.strip()
while len(line):
ndx_ind.extend(line.split())
line = f.readline().strip()
ndx_ind = [int(elem) for elem in ndx_ind]
good_ind = parse_top_for_h(topology)
filtered_h_ind = [elem for elem in ndx_ind if elem in good_ind]
formated_h_ind = ['{:>4} '.format(elem) for elem in filtered_h_ind]
with open(out_name, 'w') as new_file:
ind = 0
new_file.write('[ Protein ]\n')
while ind < len(filtered_h_ind):
new_file.write(''.join(formated_h_ind[ind:ind+15]))
new_file.write('\n')
# print(''.join(formated_h_ind[ind:ind+15]))
ind += 15
# gen_h_ndx('./prot_dir/prot.ndx', './prot_dir/topol.top')
| [
[
[
64,
79
],
[
584,
599
]
],
[
[
86,
95
]
]
] |
from suds.client import Client
from suds import WebFault
from model.project import Project
class SoapHelper:
def __init__(self, app):
self.app = app
def can_login(self, username, password):
client = Client("http://localhost:8080/mantisbt-1.2.20/api/soap/mantisconnect.php?wsdl")
try:
client.service.mc_login(username, password)
return True
except WebFault:
return False
def get_project_list(self, username, password):
project_list = []
client = Client("http://localhost:8080/mantisbt-1.2.20/api/soap/mantisconnect.php?wsdl")
projects = client.service.mc_projects_get_user_accessible(username, password)
for i in range(len(projects)):
name = projects[i].name
description = projects[i].description
project_list.append(Project(name=name, description=description))
return project_list
| [
[
[
24,
30
],
[
226,
232
],
[
546,
552
]
],
[
[
48,
56
],
[
414,
422
]
],
[
[
83,
90
],
[
869,
876
]
],
[
[
98,
108
]
]
] |
def sol():
a, b = 0, 1
for i in range(int(input())):
a, b = b, a + b
print(a)
if __name__ == "__main__":
sol()
| [
[
[
4,
7
],
[
131,
134
]
]
] |
import json
import platform
import requests
import six
import sys
from .version import __version__
class SlackRequest(object):
def __init__(
self,
proxies=None
):
# HTTP configs
self.custom_user_agent = None
self.proxies = proxies
# Construct the user-agent header with the package info, Python version and OS version.
self.default_user_agent = {
            # __name__ is the full dotted module path; we only want the top-level package name
"client": "{0}/{1}".format(__name__.split('.')[0], __version__),
"python": "Python/{v.major}.{v.minor}.{v.micro}".format(v=sys.version_info),
"system": "{0}/{1}".format(platform.system(), platform.release())
}
def get_user_agent(self):
# Check for custom user-agent and append if found
if self.custom_user_agent:
custom_ua_list = ["/".join(client_info) for client_info in self.custom_user_agent]
custom_ua_string = " ".join(custom_ua_list)
self.default_user_agent['custom'] = custom_ua_string
# Concatenate and format the user-agent string to be passed into request headers
ua_string = []
for key, val in self.default_user_agent.items():
ua_string.append(val)
user_agent_string = " ".join(ua_string)
return user_agent_string
def append_user_agent(self, name, version):
if self.custom_user_agent:
self.custom_user_agent.append([name.replace("/", ":"), version.replace("/", ":")])
else:
self.custom_user_agent = [[name, version]]
def do(self, token=None, request="?", post_data=None,
as_user=None, domain="slack.com", timeout=None):
"""
Perform a POST request to the Slack Web API
Args:
token (str): your authentication token
request (str): the method to call from the Slack API. For example: 'channels.list'
post_data (dict): key/value arguments to pass for the request. For example:
{'channel': 'CABC12345'}
as_user (str): if using a workspace app, the user_id of the user to act on behalf of
domain (str): if for some reason you want to send your request to something other
than slack.com
timeout (float): stop waiting for a response after a given number of seconds
"""
# Pull `file` out so it isn't JSON encoded like normal fields.
# Only do this for requests that are UPLOADING files; downloading files
# use the 'file' argument to point to a File ID.
post_data = post_data or {}
# Move singular file objects into `files`
upload_requests = ['files.upload']
# Move file content into requests' `files` param
files = None
if request in upload_requests:
files = {'file': post_data.pop('file')} if 'file' in post_data else None
# Check for plural fields and convert them to comma-separated strings if needed
for field in {'channels', 'users', 'types'} & set(post_data.keys()):
if isinstance(post_data[field], list):
post_data[field] = ",".join(post_data[field])
# Convert any params which are list-like to JSON strings
# Example: `attachments` is a dict, and needs to be passed as JSON
for k, v in six.iteritems(post_data):
if isinstance(v, (list, dict)):
post_data[k] = json.dumps(v)
return self.post_http_request(token, request, post_data, as_user, files, timeout, domain)
def post_http_request(self, token, api_method, post_data,
as_user=None, files=None, timeout=None, domain="slack.com"):
"""
This method build and submits the Web API HTTP request
:param token: You app's Slack access token
:param api_method: The API method endpoint to submit the request to
:param post_data: The request payload
:param as_user: The user_id if using a workspace app on behalf of a user
:param files: Any files to be submitted during upload calls
:param timeout: Stop waiting for a response after a given number of seconds
:param domain: The URL to submit the API request to
:return:
"""
# Override token header if `token` is passed in post_data
if post_data is not None and "token" in post_data:
token = post_data['token']
# Set user-agent and auth headers
headers = {
'user-agent': self.get_user_agent(),
'Authorization': 'Bearer {}'.format(token)
}
if as_user:
headers["X-Slack-User"] = as_user
# Submit the request
res = requests.post(
'https://{0}/api/{1}'.format(domain, api_method),
headers=headers,
data=post_data,
files=files,
timeout=timeout,
proxies=self.proxies
)
return res
| [
[
[
7,
11
],
[
3496,
3500
]
],
[
[
19,
27
],
[
696,
704
],
[
715,
723
]
],
[
[
35,
43
],
[
4781,
4789
]
],
[
[
51,
54
],
[
3395,
3398
]
],
[
[
62,
65
],
[
638,
641
]
],
[
[
88,
99
],
[
554,
565
]
],
[
[
108,
120
]
]
] |
#
# Copyright (c) 2020 Project CHIP Authors
# Copyright (c) 2020 Google LLC.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Python interface for Chip Stack
#
"""Chip Stack interface
"""
from __future__ import absolute_import
from __future__ import print_function
import sys
import os
import time
import glob
import platform
import logging
from threading import Lock, Event
from ctypes import *
from .ChipUtility import ChipUtility
from .ChipExceptions import *
__all__ = [
"DeviceStatusStruct",
"ChipStackException",
"DeviceError",
"ChipStackError",
"ChipStack",
]
ChipStackDLLBaseName = "_ChipDeviceCtrl.so"
def _singleton(cls):
instance = [None]
def wrapper(*args, **kwargs):
if instance[0] is None:
instance[0] = cls(*args, **kwargs)
return instance[0]
return wrapper
class DeviceStatusStruct(Structure):
_fields_ = [
("ProfileId", c_uint32),
("StatusCode", c_uint16),
("SysErrorCode", c_uint32),
]
class LogCategory(object):
"""Debug logging categories used by chip."""
# NOTE: These values must correspond to those used in the chip C++ code.
Disabled = 0
Error = 1
Progress = 2
Detail = 3
Retain = 4
@staticmethod
def categoryToLogLevel(cat):
if cat == LogCategory.Error:
return logging.ERROR
elif cat == LogCategory.Progress:
return logging.INFO
elif cat == LogCategory.Detail:
return logging.DEBUG
elif cat == LogCategory.Retain:
return logging.CRITICAL
else:
return logging.NOTSET
class ChipLogFormatter(logging.Formatter):
"""A custom logging.Formatter for logging chip library messages."""
def __init__(
self,
datefmt=None,
logModulePrefix=False,
logLevel=False,
logTimestamp=False,
logMSecs=True,
):
fmt = "%(message)s"
if logModulePrefix:
fmt = "CHIP:%(chip-module)s: " + fmt
if logLevel:
fmt = "%(levelname)s:" + fmt
if datefmt is not None or logTimestamp:
fmt = "%(asctime)s " + fmt
super(ChipLogFormatter, self).__init__(fmt=fmt, datefmt=datefmt)
self.logMSecs = logMSecs
def formatTime(self, record, datefmt=None):
if datefmt is None:
timestampStr = time.strftime("%Y-%m-%d %H:%M:%S%z")
if self.logMSecs:
timestampUS = record.__dict__.get("timestamp-usec", 0)
timestampStr = "%s.%03ld" % (timestampStr, timestampUS / 1000)
return timestampStr
_CompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p)
_ErrorFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_ulong, POINTER(DeviceStatusStruct))
_LogMessageFunct = CFUNCTYPE(None, c_int64, c_int64, c_char_p, c_uint8, c_char_p)
@_singleton
class ChipStack(object):
def __init__(self, installDefaultLogHandler=True):
self.networkLock = Lock()
self.completeEvent = Event()
self._ChipStackLib = None
self._chipDLLPath = None
self.devMgr = None
self.callbackRes = None
self._activeLogFunct = None
self.addModulePrefixToLogMessage = True
# Locate and load the chip shared library.
self._loadLib()
# Arrange to log output from the chip library to a python logger object with the
# name 'chip.ChipStack'. If desired, applications can override this behavior by
# setting self.logger to a different python logger object, or by calling setLogFunct()
# with their own logging function.
self.logger = logging.getLogger(__name__)
self.setLogFunct(self.defaultLogFunct)
# Determine if there are already handlers installed for the logger. Python 3.5+
# has a method for this; on older versions the check has to be done manually.
if hasattr(self.logger, "hasHandlers"):
hasHandlers = self.logger.hasHandlers()
else:
hasHandlers = False
logger = self.logger
while logger is not None:
if len(logger.handlers) > 0:
hasHandlers = True
break
if not logger.propagate:
break
logger = logger.parent
# If a logging handler has not already been initialized for 'chip.ChipStack',
# or any one of its parent loggers, automatically configure a handler to log to
# stdout. This maintains compatibility with a number of applications which expect
# chip log output to go to stdout by default.
#
# This behavior can be overridden in a variety of ways:
# - Initialize a different log handler before ChipStack is initialized.
# - Pass installDefaultLogHandler=False when initializing ChipStack.
# - Replace the StreamHandler on self.logger with a different handler object.
# - Set a different Formatter object on the existing StreamHandler object.
# - Reconfigure the existing ChipLogFormatter object.
# - Configure chip to call an application-specific logging function by
# calling self.setLogFunct().
# - Call self.setLogFunct(None), which will configure the chip library
# to log directly to stdout, bypassing python altogether.
#
if installDefaultLogHandler and not hasHandlers:
logHandler = logging.StreamHandler(stream=sys.stdout)
logHandler.setFormatter(ChipLogFormatter())
self.logger.addHandler(logHandler)
self.logger.setLevel(logging.DEBUG)
def HandleComplete(appState, reqState):
self.callbackRes = True
self.completeEvent.set()
def HandleError(appState, reqState, err, devStatusPtr):
self.callbackRes = self.ErrorToException(err, devStatusPtr)
self.completeEvent.set()
self.cbHandleComplete = _CompleteFunct(HandleComplete)
self.cbHandleError = _ErrorFunct(HandleError)
self.blockingCB = None # set by other modules(BLE) that require service by thread while thread blocks.
# Initialize the chip library
res = self._ChipStackLib.pychip_Stack_Init()
if res != 0:
            raise self.ErrorToException(res)
@property
def defaultLogFunct(self):
"""Returns a python callable which, when called, logs a message to the python logger object
currently associated with the ChipStack object.
The returned function is suitable for passing to the setLogFunct() method."""
def logFunct(timestamp, timestampUSec, moduleName, logCat, message):
moduleName = ChipUtility.CStringToString(moduleName)
message = ChipUtility.CStringToString(message)
if self.addModulePrefixToLogMessage:
message = "CHIP:%s: %s" % (moduleName, message)
logLevel = LogCategory.categoryToLogLevel(logCat)
msgAttrs = {
"chip-module": moduleName,
"timestamp": timestamp,
"timestamp-usec": timestampUSec,
}
self.logger.log(logLevel, message, extra=msgAttrs)
return logFunct
def setLogFunct(self, logFunct):
"""Set the function used by the chip library to log messages.
The supplied object must be a python callable that accepts the following
arguments:
timestamp (integer)
timestampUS (integer)
module name (encoded UTF-8 string)
log category (integer)
message (encoded UTF-8 string)
Specifying None configures the chip library to log directly to stdout."""
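        # Hedged example of a conforming callable (the body is illustrative only, and
        # `chipStack` is assumed to be an initialized ChipStack instance):
        #
        #   def myLogFunct(timestamp, timestampUSec, moduleName, logCat, message):
        #       print(moduleName, message)
        #
        #   chipStack.setLogFunct(myLogFunct)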
if logFunct is None:
logFunct = 0
if not isinstance(logFunct, _LogMessageFunct):
logFunct = _LogMessageFunct(logFunct)
with self.networkLock:
# NOTE: ChipStack must hold a reference to the CFUNCTYPE object while it is
# set. Otherwise it may get garbage collected, and logging calls from the
# chip library will fail.
self._activeLogFunct = logFunct
self._ChipStackLib.pychip_Stack_SetLogFunct(logFunct)
def Shutdown(self):
        self.Call(lambda: self._ChipStackLib.pychip_Stack_Shutdown())
self.networkLock = None
self.completeEvent = None
self._ChipStackLib = None
self._chipDLLPath = None
self.devMgr = None
self.callbackRes = None
def Call(self, callFunct):
# throw error if op in progress
self.callbackRes = None
self.completeEvent.clear()
with self.networkLock:
res = callFunct()
self.completeEvent.set()
if res == 0 and self.callbackRes != None:
return self.callbackRes
return res
def CallAsync(self, callFunct):
# throw error if op in progress
self.callbackRes = None
self.completeEvent.clear()
with self.networkLock:
res = callFunct()
if res != 0:
self.completeEvent.set()
raise self.ErrorToException(res)
while not self.completeEvent.isSet():
if self.blockingCB:
self.blockingCB()
self.completeEvent.wait(0.05)
if isinstance(self.callbackRes, ChipStackException):
raise self.callbackRes
return self.callbackRes
def ErrorToException(self, err, devStatusPtr=None):
if err == 4044 and devStatusPtr:
devStatus = devStatusPtr.contents
msg = ChipUtility.CStringToString(
(
self._ChipStackLib.pychip_Stack_StatusReportToString(
devStatus.ProfileId, devStatus.StatusCode
)
)
)
sysErrorCode = (
devStatus.SysErrorCode if (devStatus.SysErrorCode != 0) else None
)
if sysErrorCode != None:
msg = msg + " (system err %d)" % (sysErrorCode)
return DeviceError(
devStatus.ProfileId, devStatus.StatusCode, sysErrorCode, msg
)
else:
return ChipStackError(
err,
ChipUtility.CStringToString(
(self._ChipStackLib.pychip_Stack_ErrorToString(err))
),
)
def LocateChipDLL(self):
if self._chipDLLPath:
return self._chipDLLPath
scriptDir = os.path.dirname(os.path.abspath(__file__))
# When properly installed in the chip package, the Chip Device Manager DLL will
# be located in the package root directory, along side the package's
# modules.
dmDLLPath = os.path.join(scriptDir, ChipStackDLLBaseName)
if os.path.exists(dmDLLPath):
self._chipDLLPath = dmDLLPath
return self._chipDLLPath
# For the convenience of developers, search the list of parent paths relative to the
        # running script looking for a CHIP build directory containing the Chip Device
# Manager DLL. This makes it possible to import and use the ChipDeviceMgr module
# directly from a built copy of the CHIP source tree.
buildMachineGlob = "%s-*-%s*" % (platform.machine(), platform.system().lower())
relDMDLLPathGlob = os.path.join(
"build",
buildMachineGlob,
"src/controller/python/.libs",
ChipStackDLLBaseName,
)
for dir in self._AllDirsToRoot(scriptDir):
dmDLLPathGlob = os.path.join(dir, relDMDLLPathGlob)
for dmDLLPath in glob.glob(dmDLLPathGlob):
if os.path.exists(dmDLLPath):
self._chipDLLPath = dmDLLPath
return self._chipDLLPath
raise Exception(
"Unable to locate Chip Device Manager DLL (%s); expected location: %s"
% (ChipStackDLLBaseName, scriptDir)
)
# ----- Private Members -----
def _AllDirsToRoot(self, dir):
dir = os.path.abspath(dir)
while True:
yield dir
parent = os.path.dirname(dir)
if parent == "" or parent == dir:
break
dir = parent
def _loadLib(self):
if self._ChipStackLib is None:
self._ChipStackLib = CDLL(self.LocateChipDLL())
self._ChipStackLib.pychip_Stack_Init.argtypes = []
self._ChipStackLib.pychip_Stack_Init.restype = c_uint32
self._ChipStackLib.pychip_Stack_Shutdown.argtypes = []
self._ChipStackLib.pychip_Stack_Shutdown.restype = c_uint32
self._ChipStackLib.pychip_Stack_StatusReportToString.argtypes = [
c_uint32,
c_uint16,
]
self._ChipStackLib.pychip_Stack_StatusReportToString.restype = c_char_p
self._ChipStackLib.pychip_Stack_ErrorToString.argtypes = [c_uint32]
self._ChipStackLib.pychip_Stack_ErrorToString.restype = c_char_p
self._ChipStackLib.pychip_Stack_SetLogFunct.argtypes = [_LogMessageFunct]
self._ChipStackLib.pychip_Stack_SetLogFunct.restype = c_uint32
| [
[
[
792,
807
]
],
[
[
831,
845
]
],
[
[
853,
856
],
[
6098,
6101
]
],
[
[
864,
866
],
[
11180,
11182
],
[
11196,
11198
],
[
11428,
11430
],
[
11485,
11487
],
[
12039,
12041
],
[
12270,
12272
],
[
12380,
12382
],
[
12753,
12755
],
[
12837,
12839
]
],
[
[
874,
878
],
[
2961,
2965
]
],
[
[
886,
890
],
[
12335,
12339
]
],
[
[
898,
906
],
[
11965,
11973
],
[
11985,
11993
]
],
[
[
914,
921
],
[
2237,
2244
],
[
1927,
1934
],
[
2002,
2009
],
[
2074,
2081
],
[
2147,
2154
],
[
2197,
2204
],
[
4209,
4216
],
[
6069,
6076
],
[
6246,
6253
]
],
[
[
944,
948
],
[
3540,
3544
]
],
[
[
950,
955
],
[
3576,
3581
]
],
[
[
975,
976
]
],
[
[
1002,
1013
],
[
10251,
10262
],
[
10927,
10938
],
[
7347,
7358
],
[
7409,
7420
]
],
[
[
1042,
1043
],
[
1447,
1456
],
[
1498,
1506
],
[
1532,
1540
],
[
1568,
1576
],
[
3213,
3222
],
[
3229,
3237
],
[
3239,
3247
],
[
3263,
3272
],
[
3279,
3287
],
[
3289,
3297
],
[
3299,
3306
],
[
3308,
3315
],
[
3356,
3365
],
[
3372,
3379
],
[
3381,
3388
],
[
3390,
3398
],
[
3400,
3407
],
[
3409,
3417
],
[
10001,
10019
],
[
10737,
10748
],
[
10874,
10888
],
[
13048,
13052
],
[
13197,
13205
],
[
13336,
13344
],
[
13439,
13447
],
[
13465,
13473
],
[
13564,
13572
],
[
13643,
13651
],
[
13721,
13729
],
[
13882,
13890
]
],
[
[
1045,
1052
]
],
[
[
1170,
1190
],
[
11452,
11472
],
[
12159,
12179
],
[
12626,
12646
]
],
[
[
1220,
1230
],
[
3422,
3432
]
],
[
[
1428,
1446
],
[
3316,
3334
]
],
[
[
1593,
1604
],
[
1889,
1900
],
[
1961,
1972
],
[
2035,
2046
],
[
2108,
2119
],
[
7582,
7593
]
],
[
[
2220,
2236
],
[
2765,
2781
],
[
6146,
6162
]
],
[
[
3196,
3210
],
[
6590,
6604
]
],
[
[
3249,
3260
],
[
6650,
6661
]
],
[
[
3337,
3353
],
[
8446,
8462
],
[
8488,
8504
],
[
13798,
13814
]
],
[
[
3439,
3448
]
]
] |
import gym
import keras as k
from keras.models import Sequential
from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout
from keras.optimizers import Adam
import numpy as np
from datetime import datetime
from matplotlib import pyplot as PLT
import time
import csv
import os
# You can adjust these hyperparameters
POPULATION_SIZE = 50
L1=20
L2=10
L3=50
L4=4
# L1=2
# L2=3
# L3=4
# L4=5
POOLING_SIZE = (2,2)
FILTER_SIZE_1 = (3,3)
FILTER_SIZE_2 = (5,5)
ELITE_SET_SIZE = 5
MUTATION_RATE = 0.5
FRAME_SIZE = 210*160*1
INPUT_DIM = 2*FRAME_SIZE
INPUT_SHAPE = (210, 160, 2)
FINAL_DIMENSION_X = int(((INPUT_SHAPE[0] - 2*int(FILTER_SIZE_1[0]/2))/2 - 2*int(FILTER_SIZE_2[0]/2))/2)
FINAL_DIMENSION_Y = int(((INPUT_SHAPE[1] - 2*int(FILTER_SIZE_1[0]/2))/2 - 2*int(FILTER_SIZE_2[0]/2))/2)
env = gym.make('SpaceInvaders-v0')
keepTraining = True
slack_logs = np.zeros((6,1))
def visualize(featureVector):
regularImage = featureVector[0,:FRAME_SIZE].reshape((210,160))
differenceImage = featureVector[0,FRAME_SIZE:].reshape((210,160))
PLT.imshow(regularImage)
PLT.show()
PLT.imshow(differenceImage)
PLT.show()
def writeCsv(index, data):
slack_logs[index] = data
# For slack_logs:
# [0] Generation
# [1] Highest Score
# [2] Current Score
# [3] Games Played
# [4] Start Time
# [5] All Time High Score
with open("logs.csv", "w", newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=',')
writer.writerows(slack_logs)
def calculatePolicySize():
    # Total number of weights and biases, matching the layers built in buildModel():
    #   conv1:  FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * INPUT_SHAPE[2] * L1 + L1
    #   conv2:  FILTER_SIZE_2[0] * FILTER_SIZE_2[1] * L1 * L2 + L2
    #   dense3: FINAL_DIMENSION_X * FINAL_DIMENSION_Y * L2 * L3 + L3
    #   dense4: L3 * L4 + L4
    return FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * INPUT_SHAPE[2] * L1 + L1 + FILTER_SIZE_2[0] * FILTER_SIZE_2[1] * L1 * L2 + L2 + FINAL_DIMENSION_X * FINAL_DIMENSION_Y * L2 * L3 + L3 + L3 * L4 + L4
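# Hedged worked example: with the constants defined above (L1=20, L2=10, L3=50, L4=4,
# FILTER_SIZE_1=(3,3), FILTER_SIZE_2=(5,5), FINAL_DIMENSION_X=50, FINAL_DIMENSION_Y=37)
# calculatePolicySize() evaluates to 380 + 5010 + 925050 + 204 = 930644 values per policy vector.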
# This function is called each time a new member of the population is created
def initPopulation():
population = np.random.rand(POPULATION_SIZE, calculatePolicySize())
population = population*2-1
return population
def convert_prediction_to_action(prediction):
index = np.argmax(prediction[0])
# NOOP
if (index == 0):
return 0
# FIRE
elif (index == 1):
return 1
# RIGHT
elif (index == 2):
return 3
# LEFT
elif (index == 3):
return 4
return 0
def playGame(model):
score=0
done=False
action=0
frame = np.zeros((1,FRAME_SIZE))
previous_frame = np.zeros((1,FRAME_SIZE))
env.reset()
observation_dim = list(INPUT_SHAPE)
observation_dim.insert(0,1)
observation_dim = tuple(observation_dim)
while not done:
env.render()
observation, reward, done, _ = env.step(action)
frame = np.reshape(observation[:,:,0],(1,FRAME_SIZE))
frame = np.where(frame > 0, 1.0,0)
difference = frame-previous_frame
final_observation=np.zeros((1,INPUT_DIM))
final_observation[0,:FRAME_SIZE]=frame
final_observation[0,FRAME_SIZE:]=difference
final_observation = np.reshape(final_observation, observation_dim)
prediction = model.predict(final_observation)
action = convert_prediction_to_action(prediction)
score+=reward
writeCsv(2, score)
previous_frame = np.copy(frame)
# print("Score:",score)
return score
# This is where the weights are put into the neural net to see how well it goes
def evaluate(dnnmodel, population, gamesPlayed):
scores=np.zeros(POPULATION_SIZE)
for i in range(POPULATION_SIZE):
nnFormatPolicyVector = applyPolicyVectorToNN(population[i])
dnnmodel.set_weights(nnFormatPolicyVector)
scores[i] = playGame(dnnmodel)
gamesPlayed+=1
writeCsv(3, gamesPlayed)
return scores
# Constructs the model that is to be used
def buildModel():
model = Sequential()
# layer1=Dense(L1, activation = 'relu', input_dim = INPUT_DIM, kernel_initializer='uniform')
layer1=Conv2D(L1, FILTER_SIZE_1, activation='relu', input_shape = INPUT_SHAPE, kernel_initializer='uniform')
model.add(layer1)
model.add(MaxPooling2D(pool_size=POOLING_SIZE))
layer2=Conv2D(L2, FILTER_SIZE_2, activation='relu', kernel_initializer='uniform')
model.add(layer2)
model.add(MaxPooling2D(pool_size=POOLING_SIZE))
# model.add(Dropout(0.25))
model.add(Flatten())
layer3=Dense(L3, activation = 'relu', kernel_initializer='uniform')
model.add(layer3)
layer4=Dense(L4, activation ='softmax', kernel_initializer='uniform')
model.add(layer4)
adam = Adam(lr=0.01)
model.compile(loss='mean_squared_error', optimizer=adam)
weights=model.get_weights()
print(len(weights))
print("====================================")
return model
def applyPolicyVectorToNN(policyVector):
    # Slice the flat policy vector into the weight/bias blocks expected by the model:
    #   conv1:  FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * INPUT_SHAPE[2] * L1 weights, then L1 biases
    #   conv2:  FILTER_SIZE_2[0] * FILTER_SIZE_2[1] * L1 * L2 weights, then L2 biases
    #   dense3: FINAL_DIMENSION_X * FINAL_DIMENSION_Y * L2 * L3 weights, then L3 biases
    #   dense4: L3 * L4 weights, then L4 biases
offset=FILTER_SIZE_1[0] * FILTER_SIZE_1[1] * INPUT_SHAPE[2] * L1
sec1 = policyVector[:offset].reshape(FILTER_SIZE_1[0], FILTER_SIZE_1[1], INPUT_SHAPE[2], L1)
sec2 = policyVector[offset:offset+L1]
offset+=L1
sec3 = policyVector[offset:offset+FILTER_SIZE_2[0] * FILTER_SIZE_2[1] * L1 * L2].reshape(FILTER_SIZE_2[0], FILTER_SIZE_2[1], L1, L2)
    offset += FILTER_SIZE_2[0] * FILTER_SIZE_2[1] * L1 * L2
sec4 = policyVector[offset:offset+L2]
offset+=L2
sec5 = policyVector[offset:offset+FINAL_DIMENSION_X*FINAL_DIMENSION_Y*L2*L3].reshape(FINAL_DIMENSION_X*FINAL_DIMENSION_Y*L2, L3)
offset+=FINAL_DIMENSION_X*FINAL_DIMENSION_Y*L2*L3
sec6 = policyVector[offset:offset+L3]
offset+=L3
sec7 = policyVector[offset:offset+L3*L4].reshape(L3, L4)
offset+=L3*L4
sec8 = policyVector[offset:]
nnFormat = []
nnFormat.append(sec1)
nnFormat.append(sec2)
nnFormat.append(sec3)
nnFormat.append(sec4)
nnFormat.append(sec5)
nnFormat.append(sec6)
nnFormat.append(sec7)
nnFormat.append(sec8)
return nnFormat
# This is where the members of the population are ranked
def selection(scores, population):
eliteSet = np.zeros((ELITE_SET_SIZE,calculatePolicySize()))
scoresTemp=np.copy(scores)
for i in range(ELITE_SET_SIZE):
index = np.argmax(scoresTemp)
scoresTemp[index] = 0
eliteSet[i] = population[index]
return eliteSet
def cross(policy1, policy2):
newPolicy = policy1.copy()
    mask = np.random.randint(2, size=newPolicy.shape).astype(bool)
newPolicy[mask] = policy2[mask]
# for i in range(calculatePolicySize()):
# rand = np.random.uniform()
# if rand > 0.5:
# newPolicy[i] = policy2[i]
return newPolicy
# This is where crossover occurs based on the selection process
def crossover(scores, population):
crossoverSet = np.zeros((POPULATION_SIZE,calculatePolicySize()))
selectionProbability = np.array(scores)/np.sum(scores)
for i in range(POPULATION_SIZE - ELITE_SET_SIZE):
randomIndex = np.random.choice(range(POPULATION_SIZE), p=selectionProbability)
policy1 = population[randomIndex]
randomIndex = np.random.choice(range(POPULATION_SIZE), p=selectionProbability)
policy2 = population[randomIndex]
newPolicy = cross(policy1, policy2)
crossoverSet[i]=newPolicy
return crossoverSet
# Lastly, the mutation is a point mutation that sometimes occurs
def mutation(crossoverPopulation):
i = int((POPULATION_SIZE - ELITE_SET_SIZE) * np.random.random_sample())
j = int(calculatePolicySize() * np.random.random_sample())
for _ in range(int(i*j*MUTATION_RATE)):
crossoverPopulation[i][j] = np.random.random_sample() * 2 - 1
# for i in range(POPULATION_SIZE - ELITE_SET_SIZE):
# for j in range(calculatePolicySize()):
# rand = np.random.uniform()
# if(rand < MUTATION_RATE):
# crossoverPopulation[i][j] = np.random.random_sample() * 2 - 1
return crossoverPopulation
def generateNewGeneration(scores, population):
elitePopulation = selection(scores, population)
crossoverPopulation = crossover(scores, population)
mutationPopulation = mutation(crossoverPopulation)
for i in range(ELITE_SET_SIZE):
mutationPopulation[POPULATION_SIZE-ELITE_SET_SIZE+i] = elitePopulation[i]
return mutationPopulation
def saveHighestScorePolicy(population, generation, scores):
if (generation % 10 == 0):
index = np.argmax(scores)
filename='generation'+str(generation)+'HS'+str(scores[index])+'.npy'
np.save(os.path.join('SavedScores', filename) ,population[index])
print("Saved generation to file "+filename)
def loadPolicy(filename, population, index):
policy=np.load(filename)
print("Loaded\n",policy)
population[index]=policy
def measureTime():
global lasttime
currentTime=time.time()
diff=currentTime-lasttime
lasttime=currentTime
return diff
# test_selection()
# quit()
env.reset()
population = initPopulation()
# loadPolicy('generation0.npy',population,0)
dnnmodel = buildModel()
generation = 0
lasttime = time.time()
all_time_high_score = 0
writeCsv(4, time.time())
while (keepTraining):
scores = evaluate(dnnmodel, population, generation*POPULATION_SIZE)
print(int(measureTime())," sec Generation: ", generation, " Highest Score: ", np.max(scores), " Games Played: ", generation*POPULATION_SIZE+POPULATION_SIZE)
writeCsv(0, generation)
writeCsv(1, np.max(scores))
if (np.max(scores) > all_time_high_score):
all_time_high_score = np.max(scores)
writeCsv(5, all_time_high_score)
saveHighestScorePolicy(population, generation, scores)
population = generateNewGeneration(scores, population)
print(int(measureTime())," sec New generation created.")
generation+=1
| [
[
[
7,
10
],
[
810,
813
]
],
[
[
18,
28
]
],
[
[
54,
64
],
[
4026,
4036
]
],
[
[
90,
96
],
[
4147,
4153
],
[
4339,
4345
]
],
[
[
98,
108
]
],
[
[
110,
122
],
[
4285,
4297
],
[
4450,
4462
]
],
[
[
124,
131
],
[
4534,
4541
]
],
[
[
133,
138
],
[
4557,
4562
],
[
4652,
4657
]
],
[
[
140,
147
]
],
[
[
177,
181
],
[
4749,
4753
]
],
[
[
189,
200
],
[
872,
874
],
[
9686,
9688
],
[
9810,
9812
],
[
9834,
9836
],
[
9903,
9905
],
[
2112,
2114
],
[
2280,
2282
],
[
2595,
2597
],
[
2641,
2643
],
[
2912,
2914
],
[
2974,
2976
],
[
3069,
3071
],
[
3220,
3222
],
[
3455,
3457
],
[
3657,
3659
],
[
6433,
6435
],
[
6497,
6499
],
[
6565,
6567
],
[
6749,
6751
],
[
6799,
6801
],
[
7131,
7133
],
[
7208,
7210
],
[
7225,
7227
],
[
7316,
7318
],
[
7445,
7447
],
[
7804,
7806
],
[
7867,
7869
],
[
7975,
7977
],
[
8787,
8789
],
[
8890,
8892
],
[
9065,
9067
]
],
[
[
222,
230
]
],
[
[
254,
267
],
[
1060,
1063
],
[
1089,
1092
],
[
1104,
1107
],
[
1136,
1139
]
],
[
[
275,
279
],
[
9447,
9451
],
[
9496,
9500
],
[
9197,
9201
]
],
[
[
287,
290
],
[
1444,
1447
]
],
[
[
298,
300
],
[
8898,
8900
]
],
[
[
341,
356
],
[
9587,
9602
],
[
9732,
9747
],
[
9748,
9763
],
[
2127,
2142
],
[
3666,
3681
],
[
3702,
3717
],
[
7141,
7156
],
[
7259,
7274
],
[
7339,
7354
],
[
7468,
7483
],
[
7768,
7783
],
[
8589,
8604
]
],
[
[
362,
364
],
[
1868,
1870
],
[
1873,
1875
],
[
1916,
1918
],
[
4154,
4156
],
[
5313,
5315
],
[
5409,
5411
],
[
5451,
5453
],
[
5467,
5469
],
[
5546,
5548
],
[
5599,
5601
],
[
5657,
5659
]
],
[
[
368,
370
],
[
1921,
1923
],
[
1926,
1928
],
[
1967,
1969
],
[
4346,
4348
],
[
5551,
5553
],
[
5603,
5605
],
[
5662,
5664
],
[
5703,
5705
],
[
5719,
5721
],
[
5796,
5798
],
[
5847,
5849
],
[
5903,
5905
]
],
[
[
374,
376
],
[
1970,
1972
],
[
1975,
1977
],
[
1980,
1982
],
[
4563,
4565
],
[
5799,
5801
],
[
5851,
5853
],
[
5906,
5908
],
[
5947,
5949
],
[
5963,
5965
],
[
6004,
6006
],
[
6019,
6021
],
[
6039,
6041
]
],
[
[
380,
382
],
[
1985,
1987
],
[
1990,
1992
],
[
4658,
4660
],
[
6007,
6009
],
[
6023,
6025
],
[
6042,
6044
]
],
[
[
413,
425
],
[
4308,
4320
],
[
4473,
4485
]
],
[
[
434,
447
],
[
643,
656
],
[
747,
760
],
[
1813,
1826
],
[
1832,
1845
],
[
1878,
1891
],
[
1897,
1910
],
[
4158,
4171
],
[
5258,
5271
],
[
5277,
5290
],
[
5357,
5370
],
[
5375,
5388
],
[
5619,
5632
],
[
5638,
5651
]
],
[
[
456,
469
],
[
674,
687
],
[
778,
791
],
[
4350,
4363
],
[
5508,
5521
],
[
5527,
5540
],
[
5563,
5576
],
[
5581,
5594
]
],
[
[
478,
492
],
[
6443,
6457
],
[
6532,
6546
],
[
7277,
7291
],
[
7786,
7800
],
[
8545,
8559
],
[
8605,
8619
]
],
[
[
497,
510
],
[
7922,
7935
]
],
[
[
518,
528
],
[
555,
565
],
[
955,
965
],
[
1024,
1034
],
[
2607,
2617
],
[
2653,
2663
],
[
2945,
2955
],
[
3122,
3132
],
[
3168,
3178
]
],
[
[
541,
550
],
[
3081,
3090
]
],
[
[
566,
577
],
[
620,
631
],
[
724,
735
],
[
1851,
1862
],
[
2709,
2720
],
[
4206,
4217
],
[
5296,
5307
],
[
5393,
5404
]
],
[
[
594,
611
],
[
1931,
1948
],
[
5760,
5777
],
[
5811,
5828
],
[
5867,
5884
]
],
[
[
698,
715
],
[
1949,
1966
],
[
5778,
5795
],
[
5829,
5846
],
[
5885,
5902
]
],
[
[
804,
807
],
[
9310,
9313
],
[
2670,
2673
],
[
2827,
2830
],
[
2879,
2882
]
],
[
[
839,
851
],
[
9517,
9529
]
],
[
[
859,
869
],
[
1179,
1189
],
[
1505,
1515
]
],
[
[
893,
902
]
],
[
[
1152,
1160
],
[
9484,
9492
],
[
9770,
9778
],
[
9798,
9806
],
[
9926,
9934
],
[
3410,
3418
],
[
3909,
3917
]
],
[
[
1522,
1541
],
[
2144,
2163
],
[
6458,
6477
],
[
7157,
7176
],
[
7843,
7862
]
],
[
[
2077,
2091
],
[
9335,
9349
]
],
[
[
2226,
2254
],
[
3338,
3366
]
],
[
[
2526,
2534
],
[
3859,
3867
]
],
[
[
3601,
3609
],
[
9545,
9553
]
],
[
[
4000,
4010
],
[
9408,
9418
]
],
[
[
4952,
4973
],
[
3751,
3772
]
],
[
[
6387,
6396
],
[
8376,
8385
]
],
[
[
6682,
6687
],
[
7572,
7577
]
],
[
[
7081,
7090
],
[
8432,
8441
]
],
[
[
7724,
7732
],
[
8487,
8495
]
],
[
[
8311,
8332
],
[
10036,
10057
]
],
[
[
8684,
8706
],
[
9964,
9986
]
],
[
[
9013,
9023
]
],
[
[
9146,
9157
],
[
9618,
9629
],
[
10092,
10103
]
],
[
[
9322,
9332
],
[
9564,
9574
],
[
9987,
9997
],
[
10066,
10076
]
],
[
[
9397,
9405
],
[
9554,
9562
]
],
[
[
9421,
9431
],
[
9576,
9586
],
[
9654,
9664
],
[
9721,
9731
],
[
9782,
9792
],
[
9999,
10009
],
[
10143,
10153
]
],
[
[
9436,
9444
],
[
9230,
9238
]
],
[
[
9459,
9478
],
[
9851,
9870
]
],
[
[
9536,
9542
],
[
9693,
9699
],
[
9817,
9823
],
[
9841,
9847
],
[
9910,
9916
],
[
10011,
10017
],
[
10058,
10064
]
],
[
[
9881,
9900
],
[
9938,
9957
],
[
9851,
9870
]
],
[
[
10023,
10033
],
[
9564,
9574
],
[
9987,
9997
],
[
10066,
10076
]
],
[
[
9243,
9251
]
]
] |
import torch.nn as nn
import torch.nn.functional as F
from ssd.layers import L2Norm
from ssd.modeling import registry
from ssd.utils.model_zoo import load_state_dict_from_url
model_urls = {
'vgg': 'https://s3.amazonaws.com/amdegroot-models/vgg16_reducedfc.pth',
}
# borrowed from https://github.com/amdegroot/ssd.pytorch/blob/master/ssd.py
def add_vgg(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
elif v == 'C':
layers += [nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
pool5 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
conv6 = nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6)
conv7 = nn.Conv2d(1024, 1024, kernel_size=1)
layers += [pool5, conv6,
nn.ReLU(inplace=True), conv7, nn.ReLU(inplace=True)]
return layers
def add_extras(cfg, i, size=300):
# Extra layers added to VGG for feature scaling
layers = []
in_channels = i
flag = False
for k, v in enumerate(cfg):
if in_channels != 'S':
if v == 'S':
layers += [nn.Conv2d(in_channels, cfg[k + 1], kernel_size=(1, 3)[flag], stride=2, padding=1)]
else:
layers += [nn.Conv2d(in_channels, v, kernel_size=(1, 3)[flag])]
flag = not flag
in_channels = v
if size == 512:
layers.append(nn.Conv2d(in_channels, 128, kernel_size=1, stride=1))
layers.append(nn.Conv2d(128, 256, kernel_size=4, stride=1, padding=1))
return layers
def add_header(vgg, extra_layers, boxes_per_location, num_classes):
regression_headers = []
classification_headers = []
vgg_source = [21, -2]
for k, v in enumerate(vgg_source):
regression_headers += [nn.Conv2d(vgg[v].out_channels,
boxes_per_location[k] * 4, kernel_size=3, padding=1)]
classification_headers += [nn.Conv2d(vgg[v].out_channels,
boxes_per_location[k] * num_classes, kernel_size=3, padding=1)]
for k, v in enumerate(extra_layers[1::2], 2):
regression_headers += [nn.Conv2d(v.out_channels, boxes_per_location[k]
* 4, kernel_size=3, padding=1)]
classification_headers += [nn.Conv2d(v.out_channels, boxes_per_location[k]
* num_classes, kernel_size=3, padding=1)]
return regression_headers, classification_headers
vgg_base = {
'300': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512],
'512': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'C', 512, 512, 512, 'M',
512, 512, 512],
}
extras_base = {
'300': [256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256],
'512': [256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256],
}
class VGG(nn.Module):
def __init__(self, cfg):
super().__init__()
size = cfg.INPUT.IMAGE_SIZE
vgg_config = vgg_base[str(size)]
extras_config = extras_base[str(size)]
self.vgg = nn.ModuleList(add_vgg(vgg_config))
self.extras = nn.ModuleList(add_extras(extras_config, i=1024, size=size))
self.l2_norm = L2Norm(512, scale=20)
self.reset_parameters()
def reset_parameters(self):
for m in self.extras.modules():
if isinstance(m, nn.Conv2d):
nn.init.xavier_uniform_(m.weight)
nn.init.zeros_(m.bias)
def init_from_pretrain(self, state_dict):
self.vgg.load_state_dict(state_dict)
def forward(self, x):
features = []
for i in range(23):
x = self.vgg[i](x)
s = self.l2_norm(x) # Conv4_3 L2 normalization
features.append(s)
# apply vgg up to fc7
for i in range(23, len(self.vgg)):
x = self.vgg[i](x)
features.append(x)
for k, v in enumerate(self.extras):
x = F.relu(v(x), inplace=True)
if k % 2 == 1:
features.append(x)
return tuple(features)
@registry.BACKBONES.register('vgg')
def vgg(cfg, pretrained=True):
model = VGG(cfg)
if pretrained:
model.init_from_pretrain(load_state_dict_from_url(model_urls['vgg']))
return model
| [
[
[
7,
21
],
[
3247,
3249
],
[
482,
484
],
[
567,
569
],
[
657,
659
],
[
771,
773
],
[
790,
792
],
[
866,
868
],
[
929,
931
],
[
990,
992
],
[
1061,
1063
],
[
1142,
1144
],
[
1172,
1174
],
[
1469,
1471
],
[
1597,
1599
],
[
1744,
1746
],
[
1820,
1822
],
[
2121,
2123
],
[
2282,
2284
],
[
2503,
2505
],
[
2659,
2661
],
[
3459,
3461
],
[
3516,
3518
],
[
3755,
3757
],
[
3783,
3785
],
[
3833,
3835
]
],
[
[
29,
53
],
[
4332,
4333
]
],
[
[
78,
84
],
[
3599,
3605
]
],
[
[
110,
118
],
[
4456,
4464
]
],
[
[
151,
175
],
[
4595,
4619
]
],
[
[
177,
187
],
[
4620,
4630
]
],
[
[
352,
359
],
[
3473,
3480
]
],
[
[
1219,
1229
],
[
3530,
3540
]
],
[
[
1901,
1911
]
],
[
[
2850,
2858
],
[
3372,
3380
]
],
[
[
3081,
3092
],
[
3416,
3427
]
],
[
[
3243,
3246
],
[
4534,
4537
]
],
[
[
4495,
4498
]
]
] |
from __future__ import unicode_literals
import frappe
def set_default_address(doc,method):
if doc.is_primary_address:
for row in doc.links:
if row.link_doctype=="Customer":
cust = frappe.get_doc("Customer",row.link_name)
cust.default_address=doc.name
cust.save()
def set_default_contact(doc,method):
if doc.is_primary_contact:
for row in doc.links:
if row.link_doctype=="Customer":
cust = frappe.get_doc("Customer",row.link_name)
cust.default_contact_person=doc.name
cust.save() | [
[
[
23,
39
]
],
[
[
47,
53
],
[
191,
197
],
[
419,
425
]
],
[
[
59,
78
]
],
[
[
287,
306
]
]
] |
class MenuItem(object):
TEXT_NAME = 'name'
TEXT_URL = 'url_name'
TEXT_SUBMENU = 'submenu'
def __init__(self, name, url=None, *args):
super(MenuItem, self).__init__()
self.name = name
self.url = url
self.url_args = args
self.sub_menu = []
def add_sub_menu_item(self, name, url):
item = {self.TEXT_NAME: name, self.TEXT_URL: url}
self.sub_menu.append(item)
    def __getitem__(self, key):
        # dict-style access to the item's attributes
        return getattr(self, key)
def to_text(self):
output = {}
output[self.TEXT_NAME] = self.name
if self.url:
output[self.TEXT_URL] = self.url
if self.sub_menu:
output[self.TEXT_SUBMENU] = self.sub_menu
return output
class Nav:
def __init__(self, *args, **kwargs):
self.menu = []
def add_menu(self, menu):
self.menu.append(menu)
def get_menu_list(self):
output = []
for x in self.menu:
output.append(x.to_text())
return output
| [
[
[
6,
14
],
[
164,
172
]
],
[
[
755,
758
]
]
] |
import os
import time
import traceback
# import functools
def getobj(s):
    # read a script file; a context manager ensures the handle is closed promptly
    with open(s, "r", encoding='utf-8') as f:
        return f.read()
def getobjs(s):
objs = []
fs = os.listdir(s)
for f in fs:
absf = os.path.join(s, f)
if os.path.isfile(absf) and os.path.splitext(f)[1] == '.py':
objs.append(absf)
elif os.path.isdir(absf):
objs += getobjs(absf)
return objs
class gameplay(object):
def __init__(self, scenario="__general", _basedir=None):
print("A new game object is constructed.")
if _basedir is None:
_basedir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
self.__basedir = _basedir
self.var = {
"load_script": self.load_script,
"load_scripts": self.load_scripts,
"running": True
# "output":self.output
}
self.load_scripts("__commons")
self.load_script(os.path.join("scenarios", scenario + '.py'))
self.paused = True
self.speed = 0
def end(self):
print("A game has ended.")
def run(self):
print("A game has started.")
try:
# definition
# execution
while self.var['running']:
self.var['play_round']()
self.pause_game()
while self.paused:
# self.output()
ope = input()
# print("Game object got operation:" + ope)
exec(ope)
time.sleep(2 * (0.5 ** self.speed))
except:
print("!!!!! --- 游戏体抛出异常 --- !!!!!")
traceback.print_exc()
self.end()
def output(self):
print(self.var)
def load_script(self, scriptpath):
exec(getobj(os.path.join(self.__basedir, scriptpath)), self.var, self.var)
def load_scripts(self, scriptdir):
objs = getobjs(os.path.join(self.__basedir, scriptdir))
objs.sort()
for i in objs:
exec(getobj(i), self.var, self.var)
def pause_game(self):
self.paused = True
def continue_game(self):
self.paused = False
def set_speed(self, speed):
self.speed = speed
| [
[
[
7,
9
],
[
168,
170
],
[
207,
209
],
[
232,
234
],
[
257,
259
],
[
320,
322
],
[
554,
556
],
[
570,
572
],
[
586,
588
],
[
846,
848
],
[
1518,
1520
],
[
1638,
1640
]
],
[
[
18,
22
],
[
1295,
1299
]
],
[
[
31,
40
],
[
1387,
1396
]
],
[
[
70,
76
],
[
1511,
1517
],
[
1721,
1727
]
],
[
[
137,
144
],
[
353,
360
],
[
1630,
1637
]
],
[
[
392,
400
]
]
] |
import os
import tensorflow as tf
import numpy as np
import pandas as pd
from sklearn.utils import check_random_state
from skbio.stats.composition import clr_inv as softmax
from biom import Table
from patsy import dmatrix
def random_multinomial_model(num_samples, num_features,
reps=1,
low=2, high=10,
beta_mean=0,
beta_scale=5,
mu=1,
sigma=1,
seed=0):
""" Generates a table using a random poisson regression model.
Here we will be simulating microbial counts given the model, and the
corresponding model priors.
Parameters
----------
num_samples : int
Number of samples
num_features : int
Number of features
    reps : int
        Number of replicates of the gradient; the gradient spans
        num_samples // reps points.
    seed : int or np.random.RandomState
        Seed or random state used to draw the regression coefficients.
low : float
Smallest gradient value.
high : float
Largest gradient value.
beta_mean : float
Mean of beta prior (for regression coefficients)
beta_scale : float
Scale of beta prior (for regression coefficients)
mu : float
Mean sequencing depth (in log units)
sigma : float
Variance for sequencing depth
Returns
-------
table : biom.Table
Biom representation of the count table.
metadata : pd.DataFrame
DataFrame containing relevant metadata.
beta : np.array
Regression parameter estimates.
"""
N = num_samples
    # generate all of the coefficients using the random multinomial model
state = check_random_state(seed)
beta = state.normal(beta_mean, beta_scale, size=(2, num_features-1))
    X = np.hstack([np.linspace(low, high, num_samples // reps)
                   for _ in range(reps)])
X = np.vstack((np.ones(N), X)).T
phi = np.hstack((np.zeros((N, 1)), X @ beta))
probs = softmax(phi)
n = [mu] * N
    table = np.vstack([
        state.multinomial(n[i], probs[i, :])
        for i in range(N)
    ]).T
samp_ids = pd.Index(['S%d' % i for i in range(num_samples)],
name='sampleid')
feat_ids = ['F%d' % i for i in range(num_features)]
balance_ids = ['L%d' % i for i in range(num_features-1)]
table = Table(table, feat_ids, samp_ids)
metadata = pd.DataFrame(X, columns=['Ones', 'X'], index=samp_ids)
beta = pd.DataFrame(beta.T, columns=['Intercept', 'beta'],
index=balance_ids)
return table, metadata, beta
def _type_cast_to_float(df):
""" Attempt to cast all of the values in dataframe to float.
This will try to type cast all of the series within the
dataframe into floats. If a column cannot be type casted,
it will be kept as is.
Parameters
----------
df : pd.DataFrame
Returns
-------
pd.DataFrame
"""
# TODO: Will need to improve this, as this is a very hacky solution.
for c in df.columns:
s = df[c]
try:
df[c] = s.astype(np.float64)
except Exception:
continue
return df
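# Editor's illustrative sketch (not part of the original module): columns that
# look numeric are converted to float, everything else is left untouched.
#
#   df = pd.DataFrame({'depth': ['1', '2.5'], 'site': ['gut', 'skin']})
#   df = _type_cast_to_float(df)
#   # df['depth'].dtype is now float64, while df['site'] remains object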
def read_metadata(filepath):
""" Reads in a sample metadata file
Parameters
----------
filepath: str
The file path location of the sample metadata file
Returns
-------
pd.DataFrame :
The metadata table with inferred types.
"""
metadata = pd.read_table(
filepath, dtype=object)
cols = metadata.columns
metadata = metadata.set_index(cols[0])
metadata = _type_cast_to_float(metadata.copy())
return metadata
def match_and_filter(table, metadata, formula,
min_sample_count, min_feature_count):
""" Matches and aligns biom and metadata tables.
This will also return the patsy representation.
Parameters
----------
table : biom.Table
Table of abundances
metadata : pd.DataFrame
Sample metadata
Returns
-------
table : biom.Table
Filtered biom table
metadata : pd.DataFrame
Sample metadata
"""
# match them
def sample_filter(val, id_, md):
return id_ in metadata.index and np.sum(val) > min_sample_count
def read_filter(val, id_, md):
return np.sum(val > 0) > min_feature_count
table = table.filter(sample_filter, axis='sample', inplace=False)
table = table.filter(read_filter, axis='observation', inplace=False)
metadata = metadata.loc[table.ids(axis='sample')]
metadata = metadata.loc[~metadata.index.duplicated(keep='first')]
def sort_f(xs):
return [xs[metadata.index.get_loc(x)] for x in xs]
table = table.sort(sort_f=sort_f, axis='sample')
design = dmatrix(formula, metadata, return_type='dataframe')
design = design.dropna()
def design_filter(val, id_, md):
return id_ in design.index
table = table.filter(design_filter, axis='sample')
return table, metadata, design
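# Editor's illustrative sketch (not part of the original module); the file
# name and the formula below are hypothetical.
#
#   metadata = read_metadata('sample-metadata.tsv')
#   table, metadata, design = match_and_filter(
#       table, metadata, formula='C(Group)',
#       min_sample_count=1000, min_feature_count=10)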
def split_training(dense_table, metadata, design, training_column=None,
num_random_test_examples=10, seed=None):
if training_column is None:
np.random.seed(seed)
idx = np.random.random(design.shape[0])
i = np.argsort(idx)[num_random_test_examples]
threshold = idx[i]
train_idx = ~(idx < threshold)
else:
train_idx = metadata.loc[design.index, training_column] == "Train"
trainX = design.loc[train_idx].values
testX = design.loc[~train_idx].values
trainY = dense_table.loc[train_idx].values
testY = dense_table.loc[~train_idx].values
return trainX, testX, trainY, testY
def silence_output():
# suppress profiling messages & compilation warnings
# taken from:
# https://stackoverflow.com/questions/47068709/your-cpu-supports-
# instructions-that-this-tensorflow-binary-was-not-compiled-to-u
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# suppress deprecation warnings
# taken from https://github.com/tensorflow/tensorflow/issues/27023
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
| [
[
[
7,
9
],
[
5904,
5906
]
],
[
[
17,
33
],
[
6057,
6059
],
[
6092,
6094
]
],
[
[
41,
52
],
[
1756,
1758
],
[
1767,
1769
],
[
1860,
1862
],
[
1871,
1873
],
[
1899,
1901
],
[
1910,
1912
],
[
1994,
1996
],
[
3072,
3074
],
[
5162,
5164
],
[
5197,
5199
],
[
5243,
5245
],
[
4208,
4210
],
[
4290,
4292
]
],
[
[
60,
72
],
[
2100,
2102
],
[
2369,
2371
],
[
2435,
2437
],
[
3437,
3439
]
],
[
[
99,
117
],
[
1649,
1667
]
],
[
[
154,
172
],
[
1951,
1958
]
],
[
[
190,
195
],
[
2321,
2326
]
],
[
[
214,
221
],
[
4742,
4749
]
],
[
[
228,
252
]
],
[
[
2570,
2589
],
[
3570,
3589
]
],
[
[
3151,
3164
]
],
[
[
3634,
3650
]
],
[
[
4993,
5007
]
],
[
[
5664,
5678
]
]
] |
# This file is part of spot_motion_monitor.
#
# Developed for LSST System Integration, Test and Commissioning.
#
# See the LICENSE file at the top-level directory of this distribution
# for details of code ownership.
#
# Use of this source code is governed by a 3-clause BSD-style
# license that can be found in the LICENSE file.
import numpy as np
import pytest
from spot_motion_monitor.camera.gaussian_camera import GaussianCamera
from spot_motion_monitor.models import FullFrameModel
from spot_motion_monitor.utils import FrameRejected, TimeHandler
class TestFullFrameModel():
def setup_class(cls):
cls.model = FullFrameModel()
cls.model.timeHandler = TimeHandler()
def checkFrame(self, flux, maxAdc, comX, comY):
return flux > 4000 and maxAdc > 130 and comX > 0 and comY > 0
def test_parametersAfterConstruction(self):
assert self.model.sigmaScale == 5.0
assert self.model.minimumNumPixels == 10
assert self.model.timeHandler is not None
def test_frameCalculations(self):
# This test requires the generation of a CCD frame which will be
# provided by the GaussianCamera
camera = GaussianCamera()
camera.seed = 1000
camera.startup()
frame = camera.getFullFrame()
info = self.model.calculateCentroid(frame)
assert info.centerX == 288.47687644439395
assert info.centerY == 224.45394404821826
assert info.flux == 3235.9182163661176
assert info.maxAdc == 135.83703259361937
assert info.fwhm == 5.749039360993981
assert info.stdNoObjects is None
def test_badFrameCalculation(self):
frame = np.ones((480, 640))
with pytest.raises(FrameRejected):
self.model.calculateCentroid(frame)
def test_failedFrameCheck(self):
# This test requires the generation of a CCD frame which will be
# provided by the GaussianCamera
self.model.frameCheck = self.checkFrame
camera = GaussianCamera()
camera.seed = 1000
camera.startup()
frame = camera.getFullFrame()
with pytest.raises(FrameRejected):
self.model.calculateCentroid(frame)
self.model.frameCheck = None
| [
[
[
338,
349
],
[
1676,
1678
]
],
[
[
357,
363
],
[
1709,
1715
],
[
2124,
2130
]
],
[
[
420,
434
],
[
1178,
1192
],
[
2004,
2018
]
],
[
[
474,
488
],
[
630,
644
]
],
[
[
527,
540
],
[
1723,
1736
],
[
2138,
2151
]
],
[
[
542,
553
],
[
679,
690
]
],
[
[
561,
579
]
]
] |
# Copyright (C) 2016-Today: Odoo Community Association (OCA)
# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import _, api, fields, models
from odoo.tools import html_sanitize
class OdooModule(models.Model):
_inherit = "abstract.action.mixin"
_name = "odoo.module"
_description = "Odoo Module"
_order = "technical_name, name"
# Column Section
name = fields.Char(
string="Name", store=True, readonly=True, compute="_compute_name"
)
technical_name = fields.Char(
string="Technical Name", index=True, required=True, readonly=True
)
module_version_ids = fields.One2many(
comodel_name="odoo.module.version",
inverse_name="module_id",
string="Versions",
readonly=True,
)
module_version_qty = fields.Integer(
string="Number of Module Versions",
compute="_compute_module_version_qty",
store=True,
)
author_ids = fields.Many2many(
string="Authors",
comodel_name="odoo.author",
compute="_compute_author",
relation="github_module_author_rel",
column1="module_id",
column2="author_id",
store=True,
)
author_ids_description = fields.Char(
string="Authors (Text)", compute="_compute_author", store=True
)
organization_serie_ids = fields.Many2many(
string="Series",
comodel_name="github.organization.serie",
compute="_compute_organization_serie",
store=True,
relation="github_module_organization_serie_rel",
column1="module_id",
column2="organization_serie_id",
)
organization_serie_ids_description = fields.Char(
string="Series (Text)", store=True, compute="_compute_organization_serie",
)
description_rst = fields.Char(
string="RST Description of the last Version",
store=True,
readonly=True,
compute="_compute_description",
)
description_rst_html = fields.Html(
string="HTML of the RST Description of the last Version",
store=True,
readonly=True,
compute="_compute_description",
)
dependence_module_version_ids = fields.Many2many(
comodel_name="odoo.module.version",
string="Module Versions that depend on this module",
relation="module_version_dependency_rel",
column1="dependency_module_id",
column2="module_version_id",
)
dependence_module_version_qty = fields.Integer(
string="Number of Module Versions that depend on this module",
compute="_compute_dependence_module_version_qty",
store=True,
)
image = fields.Binary(
string="Icon Image", compute="_compute_image", store=True, attachment=True
)
# Compute Section
@api.depends("module_version_ids.image")
def _compute_image(self):
module_version_obj = self.env["odoo.module.version"]
for module in self:
version_ids = module.module_version_ids.ids
last_version = module_version_obj.search(
[("id", "in", version_ids)], order="organization_serie_id desc", limit=1
)
module.image = last_version and last_version.image
@api.depends("technical_name", "module_version_ids.name")
def _compute_name(self):
module_version_obj = self.env["odoo.module.version"]
for module in self:
version_ids = module.module_version_ids.ids
last_version = module_version_obj.search(
[("id", "in", version_ids)], order="organization_serie_id desc", limit=1
)
if last_version:
module.name = last_version.name
else:
module.name = module.technical_name
@api.depends("module_version_ids", "module_version_ids.description_rst_html")
def _compute_description(self):
module_version_obj = self.env["odoo.module.version"]
for module in self:
version_ids = module.module_version_ids.ids
last_version = module_version_obj.search(
[("id", "in", version_ids)], order="organization_serie_id desc", limit=1
)
if last_version:
module.description_rst = last_version.description_rst
module.description_rst_html = last_version.description_rst_html
else:
module.description_rst = ""
module.description_rst_html = html_sanitize(
"<h1 style='color:gray;'>" + _("No Version Found") + "</h1>"
)
@api.depends("dependence_module_version_ids.dependency_module_ids")
def _compute_dependence_module_version_qty(self):
for module in self:
module.dependence_module_version_qty = len(
module.dependence_module_version_ids
)
@api.depends("module_version_ids")
def _compute_module_version_qty(self):
for module in self:
module.module_version_qty = len(module.module_version_ids)
@api.depends("module_version_ids.author_ids")
def _compute_author(self):
for module in self:
authors = []
for version in module.module_version_ids:
authors += version.author_ids
authors = set(authors)
module.author_ids = [x.id for x in authors]
module.author_ids_description = ", ".join(sorted([x.name for x in authors]))
@api.depends("module_version_ids.organization_serie_id")
def _compute_organization_serie(self):
for module in self:
organization_series = []
for version in module.module_version_ids:
organization_series += version.organization_serie_id
organization_series = set(organization_series)
module.organization_serie_ids = [x.id for x in organization_series]
module.organization_serie_ids_description = " - ".join(
[x.name for x in sorted(organization_series, key=lambda x: x.sequence)]
)
# Custom Section
@api.model
def create_if_not_exist(self, technical_name):
module = self.search([("technical_name", "=", technical_name)])
if not module:
module = self.create({"technical_name": technical_name})
return module
def name_get(self):
return [(module.id, module.technical_name) for module in self]
| [
[
[
209,
210
],
[
4632,
4633
]
],
[
[
212,
215
],
[
2884,
2887
],
[
3325,
3328
],
[
3866,
3869
],
[
4688,
4691
],
[
4966,
4969
],
[
5148,
5151
],
[
5563,
5566
],
[
6186,
6189
]
],
[
[
217,
223
],
[
470,
476
],
[
585,
591
],
[
704,
710
],
[
881,
887
],
[
1032,
1038
],
[
1306,
1312
],
[
1426,
1432
],
[
1761,
1767
],
[
1886,
1892
],
[
2070,
2076
],
[
2275,
2281
],
[
2568,
2574
],
[
2752,
2758
]
],
[
[
225,
231
],
[
288,
294
]
],
[
[
255,
268
],
[
4568,
4581
]
],
[
[
277,
287
]
]
] |
import sys
import math
import os
import torch
import torchvision
import numpy as np
from pkg_resources import resource_stream
def interpolate1d(x, values, tangents):
    '''
    Piecewise cubic Hermite interpolation of a 1D spline whose knots sit at
    the integer positions 0 .. len(values) - 1.
    Args:
      x: query points.
      values: knot values of the spline.
      tangents: knot tangents of the spline (same length as `values`).
    Returns:
      Returns the interpolated or extrapolated values for each query point,
      depending on whether or not the query lies within the span of the spline.
    '''
assert torch.is_tensor(x)
assert torch.is_tensor(values)
assert torch.is_tensor(tangents)
float_dtype = x.dtype
assert values.dtype == float_dtype
assert tangents.dtype == float_dtype
assert len(values.shape) == 1
assert len(tangents.shape) == 1
assert values.shape[0] == tangents.shape[0]
x_lo = torch.floor(torch.clamp(x, torch.as_tensor(0),
values.shape[0] - 2)).type(torch.int64)
x_hi = x_lo + 1
# Compute the relative distance between each `x` and the knot below it.
t = x - x_lo.type(float_dtype)
# Compute the cubic hermite expansion of `t`.
t_sq = t**2
t_cu = t * t_sq
h01 = -2. * t_cu + 3. * t_sq
h00 = 1. - h01
h11 = t_cu - t_sq
h10 = h11 - t_sq + t
# Linearly extrapolate above and below the extents of the spline for all
# values.
value_before = tangents[0] * t + values[0]
value_after = tangents[-1] * (t - 1.) + values[-1]
# Cubically interpolate between the knots below and above each query point.
neighbor_values_lo = values[x_lo]
neighbor_values_hi = values[x_hi]
neighbor_tangents_lo = tangents[x_lo]
neighbor_tangents_hi = tangents[x_hi]
value_mid = (
neighbor_values_lo * h00 + neighbor_values_hi * h01 +
neighbor_tangents_lo * h10 + neighbor_tangents_hi * h11)
return torch.where(t < 0., value_before,
torch.where(t > 1., value_after, value_mid))
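# Editor's worked example (not part of the original module): at the knot
# positions the Hermite bases collapse (h00 or h01 equals 1, the rest vanish),
# so integer queries reproduce `values` exactly, e.g.
#   interpolate1d(torch.tensor([0., 1., 2.]),
#                 torch.tensor([0., 1., 4.]),
#                 torch.tensor([0., 2., 4.]))  # -> tensor([0., 1., 4.])
# Queries outside [0, len(values) - 1] are extrapolated linearly using the
# first or last tangent.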
def log_safe(x):
x = torch.as_tensor(x)
return torch.log(torch.min(x, torch.tensor(33e37).to(x)))
def load_spline_params():
dirname = os.path.dirname(__file__)
with open(os.path.join(dirname, '../misc/partition_spline.npz'), "rb") as spline_file:
with np.load(spline_file, allow_pickle=False) as f:
spline_x_scale = torch.tensor(f['x_scale'])
spline_values = torch.tensor(f['values'])
spline_tangents = torch.tensor(f['tangents'])
return spline_x_scale, spline_values, spline_tangents
def get_partition_init(shape):
shape = torch.as_tensor(shape)
base1 = (2.25 * shape - 4.5) / (torch.abs(shape - 2) + 0.25) + shape + 2
base2 = 5. / 18. * log_safe(4 * shape - 15) + 8
return torch.where(shape < 4, base1, base2)
def get_partition(shape):
shape = torch.as_tensor(shape)
assert (shape >= 0).all()
init = get_partition_init(shape)
x_scale, values, tangents = load_spline_params()
return interpolate1d(init * x_scale.to(init), values.to(init), tangents.to(init))
def general_adaptive_loss(x, shape, bowl=1.):
input_shape = x.shape
shape = torch.as_tensor(shape).to(x.device)
bowl = torch.as_tensor(bowl).to(x.device)
b = x.size(0)
x = x.view(b, -1)
if len(shape.shape) == 0:
shape = shape.unsqueeze(dim=0).expand([b, ]).unsqueeze(dim=1)
else:
shape = shape.view(b, -1)
if len(bowl.shape) == 0:
bowl = bowl.unsqueeze(dim=0).expand([b, ]).unsqueeze(dim=1)
else:
bowl = bowl.view(b, -1)
partition = get_partition(shape)
ans = (torch.abs(shape - 2)/shape) * (torch.pow((torch.square(x/bowl) /
torch.abs(shape - 2) + 1), shape/2) - 1) + log_safe(bowl) + log_safe(partition)
return ans.view(input_shape)
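# Editor's note (not part of the original module): per element, the expression
# above evaluates the general/adaptive robust loss family, roughly
#   rho(x, a, c) = (|a - 2| / a) * (((x / c)^2 / |a - 2| + 1)^(a / 2) - 1)
#                  + log(c) + log(Z(a))
# with `shape` playing the role of a, `bowl` the role of the scale c, and
# `partition` the normalizer Z(a) obtained from the spline loaded above.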
| [
[
[
7,
10
]
],
[
[
18,
22
]
],
[
[
30,
32
],
[
1980,
1982
],
[
2020,
2022
]
],
[
[
41,
46
],
[
369,
374
],
[
399,
404
],
[
434,
439
],
[
696,
701
],
[
708,
713
],
[
723,
728
],
[
805,
810
],
[
1728,
1733
],
[
1785,
1790
],
[
1857,
1862
],
[
1887,
1892
],
[
1897,
1902
],
[
1910,
1915
],
[
2186,
2191
],
[
2241,
2246
],
[
2297,
2302
],
[
2429,
2434
],
[
2489,
2494
],
[
2594,
2599
],
[
2671,
2676
],
[
2989,
2994
],
[
3036,
3041
],
[
3446,
3451
],
[
3477,
3482
],
[
3488,
3493
],
[
3564,
3569
]
],
[
[
54,
65
]
],
[
[
73,
84
],
[
2110,
2112
]
],
[
[
112,
127
]
],
[
[
133,
146
],
[
2828,
2841
]
],
[
[
1836,
1844
],
[
2553,
2561
],
[
3607,
3615
],
[
3624,
3632
]
],
[
[
1944,
1962
],
[
2795,
2813
]
],
[
[
2390,
2408
],
[
2736,
2754
]
],
[
[
2637,
2650
],
[
3414,
3427
]
],
[
[
2909,
2930
]
]
] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name,unused-variable,unused-argument,no-member
"""Conv2D int8 schedule on x86"""
import re
import tvm
from tvm import autotvm
from tvm.autotvm.task import get_config
from tvm.autotvm.task.topi_integration import deserialize_args
from ..nn.conv2d import _get_workload as _get_conv2d_workload
from .. import generic, tag
from ..generic import conv2d as conv2d_generic
from ..util import get_const_tuple
from ..nn.conv2d import conv2d_NCHWc_int8
from .. import nn
from . import conv2d_avx_1x1, conv2d_avx_common
def _get_default_config_int8(cfg, data, kernel, strides, padding, out_dtype, is_depthwise=False,
layout='NCHW'):
"""
Get default schedule config for the workload
"""
assert not is_depthwise, "Depthwise Int8 not supported"
wkl = _get_conv2d_workload(data, kernel, strides, padding, out_dtype, layout)
is_kernel_1x1 = wkl.hkernel == 1 and wkl.wkernel == 1
if is_kernel_1x1:
conv2d_generic.fallback_schedule_cpu_1x1_int8(
cfg, wkl, int32_lanes=16, num_int8_elements=4)
else:
conv2d_generic.fallback_schedule_cpu_common_int8(
cfg, wkl, int32_lanes=16, num_int8_elements=4)
def _is_int8_hw_support(data_dtype, kernel_dtype):
"""
Checks to ensure that we can use Intel DLBoost instructions
1) The datatypes are correct.
2) LLVM version has support for the instructions.
3) Target is skylake and above.
"""
# 1) Check datatypes
is_dtype_support = data_dtype == 'uint8' and kernel_dtype == 'int8'
# 2) Check LLVM support
llvm_intrin_fast_int8 = "llvm.x86.avx512.pmaddubs.w.512"
llvm_id = tvm.codegen.llvm_lookup_intrinsic_id(llvm_intrin_fast_int8)
is_llvm_support = llvm_id != 0
# 3) Check target
target = tvm.target.current_target()
is_target_support = False
for opt in target.options:
if opt == '-mcpu=skylake-avx512':
is_target_support = True
return is_dtype_support and is_llvm_support and is_target_support
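# Editor's note (not part of the original module): for example,
#   _is_int8_hw_support('uint8', 'int8')
# returns True only when the fast-int8 LLVM intrinsic above is available and
# the current target was created with -mcpu=skylake-avx512.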
def _create_tuning_space_int8(cfg, data, kernel, strides, padding, dilation, layout):
"""Create schedule configuration from input arguments"""
dshape = get_const_tuple(data.shape)
kshape = get_const_tuple(kernel.shape)
pat = re.compile(r'NCHW.+(\d+)c')
if layout == 'NCHW':
n, ic, h, w = dshape
oc, _, kh, kw = kshape
elif layout == 'NHWC':
n, h, w, ic = dshape
kh, kw, oc, _ = kshape
elif pat.match(layout) is not None:
n, ic_chunk, h, w, ic_bn = dshape
target = tvm.target.current_target(allow_none=False)
oc_chunk, k_ic, kh, kw, k_ic_f, oc_bn, k_ic_s = kshape
ic = ic_chunk * ic_bn
assert ic == k_ic * k_ic_f * k_ic_s
oc = oc_chunk*oc_bn
else:
raise ValueError("Not support this layout {} with "
"schedule template.".format(layout))
is_kernel_1x1 = kh == 1 and kw == 1
ph, pw = padding if isinstance(padding, (tuple, list)) else (padding, padding)
sh, sw = strides if isinstance(strides, (tuple, list)) else (strides, strides)
oh = (h - kh + 2 * ph) // sh + 1
ow = (w - kw + 2 * pw) // sw + 1
# Create schedule config
cfg.define_split('tile_ic', ic, num_outputs=2, filter=lambda y: y.size[-1] % 4 == 0)
cfg.define_split('tile_oc', oc, num_outputs=2, filter=lambda y: y.size[-1] % 16 == 0)
cfg.define_split("tile_ow", ow, num_outputs=2, filter=lambda y: y.size[-1] <= 64)
if is_kernel_1x1:
cfg.define_knob("tile_oh", [1, 2] if oh > 1 else [1])
else:
cfg.define_knob("unroll_kw", [True, False])
# Define template function for autotvm task
# We define schedule template in this function instead of
# declaration function since actual input arguments need
# to be altered by the schedule selected.
@autotvm.task.register("topi_x86_conv2d_NCHWc_int8")
def _topi_nn_conv2d_NCHWc_int8(*args, **kwargs):
assert not kwargs, "Do not support kwargs in template function call"
args = deserialize_args(args)
if len(args) == 7:
data, kernel, strides, padding, dilation, origin_layout, dtype = args
else:
assert len(args) == 8
data, kernel, strides, padding, dilation, origin_layout, out_layout, dtype = args
raw_data_shape = get_const_tuple(data.shape)
raw_kernel_shape = get_const_tuple(kernel.shape)
# get config here
cfg = get_config()
_create_tuning_space_int8(cfg, data, kernel, strides, padding, dilation, origin_layout)
# change shape with the value in config
ic_bn, oc_bn, ow_bn = (cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1],
cfg["tile_ow"].size[-1])
data_layout = "NCHW%dc" % ic_bn
out_layout = "NCHW%dc" % oc_bn
# Set up the new shape for data and kernel
new_data_shape = (raw_data_shape[0], raw_data_shape[1] // ic_bn,
raw_data_shape[2], raw_data_shape[3], ic_bn)
n_elems = 4
new_kernel_shape = (raw_kernel_shape[0] // oc_bn,
raw_kernel_shape[1] // ic_bn,
raw_kernel_shape[2],
raw_kernel_shape[3],
ic_bn // n_elems,
oc_bn,
n_elems)
new_data = tvm.placeholder(new_data_shape, data.dtype)
new_kernel = tvm.placeholder(new_kernel_shape, kernel.dtype)
C = _declaration_conv_NCHWc_int8(cfg, new_data, new_kernel, strides, padding, dilation,
data_layout, out_layout, dtype)
s = _schedule_conv2d_NCHWc_int8(cfg, [C])
return s, [new_data, new_kernel, C]
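# Editor's note (not part of the original module): the template above re-packs
# the inputs into the blocked layouts consumed by the int8 schedules --
#   data:   (N, C // ic_bn, H, W, ic_bn)                        i.e. NCHW%dc
#   kernel: (O // oc_bn, I // ic_bn, KH, KW, ic_bn // 4, oc_bn, 4)
# where the trailing factor of 4 matches the four int8 elements consumed per
# lane by the vpmaddubsw-style instruction checked in _is_int8_hw_support.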
@autotvm.register_topi_compute(conv2d_NCHWc_int8, 'cpu', 'direct')
def _declaration_conv_NCHWc_int8(cfg, data, kernel, strides,
padding, dilation, layout, out_layout, out_dtype):
return nn.conv2d_NCHWc_int8_compute(data,
kernel,
strides,
padding,
dilation,
layout,
out_layout,
out_dtype)
@autotvm.register_topi_schedule(generic.schedule_conv2d_NCHWc_int8, 'cpu', ['direct'])
def _schedule_conv2d_NCHWc_int8(cfg, outs):
"""Create schedule for tensors"""
s = tvm.create_schedule([x.op for x in outs])
scheduled_ops = []
def traverse(op):
"""Traverse operators from computation graph"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag):
if op not in s.outputs:
s[op].compute_inline()
for tensor in op.input_tensors:
if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
if 'conv2d_NCHWc_int8' in op.tag:
conv_out = op.output(0)
kernel = conv_out.op.input_tensors[1]
data_vec = conv_out.op.input_tensors[0]
data = data_vec.op.input_tensors[0] \
if isinstance(data_vec.op, tvm.tensor.ComputeOp) and "pad" not in data_vec.op.tag \
else data_vec
if isinstance(data.op, tvm.tensor.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
args = [s, cfg, data_vec, conv_out, outs[0]]
target = tvm.target.current_target(allow_none=False)
# int8 conv kernel is 7-dim
_, _, kh, kw, _, _, _ = get_const_tuple(kernel.shape)
if kh == 1 and kw == 1:
conv2d_avx_1x1._schedule_conv_NCHWc_int8(*args)
else:
conv2d_avx_common._schedule_conv_NCHWc_int8(*args)
scheduled_ops.append(op)
traverse(outs[0].op)
return s
@autotvm.register_topi_schedule(generic.schedule_conv2d_nhwc_pack, 'cpu', ['direct'])
def schedule_conv2d_nhwc_pack(cfg, outs):
"""Create schedule for tensors"""
s = tvm.create_schedule([x.op for x in outs])
output_op = outs[0].op
scheduled_ops = []
def traverse(op):
"""Traverse operators from computation graph"""
# inline all one-to-one-mapping operators except the last stage (output)
if tag.is_broadcast(op.tag):
if op not in s.outputs:
s[op].compute_inline()
else: # inject custom schedule
if len(op.axis) == 4: # schedule bias + bn + relu
n, h, w, c = op.axis
fused = s[op].fuse(n, h, w)
s[op].parallel(fused)
s[op].vectorize(c)
for tensor in op.input_tensors:
if isinstance(tensor.op, tvm.tensor.ComputeOp) and tensor.op not in scheduled_ops:
traverse(tensor.op)
if 'conv2d_nhwc_pack_int8' in op.tag:
conv_out = op.output(0)
kernel = conv_out.op.input_tensors[1]
data_vec = conv_out.op.input_tensors[0]
data = data_vec.op.input_tensors[0] \
if isinstance(data_vec.op, tvm.tensor.ComputeOp) and "pad" not in data_vec.op.tag \
else data_vec
if isinstance(data.op, tvm.tensor.ComputeOp) and "pad" in data.op.tag:
data_pad = data
data = data_pad.op.input_tensors[0]
args = [s, cfg, data_vec, conv_out, outs[0]]
if data.dtype == 'uint8':
kh, kw, _, _, _ = get_const_tuple(kernel.shape)
if kh == 1 and kw == 1:
conv2d_avx_1x1._schedule_conv_nhwc_pack_int8(*args)
else:
raise ValueError("Only support 1x1 kernel with "
"schedule_conv2d_nhwc_pack.")
else:
raise ValueError("Not support this data type {} with "
"schedule_conv2d_nhwc_pack. Only support int8".format(data.dtype))
scheduled_ops.append(op)
traverse(output_op)
return s
| [
[
[
900,
902
],
[
3063,
3065
]
],
[
[
910,
913
],
[
2450,
2453
],
[
2581,
2584
],
[
3362,
3365
],
[
6076,
6079
],
[
6137,
6140
],
[
7219,
7222
],
[
8929,
8932
],
[
7641,
7644
],
[
8013,
8016
],
[
8135,
8138
],
[
8346,
8349
],
[
9657,
9660
],
[
10033,
10036
],
[
10155,
10158
]
],
[
[
930,
937
],
[
4629,
4636
],
[
6436,
6443
],
[
7043,
7050
],
[
8756,
8763
]
],
[
[
967,
977
],
[
5205,
5215
]
],
[
[
1024,
1040
],
[
4814,
4830
]
],
[
[
1065,
1102
],
[
1599,
1619
]
],
[
[
1118,
1125
],
[
7074,
7081
],
[
8787,
8794
]
],
[
[
1127,
1130
],
[
7455,
7458
],
[
9192,
9195
]
],
[
[
1153,
1177
],
[
1759,
1773
],
[
1883,
1897
]
],
[
[
1197,
1212
],
[
2982,
2997
],
[
3023,
3038
],
[
5091,
5106
],
[
5142,
5157
],
[
8466,
8481
],
[
10417,
10432
]
],
[
[
1237,
1254
],
[
6466,
6483
]
],
[
[
1270,
1272
],
[
6658,
6660
]
],
[
[
1287,
1301
],
[
8548,
8562
],
[
10507,
10521
]
],
[
[
1303,
1320
],
[
8630,
8647
]
],
[
[
1326,
1350
]
],
[
[
1998,
2017
]
],
[
[
2826,
2851
],
[
5222,
5247
]
],
[
[
4685,
4711
]
],
[
[
6506,
6534
],
[
6194,
6222
]
],
[
[
7133,
7160
],
[
6355,
6382
]
],
[
[
8845,
8870
]
]
] |
"""Tests for py. for S2201"""
import csv
import json
import os
import tempfile
import sys
import unittest
_CODEDIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(1, os.path.join(_CODEDIR, '.'))
from .process import *
_FEATURES = os.path.join(_CODEDIR, 'features.json')
_STAT_VAR_LIST = os.path.join(_CODEDIR, 'stat_vars.csv')
_TEST_DATA = os.path.join(_CODEDIR, 'testdata')
_EXPECTED_TMCF = os.path.join(_CODEDIR, 'output.tmcf')
class ProcessTest(unittest.TestCase):
def test_convert_column_to_stat_var(self):
f = open(_FEATURES)
features = json.load(f)
f.close()
self.assertEqual(
convert_column_to_stat_var(
'Estimate!!Households receiving food stamps/SNAP!!Households',
features), 'Count_Household_WithFoodStampsInThePast12Months')
self.assertEqual(
convert_column_to_stat_var(
'Margin of Error!!' +
'Households receiving food stamps/SNAP!!Households!!' +
'No children under 18 years!!Other family:!!' +
'Male householder, no spouse present', features),
'MarginOfError_Count_Household_WithFoodStampsInThePast12Months_' +
'WithoutChildrenUnder18_SingleFatherFamilyHousehold')
self.assertEqual(
convert_column_to_stat_var(
'Estimate!!Households receiving food stamps/SNAP!!Households!!'
+ 'HOUSEHOLD INCOME IN THE PAST 12 MONTHS' +
'(IN 2019 INFLATION-ADJUSTED DOLLARS)!!Median income (dollars)',
features),
'Median_Income_Household_WithFoodStampsInThePast12Months')
def test_create_csv(self):
f = open(_FEATURES)
features = json.load(f)
f.close()
f = open(_STAT_VAR_LIST)
stat_vars = f.read().splitlines()
f.close()
with tempfile.TemporaryDirectory() as tmp_dir:
test_csv = os.path.join(tmp_dir, 'test_csv.csv')
create_csv(test_csv, stat_vars)
for year in range(2010, 2020):
filename = f'testACSST5Y{year}.csv'
with open(os.path.join(_TEST_DATA, filename)) as f:
reader = csv.DictReader(f)
write_csv(filename, reader, test_csv, features, stat_vars)
with open(test_csv) as f_result:
test_result = f_result.read()
with open(os.path.join(_TEST_DATA, 'expected.csv')) as f_test:
expected = f_test.read()
self.assertEqual(test_result, expected)
os.remove(test_csv)
def test_create_tmcf(self):
f = open(_FEATURES)
features = json.load(f)
f.close()
f = open(_STAT_VAR_LIST)
stat_vars = f.read().splitlines()
f.close()
with tempfile.TemporaryDirectory() as tmp_dir:
test_tmcf = os.path.join(tmp_dir, 'test_tmcf.tmcf')
create_tmcf(test_tmcf, features, stat_vars)
with open(test_tmcf) as f_result:
test_result = f_result.read()
with open(_EXPECTED_TMCF) as f_test:
expected = f_test.read()
self.assertEqual(test_result, expected)
os.remove(test_tmcf)
if __name__ == '__main__':
unittest.main()
| [
[
[
38,
41
],
[
2229,
2232
]
],
[
[
49,
53
],
[
583,
587
],
[
1753,
1757
],
[
2713,
2717
]
],
[
[
61,
63
],
[
119,
121
],
[
135,
137
],
[
183,
185
],
[
248,
250
],
[
305,
307
],
[
358,
360
],
[
410,
412
],
[
1955,
1957
],
[
2158,
2160
],
[
2443,
2445
],
[
2613,
2615
],
[
2916,
2918
],
[
3274,
3276
]
],
[
[
71,
79
],
[
1890,
1898
],
[
2850,
2858
]
],
[
[
87,
90
],
[
164,
167
]
],
[
[
98,
106
],
[
468,
476
],
[
3328,
3336
]
],
[
[
108,
116
],
[
196,
204
],
[
261,
269
],
[
318,
326
],
[
371,
379
],
[
423,
431
]
],
[
[
233,
234
],
[
652,
678
],
[
875,
901
],
[
1326,
1352
],
[
2005,
2015
],
[
2267,
2276
],
[
2968,
2979
]
],
[
[
236,
245
],
[
553,
562
],
[
1723,
1732
],
[
2683,
2692
]
],
[
[
288,
302
],
[
1801,
1815
],
[
2761,
2775
]
],
[
[
345,
355
],
[
2171,
2181
],
[
2456,
2466
]
],
[
[
393,
407
],
[
3130,
3144
]
],
[
[
456,
467
]
]
] |
from utils.utils import validate_parameters
get_news_query_schema = {
"from": {
"type": "integer",
'coerce': int,
"min": 0,
"max": 10000,
"required": False,
"default": 0
},
"limit": {
"type": "integer",
'coerce': int,
"min": 0,
"max": 10000,
"required": False,
"default": 0
},
"category": {
"type": "string",
"required": False
}
}
class GetNewsValidator:
def __call__(self, request):
body_validation_errors = validate_parameters(request.args.copy(), get_news_query_schema)
        return body_validation_errors
| [
[
[
24,
43
],
[
560,
579
]
],
[
[
45,
66
],
[
601,
622
]
],
[
[
475,
491
]
]
] |
import hp
from pathlib import Path
import numpy as np
from tqdm import tqdm
import librosa
import torch
import librosa.filters
import scipy
from random import randint
from os import makedirs
def load_wav(path, sample_rate):
return librosa.core.load(path, sr=sample_rate)[0]
def save_wav(wav, path, sample_rate):
wav *= 32767 / max(0.01, np.max(np.abs(wav)))
scipy.io.wavfile.write(path, sample_rate, wav.astype(np.int16))
def get_segments(source, length, count):
begins = []
l = len(source)
for _ in range(count):
begins.append(randint(0, l - length - 1))
segments = []
for begin in begins:
segments.append(source[begin: begin + length])
return segments
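# Editor's illustrative sketch (not part of the original module):
#   snippets = get_segments(list(range(100)), length=2, count=3)
#   # three snippets, each a slice of length 2 taken at a random offset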
def process_chime(
source=hp.whole_chime_path,
target=hp.part_chime_path,
sr=16000,
duration=30,
count=10
):
"""
    Randomly picks segments from the CHiME dataset, since the full dataset is not necessary in our case.
    :param source: directory containing the source CHiME .wav files
    :param target: directory the extracted segments are written to
    :param sr: sample rate used for loading and saving
    :param duration: length of each segment in seconds
    :param count: number of segments to extract per file
    :return: None
"""
makedirs(str(target), exist_ok=True)
for path in tqdm(source.glob("*.wav")):
wave = load_wav(path, sr)
if len(wave) < sr * 30: continue
waves = get_segments(wave, duration * sr, count)
for i, wave in enumerate(waves, 1):
save_wav(wave, str(target / f"{path.stem}_{i}.wav"), sr)
if __name__ == '__main__':
print("Beginning segmenting CHiME4 noises.")
process_chime()
print("Processing Finished")
| [
[
[
7,
9
],
[
763,
765
],
[
795,
797
]
],
[
[
30,
34
]
],
[
[
42,
53
]
],
[
[
71,
75
],
[
1139,
1143
]
],
[
[
83,
90
]
],
[
[
98,
103
]
],
[
[
111,
126
],
[
256,
263
]
],
[
[
134,
145
],
[
368,
370
],
[
375,
377
],
[
446,
448
]
],
[
[
153,
158
],
[
393,
398
]
],
[
[
178,
185
],
[
585,
592
]
],
[
[
201,
209
],
[
1086,
1094
]
],
[
[
216,
224
],
[
1182,
1190
]
],
[
[
305,
313
],
[
1355,
1363
]
],
[
[
463,
475
],
[
1258,
1270
]
],
[
[
737,
750
],
[
1494,
1507
]
]
] |
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
#!/usr/bin/python
outputs = [ "out.exr" ]
command = testrender("-r 320 240 -aa 4 material-layer.xml out.exr")
| [
[
[
190,
197
]
],
[
[
214,
221
]
]
] |
### $Id: admin.py,v 1.5 2017/12/18 09:12:51 muntaza Exp $
from django.contrib import admin
from umum.models import Provinsi, Kabupaten, LokasiBidang, SKPD, SUBSKPD, KodeBarang, HakTanah, SatuanBarang, KeadaanBarang, SKPenghapusan, MutasiBerkurang, JenisPemanfaatan, AsalUsul, Tahun, GolonganBarang, Tanah, KontrakTanah, PenghapusanTanah, TanahPenghapusan, PemanfaatanTanah, TanahPemanfaatan, HargaTanah, TahunBerkurangUsulHapusTanah, TanahUsulHapus
#### Tanah
from umum.models import TanahDPUPR, KontrakTanahDPUPR, HargaTanahDPUPR, TanahUsulHapusDPUPR, TahunBerkurangUsulHapusTanahDPUPR
from umum.models import TanahPenghapusanDPUPR, TahunBerkurangTanahDPUPR, PenghapusanTanahDPUPR
from umum.models import SKPDAsalTanahDPUPR, SKPDTujuanTanahDPUPR, FotoTanahDPUPR
from umum.admin import HargaTanahInline, TanahAdmin, KontrakTanahAdmin, HargaTanahAdmin, TahunBerkurangUsulHapusTanahInline, TanahUsulHapusAdmin
from umum.admin import TahunBerkurangTanahInline, PenghapusanTanahInline, TanahPenghapusanAdmin
from umum.admin import SKPDAsalTanahInline, SKPDTujuanTanahInline, FotoTanahInline
from umum.admin import GedungBangunanInline
#### Gedung Bangunan
from gedungbangunan.models import StatusTingkat, StatusBeton, KontrakGedungBangunan, HargaGedungBangunan, GedungBangunan, PenghapusanGedungBangunan, PemanfaatanGedungBangunan, TahunBerkurangGedungBangunan, Ruangan, TahunBerkurangUsulHapusGedung
from gedungbangunan.models import GedungBangunanPemanfaatan, GedungBangunanPenghapusan, GedungBangunanRuangan, GedungBangunanUsulHapus
from gedungbangunan.models import GedungBangunanDPUPR, KontrakGedungBangunanDPUPR, HargaGedungBangunanDPUPR, GedungBangunanRuanganDPUPR, GedungBangunanUsulHapusDPUPR, TahunBerkurangUsulHapusGedungDPUPR
from gedungbangunan.models import GedungBangunanPenghapusanDPUPR, TahunBerkurangGedungBangunanDPUPR, PenghapusanGedungBangunanDPUPR
from gedungbangunan.models import SKPDAsalGedungBangunanDPUPR, SKPDTujuanGedungBangunanDPUPR, FotoGedungBangunanDPUPR
from gedungbangunan.admin import HargaGedungBangunanInline, GedungBangunanAdmin, KontrakGedungBangunanAdmin, HargaGedungBangunanAdmin, RuanganInline, GedungBangunanRuanganAdmin, KDPGedungBangunanAdmin, TahunBerkurangUsulHapusGedungInline, GedungBangunanUsulHapusAdmin
from gedungbangunan.admin import TahunBerkurangGedungBangunanInline, PenghapusanGedungBangunanInline, GedungBangunanPenghapusanAdmin
from gedungbangunan.admin import SKPDAsalGedungBangunanInline, SKPDTujuanGedungBangunanInline, FotoGedungBangunanInline
#### Peralatan Mesin
from peralatanmesin.models import KontrakPeralatanMesin, HargaPeralatanMesin, PeralatanMesin, PenghapusanPeralatanMesin, PemanfaatanPeralatanMesin, TahunBerkurangPeralatanMesin, TahunBerkurangUsulHapusPeralatanMesin
# to hold the inline models
from peralatanmesin.models import PeralatanMesinPemanfaatan, PeralatanMesinPenghapusan, PeralatanMesinUsulHapus
from peralatanmesin.models import PeralatanMesinDPUPR, KontrakPeralatanMesinDPUPR, HargaPeralatanMesinDPUPR, PeralatanMesinUsulHapusDPUPR, TahunBerkurangUsulHapusPeralatanMesinDPUPR
from peralatanmesin.models import PeralatanMesinPenghapusanDPUPR, TahunBerkurangPeralatanMesinDPUPR, PenghapusanPeralatanMesinDPUPR
from peralatanmesin.models import SKPDAsalPeralatanMesinDPUPR, SKPDTujuanPeralatanMesinDPUPR, FotoPeralatanMesinDPUPR
from peralatanmesin.admin import HargaPeralatanMesinInline, PeralatanMesinAdmin, KontrakPeralatanMesinAdmin, HargaPeralatanMesinAdmin, TahunBerkurangUsulHapusPeralatanMesinInline, PeralatanMesinUsulHapusAdmin
from peralatanmesin.admin import TahunBerkurangPeralatanMesinInline, PenghapusanPeralatanMesinInline, PeralatanMesinPenghapusanAdmin
from peralatanmesin.admin import SKPDAsalPeralatanMesinInline, SKPDTujuanPeralatanMesinInline, FotoPeralatanMesinInline
#### Class Tanah
class TahunBerkurangTanahDPUPRInline(TahunBerkurangTanahInline):
model = TahunBerkurangTanahDPUPR
class PenghapusanTanahDPUPRInline(PenghapusanTanahInline):
model = PenghapusanTanahDPUPR
class SKPDAsalTanahDPUPRInline(SKPDAsalTanahInline):
model = SKPDAsalTanahDPUPR
class SKPDTujuanTanahDPUPRInline(SKPDTujuanTanahInline):
model = SKPDTujuanTanahDPUPR
class FotoTanahDPUPRInline(FotoTanahInline):
model = FotoTanahDPUPR
class GedungBangunanDPUPRInline(GedungBangunanInline):
model = GedungBangunanDPUPR
class HargaTanahDPUPRInline(HargaTanahInline):
model = HargaTanahDPUPR
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_kontrak":
kwargs["queryset"] = KontrakTanah.objects.filter(id_skpd__exact=3)
return super(HargaTanahDPUPRInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TahunBerkurangUsulHapusTanahDPUPRInline(TahunBerkurangUsulHapusTanahInline):
model = TahunBerkurangUsulHapusTanahDPUPR
class TanahDPUPRAdmin(TanahAdmin):
inlines = [HargaTanahDPUPRInline,
SKPDAsalTanahDPUPRInline,
FotoTanahDPUPRInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=3)
return super(TanahDPUPRAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=3)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__exact=5)
class TanahUsulHapusDPUPRAdmin(TanahUsulHapusAdmin):
inlines = [TahunBerkurangUsulHapusTanahDPUPRInline,
SKPDAsalTanahDPUPRInline,
FotoTanahDPUPRInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=3)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__exact=3)
class KontrakTanahDPUPRAdmin(KontrakTanahAdmin):
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_skpd":
kwargs["queryset"] = SKPD.objects.filter(id__exact=3)
return super(KontrakTanahDPUPRAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
return self.model.objects.filter(id_skpd__exact=3)
class HargaTanahDPUPRAdmin(HargaTanahAdmin):
def get_queryset(self, request):
sub_skpd_qs = SUBSKPD.objects.filter(id_skpd__exact=3)
tanah_qs = Tanah.objects.filter(id_sub_skpd__in=sub_skpd_qs)
return self.model.objects.filter(id_tanah__in=tanah_qs)
class TanahPenghapusanDPUPRAdmin(TanahPenghapusanAdmin):
inlines = [PenghapusanTanahDPUPRInline, TahunBerkurangTanahDPUPRInline,
SKPDAsalTanahDPUPRInline,
SKPDTujuanTanahDPUPRInline,
FotoTanahDPUPRInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=3)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__in=[2,4,6,7,10,])
### Register Tanah DPUPR
admin.site.register(TanahDPUPR, TanahDPUPRAdmin)
admin.site.register(TanahUsulHapusDPUPR, TanahUsulHapusDPUPRAdmin)
admin.site.register(KontrakTanahDPUPR, KontrakTanahDPUPRAdmin)
admin.site.register(HargaTanahDPUPR, HargaTanahDPUPRAdmin)
admin.site.register(TanahPenghapusanDPUPR, TanahPenghapusanDPUPRAdmin)
from gedungbangunan.models import KDPGedungBangunanDPUPR
#### Class Gedung dan Bangunan
class TahunBerkurangGedungBangunanDPUPRInline(TahunBerkurangGedungBangunanInline):
model = TahunBerkurangGedungBangunanDPUPR
class PenghapusanGedungBangunanDPUPRInline(PenghapusanGedungBangunanInline):
model = PenghapusanGedungBangunanDPUPR
class SKPDAsalGedungBangunanDPUPRInline(SKPDAsalGedungBangunanInline):
model = SKPDAsalGedungBangunanDPUPR
class SKPDTujuanGedungBangunanDPUPRInline(SKPDTujuanGedungBangunanInline):
model = SKPDTujuanGedungBangunanDPUPR
class FotoGedungBangunanDPUPRInline(FotoGedungBangunanInline):
model = FotoGedungBangunanDPUPR
class HargaGedungBangunanDPUPRInline(HargaGedungBangunanInline):
model = HargaGedungBangunanDPUPR
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_kontrak_gedung_bangunan":
kwargs["queryset"] = KontrakGedungBangunan.objects.filter(id_skpd__exact=3)
return super(HargaGedungBangunanDPUPRInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TahunBerkurangUsulHapusGedungDPUPRInline(TahunBerkurangUsulHapusGedungInline):
model = TahunBerkurangUsulHapusGedungDPUPR
class GedungBangunanDPUPRAdmin(GedungBangunanAdmin):
inlines = [HargaGedungBangunanDPUPRInline,
SKPDAsalGedungBangunanDPUPRInline,
FotoGedungBangunanDPUPRInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=3)
return super(GedungBangunanDPUPRAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=3)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=3).filter(id_mutasi_berkurang__exact=5)
class KDPGedungBangunanDPUPRAdmin(KDPGedungBangunanAdmin):
inlines = [HargaGedungBangunanDPUPRInline,
SKPDAsalGedungBangunanDPUPRInline,
FotoGedungBangunanDPUPRInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=3)
return super(KDPGedungBangunanDPUPRAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=3)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=6).filter(id_mutasi_berkurang__exact=5)
class GedungBangunanRuanganDPUPRAdmin(GedungBangunanRuanganAdmin):
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=3)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=3).filter(id_mutasi_berkurang__exact=5)
class GedungBangunanUsulHapusDPUPRAdmin(GedungBangunanUsulHapusAdmin):
inlines = [TahunBerkurangUsulHapusGedungDPUPRInline,
SKPDAsalGedungBangunanDPUPRInline,
FotoGedungBangunanDPUPRInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=3)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=3).filter(id_mutasi_berkurang__exact=3)
class KontrakGedungBangunanDPUPRAdmin(KontrakGedungBangunanAdmin):
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_skpd":
kwargs["queryset"] = SKPD.objects.filter(id__exact=3)
return super(KontrakGedungBangunanDPUPRAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
return self.model.objects.filter(id_skpd__exact=3)
class HargaGedungBangunanDPUPRAdmin(HargaGedungBangunanAdmin):
def get_queryset(self, request):
sub_skpd_qs = SUBSKPD.objects.filter(id_skpd__exact=3)
gedung_bangunan_qs = GedungBangunan.objects.filter(id_sub_skpd__in=sub_skpd_qs)
return self.model.objects.filter(id_gedung_bangunan__in=gedung_bangunan_qs)
class GedungBangunanPenghapusanDPUPRAdmin(GedungBangunanPenghapusanAdmin):
inlines = [PenghapusanGedungBangunanDPUPRInline, TahunBerkurangGedungBangunanDPUPRInline,
SKPDAsalGedungBangunanDPUPRInline,
SKPDTujuanGedungBangunanDPUPRInline,
FotoGedungBangunanDPUPRInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=3)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__in=[2,4,6,7,10,])
###Register GedungBangunan DPUPR
admin.site.register(GedungBangunanDPUPR, GedungBangunanDPUPRAdmin)
admin.site.register(KDPGedungBangunanDPUPR, KDPGedungBangunanDPUPRAdmin)
admin.site.register(GedungBangunanRuanganDPUPR, GedungBangunanRuanganDPUPRAdmin)
admin.site.register(GedungBangunanUsulHapusDPUPR, GedungBangunanUsulHapusDPUPRAdmin)
admin.site.register(KontrakGedungBangunanDPUPR, KontrakGedungBangunanDPUPRAdmin)
admin.site.register(HargaGedungBangunanDPUPR, HargaGedungBangunanDPUPRAdmin)
admin.site.register(GedungBangunanPenghapusanDPUPR, GedungBangunanPenghapusanDPUPRAdmin)
#### Class Peralatan Mesin
class TahunBerkurangPeralatanMesinDPUPRInline(TahunBerkurangPeralatanMesinInline):
model = TahunBerkurangPeralatanMesinDPUPR
class PenghapusanPeralatanMesinDPUPRInline(PenghapusanPeralatanMesinInline):
model = PenghapusanPeralatanMesinDPUPR
class SKPDAsalPeralatanMesinDPUPRInline(SKPDAsalPeralatanMesinInline):
model = SKPDAsalPeralatanMesinDPUPR
class SKPDTujuanPeralatanMesinDPUPRInline(SKPDTujuanPeralatanMesinInline):
model = SKPDTujuanPeralatanMesinDPUPR
class FotoPeralatanMesinDPUPRInline(FotoPeralatanMesinInline):
model = FotoPeralatanMesinDPUPR
class HargaPeralatanMesinDPUPRInline(HargaPeralatanMesinInline):
model = HargaPeralatanMesinDPUPR
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_kontrak_peralatan_mesin":
kwargs["queryset"] = KontrakPeralatanMesin.objects.filter(id_skpd__exact=3)
return super(HargaPeralatanMesinDPUPRInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TahunBerkurangUsulHapusPeralatanMesinDPUPRInline(TahunBerkurangUsulHapusPeralatanMesinInline):
model = TahunBerkurangUsulHapusPeralatanMesinDPUPR
class PeralatanMesinDPUPRAdmin(PeralatanMesinAdmin):
inlines = [HargaPeralatanMesinDPUPRInline,
SKPDAsalPeralatanMesinDPUPRInline,
FotoPeralatanMesinDPUPRInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=3)
if db_field.name == "id_ruangan":
kwargs["queryset"] = Ruangan.objects.filter(id_gedung_bangunan__id_sub_skpd__id_skpd__exact=3)
return super(PeralatanMesinDPUPRAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=3)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__exact=5)
class PeralatanMesinUsulHapusDPUPRAdmin(PeralatanMesinUsulHapusAdmin):
inlines = [TahunBerkurangUsulHapusPeralatanMesinDPUPRInline,
SKPDAsalPeralatanMesinDPUPRInline,
FotoPeralatanMesinDPUPRInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=3)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__exact=3)
class KontrakPeralatanMesinDPUPRAdmin(KontrakPeralatanMesinAdmin):
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_skpd":
kwargs["queryset"] = SKPD.objects.filter(id__exact=3)
return super(KontrakPeralatanMesinDPUPRAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
return self.model.objects.filter(id_skpd__exact=3)
class HargaPeralatanMesinDPUPRAdmin(HargaPeralatanMesinAdmin):
def get_queryset(self, request):
sub_skpd_qs = SUBSKPD.objects.filter(id_skpd__exact=3)
peralatan_mesin_qs = PeralatanMesin.objects.filter(id_sub_skpd__in=sub_skpd_qs)
return self.model.objects.filter(id_peralatan_mesin__in=peralatan_mesin_qs)
class PeralatanMesinPenghapusanDPUPRAdmin(PeralatanMesinPenghapusanAdmin):
inlines = [PenghapusanPeralatanMesinDPUPRInline, TahunBerkurangPeralatanMesinDPUPRInline,
SKPDAsalPeralatanMesinDPUPRInline,
SKPDTujuanPeralatanMesinDPUPRInline,
FotoPeralatanMesinDPUPRInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=3)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__in=[2,4,6,7,10,])
###Register PeralatanMesin DPUPR
admin.site.register(PeralatanMesinDPUPR, PeralatanMesinDPUPRAdmin)
admin.site.register(PeralatanMesinUsulHapusDPUPR, PeralatanMesinUsulHapusDPUPRAdmin)
admin.site.register(KontrakPeralatanMesinDPUPR, KontrakPeralatanMesinDPUPRAdmin)
admin.site.register(HargaPeralatanMesinDPUPR, HargaPeralatanMesinDPUPRAdmin)
admin.site.register(PeralatanMesinPenghapusanDPUPR, PeralatanMesinPenghapusanDPUPRAdmin)
#### Jalan, Irigasi, dan Jaringan
from jalanirigasijaringan.models import KontrakJalanIrigasiJaringan, HargaJalanIrigasiJaringan, JalanIrigasiJaringan, PenghapusanJalanIrigasiJaringan, PemanfaatanJalanIrigasiJaringan, TahunBerkurangJalanIrigasiJaringan, TahunBerkurangUsulHapusJalanIrigasiJaringan
from jalanirigasijaringan.models import JalanIrigasiJaringanPemanfaatan, JalanIrigasiJaringanPenghapusan, JalanIrigasiJaringanUsulHapus
from jalanirigasijaringan.models import JalanIrigasiJaringanDPUPR, KontrakJalanIrigasiJaringanDPUPR, HargaJalanIrigasiJaringanDPUPR, KDPJalanIrigasiJaringanDPUPR, JalanIrigasiJaringanUsulHapusDPUPR, TahunBerkurangUsulHapusJalanIrigasiJaringanDPUPR
from jalanirigasijaringan.models import JalanIrigasiJaringanPenghapusanDPUPR, TahunBerkurangJalanIrigasiJaringanDPUPR, PenghapusanJalanIrigasiJaringanDPUPR
from jalanirigasijaringan.models import SKPDAsalJalanIrigasiJaringanDPUPR, SKPDTujuanJalanIrigasiJaringanDPUPR, FotoJalanIrigasiJaringanDPUPR
from jalanirigasijaringan.admin import HargaJalanIrigasiJaringanInline, JalanIrigasiJaringanAdmin, KontrakJalanIrigasiJaringanAdmin, HargaJalanIrigasiJaringanAdmin, KDPJalanIrigasiJaringanAdmin, TahunBerkurangUsulHapusJalanIrigasiJaringanInline, JalanIrigasiJaringanUsulHapusAdmin
from jalanirigasijaringan.admin import TahunBerkurangJalanIrigasiJaringanInline, PenghapusanJalanIrigasiJaringanInline, JalanIrigasiJaringanPenghapusanAdmin
from jalanirigasijaringan.admin import SKPDAsalJalanIrigasiJaringanInline, SKPDTujuanJalanIrigasiJaringanInline, FotoJalanIrigasiJaringanInline
#### Class Jalan, Irigasi dan Jaringan
class TahunBerkurangJalanIrigasiJaringanDPUPRInline(TahunBerkurangJalanIrigasiJaringanInline):
model = TahunBerkurangJalanIrigasiJaringanDPUPR
class PenghapusanJalanIrigasiJaringanDPUPRInline(PenghapusanJalanIrigasiJaringanInline):
model = PenghapusanJalanIrigasiJaringanDPUPR
class SKPDAsalJalanIrigasiJaringanDPUPRInline(SKPDAsalJalanIrigasiJaringanInline):
model = SKPDAsalJalanIrigasiJaringanDPUPR
class SKPDTujuanJalanIrigasiJaringanDPUPRInline(SKPDTujuanJalanIrigasiJaringanInline):
model = SKPDTujuanJalanIrigasiJaringanDPUPR
class FotoJalanIrigasiJaringanDPUPRInline(FotoJalanIrigasiJaringanInline):
model = FotoJalanIrigasiJaringanDPUPR
class HargaJalanIrigasiJaringanDPUPRInline(HargaJalanIrigasiJaringanInline):
model = HargaJalanIrigasiJaringanDPUPR
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_kontrak_jalan_irigasi_jaringan":
kwargs["queryset"] = KontrakJalanIrigasiJaringan.objects.filter(id_skpd__exact=3)
return super(HargaJalanIrigasiJaringanDPUPRInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TahunBerkurangUsulHapusJalanIrigasiJaringanDPUPRInline(TahunBerkurangUsulHapusJalanIrigasiJaringanInline):
model = TahunBerkurangUsulHapusJalanIrigasiJaringanDPUPR
class JalanIrigasiJaringanDPUPRAdmin(JalanIrigasiJaringanAdmin):
inlines = [HargaJalanIrigasiJaringanDPUPRInline,
SKPDAsalJalanIrigasiJaringanDPUPRInline,
FotoJalanIrigasiJaringanDPUPRInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=3)
return super(JalanIrigasiJaringanDPUPRAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=3)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=4).filter(id_mutasi_berkurang__exact=5)
class JalanIrigasiJaringanUsulHapusDPUPRAdmin(JalanIrigasiJaringanUsulHapusAdmin):
inlines = [TahunBerkurangUsulHapusJalanIrigasiJaringanDPUPRInline,
SKPDAsalJalanIrigasiJaringanDPUPRInline,
FotoJalanIrigasiJaringanDPUPRInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=3)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=4).filter(id_mutasi_berkurang__exact=3)
class KDPJalanIrigasiJaringanDPUPRAdmin(KDPJalanIrigasiJaringanAdmin):
inlines = [HargaJalanIrigasiJaringanDPUPRInline,
SKPDAsalJalanIrigasiJaringanDPUPRInline,
FotoJalanIrigasiJaringanDPUPRInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=3)
return super(KDPJalanIrigasiJaringanDPUPRAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=3)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=6).filter(id_mutasi_berkurang__exact=5)
class KontrakJalanIrigasiJaringanDPUPRAdmin(KontrakJalanIrigasiJaringanAdmin):
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_skpd":
kwargs["queryset"] = SKPD.objects.filter(id__exact=3)
return super(KontrakJalanIrigasiJaringanDPUPRAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
return self.model.objects.filter(id_skpd__exact=3)
class HargaJalanIrigasiJaringanDPUPRAdmin(HargaJalanIrigasiJaringanAdmin):
def get_queryset(self, request):
sub_skpd_qs = SUBSKPD.objects.filter(id_skpd__exact=3)
jalan_irigasi_jaringan_qs = JalanIrigasiJaringan.objects.filter(id_sub_skpd__in=sub_skpd_qs)
return self.model.objects.filter(id_jalan_irigasi_jaringan__in=jalan_irigasi_jaringan_qs)
class JalanIrigasiJaringanPenghapusanDPUPRAdmin(JalanIrigasiJaringanPenghapusanAdmin):
inlines = [PenghapusanJalanIrigasiJaringanDPUPRInline, TahunBerkurangJalanIrigasiJaringanDPUPRInline,
SKPDAsalJalanIrigasiJaringanDPUPRInline,
SKPDTujuanJalanIrigasiJaringanDPUPRInline,
FotoJalanIrigasiJaringanDPUPRInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=3)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__in=[2,4,6,7,10,])
###Register JalanIrigasiJaringan DPUPR
admin.site.register(JalanIrigasiJaringanDPUPR, JalanIrigasiJaringanDPUPRAdmin)
admin.site.register(JalanIrigasiJaringanUsulHapusDPUPR, JalanIrigasiJaringanUsulHapusDPUPRAdmin)
admin.site.register(KDPJalanIrigasiJaringanDPUPR, KDPJalanIrigasiJaringanDPUPRAdmin)
admin.site.register(KontrakJalanIrigasiJaringanDPUPR, KontrakJalanIrigasiJaringanDPUPRAdmin)
admin.site.register(HargaJalanIrigasiJaringanDPUPR, HargaJalanIrigasiJaringanDPUPRAdmin)
admin.site.register(JalanIrigasiJaringanPenghapusanDPUPR, JalanIrigasiJaringanPenghapusanDPUPRAdmin)
#### Aset Tetap Lainnya
from atl.models import KontrakATL, HargaATL, ATL, PenghapusanATL, PemanfaatanATL, TahunBerkurangATL, TahunBerkurangUsulHapusATL
from atl.models import ATLPemanfaatan, ATLPenghapusan, ATLUsulHapus
from atl.models import ATLDPUPR, KontrakATLDPUPR, HargaATLDPUPR, ATLUsulHapusDPUPR, TahunBerkurangUsulHapusATLDPUPR
from atl.models import ATLPenghapusanDPUPR, TahunBerkurangATLDPUPR, PenghapusanATLDPUPR
from atl.models import SKPDAsalATLDPUPR, SKPDTujuanATLDPUPR, FotoATLDPUPR
from atl.admin import HargaATLInline, ATLAdmin, KontrakATLAdmin, HargaATLAdmin, TahunBerkurangUsulHapusATLInline, ATLUsulHapusAdmin
from atl.admin import TahunBerkurangATLInline, PenghapusanATLInline, ATLPenghapusanAdmin
from atl.admin import SKPDAsalATLInline, SKPDTujuanATLInline, FotoATLInline
#### Class Aset Tetap Lainnya
class TahunBerkurangATLDPUPRInline(TahunBerkurangATLInline):
model = TahunBerkurangATLDPUPR
class PenghapusanATLDPUPRInline(PenghapusanATLInline):
model = PenghapusanATLDPUPR
class SKPDAsalATLDPUPRInline(SKPDAsalATLInline):
model = SKPDAsalATLDPUPR
class SKPDTujuanATLDPUPRInline(SKPDTujuanATLInline):
model = SKPDTujuanATLDPUPR
class FotoATLDPUPRInline(FotoATLInline):
model = FotoATLDPUPR
class HargaATLDPUPRInline(HargaATLInline):
model = HargaATLDPUPR
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_kontrak_atl":
kwargs["queryset"] = KontrakATL.objects.filter(id_skpd__exact=3)
return super(HargaATLDPUPRInline, self).formfield_for_foreignkey(db_field, request, **kwargs)
class TahunBerkurangUsulHapusATLDPUPRInline(TahunBerkurangUsulHapusATLInline):
model = TahunBerkurangUsulHapusATLDPUPR
class ATLDPUPRAdmin(ATLAdmin):
inlines = [HargaATLDPUPRInline,
SKPDAsalATLDPUPRInline,
FotoATLDPUPRInline, ]
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_sub_skpd":
kwargs["queryset"] = SUBSKPD.objects.filter(id_skpd__exact=3)
if db_field.name == "id_ruangan":
kwargs["queryset"] = Ruangan.objects.filter(id_gedung_bangunan__id_sub_skpd__id_skpd__exact=3)
return super(ATLDPUPRAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=3)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__exact=5)
class ATLUsulHapusDPUPRAdmin(ATLUsulHapusAdmin):
inlines = [TahunBerkurangUsulHapusATLDPUPRInline,
SKPDAsalATLDPUPRInline,
FotoATLDPUPRInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=3)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_golongan_barang__exact=5).filter(id_mutasi_berkurang__exact=3)
class KontrakATLDPUPRAdmin(KontrakATLAdmin):
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "id_skpd":
kwargs["queryset"] = SKPD.objects.filter(id__exact=3)
return super(KontrakATLDPUPRAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_queryset(self, request):
return self.model.objects.filter(id_skpd__exact=3)
# ATL price (harga) records whose parent ATL belongs to a DPUPR sub-SKPD.
class HargaATLDPUPRAdmin(HargaATLAdmin):
def get_queryset(self, request):
sub_skpd_qs = SUBSKPD.objects.filter(id_skpd__exact=3)
atl_qs = ATL.objects.filter(id_sub_skpd__in=sub_skpd_qs)
return self.model.objects.filter(id_atl__in=atl_qs)
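# Sketch (assumption): if id_atl, id_sub_skpd and id_skpd are plain ForeignKey
# fields, the two intermediate querysets in HargaATLDPUPRAdmin.get_queryset
# above could be collapsed into a single related-field lookup, e.g.
#     return self.model.objects.filter(id_atl__id_sub_skpd__id_skpd__exact=3)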
# Written-off (penghapusan) ATL records within DPUPR.
class ATLPenghapusanDPUPRAdmin(ATLPenghapusanAdmin):
inlines = [PenghapusanATLDPUPRInline, TahunBerkurangATLDPUPRInline,
SKPDAsalATLDPUPRInline,
SKPDTujuanATLDPUPRInline,
FotoATLDPUPRInline, ]
def get_queryset(self, request):
qs = SUBSKPD.objects.filter(id_skpd__exact=3)
return self.model.objects.filter(id_sub_skpd__in=qs).filter(id_mutasi_berkurang__in=[2,4,6,7,10,])
### Register ATL DPUPR
admin.site.register(ATLDPUPR, ATLDPUPRAdmin)
admin.site.register(ATLUsulHapusDPUPR, ATLUsulHapusDPUPRAdmin)
admin.site.register(KontrakATLDPUPR, KontrakATLDPUPRAdmin)
admin.site.register(HargaATLDPUPR, HargaATLDPUPRAdmin)
admin.site.register(ATLPenghapusanDPUPR, ATLPenghapusanDPUPRAdmin)
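# --- Hypothetical refactoring sketch (not part of the original admin wiring) ---
# Every DPUPR-scoped admin above repeats the same SKPD filter (id_skpd == 3).
# Assuming SUBSKPD.id_skpd is a ForeignKey to SKPD and the DPUPR row has primary
# key 3, the boilerplate could be factored into a mixin such as the one below.
# It is an illustration only; nothing here is registered with the admin site.
class DPUPRScopedAdminMixin(object):
    dpupr_skpd_id = 3  # assumed primary key of the DPUPR SKPD row

    def dpupr_sub_skpd_qs(self):
        # Sub-SKPDs that belong to DPUPR.
        return SUBSKPD.objects.filter(id_skpd__exact=self.dpupr_skpd_id)

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Narrow the sub-SKPD and room drop-downs the same way the
        # hand-written DPUPR admins above do.
        if db_field.name == "id_sub_skpd":
            kwargs["queryset"] = self.dpupr_sub_skpd_qs()
        if db_field.name == "id_ruangan":
            kwargs["queryset"] = Ruangan.objects.filter(
                id_gedung_bangunan__id_sub_skpd__id_skpd__exact=self.dpupr_skpd_id)
        return super(DPUPRScopedAdminMixin, self).formfield_for_foreignkey(
            db_field, request, **kwargs)

# Usage sketch: a concrete admin would keep only its record-level filter, e.g.
# class ATLDPUPRAdmin(DPUPRScopedAdminMixin, ATLAdmin):
#     def get_queryset(self, request):
#         return self.model.objects.filter(
#             id_sub_skpd__in=self.dpupr_sub_skpd_qs()
#         ).filter(id_mutasi_berkurang__exact=5)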