max_stars_repo_path (string, 4 to 197 chars) | max_stars_repo_name (string, 6 to 120 chars) | max_stars_count (int64, 0 to 191k) | id (string, 1 to 8 chars) | content (string, 6 to 964k chars) | score (float64, -0.88 to 3.95) | int_score (int64, 0 to 4)
---|---|---|---|---|---|---|
test.py | threefoldtech/JumpscaleX | 2 | 64004 | <filename>test.py
from Jumpscale import j
import traceback
import sys
def schema():
j.data.schema.test()
j.data.types.test()
def bcdb():
j.tools.tmux.kill()
assert len(j.tools.tmux.server.sessions) == 1
# j.servers.zdb.test(build=True)
# j.clients.zdb.test()
j.data.bcdb.test()
def servers():
j.tools.tmux.kill()
if j.core.platformtype.myplatform.isUbuntu:
j.builders.web.traefik.install()
# j.builders.db.etcd.install()
j.builders.network.coredns.install()
def ssh():
    # j.clients.sshagent.test()  # skipped: inside a container no ssh-key stays loaded, so the remaining tests could not continue
j.clients.sshkey.test()
# schema()
bcdb()
# ssh()
# servers()
| 1 | 1 |
demos/check_hdf5_hash.py | bengranett/catstore | 0 | 64132 | <filename>demos/check_hdf5_hash.py
import sys
import pypelid.utils.filetools as ft
for filename in sys.argv[1:]:
check, hashes = ft.check_hdf5_hash(filename)
if check:
print "%s: %s checksum passed :D"%(filename, hashes[0])
else:
print "%s: checksum failed :( (read:%s computed:%s)"%(filename, hashes[0], hashes[1])
| 1.015625 | 1 |
format4.py | andreroche/Test-Scripts | 0 | 64260 | <reponame>andreroche/Test-Scripts<gh_stars>0
for i in range (1,11):
print ('{:2d} {:2d} {:4d} {:5d}'.format(i, i**2, i**3, i**4))
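# Illustrative note (not part of the original script): the format spec pads
# i, i**2, i**3, i**4 into fixed-width columns, so e.g. i=3 prints as
# " 3  9   27    81" and i=10 as "10 100 1000 10000".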
| 1.453125 | 1 |
pandas/core/nanops.py | m2p-consulting/pandas | 0 | 64388 | <reponame>m2p-consulting/pandas
import functools
import itertools
import operator
from typing import Any, Optional, Tuple, Union, cast
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs import NaT, Timedelta, iNaT, lib
from pandas._typing import ArrayLike, Dtype, DtypeObj, F, Scalar
from pandas.compat._optional import import_optional_dependency
from pandas.core.dtypes.cast import maybe_upcast_putmask
from pandas.core.dtypes.common import (
get_dtype,
is_any_int_dtype,
is_bool_dtype,
is_complex,
is_datetime64_any_dtype,
is_float,
is_float_dtype,
is_integer,
is_integer_dtype,
is_numeric_dtype,
is_object_dtype,
is_scalar,
is_timedelta64_dtype,
needs_i8_conversion,
pandas_dtype,
)
from pandas.core.dtypes.dtypes import PeriodDtype
from pandas.core.dtypes.missing import isna, na_value_for_dtype, notna
from pandas.core.construction import extract_array
bn = import_optional_dependency("bottleneck", raise_on_missing=False, on_version="warn")
_BOTTLENECK_INSTALLED = bn is not None
_USE_BOTTLENECK = False
def set_use_bottleneck(v: bool = True) -> None:
# set/unset to use bottleneck
global _USE_BOTTLENECK
if _BOTTLENECK_INSTALLED:
_USE_BOTTLENECK = v
set_use_bottleneck(get_option("compute.use_bottleneck"))
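# Illustrative usage (not in the original source): bottleneck can also be
# toggled at runtime, mirroring the ``compute.use_bottleneck`` option above.
#
#   set_use_bottleneck(False)  # force the pure-numpy code paths
#   set_use_bottleneck(True)   # re-enable bottleneck where installed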
class disallow:
def __init__(self, *dtypes):
super().__init__()
self.dtypes = tuple(pandas_dtype(dtype).type for dtype in dtypes)
def check(self, obj) -> bool:
return hasattr(obj, "dtype") and issubclass(obj.dtype.type, self.dtypes)
def __call__(self, f: F) -> F:
@functools.wraps(f)
def _f(*args, **kwargs):
obj_iter = itertools.chain(args, kwargs.values())
if any(self.check(obj) for obj in obj_iter):
f_name = f.__name__.replace("nan", "")
raise TypeError(
f"reduction operation '{f_name}' not allowed for this dtype"
)
try:
with np.errstate(invalid="ignore"):
return f(*args, **kwargs)
except ValueError as e:
# we want to transform an object array
# ValueError message to the more typical TypeError
# e.g. this is normally a disallowed function on
# object arrays that contain strings
if is_object_dtype(args[0]):
raise TypeError(e) from e
raise
return cast(F, _f)
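# Illustrative usage (not in the original source): ``disallow`` is applied as
# a decorator with numpy dtype strings, e.g. "M8" for datetime64, so the
# wrapped reduction raises TypeError instead of operating on such arrays.
#
#   @disallow("M8", "m8")
#   def nan_reduction(values, *, axis=None):
#       return values.sum(axis)
#
#   # nan_reduction(np.array([], dtype="M8[ns]")) -> TypeError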
class bottleneck_switch:
def __init__(self, name=None, **kwargs):
self.name = name
self.kwargs = kwargs
def __call__(self, alt: F) -> F:
bn_name = self.name or alt.__name__
try:
bn_func = getattr(bn, bn_name)
except (AttributeError, NameError): # pragma: no cover
bn_func = None
@functools.wraps(alt)
def f(
values: np.ndarray,
*,
axis: Optional[int] = None,
skipna: bool = True,
**kwds,
):
if len(self.kwargs) > 0:
for k, v in self.kwargs.items():
if k not in kwds:
kwds[k] = v
if values.size == 0 and kwds.get("min_count") is None:
# We are empty, returning NA for our type
# Only applies for the default `min_count` of None
# since that affects how empty arrays are handled.
# TODO(GH-18976) update all the nanops methods to
# correctly handle empty inputs and remove this check.
# It *may* just be `var`
return _na_for_min_count(values, axis)
if _USE_BOTTLENECK and skipna and _bn_ok_dtype(values.dtype, bn_name):
if kwds.get("mask", None) is None:
# `mask` is not recognised by bottleneck, would raise
# TypeError if called
kwds.pop("mask", None)
result = bn_func(values, axis=axis, **kwds)
# prefer to treat inf/-inf as NA, but must compute the func
# twice :(
if _has_infs(result):
result = alt(values, axis=axis, skipna=skipna, **kwds)
else:
result = alt(values, axis=axis, skipna=skipna, **kwds)
else:
result = alt(values, axis=axis, skipna=skipna, **kwds)
return result
return cast(F, f)
def _bn_ok_dtype(dtype: DtypeObj, name: str) -> bool:
    # Bottleneck chokes on datetime64, PeriodDtype (or any EA)
if not is_object_dtype(dtype) and not needs_i8_conversion(dtype):
# GH 15507
# bottleneck does not properly upcast during the sum
# so can overflow
# GH 9422
# further we also want to preserve NaN when all elements
# are NaN, unlike bottleneck/numpy which consider this
# to be 0
if name in ["nansum", "nanprod"]:
return False
return True
return False
def _has_infs(result) -> bool:
if isinstance(result, np.ndarray):
if result.dtype == "f8":
return lib.has_infs_f8(result.ravel("K"))
elif result.dtype == "f4":
return lib.has_infs_f4(result.ravel("K"))
try:
return np.isinf(result).any()
except (TypeError, NotImplementedError):
# if it doesn't support infs, then it can't have infs
return False
def _get_fill_value(
dtype: DtypeObj, fill_value: Optional[Scalar] = None, fill_value_typ=None
):
""" return the correct fill value for the dtype of the values """
if fill_value is not None:
return fill_value
if _na_ok_dtype(dtype):
if fill_value_typ is None:
return np.nan
else:
if fill_value_typ == "+inf":
return np.inf
else:
return -np.inf
else:
if fill_value_typ is None:
return iNaT
else:
if fill_value_typ == "+inf":
# need the max int here
return np.iinfo(np.int64).max
else:
return iNaT
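# Illustrative summary (not in the original source) of the fill values chosen
# above for a few common dtypes:
#
#   _get_fill_value(np.dtype("float64"))                         -> np.nan
#   _get_fill_value(np.dtype("float64"), fill_value_typ="+inf")  -> np.inf
#   _get_fill_value(np.dtype("M8[ns]"))                          -> iNaT
#   _get_fill_value(np.dtype("M8[ns]"), fill_value_typ="+inf")   -> np.iinfo(np.int64).max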
def _maybe_get_mask(
values: np.ndarray, skipna: bool, mask: Optional[np.ndarray]
) -> Optional[np.ndarray]:
"""
Compute a mask if and only if necessary.
This function will compute a mask iff it is necessary. Otherwise,
return the provided mask (potentially None) when a mask does not need to be
computed.
A mask is never necessary if the values array is of boolean or integer
dtypes, as these are incapable of storing NaNs. If passing a NaN-capable
dtype that is interpretable as either boolean or integer data (eg,
timedelta64), a mask must be provided.
    If the skipna parameter is False, a new mask will not be computed unless
    the dtype requires one (e.g. datetime64/timedelta64).
    The mask is computed using isna().
Parameters
----------
values : ndarray
input array to potentially compute mask for
skipna : bool
boolean for whether NaNs should be skipped
mask : Optional[ndarray]
nan-mask if known
Returns
-------
Optional[np.ndarray]
"""
if mask is None:
if is_bool_dtype(values.dtype) or is_integer_dtype(values.dtype):
# Boolean data cannot contain nulls, so signal via mask being None
return None
if skipna or needs_i8_conversion(values.dtype):
mask = isna(values)
return mask
def _get_values(
values: np.ndarray,
skipna: bool,
fill_value: Any = None,
fill_value_typ: Optional[str] = None,
mask: Optional[np.ndarray] = None,
) -> Tuple[np.ndarray, Optional[np.ndarray], np.dtype, np.dtype, Any]:
"""
Utility to get the values view, mask, dtype, dtype_max, and fill_value.
If both mask and fill_value/fill_value_typ are not None and skipna is True,
the values array will be copied.
For input arrays of boolean or integer dtypes, copies will only occur if a
precomputed mask, a fill_value/fill_value_typ, and skipna=True are
provided.
Parameters
----------
values : ndarray
input array to potentially compute mask for
skipna : bool
boolean for whether NaNs should be skipped
fill_value : Any
value to fill NaNs with
fill_value_typ : str
Set to '+inf' or '-inf' to handle dtype-specific infinities
mask : Optional[np.ndarray]
nan-mask if known
Returns
-------
values : ndarray
Potential copy of input value array
mask : Optional[ndarray[bool]]
Mask for values, if deemed necessary to compute
dtype : np.dtype
dtype for values
dtype_max : np.dtype
platform independent dtype
fill_value : Any
fill value used
"""
    # _get_values is only called from within nanops, and in all cases
# with scalar fill_value. This guarantee is important for the
# maybe_upcast_putmask call below
assert is_scalar(fill_value)
values = extract_array(values, extract_numpy=True)
mask = _maybe_get_mask(values, skipna, mask)
dtype = values.dtype
if needs_i8_conversion(values.dtype):
# changing timedelta64/datetime64 to int64 needs to happen after
# finding `mask` above
values = np.asarray(values.view("i8"))
dtype_ok = _na_ok_dtype(dtype)
# get our fill value (in case we need to provide an alternative
# dtype for it)
fill_value = _get_fill_value(
dtype, fill_value=fill_value, fill_value_typ=fill_value_typ
)
if skipna and (mask is not None) and (fill_value is not None):
values = values.copy()
if dtype_ok and mask.any():
np.putmask(values, mask, fill_value)
# promote if needed
else:
values, _ = maybe_upcast_putmask(values, mask, fill_value)
# return a platform independent precision dtype
dtype_max = dtype
if is_integer_dtype(dtype) or is_bool_dtype(dtype):
dtype_max = np.dtype(np.int64)
elif is_float_dtype(dtype):
dtype_max = np.dtype(np.float64)
return values, mask, dtype, dtype_max, fill_value
def _na_ok_dtype(dtype: DtypeObj) -> bool:
if needs_i8_conversion(dtype):
return False
return not issubclass(dtype.type, np.integer)
def _wrap_results(result, dtype: np.dtype, fill_value=None):
""" wrap our results if needed """
if result is NaT:
pass
elif is_datetime64_any_dtype(dtype):
if fill_value is None:
# GH#24293
fill_value = iNaT
if not isinstance(result, np.ndarray):
assert not isna(fill_value), "Expected non-null fill_value"
if result == fill_value:
result = np.nan
if isna(result):
result = np.datetime64("NaT", "ns")
else:
result = np.int64(result).view("datetime64[ns]")
else:
# If we have float dtype, taking a view will give the wrong result
result = result.astype(dtype)
elif is_timedelta64_dtype(dtype):
if not isinstance(result, np.ndarray):
if result == fill_value:
result = np.nan
# raise if we have a timedelta64[ns] which is too large
if np.fabs(result) > np.iinfo(np.int64).max:
raise ValueError("overflow in timedelta operation")
result = Timedelta(result, unit="ns")
else:
result = result.astype("m8[ns]").view(dtype)
return result
def _datetimelike_compat(func):
"""
If we have datetime64 or timedelta64 values, ensure we have a correct
mask before calling the wrapped function, then cast back afterwards.
"""
@functools.wraps(func)
def new_func(values, *, axis=None, skipna=True, mask=None, **kwargs):
orig_values = values
datetimelike = values.dtype.kind in ["m", "M"]
if datetimelike and mask is None:
mask = isna(values)
result = func(values, axis=axis, skipna=skipna, mask=mask, **kwargs)
if datetimelike:
result = _wrap_results(result, orig_values.dtype, fill_value=iNaT)
if not skipna:
result = _mask_datetimelike_result(result, axis, mask, orig_values)
return result
return new_func
def _na_for_min_count(
values: np.ndarray, axis: Optional[int]
) -> Union[Scalar, np.ndarray]:
"""
Return the missing value for `values`.
Parameters
----------
values : ndarray
axis : int or None
axis for the reduction, required if values.ndim > 1.
Returns
-------
result : scalar or ndarray
For 1-D values, returns a scalar of the correct missing type.
For 2-D values, returns a 1-D array where each element is missing.
"""
# we either return np.nan or pd.NaT
if is_numeric_dtype(values):
values = values.astype("float64")
fill_value = na_value_for_dtype(values.dtype)
if fill_value is NaT:
fill_value = values.dtype.type("NaT", "ns")
if values.ndim == 1:
return fill_value
elif axis is None:
return fill_value
else:
result_shape = values.shape[:axis] + values.shape[axis + 1 :]
result = np.full(result_shape, fill_value, dtype=values.dtype)
return result
def nanany(
values: np.ndarray,
*,
axis: Optional[int] = None,
skipna: bool = True,
mask: Optional[np.ndarray] = None,
) -> bool:
"""
Check if any elements along an axis evaluate to True.
Parameters
----------
values : ndarray
axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : bool
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 2])
>>> nanops.nanany(s)
True
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([np.nan])
>>> nanops.nanany(s)
False
"""
values, _, _, _, _ = _get_values(values, skipna, fill_value=False, mask=mask)
return values.any(axis)
def nanall(
values: np.ndarray,
*,
axis: Optional[int] = None,
skipna: bool = True,
mask: Optional[np.ndarray] = None,
) -> bool:
"""
Check if all elements along an axis evaluate to True.
Parameters
----------
values : ndarray
    axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : bool
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 2, np.nan])
>>> nanops.nanall(s)
True
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 0])
>>> nanops.nanall(s)
False
"""
values, _, _, _, _ = _get_values(values, skipna, fill_value=True, mask=mask)
return values.all(axis)
@disallow("M8")
@_datetimelike_compat
def nansum(
values: np.ndarray,
*,
axis: Optional[int] = None,
skipna: bool = True,
min_count: int = 0,
mask: Optional[np.ndarray] = None,
) -> float:
"""
Sum the elements along an axis ignoring NaNs
Parameters
----------
values : ndarray[dtype]
    axis : int, optional
    skipna : bool, default True
    min_count : int, default 0
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : dtype
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 2, np.nan])
>>> nanops.nansum(s)
3.0
"""
values, mask, dtype, dtype_max, _ = _get_values(
values, skipna, fill_value=0, mask=mask
)
dtype_sum = dtype_max
if is_float_dtype(dtype):
dtype_sum = dtype
elif is_timedelta64_dtype(dtype):
dtype_sum = np.float64
the_sum = values.sum(axis, dtype=dtype_sum)
the_sum = _maybe_null_out(the_sum, axis, mask, values.shape, min_count=min_count)
return the_sum
def _mask_datetimelike_result(
result: Union[np.ndarray, np.datetime64, np.timedelta64],
axis: Optional[int],
mask: np.ndarray,
orig_values: np.ndarray,
):
if isinstance(result, np.ndarray):
# we need to apply the mask
result = result.astype("i8").view(orig_values.dtype)
axis_mask = mask.any(axis=axis)
result[axis_mask] = iNaT
else:
if mask.any():
result = NaT
return result
@disallow(PeriodDtype)
@bottleneck_switch()
@_datetimelike_compat
def nanmean(
values: np.ndarray,
*,
axis: Optional[int] = None,
skipna: bool = True,
mask: Optional[np.ndarray] = None,
) -> float:
"""
Compute the mean of the element along an axis ignoring NaNs
Parameters
----------
values : ndarray
    axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
float
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 2, np.nan])
>>> nanops.nanmean(s)
1.5
"""
values, mask, dtype, dtype_max, _ = _get_values(
values, skipna, fill_value=0, mask=mask
)
dtype_sum = dtype_max
dtype_count = np.float64
# not using needs_i8_conversion because that includes period
if dtype.kind in ["m", "M"]:
dtype_sum = np.float64
elif is_integer_dtype(dtype):
dtype_sum = np.float64
elif is_float_dtype(dtype):
dtype_sum = dtype
dtype_count = dtype
count = _get_counts(values.shape, mask, axis, dtype=dtype_count)
the_sum = _ensure_numeric(values.sum(axis, dtype=dtype_sum))
if axis is not None and getattr(the_sum, "ndim", False):
count = cast(np.ndarray, count)
with np.errstate(all="ignore"):
# suppress division by zero warnings
the_mean = the_sum / count
ct_mask = count == 0
if ct_mask.any():
the_mean[ct_mask] = np.nan
else:
the_mean = the_sum / count if count > 0 else np.nan
return the_mean
@bottleneck_switch()
def nanmedian(values, *, axis=None, skipna=True, mask=None):
"""
Parameters
----------
values : ndarray
    axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, np.nan, 2, 2])
>>> nanops.nanmedian(s)
2.0
"""
def get_median(x):
mask = notna(x)
if not skipna and not mask.all():
return np.nan
with warnings.catch_warnings():
# Suppress RuntimeWarning about All-NaN slice
warnings.filterwarnings("ignore", "All-NaN slice encountered")
res = np.nanmedian(x[mask])
return res
values, mask, dtype, _, _ = _get_values(values, skipna, mask=mask)
if not is_float_dtype(values.dtype):
try:
values = values.astype("f8")
except ValueError as err:
# e.g. "could not convert string to float: 'a'"
raise TypeError from err
if mask is not None:
values[mask] = np.nan
if axis is None:
values = values.ravel("K")
notempty = values.size
# an array from a frame
if values.ndim > 1:
# there's a non-empty array to apply over otherwise numpy raises
if notempty:
if not skipna:
res = np.apply_along_axis(get_median, axis, values)
else:
# fastpath for the skipna case
with warnings.catch_warnings():
# Suppress RuntimeWarning about All-NaN slice
warnings.filterwarnings("ignore", "All-NaN slice encountered")
res = np.nanmedian(values, axis)
else:
# must return the correct shape, but median is not defined for the
# empty set so return nans of shape "everything but the passed axis"
# since "axis" is where the reduction would occur if we had a nonempty
# array
res = get_empty_reduction_result(values.shape, axis, np.float_, np.nan)
else:
# otherwise return a scalar value
res = get_median(values) if notempty else np.nan
return _wrap_results(res, dtype)
def get_empty_reduction_result(
shape: Tuple[int, ...], axis: int, dtype: np.dtype, fill_value: Any
) -> np.ndarray:
"""
The result from a reduction on an empty ndarray.
Parameters
----------
shape : Tuple[int]
axis : int
dtype : np.dtype
fill_value : Any
Returns
-------
np.ndarray
"""
shp = np.array(shape)
dims = np.arange(len(shape))
ret = np.empty(shp[dims != axis], dtype=dtype)
ret.fill(fill_value)
return ret
def _get_counts_nanvar(
    values_shape: Tuple[int, ...],
mask: Optional[np.ndarray],
axis: Optional[int],
ddof: int,
dtype: Dtype = float,
) -> Tuple[Union[int, np.ndarray], Union[int, np.ndarray]]:
"""
Get the count of non-null values along an axis, accounting
for degrees of freedom.
Parameters
----------
values_shape : Tuple[int]
shape tuple from values ndarray, used if mask is None
mask : Optional[ndarray[bool]]
locations in values that should be considered missing
axis : Optional[int]
axis to count along
ddof : int
degrees of freedom
dtype : type, optional
type to use for count
Returns
-------
count : scalar or array
d : scalar or array
"""
dtype = get_dtype(dtype)
    count = _get_counts(values_shape, mask, axis, dtype=dtype)
d = count - dtype.type(ddof)
# always return NaN, never inf
if is_scalar(count):
if count <= ddof:
count = np.nan
d = np.nan
else:
mask2: np.ndarray = count <= ddof
if mask2.any():
np.putmask(d, mask2, np.nan)
np.putmask(count, mask2, np.nan)
return count, d
@bottleneck_switch(ddof=1)
def nanstd(values, *, axis=None, skipna=True, ddof=1, mask=None):
"""
Compute the standard deviation along given axis while ignoring NaNs
Parameters
----------
values : ndarray
    axis : int, optional
skipna : bool, default True
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, np.nan, 2, 3])
>>> nanops.nanstd(s)
1.0
"""
if values.dtype == "M8[ns]":
values = values.view("m8[ns]")
orig_dtype = values.dtype
values, mask, _, _, _ = _get_values(values, skipna, mask=mask)
result = np.sqrt(nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask))
return _wrap_results(result, orig_dtype)
@disallow("M8", "m8")
@bottleneck_switch(ddof=1)
def nanvar(values, *, axis=None, skipna=True, ddof=1, mask=None):
"""
Compute the variance along given axis while ignoring NaNs
Parameters
----------
values : ndarray
    axis : int, optional
skipna : bool, default True
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, np.nan, 2, 3])
>>> nanops.nanvar(s)
1.0
"""
values = extract_array(values, extract_numpy=True)
dtype = values.dtype
mask = _maybe_get_mask(values, skipna, mask)
if is_any_int_dtype(dtype):
values = values.astype("f8")
if mask is not None:
values[mask] = np.nan
if is_float_dtype(values.dtype):
count, d = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype)
else:
count, d = _get_counts_nanvar(values.shape, mask, axis, ddof)
if skipna and mask is not None:
values = values.copy()
np.putmask(values, mask, 0)
# xref GH10242
# Compute variance via two-pass algorithm, which is stable against
# cancellation errors and relatively accurate for small numbers of
# observations.
#
# See https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
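    # Worked example (illustrative, not in the original source): for
    # values=[1, nan, 2, 3] with skipna=True the mask zeros the NaN slot,
    # count=3 and d=2, so avg = (1+0+2+3)/3 = 2.0; the second pass sums
    # (2-1)^2 + (2-2)^2 + (2-3)^2 = 2 (the masked slot is zeroed again),
    # giving result = 2/2 = 1.0, matching the docstring example.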
avg = _ensure_numeric(values.sum(axis=axis, dtype=np.float64)) / count
if axis is not None:
avg = np.expand_dims(avg, axis)
sqr = _ensure_numeric((avg - values) ** 2)
if mask is not None:
np.putmask(sqr, mask, 0)
result = sqr.sum(axis=axis, dtype=np.float64) / d
# Return variance as np.float64 (the datatype used in the accumulator),
# unless we were dealing with a float array, in which case use the same
# precision as the original values array.
if is_float_dtype(dtype):
result = result.astype(dtype)
return result
@disallow("M8", "m8")
def nansem(
values: np.ndarray,
*,
axis: Optional[int] = None,
skipna: bool = True,
ddof: int = 1,
mask: Optional[np.ndarray] = None,
) -> float:
"""
Compute the standard error in the mean along given axis while ignoring NaNs
Parameters
----------
values : ndarray
    axis : int, optional
skipna : bool, default True
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float64
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, np.nan, 2, 3])
>>> nanops.nansem(s)
0.5773502691896258
"""
# This checks if non-numeric-like data is passed with numeric_only=False
# and raises a TypeError otherwise
nanvar(values, axis=axis, skipna=skipna, ddof=ddof, mask=mask)
mask = _maybe_get_mask(values, skipna, mask)
if not is_float_dtype(values.dtype):
values = values.astype("f8")
count, _ = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype)
var = nanvar(values, axis=axis, skipna=skipna, ddof=ddof)
return np.sqrt(var) / np.sqrt(count)
def _nanminmax(meth, fill_value_typ):
@bottleneck_switch(name="nan" + meth)
@_datetimelike_compat
def reduction(
values: np.ndarray,
*,
axis: Optional[int] = None,
skipna: bool = True,
mask: Optional[np.ndarray] = None,
) -> Dtype:
values, mask, dtype, dtype_max, fill_value = _get_values(
values, skipna, fill_value_typ=fill_value_typ, mask=mask
)
if (axis is not None and values.shape[axis] == 0) or values.size == 0:
try:
result = getattr(values, meth)(axis, dtype=dtype_max)
result.fill(np.nan)
except (AttributeError, TypeError, ValueError):
result = np.nan
else:
result = getattr(values, meth)(axis)
result = _maybe_null_out(result, axis, mask, values.shape)
return result
return reduction
nanmin = _nanminmax("min", fill_value_typ="+inf")
nanmax = _nanminmax("max", fill_value_typ="-inf")
@disallow("O")
def nanargmax(
values: np.ndarray,
*,
axis: Optional[int] = None,
skipna: bool = True,
mask: Optional[np.ndarray] = None,
) -> Union[int, np.ndarray]:
"""
Parameters
----------
values : ndarray
    axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : int or ndarray[int]
The index/indices of max value in specified axis or -1 in the NA case
Examples
--------
>>> import pandas.core.nanops as nanops
>>> arr = np.array([1, 2, 3, np.nan, 4])
>>> nanops.nanargmax(arr)
4
>>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3)
>>> arr[2:, 2] = np.nan
>>> arr
    array([[ 0.,  1.,  2.],
           [ 3.,  4.,  5.],
           [ 6.,  7., nan],
           [ 9., 10., nan]])
>>> nanops.nanargmax(arr, axis=1)
array([2, 2, 1, 1], dtype=int64)
"""
values, mask, _, _, _ = _get_values(values, True, fill_value_typ="-inf", mask=mask)
result = values.argmax(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
@disallow("O")
def nanargmin(
values: np.ndarray,
*,
axis: Optional[int] = None,
skipna: bool = True,
mask: Optional[np.ndarray] = None,
) -> Union[int, np.ndarray]:
"""
Parameters
----------
values : ndarray
    axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : int or ndarray[int]
The index/indices of min value in specified axis or -1 in the NA case
Examples
--------
>>> import pandas.core.nanops as nanops
>>> arr = np.array([1, 2, 3, np.nan, 4])
>>> nanops.nanargmin(arr)
0
>>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3)
>>> arr[2:, 0] = np.nan
>>> arr
    array([[ 0.,  1.,  2.],
           [ 3.,  4.,  5.],
           [nan,  7.,  8.],
           [nan, 10., 11.]])
>>> nanops.nanargmin(arr, axis=1)
array([0, 0, 1, 1], dtype=int64)
"""
values, mask, _, _, _ = _get_values(values, True, fill_value_typ="+inf", mask=mask)
result = values.argmin(axis)
result = _maybe_arg_null_out(result, axis, mask, skipna)
return result
@disallow("M8", "m8")
def nanskew(
values: np.ndarray,
*,
axis: Optional[int] = None,
skipna: bool = True,
mask: Optional[np.ndarray] = None,
) -> float:
"""
Compute the sample skewness.
The statistic computed here is the adjusted Fisher-Pearson standardized
moment coefficient G1. The algorithm computes this coefficient directly
from the second and third central moment.
Parameters
----------
values : ndarray
    axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float64
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, np.nan, 1, 2])
>>> nanops.nanskew(s)
1.7320508075688787
"""
values = extract_array(values, extract_numpy=True)
mask = _maybe_get_mask(values, skipna, mask)
if not is_float_dtype(values.dtype):
values = values.astype("f8")
count = _get_counts(values.shape, mask, axis)
else:
count = _get_counts(values.shape, mask, axis, dtype=values.dtype)
if skipna and mask is not None:
values = values.copy()
np.putmask(values, mask, 0)
mean = values.sum(axis, dtype=np.float64) / count
if axis is not None:
mean = np.expand_dims(mean, axis)
adjusted = values - mean
if skipna and mask is not None:
np.putmask(adjusted, mask, 0)
adjusted2 = adjusted ** 2
adjusted3 = adjusted2 * adjusted
m2 = adjusted2.sum(axis, dtype=np.float64)
m3 = adjusted3.sum(axis, dtype=np.float64)
    # floating point error
    #
    # #18044: calc_skew in _libs/windows.pyx follows this behavior
    # to fix the fperr by treating m2 < 1e-14 as zero
m2 = _zero_out_fperr(m2)
m3 = _zero_out_fperr(m3)
with np.errstate(invalid="ignore", divide="ignore"):
result = (count * (count - 1) ** 0.5 / (count - 2)) * (m3 / m2 ** 1.5)
dtype = values.dtype
if is_float_dtype(dtype):
result = result.astype(dtype)
if isinstance(result, np.ndarray):
result = np.where(m2 == 0, 0, result)
result[count < 3] = np.nan
return result
else:
result = 0 if m2 == 0 else result
if count < 3:
return np.nan
return result
@disallow("M8", "m8")
def nankurt(
values: np.ndarray,
*,
axis: Optional[int] = None,
skipna: bool = True,
mask: Optional[np.ndarray] = None,
) -> float:
"""
Compute the sample excess kurtosis
The statistic computed here is the adjusted Fisher-Pearson standardized
moment coefficient G2, computed directly from the second and fourth
central moment.
Parameters
----------
values : ndarray
    axis : int, optional
skipna : bool, default True
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
result : float64
Unless input is a float array, in which case use the same
precision as the input array.
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, np.nan, 1, 3, 2])
>>> nanops.nankurt(s)
-1.2892561983471076
"""
values = extract_array(values, extract_numpy=True)
mask = _maybe_get_mask(values, skipna, mask)
if not is_float_dtype(values.dtype):
values = values.astype("f8")
count = _get_counts(values.shape, mask, axis)
else:
count = _get_counts(values.shape, mask, axis, dtype=values.dtype)
if skipna and mask is not None:
values = values.copy()
np.putmask(values, mask, 0)
mean = values.sum(axis, dtype=np.float64) / count
if axis is not None:
mean = np.expand_dims(mean, axis)
adjusted = values - mean
if skipna and mask is not None:
np.putmask(adjusted, mask, 0)
adjusted2 = adjusted ** 2
adjusted4 = adjusted2 ** 2
m2 = adjusted2.sum(axis, dtype=np.float64)
m4 = adjusted4.sum(axis, dtype=np.float64)
with np.errstate(invalid="ignore", divide="ignore"):
adj = 3 * (count - 1) ** 2 / ((count - 2) * (count - 3))
numer = count * (count + 1) * (count - 1) * m4
denom = (count - 2) * (count - 3) * m2 ** 2
    # floating point error
    #
    # #18044: calc_kurt in _libs/windows.pyx follows this behavior
    # to fix the fperr by treating denom < 1e-14 as zero
numer = _zero_out_fperr(numer)
denom = _zero_out_fperr(denom)
if not isinstance(denom, np.ndarray):
# if ``denom`` is a scalar, check these corner cases first before
# doing division
if count < 4:
return np.nan
if denom == 0:
return 0
with np.errstate(invalid="ignore", divide="ignore"):
result = numer / denom - adj
dtype = values.dtype
if is_float_dtype(dtype):
result = result.astype(dtype)
if isinstance(result, np.ndarray):
result = np.where(denom == 0, 0, result)
result[count < 4] = np.nan
return result
@disallow("M8", "m8")
def nanprod(
values: np.ndarray,
*,
axis: Optional[int] = None,
skipna: bool = True,
min_count: int = 0,
mask: Optional[np.ndarray] = None,
) -> float:
"""
Parameters
----------
values : ndarray[dtype]
    axis : int, optional
    skipna : bool, default True
    min_count : int, default 0
mask : ndarray[bool], optional
nan-mask if known
Returns
-------
Dtype
        The product of all elements on a given axis. (NaNs are treated as 1)
Examples
--------
>>> import pandas.core.nanops as nanops
>>> s = pd.Series([1, 2, 3, np.nan])
>>> nanops.nanprod(s)
6.0
"""
mask = _maybe_get_mask(values, skipna, mask)
if skipna and mask is not None:
values = values.copy()
values[mask] = 1
result = values.prod(axis)
return _maybe_null_out(result, axis, mask, values.shape, min_count=min_count)
def _maybe_arg_null_out(
result: np.ndarray, axis: Optional[int], mask: Optional[np.ndarray], skipna: bool
) -> Union[np.ndarray, int]:
# helper function for nanargmin/nanargmax
if mask is None:
return result
if axis is None or not getattr(result, "ndim", False):
if skipna:
if mask.all():
result = -1
else:
if mask.any():
result = -1
else:
if skipna:
na_mask = mask.all(axis)
else:
na_mask = mask.any(axis)
if na_mask.any():
result[na_mask] = -1
return result
def _get_counts(
values_shape: Tuple[int, ...],
mask: Optional[np.ndarray],
axis: Optional[int],
dtype: Dtype = float,
) -> Union[int, float, np.ndarray]:
"""
Get the count of non-null values along an axis
Parameters
----------
values_shape : tuple of int
shape tuple from values ndarray, used if mask is None
mask : Optional[ndarray[bool]]
locations in values that should be considered missing
axis : Optional[int]
axis to count along
dtype : type, optional
type to use for count
Returns
-------
count : scalar or array
"""
dtype = get_dtype(dtype)
if axis is None:
if mask is not None:
n = mask.size - mask.sum()
else:
n = np.prod(values_shape)
return dtype.type(n)
if mask is not None:
count = mask.shape[axis] - mask.sum(axis)
else:
count = values_shape[axis]
if is_scalar(count):
return dtype.type(count)
try:
return count.astype(dtype)
except AttributeError:
return np.array(count, dtype=dtype)
def _maybe_null_out(
result: np.ndarray,
axis: Optional[int],
mask: Optional[np.ndarray],
shape: Tuple[int, ...],
min_count: int = 1,
) -> float:
"""
Returns
-------
Dtype
The product of all elements on a given axis. ( NaNs are treated as 1)
"""
if mask is not None and axis is not None and getattr(result, "ndim", False):
null_mask = (mask.shape[axis] - mask.sum(axis) - min_count) < 0
if np.any(null_mask):
if is_numeric_dtype(result):
if np.iscomplexobj(result):
result = result.astype("c16")
else:
result = result.astype("f8")
result[null_mask] = np.nan
else:
# GH12941, use None to auto cast null
result[null_mask] = None
elif result is not NaT:
if check_below_min_count(shape, mask, min_count):
result = np.nan
return result
def check_below_min_count(
shape: Tuple[int, ...], mask: Optional[np.ndarray], min_count: int
) -> bool:
"""
Check for the `min_count` keyword. Returns True if below `min_count` (when
missing value should be returned from the reduction).
Parameters
----------
shape : tuple
The shape of the values (`values.shape`).
mask : ndarray or None
Boolean numpy array (typically of same shape as `shape`) or None.
min_count : int
Keyword passed through from sum/prod call.
Returns
-------
bool
"""
if min_count > 0:
if mask is None:
# no missing values, only check size
non_nulls = np.prod(shape)
else:
non_nulls = mask.size - mask.sum()
if non_nulls < min_count:
return True
return False
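# Illustrative behavior (not in the original source): with min_count=1 an
# all-NaN reduction gets nulled out, e.g.
#
#   check_below_min_count((2,), np.array([True, True]), min_count=1)  # True
#   check_below_min_count((2,), None, min_count=1)                    # False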
def _zero_out_fperr(arg):
    # #18044 references this behavior to fix the rolling skew/kurt issue
if isinstance(arg, np.ndarray):
with np.errstate(invalid="ignore"):
return np.where(np.abs(arg) < 1e-14, 0, arg)
else:
return arg.dtype.type(0) if np.abs(arg) < 1e-14 else arg
@disallow("M8", "m8")
def nancorr(
a: np.ndarray, b: np.ndarray, *, method="pearson", min_periods: Optional[int] = None
):
"""
a, b: ndarrays
"""
if len(a) != len(b):
raise AssertionError("Operands to nancorr must have same size")
if min_periods is None:
min_periods = 1
valid = notna(a) & notna(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
f = get_corr_func(method)
return f(a, b)
def get_corr_func(method):
if method == "kendall":
from scipy.stats import kendalltau
def func(a, b):
return kendalltau(a, b)[0]
return func
elif method == "spearman":
from scipy.stats import spearmanr
def func(a, b):
return spearmanr(a, b)[0]
return func
elif method == "pearson":
def func(a, b):
return np.corrcoef(a, b)[0, 1]
return func
elif callable(method):
return method
raise ValueError(
f"Unknown method '{method}', expected one of "
"'kendall', 'spearman', 'pearson', or callable"
)
@disallow("M8", "m8")
def nancov(
a: np.ndarray,
b: np.ndarray,
*,
min_periods: Optional[int] = None,
ddof: Optional[int] = 1,
):
if len(a) != len(b):
raise AssertionError("Operands to nancov must have same size")
if min_periods is None:
min_periods = 1
valid = notna(a) & notna(b)
if not valid.all():
a = a[valid]
b = b[valid]
if len(a) < min_periods:
return np.nan
return np.cov(a, b, ddof=ddof)[0, 1]
def _ensure_numeric(x):
if isinstance(x, np.ndarray):
if is_integer_dtype(x) or is_bool_dtype(x):
x = x.astype(np.float64)
elif is_object_dtype(x):
try:
x = x.astype(np.complex128)
except (TypeError, ValueError):
try:
x = x.astype(np.float64)
except ValueError as err:
# GH#29941 we get here with object arrays containing strs
raise TypeError(f"Could not convert {x} to numeric") from err
else:
if not np.any(np.imag(x)):
x = x.real
elif not (is_float(x) or is_integer(x) or is_complex(x)):
try:
x = float(x)
except ValueError:
# e.g. "1+1j" or "foo"
try:
x = complex(x)
except ValueError as err:
# e.g. "foo"
raise TypeError(f"Could not convert {x} to numeric") from err
return x
# NA-friendly array comparisons
def make_nancomp(op):
def f(x, y):
xmask = isna(x)
ymask = isna(y)
mask = xmask | ymask
with np.errstate(all="ignore"):
result = op(x, y)
if mask.any():
if is_bool_dtype(result):
result = result.astype("O")
np.putmask(result, mask, np.nan)
return result
return f
nangt = make_nancomp(operator.gt)
nange = make_nancomp(operator.ge)
nanlt = make_nancomp(operator.lt)
nanle = make_nancomp(operator.le)
naneq = make_nancomp(operator.eq)
nanne = make_nancomp(operator.ne)
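# Illustrative behavior (not in the original source): where either operand is
# NA, the boolean result is upcast to object and the slot set to NaN, e.g.
#
#   naneq(np.array([1.0, np.nan]), np.array([1.0, 2.0]))
#   # -> array([True, nan], dtype=object)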
def _nanpercentile_1d(
values: np.ndarray, mask: np.ndarray, q, na_value: Scalar, interpolation
) -> Union[Scalar, np.ndarray]:
"""
Wrapper for np.percentile that skips missing values, specialized to
1-dimensional case.
Parameters
----------
values : array over which to find quantiles
mask : ndarray[bool]
locations in values that should be considered missing
q : scalar or array of quantile indices to find
na_value : scalar
value to return for empty or all-null values
interpolation : str
Returns
-------
quantiles : scalar or array
"""
# mask is Union[ExtensionArray, ndarray]
values = values[~mask]
if len(values) == 0:
if lib.is_scalar(q):
return na_value
else:
return np.array([na_value] * len(q), dtype=values.dtype)
return np.percentile(values, q, interpolation=interpolation)
def nanpercentile(
values: np.ndarray,
q,
*,
axis: int,
na_value,
mask: np.ndarray,
ndim: int,
interpolation,
):
"""
Wrapper for np.percentile that skips missing values.
Parameters
----------
values : array over which to find quantiles
q : scalar or array of quantile indices to find
axis : {0, 1}
na_value : scalar
value to return for empty or all-null values
mask : ndarray[bool]
locations in values that should be considered missing
ndim : {1, 2}
interpolation : str
Returns
-------
quantiles : scalar or array
"""
if values.dtype.kind in ["m", "M"]:
# need to cast to integer to avoid rounding errors in numpy
result = nanpercentile(
values.view("i8"),
q=q,
axis=axis,
na_value=na_value.view("i8"),
mask=mask,
ndim=ndim,
interpolation=interpolation,
)
        # Note: we have to do `astype` and not view because in general we
        # have float result at this point, not i8
return result.astype(values.dtype)
if not lib.is_scalar(mask) and mask.any():
if ndim == 1:
return _nanpercentile_1d(
values, mask, q, na_value, interpolation=interpolation
)
else:
# for nonconsolidatable blocks mask is 1D, but values 2D
if mask.ndim < values.ndim:
mask = mask.reshape(values.shape)
if axis == 0:
values = values.T
mask = mask.T
result = [
_nanpercentile_1d(val, m, q, na_value, interpolation=interpolation)
for (val, m) in zip(list(values), list(mask))
]
result = np.array(result, dtype=values.dtype, copy=False).T
return result
else:
return np.percentile(values, q, axis=axis, interpolation=interpolation)
def na_accum_func(values: ArrayLike, accum_func, *, skipna: bool) -> ArrayLike:
"""
Cumulative function with skipna support.
Parameters
----------
values : np.ndarray or ExtensionArray
accum_func : {np.cumprod, np.maximum.accumulate, np.cumsum, np.minimum.accumulate}
skipna : bool
Returns
-------
np.ndarray or ExtensionArray
"""
mask_a, mask_b = {
np.cumprod: (1.0, np.nan),
np.maximum.accumulate: (-np.inf, np.nan),
np.cumsum: (0.0, np.nan),
np.minimum.accumulate: (np.inf, np.nan),
}[accum_func]
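    # Illustrative pairing (not in the original source): mask_a is the
    # identity element used to fill NAs before accumulating and mask_b is
    # written back afterwards, e.g. for np.cumsum on [1, nan, 2]:
    # fill -> [1, 0, 2], accumulate -> [1, 1, 3], restore -> [1, nan, 3].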
# We will be applying this function to block values
if values.dtype.kind in ["m", "M"]:
# GH#30460, GH#29058
# numpy 1.18 started sorting NaTs at the end instead of beginning,
# so we need to work around to maintain backwards-consistency.
orig_dtype = values.dtype
# We need to define mask before masking NaTs
mask = isna(values)
if accum_func == np.minimum.accumulate:
# Note: the accum_func comparison fails as an "is" comparison
y = values.view("i8")
y[mask] = np.iinfo(np.int64).max
changed = True
else:
y = values
changed = False
result = accum_func(y.view("i8"), axis=0)
if skipna:
result[mask] = iNaT
elif accum_func == np.minimum.accumulate:
# Restore NaTs that we masked previously
nz = (~np.asarray(mask)).nonzero()[0]
if len(nz):
# everything up to the first non-na entry stays NaT
result[: nz[0]] = iNaT
if changed:
# restore NaT elements
y[mask] = iNaT # TODO: could try/finally for this?
if isinstance(values, np.ndarray):
result = result.view(orig_dtype)
else:
# DatetimeArray
result = type(values)._simple_new( # type: ignore[attr-defined]
result, dtype=orig_dtype
)
elif skipna and not issubclass(values.dtype.type, (np.integer, np.bool_)):
vals = values.copy()
mask = isna(vals)
vals[mask] = mask_a
result = accum_func(vals, axis=0)
result[mask] = mask_b
else:
result = accum_func(values, axis=0)
return result
| 1.445313 | 1 |
tests/api/common/mark_tasks.py | headrun/incubator-airflow | 0 | 64516 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from airflow import models
from airflow.api.common.experimental.mark_tasks import (
set_state, _create_dagruns, set_dag_run_state)
from airflow.settings import Session
from airflow.utils.dates import days_ago
from airflow.utils.state import State
from datetime import datetime, timedelta
DEV_NULL = "/dev/null"
class TestMarkTasks(unittest.TestCase):
def setUp(self):
self.dagbag = models.DagBag(include_examples=True)
self.dag1 = self.dagbag.dags['test_example_bash_operator']
self.dag2 = self.dagbag.dags['example_subdag_operator']
self.execution_dates = [days_ago(2), days_ago(1)]
drs = _create_dagruns(self.dag1, self.execution_dates,
state=State.RUNNING,
run_id_template="scheduled__{}")
for dr in drs:
dr.dag = self.dag1
dr.verify_integrity()
drs = _create_dagruns(self.dag2,
[self.dag2.default_args['start_date']],
state=State.RUNNING,
run_id_template="scheduled__{}")
for dr in drs:
dr.dag = self.dag2
dr.verify_integrity()
self.session = Session()
def snapshot_state(self, dag, execution_dates):
TI = models.TaskInstance
tis = self.session.query(TI).filter(
            TI.dag_id == dag.dag_id,
TI.execution_date.in_(execution_dates)
).all()
self.session.expunge_all()
return tis
def verify_state(self, dag, task_ids, execution_dates, state, old_tis):
TI = models.TaskInstance
tis = self.session.query(TI).filter(
            TI.dag_id == dag.dag_id,
TI.execution_date.in_(execution_dates)
).all()
self.assertTrue(len(tis) > 0)
for ti in tis:
if ti.task_id in task_ids and ti.execution_date in execution_dates:
self.assertEqual(ti.state, state)
else:
for old_ti in old_tis:
if (old_ti.task_id == ti.task_id
and old_ti.execution_date == ti.execution_date):
self.assertEqual(ti.state, old_ti.state)
def test_mark_tasks_now(self):
# set one task to success but do not commit
snapshot = self.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=False)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
None, snapshot)
# set one and only one task to success
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.SUCCESS, snapshot)
# set no tasks
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 0)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.SUCCESS, snapshot)
# set task to other than success
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.FAILED, commit=True)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.FAILED, snapshot)
        # don't alter other tasks
snapshot = self.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_0")
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 1)
self.verify_state(self.dag1, [task.task_id], [self.execution_dates[0]],
State.SUCCESS, snapshot)
def test_mark_downstream(self):
# test downstream
snapshot = self.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
relatives = task.get_flat_relatives(upstream=False)
task_ids = [t.task_id for t in relatives]
task_ids.append(task.task_id)
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=True, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 3)
self.verify_state(self.dag1, task_ids, [self.execution_dates[0]],
State.SUCCESS, snapshot)
def test_mark_upstream(self):
# test upstream
snapshot = self.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("run_after_loop")
relatives = task.get_flat_relatives(upstream=True)
task_ids = [t.task_id for t in relatives]
task_ids.append(task.task_id)
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=True, downstream=False, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 4)
self.verify_state(self.dag1, task_ids, [self.execution_dates[0]],
State.SUCCESS, snapshot)
def test_mark_tasks_future(self):
# set one task to success towards end of scheduled dag runs
snapshot = self.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=False, future=True,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 2)
self.verify_state(self.dag1, [task.task_id], self.execution_dates,
State.SUCCESS, snapshot)
def test_mark_tasks_past(self):
        # set one task to success and propagate the state to past dag runs
snapshot = self.snapshot_state(self.dag1, self.execution_dates)
task = self.dag1.get_task("runme_1")
altered = set_state(task=task, execution_date=self.execution_dates[1],
upstream=False, downstream=False, future=False,
past=True, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 2)
self.verify_state(self.dag1, [task.task_id], self.execution_dates,
State.SUCCESS, snapshot)
def test_mark_tasks_subdag(self):
        # mark a subdag task and its downstream tasks as success
task = self.dag2.get_task("section-1")
relatives = task.get_flat_relatives(upstream=False)
task_ids = [t.task_id for t in relatives]
task_ids.append(task.task_id)
altered = set_state(task=task, execution_date=self.execution_dates[0],
upstream=False, downstream=True, future=False,
past=False, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), 14)
        # cannot use snapshot here as that would require drilling down
        # the sub dag tree essentially recreating the same code as in the
        # tested logic.
self.verify_state(self.dag2, task_ids, [self.execution_dates[0]],
State.SUCCESS, [])
def tearDown(self):
self.dag1.clear()
self.dag2.clear()
# just to make sure we are fully cleaned up
self.session.query(models.DagRun).delete()
self.session.query(models.TaskInstance).delete()
self.session.commit()
self.session.close()
class TestMarkDAGRun(unittest.TestCase):
def setUp(self):
self.dagbag = models.DagBag(include_examples=True)
self.dag1 = self.dagbag.dags['test_example_bash_operator']
self.dag2 = self.dagbag.dags['example_subdag_operator']
self.execution_dates = [days_ago(3), days_ago(2), days_ago(1)]
self.session = Session()
def verify_dag_run_states(self, dag, date, state=State.SUCCESS):
drs = models.DagRun.find(dag_id=dag.dag_id, execution_date=date)
dr = drs[0]
self.assertEqual(dr.get_state(), state)
tis = dr.get_task_instances(session=self.session)
for ti in tis:
self.assertEqual(ti.state, state)
def test_set_running_dag_run_state(self):
date = self.execution_dates[0]
dr = self.dag1.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.RUNNING,
execution_date=date,
session=self.session
)
for ti in dr.get_task_instances(session=self.session):
ti.set_state(State.RUNNING, self.session)
altered = set_dag_run_state(self.dag1, date, state=State.SUCCESS, commit=True)
        # All of the tasks should be altered
self.assertEqual(len(altered), len(self.dag1.tasks))
self.verify_dag_run_states(self.dag1, date)
def test_set_success_dag_run_state(self):
date = self.execution_dates[0]
dr = self.dag1.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.SUCCESS,
execution_date=date,
session=self.session
)
for ti in dr.get_task_instances(session=self.session):
ti.set_state(State.SUCCESS, self.session)
altered = set_dag_run_state(self.dag1, date, state=State.SUCCESS, commit=True)
        # None of the tasks should be altered
self.assertEqual(len(altered), 0)
self.verify_dag_run_states(self.dag1, date)
def test_set_failed_dag_run_state(self):
date = self.execution_dates[0]
dr = self.dag1.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.FAILED,
execution_date=date,
session=self.session
)
dr.get_task_instance('runme_0').set_state(State.FAILED, self.session)
altered = set_dag_run_state(self.dag1, date, state=State.SUCCESS, commit=True)
        # All of the tasks should be altered
self.assertEqual(len(altered), len(self.dag1.tasks))
self.verify_dag_run_states(self.dag1, date)
def test_set_mixed_dag_run_state(self):
"""
This test checks function set_dag_run_state with mixed task instance
state.
"""
date = self.execution_dates[0]
dr = self.dag1.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.FAILED,
execution_date=date,
session=self.session
)
# success task
dr.get_task_instance('runme_0').set_state(State.SUCCESS, self.session)
# skipped task
dr.get_task_instance('runme_1').set_state(State.SKIPPED, self.session)
# retry task
dr.get_task_instance('runme_2').set_state(State.UP_FOR_RETRY, self.session)
# queued task
dr.get_task_instance('also_run_this').set_state(State.QUEUED, self.session)
# running task
dr.get_task_instance('run_after_loop').set_state(State.RUNNING, self.session)
# failed task
dr.get_task_instance('run_this_last').set_state(State.FAILED, self.session)
altered = set_dag_run_state(self.dag1, date, state=State.SUCCESS, commit=True)
self.assertEqual(len(altered), len(self.dag1.tasks) - 1) # only 1 task succeeded
self.verify_dag_run_states(self.dag1, date)
def test_set_state_without_commit(self):
date = self.execution_dates[0]
# Running dag run and task instances
dr = self.dag1.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.RUNNING,
execution_date=date,
session=self.session
)
for ti in dr.get_task_instances(session=self.session):
ti.set_state(State.RUNNING, self.session)
altered = set_dag_run_state(self.dag1, date, state=State.SUCCESS, commit=False)
        # All of the tasks should be altered
self.assertEqual(len(altered), len(self.dag1.tasks))
# Both dag run and task instances' states should remain the same
self.verify_dag_run_states(self.dag1, date, State.RUNNING)
def test_set_state_with_multiple_dagruns(self):
dr1 = self.dag2.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.FAILED,
execution_date=self.execution_dates[0],
session=self.session
)
dr2 = self.dag2.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.FAILED,
execution_date=self.execution_dates[1],
session=self.session
)
dr3 = self.dag2.create_dagrun(
run_id='manual__' + datetime.now().isoformat(),
state=State.RUNNING,
execution_date=self.execution_dates[2],
session=self.session
)
altered = set_dag_run_state(self.dag2, self.execution_dates[1],
state=State.SUCCESS, commit=True)
# Recursively count number of tasks in the dag
def count_dag_tasks(dag):
count = len(dag.tasks)
subdag_counts = [count_dag_tasks(subdag) for subdag in dag.subdags]
count += sum(subdag_counts)
return count
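        # e.g. for example_subdag_operator this counts the parent dag's own
        # tasks plus every task inside each subdag (illustrative note, not
        # part of the original test).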
self.assertEqual(len(altered), count_dag_tasks(self.dag2))
self.verify_dag_run_states(self.dag2, self.execution_dates[1])
        # Make sure the other dag runs' states are not changed
dr1 = models.DagRun.find(dag_id=self.dag2.dag_id, execution_date=self.execution_dates[0])
dr1 = dr1[0]
self.assertEqual(dr1.get_state(), State.FAILED)
dr3 = models.DagRun.find(dag_id=self.dag2.dag_id, execution_date=self.execution_dates[2])
dr3 = dr3[0]
self.assertEqual(dr3.get_state(), State.RUNNING)
def test_set_dag_run_state_edge_cases(self):
# Dag does not exist
altered = set_dag_run_state(None, self.execution_dates[0])
self.assertEqual(len(altered), 0)
# Invalid execution date
altered = set_dag_run_state(self.dag1, None)
self.assertEqual(len(altered), 0)
self.assertRaises(AssertionError, set_dag_run_state, self.dag1, timedelta(microseconds=-1))
# DagRun does not exist
# This will throw AssertionError since dag.latest_execution_date does not exist
self.assertRaises(AssertionError, set_dag_run_state, self.dag1, self.execution_dates[0])
def tearDown(self):
self.dag1.clear()
self.dag2.clear()
self.session.query(models.DagRun).delete()
self.session.query(models.TaskInstance).delete()
self.session.query(models.DagStat).delete()
self.session.commit()
if __name__ == '__main__':
unittest.main()
| 1.34375 | 1 |
docker/prepare.py | bilzard/mvtec-utils | 6 | 64644 | import os
from pathlib import Path
import cv2
import numpy as np
import pandas as pd
from pandas import DataFrame
from sklearn.model_selection import train_test_split
def create_info_csv(mvtec_dir: Path) -> DataFrame:
df = pd.DataFrame({})
for data_type in ["train", "test"]:
for p in mvtec_dir.glob(f"*/{data_type}/*/*.png"):
raw_stem = p.stem
defect = p.parents[0].name
data_type = p.parents[1].name
category = p.parents[2].name
df = df.append(
{
"raw_img_path": str(p),
"raw_stem": raw_stem,
"defect": defect,
"data_type": data_type,
"category": category,
},
ignore_index=True,
)
for category in df["category"].unique():
category_df = df.query("data_type=='train' & category==@category")
_, val_index = train_test_split(
category_df.index.tolist(),
train_size=0.8,
test_size=0.2,
random_state=5,
shuffle=True,
)
df.loc[val_index, "data_type"] = "val"
df["stem"] = df.apply(
lambda x: f"{x.category}_{x.data_type}_{x.defect}_{x.raw_stem}",
axis=1,
)
df["raw_mask_path"] = df.apply(
lambda x: f"{mvtec_dir}/{x.category}/ground_truth/{x.defect}/{x.raw_stem}_mask.png",
axis=1,
)
return df
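# Illustrative note (not part of the original script): a raw image such as
# /data/MVTec/bottle/test/broken_large/000.png yields defect="broken_large",
# data_type="test", category="bottle" and hence stem
# "bottle_test_broken_large_000".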
def move_images_and_masks(df: DataFrame) -> None:
os.makedirs("/data/images", exist_ok=True)
os.makedirs("/data/masks", exist_ok=True)
for i in df.index:
raw_img_path, raw_mask_path, stem = df.loc[i, ["raw_img_path", "raw_mask_path", "stem"]]
if os.path.exists(raw_mask_path):
os.rename(raw_mask_path, f"/data/masks/{stem}.png")
else:
            # no ground-truth mask exists (defect-free image), so write an all-zero mask
img = cv2.imread(raw_img_path)
mask = np.zeros(img.shape)
cv2.imwrite(f"/data/masks/{stem}.png", mask)
os.rename(raw_img_path, f"/data/images/{stem}.png")
    df = df.drop(columns=["raw_stem", "raw_img_path", "raw_mask_path"])
df.to_csv("/data/info.csv", index=False)
if __name__ == "__main__":
mvtec_dir = Path("/data/MVTec")
df = create_info_csv(mvtec_dir)
move_images_and_masks(df)
| 1.945313 | 2 |
app/main/forms.py | thantsinmoe/Device-Monitor | 30 | 64772 | import csv
from flask_wtf import FlaskForm as Form
from flask_wtf.file import FileField, FileRequired, FileAllowed
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
from wtforms import ValidationError
# noinspection PyMethodMayBeStatic
class HostForm(Form):
fqdn = StringField('FQDN or IP', validators=[DataRequired()])
port = StringField('TCP Port')
friendly_name = StringField('Friendly Name')
submit = SubmitField('Submit')
def validate_port(self, field):
if len(field.data) > 0:
try:
int(field.data)
except ValueError:
raise ValidationError('Port provided is not valid')
class ImportForm(Form):
file = FileField('Hosts', validators=[FileRequired(), FileAllowed(['csv'], 'Only CSV is supported!')])
submit = SubmitField('Submit')
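# Hedged usage sketch (illustrative only; the route, template, and save_host
# helper are assumptions, not part of this module):
#
#     @app.route('/host/add', methods=['GET', 'POST'])
#     def add_host():
#         form = HostForm()
#         if form.validate_on_submit():
#             save_host(form.fqdn.data, form.port.data, form.friendly_name.data)
#         return render_template('host_form.html', form=form)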
| 1.5 | 2 |
app.py | smsaladi/sg-simple-event-recorder | 0 | 64900 | """
A simple webhook event handler for Sendgrid
"""
import os
import json
import flask
from flask import request, jsonify
import flask_sqlalchemy
from sqlalchemy.ext.hybrid import hybrid_property
app = flask.Flask(__name__)
app.config['BASE_URL'] = os.environ['BASE_URL']
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['SQLALCHEMY_DATABASE_URI']
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# username/pass to POST
post_user, post_pass = os.environ['POST_USERNAME'], os.environ['POST_PASSWORD']
db = flask_sqlalchemy.SQLAlchemy()
db.init_app(app)
# Based on
# https://sendgrid.com/docs/for-developers/tracking-events/event/#event-objects
# These are other rarer(?) possibilities:
# asm_group_id, unique_args, marketing_campaign_id, marketing_campaign_name, pool
class Event(db.Model):
email = db.Column(db.Text)
timestamp = db.Column(db.Integer) # DateTime)
event = db.Column(db.Text)
smtp_id = db.Column(db.Text) # sg key is 'smtp-id'
useragent = db.Column(db.Text)
ip = db.Column(db.Text)
sg_event_id = db.Column(db.String(100), primary_key=True)
sg_message_id = db.Column(db.Text)
reason = db.Column(db.Text)
status = db.Column(db.Text)
response = db.Column(db.Text)
tls = db.Column(db.Text)
url = db.Column(db.Text)
urloffset = db.Column(db.Text)
attempt = db.Column(db.Text)
category = db.Column(db.Text)
type_ = db.Column(db.Text)
_other = db.Column('other', db.Text, default='[]')
@hybrid_property
def other(self):
return json.loads(self._other)
@other.setter
def other(self, lst):
self._other = json.dumps(lst)
event_keys = [k.strip('_')
for k in flask_sqlalchemy.inspect(Event).columns.keys()
if not k.startswith('_')]
@app.route('/', methods=['POST'])
def home():
if request.authorization["username"] != post_user or \
request.authorization["password"] != post_pass:
return jsonify({"message": "Unauthorized"}), 401
# No data, just return
if not request.json:
return ""
for item in request.json:
# fix name mangling
        if 'smtp-id' in item.keys():
            item['smtp_id'] = item.pop('smtp-id')
        # the model stores 'type' as 'type_' to avoid shadowing the builtin
        if 'type' in item.keys():
            item['type_'] = item.pop('type')
# collect keys not in model
other = {}
for k in list(item.keys()):
if k not in event_keys:
other[k] = str(item.pop(k))
obj = Event(**item)
obj.other = other
db.session.merge(obj)
db.session.commit()
return ""
@app.cli.command("initdb")
def init_db():
db.create_all()
db.session.commit()
return
if __name__ == "__main__":
app.run(debug=True, threaded=True, use_reloader=True)
| 1.695313 | 2 |
examples/commissioning_2022may/pointing_utils/dbutils.py | Subaru-PFS/ets_target_database | 0 | 65028 | <gh_stars>0
#!/usr/bin/env python3
import configparser
import tempfile
import time
import numpy as np
import pandas as pd
import psycopg2
import psycopg2.extras
from astropy import units as u
from astropy.table import Table
from astropy.time import Time
from targetdb import targetdb
def connect_subaru_gaiadb(conf=None):
conn = psycopg2.connect(**dict(conf["gaiadb"]))
return conn
def connect_targetdb(conf=None):
db = targetdb.TargetDB(**dict(conf["targetdb"]))
db.connect()
return db
# def generate_query_simple_boxsearch(
# ra1,
# ra2,
# dec1,
# dec2,
# tablename,
# # good_fluxstd=False,
# extra_where=None,
# # mag_min=None,
# # mag_max=None,
# # mag_filter=None,
# # min_prob_f_star=None,
# ):
# # FIXME: I know this is too simple and stupid,
# # but should be enough for the commissioning.
# # In the future, more sophisticated method should be used (e.g., q3c).
# query_target = f"""SELECT * FROM {tablename}
# WHERE ra >= {ra1} AND ra < {ra2}
# AND dec >= {dec1} AND dec < {dec2}
# """
# if extra_where is not None:
# query_target += extra_where
# query_target += ";"
# return query_target
def generate_query_list(
ra,
dec,
dw_ra,
dw,
tablename,
good_fluxstd=False,
flags_dist=False,
flags_ebv=False,
extra_where=None,
mag_min=None,
mag_max=None,
mag_filter=None,
min_prob_f_star=None,
):
dec1, dec2 = dec - dw, dec + dw
qlist = []
if ra - dw_ra < 0.0:
ra1 = [0.0, ra - dw_ra + 360.0]
ra2 = [ra + dw_ra, 360.0]
elif ra + dw_ra >= 360.0:
ra1 = [0.0, ra - dw_ra]
ra2 = [ra + dw_ra - 360.0, 360.0]
else:
ra1, ra2 = [ra - dw_ra], [ra + dw_ra]
if tablename == "target":
for i in range(len(ra1)):
query_target = f"""SELECT * FROM {tablename}
WHERE ra >= {ra1[i]} AND ra < {ra2[i]}
AND dec >= {dec1} AND dec < {dec2}
"""
if extra_where is not None:
query_target += extra_where
query_target += ";"
qlist.append(query_target)
return qlist
if tablename == "fluxstd":
for i in range(len(ra1)):
query_target = f"""SELECT * FROM {tablename}
WHERE ra >= {ra1[i]} AND ra < {ra2[i]}
AND dec >= {dec1} AND dec < {dec2}
"""
if extra_where is None:
extra_where = ""
if good_fluxstd:
extra_where += f"""
AND flags_dist IS FALSE
AND flags_ebv IS FALSE
AND prob_f_star > 0.5
AND psf_mag_{mag_filter} BETWEEN {mag_min} AND {mag_max}
"""
if not good_fluxstd:
extra_where = f"""
AND psf_mag_{mag_filter} BETWEEN {mag_min} AND {mag_max}
AND prob_f_star > {min_prob_f_star}
"""
if flags_dist:
extra_where += f"""
AND flags_dist IS FALSE
"""
if flags_ebv:
extra_where += f"""
AND flags_ebv IS FALSE
"""
query_target += extra_where
query_target += ";"
qlist.append(query_target)
return qlist
def generate_targets_from_targetdb(
ra,
dec,
conf=None,
arms="br",
tablename="target",
fp_radius_degree=260.0 * 10.2 / 3600, # "Radius" of PFS FoV in degree (?)
fp_fudge_factor=1.5, # fudge factor for search widths
width=None,
height=None,
extra_where=None,
mag_min=None,
mag_max=None,
mag_filter=None,
force_priority=None,
):
db = connect_targetdb(conf)
dw = fp_radius_degree * fp_fudge_factor
# consider the cosine term
cos_term = 1.0 / np.cos(dec * u.deg)
if width is None:
dw_ra = dw * cos_term
else:
dw_ra = width * cos_term / 2.0
if height is not None:
dw = height / 2.0
if "m" in arms:
extra_where = "AND is_medium_resolution IS TRUE"
else:
extra_where = "AND is_medium_resolution IS FALSE"
qlist = generate_query_list(
ra,
dec,
dw_ra,
dw,
tablename,
extra_where=extra_where,
mag_min=mag_min,
mag_max=mag_max,
mag_filter=mag_filter,
)
df = pd.DataFrame()
for q in qlist:
print(q)
t_begin = time.time()
df_tmp = db.fetch_query(q)
t_end = time.time()
print("Time spent for querying: {:f}".format(t_end - t_begin))
df = pd.concat([df, df_tmp], ignore_index=True)
df.loc[df["pmra"].isna(), "pmra"] = 0.0
df.loc[df["pmdec"].isna(), "pmdec"] = 0.0
df.loc[df["parallax"].isna(), "parallax"] = 1.0e-7
print(df)
if force_priority is not None:
df["priority"] = force_priority
db.close()
return df
def generate_fluxstds_from_targetdb(
ra,
dec,
conf=None,
tablename="fluxstd",
fp_radius_degree=260.0 * 10.2 / 3600, # "Radius" of PFS FoV in degree (?)
fp_fudge_factor=1.5, # fudge factor for search widths
width=None,
height=None,
good_fluxstd=False,
flags_dist=False,
flags_ebv=False,
mag_min=None,
mag_max=None,
mag_filter=None,
min_prob_f_star=None,
extra_where=None,
):
db = connect_targetdb(conf)
dw = fp_radius_degree * fp_fudge_factor
# consider the cosine term
cos_term = 1.0 / np.cos(dec * u.deg)
if width is None:
dw_ra = dw * cos_term
else:
dw_ra = width * cos_term / 2.0
if height is not None:
dw = height / 2.0
qlist = generate_query_list(
ra,
dec,
dw_ra,
dw,
tablename,
good_fluxstd=good_fluxstd,
flags_dist=flags_dist,
flags_ebv=flags_ebv,
extra_where=extra_where,
mag_min=mag_min,
mag_max=mag_max,
mag_filter=mag_filter,
min_prob_f_star=min_prob_f_star,
)
df = pd.DataFrame()
for q in qlist:
print(q)
t_begin = time.time()
df_tmp = db.fetch_query(q)
t_end = time.time()
print("Time spent for querying: {:f}".format(t_end - t_begin))
df = pd.concat([df, df_tmp], ignore_index=True)
df.loc[df["pmra"].isna(), "pmra"] = 0.0
df.loc[df["pmdec"].isna(), "pmdec"] = 0.0
df.loc[df["parallax"].isna(), "parallax"] = 1.0e-7
print(df)
db.close()
return df
# def generate_targets_from_gaiadb(args.ra, args.dec, conf=conf)
def generate_targets_from_gaiadb(
ra,
dec,
conf=None,
fp_radius_degree=260.0 * 10.2 / 3600, # "Radius" of PFS FoV in degree (?)
fp_fudge_factor=1.5, # fudge factor for search widths
search_radius=None,
band_select="phot_g_mean_mag",
mag_min=0.0,
mag_max=99.0,
good_astrometry=False,
):
conn = connect_subaru_gaiadb(conf)
cur = conn.cursor()
if search_radius is None:
search_radius = fp_radius_degree * fp_fudge_factor
# Query for raster scan stars:
# astrometric_excess_noise_sig (D) < 2
# 12 <= phot_g_mean_mag <=20
query_string = f"""SELECT
source_id,ref_epoch,ra,dec,pmra,pmdec,parallax,
phot_g_mean_mag,phot_bp_mean_mag,phot_rp_mean_mag
FROM gaia
WHERE q3c_radial_query(ra, dec, {ra}, {dec}, {search_radius})
AND {band_select} BETWEEN {mag_min} AND {mag_max}
"""
if good_astrometry:
query_string += "AND astrometric_excess_noise_sig < 2.0"
query_string += ";"
print(query_string)
cur.execute(query_string)
df_res = pd.DataFrame(
cur.fetchall(),
columns=[
"source_id",
"ref_epoch",
"ra",
"dec",
"pmra",
"pmdec",
"parallax",
"phot_g_mean_mag",
"phot_bp_mean_mag",
"phot_rp_mean_mag",
],
)
cur.close()
conn.close()
print(df_res)
return df_res
def fixcols_gaiadb_to_targetdb(
df,
proposal_id=None,
target_type_id=None,
input_catalog_id=None,
exptime=900.0,
priority=1,
):
df.rename(columns={"source_id": "obj_id", "ref_epoch": "epoch"}, inplace=True)
df["epoch"] = df["epoch"].apply(lambda x: f"J{x:.1f}")
df["proposal_id"] = proposal_id
df["target_type_id"] = target_type_id
df["input_catalog_id"] = input_catalog_id
df["effective_exptime"] = exptime
df["priority"] = priority
tb = Table([])
# ZPs are taken from Weiler (2018, A&A, 617, A138)
tb["g_mag_ab"] = (df["phot_g_mean_mag"].to_numpy() + (25.7455 - 25.6409)) * u.ABmag
tb["bp_mag_ab"] = (
df["phot_bp_mean_mag"].to_numpy() + (25.3603 - 25.3423)
) * u.ABmag
tb["rp_mag_ab"] = (
df["phot_rp_mean_mag"].to_numpy() + (25.1185 - 24.7600)
) * u.ABmag
df["g_flux_njy"] = tb["g_mag_ab"].to("nJy").value
df["bp_flux_njy"] = tb["bp_mag_ab"].to("nJy").value
df["rp_flux_njy"] = tb["rp_mag_ab"].to("nJy").value
return df
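# Hedged usage sketch (config path, coordinates, and IDs below are assumptions):
# conf = configparser.ConfigParser()
# conf.read("targetdb_config.ini")
# df_gaia = generate_targets_from_gaiadb(150.0, 2.0, conf=conf, mag_min=12.0, mag_max=20.0)
# df_targets = fixcols_gaiadb_to_targetdb(
#     df_gaia, proposal_id="o00000", target_type_id=1, input_catalog_id=2
# )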
| 1.875 | 2 |
tests/pelicanconf.py | Matael/Flex | 0 | 65156 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'Test'
SITEURL = u'http://localhost:8000'
SITENAME = u"Test Blog"
SITETITLE = AUTHOR
SITESUBTITLE = u'Test'
SITEDESCRIPTION = u'%s\'s Thoughts and Writings' % AUTHOR
SITELOGO = u'https://www.example.com/img/profile.png'
FAVICON = SITEURL + '/images/favicon.ico'
ROBOTS = u'index, follow'
THEME = u'../'
PATH = u'content'
TIMEZONE = u'America/Sao_Paulo'
DEFAULT_LANG = u'en'
OG_LOCALE = u'en_US'
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
USE_FOLDER_AS_CATEGORY = True
MAIN_MENU = True
LINKS = (('Portfolio', '//alexandrevicenzi.com'),)
SOCIAL = (('linkedin', 'https://br.linkedin.com/in/test'),
('github', 'https://github.com/test'),
('google', 'https://google.com/+Test'),
('rss', '//www.example.com/feeds/all.atom.xml'))
MENUITEMS = (('Archives', '/archives.html'),
('Categories', '/categories.html'),
('Tags', '/tags.html'),)
CC_LICENSE = {
'name': 'Creative Commons Attribution-ShareAlike',
'version': '4.0',
'slug': 'by-sa'
}
COPYRIGHT_YEAR = 2016
STATUSCAKE = {
'trackid': 'test-test',
'days': 7
}
RELATIVE_URLS = False
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = 'feeds/%s.atom.xml'
DELETE_OUTPUT_DIRECTORY = False
DEFAULT_PAGINATION = 5
SUMMARY_MAX_LENGTH = 150
DISQUS_SITENAME = "test-test"
GOOGLE_ANALYTICS = "UA-XXXXXX-X"
ADD_THIS_ID = 'ra-XX3242XX'
USE_LESS = True
| 1.5 | 2 |
src/dash/app.py | OlafNowicki/CNN_Review_Analyzer | 0 | 65284 | import os
import requests
import time
import pandas as pd
import config
from flask import request
import dash
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
import dash_table
from dash.dependencies import Input, Output, State
external_stylesheets = [
"https://use.fontawesome.com/releases/v5.0.7/css/all.css",
'https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css',
'https://fonts.googleapis.com/css?family=Roboto&display=swap'
]
external_script = "https://raw.githubusercontent.com/MarwanDebbiche/post-tuto-deployment/master/src/dash/assets/gtag.js"
app = dash.Dash(
__name__,
external_stylesheets=external_stylesheets,
meta_tags=[
{"name": "viewport", "content": "width=device-width, initial-scale=1"}
],
suppress_callback_exceptions=True
)
app.scripts.append_script({
"external_url": external_script
})
app.title = 'Reviews powered by AI'
companies = pd.read_csv('./csv/companies_forbes.csv')
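# The `config` module is not shown in this file; a minimal sketch of the
# attributes this app reads from it (values below are assumptions):
#
#     API_URL = "http://localhost:5000/api"
#     DEBUG = True
#     HOST = "0.0.0.0"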
app.layout = html.Div([
dcc.Location(id='url', refresh=False),
html.Div(id='page-content')
])
home_layout = html.Div(
[
html.Div(
[
html.A(
html.Img(
id='company_logo',
style={
'height': '100px',
'padding': '5px'
}
),
id="company_link",
target="_blank"
)
],
style={
'height': '100px',
'backgroundColor': 'white',
'borderStyle': 'solid',
'borderRadius': '100px',
'borderWidth': 'thin'
}
),
html.H1(
[
"What do you think of ",
html.Span(
id='company_name'
),
" ?"
],
className="h3 mb-3 font-weight-normal",
style={
'marginTop': '5px'
}
),
html.Div(
[
dcc.Textarea(
className="form-control z-depth-1",
id="review",
rows="8",
placeholder="Write something here..."
)
],
className="form-group shadow-textarea"
),
html.H5(
'Sentiment analysis 🤖'
),
dbc.Progress(
children=html.Span(
id='proba',
style={
'color': 'black',
'fontWeight': 'bold'
}
),
id="progress",
striped=False,
animated=False,
style={
'marginBottom': '10px'
}
),
html.H5(
'Propose a rating 😁📢'
),
html.Div(
[
dcc.Slider(
id='rating',
max=5,
min=1,
step=1,
marks={i: f'{i}' for i in range(1, 6)}
),
],
style={'marginBottom': '30px'}
),
html.Button(
[
html.Span(
"Submit",
style={
"marginRight": "10px"
}
),
html.I(
className="fa fa-paper-plane m-l-7"
)
],
className="btn btn-lg btn-primary btn-block",
role="submit",
id="submit_button",
n_clicks_timestamp=0
),
html.Button(
[
html.Span(
"Review another brand",
style={
"marginRight": "10px"
}
),
html.I(
className="fas fa-sync-alt"
)
],
className="btn btn-lg btn-secondary btn-block",
id='switch_button',
n_clicks_timestamp=0
),
html.P(
dcc.Link("Go to Admin 🔑", id="admin-link", href="/admin"),
className="mt-2"
),
html.P(
[
html.A("<NAME>", href="https://www.linkedin.com/in/olaf-nowicki/", target="_blank"),
" - 2021"
],
className="mt-3 mb-2 text-muted"
),
],
className="form-review",
)
admin_layout = html.Div(
[
html.H1("Admin Page 🔑"),
html.Div(id="admin-page-content"),
html.P(
dcc.Link("Go to Home 🏡", href="/"),
style={"marginTop": "20px"}
)
]
)
@app.callback(
[
Output('company_logo', 'src'),
Output('company_name', 'children'),
Output('review', 'value'),
Output('company_link', 'href')
],
[
Input('submit_button', 'n_clicks_timestamp'),
Input('switch_button', 'n_clicks_timestamp')
],
[
State('review', 'value'),
State('progress', 'value'),
State('rating', 'value'),
State('company_name', 'children')
]
)
def change_brand(submit_click_ts, another_brand_click_ts, review_text, score, rating, brand_name):
if submit_click_ts > another_brand_click_ts:
sentiment_score = float(score) / 100
ip_address = request.remote_addr
user_agent = request.headers.get('User-Agent')
response = requests.post(
f"{config.API_URL}/review",
data={
'review': review_text,
'rating': rating,
'suggested_rating': min(int(sentiment_score * 5 + 1), 5),
'sentiment_score': sentiment_score,
'brand': brand_name,
'user_agent': user_agent,
'ip_address': ip_address
}
)
if response.ok:
print("Review Saved")
else:
print("Error Saving Review")
random_company = companies.sample(1).to_dict(orient="records")[0]
company_logo_url = random_company['company_logo']
if not company_logo_url.startswith('http'):
company_logo_url = 'https://' + company_logo_url
company_name = random_company['company_name']
company_website = random_company['company_website']
return company_logo_url, company_name, '', company_website
@app.callback(
[
Output('proba', 'children'),
Output('progress', 'value'),
Output('progress', 'color'),
Output('rating', 'value'),
Output('submit_button', 'disabled')
],
[Input('review', 'value')]
)
def update_proba(review):
if review is not None and review.strip() != '':
response = requests.post(
f"{config.API_URL}/predict", data={'review': review})
proba = response.json()
proba = round(proba * 100, 2)
suggested_rating = min(int((proba / 100) * 5 + 1), 5)
text_proba = f"{proba}%"
if proba >= 67:
return text_proba, proba, 'success', suggested_rating, False
elif 33 < proba < 67:
return text_proba, proba, 'warning', suggested_rating, False
elif proba <= 33:
return text_proba, proba, 'danger', suggested_rating, False
else:
return None, 0, None, 0, True
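# Example of the mapping above (illustrative): a review scored at proba=85.0
# yields suggested_rating = min(int(0.85 * 5 + 1), 5) = 5 and a green bar.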
# Load review table
@app.callback(
Output('admin-page-content', 'children'),
[Input('url', 'pathname')]
)
def load_review_table(pathname):
if pathname != "/admin":
return None
response = requests.get(f"{config.API_URL}/reviews")
reviews = pd.DataFrame(response.json())
table = dbc.Table.from_dataframe(reviews,
striped=True,
bordered=True,
hover=True,
responsive=True,
header=["id", "brand", "created_date", "review",
"rating", "suggested_rating", "sentiment_score"],
columns=["id", "brand", "created_date", "review",
"rating", "suggested_rating", "sentiment_score"]
)
return table
# Update page layout
@app.callback(
Output('page-content', 'children'),
[Input('url', 'pathname')]
)
def display_page(pathname):
if pathname == '/':
return home_layout
if pathname == "/admin":
return admin_layout
else:
return [
html.Div(
[
html.Img(
src="./assets/404.png",
style={
"width": "50%"
}
),
],
className="form-review"
),
dcc.Link("Go to Home", href="/"),
]
if __name__ == '__main__':
app.run_server(debug=config.DEBUG, host=config.HOST)
| 1.296875 | 1 |
tests/test_features/test_occurrance_dataset_creation.py | math-sasso/masters_research | 1 | 65412 | from pathlib import Path
from easy_sdm.data.data_loader import ShapefileLoader
from easy_sdm.featuarizer.build_features import OccurrancesDatasetBuilder
from easy_sdm.utils.utils import PathUtils
def extract_occurances(species_shapefile_path: Path):
processed_rasters_dirpath = PathUtils.dir_path(Path.cwd() / "data/processed_rasters/standarized_rasters")
species_shapefile_path = PathUtils.file_path(species_shapefile_path)
raster_paths_list = PathUtils.get_rasters_filepaths_in_dir(
processed_rasters_dirpath
)
occ_dst_builder = OccurrancesDatasetBuilder(raster_paths_list)
df = occ_dst_builder.build(
ShapefileLoader().read_geodataframe(species_shapefile_path)
)
assert(df.shape[0]>0)
assert(df.index.names == ['lat', 'lon']) | 1.742188 | 2 |
sdk/python/kfp/compiler_cli_tests/test_data/pipeline_with_condition.py | k-gupta/pipelines | 0 | 65540 | <gh_stars>0
# Copyright 2021 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kfp import components
from kfp import dsl
from kfp import compiler
from kfp.dsl import component
@component
def flip_coin_op() -> str:
"""Flip a coin and output heads or tails randomly."""
import random
result = 'heads' if random.randint(0, 1) == 0 else 'tails'
return result
@component
def print_op(msg: str):
"""Print a message."""
print(msg)
@dsl.pipeline(name='single-condition-pipeline', pipeline_root='dummy_root')
def my_pipeline(text: str = 'condition test'):
flip1 = flip_coin_op().set_caching_options(False)
print_op(msg=flip1.output)
with dsl.Condition(flip1.output == 'heads'):
flip2 = flip_coin_op().set_caching_options(False)
print_op(msg=flip2.output)
print_op(msg=text)
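# Note: the tasks created inside the dsl.Condition block above run only when
# flip1 outputs 'heads'; print_op(msg=text) runs unconditionally.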
if __name__ == '__main__':
compiler.Compiler().compile(
pipeline_func=my_pipeline,
package_path=__file__.replace('.py', '.json'))
| 1.609375 | 2 |
lectures/lecture00/code/helloWorldBroke.py | mateusza/Introduction-to-Python-Numerical-Analysis-for-Engineers-and-Scientist | 101 | 65668 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
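# NOTE: the next line is the deliberate bug this lecture file demonstrates --
# a Python 2 print statement, which is a SyntaxError once print_function is imported.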
print 'hello world' | 0.277344 | 0 |
octavia/tests/unit/api/drivers/amphora_driver/v1/test_amphora_driver.py | johnsom/octavia | 0 | 65796 | <reponame>johnsom/octavia<gh_stars>0
# Copyright 2018 Rackspace, US Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from octavia_lib.api.drivers import data_models as driver_dm
from octavia_lib.api.drivers import exceptions
from oslo_utils import uuidutils
from octavia.api.drivers.amphora_driver.v1 import driver
from octavia.common import constants as consts
from octavia.network import base as network_base
from octavia.tests.common import sample_data_models
from octavia.tests.unit import base
class TestAmphoraDriver(base.TestRpc):
def setUp(self):
super(TestAmphoraDriver, self).setUp()
self.amp_driver = driver.AmphoraProviderDriver()
self.sample_data = sample_data_models.SampleDriverDataModels()
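    # The tests below share one pattern: call a driver method, then assert that
    # the expected RPC cast (method name plus payload dict) was issued.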
@mock.patch('octavia.common.utils.get_network_driver')
def test_create_vip_port(self, mock_get_net_driver):
mock_net_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_net_driver
mock_net_driver.allocate_vip.return_value = self.sample_data.db_vip
provider_vip_dict = self.amp_driver.create_vip_port(
self.sample_data.lb_id, self.sample_data.project_id,
self.sample_data.provider_vip_dict)
self.assertEqual(self.sample_data.provider_vip_dict, provider_vip_dict)
@mock.patch('octavia.common.utils.get_network_driver')
def test_create_vip_port_failed(self, mock_get_net_driver):
mock_net_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_net_driver
mock_net_driver.allocate_vip.side_effect = (
network_base.AllocateVIPException())
self.assertRaises(exceptions.DriverError,
self.amp_driver.create_vip_port,
self.sample_data.lb_id, self.sample_data.project_id,
self.sample_data.provider_vip_dict)
# Load Balancer
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_create(self, mock_cast):
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
self.amp_driver.loadbalancer_create(provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
consts.FLAVOR: None,
consts.AVAILABILITY_ZONE: None}
mock_cast.assert_called_with({}, 'create_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_delete(self, mock_cast):
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
self.amp_driver.loadbalancer_delete(provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
'cascade': False}
mock_cast.assert_called_with({}, 'delete_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_failover(self, mock_cast):
self.amp_driver.loadbalancer_failover(self.sample_data.lb_id)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id}
mock_cast.assert_called_with({}, 'failover_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_update(self, mock_cast):
old_provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id, admin_state_up=True)
lb_dict = {'enabled': True}
self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
consts.LOAD_BALANCER_UPDATES: lb_dict}
mock_cast.assert_called_with({}, 'update_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_update_name(self, mock_cast):
old_provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id, name='Great LB')
lb_dict = {'name': 'Great LB'}
self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
consts.LOAD_BALANCER_UPDATES: lb_dict}
mock_cast.assert_called_with({}, 'update_load_balancer', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_loadbalancer_update_qos(self, mock_cast):
qos_policy_id = uuidutils.generate_uuid()
old_provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id)
provider_lb = driver_dm.LoadBalancer(
loadbalancer_id=self.sample_data.lb_id,
vip_qos_policy_id=qos_policy_id)
lb_dict = {'vip': {'qos_policy_id': qos_policy_id}}
self.amp_driver.loadbalancer_update(old_provider_lb, provider_lb)
payload = {consts.LOAD_BALANCER_ID: self.sample_data.lb_id,
consts.LOAD_BALANCER_UPDATES: lb_dict}
mock_cast.assert_called_with({}, 'update_load_balancer', **payload)
# Listener
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_create(self, mock_cast):
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id)
self.amp_driver.listener_create(provider_listener)
payload = {consts.LISTENER_ID: self.sample_data.listener1_id}
mock_cast.assert_called_with({}, 'create_listener', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_delete(self, mock_cast):
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id)
self.amp_driver.listener_delete(provider_listener)
payload = {consts.LISTENER_ID: self.sample_data.listener1_id}
mock_cast.assert_called_with({}, 'delete_listener', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_update(self, mock_cast):
old_provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id)
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id, admin_state_up=False)
listener_dict = {'enabled': False}
self.amp_driver.listener_update(old_provider_listener,
provider_listener)
payload = {consts.LISTENER_ID: self.sample_data.listener1_id,
consts.LISTENER_UPDATES: listener_dict}
mock_cast.assert_called_with({}, 'update_listener', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_listener_update_name(self, mock_cast):
old_provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id)
provider_listener = driver_dm.Listener(
listener_id=self.sample_data.listener1_id, name='Great Listener')
listener_dict = {'name': 'Great Listener'}
self.amp_driver.listener_update(old_provider_listener,
provider_listener)
payload = {consts.LISTENER_ID: self.sample_data.listener1_id,
consts.LISTENER_UPDATES: listener_dict}
mock_cast.assert_called_with({}, 'update_listener', **payload)
# Pool
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_create(self, mock_cast):
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id,
lb_algorithm=consts.LB_ALGORITHM_ROUND_ROBIN)
self.amp_driver.pool_create(provider_pool)
payload = {consts.POOL_ID: self.sample_data.pool1_id}
mock_cast.assert_called_with({}, 'create_pool', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_create_unsupported_algorithm(self, mock_cast):
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool.lb_algorithm = 'foo'
self.assertRaises(
exceptions.UnsupportedOptionError,
self.amp_driver.pool_create,
provider_pool)
mock_cast.assert_not_called()
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_delete(self, mock_cast):
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
self.amp_driver.pool_delete(provider_pool)
payload = {consts.POOL_ID: self.sample_data.pool1_id}
mock_cast.assert_called_with({}, 'delete_pool', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_update(self, mock_cast):
old_provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id, admin_state_up=True)
pool_dict = {'enabled': True}
self.amp_driver.pool_update(old_provider_pool, provider_pool)
payload = {consts.POOL_ID: self.sample_data.pool1_id,
consts.POOL_UPDATES: pool_dict}
mock_cast.assert_called_with({}, 'update_pool', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_update_name(self, mock_cast):
old_provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id, name='Great pool',
admin_state_up=True, tls_enabled=True)
pool_dict = {'name': 'Great pool',
'enabled': True,
'tls_enabled': True}
self.amp_driver.pool_update(old_provider_pool, provider_pool)
payload = {consts.POOL_ID: self.sample_data.pool1_id,
consts.POOL_UPDATES: pool_dict}
mock_cast.assert_called_with({}, 'update_pool', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_pool_update_unsupported_algorithm(self, mock_cast):
old_provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool = driver_dm.Pool(
pool_id=self.sample_data.pool1_id)
provider_pool.lb_algorithm = 'foo'
self.assertRaises(
exceptions.UnsupportedOptionError,
self.amp_driver.pool_update,
old_provider_pool,
provider_pool)
mock_cast.assert_not_called()
# Member
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_create(self, mock_cast, mock_pool_get, mock_session):
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id)
self.amp_driver.member_create(provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id}
mock_cast.assert_called_with({}, 'create_member', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_create_udp_ipv4(self, mock_cast, mock_pool_get,
mock_session):
mock_lb = mock.MagicMock()
mock_lb.vip = mock.MagicMock()
mock_lb.vip.ip_address = "172.16.31.10"
mock_listener = mock.MagicMock()
mock_listener.load_balancer = mock_lb
mock_pool = mock.MagicMock()
mock_pool.protocol = consts.PROTOCOL_UDP
mock_pool.listeners = [mock_listener]
mock_pool_get.return_value = mock_pool
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id,
address="192.0.2.1")
self.amp_driver.member_create(provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id}
mock_cast.assert_called_with({}, 'create_member', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_create_udp_ipv4_ipv6(self, mock_cast, mock_pool_get,
mock_session):
mock_lb = mock.MagicMock()
mock_lb.vip = mock.MagicMock()
mock_lb.vip.ip_address = "fe80::1"
mock_listener = mock.MagicMock()
mock_listener.load_balancer = mock_lb
mock_pool = mock.MagicMock()
mock_pool.protocol = consts.PROTOCOL_UDP
mock_pool.listeners = [mock_listener]
mock_pool_get.return_value = mock_pool
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id,
address="192.0.2.1")
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.member_create,
provider_member)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_delete(self, mock_cast):
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id)
self.amp_driver.member_delete(provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id}
mock_cast.assert_called_with({}, 'delete_member', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_update(self, mock_cast):
old_provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id)
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id, admin_state_up=True)
member_dict = {'enabled': True}
self.amp_driver.member_update(old_provider_member, provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id,
consts.MEMBER_UPDATES: member_dict}
mock_cast.assert_called_with({}, 'update_member', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_update_name(self, mock_cast):
old_provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id)
provider_member = driver_dm.Member(
member_id=self.sample_data.member1_id, name='Great member')
member_dict = {'name': 'Great member'}
self.amp_driver.member_update(old_provider_member, provider_member)
payload = {consts.MEMBER_ID: self.sample_data.member1_id,
consts.MEMBER_UPDATES: member_dict}
mock_cast.assert_called_with({}, 'update_member', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update(self, mock_cast, mock_pool_get, mock_session):
mock_pool = mock.MagicMock()
mock_pool.members = self.sample_data.db_pool1_members
mock_pool_get.return_value = mock_pool
prov_mem_update = driver_dm.Member(
member_id=self.sample_data.member2_id,
pool_id=self.sample_data.pool1_id, admin_state_up=False,
address='192.0.2.17', monitor_address='192.0.2.77',
protocol_port=80, name='updated-member2')
prov_new_member = driver_dm.Member(
member_id=self.sample_data.member3_id,
pool_id=self.sample_data.pool1_id,
address='192.0.2.18', monitor_address='192.0.2.28',
protocol_port=80, name='member3')
prov_members = [prov_mem_update, prov_new_member]
update_mem_dict = {'ip_address': '192.0.2.17',
'name': 'updated-member2',
'monitor_address': '192.0.2.77',
'id': self.sample_data.member2_id,
'enabled': False,
'protocol_port': 80,
'pool_id': self.sample_data.pool1_id}
self.amp_driver.member_batch_update(
self.sample_data.pool1_id, prov_members)
payload = {'old_member_ids': [self.sample_data.member1_id],
'new_member_ids': [self.sample_data.member3_id],
'updated_members': [update_mem_dict]}
mock_cast.assert_called_with({}, 'batch_update_members', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update_no_admin_addr(self, mock_cast,
mock_pool_get, mock_session):
mock_pool = mock.MagicMock()
mock_pool.members = self.sample_data.db_pool1_members
mock_pool_get.return_value = mock_pool
prov_mem_update = driver_dm.Member(
member_id=self.sample_data.member2_id,
pool_id=self.sample_data.pool1_id,
monitor_address='192.0.2.77',
protocol_port=80, name='updated-member2')
prov_new_member = driver_dm.Member(
member_id=self.sample_data.member3_id,
pool_id=self.sample_data.pool1_id,
address='192.0.2.18', monitor_address='192.0.2.28',
protocol_port=80, name='member3')
prov_members = [prov_mem_update, prov_new_member]
update_mem_dict = {'name': 'updated-member2',
'monitor_address': '192.0.2.77',
'id': self.sample_data.member2_id,
'protocol_port': 80,
'pool_id': self.sample_data.pool1_id}
self.amp_driver.member_batch_update(
self.sample_data.pool1_id, prov_members)
payload = {'old_member_ids': [self.sample_data.member1_id],
'new_member_ids': [self.sample_data.member3_id],
'updated_members': [update_mem_dict]}
mock_cast.assert_called_with({}, 'batch_update_members', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update_clear_already_empty(
self, mock_cast, mock_pool_get, mock_session):
mock_pool = mock.MagicMock()
mock_pool_get.return_value = mock_pool
self.amp_driver.member_batch_update(
self.sample_data.pool1_id, [])
mock_cast.assert_not_called()
# Health Monitor
@mock.patch('oslo_messaging.RPCClient.cast')
def test_health_monitor_create(self, mock_cast):
provider_HM = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id)
self.amp_driver.health_monitor_create(provider_HM)
payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id}
mock_cast.assert_called_with({}, 'create_health_monitor', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_health_monitor_delete(self, mock_cast):
provider_HM = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id)
self.amp_driver.health_monitor_delete(provider_HM)
payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id}
mock_cast.assert_called_with({}, 'delete_health_monitor', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update_udp_ipv4(self, mock_cast, mock_pool_get,
mock_session):
mock_lb = mock.MagicMock()
mock_lb.vip = mock.MagicMock()
mock_lb.vip.ip_address = "172.16.31.10"
mock_listener = mock.MagicMock()
mock_listener.load_balancer = mock_lb
mock_pool = mock.MagicMock()
mock_pool.protocol = consts.PROTOCOL_UDP
mock_pool.listeners = [mock_listener]
mock_pool.members = self.sample_data.db_pool1_members
mock_pool_get.return_value = mock_pool
prov_mem_update = driver_dm.Member(
member_id=self.sample_data.member2_id,
pool_id=self.sample_data.pool1_id, admin_state_up=False,
address='192.0.2.17', monitor_address='192.0.2.77',
protocol_port=80, name='updated-member2')
prov_new_member = driver_dm.Member(
member_id=self.sample_data.member3_id,
pool_id=self.sample_data.pool1_id,
address='192.0.2.18', monitor_address='192.0.2.28',
protocol_port=80, name='member3')
prov_members = [prov_mem_update, prov_new_member]
update_mem_dict = {'ip_address': '192.0.2.17',
'name': 'updated-member2',
'monitor_address': '192.0.2.77',
'id': self.sample_data.member2_id,
'enabled': False,
'protocol_port': 80,
'pool_id': self.sample_data.pool1_id}
self.amp_driver.member_batch_update(
self.sample_data.pool1_id, prov_members)
payload = {'old_member_ids': [self.sample_data.member1_id],
'new_member_ids': [self.sample_data.member3_id],
'updated_members': [update_mem_dict]}
mock_cast.assert_called_with({}, 'batch_update_members', **payload)
@mock.patch('octavia.db.api.get_session')
@mock.patch('octavia.db.repositories.PoolRepository.get')
@mock.patch('oslo_messaging.RPCClient.cast')
def test_member_batch_update_udp_ipv4_ipv6(self, mock_cast, mock_pool_get,
mock_session):
mock_lb = mock.MagicMock()
mock_lb.vip = mock.MagicMock()
mock_lb.vip.ip_address = "172.16.31.10"
mock_listener = mock.MagicMock()
mock_listener.load_balancer = mock_lb
mock_pool = mock.MagicMock()
mock_pool.protocol = consts.PROTOCOL_UDP
mock_pool.listeners = [mock_listener]
mock_pool.members = self.sample_data.db_pool1_members
mock_pool_get.return_value = mock_pool
prov_mem_update = driver_dm.Member(
member_id=self.sample_data.member2_id,
pool_id=self.sample_data.pool1_id, admin_state_up=False,
address='fe80::1', monitor_address='fe80::2',
protocol_port=80, name='updated-member2')
prov_new_member = driver_dm.Member(
member_id=self.sample_data.member3_id,
pool_id=self.sample_data.pool1_id,
address='192.0.2.18', monitor_address='192.0.2.28',
protocol_port=80, name='member3')
prov_members = [prov_mem_update, prov_new_member]
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.member_batch_update,
self.sample_data.pool1_id, prov_members)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_health_monitor_update(self, mock_cast):
old_provider_hm = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id)
provider_hm = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id, admin_state_up=True,
max_retries=1, max_retries_down=2)
hm_dict = {'enabled': True, 'rise_threshold': 1, 'fall_threshold': 2}
self.amp_driver.health_monitor_update(old_provider_hm, provider_hm)
payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id,
consts.HEALTH_MONITOR_UPDATES: hm_dict}
mock_cast.assert_called_with({}, 'update_health_monitor', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_health_monitor_update_name(self, mock_cast):
old_provider_hm = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id)
provider_hm = driver_dm.HealthMonitor(
healthmonitor_id=self.sample_data.hm1_id, name='Great HM')
hm_dict = {'name': 'Great HM'}
self.amp_driver.health_monitor_update(old_provider_hm, provider_hm)
payload = {consts.HEALTH_MONITOR_ID: self.sample_data.hm1_id,
consts.HEALTH_MONITOR_UPDATES: hm_dict}
mock_cast.assert_called_with({}, 'update_health_monitor', **payload)
# L7 Policy
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7policy_create(self, mock_cast):
provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id)
self.amp_driver.l7policy_create(provider_l7policy)
payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id}
mock_cast.assert_called_with({}, 'create_l7policy', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7policy_delete(self, mock_cast):
provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id)
self.amp_driver.l7policy_delete(provider_l7policy)
payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id}
mock_cast.assert_called_with({}, 'delete_l7policy', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7policy_update(self, mock_cast):
old_provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id)
provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id, admin_state_up=True)
l7policy_dict = {'enabled': True}
self.amp_driver.l7policy_update(old_provider_l7policy,
provider_l7policy)
payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id,
consts.L7POLICY_UPDATES: l7policy_dict}
mock_cast.assert_called_with({}, 'update_l7policy', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7policy_update_name(self, mock_cast):
old_provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id)
provider_l7policy = driver_dm.L7Policy(
l7policy_id=self.sample_data.l7policy1_id, name='Great L7Policy')
l7policy_dict = {'name': 'Great L7Policy'}
self.amp_driver.l7policy_update(old_provider_l7policy,
provider_l7policy)
payload = {consts.L7POLICY_ID: self.sample_data.l7policy1_id,
consts.L7POLICY_UPDATES: l7policy_dict}
mock_cast.assert_called_with({}, 'update_l7policy', **payload)
# L7 Rules
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7rule_create(self, mock_cast):
provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id)
self.amp_driver.l7rule_create(provider_l7rule)
payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id}
mock_cast.assert_called_with({}, 'create_l7rule', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7rule_delete(self, mock_cast):
provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id)
self.amp_driver.l7rule_delete(provider_l7rule)
payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id}
mock_cast.assert_called_with({}, 'delete_l7rule', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7rule_update(self, mock_cast):
old_provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id)
provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id, admin_state_up=True)
l7rule_dict = {'enabled': True}
self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule)
payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id,
consts.L7RULE_UPDATES: l7rule_dict}
mock_cast.assert_called_with({}, 'update_l7rule', **payload)
@mock.patch('oslo_messaging.RPCClient.cast')
def test_l7rule_update_invert(self, mock_cast):
old_provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id)
provider_l7rule = driver_dm.L7Rule(
l7rule_id=self.sample_data.l7rule1_id, invert=True)
l7rule_dict = {'invert': True}
self.amp_driver.l7rule_update(old_provider_l7rule, provider_l7rule)
payload = {consts.L7RULE_ID: self.sample_data.l7rule1_id,
consts.L7RULE_UPDATES: l7rule_dict}
mock_cast.assert_called_with({}, 'update_l7rule', **payload)
# Flavor
def test_get_supported_flavor_metadata(self):
test_schema = {
"properties": {
"test_name": {"description": "Test description"},
"test_name2": {"description": "Another description"}}}
ref_dict = {"test_name": "Test description",
"test_name2": "Another description"}
# mock out the supported_flavor_metadata
with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.'
'SUPPORTED_FLAVOR_SCHEMA', test_schema):
result = self.amp_driver.get_supported_flavor_metadata()
self.assertEqual(ref_dict, result)
# Test for bad schema
with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.'
'SUPPORTED_FLAVOR_SCHEMA', 'bogus'):
self.assertRaises(exceptions.DriverError,
self.amp_driver.get_supported_flavor_metadata)
def test_validate_flavor(self):
ref_dict = {consts.LOADBALANCER_TOPOLOGY: consts.TOPOLOGY_SINGLE}
self.amp_driver.validate_flavor(ref_dict)
# Test bad flavor metadata value is bad
ref_dict = {consts.LOADBALANCER_TOPOLOGY: 'bogus'}
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.validate_flavor,
ref_dict)
# Test bad flavor metadata key
ref_dict = {'bogus': 'bogus'}
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.validate_flavor,
ref_dict)
# Test for bad schema
with mock.patch('octavia.api.drivers.amphora_driver.flavor_schema.'
'SUPPORTED_FLAVOR_SCHEMA', 'bogus'):
self.assertRaises(exceptions.DriverError,
self.amp_driver.validate_flavor, 'bogus')
# Availability Zone
def test_get_supported_availability_zone_metadata(self):
test_schema = {
"properties": {
"test_name": {"description": "Test description"},
"test_name2": {"description": "Another description"}}}
ref_dict = {"test_name": "Test description",
"test_name2": "Another description"}
# mock out the supported_availability_zone_metadata
with mock.patch('octavia.api.drivers.amphora_driver.'
'availability_zone_schema.'
'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', test_schema):
result = self.amp_driver.get_supported_availability_zone_metadata()
self.assertEqual(ref_dict, result)
# Test for bad schema
with mock.patch('octavia.api.drivers.amphora_driver.'
'availability_zone_schema.'
'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', 'bogus'):
self.assertRaises(
exceptions.DriverError,
self.amp_driver.get_supported_availability_zone_metadata)
def test_validate_availability_zone(self):
ref_dict = {consts.COMPUTE_ZONE: 'my_compute_zone'}
self.amp_driver.validate_availability_zone(ref_dict)
# Test bad availability zone metadata key
ref_dict = {'bogus': 'bogus'}
self.assertRaises(exceptions.UnsupportedOptionError,
self.amp_driver.validate_availability_zone,
ref_dict)
# Test for bad schema
with mock.patch('octavia.api.drivers.amphora_driver.'
'availability_zone_schema.'
'SUPPORTED_AVAILABILITY_ZONE_SCHEMA', 'bogus'):
self.assertRaises(exceptions.DriverError,
self.amp_driver.validate_availability_zone,
'bogus')
| 1.460938 | 1 |
artssat/atmosphere/absorption/__init__.py | simonpf/pARTS | 3 | 65924 | <filename>artssat/atmosphere/absorption/__init__.py
"""
Absorption
==========
"""
from abc import ABCMeta, abstractmethod, abstractproperty
from artssat.atmosphere.atmospheric_quantity \
import AtmosphericQuantity, extend_dimensions
from artssat.arts_object import ArtsObject, arts_property
from artssat.jacobian import JacobianBase
from artssat.retrieval import RetrievalBase, RetrievalQuantity
import numpy as np
from pyarts.workspace import arts_agenda
from typhon.physics.atmosphere import vmr2relative_humidity, \
relative_humidity2vmr
################################################################################
# Retrieval units
################################################################################
class Unit(metaclass = ABCMeta):
"""
Abstract base class for classes representing units used for the calculation
of Jacobians and retrievals of absorption species.
"""
    def __init__(self):
pass
@abstractmethod
def to_arts(self, ws, x):
pass
@abstractmethod
def from_arts(self, ws, x):
pass
@abstractproperty
def arts_name(self):
pass
class VMR(Unit):
"""
VMR is the default unit used for absorption species in ARTS. If this unit
is used value from the state vector are plugged in as they are into
the ARTS vmr field.
"""
def __init__(self):
pass
def to_arts(self, ws, x):
"""
Does nothing.
"""
return x
def from_arts(self, ws, x):
"""
Does nothing.
"""
return x
@property
def arts_name(self):
return "vmr"
class Relative(Unit):
"""
In relative units, the amount of a quantity is specified relative to a
reference profile or field.
If this unit is used in a Jacobian calculation, then the Jacobian is
calculated w.r.t. a relative perturbation.
If this unit is used in the retrieval, values in the state vector are
interpreted as multiplicative perturbations of the reference profile
or field.
"""
def __init__(self, x_ref):
self.x_ref = x_ref
def to_arts(self, ws, x):
return self.x_ref * x
def from_arts(self, ws, y):
return y / self.x_ref
@property
def arts_name(self):
return "rel"
class RelativeHumidity(Unit):
"""
Relative humidity is available only for the retrieval of H2O.
"""
def __init__(self):
pass
def to_arts(self, ws, rh):
"""
Converts value given in relative humidity units to ARTS vmr units.
Arguments:
ws(:code:`pyarts.workspace.Workspace`): Workspace object
which contains pressure grid and temperature field required
for the converstion.
rh(:code:`numpy.ndarray`): Relative humidity values to convert.
Returns:
:code:`numpy.ndarray` containing the converted RH values.
"""
p = ws.p_grid.value.reshape(-1, 1, 1)
t = ws.t_field.value
vmr = relative_humidity2vmr(rh, p, t)
return vmr
def from_arts(self, ws, vmr):
"""
Converts a value given in ARTS vmr units back to relative humidity.
Arguments:
ws(:code:`pyarts.workspace.Workspace`): Workspace object
which contains pressure grid and temperature field required
for the conversion.
vmr(:code:`numpy.ndarray`): Values in ARTS vmr units to convert
to relative humidity.
Returns:
:code:`numpy.ndarray` containing the converted RH values.
"""
p = ws.p_grid.value.reshape(-1, 1, 1)
t = ws.t_field.value
rh = vmr2relative_humidity(vmr, p, t)
return rh
@property
def arts_name(self):
return "rh"
################################################################################
# The Jacobian class
################################################################################
class Jacobian(JacobianBase, ArtsObject):
@arts_property("Numeric")
def perturbation(self):
return 0.01
def __init__(self, quantity, index):
JacobianBase.__init__(self, quantity, index)
ArtsObject.__init__(self)
self.unit = VMR()
self.for_species_tag = 1
def _make_setup_kwargs(self, ws):
kwargs = self.get_grids(ws)
kwargs.update({"species" : self.quantity.get_tag_string(),
"unit" : self.unit.arts_name,
"for_species_tag" : self.for_species_tag})
return kwargs
def setup(self, ws):
kwargs = self._make_setup_kwargs(ws)
ws.jacobianAddAbsSpecies(**kwargs)
class Retrieval(RetrievalBase, Jacobian):
def __init__(self, quantity, index):
super().__init__(quantity, index)
def add(self, ws):
ws.retrievalAddAbsSpecies(**self._make_setup_kwargs(ws))
class AbsorptionSpecies(AtmosphericQuantity, RetrievalQuantity):
def __init__(self,
name,
from_catalog = False,
cia = None,
frequency_range = None,
isotopologues = None,
model = None,
on_the_fly = True,
zeeman = False,
lineshape = "no_shape",
normalization = "no_norm",
cutoff = -1,
cutoff_type = "ByBand"):
AtmosphericQuantity.__init__(self,
name,
(0, 0, 0))
RetrievalQuantity.__init__(self)
self._dimensions = (0, 0, 0)
self._from_catalog = from_catalog
self._cia = cia
self._frequency_range = frequency_range
self._isotopologues = isotopologues
self._jacobian = None
self._model = model
self._on_the_fly = on_the_fly
self._retrieval = None
self._zeeman = zeeman
self._lineshape = lineshape
self._normalization = normalization
self._cutoff = cutoff
self._cutoff_type = cutoff_type
#
# Abstract properties
#
def dimensions(self):
return self._dimensions
#
# Properties
#
@property
def from_catalog(self):
return self._from_catalog
@property
def cia(self):
return self._cia
@property
def isotopologues(self):
return self._isotopologues
@property
def model(self):
return self._model
@property
def frequency_range(self):
return self._frequency_range
@property
def on_the_fly(self):
return self._on_the_fly
@property
def zeeman(self):
return self._zeeman
@property
def lineshape(self):
return self._lineshape
@property
def cutoff(self):
return self._cutoff
@property
def cutoff_type(self):
return self._cutoff_type
@property
def normalization(self):
return self._normalization
def _get_tag_string(self,
zeeman = False,
isotopologue = None,
model = None,
frequency_range = None):
ts = self._name
ts += "-"
if zeeman:
ts += "Z"
ts += "-"
if not isotopologue is None:
            ts += isotopologue
ts += "-"
if not model is None:
ts += model
ts += "-"
if not frequency_range is None:
ts += frequency_range[0]
ts += "-"
ts += frequency_range[1]
return ts
def get_tag_string(self):
tss = []
if self.from_catalog:
tss += [self._name]
z = self._zeeman
if type(self._isotopologues) is list:
isotopologues = self._isotopologues
else:
isotopologues = [self._isotopologues]
if type(self._model) is list:
models = self._model
else:
models = [self._model]
if type(self._frequency_range) is list:
frequency_ranges = self._frequency_range
else:
frequency_ranges = [self._frequency_range]
for i in isotopologues:
for m in models:
for fr in frequency_ranges:
tss += [self._get_tag_string(z, i, m, fr)]
return ",".join(tss)
#
# Jacobian & retrieval
#
@property
def jacobian_class(self):
return Jacobian
@property
def retrieval_class(self):
return Retrieval
def set_from_x(self, ws, x):
grids = [ws.p_grid.value, ws.lat_grid.value, ws.lon_grid.value]
grids = [g for g in grids if g.size > 0]
x = self.transformation.invert(x)
x = self.retrieval.interpolate_to_grids(x, grids)
x = x.reshape(ws.vmr_field.value.shape[1:])
unit = self.retrieval.unit
x = unit.to_arts(ws, x)
if self._wsv_index is None:
raise Exception("Absorber's wsv_index is unknown. This is likely "
"its setup(...) routine has not been called.")
ws.vmr_field.value[self._wsv_index, :, :, :] = x
#
# Retrieval
#
@property
def retrieved(self):
return not self._retrieval is None
#
# Abstract methods
#
def setup(self, ws, i):
self._wsv_index = i
def get_data(self, ws, provider, *args, **kwargs):
if not self.retrieved:
dimensions = ws.t_field.shape
f = provider.__getattribute__("get_" + self.name)
x = f(*args, **kwargs)
x = extend_dimensions(x)
if not x.shape == dimensions:
raise Exception("Shape of {0} field is inconsistent with "
"the dimensions of the atmosphere."
.format(self.name))
ws.vmr_field.value[self._wsv_index, :, :, :] = x
class H2O(AbsorptionSpecies):
def __init__(self,
from_catalog = False,
cia = None,
frequency_range = None,
isotopologues = None,
model = "PWR98",
on_the_fly = True,
zeeman = False,
lineshape = "VP",
normalization = "VVW",
cutoff = -1,
cutoff_type = "ByBand"):
super().__init__("H2O",
from_catalog = from_catalog,
cia = cia,
frequency_range = frequency_range,
isotopologues = isotopologues,
model = model,
on_the_fly = on_the_fly,
zeeman = zeeman,
lineshape = lineshape,
normalization = normalization,
cutoff = cutoff,
cutoff_type = cutoff_type)
class N2(AbsorptionSpecies):
def __init__(self,
from_catalog = False,
cia = None,
frequency_range = None,
isotopologues = None,
model = "SelfContStandardType",
on_the_fly = True,
zeeman = False,
lineshape = "VP",
normalization = "VVH",
cutoff = -1,
cutoff_type = "ByBand"):
super().__init__("N2",
from_catalog = from_catalog,
cia = cia,
frequency_range = frequency_range,
isotopologues = isotopologues,
model = model,
on_the_fly = on_the_fly,
zeeman = zeeman,
lineshape = lineshape,
normalization = normalization,
cutoff = cutoff,
cutoff_type = cutoff_type)
class O2(AbsorptionSpecies):
def __init__(self,
from_catalog = False,
cia = None,
frequency_range = None,
isotopologues = None,
model = "PWR93",
on_the_fly = True,
zeeman = False,
lineshape = "VP",
normalization = "VVW",
cutoff = -1,
cutoff_type = "ByBand"):
super().__init__("O2",
from_catalog = from_catalog,
cia = cia,
frequency_range = frequency_range,
isotopologues = isotopologues,
model = model,
on_the_fly = on_the_fly,
zeeman = zeeman,
lineshape = lineshape,
normalization = normalization,
cutoff = cutoff,
cutoff_type = cutoff_type)
class CloudWater(AbsorptionSpecies):
def __init__(self,
from_catalog = False,
cia = None,
frequency_range = None,
isotopologues = None,
model = "MPM93",
on_the_fly = True,
zeeman = False,
lineshape = "VP",
normalization = "RQ",
cutoff = -1,
cutoff_type = "ByBand"):
super().__init__("cloud_water",
from_catalog = from_catalog,
cia = cia,
frequency_range = frequency_range,
isotopologues = isotopologues,
model = model,
on_the_fly = on_the_fly,
zeeman = zeeman,
lineshape = lineshape,
normalization = normalization,
cutoff = cutoff,
cutoff_type = cutoff_type)
def get_tag_string(self):
ts = "liquidcloud"
ts += "-"
if self._model:
ts += self._model
ts += "-"
return ts
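if __name__ == "__main__":
    # Minimal usage sketch (default settings are assumed, and the `name`
    # property inherited from AbsorptionSpecies is relied on): print the ARTS
    # tag strings that two common species expand to.
    for species in [H2O(), O2(zeeman=True)]:
        print(species.name, "->", species.get_tag_string())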
| 1.390625 | 1 |
src/simmate/toolkit/diffusion/migration_images.py | jacksund/simmate | 9 | 66052 | # -*- coding: utf-8 -*-
import numpy
from simmate.toolkit import Structure
from pymatgen.analysis.diffusion.neb.pathfinder import (
DistinctPathFinder,
MigrationHop as PymatgenMigrationHop,
IDPPSolver,
)
from typing import List
class MigrationImages(list):
"""
This class is just a list of structures for a diffusion pathway. It has
utility methods to help create these structures but otherwise behaves
exactly like a python list.
Note, this class is primarily used to generate inputs for calculations. If
you'd like more advanced features, you should represent your diffusion
    pathway as a MigrationHop instead. As a rule of thumb: only use this class
if you are manually creating your pathway from endpoint supercells or from
a set of supercell images.
All MigrationHop's can be converted to MigrationImages (using the
`from_migration_hop` method); but not all MigrationImages can be converted
to MigrationHops.
"""
def __init__(self, structures: List[Structure]):
# This init function does nothing except apply typing -- specifically,
# it says that it expects a list of structures.
super().__init__(structures)
def get_sum_structure(self, tolerance: float = 1e-3):
"""
Takes all structures and combines them into one. Atoms that are within
the given tolerance are joined into a single site.
This is primarily used to view a diffusing pathway within a single
structure -- as well as how the host lattice changes during diffusion.
If you are able to convert your pathway to a MigrationHop, the
MigrationHop.write_path() method is much faster and cleaner than this
        method, so it should be preferred. Also, because there are many atoms
that are overlapping here, the output structure may cause programs
like VESTA to crash.
#### Parameters
- `tolerance`:
            the relative and absolute tolerance passed to numpy.isclose when
            deciding whether fractional coordinates match. Matching sites are
            merged into a single site in the final sum structure.
"""
# OPTIMIZE: this is very inefficient. It's much faster to visualize
# structures with MigrationHop class because you know which atom is
# moving. Here, we need to treat all atoms as moving. We can also
# speed this up by only looking at diffusing species too.
final_coords = []
final_species = []
for structure in self: # recall self is a list of structures
for site in structure:
is_new = True
for coords in final_coords:
if all(
numpy.isclose(
site.frac_coords,
coords,
rtol=tolerance,
atol=tolerance,
)
):
is_new = False
break
if is_new:
final_coords.append(site.frac_coords)
final_species.append(site.specie)
structure = Structure(
lattice=structure.lattice,
species=final_species,
coords=final_coords,
)
return structure
@staticmethod
def get_nimages(
pathway_length: float,
min_image_step: float = 0.7,
require_midpoint: bool = True,
):
"""
Gives the desirable number of images (not including start/end structures).
This method helps generate a MigrationImages object, and typically is
not called directly. The other classmethods of MigrationImages call
this for you.
#### Parameters
- `pathway_length`:
The length of the pathway.
- `min_image_step`:
The minimum step distance for the diffusing atom between images.
The default is 0.7 Angstroms. For example, a path 2.8A long would
require at least 4 images for this default.
- `require_midpoint`:
Whether there should be an image at the midpoint. In other words,
whether the number of images should be odd. This is often important
if you expect the transition state to be at the midpoint and you are
not running CI-NEB. The default is True.
        #### Returns
- `nimages`:
The number of images to use for this pathway.
"""
# At a minimum, we want to have images be 0.7 angstroms apart, and
# with one additional image.
nimages = pathway_length // min_image_step + 1
# We also want an odd number of images. This ensures we have an image
# at exactly the midpoint, which is often necessary if we aren't
# running CI-NEB.
if require_midpoint and nimages % 2 == 0:
nimages += 1
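        # Worked example (assumed values): pathway_length = 3.0 and
        # min_image_step = 0.7 give 3.0 // 0.7 + 1 = 5 images, which is
        # already odd, so the midpoint image is preserved.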
# This is a float but it makes more sense to have an integer
return int(nimages)
@classmethod
def from_migration_hop(
cls,
migration_hop: PymatgenMigrationHop,
vacancy_mode: bool = True,
min_nsites: int = 80,
max_nsites: int = 240,
min_length: int = 10,
**kwargs,
):
"""
Creates a MigrationImages object from a MigrationHop object
#### Parameters
- `migration_hop`:
The MigrationHop object that should be converted.
- `vacancy_mode`:
Whether to use single-vacancy diffusion (True) or interstitial
diffusion (False). The default is True.
- `min_nsites`:
The minimum number of sites to have in the supercell structure.
The default is 80.
- `max_nsites`:
The maximum number of sites to have in the supercell structure.
The default is 240.
- `min_length`:
The minimum length for each vector in the supercell structure.
The default is 10 Angstroms.
- `**kwargs`:
Any arguments that are normally accepted by IDPPSolver
"""
# The third thing returned is the bulk_supercell which we don't need.
start_supercell, end_supercell, _ = migration_hop.get_sc_structures(
vac_mode=vacancy_mode,
min_atoms=min_nsites,
max_atoms=max_nsites,
min_length=min_length,
)
# calculate the number of images required
nimages = cls.get_nimages(migration_hop.length)
return cls.from_endpoints(
start_supercell,
end_supercell,
nimages=nimages,
**kwargs,
)
@classmethod
def from_endpoints(
cls,
structure_start: Structure,
structure_end: Structure,
nimages: int,
**kwargs,
):
"""
Creates a MigrationImages object from start and end supercell structures.
You do not need to specify the diffusing atom(s) as all sites are
linearly interpolated and then relaxed by IDPP.
#### Parameters
- `structure_start`:
The starting supercell of the diffusion pathway.
- `structure_end`:
The ending supercell of the diffusion pathway.
- `nimages`:
The number of desired images for the pathway. Note, if you know the
pathway length of your path, you can use the `get_nimages` static
method to get a logical number of images.
- `**kwargs`:
Any arguments that are normally accepted by IDPPSolver
"""
# Run IDPP relaxation on the images before returning them
idpp_solver = IDPPSolver.from_endpoints(
[structure_start, structure_end],
nimages=nimages,
**kwargs,
)
images = idpp_solver.run()
return cls(images)
@classmethod
def from_startend_sites(
cls,
structure: Structure,
site_start: int,
site_end: int,
**kwargs,
):
"""
Creates a MigrationImages object from a bulk structure and start/end
periodic sites of the diffusing atom.
For example, this would allow a diffusion pathway that goes from a site
        at (0,0,0) to (1,1,1). Thus, symmetry and periodic boundary conditions
are considered.
Note, this method just creates a MigrationHop and then uses the
`from_migration_hop` method to make a MigrationImages object.
#### Parameters
- `structure`:
The bulk crystal structure (NOT the supercell).
- `site_start`:
The starting periodic site for this pathway.
- `site_end`:
The end periodic site for this pathway.
- `**kwargs`:
Any arguments that are normally accepted by `from_migration_hop`.
"""
# This information is all we need for a MigrationHop object
pathway = PymatgenMigrationHop(site_start, site_end, structure)
return cls.from_migration_hop(pathway, **kwargs)
@classmethod
def from_structure(
cls,
structure: Structure,
migrating_specie: str,
pathfinder_kwargs: dict = {},
**kwargs,
):
"""
Given a bulk crystal structure, this will find all symmetrically
unique pathways and return them as list of MigrationImages objects.
#### Parameters
- `structure`:
The bulk crystal structure (NOT the supercell).
- `migrating_specie`:
The identity of the diffusing ion (e.g. "Li" or "Li1+"). Note, only
provide oxidation state if you are using an oxidation-state decorated
structure.
- `pathfinder_kwargs`:
Any arguments that are normally accepted by DistinctPathFinder, but
given as a dictionary. The default is {}.
- `**kwargs`:
Any arguments that are normally accepted by `from_migration_hop`.
"""
# convert to the LLL reduced primitive cell to make it as cubic as possible
structure_lll = structure.get_sanitized_structure()
# Use pymatgen to find all the symmetrically unique pathways.
# NOTE: This only finds pathways up until the structure is percolating.
# If you are interested in longer pathways, then this script needs to
# be adjusted by passing additional kwargs
pathfinder = DistinctPathFinder(
structure_lll,
migrating_specie=migrating_specie,
**pathfinder_kwargs,
)
pathways = pathfinder.get_paths()
# Now go through each path and convert to a MigrationPath. We return
# these as a list of paths.
migration_paths = []
for pathway in pathways:
migration_path = cls.from_migration_hop(
migration_hop=pathway,
**kwargs,
)
migration_paths.append(migration_path)
return migration_paths
@classmethod
def from_dynamic(cls, migration_images):
"""
This is an experimental feature. The code here is a repurposing of
        Structure.from_dynamic, so consider making a general class for
from_dynamic methods.
"""
is_from_past_calc = False
# assume any list is in the MigrationHop format if there are more than
# two structures (i.e. there is at least one midpoint image)
if isinstance(migration_images, list) and len(migration_images) > 2:
            migration_images_cleaned = cls(migration_images)
else:
raise Exception("Unknown format provided for migration_images input.")
migration_images_cleaned.is_from_past_calc = is_from_past_calc
return migration_images_cleaned
def as_dict(self):
return [s.as_dict() for s in self]
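if __name__ == "__main__":
    # Minimal usage sketch (the structure file name is an assumption;
    # Structure.from_file is inherited from pymatgen): enumerate the
    # symmetrically unique Li pathways in a bulk structure.
    bulk = Structure.from_file("Li_structure.cif")
    pathways = MigrationImages.from_structure(bulk, migrating_specie="Li")
    print(f"{len(pathways)} unique pathways; first has {len(pathways[0])} images")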
| 2.515625 | 3 |
tests/ops/test_linalg.py | gavincangan/pyro | 2 | 66180 | from __future__ import absolute_import, division, print_function
import pytest
import torch
from pyro.ops.linalg import rinverse
from tests.common import assert_equal
@pytest.mark.parametrize("A", [
torch.tensor([[17.]]),
torch.tensor([[1., 2.], [2., -3.]]),
torch.tensor([[1., 2, 0], [2, -2, 4], [0, 4, 5]]),
torch.tensor([[1., 2, 0, 7], [2, -2, 4, -1], [0, 4, 5, 8], [7, -1, 8, 1]]),
torch.tensor([[1., 2, 0, 7, 0], [2, -2, 4, -1, 2], [0, 4, 5, 8, -4], [7, -1, 8, 1, -3], [0, 2, -4, -3, -1]]),
torch.eye(40)
])
@pytest.mark.parametrize("use_sym", [True, False])
def test_sym_rinverse(A, use_sym):
d = A.shape[-1]
assert_equal(rinverse(A, sym=use_sym), torch.inverse(A), prec=1e-8)
assert_equal(torch.mm(A, rinverse(A, sym=use_sym)), torch.eye(d), prec=1e-8)
batched_A = A.unsqueeze(0).unsqueeze(0).expand(5, 4, d, d)
expected_A = torch.inverse(A).unsqueeze(0).unsqueeze(0).expand(5, 4, d, d)
assert_equal(rinverse(batched_A, sym=use_sym), expected_A, prec=1e-8)
| 1.140625 | 1 |
tensorflow_v1/07_-_Recurrent_neural_networks/06_-_Simple_RNN_seqlen.py | mtanti/deeplearningtutorial | 5 | 66308 | <filename>tensorflow_v1/07_-_Recurrent_neural_networks/06_-_Simple_RNN_seqlen.py
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
import numpy as np
import matplotlib.pyplot as plt
token_sents = [
'i like it'.split(' '), #positive
'i hate it'.split(' '), #negative
'i don\'t hate it'.split(' '), #positive
'i don\'t like it'.split(' '), #negative
]
sent_lens = [
3,
3,
4,
4,
] #Alternatively, you can replace this list with [ len(sent) for sent in token_sents ]
sentiments = [
[ 1 ],
[ 0 ],
[ 1 ],
[ 0 ]
]
vocab = sorted({ token for sent in token_sents for token in sent })
max_len = max(sent_lens)
token2index = { token: index for (index, token) in enumerate(vocab) }
index_sents = [ [ token2index[token] for token in sent ] + [ 0 for _ in range(max_len - len(sent)) ] for sent in token_sents ] #Add zeros to the end of each sentence so that all sentences are equal to the maximum length (can be some other index instead of zero).
token_prefixes = sorted({ tuple(sent[:i]) for sent in token_sents for i in range(len(sent) + 1) }) #All prefixes of each sentence, from the empty prefix up to the full sentence
prefix_lens = [ len(prefix) for prefix in token_prefixes ]
max_prefix_len = max(prefix_lens)
index_prefixes = [ [ token2index[token] for token in prefix ] + [ 0 for _ in range(max_prefix_len - len(prefix)) ] for prefix in token_prefixes ]
###################################
class Cell(tf.nn.rnn_cell.RNNCell):
def __init__(self, embed_size, state_size, init_stddev):
super().__init__()
self.W = None
self.b = None
self._embed_size = embed_size
self._state_size = state_size
self._init_stddev = init_stddev
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._state_size
def build(self, inputs_shape):
self.W = self.add_variable('W', [self._state_size+self._embed_size, self._state_size], tf.float32, tf.random_normal_initializer(stddev=self._init_stddev))
self.b = self.add_variable('b', [self._state_size], tf.float32, tf.zeros_initializer())
self.built = True
def call(self, x, curr_state):
layer_input = tf.concat([ curr_state, x ], axis=1)
new_state = tf.tanh(tf.matmul(layer_input, self.W) + self.b)
return (new_state, new_state) #Return the state as both output and state.
###################################
class Model(object):
def __init__(self, vocab_size):
init_stddev = 1e-1
embed_size = 2
state_size = 2
self.graph = tf.Graph()
with self.graph.as_default():
self.sents = tf.placeholder(tf.int32, [None, None], 'sents')
self.sent_lens = tf.placeholder(tf.int32, [None], 'sent_lens')
self.targets = tf.placeholder(tf.float32, [None, 1], 'targets')
self.params = []
batch_size = tf.shape(self.sents)[0]
with tf.variable_scope('embeddings'):
self.embedding_matrix = tf.get_variable('embedding_matrix', [ vocab_size, embed_size ], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
self.params.extend([ self.embedding_matrix ])
embedded = tf.nn.embedding_lookup(self.embedding_matrix, self.sents)
with tf.variable_scope('hidden'):
init_state = tf.get_variable('init_state', [state_size], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
batch_init = tf.tile(tf.reshape(init_state, [1, state_size]), [batch_size, 1])
self.params.extend([ init_state ])
cell = Cell(embed_size, state_size, init_stddev)
(_, self.states) = tf.nn.dynamic_rnn(cell, embedded, sequence_length=self.sent_lens, initial_state=batch_init) #Pass sentence lengths here.
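            # With sequence_length set, dynamic_rnn stops stepping each sequence
            # at its true length: outputs past that point are zero and the
            # returned final state is the state at the last real token, so the
            # zero padding added to the shorter sentences never influences the
            # sentence representation.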
self.params.extend([ cell.W, cell.b ])
with tf.variable_scope('output'):
W = tf.get_variable('W', [state_size, 1], tf.float32, tf.random_normal_initializer(stddev=init_stddev))
b = tf.get_variable('b', [1], tf.float32, tf.zeros_initializer())
self.params.extend([ W, b ])
logits = tf.matmul(self.states, W) + b
self.probs = tf.sigmoid(logits)
self.error = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.targets, logits=logits))
self.optimiser_step = tf.train.AdamOptimizer().minimize(self.error)
self.init = tf.global_variables_initializer()
self.graph.finalize()
self.sess = tf.Session()
def initialise(self):
return self.sess.run([ self.init ], { })
def close(self):
self.sess.close()
def optimisation_step(self, sents, sent_lens, targets):
return self.sess.run([ self.optimiser_step ], { self.sents: sents, self.sent_lens: sent_lens, self.targets: targets })
def get_params(self):
return self.sess.run(self.params, { })
def get_error(self, sents, sent_lens, targets):
return self.sess.run([ self.error ], { self.sents: sents, self.sent_lens: sent_lens, self.targets: targets })[0]
def predict(self, sents, sent_lens):
return self.sess.run([ self.probs ], { self.sents: sents, self.sent_lens: sent_lens })[0]
def get_state(self, sents, sent_lens):
return self.sess.run([ self.states ], { self.sents: sents, self.sent_lens: sent_lens })[0]
###################################
max_epochs = 2000
(fig, axs) = plt.subplots(1, 3)
prefix_plots = list()
prefix_texts = list()
for token_prefix in token_prefixes:
[ prefix_plot ] = axs[0].plot([ 0 ], [ 0 ], linestyle='', marker='o', markersize=10)
prefix_plots.append(prefix_plot)
prefix_text = axs[0].text(0, 0, ' '.join(token_prefix), fontdict={ 'fontsize': 8 })
prefix_texts.append(prefix_text)
axs[0].set_xlim(-1.0, 1.0)
axs[0].set_xlabel('d0')
axs[0].set_ylim(-1.0, 1.0)
axs[0].set_ylabel('d1')
axs[0].grid(True)
axs[0].set_title('Prefixes')
sent_plots = list()
sent_texts = list()
for token_sent in token_sents:
[ sent_plot ] = axs[1].plot([ 0 ], [ 0 ], linestyle='', marker='o', markersize=10)
sent_plots.append(sent_plot)
sent_text = axs[1].text(0, 0, ' '.join(token_sent), fontdict={ 'fontsize': 8 })
sent_texts.append(sent_text)
axs[1].set_xlim(-1.0, 1.0)
axs[1].set_xlabel('d0')
axs[1].set_ylim(-1.0, 1.0)
axs[1].set_ylabel('d1')
axs[1].grid(True)
axs[1].set_title('Sents')
[ train_error_plot ] = axs[2].plot([], [], color='red', linestyle='-', linewidth=1, label='train')
axs[2].set_xlim(0, max_epochs)
axs[2].set_xlabel('epoch')
axs[2].set_ylim(0.0, 2.0)
axs[2].set_ylabel('XE')
axs[2].grid(True)
axs[2].set_title('Error progress')
axs[2].legend()
fig.tight_layout()
fig.show()
###################################
model = Model(len(vocab))
model.initialise()
train_errors = list()
print('epoch', 'train error', sep='\t')
for epoch in range(1, max_epochs+1):
train_error = model.get_error(index_sents, sent_lens, sentiments)
train_errors.append(train_error)
if epoch%100 == 0:
print(epoch, train_error, sep='\t')
states = model.get_state(index_prefixes, prefix_lens)
for (prefix_plot, prefix_text, state) in zip(prefix_plots, prefix_texts, states):
prefix_plot.set_data([ state[0] ], [ state[1] ])
prefix_text.set_position( (state[0], state[1]) )
states = model.get_state(index_sents, sent_lens)
for (sent_plot, sent_text, state) in zip(sent_plots, sent_texts, states.tolist()):
sent_plot.set_data([ state[0] ], [ state[1] ])
sent_text.set_position( (state[0], state[1]) )
train_error_plot.set_data(np.arange(len(train_errors)), train_errors)
plt.draw()
fig.canvas.flush_events()
model.optimisation_step(index_sents, sent_lens, sentiments)
print()
print('prefix', 'vector', sep='\t')
states = model.get_state(index_prefixes, prefix_lens)
for (token_prefix, state) in zip(token_prefixes, states.tolist()):
print(' '.join(token_prefix), np.round(state, 3), sep='\t')
print()
print('sent', 'vector', sep='\t')
states = model.get_state(index_sents, sent_lens)
for (token_sent, state) in zip(token_sents, states.tolist()):
print(' '.join(token_sent), np.round(state, 3), sep='\t')
print()
probs = model.predict(index_sents, sent_lens)
print('sent', 'sentiment', sep='\t')
for (token_sent, prob) in zip(token_sents, probs.tolist()):
print(' '.join(token_sent), np.round(prob[0], 3), sep='\t')
model.close()
| 2.125 | 2 |
BDSpaceVis/coordinate_system.py | bond-anton/BDSpaceVis | 0 | 66436 | <gh_stars>0
import numpy as np
from mayavi import mlab
from tvtk.api import tvtk
from BDSpaceVis import generators
def euler_color(euler_angles):
r = (euler_angles.euler_angles[0] + np.pi) / (2 * np.pi)
g = euler_angles.euler_angles[1] / np.pi
b = (euler_angles.euler_angles[2] + np.pi) / (2 * np.pi)
if np.allclose(r, 0):
r = 0
if np.allclose(g, 0):
g = 0
if np.allclose(b, 0):
b = 0
return r, g, b
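# Worked example: Euler angles (0, pi/2, 0) map to (r, g, b) = (0.5, 0.5, 0.5),
# i.e. mid-grey, so each rotation angle shifts the colour along one channel.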
def coordinate_system_arrows(coordinate_system, offset=0.0, scale=1.0):
points = []
lengths = []
for i in range(3):
points.append(coordinate_system.origin + scale * np.asarray(coordinate_system.basis[i]) * offset)
lengths.append(np.asarray(coordinate_system.basis[:, i]) * scale)
points = np.array(points)
lengths = np.array(lengths)
return points, lengths
def draw_coordinate_system_axes(fig, coordinate_system, offset=0.0, scale=1.0, draw_labels=True):
points, lengths = coordinate_system_arrows(coordinate_system, offset=offset, scale=scale)
mlab.figure(fig, bgcolor=fig.scene.background)
arrows = mlab.quiver3d(points[:, 0], points[:, 1], points[:, 2],
lengths[0, :], lengths[1, :], lengths[2, :],
scalars=np.array([3, 2, 1]), mode='arrow')
arrows.glyph.color_mode = 'color_by_scalar'
arrows.glyph.glyph.scale_factor = scale
data = arrows.parent.parent
data.name = coordinate_system.name
glyph_scale = arrows.glyph.glyph.scale_factor * 1.1
label_col = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
labels = []
if draw_labels:
for i in range(3):
labels.append(mlab.text3d(points[i, 0] + glyph_scale * coordinate_system.basis[i, 0],
points[i, 1] + glyph_scale * coordinate_system.basis[i, 1],
points[i, 2] + glyph_scale * coordinate_system.basis[i, 2],
coordinate_system.labels[i],
color=label_col[i],
scale=0.1 * glyph_scale))
return arrows, labels
def draw_coordinate_system_box(fig, coordinate_system, offset=0.5, scale=1.0, draw_axes=True, draw_labels=True):
mlab.figure(fig, bgcolor=fig.scene.background)
cube_points, dims = generators.generate_cuboid(scale, scale, scale,
origin=np.array([scale/2, scale/2, scale/2]))
cube = tvtk.StructuredGrid(dimensions=dims)
cube.points = np.asarray(coordinate_system.to_parent(cube_points))
color = euler_color(coordinate_system.euler_angles)
cube_surface = mlab.pipeline.surface(cube, color=color)
cube_surface.parent.parent.name = 'Euler colored box: ' + coordinate_system.name
cube_surface.actor.property.edge_visibility = 1
cube_surface.actor.property.edge_color = color
arrows, labels = None, None
if draw_axes:
arrows, labels = draw_coordinate_system_axes(fig, coordinate_system, offset=offset, scale=scale,
draw_labels=draw_labels)
return cube_surface, arrows, labels
def update_coordinate_system_axes(coordinate_system, arrows, labels, offset=0.0, scale=1.0):
points, lengths = coordinate_system_arrows(coordinate_system, offset=offset, scale=scale)
data = arrows.parent.parent
data.mlab_source.points = points
data.mlab_source.u = lengths[0, :]
data.mlab_source.v = lengths[1, :]
data.mlab_source.w = lengths[2, :]
glyph_scale = arrows.glyph.glyph.scale_factor * 1.1
for i in range(len(labels)):
labels[i].position = points[i, :] + glyph_scale * np.asarray(coordinate_system.basis[i, :])
labels[i].scale = np.ones(3) * 0.1 * glyph_scale
return arrows, labels
def update_coordinate_system_box(coordinate_system, cube_surface, arrows, labels, offset=0.5, scale=1.0):
cube_points, dims = generators.generate_cuboid(scale, scale, scale,
origin=np.array([scale/2, scale/2, scale/2]))
color = euler_color(coordinate_system.euler_angles)
cube_surface.parent.parent.data.set(points=np.asarray(coordinate_system.to_parent(cube_points)))
cube_surface.actor.property.edge_visibility = 1
cube_surface.actor.property.edge_color = color
cube_surface.actor.property.color = color
if arrows is None:
return cube_surface, arrows, labels
else:
arrows, labels = update_coordinate_system_axes(coordinate_system, arrows, labels, offset=offset, scale=scale)
return cube_surface, arrows, labels
| 2 | 2 |
locations/migrations/0002_auto_20200601_2156.py | PatelKeviin/Locations-API-using-Django-REST-Framework | 0 | 66564 | <reponame>PatelKeviin/Locations-API-using-Django-REST-Framework
# Generated by Django 3.0.6 on 2020-06-01 11:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('locations', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='location',
name='id',
field=models.AutoField(primary_key=True, serialize=False),
),
]
| 0.804688 | 1 |
hap_init.py | Carles-Figuerola/hap-client-metrics | 0 | 66692 | #!/usr/bin/python
import os
from hapclient.client import HapClient
import json
import logging as log
import sys
def read_json_file(file):
with open(file) as fd:
try:
content = json.load(fd)
except json.decoder.JSONDecodeError as e:
content = {}
return content
def write_pairing_data(pairing_data, pairing_data_file):
with open(pairing_data_file, 'w') as fd:
json.dump(pairing_data, fd)
def wipe_pairing_data(pairing_data_file):
with open(pairing_data_file, 'w') as fd:
fd.write("")
def load_config(config_file, options):
config = read_json_file(config_file)
if options.device_id:
config['device_id'] = options.device_id
if options.address:
config['address'] = options.address
if options.port:
config['port'] = options.port
if options.pin:
config['pin'] = options.pin
if options.autodiscover:
config['autodiscover'] = True
else:
config['autodiscover'] = False
if options.autodiscover:
if not 'pin' in config:
log.error('pin is needed for autodiscovery')
#sys.exit(1)
else:
if not all(x in config for x in ['device_id', 'address', 'port', 'pin']):
log.error("Config file or flags do not have all required fields: [device_id, address, port, pin]")
#sys.exit(1)
return config
def pair_homekit(config, pairing_data, pairing_data_file):
if pairing_data:
client = HapClient(device_id=config['device_id'], pairing_data=pairing_data)
log.info("Successfully paired with the device")
else:
if config['autodiscover']:
devices = HapClient.discover()
if len(devices) > 1:
                log.warning(f"Found more than one device; choosing the first one: {devices[0]['id']}")
log.info(f"Found Server: {devices[0]['id']}")
client = HapClient(devices[0]['id'], address=devices[0]['address'], port=devices[0]['port'])
else:
client = HapClient(config['device_id'], address=config['address'], port=config['port'])
pair_result = client.pair(config['pin'])
if pair_result:
log.info("Successfully paired with the device")
write_pairing_data(client.pairing_data, pairing_data_file)
log.info("Saved pairing data")
else:
log.error("Failed to pair with the device")
return client
def unpair_homekit(client):
client.unpair()
return True
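if __name__ == '__main__':
    from argparse import Namespace
    # Minimal usage sketch (the file names below are assumptions): cached
    # pairing data in pairing.json lets repeated runs skip the pairing
    # handshake; read_json_file assumes both files already exist.
    opts = Namespace(device_id=None, address=None, port=None, pin=None,
                     autodiscover=True)
    config = load_config('config.json', opts)
    pairing_data = read_json_file('pairing.json')
    client = pair_homekit(config, pairing_data, 'pairing.json')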
| 1.101563 | 1 |
perfkitbenchmarker/scripts/wait_for_command.py | PublicStuff/PerfKitBenchmarker | 0 | 66820 | #!/usr/bin/env python2
#
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""Waits for a command started by execute_command.py to complete.
Blocks until a command wrapped by "execute_command.py" completes, then mimics
the wrapped command, copying the wrapped command's stdout/stderr to this
process' stdout/stderr, and exiting with the wrapped command's status.
*Runs on the guest VM. Supports Python 2.6, 2.7, and 3.x.*
"""
import fcntl
import optparse
import os
import shutil
import sys
import threading
def main():
p = optparse.OptionParser()
p.add_option('-o', '--stdout', dest='stdout',
help="""Read stdout from FILE.""", metavar='FILE')
p.add_option('-e', '--stderr', dest='stderr',
help="""Read stderr from FILE.""", metavar='FILE')
p.add_option('-s', '--status', dest='status', metavar='FILE',
help='Get process exit status from FILE. '
'Will block until a shared lock is acquired on FILE.')
p.add_option('-d', '--delete', dest='delete', action='store_true',
help='Delete stdout, stderr, and status files when finished.')
options, args = p.parse_args()
if args:
sys.stderr.write('Unexpected arguments: {0}\n'.format(args))
return 1
missing = []
for option in ('stdout', 'stderr', 'status'):
if getattr(options, option) is None:
missing.append(option)
if missing:
p.print_usage()
msg = 'Missing required flag(s): {0}\n'.format(
', '.join('--' + i for i in missing))
sys.stderr.write(msg)
return 1
with open(options.stdout, 'r') as stdout:
with open(options.stderr, 'r') as stderr:
with open(options.status, 'r') as status:
fcntl.lockf(status, fcntl.LOCK_SH)
return_code = int(status.read())
status.close()
stderr_copier = threading.Thread(target=shutil.copyfileobj,
args=[stderr, sys.stderr],
name='stderr-copier')
stderr_copier.daemon = True
stderr_copier.start()
try:
shutil.copyfileobj(stdout, sys.stdout)
finally:
stderr_copier.join()
if options.delete:
for f in [options.stdout, options.stderr, options.status]:
os.unlink(f)
return return_code
if __name__ == '__main__':
sys.exit(main())
| 1.460938 | 1 |
pyNastran/converters/format_converter.py | als0052/pyNastran | 0 | 66948 | <filename>pyNastran/converters/format_converter.py
"""Multi-input/output format converter"""
from __future__ import annotations
import os
import sys
import glob
from typing import Dict, Optional, Any, TYPE_CHECKING
if TYPE_CHECKING:
from cpylog import SimpleLogger
# stl_to_plot3d ???
def process_nastran(bdf_filename: str, fmt2: str, fname2: str,
log: Optional[SimpleLogger]=None,
data: Optional[Dict[str, Any]]=None,
debug: bool=True,
quiet: bool=False) -> None:
"""
    Converts Nastran to STL/Cart3d/Tecplot/UGRID3d/Abaqus/Nastran
"""
assert fmt2 in ['stl', 'cart3d', 'tecplot', 'ugrid', 'nastran', 'abaqus'], 'format2=%s' % fmt2
if data is None:
data = {'--scale': 1.0,}
from pyNastran.bdf.bdf import BDF
xref = True
if fmt2 == 'ugrid':
xref = False
model = BDF(log=log, debug=debug)
model.read_bdf(bdf_filename, validate=False, xref=xref)
if data['--scale'] != 1.0:
scale = data['--scale']
data['--scale'] = 1.0
for node in model.nodes.values():
node.xyz = node.get_position() * scale
node.cp = 0
del node.cp_ref
if fmt2 == 'stl':
from pyNastran.converters.nastran.nastran_to_stl import nastran_to_stl
nastran_to_stl(model, fname2, is_binary=data['--binary'])
elif fmt2 == 'cart3d':
from pyNastran.converters.nastran.nastran_to_cart3d import nastran_to_cart3d
cart3d = nastran_to_cart3d(model)
cart3d.write_cart3d(fname2)
elif fmt2 == 'tecplot':
from pyNastran.converters.nastran.nastran_to_tecplot import nastran_to_tecplot
tecplot = nastran_to_tecplot(model)
tecplot_filename = fname2
tecplot.write_tecplot(tecplot_filename, adjust_nids=False)
elif fmt2 == 'ugrid':
from pyNastran.converters.nastran.nastran_to_ugrid import nastran_to_ugrid
nastran_to_ugrid(model, fname2)
elif fmt2 == 'abaqus':
from pyNastran.converters.nastran.nastran_to_abaqus import nastran_to_abaqus
nastran_to_abaqus(model, fname2)
elif fmt2 == 'nastran':
model.write_bdf(fname2, size=16)
else:
raise NotImplementedError('fmt2=%s is not supported by process_nastran' % fmt2)
def process_cart3d(cart3d_filename: str, fmt2: str, fname2: str,
log: SimpleLogger,
data: Dict[str, Any],
quiet: bool=False) -> None:
"""
Converts Cart3d to STL/Nastran/Tecplot/Cart3d
"""
assert fmt2 in ['stl', 'nastran', 'tecplot', 'cart3d'], 'format2=%s' % fmt2
if data is None:
data = {'--scale': 1.0,}
from pyNastran.converters.cart3d.cart3d import read_cart3d
model = read_cart3d(cart3d_filename, log=log)
if data['--scale'] != 1.0:
model.points *= data['--scale']
data['--scale'] = 1.0
if fmt2 == 'stl':
from pyNastran.converters.cart3d.cart3d_to_stl import cart3d_to_stl_filename
cart3d_to_stl_filename(model, fname2, log=log, is_binary=data['--binary'])
elif fmt2 == 'nastran':
from pyNastran.converters.cart3d.cart3d_to_nastran import cart3d_to_nastran_filename
cart3d_to_nastran_filename(model, fname2, log=log)
elif fmt2 == 'tecplot':
from pyNastran.converters.cart3d.cart3d_to_tecplot import cart3d_to_tecplot
cart3d_to_tecplot(model, fname2, log=log)
elif fmt2 == 'cart3d':
model.write_cart3d(fname2, is_binary=data['--binary'])
# elif fmt2 == 'ugrid':
# cart3d_to_ugrid(model, fname2)
else:
raise NotImplementedError('fmt2=%s is not supported by process_cart3d' % fmt2)
def process_stl(stl_filename: str, fmt2: str, fname2: str,
log: SimpleLogger,
data: Optional[Dict[str, Any]]=None,
quiet: bool=False) -> None:
"""
    Converts STL to Nastran/Cart3d/STL
"""
assert fmt2 in ['stl', 'nastran', 'cart3d'], 'format2=%s' % fmt2
if data is None:
data = {'--scale': 1.0,}
if '*' in stl_filename:
stl_filenames = glob.glob(stl_filename)
else:
stl_filenames = [stl_filename]
assert len(stl_filenames) > 0, stl_filenames
from pyNastran.converters.stl.utils import merge_stl_files
model = merge_stl_files(stl_filenames, stl_out_filename=None, log=log)
scale = data['--scale']
if scale is not None:
assert isinstance(scale, float), 'scale=%r type=%r' % (scale, type(scale))
model.nodes *= scale
# model = STL()
# model.read_stl(stl_filename)
if fmt2 == 'nastran':
from pyNastran.converters.stl.stl_to_nastran import stl_to_nastran
stl_to_nastran(model, fname2, log=log)
elif fmt2 == 'cart3d':
from pyNastran.converters.stl.stl_to_cart3d import stl_to_cart3d
stl_to_cart3d(model, fname2, log=log)
elif fmt2 == 'stl':
is_binary = data['--binary']
model.write_stl(fname2, is_binary=is_binary, float_fmt='%6.12f', stop_on_failure=False)
# elif fmt2 == 'tecplot':
# stl_to_tecplot(model, fname2)
# elif fmt2 == 'ugrid':
# stl_to_ugrid(model, fname2)
else:
raise NotImplementedError('fmt2=%s is not supported by process_stl' % fmt2)
def element_slice(tecplot, data: Dict[str, Any]) -> None:
"""removes solid elements from a tecplot model"""
xslice = data['--xx']
yslice = data['--yy']
zslice = data['--zz']
# if xslice is not None:
# xslice = data['--xx']
# tecplot.slice_x(xslice)
# if yslice is not None:
# yslice = data['--yy']
# tecplot.slice_y(yslice)
# if zslice is not None:
# zslice = data['--zz']
# tecplot.slice_z(zslice)
#print(tecplot)
tecplot.slice_xyz(xslice, yslice, zslice)
def process_tecplot(tecplot_filename: str, fmt2: str, fname2: str,
log: SimpleLogger,
data: Optional[Dict[str, Any]]=None,
quiet: bool=False) -> None:
"""
    Converts Tecplot to Tecplot/Nastran/Cart3d/STL
Globs all input tecplot files (e.g. tecplot*.plt)
"""
assert fmt2 in ['stl', 'nastran', 'cart3d', 'tecplot'], 'format2=%s' % fmt2
if '*' in tecplot_filename:
tecplot_filenames = glob.glob(tecplot_filename)
else:
tecplot_filenames = [tecplot_filename]
assert len(tecplot_filenames) > 0, tecplot_filename
from pyNastran.converters.tecplot.utils import merge_tecplot_files
from pyNastran.converters.tecplot.tecplot_to_nastran import tecplot_to_nastran_filename
from pyNastran.converters.tecplot.tecplot_to_cart3d import tecplot_to_cart3d_filename
model = merge_tecplot_files(tecplot_filenames, tecplot_filename_out=None, log=log)
#if fmt2 == 'cart3d':
#tecplot_to_cart3d(model, fname2)
#elif fmt2 == 'stl':
#tecplot_to_stl(model, fname2)
# elif fmt2 == 'ugrid':
# tecplot_to_ugrid(model, fname2)
res_types = data['RESTYPE']
unused_is_points = not data['--block']
if fmt2 == 'tecplot':
if not quiet: # pragma: no cover
print(data)
element_slice(model, data)
# this is a good way to merge files
model.write_tecplot(fname2, res_types=res_types) # is_points=is_points
elif fmt2 == 'nastran':
tecplot_to_nastran_filename(model, fname2)
elif fmt2 == 'stl':
cart3d_filename = fname2 + '.tri'
tecplot_to_cart3d_filename(model, cart3d_filename, log=log)
process_cart3d(cart3d_filename, fmt2, fname2, log, data=data, quiet=quiet)
os.remove(cart3d_filename)
#tecplot_to_nastran_filename(model, fname2 + '.bdf')
#process_nastran(fname2 + '.bdf', fmt2, fname2, log, data=data, quiet=quiet)
elif fmt2 == 'cart3d':
# supports tris/quads, not loads
#tecplot_to_nastran_filename(model, fname2 + '.bdf')
#process_nastran(fname2 + '.bdf', fmt2, fname2, log, data=data, quiet=quiet)
# supports quads/loads, not tris
tecplot_to_cart3d_filename(model, fname2, log=log)
else:
raise NotImplementedError('fmt2=%s is not supported by process_tecplot' % fmt2)
def process_ugrid(ugrid_filename: str, fmt2: str, fname2: str,
log: SimpleLogger,
data: Optional[SimpleLogger]=None,
quiet: bool=False) -> None:
"""
Converts UGRID to Nastran/Cart3d/STL/Tecplot
"""
assert fmt2 in ['stl', 'nastran', 'cart3d', 'tecplot'], 'format2=%s' % fmt2
read_shells = True
read_solids = True
if fmt2 in ['stl', 'cart3d']:
read_shells = True
read_solids = False
from pyNastran.converters.aflr.ugrid.ugrid_reader import UGRID
model = UGRID(read_shells=read_shells, read_solids=read_solids, log=log)
model.read_ugrid(ugrid_filename)
if fmt2 == 'nastran':
# ugrid_to_nastran(model, fname2
include_shells = True
include_solids = True
bdf_filename = fname2
model.write_bdf(bdf_filename, include_shells=include_shells, include_solids=include_solids)
elif fmt2 == 'cart3d':
include_shells = True
include_solids = False
bdf_filename = fname2 + '.bdf'
model.write_bdf(bdf_filename, include_shells=include_shells, include_solids=include_solids)
# ugrid_to_cart3d(model, fname2)
process_nastran(bdf_filename, 'cart3d', fname2, data=None)
elif fmt2 == 'stl':
include_shells = True
include_solids = False
bdf_filename = fname2 + '.bdf'
model.write_bdf(bdf_filename, include_shells=include_shells, include_solids=include_solids)
process_nastran(bdf_filename, 'cart3d', fname2, data=None)
# ugrid_to_stl(model, fname2)
elif fmt2 == 'tecplot':
from pyNastran.converters.aflr.ugrid.ugrid3d_to_tecplot import ugrid_to_tecplot
# ugrid_to_tecplot(model, fname2)
tecplot, unused_zone = ugrid_to_tecplot(model)
element_slice(tecplot, data)
tecplot_filename = fname2
tecplot.write_tecplot(tecplot_filename)
else:
raise NotImplementedError('fmt2=%s is not supported by process_ugrid' % fmt2)
def run_format_converter(fmt1: str, fname1: str,
fmt2: str, fname2: str,
data: Dict[str, Any],
log: SimpleLogger,
quiet: bool=False) -> None:
"""
Runs the format converter
"""
if fmt1 == 'nastran':
process_nastran(fname1, fmt2, fname2, log, data=data, quiet=quiet)
elif fmt1 == 'cart3d':
process_cart3d(fname1, fmt2, fname2, log, data=data, quiet=quiet)
elif fmt1 == 'stl':
process_stl(fname1, fmt2, fname2, log, data=data, quiet=quiet)
elif fmt1 == 'tecplot':
process_tecplot(fname1, fmt2, fname2, log, data=data, quiet=quiet)
elif fmt1 == 'ugrid':
process_ugrid(fname1, fmt2, fname2, log, data=data, quiet=quiet)
elif fmt1 == 'vrml':
process_vrml(fname1, fmt2, fname2, log, data=data, quiet=quiet)
else:
format1s = ['nastran', 'cart3d', 'stl', 'tecplot', 'ugrid', 'vrml']
#format2s = ['nastran', 'cart3d', 'stl', 'ugrid', 'tecplot']
raise NotImplementedError(f'fmt1={fmt1} is not supported by run; '
f'use {", ".join(format1s)}')
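# Minimal usage sketch (file names are assumptions): convert a Nastran BDF to
# an ASCII STL through the same entry point the command line uses.
#   from cpylog import SimpleLogger
#   run_format_converter('nastran', 'model.bdf', 'stl', 'model.stl',
#                        data={'--scale': 1.0, '--binary': False},
#                        log=SimpleLogger(level='debug'), quiet=True)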
def cmd_line_format_converter(argv=None, quiet: str=False) -> None:
"""Interface for format_converter"""
if argv is None:
argv = sys.argv
msg = (
"Usage:\n"
#format1s = ['nastran', 'cart3d', 'stl', 'ugrid', 'tecplot', 'vrml']
#format2s = ['nastran', 'cart3d', 'stl', 'ugrid', 'tecplot']
" format_converter nastran <INPUT> <format2> <OUTPUT> [-o <OP2>] --no_xref\n"
" format_converter <format1> <INPUT> tecplot <OUTPUT> [-r RESTYPE...] [-b] [--block] [-x <X>] [-y <Y>] [-z <Z>] [--scale SCALE]\n"
" format_converter <format1> <INPUT> stl <OUTPUT> [-b] [--scale SCALE]\n"
" format_converter cart3d <INPUT> <format2> <OUTPUT> [-b] [--scale SCALE]\n"
" format_converter <format1> <INPUT> <format2> <OUTPUT> [--scale SCALE]\n"
#" format_converter nastran <INPUT> <format2> <OUTPUT>\n"
#" format_converter cart3d <INPUT> <format2> <OUTPUT>\n"
' format_converter -h | --help\n'
' format_converter -v | --version\n'
"\n"
"Required Arguments:\n"
" format1 format type (nastran, cart3d, stl, ugrid, tecplot, vrml)\n"
" format2 format type (nastran, cart3d, stl, ugrid, tecplot, abaqus)\n"
" INPUT path to input file\n"
" OUTPUT path to output file\n"
"\n"
"Nastran Options:\n"
" -o OP2, --op2 OP2 path to results file (nastran-specific)\n"
" only used for Tecplot (not supported)\n"
" --no_xref Don't cross-reference (nastran-specific)\n"
"\n"
"Tecplot Options:\n"
" -x X, --xx X Creates a constant x slice; keeps points < X\n"
" -y Y, --yy Y Creates a constant y slice; keeps points < Y\n"
" -z Z, --zz Z Creates a constant z slice; keeps points < Z\n"
" --block Writes the data in BLOCK (vs. POINT) format\n"
" -r, --results Specifies the results to write to limit output\n"
"\n"
"Tecplot/Cart3d/STL Options:\n"
" --scale SCALE Apply a scale factor to the XYZ locations (default=1.0)\n"
" -b, --binary writes the STL in binary (not supported for Tecplot)\n"
"\n"
"Info:\n"
" -h, --help show this help message and exit\n"
" -v, --version show program's version number and exit\n"
'\n'
'Notes:\n'
" Nastran->Tecplot assumes sequential nodes and consistent types (shell/solid)\n"
" STL/Tecplot supports globbing as the input filename\n"
" Tecplot slicing doesn't support multiple slice values and will give bad results (not crash)\n"
" UGRID outfiles must be of the form model.b8.ugrid, where\n"
" b8, b4, lb8, lb4 are valid choices and periods are important\n"
" Scale has only been tested on STL -> STL\n"
)
from docopt import docopt
import pyNastran
ver = str(pyNastran.__version__)
data = docopt(msg, version=ver, argv=argv[1:])
# because we have special blocks for tecplot/stl/cart3d
is_nastran = data['nastran']
format1 = data['<format1>']
if is_nastran:
format1 = 'nastran'
data['<format1>'] = format1
format2 = data['<format2>']
is_stl = data['stl']
if is_stl:
format2 = 'stl'
data['<format2>'] = format2
is_tecplot = data['tecplot']
if is_tecplot:
format2 = 'tecplot'
data['<format2>'] = format2
is_cart3d = data['cart3d']
if is_cart3d:
format1 = 'cart3d'
data['<format1>'] = format1
# common options
if data['--scale']:
data['--scale'] = eval(data['--scale'])
else:
data['--scale'] = 1.0
if not quiet: # pragma: no cover
print(data)
input_filename = data['<INPUT>']
output_filename = data['<OUTPUT>']
level = 'warning' if quiet else 'debug'
from cpylog import SimpleLogger
log = SimpleLogger(level=level)
run_format_converter(format1, input_filename, format2, output_filename, data, log=log, quiet=quiet)
def process_vrml(vrml_filename: str, fmt2: str, fname2: str,
log: SimpleLogger,
data: Dict[str, Any],
quiet: bool=False) -> None:
"""
    Converts VRML to Nastran/STL
"""
assert fmt2 in ['nastran', 'stl'], 'format2=%s' % fmt2
#if data['--scale'] != 1.0:
#model.points *= data['--scale']
#data['--scale'] = 1.0
from pyNastran.converters.dev.vrml.vrml import vrml_to_nastran, vrml_to_stl
if fmt2 == 'nastran':
vrml_to_nastran(vrml_filename, fname2, log=log)
    elif fmt2 == 'stl':
vrml_to_stl(vrml_filename, fname2, log=log)
else:
raise NotImplementedError('fmt2=%s is not supported by process_vrml' % fmt2)
if __name__ == '__main__': # pragma: no cover
cmd_line_format_converter()
| 1.570313 | 2 |
open_seq2seq/encoders/encoder.py | VoiceZen/OpenSeq2Seq | 1,459 | 67076 | # Copyright (c) 2018 NVIDIA Corporation
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import abc
import copy
import six
import tensorflow as tf
from open_seq2seq.optimizers.mp_wrapper import mp_regularizer_wrapper
from open_seq2seq.utils.utils import check_params, cast_types
@six.add_metaclass(abc.ABCMeta)
class Encoder:
"""Abstract class from which all encoders must inherit.
"""
@staticmethod
def get_required_params():
"""Static method with description of required parameters.
Returns:
dict:
Dictionary containing all the parameters that **have to** be
included into the ``params`` parameter of the
class :meth:`__init__` method.
"""
return {}
@staticmethod
def get_optional_params():
"""Static method with description of optional parameters.
Returns:
dict:
Dictionary containing all the parameters that **can** be
included into the ``params`` parameter of the
class :meth:`__init__` method.
"""
return {
'regularizer': None, # any valid TensorFlow regularizer
'regularizer_params': dict,
'initializer': None, # any valid TensorFlow initializer
'initializer_params': dict,
'dtype': [tf.float32, tf.float16, 'mixed'],
}
def __init__(self, params, model, name="encoder", mode='train'):
"""Encoder constructor.
Note that encoder constructors should not modify TensorFlow graph, all
graph construction should happen in the :meth:`self._encode() <_encode>`
method.
Args:
params (dict): parameters describing the encoder.
All supported parameters are listed in :meth:`get_required_params`,
:meth:`get_optional_params` functions.
model (instance of a class derived from :class:`Model<models.model.Model>`):
parent model that created this encoder.
Could be None if no model access is required for the use case.
name (str): name for encoder variable scope.
mode (str): mode encoder is going to be run in.
Could be "train", "eval" or "infer".
Config parameters:
* **initializer** --- any valid TensorFlow initializer. If no initializer
is provided, model initializer will be used.
* **initializer_params** (dict) --- dictionary that will be passed to
initializer ``__init__`` method.
    * **regularizer** --- any valid TensorFlow regularizer. If no regularizer
is provided, model regularizer will be used.
* **regularizer_params** (dict) --- dictionary that will be passed to
regularizer ``__init__`` method.
* **dtype** --- model dtype. Could be either ``tf.float16``, ``tf.float32``
or "mixed". For details see
:ref:`mixed precision training <mixed_precision>` section in docs. If no
dtype is provided, model dtype will be used.
"""
check_params(params, self.get_required_params(), self.get_optional_params())
self._params = copy.deepcopy(params)
self._model = model
if 'dtype' not in self._params:
if self._model:
self._params['dtype'] = self._model.params['dtype']
else:
self._params['dtype'] = tf.float32
self._name = name
self._mode = mode
self._compiled = False
def encode(self, input_dict):
"""Wrapper around :meth:`self._encode() <_encode>` method.
Here name, initializer and dtype are set in the variable scope and then
:meth:`self._encode() <_encode>` method is called.
Args:
input_dict (dict): see :meth:`self._encode() <_encode>` docs.
Returns:
see :meth:`self._encode() <_encode>` docs.
"""
if not self._compiled:
if 'regularizer' not in self._params:
if self._model and 'regularizer' in self._model.params:
self._params['regularizer'] = copy.deepcopy(
self._model.params['regularizer']
)
self._params['regularizer_params'] = copy.deepcopy(
self._model.params['regularizer_params']
)
if 'regularizer' in self._params:
init_dict = self._params.get('regularizer_params', {})
if self._params['regularizer'] is not None:
self._params['regularizer'] = self._params['regularizer'](**init_dict)
if self._params['dtype'] == 'mixed':
self._params['regularizer'] = mp_regularizer_wrapper(
self._params['regularizer'],
)
if self._params['dtype'] == 'mixed':
self._params['dtype'] = tf.float16
if 'initializer' in self.params:
init_dict = self.params.get('initializer_params', {})
initializer = self.params['initializer'](**init_dict)
else:
initializer = None
self._compiled = True
with tf.variable_scope(self._name, initializer=initializer,
dtype=self.params['dtype']):
return self._encode(self._cast_types(input_dict))
def _cast_types(self, input_dict):
"""This function performs automatic cast of all inputs to encoder dtype.
Args:
input_dict (dict): dictionary passed to :meth:`self._encode() <_encode>`
method.
Returns:
dict: same as input_dict, but with all Tensors cast to encoder dtype.
"""
return cast_types(input_dict, self.params['dtype'])
@abc.abstractmethod
def _encode(self, input_dict):
"""This is the main function which should construct encoder graph.
Typically, encoder will take raw input sequence as an input and
produce some hidden representation as an output.
Args:
input_dict (dict): dictionary containing encoder inputs.
If the encoder is used with :class:`models.encoder_decoder` class,
``input_dict`` will have the following content::
{
"source_tensors": data_layer.input_tensors['source_tensors']
}
Returns:
dict:
dictionary of encoder outputs. Return all necessary outputs.
Typically this will be just::
{
"outputs": outputs,
"state": state,
}
"""
pass
@property
def params(self):
"""Parameters used to construct the encoder (dictionary)."""
return self._params
@property
def mode(self):
"""Mode encoder is run in."""
return self._mode
@property
def name(self):
"""Encoder name."""
return self._name
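# A minimal sketch of a concrete encoder (hypothetical example, not part of the
# library): a subclass only needs to implement _encode() and return the
# dictionary described in its docstring.
class PassThroughEncoder(Encoder):
  """Toy encoder that returns the first source tensor unchanged."""
  def _encode(self, input_dict):
    outputs = input_dict['source_tensors'][0]
    return {'outputs': outputs, 'state': None}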
| 1.78125 | 2 |
LinkUp/core/apis/algorithm_api.py | Justin-sd/LinkUp | 0 | 67204 | from ..models import *
from .availability_calendar_api import *
from .calendar_api import *
import json
from datetime import datetime, timedelta
def get_best(event_id):
"""
:param event_id: the id of the event we want to get best times of
:return: A list of sorted pairs: [ (time, [users]), (time, [users]).... ]
where time is the starting time and users is list of users who can make it.
"""
event_set = Event.objects.filter(event_id=event_id)
event = event_set[0]
# make the queryset of users into a list of users
users = list(event.members.all())
    # all of the time quantities below are handled in minutes
duration = int(event.duration)
st = event.potential_start_date
# round up the potential starting minutes
if st.minute > 30:
new_st = st.replace(minute=0)
new_st = new_st + timedelta(hours=1)
elif st.minute > 0:
new_st = st.replace(minute=30)
elif st.minute == 0 or st.minute == 30:
new_st = st
start = convert_to_minutes(new_st, new_st)
et = event.potential_end_date
# round down potential ending minutes
if et.minute > 30:
new_et = et.replace(minute=30)
elif et.minute > 0:
new_et = et.replace(minute=0)
elif et.minute == 0 or et.minute == 30:
new_et = et
end = convert_to_minutes(new_et, new_st)
min_hour = event.no_earlier_than.hour
min_minute = event.no_earlier_than.minute
max_hour = event.no_later_than.hour
max_minute = event.no_later_than.minute
# Dictionary: starting times as keys and values is list of people who can make it,
# keys incremented by duration
optimal_times = {}
# from start to end time, add keys of 30 minute increments with querysets of every user attending
for i in range(start,end+1, 30):
if i + duration > end:
break
# only add times later than min time and earlier than max time
time = convert_to_datetime(new_st, i)
if min_hour < time.hour < max_hour:
optimal_times[i] = users.copy()
elif time.hour == min_hour:
if time.minute >= min_minute:
optimal_times[i] = users.copy()
elif time.hour == max_hour:
if time.minute <= max_minute:
optimal_times[i] = users.copy()
# have a list of all users times
for u in users:
# user_sched = free_busy_month(u)
# schedule = json.dumps(user_sched, default=json_datetime_handler)
# Schedule.objects.create(user=u, availability=schedule)
# get user's schedules in datetime format
for times in get_users_saved_schedule(u):
start_time = list(times.values())[0]
# round DOWN the starting minutes
            if start_time.minute >= 30:
                starting = start_time.replace(minute=30)
            else:
                starting = start_time.replace(minute=0)
the_start = convert_to_minutes(starting, new_st)
end_time = list(times.values())[1]
# round UP the ending minutes
            if end_time.minute > 30:
                ending = end_time.replace(minute=0)
                ending = ending + timedelta(hours=1)
            elif end_time.minute > 0:
                ending = end_time.replace(minute=30)
            else:
                ending = end_time
the_end = convert_to_minutes(ending, new_st)
# try to find the keys in 30 minute increments and remove the user
# from the corresponding list
for i in range(the_start, the_end+1, 30):
if i in optimal_times:
dict_value = optimal_times.get(i)
if u in dict_value:
dict_value.remove(u)
new_dict = {i: dict_value}
optimal_times.update(new_dict)
# go through the optimal times and find which list contains
# most users then append to new list
curr_max = 0
if len(optimal_times) > 0:
curr_max = len(list(optimal_times.values())[0])
append_list = []
for times in optimal_times:
if len(optimal_times[times]) >= curr_max:
# append a list of pairs, first = datetime of start second = list of attending
# with the ending of the list having more people available
append_list.append((convert_to_datetime(new_st, times), optimal_times.get(times)))
curr_max = len(optimal_times[times])
# return the reversed list
return append_list[::-1]
# convert a datetime to minutes elapsed
def convert_to_minutes(time, starting):
elapsed = time - starting
minutes = int(elapsed.total_seconds()/60)
return minutes
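# e.g. convert_to_minutes(datetime(2020, 1, 1, 10, 30), datetime(2020, 1, 1, 10, 0)) == 30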
# convert minutes to a datetime by getting starting datetime and timedelta by minutes
def convert_to_datetime(starting, mins):
time = starting + timedelta(minutes=mins)
    return time
| 2.609375 | 3 |
meta_policy_search/samplers/rl2/meta_sample_processor.py | clrrrr/promp_plus | 3 | 67332 | from meta_policy_search.samplers.base import SampleProcessor
from meta_policy_search.samplers.dice_sample_processor import DiceSampleProcessor
from meta_policy_search.utils.rl2 import utils
import numpy as np
class MetaSampleProcessor(SampleProcessor):
def process_samples(self, paths_meta_batch, log=False, log_prefix=''):
"""
Processes sampled paths. This involves:
- computing discounted rewards (returns)
- fitting baseline estimator using the path returns and predicting the return baselines
            - estimating the advantages using GAE (+ advantage normalization if desired)
- stacking the path data
- logging statistics of the paths
Args:
paths_meta_batch (dict): A list of dict of lists, size: [meta_batch_size] x (batch_size) x [5] x (max_path_length)
log (boolean): indicates whether to log
log_prefix (str): prefix for the logging keys
Returns:
(list of dicts) : Processed sample data among the meta-batch; size: [meta_batch_size] x [7] x (batch_size x max_path_length)
"""
assert isinstance(paths_meta_batch, dict), 'paths must be a dict'
assert self.baseline, 'baseline must be specified'
samples_data_meta_batch = []
all_paths = []
for meta_task, paths in paths_meta_batch.items():
# fits baseline, compute advantages and stack path data
samples_data, paths = self._compute_samples_data(paths)
samples_data_meta_batch.append(samples_data)
all_paths.extend(paths)
# 7) compute normalized trajectory-batch rewards (for E-MAML)
overall_avg_reward = np.mean(np.concatenate([samples_data['rewards'] for samples_data in samples_data_meta_batch]))
overall_avg_reward_std = np.std(np.concatenate([samples_data['rewards'] for samples_data in samples_data_meta_batch]))
for samples_data in samples_data_meta_batch:
samples_data['adj_avg_rewards'] = (samples_data['rewards'] - overall_avg_reward) / (overall_avg_reward_std + 1e-8)
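        # e.g. meta-batch rewards of [1., 3.] have mean 2.0 and std 1.0, so the
        # adjusted rewards become [-1., 1.] (up to the 1e-8 stabiliser).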
# 8) log statistics if desired
self._log_path_stats(all_paths, log=log, log_prefix=log_prefix)
return samples_data_meta_batch
class DiceMetaSampleProcessor(DiceSampleProcessor):
    process_samples = MetaSampleProcessor.process_samples
| 1.921875 | 2 |
get_contact_bridges.py | rcrehuet/getcontacts | 31 | 67460 | <gh_stars>10-100
#!/usr/bin/env python
"""
Forms bridges between pairs of atoms that share an interaction with a residue
matched by a user-specified regex. The output is a new list contact file where
interactions to atoms matching the regex have been replaced with bridges.
For example, running this app with the regex "A:CA:.*" and the contact list:
0 vdw A:ASP:52:C A:CA:201:CA
0 vdw A:CA:201:CA A:GLN:53:N
0 vdw A:ASP:52:N A:PHE:48:O
will output the contact list
0 br A:ASP:52:C A:GLN:53:N
0 vdw A:ASP:52:N A:PHE:48:O
"""
from collections import defaultdict
import re
from itertools import combinations
from contact_calc.transformations import parse_contacts
def main(argv=None):
# Parse command line arguments
import argparse as ap
parser = ap.ArgumentParser(description=__doc__, formatter_class=ap.RawTextHelpFormatter)
optional = parser._action_groups.pop()
required = parser.add_argument_group('required arguments')
parser._action_groups.append(optional) # added this line
required.add_argument('--input',
required=True,
type=ap.FileType('r'),
metavar='FILE',
help='A contact-file generated by get_dynamic_contacts.py or get_static_contacts.py')
required.add_argument('--bridge',
required=True,
type=str,
metavar='REGEX',
help='Regular expression matching any atom to be included as a bridge')
required.add_argument('--bridges_only',
required=False,
type=bool,
metavar='BOOL',
default=False,
help='Indicates whether to output non-bridged interactions as well as the bridges')
required.add_argument('--output',
required=False,
metavar='FILE',
type=ap.FileType('w'),
help='The name of the output contact-file')
args = parser.parse_args(argv)
contacts, total_frames = parse_contacts(args.input)
bridges_only = args.bridges_only
    # Build the bridge_neighbors data structure, which for each frame maps each
    # bridging residue to its non-bridging neighbors. Contacts that are not part
    # of any bridge are also collected in `bridged_contacts`, unless
    # `bridges_only` has been enabled.
bridge_neighbors = [defaultdict(list) for _ in range(total_frames)]
bridge_pattern = re.compile(args.bridge)
bridged_contacts = []
for contact in contacts:
frame = contact[0]
a1_match = bridge_pattern.match(contact[2])
a2_match = bridge_pattern.match(contact[3])
if a1_match and not a2_match:
a1_res = ":".join(contact[2].split(":")[0:3])
bridge_neighbors[frame][a1_res].append(contact[3])
elif a2_match:
a2_res = ":".join(contact[3].split(":")[0:3])
bridge_neighbors[frame][a2_res].append(contact[2])
elif not bridges_only:
bridged_contacts.append(contact)
# Based on the neighbor-lists in `bridge_neighbors`, add atom pairs to `bridged_contacts`
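    # e.g. neighbors [x, y, z] of one bridging residue yield the candidate
    # bridges (x, y), (x, z) and (y, z).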
for frame, bridge_map in enumerate(bridge_neighbors):
for bridge_res in bridge_map:
for a1, a2 in combinations(bridge_map[bridge_res], 2):
bridged_contacts.append([frame, 'br', a1, a2, bridge_res])
# Sort the contacts and convert them to strings
from operator import itemgetter
bridged_contacts.sort(key=itemgetter(0))
for contact in bridged_contacts:
contact[0] = str(contact[0])
bridged_contacts = ["\t".join(contact) for contact in bridged_contacts]
# Write to output
if args.output:
args.output.write("# total_frames:%d\n" % total_frames)
args.output.write("\n".join(bridged_contacts))
args.output.close()
print("Wrote residue contact file to " + args.output.name)
else:
print("\n".join(bridged_contacts))
if __name__ == "__main__":
main()
__license__ = "Apache License 2.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
| 2.453125 | 2 |
tests/api/test_file_upload.py | GermerCarsten/RelES | 0 | 67588 | <filename>tests/api/test_file_upload.py
from hashlib import sha1
import httplib
from uuid import uuid4
from flask import url_for
import pytest
@pytest.fixture
def random_file_content():
return 'Hello File Content! %s\n' % uuid4().hex
# TODO is there merit in actually sending a file with complex binary data instead?
# @pytest.yield_fixture
# def temporary_file():
# _temporary_file = TemporaryFile()
# _temporary_file.write('Hello File! %s\n' % uuid4().hex)
# _temporary_file.flush()
# _temporary_file.seek(0)
#
# yield _temporary_file
#
# _temporary_file.close()
@pytest.mark.usefixtures('live_server')
class TestFileUploadPermission(object):
def test_upload_forbidden(self, random_file_content, requests):
response = requests.post(
url_for('upload_file', _external=True),
files={'file': ('random.txt', random_file_content, 'text/plain')},
allow_redirects=False
)
assert response.status_code == 403
@pytest.mark.usefixtures('live_server', 'customer_with_media_permissions')
class TestFileUploadContentType(object):
def test_content_type_not_whitelisted(self, random_file_content, requests):
response = requests.post(
url_for('upload_file', _external=True),
files={'file': ('random.txt', random_file_content, 'bad/content')},
allow_redirects=False
)
assert response.status_code == 400
def test_content_type_does_not_match_detected(self, random_file_content, requests):
# prepend .pdf magic bytes to content to mess with the detection
# https://en.wikipedia.org/wiki/Magic_number_%28programming%29#Magic_numbers_in_files
        modified_content = '\x25\x50\x44\x46' + random_file_content  # '%PDF'
response = requests.post(
url_for('upload_file', _external=True),
files={'file': ('random.txt', modified_content, 'text/plain')},
allow_redirects=False
)
assert response.status_code == 400
def test_content_type_ok(self, random_file_content, requests):
response = requests.post(
url_for('upload_file', _external=True),
files={'file': ('random.txt', random_file_content, 'text/plain')},
allow_redirects=False
)
assert response.status_code == 201
created = response.json()
assert 'mime' in created
assert created['mime'] == 'text/plain'
@pytest.mark.usefixtures('live_server', 'customer_with_media_permissions')
class TestFileUpload(object):
def test_upload_matches_hash(self, random_file_content, requests):
expected_hash = sha1(random_file_content).hexdigest()
response = requests.post(
url_for('upload_file', _external=True),
files={'file': ('random.txt', random_file_content, 'text/plain')},
allow_redirects=False
)
assert response.status_code == 201
created = response.json()
assert 'sha1' in created
assert created['sha1'] == expected_hash
def test_upload_can_download(self, random_file_content, requests):
response = requests.post(
url_for('upload_file', _external=True),
files={'file': ('random.txt', random_file_content, 'text/plain')},
allow_redirects=False
)
assert response.status_code == 201
assert 'location' in response.headers
created = response.json()
assert 'path' in created
remote_path = created['path'].lstrip('/')
response = requests.get(
url_for('download_file', filename=remote_path, _external=True),
allow_redirects=False
)
assert response.status_code == 200
assert response.content == random_file_content
def test_upload_gets_charged(self, customer, random_file_content, app, requests):
assert 'cycles' not in customer
response = requests.post(
url_for('upload_file', _external=True),
files={'file': ('random.txt', random_file_content, 'text/plain')},
allow_redirects=False
)
assert response.status_code == httplib.CREATED
customer.refresh()
assert len(customer.cycles.to_dict()) == 1
assert customer.cycles['_upload_text'] == app.config['CYCLES_FILE_UPLOAD']
def test_upload_charges_only_on_success(self, customer, random_file_content, requests):
assert 'cycles' not in customer
wrong_mimetype = 'image/jpeg'
response = requests.post(
url_for('upload_file', _external=True),
files={'file': ('random.txt', random_file_content, wrong_mimetype)},
allow_redirects=False
)
assert response.status_code == httplib.BAD_REQUEST
customer.refresh()
assert 'cycles' not in customer
| 1.6875 | 2 |
classwork/04/singly_linked_list.py | makenasandra/golclinics-dsa | 0 | 67716 | <gh_stars>0
# the Node class - holds a value and a reference to the next node
class Node(object):
def __init__(self, val):
self.val = val
self.next = None
def get_data(self):
return self.val
def set_data(self, val):
self.val = val
def get_next(self):
return self.next
def set_next(self, next):
self.next = next
# the LinkedList class
class LinkedList(object):
def __init__(self, head=None):
self.head = head
self.count = 0
def get_count(self):
return self.count
def insert(self, data):
new_node = Node(data)
new_node.set_next(self.head)
self.head = new_node
self.count += 1
def find(self, val):
item = self.head
        while item is not None:
if item.get_data() == val:
return item
else:
item = item.get_next()
return None
    def deleteAt(self, idx):
        # valid indices run from 0 to count-1
        if idx < 0 or idx >= self.count or self.head is None:
            return
        if idx == 0:
            self.head = self.head.get_next()
        else:
            node = self.head
            for _ in range(idx - 1):
                node = node.get_next()
            node.set_next(node.get_next().get_next())
        self.count -= 1
def printList(self):
tempnode = self.head
        while tempnode is not None:
print("Node: ", tempnode.get_data())
tempnode = tempnode.get_next()
def sumList(self):
tempnode = self.head
self.sum = 0
        while tempnode is not None:
self.sum += tempnode.get_data()
tempnode = tempnode.get_next()
print('Sum of list:', self.sum)
if __name__ == "__main__":
# create a linked list and insert some items
itemlist = LinkedList()
itemlist.insert(3)
itemlist.insert(10)
itemlist.insert(1)
itemlist.insert(5)
itemlist.insert(6)
    # Print the list
itemlist.printList()
    # Get the sum of all values
itemlist.sumList()
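
    # Minimal usage sketch for the remaining operations (find / deleteAt),
    # assuming the insertions above leave the list as 6 -> 5 -> 1 -> 10 -> 3.
    found = itemlist.find(10)
    print('Found node with value:', found.get_data() if found else None)
    itemlist.deleteAt(0)  # removes the current head (6)
    itemlist.printList()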
| 3.609375 | 4 |
layers/ModConv2d.py | Egor-kokhan/StyleGANv2-genart-keras | 1 | 67844 | import numpy as np
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import tensor_shape
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend
from tensorflow.python.keras import constraints
from tensorflow.python.keras import initializers
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.engine.base_layer import Layer
from tensorflow.python.keras.engine.input_spec import InputSpec
# imports for backwards namespace compatibility
# pylint: disable=unused-import
from tensorflow.python.keras.layers.pooling import AveragePooling1D
from tensorflow.python.keras.layers.pooling import AveragePooling2D
from tensorflow.python.keras.layers.pooling import AveragePooling3D
from tensorflow.python.keras.layers.pooling import MaxPooling1D
from tensorflow.python.keras.layers.pooling import MaxPooling2D
from tensorflow.python.keras.layers.pooling import MaxPooling3D
# pylint: enable=unused-import
from tensorflow.python.keras.utils import conv_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.util.tf_export import keras_export
from upfirdn_2d import *
from layers.other import Dense, normalize_2nd_moment
NOISE_STRENGTH = 0.001
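# Global damping factor applied on top of the learned per-layer noise
# strength before noise is added to the feature maps.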
# ToRGB block.
def torgb(x, y, latents, res_name, is_grouped, style_strength_map=None): # res = 2..resolution_log2
if not is_grouped:
t = ModConv2d(rank=2, sampling=None, filters=3, kernel_size=1, demodulate=False, noise=True, act=None, name=res_name+'/ToRGB')([x, latents[0:1, -1]])
else:
t = ModConv2d_grouped(rank=2, sampling=None, filters=3, kernel_size=1, demodulate=False, noise=True, act=None, name=res_name+'/ToRGB')([x, latents])
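        # collapse the per-style axis by blending with the caller-supplied strength map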
t = tf.reduce_sum(t * style_strength_map, axis=1)
if y is not None:
t += tf.cast(y, t.dtype)
return t
class ModConv2d(Layer):
"""Abstract N-D convolution layer (private, used as implementation base).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
length of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, ...)`.
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` the weights of this layer will be marked as
trainable (and listed in `layer.trainable_weights`).
name: A string, the name of the layer.
"""
def __init__(self, rank,
filters,
kernel_size,
sampling, # [None, 'up', 'down']
strides=1,
act='lrelu',
noise=True,
demodulate=True,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(ModConv2d, self).__init__(
trainable=trainable,
name=name,
activity_regularizer=regularizers.get(activity_regularizer),
**kwargs)
self.rank = rank
self.filters = filters
self.noise = noise
self.demodulate = demodulate
self.act = act
self.kernel_size = conv_utils.normalize_tuple(
kernel_size, rank, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(
dilation_rate, rank, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = [InputSpec(ndim=self.rank + 2), InputSpec(ndim=self.rank)]
self.sampling = sampling
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape[0])
input_channel = self._get_input_channel(input_shape)
kernel_shape = self.kernel_size + (input_channel, self.filters)
self.modulate_style = Dense(units=input_shape[-1], constant_b=0.0, act=None, name='mod_weight')
self.noise_strength = self.add_weight(
name='noise_strength',
shape=1,
initializer=tf.initializers.zeros(),
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=False,
dtype=self.dtype)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
conv_inputs = inputs[0]
style = inputs[1]
weights = self.kernel
he_std = 1.0 / tf.math.sqrt(tf.dtypes.cast(tf.math.reduce_prod(weights.shape[:-1]), tf.float32))
        runtime_coef = he_std * 1.0  # He-std gain times an lr-multiplier (fixed to 1.0 here)
weights = weights*runtime_coef
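        # Modulate: scale the kernel's input-channel axis by the style vector;
        # the +1.0 below keeps modulation neutral at initialization.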
style = self.modulate_style(style) + 1.0
if self.demodulate:
style *= 1 / tf.reduce_max(tf.abs(style)) # Pre-normalize to avoid float16 overflow.
weights = weights*style[0, np.newaxis, np.newaxis, :, np.newaxis]
# Demodulate
        if self.demodulate:
d = tf.math.rsqrt(tf.math.reduce_sum(tf.math.square(weights), axis=[0, 1, 2]) + 1e-8) # [BO] Scaling factor.
weights *= d[np.newaxis, np.newaxis, np.newaxis, :] # [BkkIO] Scale output feature maps.
# Convolve
padding = 0
kernel = self.kernel_size[0]
resample_kernel = [1,3,3,1]
data_format = 'NHWC' #'NCHW'
if self.sampling == 'up':
x = upsample_conv_2d(conv_inputs, weights, data_format=data_format, k=resample_kernel, padding=padding)
elif self.sampling == 'down':
x = conv_downsample_2d(conv_inputs, weights, data_format=data_format, k=resample_kernel, padding=padding)
else:
padding_mode = {0: 'SAME', -(kernel // 2): 'VALID'}[padding]
x = tf.nn.conv2d(conv_inputs, weights, data_format=data_format, strides=[1, 1, 1, 1], padding=padding_mode)
if self.noise:
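            # single-channel noise map, broadcast across channels; only its
            # per-layer strength is learned (and globally damped by NOISE_STRENGTH)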
noise = tf.random.normal([tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], 1], dtype=x.dtype)
x += noise*self.noise_strength*NOISE_STRENGTH
x = nn.bias_add(x, self.bias, data_format=data_format)
if self.act == 'lrelu':
x = tf.nn.leaky_relu(x, alpha=0.2)*tf.math.sqrt(2.0)
elif self.act == 'linear' or self.act is None:
pass
else:
raise ValueError('Activation is unsupported.')
return x
def compute_output_shape(self, input_shape):
input_shape = input_shape[0]
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tensor_shape.TensorShape([input_shape[0]] + new_space +
[self.filters])
else:
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tensor_shape.TensorShape([input_shape[0], self.filters] +
new_space)
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(ModConv2d, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _compute_causal_padding(self):
"""Calculates padding for 'causal' option for 1-d conv layers."""
left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
if self.data_format == 'channels_last':
causal_padding = [[0, 0], [left_pad, 0], [0, 0]]
else:
causal_padding = [[0, 0], [0, 0], [left_pad, 0]]
return causal_padding
def _get_channel_axis(self):
if self.data_format == 'channels_first':
return 1
else:
return -1
def _get_input_channel(self, input_shape):
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
return int(input_shape[channel_axis])
def _get_padding_op(self):
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
if not isinstance(op_padding, (list, tuple)):
op_padding = op_padding.upper()
return op_padding
class ModConv2d_grouped(Layer):
"""Abstract N-D convolution layer (private, used as implementation base).
This layer creates a convolution kernel that is convolved
(actually cross-correlated) with the layer input to produce a tensor of
outputs. If `use_bias` is True (and a `bias_initializer` is provided),
a bias vector is created and added to the outputs. Finally, if
`activation` is not `None`, it is applied to the outputs as well.
Arguments:
rank: An integer, the rank of the convolution, e.g. "2" for 2D convolution.
filters: Integer, the dimensionality of the output space (i.e. the number
of filters in the convolution).
kernel_size: An integer or tuple/list of n integers, specifying the
length of the convolution window.
strides: An integer or tuple/list of n integers,
specifying the stride length of the convolution.
Specifying any stride value != 1 is incompatible with specifying
any `dilation_rate` value != 1.
padding: One of `"valid"`, `"same"`, or `"causal"` (case-insensitive).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, ...)`.
dilation_rate: An integer or tuple/list of n integers, specifying
the dilation rate to use for dilated convolution.
Currently, specifying any `dilation_rate` value != 1 is
incompatible with specifying any `strides` value != 1.
activation: Activation function. Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: An initializer for the convolution kernel.
bias_initializer: An initializer for the bias vector. If None, the default
initializer will be used.
kernel_regularizer: Optional regularizer for the convolution kernel.
bias_regularizer: Optional regularizer for the bias vector.
activity_regularizer: Optional regularizer function for the output.
kernel_constraint: Optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: Optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` the weights of this layer will be marked as
trainable (and listed in `layer.trainable_weights`).
name: A string, the name of the layer.
"""
def __init__(self, rank,
filters,
kernel_size,
sampling, # [None, 'up', 'down']
strides=1,
act='lrelu',
noise=True,
demodulate=True,
padding='valid',
data_format='channels_last',
dilation_rate=1,
activation=None,
use_bias=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(ModConv2d_grouped, self).__init__(
trainable=trainable,
name=name,
activity_regularizer=regularizers.get(activity_regularizer),
**kwargs)
self.rank = rank
self.filters = filters
self.noise = noise
self.demodulate = demodulate
self.act = act
self.kernel_size = conv_utils.normalize_tuple(
kernel_size, rank, 'kernel_size')
self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
self.padding = conv_utils.normalize_padding(padding)
self.data_format = conv_utils.normalize_data_format(data_format)
self.dilation_rate = conv_utils.normalize_tuple(
dilation_rate, rank, 'dilation_rate')
self.activation = activations.get(activation)
self.use_bias = use_bias
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
self.kernel_regularizer = regularizers.get(kernel_regularizer)
self.bias_regularizer = regularizers.get(bias_regularizer)
self.kernel_constraint = constraints.get(kernel_constraint)
self.bias_constraint = constraints.get(bias_constraint)
self.input_spec = [InputSpec(ndim=self.rank + 2), InputSpec(ndim=self.rank + 1)]
self.sampling = sampling
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape[0])
input_channel = self._get_input_channel(input_shape)
kernel_shape = self.kernel_size + (input_channel, self.filters)
self.modulate_style = Dense(units=input_shape[-1], constant_b=0.0, act=None, name='mod_weight')
self.noise_strength = self.add_weight(
name='noise_strength',
shape=1,
initializer=tf.initializers.zeros(),
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=False,
dtype=self.dtype)
self.kernel = self.add_weight(
name='kernel',
shape=kernel_shape,
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
trainable=True,
dtype=self.dtype)
if self.use_bias:
self.bias = self.add_weight(
name='bias',
shape=(self.filters,),
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
trainable=True,
dtype=self.dtype)
else:
self.bias = None
self.built = True
def call(self, inputs):
conv_inputs = inputs[0]
style = inputs[1][0]
weights = self.kernel[np.newaxis]
he_std = 1.0 / tf.math.sqrt(tf.dtypes.cast(tf.math.reduce_prod(weights.shape[:-1]), tf.float32))
runtime_coef = he_std * 1.0
weights = weights*runtime_coef
# Modulate.
style = self.modulate_style(style) + 1.0
        if self.demodulate:
style *= 1 / tf.reduce_max(tf.abs(style), axis=1, keepdims=True) # Pre-normalize to avoid float16 overflow.
weights = weights*style[:, np.newaxis, np.newaxis, :, np.newaxis]
# Demodulate
        if self.demodulate:
d = tf.math.rsqrt(tf.math.reduce_sum(tf.math.square(weights), axis=[1, 2, 3], keepdims=True) + 1e-8) # [BO] Scaling factor.
weights *= d # [BkkIO] Scale output feature maps.
# print("conv_inputs before reshaping", conv_inputs)
# conv_inputs = tf.reshape(conv_inputs, [1, -1, conv_inputs.shape[2], conv_inputs.shape[3]]) # Fused => reshape minibatch to convolution groups.
# print("conv_inputs after reshaping", conv_inputs)
# print('weights before reshaping: ', weights)
weights = tf.reshape(tf.transpose(weights, [1, 2, 3, 0, 4]), [weights.shape[1], weights.shape[2], weights.shape[3], -1])
# print('weights after reshaping: ', weights)
# Convolve
padding = 0
kernel = self.kernel_size[0]
resample_kernel = [1,3,3,1]
data_format = 'NHWC' #'NCHW'
if self.sampling == 'up':
x = upsample_conv_2d_grouped(conv_inputs, weights, data_format=data_format, k=resample_kernel, padding=padding)
else:
padding_mode = {0: 'SAME', -(kernel // 2): 'VALID'}[padding]
x = tf.nn.conv2d(conv_inputs, weights, data_format=data_format, strides=[1, 1, 1, 1], padding=padding_mode)
out_shape = [-1,
inputs[0].shape[1] * 2 if self.sampling == 'up' else inputs[0].shape[1],
inputs[0].shape[2] * 2 if self.sampling == 'up' else inputs[0].shape[2],
style.shape[0],
self.filters,
]
        x = tf.reshape(x, out_shape)  # un-group: split convolution groups back into the minibatch
        x = tf.transpose(x, [0, 3, 1, 2, 4])  # -> [batch, n_styles, H, W, filters]
if self.noise:
noise = tf.random.normal([tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], 1, 1], dtype=x.dtype)
x += noise*self.noise_strength*NOISE_STRENGTH
x = nn.bias_add(x, self.bias, data_format=data_format)
if self.act == 'lrelu':
x = tf.nn.leaky_relu(x, alpha=0.2)*tf.math.sqrt(2.0)
elif self.act == 'linear' or self.act is None:
pass
else:
raise ValueError('Activation is unsupported.')
return x
def compute_output_shape(self, input_shape):
input_shape = input_shape[0]
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if self.data_format == 'channels_last':
space = input_shape[1:-1]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tensor_shape.TensorShape([input_shape[0]] + new_space +
[self.filters])
else:
space = input_shape[2:]
new_space = []
for i in range(len(space)):
new_dim = conv_utils.conv_output_length(
space[i],
self.kernel_size[i],
padding=self.padding,
stride=self.strides[i],
dilation=self.dilation_rate[i])
new_space.append(new_dim)
return tensor_shape.TensorShape([input_shape[0], self.filters] +
new_space)
def get_config(self):
config = {
'filters': self.filters,
'kernel_size': self.kernel_size,
'strides': self.strides,
'padding': self.padding,
'data_format': self.data_format,
'dilation_rate': self.dilation_rate,
'activation': activations.serialize(self.activation),
'use_bias': self.use_bias,
'kernel_initializer': initializers.serialize(self.kernel_initializer),
'bias_initializer': initializers.serialize(self.bias_initializer),
'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
'bias_regularizer': regularizers.serialize(self.bias_regularizer),
'activity_regularizer':
regularizers.serialize(self.activity_regularizer),
'kernel_constraint': constraints.serialize(self.kernel_constraint),
'bias_constraint': constraints.serialize(self.bias_constraint)
}
base_config = super(ModConv2d_grouped, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def _compute_causal_padding(self):
"""Calculates padding for 'causal' option for 1-d conv layers."""
left_pad = self.dilation_rate[0] * (self.kernel_size[0] - 1)
if self.data_format == 'channels_last':
causal_padding = [[0, 0], [left_pad, 0], [0, 0]]
else:
causal_padding = [[0, 0], [0, 0], [left_pad, 0]]
return causal_padding
def _get_channel_axis(self):
if self.data_format == 'channels_first':
return 1
else:
return -1
def _get_input_channel(self, input_shape):
channel_axis = self._get_channel_axis()
if input_shape.dims[channel_axis].value is None:
raise ValueError('The channel dimension of the inputs '
'should be defined. Found `None`.')
return int(input_shape[channel_axis])
def _get_padding_op(self):
if self.padding == 'causal':
op_padding = 'valid'
else:
op_padding = self.padding
if not isinstance(op_padding, (list, tuple)):
op_padding = op_padding.upper()
return op_padding
| 1.765625 | 2 |
tests/examples/test_ga_onemax.py | jorgetavares/pygenome | 1 | 67972 | <filename>tests/examples/test_ga_onemax.py<gh_stars>1-10
from examples.ga_onemax import *
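
# NOTE: the expected stdout blobs below are byte-exact; they presumably rely
# on a fixed RNG seed inside examples.ga_onemax, so any change to the seed or
# to the variation operators will break these tests.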
stdout1 = """0 0.03125 0.040781302009688776 0.005561455106895243
1 0.029411764705882353 0.038091861610110966 0.00450622600604616
2 0.027777777777777776 0.03547228263920674 0.00378636872370032
3 0.027777777777777776 0.03383622027764222 0.003792657676482543
4 0.027777777777777776 0.03153144879877114 0.0024635362886572016
5 0.023809523809523808 0.030139024953537853 0.00243139455636538
6 0.023809523809523808 0.02894388940095638 0.0026576983008068868
7 0.023809523809523808 0.02740366108153792 0.001826310539891214
8 0.023255813953488372 0.026444187681938076 0.0015349653277309185
9 0.022727272727272728 0.026012870101612462 0.001608538168134231
10 0.022222222222222223 0.025314390354864127 0.0013064223948593403
11 0.022222222222222223 0.02475279874881244 0.0014170379402423956
12 0.02127659574468085 0.024026041106628093 0.0013427418981510168
13 0.02127659574468085 0.0233757082989196 0.0012006885907910165
14 0.020833333333333332 0.02285467855630095 0.0010185863389449473
15 0.02 0.022430398717967374 0.0008704333997032909
16 0.02 0.021960350829972216 0.0008949697776471712
17 0.02 0.021653716984652648 0.0007101590492949621
18 0.02 0.021357860050448662 0.000618545520306597
19 0.02 0.02111156184156859 0.0006393216238278883
fitness: 0.02 genotype: [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1]
"""
stdout2 = """0 0.03125 0.040781302009688776 0.005561455106895243
1 0.029411764705882353 0.03805953402390407 0.004543443870432798
2 0.027777777777777776 0.03540714856298931 0.004003729039375085
3 0.027777777777777776 0.03338954942095523 0.0032194506876552006
4 0.025 0.031759630111261566 0.0024138303577230054
5 0.024390243902439025 0.030465830885796095 0.0024506760085341995
6 0.024390243902439025 0.029748169948644855 0.002240931141267259
7 0.023255813953488372 0.02878293987059667 0.0021429324656018814
8 0.023255813953488372 0.027502249635484855 0.0019123397613806427
9 0.023255813953488372 0.02645550691596207 0.0018288473202157202
10 0.022222222222222223 0.025367100470720813 0.0015199281190212102
11 0.022222222222222223 0.02461907262781222 0.0013499923545325775
12 0.021739130434782608 0.024065322825332153 0.0011729776361822577
13 0.02127659574468085 0.023461332182942187 0.0010670316985843752
14 0.02127659574468085 0.02289507628617888 0.0009396105298584204
15 0.020833333333333332 0.022522016567904247 0.0008268338171416158
16 0.02040816326530612 0.022136339976635826 0.0007804385336199252
17 0.02 0.021817609951539876 0.0008327779489794365
18 0.02 0.02147309566758398 0.0007178798030896314
19 0.02 0.021207604712420763 0.0006741417655733425
fitness: 0.02 genotype: [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
1 1 1 1 1 1 1 1 1 1 1 1 1]
"""
stdout3 = """0 0.03125 0.040781302009688776 0.005561455106895243
1 0.03125 0.04076591739430416 0.0055657214195924745
2 0.03125 0.040622042750429514 0.005507164564763866
3 0.03125 0.04062719572949554 0.005525504386535372
4 0.03125 0.040572560348773205 0.0054984646902999194
5 0.03125 0.04034090492142615 0.005390384072367683
6 0.03125 0.0402046536026411 0.005419778941160164
7 0.03125 0.04013739327676008 0.0054623937675048004
8 0.03125 0.04007443031379712 0.005338017400020037
9 0.03125 0.04008458848257598 0.005319762685780308
10 0.03125 0.04010683542362159 0.005291563125455103
11 0.03125 0.0400226603394465 0.005272742309413064
12 0.03125 0.04005228996907613 0.0052641997698029625
13 0.03125 0.03998787773075085 0.005261292143217217
14 0.03125 0.03984664065698004 0.005316203083084664
15 0.03125 0.03988885010781018 0.005299466931565621
16 0.03125 0.03976902252160328 0.005410194445432538
17 0.03125 0.03968429308554145 0.005435280260514713
18 0.03125 0.03951577456702293 0.005206081471009931
19 0.03125 0.03935236039498807 0.005161212358951413
20 0.03125 0.03943844448969286 0.005138235191023557
21 0.03125 0.03921225401350238 0.005069181577556729
22 0.03125 0.039104984632319956 0.0050662190117403486
23 0.03125 0.03912662965396498 0.005097826009881623
24 0.03125 0.039124976214811535 0.00504608772719245
25 0.03125 0.03907867991851524 0.005043793314564134
26 0.03125 0.03904662863646396 0.005037425492953105
27 0.03125 0.03907867991851524 0.005043793314564134
28 0.03125 0.03896815036186499 0.004962528399866003
29 0.03125 0.03900793816027348 0.004942315673450242
30 0.03125 0.03895114567739404 0.00488617726647651
31 0.03125 0.03895768754534895 0.00495978158921641
32 0.03125 0.0388843550359764 0.005016909124594712
33 0.03125 0.03876251595551663 0.0050627579498394104
34 0.029411764705882353 0.03871180601636856 0.00513031663950765
35 0.029411764705882353 0.03877376323220508 0.005091848287170193
36 0.029411764705882353 0.03876419872264057 0.005108817760699437
37 0.029411764705882353 0.0385806766181466 0.0051765377035178336
38 0.029411764705882353 0.03857604469093403 0.00516256817573713
39 0.029411764705882353 0.03840009933028102 0.004911792561082685
40 0.029411764705882353 0.038435941798657225 0.004981197894576194
41 0.029411764705882353 0.03853501357514836 0.004917322984771472
42 0.029411764705882353 0.038551071591206375 0.004878023381996317
43 0.029411764705882353 0.03850821444834923 0.004883928233132197
44 0.029411764705882353 0.03848615317628796 0.0049545125688823305
45 0.029411764705882353 0.0383247775149123 0.004630315762456711
46 0.029411764705882353 0.03835051678203087 0.004617491935241724
47 0.029411764705882353 0.03835225004238483 0.004622889754411538
48 0.029411764705882353 0.03829598890156486 0.00461175402246592
49 0.029411764705882353 0.038335183040759 0.004595201177906614
50 0.029411764705882353 0.03834498738176467 0.004631145416784666
51 0.029411764705882353 0.03824933877749539 0.0046539394757127055
52 0.029411764705882353 0.03820648163463824 0.004657352542172028
53 0.029411764705882353 0.03811561436985793 0.004638459235855089
54 0.029411764705882353 0.038042281860485375 0.004686336005487959
55 0.029411764705882353 0.03805401594480011 0.004728978747106032
56 0.029411764705882353 0.037953681496639566 0.004665623840500757
57 0.029411764705882353 0.03790496354792162 0.004563527561674136
58 0.029411764705882353 0.037946760831098214 0.0045125666497372
59 0.029411764705882353 0.03786342749776488 0.0045200413120227495
60 0.029411764705882353 0.03787299200732939 0.004502766305273224
61 0.029411764705882353 0.03772479343171146 0.004577197733181666
62 0.029411764705882353 0.03756110295552098 0.004590515018741406
63 0.029411764705882353 0.0375312087756268 0.004581945112910876
64 0.02857142857142857 0.037414501408919436 0.004649577397769239
65 0.02857142857142857 0.03745978020419824 0.004664641840042881
66 0.02857142857142857 0.03744054943496747 0.004699542302834623
67 0.029411764705882353 0.037476425323784535 0.00468220417022404
68 0.029411764705882353 0.03743274716286499 0.00467600604283492
69 0.029411764705882353 0.037407749460447934 0.004685628116342981
70 0.029411764705882353 0.037317251722891374 0.0047513339193878245
71 0.029411764705882353 0.0371447649504046 0.004644532507889975
72 0.029411764705882353 0.03723060921366822 0.004606305036740161
73 0.029411764705882353 0.03719333234519001 0.004568490742026526
74 0.029411764705882353 0.037072499011856684 0.004570371800346553
75 0.029411764705882353 0.03725888669824436 0.00463564051398577
76 0.029411764705882353 0.037377996564413055 0.004572851829122976
77 0.029411764705882353 0.037349533566310586 0.004613310651506204
78 0.029411764705882353 0.037411568305764684 0.004586057330254607
79 0.029411764705882353 0.03731581914751553 0.0045754960654208286
80 0.029411764705882353 0.03728618951788589 0.004567604980281492
81 0.029411764705882353 0.03715757738927377 0.004466345151557656
82 0.029411764705882353 0.03718413767445474 0.0044624741750795074
83 0.029411764705882353 0.037248896144892726 0.004402375850256563
84 0.029411764705882353 0.037142274455737305 0.0043518420263156665
85 0.029411764705882353 0.03697717037183455 0.004176873824141705
86 0.029411764705882353 0.03691124526986941 0.004183622694639944
87 0.029411764705882353 0.03684504480366894 0.004240558604486811
88 0.029411764705882353 0.03679626460782992 0.004306445175176725
89 0.029411764705882353 0.03680857987876588 0.004301569721001867
90 0.029411764705882353 0.03678527172037708 0.004314663123220094
91 0.029411764705882353 0.03669090939471609 0.004363215285960536
92 0.029411764705882353 0.0367041369079436 0.004362239421426741
93 0.029411764705882353 0.03664874409743641 0.004292859713942862
94 0.029411764705882353 0.036647250668523625 0.004343984692202518
95 0.029411764705882353 0.03642403638280934 0.004202931967059495
96 0.029411764705882353 0.036346617027970624 0.004207650344462092
97 0.029411764705882353 0.03613744536746657 0.004189380043502483
98 0.029411764705882353 0.036101821938141807 0.00420205885430731
99 0.029411764705882353 0.03602970655352642 0.004222773629600652
100 0.029411764705882353 0.03592388644770632 0.004060497274442531
101 0.029411764705882353 0.03595217999324124 0.004071462517889265
102 0.029411764705882353 0.03592819979184169 0.004078346489282658
103 0.029411764705882353 0.035853086669669736 0.004133926322621262
104 0.029411764705882353 0.03584505137280204 0.004094286794650818
105 0.029411764705882353 0.035880376696565505 0.004032672462065162
106 0.029411764705882353 0.03591315319426135 0.004063742414963224
107 0.029411764705882353 0.0357711077397159 0.00397503381350392
108 0.029411764705882353 0.035768356978344455 0.003962536919820571
109 0.029411764705882353 0.03567339676614286 0.003933717881361394
110 0.029411764705882353 0.035622114714860806 0.003930461569449365
111 0.029411764705882353 0.03558141467416077 0.003917447924549628
112 0.029411764705882353 0.03561374226036766 0.0038948442958828483
113 0.029411764705882353 0.03554190317990789 0.003848514458455151
114 0.029411764705882353 0.03561523568928045 0.0038381428516082675
115 0.029411764705882353 0.03556268571793181 0.0038805391979271028
116 0.029411764705882353 0.035620556088302176 0.003858883261254072
117 0.029411764705882353 0.03558076828989369 0.0038498871382687747
118 0.029411764705882353 0.03547102134884809 0.003860076863019065
119 0.029411764705882353 0.035436682268388316 0.0038172229775300655
120 0.029411764705882353 0.03537120607791212 0.003856482710039714
121 0.029411764705882353 0.03528586318006923 0.0038617281680746883
122 0.029411764705882353 0.035275782534907936 0.0038709235452645437
123 0.029411764705882353 0.035280937374621596 0.0038435826251394536
124 0.02857142857142857 0.03525930650004955 0.0038534921506053663
125 0.027777777777777776 0.03522458427782733 0.003904764199816617
126 0.027777777777777776 0.035022273816896175 0.003832353761110754
127 0.027777777777777776 0.034827144152127044 0.003559835915024719
128 0.027777777777777776 0.03472864448776989 0.0036016624391633514
129 0.027777777777777776 0.03472156387774811 0.0035043721635600886
130 0.027777777777777776 0.03465311149679572 0.0035207979205571158
131 0.027777777777777776 0.0346293019729862 0.0035215917339726003
132 0.027777777777777776 0.03459457975076398 0.003571503846812583
133 0.027777777777777776 0.03453844264074756 0.0035871430778440278
134 0.027777777777777776 0.03458399774018197 0.0035785494716660954
135 0.027777777777777776 0.034554977701081284 0.0036064459984184467
136 0.027777777777777776 0.034235458984503746 0.00323394946738343
137 0.027777777777777776 0.03422396473163018 0.003235092545128326
138 0.027777777777777776 0.03420949428828043 0.003273869605456437
139 0.027777777777777776 0.03419403214927824 0.003254116560130402
140 0.027777777777777776 0.03415162391791005 0.0032745242857899013
141 0.027777777777777776 0.0342087126624988 0.003217240720355445
142 0.027777777777777776 0.0342087126624988 0.003217240720355445
143 0.027777777777777776 0.034191320780187565 0.003184752436255108
144 0.027777777777777776 0.034036864125730905 0.0032643911269782584
145 0.027777777777777776 0.03390271400166143 0.003218074845557746
146 0.027777777777777776 0.033818057917005345 0.0032457395603201866
147 0.027777777777777776 0.03377779382286194 0.003268686844476118
148 0.027777777777777776 0.03374749079255891 0.003286663928290463
149 0.027777777777777776 0.033735324496674594 0.0032830776385959176
150 0.027777777777777776 0.033504161797481204 0.0031506200813113897
151 0.027777777777777776 0.03348332846414787 0.003158558769349625
152 0.027777777777777776 0.033526568748767464 0.0030999246087808903
153 0.027777777777777776 0.03346154349624222 0.00316030081196644
154 0.027777777777777776 0.03342091420225543 0.003178836757681714
155 0.027777777777777776 0.03336680164814288 0.003185387171859075
156 0.027777777777777776 0.03339245120415296 0.003201781315556209
157 0.02702702702702703 0.03342415938291997 0.003190911336279097
158 0.02702702702702703 0.033372281454582084 0.0032092216966512163
159 0.02702702702702703 0.033300852883153514 0.0032356813659525745
160 0.02702702702702703 0.03321699433973615 0.003255403901293605
161 0.027777777777777776 0.03326856314992565 0.0031931312795011164
162 0.027777777777777776 0.033162297655222016 0.0032588719730904867
163 0.027777777777777776 0.03317671334705706 0.0033321389733581017
164 0.027777777777777776 0.03309570037760596 0.0033620846391585702
165 0.027777777777777776 0.033067631672117896 0.003381712170645736
166 0.027777777777777776 0.03307771231727919 0.0033777791115661075
167 0.027777777777777776 0.03297205222169974 0.0033353899583604013
168 0.027777777777777776 0.03290062365027117 0.0033523395671306525
169 0.027777777777777776 0.03280394255359007 0.0033958866711734062
170 0.027777777777777776 0.032762145270413476 0.003400685321747771
171 0.027777777777777776 0.03276030523821292 0.0033929570661628797
172 0.027777777777777776 0.032687632812599315 0.0033452163657807653
173 0.027777777777777776 0.0325664847726526 0.0033138297659914503
174 0.02702702702702703 0.032502680144888024 0.0033544687941013283
175 0.02702702702702703 0.03239409983536958 0.0033327300870295787
176 0.02702702702702703 0.032357062798332545 0.0033013589668393216
177 0.027777777777777776 0.032392057155907546 0.003273357815197512
178 0.027777777777777776 0.0323320824084328 0.003328726877076974
179 0.027777777777777776 0.03221470580261806 0.0032446462457340615
180 0.027777777777777776 0.03221342281141571 0.00323851735811994
181 0.027777777777777776 0.03207602735143961 0.003260356365938651
182 0.02702702702702703 0.032014407289819545 0.0032550525488685205
183 0.02702702702702703 0.03194672860544257 0.0032723496623143747
184 0.02702702702702703 0.031909352951368625 0.0032874624668255393
185 0.02702702702702703 0.031909352951368625 0.0032874624668255393
186 0.02702702702702703 0.03200194554396121 0.0033001618731900856
187 0.02702702702702703 0.031925825645260676 0.0033184375182541037
188 0.02702702702702703 0.03188055592446913 0.0033422610548507936
189 0.02702702702702703 0.03196196348749434 0.003345614146100431
190 0.02702702702702703 0.0319227099482408 0.0033463680631940817
191 0.02702702702702703 0.031987178159767836 0.0033174240280473624
192 0.02702702702702703 0.03201641485444571 0.003311614185069456
193 0.02702702702702703 0.03196085929889016 0.0033355688856440712
194 0.02702702702702703 0.03186499720463684 0.0031114635289667385
195 0.02702702702702703 0.03179047774167924 0.0030888021593992667
196 0.02702702702702703 0.03181795026915177 0.003135444443992817
197 0.02702702702702703 0.03177739591609741 0.0030785962722787432
198 0.02702702702702703 0.03175840261496465 0.0030841676144967778
199 0.02702702702702703 0.03170284705940909 0.003105262167325866
fitness: 0.02702702702702703 genotype: [1 1 1 0 1 0 1 1 1 1 1 1 1 1 1 1 0 1 0 1 1 1 1 0 1 1 0 1 1 1 0 1 1 1 1 0 0
0 1 1 1 1 1 0 1 1 0 0 1 1]
"""
stdout4 = """0 0.03125 0.040781302009688776 0.005561455106895243
1 0.03125 0.04073358980809726 0.005611323894099167
2 0.03125 0.04053184479385226 0.005618896584066156
3 0.03125 0.04052943212031889 0.005707430033203208
4 0.03125 0.04031885132049323 0.005557155703438686
5 0.03125 0.04017681326595518 0.005550182390644313
6 0.03125 0.04007237029513291 0.005659834969094889
7 0.03125 0.03974061704954726 0.005675526261668921
8 0.03125 0.039619906293755856 0.005681785878437178
9 0.03125 0.03949760425033312 0.005732395162757329
10 0.03125 0.03957740110404302 0.005739659294499826
11 0.02857142857142857 0.03941847253261445 0.0058883086220128
12 0.02857142857142857 0.03946380220294412 0.005875490246256851
13 0.02857142857142857 0.03929806721329609 0.0059559672395683464
14 0.02857142857142857 0.0391270507297796 0.006101526459694825
15 0.02857142857142857 0.03888532756708457 0.006241677313710797
16 0.02857142857142857 0.038668706673044326 0.0063205121898296185
17 0.02857142857142857 0.03856764847198612 0.006393733325621937
18 0.02857142857142857 0.038311677123303065 0.006540601588919424
19 0.02857142857142857 0.038227021038646974 0.006610959070916546
20 0.02857142857142857 0.03792378604899895 0.006703191685388337
21 0.02857142857142857 0.03771059923581213 0.006825735384647775
22 0.02857142857142857 0.037652096883511114 0.006879646149613075
23 0.02857142857142857 0.03756052179193602 0.006806930000633567
24 0.02857142857142857 0.0372530984033113 0.006616244897318855
25 0.02857142857142857 0.037071472621685524 0.006700935418616673
26 0.027777777777777776 0.036894769026183265 0.00681638917178459
27 0.027777777777777776 0.03679332768336261 0.006737740324559595
28 0.027777777777777776 0.03656592636604193 0.006718795651361971
29 0.027777777777777776 0.03636299983863723 0.0067182066098195655
30 0.027777777777777776 0.03615974957582815 0.006784624195964068
31 0.027777777777777776 0.036001067461093515 0.006792736244762245
32 0.02631578947368421 0.03585705404487707 0.006886008196113005
33 0.02631578947368421 0.03571511934702132 0.006980966601862108
34 0.02631578947368421 0.03558208144338916 0.006975671272659737
35 0.02631578947368421 0.035523269641156305 0.007029830564216234
36 0.02631578947368421 0.035352630520894796 0.007109006244468943
37 0.02631578947368421 0.03525904015206289 0.00716713853183638
38 0.02631578947368421 0.035003150427752115 0.007261733438238009
39 0.02631578947368421 0.0350878065124082 0.007235558192867908
40 0.02631578947368421 0.03506031966234071 0.007253354166028904
41 0.02631578947368421 0.03497496379956549 0.007248977861899415
42 0.02631578947368421 0.03481186508054571 0.006966668179409498
43 0.02631578947368421 0.034732893345652924 0.006993106579244648
44 0.02631578947368421 0.03457633876475624 0.0070796600767005675
45 0.02564102564102564 0.03445953473545221 0.007096635586395174
46 0.02564102564102564 0.03424201890301702 0.007115120569419857
47 0.02564102564102564 0.03400009280129056 0.007168165801585374
48 0.02564102564102564 0.033670742784440544 0.00711148149312442
49 0.025 0.033436412300110054 0.007189450666750213
50 0.025 0.033447997635998795 0.007187477871410633
51 0.025 0.03322871184413236 0.007249243312145341
52 0.025 0.0330933317271733 0.00725059006156153
53 0.025 0.03295272671814723 0.007308861341128651
54 0.025 0.03287930504178438 0.007366395988270292
55 0.025 0.03278447745557748 0.007406073280120626
56 0.025 0.032792687082208165 0.007401201055329391
57 0.025 0.03268001985812789 0.0073843136398220365
58 0.025 0.03230008721972339 0.007245850271969602
59 0.024390243902439025 0.03214398965874778 0.007246430111445265
60 0.024390243902439025 0.032015866004383844 0.0072681425037237355
61 0.024390243902439025 0.031883485452205894 0.00733690135935937
62 0.024390243902439025 0.031816880996164566 0.007376902910834809
63 0.024390243902439025 0.03146209328162075 0.007177437385924868
64 0.024390243902439025 0.030962607235877355 0.0068235487821863006
65 0.024390243902439025 0.030309716409687423 0.006138468195036998
66 0.024390243902439025 0.029989853666525578 0.0056202135596107525
67 0.024390243902439025 0.029909236021992244 0.0056570168868638054
68 0.024390243902439025 0.029714197729741806 0.005507420236007412
69 0.024390243902439025 0.029641209851875877 0.005540760460082632
70 0.024390243902439025 0.029590994643841448 0.005565312649992668
71 0.024390243902439025 0.029540284704693373 0.00554356934473582
72 0.023809523809523808 0.0294674131318218 0.005574058368394143
73 0.023809523809523808 0.029395570937021352 0.005560822242024083
74 0.023809523809523808 0.029263551712821353 0.005507388169792296
75 0.023809523809523808 0.029057202506472146 0.0054044000172213495
76 0.023809523809523808 0.02892271087198051 0.005390781173143104
77 0.023809523809523808 0.028937998073724706 0.005386883261199939
78 0.023809523809523808 0.02888072253192904 0.005408732109380401
79 0.023809523809523808 0.02888038515001271 0.00540813338837751
80 0.023809523809523808 0.028692779962407523 0.005274723961496633
81 0.023809523809523808 0.02846658948621705 0.005154135225991897
82 0.023809523809523808 0.028378157592907106 0.005173921018975657
83 0.023809523809523808 0.028293672185841053 0.005178852024892103
84 0.023809523809523808 0.028122237340493163 0.004972342571789223
85 0.023809523809523808 0.02806820803646386 0.004994113363054957
86 0.023809523809523808 0.02806820803646386 0.004994113363054957
87 0.023809523809523808 0.02794674041499624 0.004918427003520906
88 0.023809523809523808 0.027886667587220818 0.004900505036397889
89 0.023809523809523808 0.0279223818729351 0.004892344487055345
90 0.023809523809523808 0.027903729472703757 0.004903536886326738
91 0.023809523809523808 0.027929532101447562 0.004909059012466715
92 0.023809523809523808 0.027844709312465177 0.004905805768405714
93 0.023809523809523808 0.027857217129851043 0.0048985714954845805
94 0.023809523809523808 0.027794784464477206 0.004915719554580067
95 0.023809523809523808 0.027662182081874816 0.004809379264527122
96 0.023809523809523808 0.027541684523877265 0.0048284192202885335
97 0.023809523809523808 0.027499010963501106 0.004810399870685516
98 0.023809523809523808 0.02735829801791011 0.004692016732470327
99 0.023809523809523808 0.027302208274320366 0.004678665596025819
100 0.023809523809523808 0.027162435766126806 0.004552932274250579
101 0.023809523809523808 0.027151233123345216 0.004561946442992086
102 0.023255813953488372 0.026918788873964664 0.004289242904213266
103 0.023255813953488372 0.026900743958018447 0.004299114587977271
104 0.023255813953488372 0.026793565879179342 0.004118194101992821
105 0.023255813953488372 0.026737543470215758 0.004120300907841031
106 0.023255813953488372 0.02677100203810976 0.004163631467732847
107 0.023255813953488372 0.026721347292490583 0.004163033936033469
108 0.023255813953488372 0.02671581019393023 0.004167269243327534
109 0.023255813953488372 0.02657680780408536 0.00417039084574992
110 0.023255813953488372 0.026560934788212344 0.004165409033319495
111 0.023255813953488372 0.02656301848582848 0.004170603683923384
112 0.023255813953488372 0.026610493881282317 0.00421484207235389
113 0.023255813953488372 0.026628808899597334 0.004206602264090147
114 0.023255813953488372 0.026584159261924437 0.0041969238651043605
115 0.023255813953488372 0.02658536980169067 0.00419997458125932
116 0.023255813953488372 0.026472222992579425 0.004215769526466076
117 0.023255813953488372 0.02645352799928223 0.004222125778576367
118 0.023255813953488372 0.026320351341118257 0.004090358032511461
119 0.023255813953488372 0.026302639378284347 0.00409903815479723
120 0.023255813953488372 0.026229175108855644 0.004103421645792556
121 0.023255813953488372 0.026271679625962364 0.004087112723481286
122 0.023255813953488372 0.02626493198763578 0.004087591304364971
123 0.023255813953488372 0.0262463266092743 0.004093708892816499
124 0.023255813953488372 0.02623594508257042 0.0041031299227546085
125 0.023255813953488372 0.026190075366976547 0.0041089287510517845
126 0.023255813953488372 0.026135623494899216 0.004129086542967953
127 0.023255813953488372 0.026205905789783716 0.004115175203514505
128 0.023255813953488372 0.026193308357462128 0.0041182653342320494
129 0.023255813953488372 0.025839045910176425 0.003477341432928507
130 0.023255813953488372 0.025808446154974466 0.0034864631254768698
131 0.023255813953488372 0.02573132158692534 0.0034937543640718286
132 0.023255813953488372 0.02578365360070166 0.0034924993721620577
133 0.023255813953488372 0.025728410740580746 0.0034834736685537
134 0.023255813953488372 0.025675772463750756 0.0035059864473457867
135 0.023255813953488372 0.02554115707913537 0.003262457391280839
136 0.023255813953488372 0.02549593744089248 0.0032624816665789286
137 0.023255813953488372 0.02547668198518003 0.003263268278986877
138 0.023255813953488372 0.025450604513980603 0.003262279428949987
139 0.023255813953488372 0.025469859969693056 0.003261646532635847
140 0.023255813953488372 0.025464052768763904 0.0032640796303695994
141 0.023255813953488372 0.025452708469274398 0.0032697583507197816
142 0.022727272727272728 0.02534261432019256 0.0032621280248064798
143 0.022727272727272728 0.025310961076684527 0.0032628251123270014
144 0.022727272727272728 0.025268708245875997 0.0032715108250115127
145 0.022727272727272728 0.025197252444862813 0.00319915986612111
146 0.022727272727272728 0.025157022302533968 0.003213736996065187
147 0.022727272727272728 0.02514619979171146 0.003220072617385478
148 0.022727272727272728 0.025114389734503608 0.003217563197988547
149 0.022727272727272728 0.025078504567039497 0.0032239688042125085
150 0.022727272727272728 0.02506685149143293 0.003227200421439853
151 0.022727272727272728 0.024913102015102807 0.0031401751832950456
152 0.022727272727272728 0.024918639113663165 0.0031377352236427146
153 0.022727272727272728 0.024871186566210617 0.003145316514677488
154 0.022727272727272728 0.0247821452525671 0.0031303927692320336
155 0.022727272727272728 0.024751797183663337 0.003131023043946763
156 0.022727272727272728 0.024734355323198228 0.0031344480686555398
157 0.022727272727272728 0.024764955078400184 0.0031348009257341713
158 0.022727272727272728 0.024731695654896857 0.0031473966182713883
159 0.022727272727272728 0.02466243158563279 0.0031346008819375867
160 0.022222222222222223 0.024615958814557814 0.003141382574566584
161 0.022222222222222223 0.024519277717876715 0.003105415519295516
162 0.022222222222222223 0.0245028829133367 0.0031112353636597737
163 0.022222222222222223 0.024462887678219518 0.0031180004659155784
164 0.022222222222222223 0.02443615695636684 0.0031291288953815715
165 0.022222222222222223 0.02443135813761454 0.0031331297745607885
166 0.022222222222222223 0.024439337238616896 0.003140412149256389
167 0.022222222222222223 0.02440873748341493 0.0031368851853292674
168 0.022222222222222223 0.024364213531914237 0.003147253659261851
169 0.022222222222222223 0.02435811597093863 0.00314660657645012
170 0.022222222222222223 0.024308657976358682 0.0031600312059756512
171 0.022222222222222223 0.02425625852605793 0.00313504764815793
172 0.022222222222222223 0.024229292896993604 0.0031436219889505058
173 0.022222222222222223 0.024208134468858434 0.003151748218016044
174 0.022222222222222223 0.024202849056596275 0.0031537833257092194
175 0.022222222222222223 0.024149029002776223 0.0031473309832006865
176 0.022222222222222223 0.024154566101336577 0.0031462415617485957
177 0.022222222222222223 0.0241283571681509 0.0031565107346521915
178 0.022222222222222223 0.02413892799267521 0.0031526836436063556
179 0.022222222222222223 0.024123541570312046 0.0031598933565761657
180 0.022222222222222223 0.024094404041174515 0.0031592000755098874
181 0.022222222222222223 0.0239782424250129 0.0031276457946775855
182 0.022222222222222223 0.023934105037852257 0.003133833954361933
183 0.022222222222222223 0.02383551408632013 0.003095805615909798
184 0.022222222222222223 0.02374288449369054 0.0030588974624381
185 0.022222222222222223 0.02375974911265385 0.0030586455350572798
186 0.022222222222222223 0.023684625405951197 0.003048970855806743
187 0.021739130434782608 0.023674509075814646 0.003052852873040262
188 0.021739130434782608 0.023637175430315575 0.0030593530822678734
189 0.021739130434782608 0.023637395017491684 0.003060049731537424
190 0.021739130434782608 0.023611640578693763 0.003067690138196303
191 0.021739130434782608 0.023567571121580824 0.0030682476009510212
192 0.021739130434782608 0.023530994171960397 0.003076225972762006
193 0.021739130434782608 0.023530994171960397 0.003076225972762006
194 0.021739130434782608 0.02352065825464774 0.003078868461455716
195 0.021739130434782608 0.02348593376225734 0.0030709428788565215
196 0.021739130434782608 0.02346522982850993 0.00307566702127991
197 0.021739130434782608 0.023272081629294998 0.002723000011150271
198 0.021739130434782608 0.02323116035531002 0.002732254763077608
199 0.021739130434782608 0.02323621086036053 0.0027308515244538496
fitness: 0.021739130434782608 genotype: [1 1 0 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 1
1 1 1 1 1 1 1 1 1 1 1 1 1]
"""
def test_generational_no_elitism(capfd):
generational_no_elitism()
out, err = capfd.readouterr()
assert out == stdout1
def test_generational_with_elitism(capfd):
generational_with_elitism()
out, err = capfd.readouterr()
assert out == stdout2
def test_steady_state_no_elitism(capfd):
steady_state_no_elitism()
out, err = capfd.readouterr()
assert out == stdout3
def test_steady_state_with_elitism(capfd):
steady_state_with_elitism()
out, err = capfd.readouterr()
assert out == stdout4
| 0.929688 | 1 |
tools/pythonpkg/setup.py | mweisgut/duckdb | 0 | 68100 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy
import sys
import subprocess
import platform
import shutil
import distutils.spawn
from setuptools import setup, Extension
from setuptools.command.sdist import sdist
from distutils.command.build_ext import build_ext
# some paranoia to start with
# if platform.architecture()[0] != '64bit':
# raise Exception('DuckDB only supports 64 bit at this point')
# make sure we are in the right directory
os.chdir(os.path.dirname(os.path.realpath(__file__)))
ARCHIVE_EXT = 'a'
LIB_PREFIX = 'lib'
if os.name == 'nt':
ARCHIVE_EXT = 'lib'
LIB_PREFIX = 'Release/'
DIR_PREFIX = 'src/duckdb'
if not os.path.exists(DIR_PREFIX):
# this is a build from within the tools/pythonpkg directory
DIR_PREFIX = '../../'
def get_library_name(lib):
return LIB_PREFIX + lib + '.' + ARCHIVE_EXT
DEFAULT_BUILD_DIR = os.path.join(DIR_PREFIX, 'build', 'release_notest')
BUILD_DIR = DEFAULT_BUILD_DIR
if 'DUCKDB_PYTHON_TARGET' in os.environ:
BUILD_DIR = os.environ['DUCKDB_PYTHON_TARGET']
INCLUDE_DIR = os.path.join(DIR_PREFIX, 'src', 'include')
DUCKDB_LIB = os.path.join(BUILD_DIR, 'src', get_library_name('duckdb_static'))
PG_LIB = os.path.join(BUILD_DIR, 'third_party', 'libpg_query', get_library_name('pg_query'))
RE2_LIB = os.path.join(BUILD_DIR, 'third_party', 're2', get_library_name('re2'))
MINIZ_LIB = os.path.join(BUILD_DIR, 'third_party', 'miniz', get_library_name('miniz'))
# wrapper that builds the main DuckDB library first
class CustomBuiltExtCommand(build_ext):
def build_duckdb(self):
cmake_bin = distutils.spawn.find_executable('cmake')
        if cmake_bin is None:
raise Exception('DuckDB needs cmake to build from source')
wd = os.getcwd()
os.chdir(DIR_PREFIX)
if not os.path.exists('build/release_notest'):
os.makedirs('build/release_notest')
os.chdir('build/release_notest')
configcmd = 'cmake -DCMAKE_BUILD_TYPE=Release -DLEAN=1 ../..'
buildcmd = 'cmake --build . --target duckdb_static'
if os.name == 'nt':
if platform.architecture()[0] == '64bit':
configcmd += ' -DCMAKE_GENERATOR_PLATFORM=x64'
buildcmd += ' --config Release'
subprocess.Popen(configcmd.split(' ')).wait()
subprocess.Popen(buildcmd.split(' ')).wait()
os.chdir(wd)
def run(self):
if BUILD_DIR == DEFAULT_BUILD_DIR:
self.build_duckdb()
for library in [DUCKDB_LIB, PG_LIB, RE2_LIB, MINIZ_LIB]:
if not os.path.isfile(library):
raise Exception('Build failed: could not find required library file "%s"' % library)
print(INCLUDE_DIR)
build_ext.run(self)
# create a distributable directory structure
class CustomSdistCommand(sdist):
def run(self):
if os.path.exists('src/duckdb'):
shutil.rmtree('src/duckdb')
if not os.path.exists('src/duckdb/third_party'):
os.makedirs('src/duckdb/third_party')
shutil.copyfile('../../CMakeLists.txt', 'src/duckdb/CMakeLists.txt')
shutil.copyfile('../../third_party/CMakeLists.txt', 'src/duckdb/third_party/CMakeLists.txt')
shutil.copytree('../../src', 'src/duckdb/src')
shutil.copytree('../../third_party/libpg_query', 'src/duckdb/third_party/libpg_query')
shutil.copytree('../../third_party/hyperloglog', 'src/duckdb/third_party/hyperloglog')
shutil.copytree('../../third_party/re2', 'src/duckdb/third_party/re2')
shutil.copytree('../../third_party/miniz', 'src/duckdb/third_party/miniz')
sdist.run(self)
includes = [numpy.get_include(), INCLUDE_DIR, '.']
sources = ['connection.cpp', 'cursor.cpp', 'module.cpp']
toolchain_args = ['-std=c++11', '-Wall']
if platform.system() == 'Darwin':
toolchain_args.extend(['-stdlib=libc++', '-mmacosx-version-min=10.7'])
libduckdb = Extension('duckdb',
include_dirs=includes,
sources=sources,
extra_compile_args=toolchain_args,
extra_link_args=toolchain_args,
language='c++',
extra_objects=[DUCKDB_LIB, PG_LIB, RE2_LIB, MINIZ_LIB])
# Only include pytest-runner in setup_requires if we're invoking tests
if {'pytest', 'test', 'ptr'}.intersection(sys.argv):
setup_requires = ['pytest-runner']
else:
setup_requires = []
setup(
name = "duckdb",
version = '0.1.0',
description = 'DuckDB embedded database',
keywords = 'DuckDB Database SQL OLAP',
url="https://github.com/cwida/duckdb",
long_description = '',
install_requires=[
'numpy>=1.16',
'pandas>=0.24'
],
packages=['duckdb_query_graph'],
include_package_data=True,
setup_requires=setup_requires,
tests_require=['pytest'],
classifiers = [
'Topic :: Database :: Database Engines/Servers',
'Intended Audience :: Developers',
'Development Status :: 3 - Alpha'
],
cmdclass={
'build_ext': CustomBuiltExtCommand,
'sdist': CustomSdistCommand
},
ext_modules = [libduckdb],
maintainer = "<NAME>",
maintainer_email = "<EMAIL>"
)
| 1.289063 | 1 |
src/ftk.py | flatironinstitute/ftk | 3 | 68228 | #!/usr/bin/env python
import numpy as np
from time import time
import pyfftw
from numpy.fft import fft, ifft, fftshift, ifftshift, fft2, ifft2
from scipy.special import jv as besselj
import finufftpy
def translations_brute_force(Shathat, Mhat, cmul_trans):
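    """Computes image-template inner products for every rotation and
    translation in the batch by direct summation over the polar grid.
    Axis labels used in the shape comments: te = template, im = image,
    tr = translation, k = radial grid index, q = angular Fourier mode,
    γ = rotation angle.
    """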
    # Shathat: (te, k, q)
# Mhat: (im, k × γ)
# cmul_trans: (tr, k × γ)
n_trans = cmul_trans.shape[-2]
n_images = Mhat.shape[-2]
Shathat = Shathat.transpose((2, 0, 1))
# Shathat: (q, te, k)
n_templates = Shathat.shape[-2]
ngridr = Shathat.shape[-1]
n_gamma = Shathat.shape[-3]
Mhat = Mhat.reshape((n_images, ngridr, n_gamma))
cmul_trans = cmul_trans.reshape((n_trans, ngridr, n_gamma))
# Mhat: (im, k, γ)
# cmul_trans: (tr, k, γ)
Mhat = Mhat[:, np.newaxis, :, :]
cmul_trans = cmul_trans[np.newaxis, :, :, :]
# Mhat: (im, 1, k, γ)
# cmul_trans: (1, tr, k, γ)
Mhat = Mhat.transpose((3, 2, 0, 1)).copy()
cmul_trans = cmul_trans.transpose((3, 2, 0, 1)).copy()
# Mhat: (γ, k, im, 1)
# cmul_trans: (γ, k, 1, tr)
Mhat_trans = pyfftw.empty_aligned((n_gamma, ngridr, n_images, n_trans),
dtype='complex128')
# Mhat_trans: (γ, k, im × tr)
plan = pyfftw.FFTW(Mhat_trans, Mhat_trans, axes=(0,),
direction='FFTW_FORWARD', flags=('FFTW_ESTIMATE',), threads=12)
tmr_start = time()
np.multiply(Mhat, cmul_trans, out=Mhat_trans)
plan()
Mhathat_trans = Mhat_trans.reshape((n_gamma, ngridr, n_images * n_trans))
# Mhathat_trans: (q, k, im × tr)
ptm = time() - tmr_start
tmr_start = time()
c_n2 = np.zeros((n_gamma, n_templates, n_images*n_trans),
dtype=np.complex128)
# c_n2: (q, te, im × tr)
for k1 in range(n_gamma):
k1p = (k1 + n_gamma // 2) % n_gamma
c_n2[k1, :, :] = np.matmul(np.conj(Shathat[k1p, :, :]), Mhathat_trans[k1, :, :])
c_n2 = 2 * np.pi * c_n2
c_n2 = ifft(c_n2, axis=0)
# c_n2: (γ, te, im × tr)
c_n2 = c_n2.reshape((n_gamma, n_templates, n_images, n_trans))
c_n2 = np.real(c_n2)
# c_n2: (γ, te, im, tr)
tm = time() - tmr_start
return c_n2, ptm, tm
def translations_brute_force_batch(Shathat, Mhat, pf_grid, tr_grid, n_psi,
n_batch_im=None, n_batch_trans=500):
n_templates = Shathat.shape[0]
n_images = Mhat.shape[0]
trans = tr_grid['trans']
n_trans = tr_grid['n_trans']
if n_batch_im is None:
n_batch_im = n_images
n_batch_trans = min(n_batch_trans, n_trans)
zprods1 = np.zeros((n_psi, n_templates, n_images, n_trans))
# zprods1: (γ, te, im, tr)
tm1 = 0
precomp1 = 0
for cn in range(0, n_images, n_batch_im):
idx_im = range(cn, min(cn + n_batch_im, n_images))
for ttt in range(0, n_trans, n_batch_trans):
idx_trans = range(ttt, min(ttt + n_batch_trans, n_trans))
cmul_trans = pft_phase_shift(-trans[idx_trans, :], pf_grid)
# cmul_trans: (tr, k × γ)
tmp, ptm, tm = translations_brute_force(
Shathat, Mhat[idx_im, :], cmul_trans)
zprods1[np.ix_(range(n_psi),
range(n_templates),
idx_im,
idx_trans)] = tmp
precomp1 += ptm
tm1 += tm
zprods1 = zprods1.transpose((2, 1, 0, 3))
return zprods1, precomp1, tm1
def svd_decomposition_alignment(SSS, Mhat, n_bessel, all_rnks, BigMul_left):
ngridr = SSS.shape[-1]
n_templates = SSS.shape[-2]
n_gamma = SSS.shape[-3]
n_images = Mhat.shape[-2]
n_trans = BigMul_left.shape[-1]
tmr_start = time()
Mhathat = Mhat.reshape((n_images, ngridr, n_gamma))
Mhathat = fftshift(fft(Mhathat, axis=-1), axes=-1) / n_gamma
MMM = np.zeros((n_images, 2 * n_bessel + 1, ngridr, n_gamma),
dtype=np.complex128)
for im in range(n_images):
for qp in range(-n_bessel, n_bessel + 1):
tmp = Mhathat[im, :, :]
MMM[im, qp + n_bessel, :, :] = np.roll(tmp, -qp, axis=-1)
MMM = MMM.transpose((1, 3, 2, 0)).copy()
precomp2 = time() - tmr_start
tmr_start = time()
BigMul_right = np.zeros((sum(all_rnks), n_gamma, n_templates, n_images),
dtype=np.complex128)
for qp in range(-n_bessel, n_bessel + 1):
rnk = all_rnks[qp + n_bessel]
ofst = sum(all_rnks[:qp + n_bessel])
for ll in range(rnk):
for q in range(n_gamma):
tmp = np.matmul(SSS[ofst + ll, q, :, :],
MMM[qp + n_bessel, q, :, :])
BigMul_right[ofst + ll, q, :, :] = tmp
BigMul_right = BigMul_right.transpose((3, 2, 1, 0)).copy()
c_n = np.zeros((n_images, n_templates, n_gamma, n_trans),
dtype=np.complex128)
for im in range(n_images):
for tt in range(n_templates):
c_n[im, tt, :, :] = np.matmul(BigMul_right[im, tt, :, :],
BigMul_left)
c_n = 2 * np.pi * c_n
zprods = ifft(ifftshift(c_n, axes=-2), axis=-2) * n_gamma
tm2 = time() - tmr_start
return zprods, precomp2, tm2
def cartesian_to_pft(templates, T, pf_grid):
xnodesr = pf_grid['xnodesr']
n_psi = pf_grid['n_psi']
ngridr = xnodesr.shape[0]
n_templates = templates.shape[0]
N = templates.shape[1]
dx = T / N
dy = T / N
wx = pf_grid['wx']
wy = pf_grid['wy']
Shat = np.zeros((n_templates, ngridr * n_psi), dtype=np.complex128)
upsampfac = 1.25
fcc = np.empty(len(wx), dtype=np.complex128)
for k in range(n_templates):
template = templates[k, :, :]
# Need to force Fortran ordering because that's what the FINUFFT
# interface expects.
gg = np.asfortranarray(template.transpose((1, 0)))
isign = -1
eps = 1e-6
# Note: Crashes if gg is a 1D vector (raveled). Why?
finufftpy.nufft2d2(wx * dx, wy * dy, fcc,
isign, eps, gg, upsampfac=upsampfac)
Shat[k, :] = fcc
return Shat
def pft_to_cartesian(Shat, T, N, pf_grid):
xnodesr = pf_grid['xnodesr']
n_psi = pf_grid['n_psi']
quad_wts = pf_grid['quad_wts']
ngridr = xnodesr.shape[0]
n_templates = Shat.shape[0]
dx = T / N
dy = T / N
wx = pf_grid['wx']
wy = pf_grid['wy']
templates1 = np.zeros((n_templates, N, N))
# Again, Fortran ordering is necessary for FINUFFT.
gxx = np.empty((N, N), dtype=np.complex128, order='F')
upsampfac = 1.25
for k in range(n_templates):
fcc1 = Shat[k, :] * quad_wts
isign = 1
eps = 1e-6
finufftpy.nufft2d1(wx * dx, wy * dy, fcc1, isign, eps, N, N, gxx,
upsampfac=upsampfac)
gxx = gxx*dx*dy/(4*np.pi**2)
templates1[k, :, :] = np.real(gxx.transpose((1, 0)))
return templates1
def rotate_pft(fcc, rgamma, pf_grid):
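    """Rotates each radial ring of a polar Fourier transform by the angles in
    rgamma, via FFT phase shifts along the angular direction."""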
xnodesr = pf_grid['xnodesr']
n_psi = pf_grid['n_psi']
ngridr = xnodesr.shape[0]
ngridc = n_psi * np.ones(ngridr, dtype=np.int32)
fcc_rot = np.zeros(fcc.shape, dtype=np.complex128)
cnt = 0
for rr in range(ngridr):
tmp = fcc[:, cnt:cnt + ngridc[rr]]
ffcc = fft(tmp)
n_theta = ngridc[rr]
wth = ifftshift(np.arange(-n_theta/2, n_theta/2))
mul = np.exp(-1j * wth * rgamma[:, np.newaxis])
ffcc_rot = ffcc * mul
tmp = ifft(ffcc_rot)
fcc_rot[:, cnt:cnt + ngridc[rr]] = tmp
cnt += ngridc[rr]
return fcc_rot
def pft_phase_shift(sh, pf_grid):
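    """Builds the multiplier exp(-1j * r * (cos(psi)*sx + sin(psi)*sy));
    applying it to a polar Fourier transform translates the image by sh."""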
all_psi = pf_grid['all_psi']
quad_xnodesr = pf_grid['all_r']
phase = (np.cos(all_psi) * sh[:, np.newaxis, 0]
+ np.sin(all_psi) * sh[:, np.newaxis, 1])
cmul = np.exp(-1j * quad_xnodesr * phase)
return cmul
def translate_pft(fcc, sh, pf_grid):
cmul = pft_phase_shift(sh, pf_grid)
return fcc * cmul
def pft_norm(Mhat, pf_grid):
quad_wts = pf_grid['quad_wts']
return np.sqrt(np.sum((np.abs(Mhat) ** 2) * quad_wts, axis=-1))
def pft_to_fb(Shat, pf_grid):
ngridr = pf_grid['ngridr']
n_psi = pf_grid['n_psi']
quad_wts = pf_grid['quad_wts']
n_templates = Shat.shape[0]
quad_wts_sq = quad_wts.reshape((ngridr, n_psi))
Shathat = Shat.reshape((n_templates, ngridr, n_psi))
# Shathat: (te, k, γ)
Shathat = np.fft.fftshift(np.fft.fft(Shathat, axis=-1), axes=-1)
Shathat = Shathat * quad_wts_sq[np.newaxis, :, :]
# Shathat: (te, k, q)
# There was a 2π factor missing before. Let's remove it.
Shathat = Shathat / (2 * np.pi)
return Shathat
def make_tensor_grid(rmax, ngridr, n_psi):
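    """Builds a tensor-product polar grid of ngridr radii (up to rmax) times
    n_psi equispaced angles, returning a dict with the node coordinates,
    quadrature weights and the flattened Cartesian frequencies (wx, wy)."""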
dr = rmax/ngridr
xnodesr = dr*np.arange(1, ngridr+1)
weights = dr*np.ones(ngridr)
psi = 2 * np.pi / n_psi * np.arange(n_psi)
all_psi = np.repeat(psi[np.newaxis, :], ngridr, axis=0)
all_psi = np.ravel(all_psi)
all_r = np.repeat(xnodesr[:, np.newaxis], n_psi, axis=1)
all_r = np.ravel(all_r)
wts_theta = 2 * np.pi / n_psi
quad_wts = wts_theta * xnodesr * weights
quad_wts = np.repeat(quad_wts[:, np.newaxis], n_psi, axis=-1)
quad_wts = np.ravel(quad_wts)
wx = np.zeros(n_psi * ngridr)
wy = np.zeros(n_psi * ngridr)
cnt = 0
for rr in range(ngridr):
dd = xnodesr[rr]
theta = 2 * np.pi / n_psi * np.arange(n_psi)
wx[cnt:cnt + n_psi] = dd * np.cos(theta)
wy[cnt:cnt + n_psi] = dd * np.sin(theta)
cnt = cnt + n_psi
grid = dict()
grid['rmax'] = rmax
grid['ngridr'] = ngridr
grid['n_psi'] = n_psi
grid['xnodesr'] = xnodesr
grid['all_psi'] = all_psi
grid['all_r'] = all_r
grid['quad_wts'] = quad_wts
grid['wx'] = wx
grid['wy'] = wy
return grid
def make_adaptive_grid(delta_range, dx, oversampling):
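    """Builds a polar grid of translations where the number of angular
    samples per ring grows in proportion to the ring radius."""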
all_delta = dx / oversampling * np.arange(oversampling * delta_range + 1e-10)
n_delta = all_delta.shape[0]
n_omega = oversampling * np.int32(np.ceil(2 * np.pi / dx * all_delta))
n_trans = np.sum(n_omega)
trans = np.zeros((n_trans, 2))
cnt = 0
for kk in range(n_delta):
n_om = n_omega[kk]
all_om = 2 * np.pi * np.arange(n_om) / n_om
trans[cnt:cnt + n_om, 0] = all_delta[kk] * np.cos(all_om)
trans[cnt:cnt + n_om, 1] = all_delta[kk] * np.sin(all_om)
cnt += n_om
grid = dict()
grid['all_delta'] = all_delta
grid['n_delta'] = n_delta
grid['n_omega'] = n_omega
grid['n_trans'] = n_trans
grid['trans'] = trans
return grid
def make_cartesian_grid(delta_range, dx, oversampling):
Nkeep = 2 * oversampling * delta_range
xfine = dx * np.arange(-Nkeep // 2, Nkeep // 2)
trans = xfine
trans = np.meshgrid(trans, trans, indexing='ij')
trans = np.stack(trans[::-1], -1)
trans = trans.reshape((Nkeep ** 2, 2))
grid = {'n_trans': Nkeep ** 2, 'trans': trans}
return grid
def extract_alignments(inner_prods3, tr_grid):
n_images = inner_prods3.shape[0]
n_templates = inner_prods3.shape[1]
n_psi = inner_prods3.shape[2]
n_trans = inner_prods3.shape[3]
trans = tr_grid['trans']
inner_prods3 = inner_prods3.reshape((n_images,
n_templates*n_psi*n_trans))
est_template_ind = np.zeros(n_images, dtype=np.int32)
est_trans = np.zeros((n_images, 2))
est_gamma = np.zeros(n_images)
idx = inner_prods3.argmax(axis=-1)
for cn in range(n_images):
I3, I2, I1 = np.unravel_index(idx[cn],
(n_templates, n_psi, n_trans))
shiftx = trans[I1, 0]
shifty = trans[I1, 1]
rgamma = I2 * 2 * np.pi / n_psi
est_template_ind[cn] = I3
est_trans[cn, 0] = shiftx
est_trans[cn, 1] = shifty
est_gamma[cn] = rgamma
return est_template_ind, est_trans, est_gamma
def rotations_brute_force(fimages, Shat, n_gamma, pf_grid, Nfine):
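    """Correlates images with rotated templates over a fine Cartesian grid:
    templates are rotated in the polar Fourier domain, mapped to Cartesian
    space via NUFFT, then matched with zero-padded FFT cross-correlation."""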
if Shat.ndim == 2:
Shat = Shat[np.newaxis, :, :]
n_images, N, _ = fimages.shape
n_templates, ngridr, ngridp = Shat.shape
quad_wts_sq = pf_grid['quad_wts'].reshape((ngridr, ngridp))
wx = pf_grid['wx']
wy = pf_grid['wy']
all_gamma = 2 * np.pi / n_gamma * np.arange(n_gamma)
tmr_start = time()
Shathat = fft(Shat) / ngridp
# Shat: (te, k, γ)
# Shathat: (te, k, q)
Shathat = Shathat.reshape((n_templates, 1, ngridr, ngridp))
# Shathat: (te, 1, k, q)
wth = ifftshift(np.arange(-ngridp / 2, ngridp / 2))
mul = np.exp(-1j * wth[np.newaxis, :] * all_gamma[:,np.newaxis])
# mul: (γ, q)
Shathat_rot = Shathat * mul[:, np.newaxis, :]
# Shathat_rot: (te, γ, k, q)
    # NOTE: This can be sped up by using PyFFTW. However, for the execution to
    # be efficient, the plan must be created using FFTW_MEASURE, which takes a
    # long time. The solution will be to separate this out to the BFR
    # “planning” stage for some fixed number of image-template pairs, then
    # loop over these, computing the IFFT batchwise at execution (since the
    # exact number of pairs is not known at planning time).
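    # A rough sketch of such a planned variant (hypothetical, untested here):
    #     buf = pyfftw.empty_aligned(Shathat_rot.shape, dtype='complex128')
    #     plan = pyfftw.FFTW(buf, buf, axes=(-1,),
    #                        direction='FFTW_BACKWARD', flags=('FFTW_MEASURE',))
    #     np.copyto(buf, Shathat_rot); plan()  # plan is reused across batches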
Shat_rot = ifft(Shathat_rot)
fx1 = quad_wts_sq * Shat_rot
T = 2
dx = dy = T / N
templates_rot = np.empty((N, N, n_gamma, n_templates),
dtype=np.complex128, order='F')
upsampfac = 1.25
isign = 1
eps = 1e-2
finufftpy.nufft2d1many(wx * dx, wy * dy, fx1, isign, eps, N, N,
templates_rot, upsampfac=upsampfac)
templates_rot = templates_rot / (4 * np.pi ** 2)
# templates_rot: (trx, try, γ, te)
templates_rot = templates_rot.transpose((3, 2, 1, 0)).copy()
# templates_rot: (te, γ, try, trx)
ftemplates_rot = fft2(ifftshift(templates_rot, axes=(-2, -1)))
# ftemplates_rot: (te, γ, trky, trkx)
precomp = time() - tmr_start
tmr_start = time()
ftemplates_rot = ftemplates_rot[:, np.newaxis, :, :, :]
# ftemplates_rot: (te, im, γ, trky, trkx)
fxx = fimages[:, np.newaxis, :, :] * np.conj(ftemplates_rot)
# ftemplates_rot: (te, im, γ, trky, trkx)
inner_prods = pyfftw.zeros_aligned((n_templates, n_images, n_gamma, Nfine, Nfine), dtype='complex128')
inner_prods[:, :, :, :N // 2, :N // 2] = fxx[:, :, :, :N // 2, :N // 2]
inner_prods[:, :, :, :N // 2, -N // 2:] = fxx[:, :, :, :N // 2, -N // 2:]
inner_prods[:, :, :, -N // 2:, :N // 2] = fxx[:, :, :, -N // 2:, :N // 2]
inner_prods[:, :, :, -N // 2:, -N // 2:] = fxx[:, :, :, -N // 2:, -N // 2:]
plan = pyfftw.FFTW(inner_prods, inner_prods, axes=(-2, -1),
direction='FFTW_BACKWARD',
flags=('FFTW_MEASURE',), threads=12)
plan()
inner_prods = np.real(inner_prods)
inner_prods *= (Nfine / N) ** 2
# inner_prods: (te, im, γ, try, trx)
comp = time() - tmr_start
return inner_prods, precomp, comp
def calc_ftk_svd(n_bessel, eps, pf_grid, tr_grid):
all_UU = [None] * (2 * n_bessel + 1)
all_SSVV = [None] * (2 * n_bessel + 1)
all_rnks = np.zeros(2 * n_bessel + 1, dtype=np.int32)
xnodesr = pf_grid['xnodesr']
all_delta = tr_grid['all_delta']
n_delta = tr_grid['n_delta']
n_omega = tr_grid['n_omega']
n_trans = tr_grid['n_trans']
for qp in range(-n_bessel, n_bessel + 1):
J_n = besselj(qp, -all_delta[:, np.newaxis] * xnodesr[np.newaxis, :])
U, S, Vh = np.linalg.svd(J_n)
ind = S > eps
rnk = sum(ind)
all_rnks[qp + n_bessel] = rnk
all_UU[qp + n_bessel] = U[:, :rnk]
all_SSVV[qp + n_bessel] = S[:rnk, np.newaxis] * Vh[:rnk, :]
SSVV_big = np.concatenate(all_SSVV, axis=0)
UUU = np.concatenate(all_UU, axis=1)
all_omega = np.concatenate([2 * np.pi / n_om * np.arange(n_om)
for n_om in n_omega if n_om > 0])
all_qp = np.concatenate([(k - n_bessel) * np.ones(n)
for k, n in enumerate(all_rnks)])
vec_omega = np.exp(1j * all_qp[np.newaxis, :]
* (all_omega[:, np.newaxis] - np.pi / 2))
BigMul_left = np.zeros((sum(all_rnks), n_trans), dtype=np.complex128)
cnt = 0
for kk in range(n_delta):
n_om = n_omega[kk]
BigMul_left[:, cnt:cnt + n_om] = (UUU[kk, :][np.newaxis, :].T
* vec_omega[cnt:cnt + n_om, :].T)
cnt += n_om
return all_rnks, BigMul_left, SSVV_big
def premult_right_fb(Shathat, SSVV_big, all_rnks):
n_psi = Shathat.shape[2]
ngridr = Shathat.shape[1]
n_templates = Shathat.shape[0]
Shathat = Shathat.transpose((2, 0, 1))
Shathat = Shathat.reshape((1, n_psi * n_templates, ngridr))
SSS = SSVV_big[:, np.newaxis, :] * Shathat.conj()
SSS = SSS.reshape((sum(all_rnks), n_psi, n_templates, ngridr))
return SSS
def bft_plan(tr_grid, pf_grid):
plan = {'tr_grid': tr_grid,
'pf_grid': pf_grid}
return plan
def bft_execute(plan, Mhat, Shat):
pf_grid = plan['pf_grid']
tr_grid = plan['tr_grid']
n_psi = pf_grid['n_psi']
Mnorm = pft_norm(Mhat, pf_grid)
Snorm = pft_norm(Shat, pf_grid)
MSnorm = Mnorm[:, np.newaxis] * Snorm[np.newaxis, :]
tmr_start = time()
Shathat = pft_to_fb(Shat, pf_grid)
precomp1 = time() - tmr_start
zprods1, ptm, tm = translations_brute_force_batch(Shathat, Mhat,
pf_grid, tr_grid, n_psi)
precomp1 += ptm
inner_prods3 = zprods1 / MSnorm[..., np.newaxis, np.newaxis]
return inner_prods3, (precomp1, tm)
def ftk_plan(tr_grid, pf_grid, n_bessel, eps):
all_rnks, BigMul_left, SSVV_big = calc_ftk_svd(n_bessel, eps, pf_grid, tr_grid)
plan = {'tr_grid': tr_grid,
'pf_grid': pf_grid,
'n_bessel': n_bessel,
'eps': eps,
'all_rnks': all_rnks,
'BigMul_left': BigMul_left,
'SSVV_big': SSVV_big}
return plan
def ftk_execute(plan, Mhat, Shat):
pf_grid = plan['pf_grid']
SSVV_big = plan['SSVV_big']
all_rnks = plan['all_rnks']
n_bessel = plan['n_bessel']
BigMul_left = plan['BigMul_left']
Mnorm = pft_norm(Mhat, pf_grid)
Snorm = pft_norm(Shat, pf_grid)
MSnorm = Mnorm[:, np.newaxis] * Snorm[np.newaxis, :]
tmr_start = time()
Shathat = pft_to_fb(Shat, pf_grid)
SSS = premult_right_fb(Shathat, SSVV_big, all_rnks)
precomp2 = time() - tmr_start
zprods4, ptm, tm = svd_decomposition_alignment(SSS, Mhat, n_bessel,
all_rnks, BigMul_left)
precomp2 += ptm
inner_prods4 = np.real(zprods4) / MSnorm[..., np.newaxis, np.newaxis]
return inner_prods4, (precomp2, tm)
def bfr_plan(Nfine, Nkeep, n_gamma, pf_grid, T, N):
plan = {'Nfine': Nfine,
'Nkeep': Nkeep,
'n_gamma': n_gamma,
'pf_grid': pf_grid,
'T': T,
'N': N}
# TODO: FFTW plans, etc.
return plan
def bfr_execute(plan, Mhat, Shat):
pf_grid = plan['pf_grid']
T = plan['T']
N = plan['N']
Nfine = plan['Nfine']
Nkeep = plan['Nkeep']
n_gamma = plan['n_gamma']
ngridr = pf_grid['ngridr']
n_psi = pf_grid['n_psi']
n_templates = Shat.shape[0]
n_images = Mhat.shape[0]
dx = dy = T / N
images = pft_to_cartesian(Mhat, T, N, pf_grid) / (dx * dy)
Mnorm = pft_norm(Mhat, pf_grid)
Snorm = pft_norm(Shat, pf_grid)
fimages = fft2(ifftshift(images, axes=(-2, -1)))
SShat = Shat.reshape((n_templates, ngridr, n_psi))
fimages = fimages / Mnorm[:, np.newaxis, np.newaxis]
SShat = SShat / Snorm[:, np.newaxis, np.newaxis]
precomp3 = 0
comp3 = 0
inner_prods = np.zeros((n_images, n_templates, n_gamma, Nkeep, Nkeep), dtype=np.complex128)
for tt in range(n_templates):
inn, precomp, comp = rotations_brute_force(fimages, SShat[tt],
n_gamma, pf_grid, Nfine)
# NOTE: The following truncates *and* inverts the FFT shift.
inner_prods[:, tt, :, -Nkeep // 2:, -Nkeep // 2:] = inn[:, :, :, :Nkeep // 2, :Nkeep // 2]
inner_prods[:, tt, :, -Nkeep // 2:, :Nkeep // 2] = inn[:, :, :, :Nkeep // 2, -Nkeep // 2:]
inner_prods[:, tt, :, :Nkeep // 2, -Nkeep // 2:] = inn[:, :, :, -Nkeep // 2:, :Nkeep // 2]
inner_prods[:, tt, :, :Nkeep // 2, :Nkeep // 2] = inn[:, :, :, -Nkeep // 2:, -Nkeep // 2:]
precomp3 += precomp
comp3 += comp
inner_prods = inner_prods.reshape((n_images, n_templates, n_gamma, Nkeep ** 2))
return inner_prods, (precomp3, comp3)
| 1.773438 | 2 |
deep_homography/train.py | nilu33032/google-research | 1 | 68356 | <filename>deep_homography/train.py
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tensorflow code for training and evaluating deep homography models."""
from absl import app
from absl import flags
import tensorflow as tf
from deep_homography import hmg_util
from deep_homography import models
slim = tf.contrib.slim
flags.DEFINE_string('master', 'local', 'Master of the training')
flags.DEFINE_integer('ps_tasks', 0, 'Number of parameter servers')
flags.DEFINE_enum('mode', 'train', ['train', 'eval'], 'Mode of this run')
flags.DEFINE_integer('task', 0, 'Task id')
flags.DEFINE_string('train_dir', '/tmp/train',
'Where to write the checkpoints for training')
flags.DEFINE_string('eval_dir', '',
'Where to write the checkpoints for eval')
flags.DEFINE_string('model_path', '',
'Where to find the checkpoints for eval')
flags.DEFINE_string('vgg_model_path', '',
'Where to find the vgg network checkpoint')
flags.DEFINE_string('data_pattern', '', 'Glob pattern of input data')
flags.DEFINE_enum('data_type', 'ava', ['coco', 'ava', 'ava_seq'],
'training data type')
flags.DEFINE_integer('num_frames_per_sample', 9,
'Number of frames in one sample')
flags.DEFINE_integer('batch_size', 3, 'Batch size')
flags.DEFINE_integer('queue_size', 100, 'Batch queue size')
flags.DEFINE_integer('num_threads', 3, 'The number of threads in the queue')
flags.DEFINE_integer('train_height', 128, 'Height of training images')
flags.DEFINE_integer('train_width', 128, 'Width of training images')
flags.DEFINE_float('max_shift', 16,
'Maximum random shift when creating training samples')
flags.DEFINE_boolean('mix', False,
'Whether to randomly scale random shift sizes')
flags.DEFINE_boolean('screen', False,
'Whether to remove highly distorted homography')
flags.DEFINE_integer('frame_gap', 0, 'Temporal gap between two selected frames')
flags.DEFINE_integer('max_frame_gap', 5, 'Maximal frame gap')
flags.DEFINE_float('learning_rate', 0.001, 'Learning rate')
flags.DEFINE_integer('lr_decay_steps', 100000, 'Decay steps for learning rate')
flags.DEFINE_float('lr_decay_rate', 0.8, 'Decay rate for learning rate')
flags.DEFINE_float('weight_decay', 0.00004, 'weight decay coefficient')
flags.DEFINE_float('dropout_keep', 0.8, 'probability that an element is kept')
flags.DEFINE_integer('num_eval_steps', 10, 'Number of eval steps per cycle')
flags.DEFINE_integer('max_step', 100000, 'the maximal number of global steps')
flags.DEFINE_enum('loss', 'l2', ['l2', 'hier_l2', 'hier_ld'], 'loss function')
flags.DEFINE_boolean('random_flip', False,
'Whether randomly flip training examples left or right')
flags.DEFINE_boolean('random_reverse', False,
'Whether randomly reverse the video sequence')
flags.DEFINE_float('pixel_noise', 2, 'Amount of random noise added to a pixel')
flags.DEFINE_integer('num_level', 2, 'Number of hierarchical levels')
flags.DEFINE_integer('num_layer', 6,
'Number of layers in the motion feature network')
flags.DEFINE_integer('level_wise', 1,
'Whether to train networks level by level')
flags.DEFINE_enum('mask_method', 'f4', ['f4', 'f5', 'f6'], 'Masking method')
flags.DEFINE_enum('network_id', 'hier', ['hier', 'fmask_sem'],
'Type of network')
flags.DEFINE_boolean('block_prop', False,
                     'Whether to block back propagation between different levels')
FLAGS = flags.FLAGS
def predict_homography(inputs, network_id='cvgghmg', reuse=None,
is_training=True, scope='hier_hmg'):
"""Estimates homography using a selected deep neural network.
Args:
inputs: batch of input image pairs of data type float32 and of shape
[batch_size, height, width, None]
network_id: deep neural network method
reuse: whether to reuse this network weights
is_training: whether used for training or testing
scope: the scope of variables in this function
Raises:
    ValueError: The network_id was invalid.
Returns:
a list of homographies at each level and a list of images warped by
the list of corresponding homographies
"""
with slim.arg_scope(models.homography_arg_scope(
weight_decay=FLAGS.weight_decay)):
if network_id == 'hier':
return models.hier_homography_estimator(
inputs, num_param=8, num_layer=FLAGS.num_layer,
num_level=FLAGS.num_level,
dropout_keep_prob=FLAGS.dropout_keep,
is_training=is_training, reuse=reuse, scope=scope)
elif network_id == 'fmask_sem':
return models.hier_homography_fmask_estimator(
inputs, num_param=8, num_layer=FLAGS.num_layer,
num_level=FLAGS.num_level,
dropout_keep_prob=FLAGS.dropout_keep,
is_training=is_training, reuse=reuse, scope=scope)
else:
raise ValueError('Unknown network_id: %s' % network_id)
def get_samples(to_gray, mode):
"""Get training or testing samples.
Args:
to_gray: whether prepare color or gray scale training images
mode: 'train' or 'eval', specifying whether preparing images for training or
testing
Raises:
    ValueError: The data_type was invalid.
Returns:
a batch of training images and the corresponding ground-truth homographies
"""
if FLAGS.data_type == 'coco':
batch_frames, batch_labels = hmg_util.get_batchpairs_coco(
FLAGS.data_pattern, FLAGS.max_shift, batch_size=FLAGS.batch_size,
queue_size=FLAGS.queue_size, num_threads=FLAGS.num_threads,
train_height=FLAGS.train_height, train_width=FLAGS.train_width,
pixel_noise=FLAGS.pixel_noise, mix=FLAGS.mix, screen=FLAGS.screen,
to_gray=to_gray, mode=mode)
elif FLAGS.data_type == 'ava':
batch_frames, batch_labels = hmg_util.get_batchpairs_ava(
FLAGS.data_pattern, FLAGS.max_shift, batch_size=FLAGS.batch_size,
queue_size=FLAGS.queue_size, num_threads=FLAGS.num_threads,
train_height=FLAGS.train_height, train_width=FLAGS.train_width,
pixel_noise=FLAGS.pixel_noise, mix=FLAGS.mix, screen=FLAGS.screen,
to_gray=to_gray, mode=mode)
elif FLAGS.data_type == 'ava_seq':
batch_frames, batch_labels = hmg_util.get_batchseqs_ava(
FLAGS.data_pattern, FLAGS.num_frames_per_sample, FLAGS.max_shift,
batch_size=FLAGS.batch_size, queue_size=FLAGS.queue_size,
num_threads=FLAGS.num_threads,
train_height=FLAGS.train_height, train_width=FLAGS.train_width,
pixel_noise=FLAGS.pixel_noise, mix=FLAGS.mix, screen=FLAGS.screen,
to_gray=to_gray, mode=mode)
else:
raise ValueError('Unknown data_type: %s' % FLAGS.data_type)
return batch_frames, batch_labels
def run_train(scope):
"""Trains a network.
Args:
scope: the scope of variables in this function
"""
with tf.Graph().as_default():
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
to_gray = True
if 'sem' in FLAGS.network_id:
to_gray = False
batch_frames, batch_labels = get_samples(to_gray, 'train')
batch_hmg_prediction, _ = predict_homography(
batch_frames, network_id=FLAGS.network_id, is_training=True,
scope=scope)
if FLAGS.loss == 'hier_l2':
for level in xrange(FLAGS.num_level):
delta_level = FLAGS.num_level - level -1
scale = 2 ** delta_level
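          # Ground-truth offsets are halved once per remaining level,
          # presumably matching the half-resolution predictions of the
          # coarser pyramid levels.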
l2 = tf.losses.mean_squared_error(batch_labels / scale,
batch_hmg_prediction[level])
slim.summaries.add_scalar_summary(l2, 'l2%d' % delta_level, 'losses')
elif FLAGS.loss == 'hier_ld':
for level in xrange(FLAGS.num_level):
delta_level = FLAGS.num_level - level -1
scale = 2 ** delta_level
diff = tf.reshape(batch_labels / scale - batch_hmg_prediction[level],
[FLAGS.batch_size, 4, 2])
l2d = tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(diff), 2)))
tf.losses.add_loss(l2d)
slim.summaries.add_scalar_summary(l2d, 'l2%d' % delta_level, 'losses')
else:
l2 = tf.losses.mean_squared_error(
batch_labels, batch_hmg_prediction[FLAGS.num_level - 1])
slim.summaries.add_scalar_summary(slim.losses.get_total_loss(),
'loss', 'losses')
global_step = slim.get_or_create_global_step()
learning_rate_decay = tf.train.exponential_decay(
learning_rate=FLAGS.learning_rate,
global_step=global_step,
decay_steps=FLAGS.lr_decay_steps,
decay_rate=FLAGS.lr_decay_rate,
staircase=True)
optimizer = tf.train.AdamOptimizer(learning_rate_decay)
is_chief = (FLAGS.task == 0)
train_op = slim.learning.create_train_op(slim.losses.get_total_loss(),
optimizer=optimizer)
saver = tf.train.Saver(max_to_keep=20)
if FLAGS.level_wise == 0:
variables_to_restore = []
for i in range(0, FLAGS.num_level - 1):
variables = slim.get_variables(scope='%s/level%d' % (scope, i))
variables_to_restore = variables_to_restore + variables
init_fn = slim.assign_from_checkpoint_fn(FLAGS.model_path,
variables_to_restore)
elif 'sem' in FLAGS.network_id:
variables_to_restore = slim.get_variables(scope='vgg_16')
init_fn = slim.assign_from_checkpoint_fn(FLAGS.vgg_model_path,
variables_to_restore)
else:
init_fn = None
slim.learning.train(
train_op=train_op,
logdir=FLAGS.train_dir,
save_summaries_secs=60,
save_interval_secs=600,
saver=saver,
number_of_steps=FLAGS.max_step,
master=FLAGS.master,
is_chief=is_chief,
init_fn=init_fn)
def run_eval(scope):
"""Evaluates a network.
Args:
scope: the scope of variables in this function
"""
to_gray = True
if 'sem' in FLAGS.network_id:
to_gray = False
batch_frames, batch_labels = get_samples(to_gray, 'eval')
batch_hmg_prediction, _ = predict_homography(
batch_frames, network_id=FLAGS.network_id, is_training=False, scope=scope)
loss_dict = {}
if 'hier' in FLAGS.network_id or 'mask' in FLAGS.network_id:
for level in xrange(0, FLAGS.num_level):
delta_level = FLAGS.num_level - level -1
scale = 2 ** delta_level
if FLAGS.loss == 'hier_ld':
diff = tf.reshape(batch_labels / scale - batch_hmg_prediction[level],
[FLAGS.batch_size, 4, 2])
sqrt_diff = tf.sqrt(tf.reduce_sum(tf.square(diff), 2))
loss_dict['l2%d' % delta_level] = tf.metrics.mean(sqrt_diff)
else:
loss_dict['l2%d' % delta_level] = slim.metrics.mean_squared_error(
batch_labels / scale, batch_hmg_prediction[level])
else:
loss_dict['loss'] = slim.metrics.mean_squared_error(
batch_labels, batch_hmg_prediction[FLAGS.num_level - 1])
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map(
loss_dict)
for name, value in names_to_values.iteritems():
slim.summaries.add_scalar_summary(value, name, 'losses', print_summary=True)
slim.evaluation.evaluation_loop(
master=FLAGS.master,
eval_interval_secs=60,
checkpoint_dir=FLAGS.train_dir,
logdir=FLAGS.eval_dir,
eval_op=names_to_updates.values(),
num_evals=FLAGS.num_eval_steps,
)
def main(_):
if FLAGS.mode == 'train':
run_train('hier_hmg')
elif FLAGS.mode == 'eval':
run_eval('hier_hmg')
else:
raise ValueError('Unknown mode: %s' % FLAGS.mode)
if __name__ == '__main__':
flags.mark_flag_as_required('train_dir')
app.run(main)
| 1.585938 | 2 |
core/ai/behaviors/__init__.py | ChrisLR/BasicDungeonRL | 3 | 68484 | <filename>core/ai/behaviors/__init__.py
from core.ai.behaviors.base import Behavior
from core.ai.behaviors.meleeattack import MeleeAttack
from core.ai.behaviors.move import Move
from core.ai.behaviors.wait import Wait
| 0.220703 | 0 |
allrank/click_models/click_utils.py | mrdrozdov/allRank | 0 | 68612 | from typing import List, Tuple, Union
import numpy as np
import torch
from allrank.click_models.base import ClickModel
from allrank.data.dataset_loading import PADDED_Y_VALUE
def click_on_slates(slates: Union[Tuple[np.ndarray, np.ndarray], Tuple[torch.Tensor, torch.Tensor]],
click_model: ClickModel, include_empty: bool) -> Tuple[List[Union[np.ndarray, torch.Tensor]], List[List[int]]]:
"""
    This method runs a click model on a list of slates and returns new slates with `y` taken from clicks
:param slates: a Tuple of X, y:
X being a list of slates represented by document vectors
y being a list of slates represented by document relevancies
:param click_model: a click model to be applied to every slate
:param include_empty: if True - will return even slates that didn't get any click
    :return: a Tuple of X, clicks: X representing the same document vectors as the input 'X', clicks representing the click mask for every slate
"""
X, y = slates
clicks = [MaskedRemainMasked(click_model).click(slate) for slate in zip(X, y)]
X_with_clicks = [[X, slate_clicks] for X, slate_clicks in list(zip(X, clicks)) if
(np.sum(slate_clicks > 0) > 0 or include_empty)]
return_X, clicks = map(list, zip(*X_with_clicks))
return return_X, clicks # type: ignore
class MaskedRemainMasked(ClickModel):
"""
This click model wraps another click model and:
    1. ensures the inner click model does not get documents that were padded
    2. ensures padded documents get '-1' in the 'clicked' vector
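    Illustration (assuming PADDED_Y_VALUE == -1): for a slate with
    y = [1, 0, -1], the inner model only sees the first two documents, and
    the padded slot comes back as -1 in the returned click vector.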
"""
def __init__(self, inner_click_model: ClickModel):
"""
:param inner_click_model: a click model that is run on the list of non-padded documents
"""
self.inner_click_model = inner_click_model
def click(self, documents: Union[Tuple[np.ndarray, np.ndarray], Tuple[torch.Tensor, torch.Tensor]]) -> np.ndarray:
X, y = documents
padded_values_mask = y == PADDED_Y_VALUE
real_X = X[~padded_values_mask]
real_y = y[~padded_values_mask]
clicks = self.inner_click_model.click((real_X, real_y))
final_clicks = np.zeros_like(y)
final_clicks[padded_values_mask] = PADDED_Y_VALUE
final_clicks[~padded_values_mask] = clicks
return final_clicks
| 1.992188 | 2 |
src/snakefiles/raw.py | jlanga/smsk_exfi_paper | 0 | 68740 | <reponame>jlanga/smsk_exfi_paper<filename>src/snakefiles/raw.py
def get_reads(wildcards):
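    """Return the (forward, reverse) read paths for a sample/library pair."""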
sample = wildcards.sample
library = wildcards.library
forward, reverse = (
samples
        [(samples["sample"] == sample) & (samples["library"] == library)]
[["forward", "reverse"]]
.values
.tolist()[0]
)
return forward, reverse
rule raw_link_pe_sample:
input:
get_reads
output:
forward = RAW + "{sample}_{library}_1.fq.gz",
reverse = RAW + "{sample}_{library}_2.fq.gz"
log:
RAW + "link_dna_pe_{sample}_{library}.log"
benchmark:
RAW + "link_dna_pe_{sample}_{library}.json"
shell:
"ln "
"--symbolic "
"$(readlink --canonicalize {input[0]}) "
"{output.forward} 2> {log}; "
"ln "
"--symbolic "
"$(readlink --canonicalize {input[1]}) "
"{output.reverse} 2>> {log}"
def get_transcriptome(wildcards):
return features[wildcards.sample]["transcriptome"]
def get_genome(wildcards):
return features[wildcards.sample]["genome"]
def get_annotation(wildcards):
return features[wildcards.sample]["annotation"]
rule raw_link_transcriptome:
input:
get_transcriptome
output:
RAW + "{sample}.rna.fa"
shell:
"ln --symbolic $(readlink --canonicalize {input}) {output}"
rule raw_link_genome:
input:
get_genome
output:
RAW + "{sample}.dna.fa"
shell:
"ln --symbolic $(readlink --canonicalize {input}) {output}"
rule raw_link_annotation:
input:
get_annotation
output:
RAW + "{sample}.gff3"
shell:
"ln --symbolic $(readlink --canonicalize {input}) {output}"
rule raw_reference:
input:
expand(
RAW + "{sample}.{ending}",
sample=SPECIES,
ending=["dna.fa", "rna.fa", "gff3"]
)
| 1.5 | 2 |
pys/conf/mgroup.py | cyjseagull/generator | 0 | 68868 | # coding:utf-8
"""[resolve group_genesis.ini]
Raises:
MCError -- [description]
MCError -- [description]
MCError -- [description]
MCError -- [description]
Returns:
[bool] -- [true or false]
"""
import configparser
import codecs
from pys.tool import utils
from pys.log import LOGGER
from pys.error.exp import MCError
class MgroupConf(object):
"""group_genesis.ini configuration
"""
name = 'FISCO group'
group_id = 0
p2p_listen_port = []
p2p_ip = []
# fisco_path = ''
def __init__(self):
self.name = 'FISCO BCOS group'
def __repr__(self):
        return 'MgroupConf => %s' % self.name
def get_group_id(self):
"""[get group_id]
Returns:
[string] -- [group_id]
"""
return self.group_id
def get_p2p_ip(self):
"""[get p2p_ip]
Returns:
[string] -- [p2p_ip]
"""
return self.p2p_ip
def get_listen_port(self):
"""[get listen port]
Returns:
[string] -- [p2p_listen_port]
"""
return self.p2p_listen_port
def parser(mgroup):
"""resolve group_genesis.ini
Arguments:
mgroup {string} -- path of group_genesis.ini
Raises:
MCError -- exception description
"""
LOGGER.info('group_genesis.ini is %s', mgroup)
# resolve configuration
if not utils.valid_string(mgroup):
        LOGGER.error(' group_genesis.ini is not a valid path, group_genesis.ini is %s', mgroup)
        raise MCError(
            ' group_genesis.ini is not a valid path, group_genesis.ini is %s' % mgroup)
# read and parser config file
config_parser = configparser.ConfigParser()
try:
with codecs.open(mgroup, 'r', encoding='utf-8') as file_mchain:
            config_parser.read_file(file_mchain)
except Exception as ini_exp:
LOGGER.error(
' open group_genesis.ini file failed, exception is %s', ini_exp)
raise MCError(
' open group_genesis.ini file failed, exception is %s' % ini_exp)
if config_parser.has_section('group'):
MgroupConf.group_id = config_parser.get('group', 'group_id')
else:
LOGGER.error(
' invalid group_genesis.ini format, group id is %s', MgroupConf.group_id)
raise MCError(
' invalid group_genesis.ini format, group id is %s' % MgroupConf.group_id)
if not config_parser.has_section('nodes'):
LOGGER.error(
' invalid group_genesis.ini format, nodes not existed!')
raise MCError(
' invalid group_genesis.ini format, nodes not existed!')
group_nodes = config_parser.options('nodes')
for node in group_nodes:
p2p_section = config_parser.get('nodes', node)
utils.valid_package(p2p_section)
MgroupConf.p2p_ip.append(p2p_section.split(':')[0])
MgroupConf.p2p_listen_port.append(p2p_section.split(':')[1])
LOGGER.info('group_id is %s', MgroupConf.group_id)
LOGGER.info('p2p_ip is %s', MgroupConf.p2p_ip)
LOGGER.info('p2p_listen_port is %s', MgroupConf.p2p_listen_port)
LOGGER.info('group_genesis.ini end, result is %s', MgroupConf())
| 1.53125 | 2 |
documents/error_calculate/error_calculate.py | fe1t/wifi_positioning | 2 | 68996 | <gh_stars>1-10
import math
with open("error_collection") as f:
data = eval(f.read())
def find_erdst(x1, y1, x2, y2):
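    """Euclidean distance between points (x1, y1) and (x2, y2)."""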
return math.sqrt( (x1-x2)**2 + (y1 - y2) ** 2)
error_distance = 0
for i in data:
error_distance += find_erdst(*i)
print float(error_distance) / len(data)
| 1.804688 | 2 |
vendor/github.com/google/certificate-transparency/python/ct/crypto/asn1/oid_test.py | weltonrodrigo/origin | 807 | 69124 | <filename>vendor/github.com/google/certificate-transparency/python/ct/crypto/asn1/oid_test.py
#!/usr/bin/env python
import unittest
from ct.crypto import error
from ct.crypto.asn1 import oid
from ct.crypto.asn1 import type_test_base
class ObjectIdentifierTest(type_test_base.TypeTestBase):
asn1_type = oid.ObjectIdentifier
hashable = True
initializers = (
((0, 0), "0.0"),
((1, 2), "1.2"),
((2, 5), "2.5"),
((1, 2, 3, 4), "1.2.3.4"),
((1, 2, 840, 113549), "1.2.840.113549"),
((1, 2, 840, 113549, 1), "1.2.840.113549.1"),
)
bad_initializers = (
# Too short.
("0", ValueError),
((0,), ValueError),
(("1"), ValueError),
((1,), ValueError),
# Negative components.
("-1", ValueError),
((-1,), ValueError),
("1.2.3.-4", ValueError),
((1, 2, 3, -4), ValueError),
# Invalid components.
("3.2.3.4", ValueError),
((3, 2, 3, 4), ValueError),
("0.40.3.4", ValueError),
((0, 40, 3, 4), ValueError),
)
encode_test_vectors = (
# Example from ASN.1 spec.
("2.100.3", "0603813403"),
# More examples.
("0.0", "060100"),
("1.2", "06012a"),
("2.5", "060155"),
("1.2.3.4", "06032a0304"),
("1.2.840", "06032a8648"),
("1.2.840.113549", "06062a864886f70d"),
("1.2.840.113549.1", "06072a864886f70d01")
)
bad_encodings = (
# Empty OID.
("0600"),
# Last byte has high bit set.
("06020080"),
("06032a86c8"),
# Leading '80'-octets in component.
("06042a8086c8"),
# Indefinite length.
("06808134030000")
)
bad_strict_encodings = ()
def test_dictionary(self):
rsa = oid.ObjectIdentifier(value=oid.RSA_ENCRYPTION)
self.assertEqual("rsaEncryption", rsa.long_name)
self.assertEqual("RSA", rsa.short_name)
def test_unknown_oids(self):
unknown = oid.ObjectIdentifier(value="1.2.3.4")
self.assertEqual("1.2.3.4", unknown.long_name)
self.assertEqual("1.2.3.4", unknown.short_name)
if __name__ == '__main__':
unittest.main()
| 1.546875 | 2 |
firstclass/middleware/alternative/__init__.py | bennylope/django-firstclass | 0 | 69252 | <filename>firstclass/middleware/alternative/__init__.py
from django.core.mail.message import EmailMultiAlternatives
class MultiAlternativesMiddleware(object):
def process_message(self, message):
return EmailMultiAlternatives(
subject=message.subject,
body=message.body,
from_email=message.from_email,
to=message.to,
bcc=message.bcc,
connection=message.connection,
attachments=message.attachments,
headers=message.extra_headers,
cc=message.cc
)
| 1.195313 | 1 |
alipay/aop/api/response/AlipayPcreditLoanCollateralCarQueryResponse.py | snowxmas/alipay-sdk-python-all | 213 | 69380 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayPcreditLoanCollateralCarQueryResponse(AlipayResponse):
def __init__(self):
super(AlipayPcreditLoanCollateralCarQueryResponse, self).__init__()
self._address = None
self._apply_no = None
self._car_brand_id = None
self._car_color = None
self._car_engine_no = None
self._car_mileage = None
self._car_model_id = None
self._car_reg_date = None
self._car_series_id = None
self._car_vin = None
self._cert_no = None
self._cert_type = None
self._created_time = None
self._lic_plate_address = None
self._lic_plate_no = None
self._name = None
@property
def address(self):
return self._address
@address.setter
def address(self, value):
self._address = value
@property
def apply_no(self):
return self._apply_no
@apply_no.setter
def apply_no(self, value):
self._apply_no = value
@property
def car_brand_id(self):
return self._car_brand_id
@car_brand_id.setter
def car_brand_id(self, value):
self._car_brand_id = value
@property
def car_color(self):
return self._car_color
@car_color.setter
def car_color(self, value):
self._car_color = value
@property
def car_engine_no(self):
return self._car_engine_no
@car_engine_no.setter
def car_engine_no(self, value):
self._car_engine_no = value
@property
def car_mileage(self):
return self._car_mileage
@car_mileage.setter
def car_mileage(self, value):
self._car_mileage = value
@property
def car_model_id(self):
return self._car_model_id
@car_model_id.setter
def car_model_id(self, value):
self._car_model_id = value
@property
def car_reg_date(self):
return self._car_reg_date
@car_reg_date.setter
def car_reg_date(self, value):
self._car_reg_date = value
@property
def car_series_id(self):
return self._car_series_id
@car_series_id.setter
def car_series_id(self, value):
self._car_series_id = value
@property
def car_vin(self):
return self._car_vin
@car_vin.setter
def car_vin(self, value):
self._car_vin = value
@property
def cert_no(self):
return self._cert_no
@cert_no.setter
def cert_no(self, value):
self._cert_no = value
@property
def cert_type(self):
return self._cert_type
@cert_type.setter
def cert_type(self, value):
self._cert_type = value
@property
def created_time(self):
return self._created_time
@created_time.setter
def created_time(self, value):
self._created_time = value
@property
def lic_plate_address(self):
return self._lic_plate_address
@lic_plate_address.setter
def lic_plate_address(self, value):
self._lic_plate_address = value
@property
def lic_plate_no(self):
return self._lic_plate_no
@lic_plate_no.setter
def lic_plate_no(self, value):
self._lic_plate_no = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
def parse_response_content(self, response_content):
response = super(AlipayPcreditLoanCollateralCarQueryResponse, self).parse_response_content(response_content)
if 'address' in response:
self.address = response['address']
if 'apply_no' in response:
self.apply_no = response['apply_no']
if 'car_brand_id' in response:
self.car_brand_id = response['car_brand_id']
if 'car_color' in response:
self.car_color = response['car_color']
if 'car_engine_no' in response:
self.car_engine_no = response['car_engine_no']
if 'car_mileage' in response:
self.car_mileage = response['car_mileage']
if 'car_model_id' in response:
self.car_model_id = response['car_model_id']
if 'car_reg_date' in response:
self.car_reg_date = response['car_reg_date']
if 'car_series_id' in response:
self.car_series_id = response['car_series_id']
if 'car_vin' in response:
self.car_vin = response['car_vin']
if 'cert_no' in response:
self.cert_no = response['cert_no']
if 'cert_type' in response:
self.cert_type = response['cert_type']
if 'created_time' in response:
self.created_time = response['created_time']
if 'lic_plate_address' in response:
self.lic_plate_address = response['lic_plate_address']
if 'lic_plate_no' in response:
self.lic_plate_no = response['lic_plate_no']
if 'name' in response:
self.name = response['name']
| 1.21875 | 1 |
test/test_airtunnel/testdags/university_pyspark.py | joerg-schneider/airflow-bootstrap | 23 | 69508 | <reponame>joerg-schneider/airflow-bootstrap
from datetime import datetime
from airflow.models import DAG
from airtunnel import PySparkDataAsset
from airtunnel.operators.archival import DataAssetArchiveOperator, IngestArchiveOperator
from airtunnel.operators.ingestion import IngestOperator
from airtunnel.operators.loading import StagingToReadyOperator
from airtunnel.operators.transformation import PySparkTransformationOperator
from airtunnel.sensors.ingestion import SourceFileIsReadySensor
student = PySparkDataAsset("student_pyspark")
programme = PySparkDataAsset("programme_pyspark")
enrollment = PySparkDataAsset("enrollment_pyspark")
enrollment_summary = PySparkDataAsset("enrollment_summary_pyspark")
with DAG(
dag_id="university_pyspark",
schedule_interval=None,
start_date=datetime(year=2019, month=9, day=1),
) as dag:
ingested_ready_tasks = set()
# a common stream of tasks for all ingested assets:
for ingested_asset in (student, programme, enrollment):
source_is_ready = SourceFileIsReadySensor(
# we reduce the poke interval to only 3 seconds so that our test runs complete faster
# do not do in production!! :)
asset=ingested_asset,
poke_interval=3,
no_of_required_static_pokes=2,
)
ingest = IngestOperator(asset=ingested_asset)
transform = PySparkTransformationOperator(asset=ingested_asset)
archive = DataAssetArchiveOperator(asset=ingested_asset)
staging_to_ready = StagingToReadyOperator(asset=ingested_asset)
ingest_archival = IngestArchiveOperator(asset=ingested_asset)
dag >> source_is_ready >> ingest >> transform >> archive >> staging_to_ready >> ingest_archival
ingested_ready_tasks.add(staging_to_ready)
# upon having loaded the three ingested assets, connect the aggregation downstream to them:
build_enrollment_summary = PySparkTransformationOperator(asset=enrollment_summary)
build_enrollment_summary.set_upstream(ingested_ready_tasks)
staging_to_ready = StagingToReadyOperator(asset=enrollment_summary)
dag >> build_enrollment_summary >> staging_to_ready
| 1.453125 | 1 |
main.py | BPod123/WebSudokuSolver | 0 | 69636 |
from WebHandler import Difficulty
from random import randint
from WebSudokuSolver import WebSudokuSolver
if __name__ == '__main__':
solver = WebSudokuSolver()
solver.handler.signIn()
for i in range(253):
try:
solver.solve(Difficulty.Easy, 8 * 60 + 30 + randint(20, 80))
except:
continue
# for difficulty in range(Difficulty.Hard):
# for i in range(50):
# try:
# solver.solve(Difficulty.Hard - difficulty, 5 * 60 + randint(20, 80))
# except:
# continue
| 1.710938 | 2 |
upload/exceptions.py | branty/hhp | 0 | 69764 | <reponame>branty/hhp
#!/usr/bin/env python2
#-*- coding:utf-8 -*-
"""
Author: <NAME>
Email: <EMAIL>
Date: 2018-11-16
"""
class BaseException(Exception):
"base exception"
def __init__(self, msg):
self.msg = msg
class UnsupportedError(BaseException):
"""Not support"""
def __init__(self, msg):
self.msg = msg
class RequestMethodNotImplemented(UnsupportedError):
"""Request method not implemented"""
def __init__(self, msg):
self.msg = msg
class ParameterNotFound(BaseException):
"""Parameter not Found"""
def __init__(self, msg):
self.msg = msg
class ExcelFormError(BaseException):
"""Excel form is invalid"""
def __init__(self, msg):
self.msg = msg
| 1.125 | 1 |
pineboolib/qsa/tests/test_orm.py | Aulla/pineboo | 2 | 69892 | <gh_stars>1-10
"""
Tests for Orm on qsa.
"""
import unittest
from pineboolib.loader.main import init_testing, finish_testing
from pineboolib.qsa import qsa
class TestOrm(unittest.TestCase):
"""Test Orm."""
@classmethod
def setUpClass(cls) -> None:
"""Ensure pineboo is initialized for testing."""
init_testing()
def test_load(self) -> None:
"""Load model."""
qsa.thread_session_new()
class1_ = qsa.from_project("flareas_orm")
obj_ = qsa.orm_("flareas")()
self.assertEqual(class1_, obj_.__class__)
qsa.thread_session_free()
# ===============================================================================
# def test_sessions_isolation(self) -> None:
#
# session1_ = qsa.session()
# session2_ = qsa.session("dbAux")
# session3_ = qsa.session("aux")
#
# self.assertNotEqual(session1_, session2_)
# self.assertNotEqual(session1_, session3_)
# self.assertNotEqual(session2_, session3_)
# ===============================================================================
def test_create_object(self) -> None:
"""Create object."""
class_ = qsa.from_project("flareas_orm")
self.assertTrue(class_)
obj_ = class_()
setattr(obj_, "bloqueo", True)
setattr(obj_, "idarea", "A")
setattr(obj_, "descripcion", "Area A")
self.assertEqual(obj_.idarea, "A")
self.assertEqual(obj_.bloqueo, True)
self.assertEqual(getattr(obj_, "descripcion", ""), "Area A")
def test_insert_to_database(self) -> None:
"""Insert object to database."""
session_ = qsa.session()
class_ = qsa.from_project("flareas_orm")
obj_ = class_()
setattr(obj_, "bloqueo", True)
setattr(obj_, "idarea", "A")
setattr(obj_, "descripcion", "Area A")
session_.begin()
session_.add(
obj_
        )  # Inserts the new record into the DB. From now on, later changes will be persisted to the DB.
# res_1 = session_.execute("SELECT idarea FROM flareas WHERE idarea = 'A'")
# self.assertFalse(res_1.returns_rows)
        session_.flush()  # Applies the change in the DB.
res_2 = session_.execute("SELECT idarea FROM flareas WHERE idarea = 'A'")
self.assertTrue(res_2.returns_rows)
        obj2_ = session_.query(class_).get("A")  # Retrieves the record from the DB
self.assertEqual(obj_, obj2_)
session_.rollback()
def test_delete_from_database(self) -> None:
"""Insert object to database."""
session_ = qsa.session()
class_ = qsa.from_project("flareas_orm")
obj_ = class_()
setattr(obj_, "bloqueo", True)
setattr(obj_, "idarea", "A")
setattr(obj_, "descripcion", "Area A")
session_.begin()
session_.add(obj_)
        session_.commit()  # Closes the session (transaction)
session_2 = qsa.session()
        obj2_ = session_2.query(class_).get("A")  # Retrieves the record from the DB
self.assertTrue(obj2_)
session_2.begin()
session_2.delete(obj2_)
session_2.commit()
session_3 = qsa.session()
obj3_ = session_3.query(class_).get("A") # Recupera el registro de la BD
self.assertFalse(obj3_)
def test_modify_data(self) -> None:
"""Insert object to database."""
session_ = qsa.thread_session_new()
class_ = qsa.from_project("flareas_orm")
obj_ = class_()
setattr(obj_, "bloqueo", True)
setattr(obj_, "idarea", "B")
setattr(obj_, "descripcion", "Area B")
session_.begin()
self.assertTrue(obj_.save())
        # session_.add(obj_)  # Inserts the new record into the DB
session_.commit()
session_2 = qsa.session()
session_2.begin()
        obj2_ = session_2.query(class_).get("B")  # Retrieves the record from the DB
self.assertEqual(obj2_.descripcion, "Area B")
obj2_.descripcion = "Area B modificada"
        session_2.commit()  # Saves the change permanently.
session_3 = qsa.session()
obj3_ = session_3.query(class_).get("B")
self.assertEqual(obj3_.descripcion, "Area B modificada")
qsa.thread_session_free()
def test_legacy_metadata(self) -> None:
"""Compares metadata with rom metadata."""
aq_app = qsa.aqApp
class_ = qsa.from_project("flareas_orm")
metadata = aq_app.db().manager().metadata("flareas")
self.assertTrue(metadata)
if metadata is not None:
self.assertEqual(metadata.name(), class_.legacy_metadata["name"])
self.assertEqual(metadata.alias(), class_.legacy_metadata["alias"])
result = None
for field in class_.legacy_metadata["fields"]:
if field["name"] == "bloqueo":
result = field["default"]
break
self.assertEqual(
metadata.field("bloqueo").defaultValue(), result # type: ignore [union-attr]
)
def test_save_points(self) -> None:
"""Save points."""
session_ = (
qsa.thread_session_new()
        )  # Implies a new transaction if the previous call ended with rollback or commit.
        # Otherwise it continues in the transaction that was opened last time.
class_ = qsa.from_project("flareas_orm")
obj_ = class_()
obj_.idarea = "C"
obj_.descripcion = "Descripción C"
obj_.bloqueo = True
session_.begin()
session_.add(obj_)
session_.begin_nested() # Save point
obj_.descripcion = "Descripción Nueva"
obj2_ = session_.query(class_).get("C")
self.assertEqual(obj_.descripcion, "Descripción Nueva")
self.assertEqual(obj2_.descripcion, "Descripción Nueva")
session_.rollback() # rollback save_point
self.assertEqual(obj_.descripcion, "Descripción C")
        session_.rollback()  # rollback transaction
qsa.thread_session_free()
@classmethod
def tearDownClass(cls) -> None:
"""Ensure test clear all data."""
finish_testing()
| 1.507813 | 2 |
tests/__init__.py | hemna/aprsd-twitter-plugin | 2 | 70020 | """Unit test package for aprsd_twitter_plugin."""
| 0.052979 | 0 |
spells/scr/tpModifiers/sp_vertigo.py | dolio/toee-mod | 0 | 70148 | <reponame>dolio/toee-mod
from toee import *
import tpdp
from templeplus.pymod import PythonModifier
from spell_utils import *
def Descr(spell_id):
packet = tpdp.SpellPacket(spell_id)
return game.get_spell_mesline(packet.spell_enum)
def Penalty(attachee, args, evt_obj):
spell_id = args.get_arg(0)
evt_obj.bonus_list.add(-2, 0, Descr(spell_id))
return 0
def reflex_roll_delta(target, dc):
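    """Rolls d20 plus the Reflex save modifier against dc and returns the
    margin (negative means the check failed by that amount)."""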
dice = dice_new('1d20')
reflex_mod = target.stat_level_get(stat_save_reflexes)
bonus = tpdp.BonusList()
bonus.add(reflex_mod, 0, "~Reflex~[TAG_SAVE_REFLEX] Saves")
roll = dice.roll()
hist = tpdp.create_history_dc_roll(
target, dc, dice, roll, "Reflexive Balance", bonus)
game.create_history_from_id(hist)
print roll
return roll + reflex_mod - dc
def Balance(target, args, evt_obj):
# swap the commenting on the `result` lines if balance is
# valid in the module you are playing and you want to use it.
# result = target.skill_roll_delta(skill_balance, 10, 1)
result = reflex_roll_delta(target, 10)
if result >= 0:
return 0
elif result < -4:
target.fall_down()
target.condition_add_with_args('Unsteady', 0, 0)
return 0
def Remove(attachee, args, evt_obj):
args.condition_remove()
return 0
vertigo = PythonModifier('sp-Vertigo', 4)
vertigo.AddHook(ET_OnGetTooltip, EK_NONE, spellTooltip, ())
vertigo.AddHook(ET_OnGetEffectTooltip, EK_NONE, spellEffectTooltip, ())
vertigo.AddHook(ET_OnToHitBonus2, EK_NONE, Penalty, ())
vertigo.AddHook(ET_OnSaveThrowLevel, EK_NONE, Penalty, ())
vertigo.AddHook(ET_OnD20Signal, EK_S_BeginTurn, Balance, ())
vertigo.AddHook(ET_OnD20Signal, EK_S_Killed, Remove, ())
vertigo.AddHook(ET_OnD20Signal, EK_S_Dismiss_Spells, checkRemoveSpell, ())
vertigo.AddSpellDispelCheckStandard()
vertigo.AddSpellTeleportPrepareStandard()
vertigo.AddSpellTeleportReconnectStandard()
vertigo.AddSpellCountdownStandardHook()
def Unsteady(attachee, args, evt_obj):
# flags, newCap, capType, mesline
evt_obj.bonus_list.set_overall_cap(1, 0, 0, 1004)
evt_obj.bonus_list.set_overall_cap(2, 0, 0, 1004)
return 0
def UTooltip(attachee, args, evt_obj):
evt_obj.append('Unsteady')
return 0
def UETooltip(attachee, args, evt_obj):
key = tpdp.hash('UNSTEADY')
evt_obj.append(key, -2, '')
return 0
unsteady = PythonModifier('Unsteady', 2)
unsteady.AddHook(ET_OnGetTooltip, EK_NONE, UTooltip, ())
unsteady.AddHook(ET_OnGetEffectTooltip, EK_NONE, UETooltip, ())
unsteady.AddHook(ET_OnGetMoveSpeed, EK_NONE, Unsteady, ())
unsteady.AddHook(ET_OnD20Signal, EK_S_EndTurn, Remove, ())
unsteady.AddHook(ET_OnD20Signal, EK_S_Combat_End, Remove, ())
unsteady.AddHook(ET_OnD20Signal, EK_S_Killed, Remove, ())
| 1.546875 | 2 |
EuropePubMedCentralDataset.py | GabrielePisciotta/europe-pubmed-central-dataset | 0 | 70276 | <gh_stars>0
from os import listdir, system, remove
from os.path import isfile, join
import re
import multiprocessing
from urllib.parse import unquote
import json
from lxml import etree
import pandas as pd
import tqdm
import time
import httplib2
from bs4 import BeautifulSoup, SoupStrainer
import wget
from multiprocessing.pool import ThreadPool
import os
import uuid
from queue import Queue
from typing import Optional
import csv
from threading import Thread
import pickle
from config import *
__author__ = "<NAME>"
class EuropePubMedCentralDataset:
def __init__(self,
start_path,
writing_multiple_csv,
skip_download,
download_workers,
unzip_threads,
process_article_threads,
max_file_to_download):
self.pubmed_file_path = start_path
self.skip_download = skip_download
self.download_workers = download_workers
self.unzip_threads = unzip_threads
self.process_article_threads = process_article_threads
self.max_file_to_download = max_file_to_download
self.pubmed_dump_file_path = join(self.pubmed_file_path, 'dump')
self.articles_path = join(self.pubmed_file_path, 'articles')
self.csv_file_path = join(self.pubmed_file_path, 'csv')
self.folder_articles = folder_articles
# We can both exploit a queue in order to write into a single dataset.csv
# or to save multiple csv and then concatenate them into the final dataset
self.writing_multiple_csv = writing_multiple_csv
if not self.writing_multiple_csv:
self.queue = Queue()
os.makedirs(self.articles_path, exist_ok=True)
os.makedirs(self.csv_file_path, exist_ok=True)
os.makedirs(self.pubmed_dump_file_path, exist_ok=True)
def start(self):
if not self.skip_download:
# for each file from the pubmed dump
f = self._get_files_in_dir(self.pubmed_dump_file_path)
# load local index of already downloaded dump and add to the list of already downloaded file
if os.path.isfile(join(self.pubmed_file_path, 'downloaded-dump.txt')):
with open(join(self.pubmed_file_path, 'downloaded-dump.txt'), 'r') as index_file:
# The index stores one dump filename per line, so read all of them
# (readline() would only pick up the first downloaded dump).
for line in index_file:
f.append(line.replace("\n", ""))
# get the difference between files to download and files that we have
links = self.get_links_from_pubmed()
if len(links) > 0:
todownload = list(set(links).difference(set(f)))
if self.max_file_to_download is not None:
todownload = todownload[:int(self.max_file_to_download)]
if len(todownload):
print("\nDownloading {} OA dumps from EuropePubMedCentral".format(len(todownload)))
with multiprocessing.Pool(self.download_workers) as pool:
pool.map(worker_download_links, ((d, self.pubmed_dump_file_path) for d in todownload))
else:
print("No link to download!")
# Update the file list
f = self._get_files_in_dir(self.pubmed_dump_file_path)
# Unzip all the files
if len(f) > 0:
print("\nUnzipping all the articles")
s = time.time()
with ThreadPool(self.unzip_threads) as pool:
list(tqdm.tqdm(pool.imap(self.worker_unzip_files, f), total=len(f)))
e = time.time()
print("\nTime: {}".format((e - s)))
# process each article
f = self._get_articles_in_dir(self.articles_path)
if len(f) > 0:
self.load_PMC_ids()
s = time.time()
print("\nProcessing the articles")
self.process_articles(f)
e = time.time()
print("\nTime: {}".format((e - s)))
self._concatenate_datasets(self.csv_file_path)
def load_PMC_ids(self):
# Download articles' IDs --
if not os.path.isfile(join(self.pubmed_file_path, 'PMC-ids.csv.gz')):
print("\nDownloading PMC's IDs dataset")
wget.download('http://ftp.ncbi.nlm.nih.gov/pub/pmc/PMC-ids.csv.gz', self.pubmed_file_path)
# Pickle a dictionary of the dataframe containing only the keys that we care about
if not os.path.isfile(join(self.pubmed_file_path, 'PMC-ids.pkl')):
# Read the dataset and create a single big dict having all the needed keys for entity resolution
articleids = pd.read_csv(join(self.pubmed_file_path, 'PMC-ids.csv.gz'), usecols=['PMCID', 'PMID', 'DOI'],
low_memory=True)
articleids = articleids.drop_duplicates()
view = articleids[articleids['PMID'].notna()].copy() # copy to avoid SettingWithCopyWarning
view['PMID'] = view['PMID'].astype(int)
view_clean = view.drop_duplicates(subset='PMID', keep="last")
dataset = view_clean.set_index('PMID').to_dict('index')
del view
view = articleids[articleids['PMCID'].notna()].copy() # copy to avoid SettingWithCopyWarning
view['PMID'] = view['PMID'].astype('Int64')
del articleids
view_clean = view.drop_duplicates(subset='PMCID', keep="last")
self.articleids = {**dataset, **view_clean.set_index('PMCID').to_dict('index')}
del view
pickle.dump(obj=self.articleids, file=open(join(self.pubmed_file_path, 'PMC-ids.pkl'), 'wb'))
else:
print("Loading PMC IDs from pickled dict")
self.articleids = pickle.load(open(join(self.pubmed_file_path, 'PMC-ids.pkl'), 'rb'))
def write_to_csv(self):
keys = ['cur_doi', 'cur_pmid', 'cur_pmcid', 'cur_name', 'references']
while True:
if not self.queue.empty():
row = self.queue.get()
if row == "STOP":
return
else:
row = [v for k, v in row.items()]
if not os.path.isfile(join(self.csv_file_path, "dataset.csv")):
with open(join(self.csv_file_path, "dataset.csv"), 'w', newline='') as output_file:
dict_writer = csv.writer(output_file, delimiter='\t')
dict_writer.writerow(keys)
dict_writer.writerow(row)
else:
with open(join(self.csv_file_path, "dataset.csv"), 'a', newline='') as output_file:
dict_writer = csv.writer(output_file, delimiter='\t')
dict_writer.writerow(row)
def worker_article(self, f: str) -> None:
# Use the extracted file
with open(f, 'r') as fi:
filename = f.split(os.sep)[-1]
try:
cur_xml = etree.parse(fi)
except Exception as e:
print(e)
os.makedirs(join(self.articles_path, 'exceptions'), exist_ok=True)
with open(join(self.articles_path, 'exceptions', filename), 'w') as fout:
for line in fi:
fout.write(line)
os.remove(f)
return
cur_pmid = self.get_id_from_xml_source(cur_xml, 'pmid')
cur_pmcid = self.get_id_from_xml_source(cur_xml, 'pmcid')
if cur_pmcid is not None and not cur_pmcid.startswith("PMC"):
cur_pmcid = "PMC{}".format(cur_pmcid)
cur_doi = self.normalise_doi(self.get_id_from_xml_source(cur_xml, 'doi'))
# If we have no identifier, stop the processing of the article
if cur_pmid is None and cur_pmcid is None and cur_doi is None:
os.makedirs(join(self.articles_path, 'without-id'), exist_ok=True)
with open(join(self.articles_path, 'without-id', filename), 'w') as fout:
with open(f, 'r') as fi:
for line in fi:
fout.write(line)
os.remove(f)
return
try:
# Extract missing metadata from the ID dataset
if cur_pmid is None or cur_pmcid is None or cur_doi is None:
row = None
if cur_pmid is not None and self.articleids.__contains__(int(cur_pmid)):
row = self.articleids[int(cur_pmid)]
elif cur_pmcid is not None and self.articleids.__contains__(cur_pmcid):
row = self.articleids[cur_pmcid]
if row is not None and len(row):
if cur_pmid is None and row['PMID'] is not None and not pd.isna(row['PMID']):
cur_pmid = row['PMID']
if cur_pmcid is None and row['PMCID'] is not None and not pd.isna(row['PMCID']):
cur_pmcid = row['PMCID']
if cur_doi is None and row['DOI'] is not None:
cur_doi = self.normalise_doi(str(row['DOI']))
references = cur_xml.xpath(".//ref-list/ref")
references_list = []
if len(references):
for reference in references:
entry_text = self.create_entry_xml(reference)
ref_pmid = None
ref_doi = None
ref_pmcid = None
ref_url = None
ref_xmlid = None
ref_xmlid_attr = reference.get('id')
if ref_xmlid_attr: # get() returns None when the attribute is missing
ref_xmlid = ref_xmlid_attr
if ref_xmlid == "":
ref_xmlid = None
ref_pmid_el = reference.xpath(".//pub-id[@pub-id-type='pmid']")
if len(ref_pmid_el):
ref_pmid = etree.tostring(
ref_pmid_el[0], method="text", encoding='unicode').strip()
ref_doi_el = reference.xpath(".//pub-id[@pub-id-type='doi']")
if len(ref_doi_el):
ref_doi = self.normalise_doi(etree.tostring(
ref_doi_el[0], method="text", encoding='unicode').lower().strip())
if ref_doi == "":
ref_doi = None
ref_pmcid_el = reference.xpath(".//pub-id[@pub-id-type='pmcid']")
if len(ref_pmcid_el):
ref_pmcid = etree.tostring(
ref_pmcid_el[0], method="text", encoding='unicode').strip()
if ref_pmcid == "":
ref_pmcid = None
elif not ref_pmcid.startswith("PMC"):
ref_pmcid = "PMC{}".format(ref_pmcid)
ref_url_el = reference.xpath(".//ext-link")
if len(ref_url_el):
ref_url = etree.tostring(
ref_url_el[0], method="text", encoding='unicode').strip()
if not ref_url.startswith("http"):
ref_url = None
# Extract missing metadata from the ID dataset
if ref_pmid is None or ref_pmcid is None or ref_doi is None:
row = None
if ref_pmid is not None and self.articleids.__contains__(int(ref_pmid)):
row = self.articleids[int(ref_pmid)]
elif ref_pmcid is not None and self.articleids.__contains__(ref_pmcid):
row = self.articleids[ref_pmcid]
if row is not None and len(row):
if ref_pmid is None and row['PMID'] is not None and not pd.isna(row['PMID']):
ref_pmid = row['PMID']
if ref_pmcid is None and row['PMCID'] is not None and not pd.isna(row['PMCID']):
ref_pmcid = row['PMCID']
if not ref_pmcid.startswith("PMC"):
ref_pmcid = "PMC{}".format(ref_pmcid)
if ref_doi is None and row['DOI'] is not None:
ref_doi = self.normalise_doi(str(row['DOI']))
# Create an object to store the reference
obj = {}
if entry_text is not None:
obj['entry_text'] = entry_text
if ref_pmid is not None:
obj['ref_pmid'] = str(ref_pmid)
if ref_pmcid is not None:
obj['ref_pmcid'] = ref_pmcid
if ref_doi is not None:
obj['ref_doi'] = ref_doi
if ref_url is not None:
obj['ref_url'] = ref_url
if ref_xmlid is not None:
obj['ref_xmlid'] = ref_xmlid
references_list.append(obj)
if self.writing_multiple_csv:
df = pd.DataFrame({
'cur_doi': [cur_doi],
'cur_pmid': [cur_pmid],
'cur_pmcid': [cur_pmcid],
'cur_name': [f.split("articles"+os.sep)[-1]],
'references': [json.dumps(references_list)]
})
df.to_csv(join(self.csv_file_path, "{}.csv".format(filename)), sep="\t", index=False)
else:
self.queue.put({
'cur_doi': cur_doi,
'cur_pmid': cur_pmid,
'cur_pmcid': cur_pmcid,
'cur_name': f,
'references': json.dumps(references_list)
})
except Exception as e:
os.makedirs(join(self.articles_path, 'exceptions'), exist_ok=True)
with open(join(self.articles_path, 'exceptions', filename), 'w') as fout:
with open(f, 'r') as fi:
for line in fi:
fout.write(line)
os.remove(f)
print("Exception {} with file: {}".format(e, f))
return
def process_articles(self, f):
articles_to_process = []
for dump_articles_folder in f:
for path, subdirs, files in os.walk(os.path.join(self.articles_path, dump_articles_folder)):
for name in files:
articles_to_process.append(os.path.join(path, name))
if not self.writing_multiple_csv:
consumer = Thread(target=self.write_to_csv)
consumer.setDaemon(True)
consumer.start()
with ThreadPool(self.process_article_threads) as pool:
list(tqdm.tqdm(pool.imap(self.worker_article, (fi for fi in articles_to_process)), total=len(articles_to_process)))
if not self.writing_multiple_csv:
self.queue.put("STOP")
consumer.join()
@staticmethod
def normalise_doi(doi_string) -> Optional[
str]: # taken from https://github.com/opencitations/index/blob/master/identifier/doimanager.py
if doi_string is not None:
try:
doi_string = re.sub("\0+", "", re.sub("\s+", "", unquote(doi_string[doi_string.index("10."):])))
return doi_string.lower().strip()
except ValueError:
return None
else:
return None
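# Illustrative behaviour of normalise_doi (not part of the original file;
# the inputs are made-up examples):
#
# EuropePubMedCentralDataset.normalise_doi("https://doi.org/10.1000/ABC%20123")
# # -> "10.1000/abc123" (prefix stripped, percent-decoded, whitespace removed,
# # lowercased)
# EuropePubMedCentralDataset.normalise_doi("no doi here") # -> None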
def worker_unzip_files(self, f: str) -> None:
try:
# Unzip
system("gunzip -k {}".format(join(self.pubmed_dump_file_path, f)))
# This is the new filename
gzip_name = f
f = f.replace(".gz", "")
# Create one file for each article, having its named
tree = etree.parse(join(self.pubmed_dump_file_path, f), etree.XMLParser(remove_blank_text=True))
# Extract all the article nodes
articles = tree.findall('article')
dump_articles_dir = os.path.join(self.articles_path, f.replace(".xml", ""))
os.makedirs(dump_articles_dir, exist_ok=True)
for i in range(self.folder_articles): # articles are bucketed by i % folder_articles
os.makedirs(os.path.join(dump_articles_dir, str(i)), exist_ok=True)
for i, cur_xml in enumerate(articles):
dir_of_article = os.path.join(dump_articles_dir, str(i % self.folder_articles))
with open(join(dir_of_article, "{}.xml".format(str(uuid.uuid4()))), 'w') as writefile:
writefile.write(etree.tostring(cur_xml, pretty_print=True, encoding='unicode'))
# Remove the downloaded dump
remove(join(self.pubmed_dump_file_path, f))
remove(join(self.pubmed_dump_file_path, gzip_name))
except Exception as e:
print("Exception during the extraction: {}".format(e))
system("rm {}{}*.xml".format(self.pubmed_dump_file_path,os.sep))
@staticmethod
def create_entry_xml(xml_ref): # Taken from CCC
entry_string = ""
el_citation = xml_ref.xpath("./element-citation | ./mixed-citation | ./citation")
if len(el_citation):
cur_el = el_citation[0]
is_element_citation = cur_el.tag == "element-citation" or cur_el.tag == "citation"
has_list_of_people = False
first_text_passed = False
for el in cur_el.xpath(".//node()"):
type_name = type(el).__name__
if type_name == "_Element":
cur_text = el.text
if cur_text is not None and " ".join(cur_text.split()) != "":
if first_text_passed:
is_in_person_group = len(el.xpath("ancestor::person-group")) > 0
if is_in_person_group:
entry_string += ", "
has_list_of_people = True
elif not is_in_person_group and has_list_of_people:
entry_string += ". "
has_list_of_people = False
else:
if is_element_citation:
entry_string += ", "
else:
entry_string += " "
else:
first_text_passed = True
if el.tag == "pub-id":
if el.xpath("./@pub-id-type = 'doi'"):
entry_string += "DOI: "
elif el.xpath("./@pub-id-type = 'pmid'"):
entry_string += "PMID: "
elif el.xpath("./@pub-id-type = 'pmcid'"):
entry_string += "PMC: "
elif type_name == "_ElementStringResult" or type_name == "_ElementUnicodeResult":
entry_string += el
del cur_el
del el
entry_string = " ".join(entry_string.split())
entry_string = re.sub(" ([,\.!\?;:])", "\\1", entry_string)
entry_string = re.sub("([\-–––]) ", "\\1", entry_string)
entry_string = re.sub("[\-–––,\.!\?;:] ?([\-–––,\.!\?;:])", "\\1", entry_string)
entry_string = re.sub("(\(\. ?)+", "(", entry_string)
entry_string = re.sub("(\( +)", "(", entry_string)
del el_citation
if entry_string is not None and entry_string != "":
return entry_string
else:
return None
@staticmethod
def get_id_from_xml_source(cur_xml, id_type):
"""This method extract an id_type from the XML"""
if id_type not in ["doi", "pmid", "pmcid"]:
print("Wrong id used: {}".format(id_type))
return None
id_string = cur_xml.xpath(".//front/article-meta/article-id[@pub-id-type='{}']".format(id_type))
if len(id_string):
id_string = u"" + etree.tostring(id_string[0], method="text", encoding='unicode').strip()
if id_string != "":
del cur_xml
toret = str(id_string)
del id_string
return toret
# Get list of file inside the dir
def _get_files_in_dir(self, path: str) -> list:
list_of_files = [f for f in listdir(path) if isfile(join(path, f))]
return list_of_files
def _get_articles_in_dir(self, path: str) -> list:
list_of_files = [f for f in listdir(path)]
return list_of_files
def _concatenate_datasets(self, path: str) -> str:
if self.writing_multiple_csv:
present_files = list(self._get_files_in_dir(path))
header_saved = False
if len(present_files) > 0:
print("\nConcatenating dataset")
start = time.time()
with open(join(path, 'dataset.csv'), 'w') as fout:
for f in tqdm.tqdm(present_files):
if f != "dataset.csv":
with open(join(path, f)) as fin:
header = next(fin)
if not header_saved:
fout.write(header)
header_saved = True
for line in fin:
fout.write(line)
os.remove(join(path, f))
df = pd.read_csv(join(path, 'dataset.csv'), sep='\t')
df.drop_duplicates(inplace=True)
df.to_csv(join(path, 'dataset.csv'), sep='\t', index=False)
end = time.time()
print("Time: {}".format((end - start)))
return join(path, 'dataset.csv')
def get_links_from_pubmed(self) -> list:
links = []
http = httplib2.Http(timeout=20)
try:
# httplib2 returns a (response, content) pair
response, content = http.request('http://europepmc.org/ftp/oa/')
if response['status'] != '200':
raise Exception("response code {}".format(response['status']))
for link in BeautifulSoup(content, 'html.parser', parse_only=SoupStrainer('a')):
if link.has_attr('href'):
if "xml.gz" in link['href']:
links.append(link['href'])
return links
except Exception as e:
print("Cannot get OA links: {}".format(e))
return []
def worker_download_links(args):
""" If something goes wrong, then wait 3 sec and retry until the max number of possible tries is reached """
todownload, pubmed_dump_file_path = args
downloaded = False
retry = 0
while not downloaded and retry < max_retry:
try:
wget.download('http://europepmc.org/ftp/oa/{}'.format(todownload), pubmed_dump_file_path)
downloaded = True
with open(os.path.join(pubmed_dump_file_path, '..', 'downloaded-dump.txt'), 'a') as index_file:
index_file.write(todownload + "\n")
except Exception as e:
print("\n(retry #{}) Problem with {}: {}".format(retry, todownload, e))
retry += 1
time.sleep(sec_between_retry)
if __name__ == '__main__':
e = EuropePubMedCentralDataset(start_path=start_path,
writing_multiple_csv=writing_multiple_csv,
skip_download=skip_download,
download_workers=download_workers,
unzip_threads=unzip_threads,
process_article_threads=process_article_threads,
max_file_to_download=max_file_to_download)
e.start()
| 1.898438 | 2 |
parsimony/functions/multiblock/losses.py | neurospin/pylearn-parsimony | 41 | 70404 | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""
The :mod:`parsimony.functions.losses` module contains multiblock loss
functions.
Copyright (c) 2013-2017, CEA/DSV/I2BM/Neurospin. All rights reserved.
Created on Tue Feb 4 08:51:43 2014
@author: <NAME>
@email: <EMAIL>
@license: BSD 3-clause.
"""
import numbers
import numpy as np
import parsimony.utils as utils
import parsimony.utils.maths as maths
import parsimony.functions.properties as properties
import parsimony.utils.consts as consts
from . import properties as mb_properties
__all__ = ["CombinedMultiblockFunction",
"MultiblockFunctionWrapper", "MultiblockNesterovFunctionWrapper",
"LatentVariableCovariance"]
class CombinedMultiblockFunction(mb_properties.MultiblockFunction,
mb_properties.MultiblockGradient,
mb_properties.MultiblockProximalOperator,
mb_properties.MultiblockProjectionOperator,
# mb_properties.MultiblockContinuation,
mb_properties.MultiblockStepSize):
"""Combines one or more loss functions, any number of penalties, any number
of smoothed functions, any number of penalties with known proximal
operators and any number of constraints.
This function thus represents
f(x) = f_1(x) [ + f_2(x) ... ] [ + d_1(x) ... ] [ + N_1(x) ...]
[ + p_1(x) ...],
subject to [ C_1(x) <= c_1,
C_2(x) <= c_2,
... ],
where f_i are differentiable loss Functions, d_j are differentiable
penalties, N_k are smoothed NesterovFunctions and p_l are
ProximalOperators. The C_m are ProjectionOperators and function as
constraints. All functions and penalties must thus be Gradient, unless they
are ProximalOperators or ProjectionOperators.
Parameters
----------
X : list of numpy arrays
The blocks of data in the multiblock model.
functions : list of lists of lists
A function matrix, with element i,j connecting block i to block j.
penalties : a list of lists of penalties
Element i of the outer list is also a list and contains the penalties
for block i.
smoothed : a list if lists of smoothed penalties
Element i of the outer list is also a list and contains the smoothed
penalties for block i.
prox : a list of lists of proximal operators
Element i of the outer list is also a list and contains the penalties
that can be expressed as proximal operators for block i.
constraints : a list of lists of projection operators
Element i of the outer list is also a list and contains the constraints
for block i.
"""
def __init__(self, X, functions=[], penalties=[], smoothed=[], prox=[],
constraints=[]):
self._param_map = dict()
self._method_map = dict()
self.K = len(X)
self.X = X
if len(functions) != self.K:
self._f = [0] * self.K
for i in range(self.K):
self._f[i] = [0] * self.K
for j in range(self.K):
self._f[i][j] = list()
else:
self._f = functions
if len(penalties) != self.K:
self._d = [0] * self.K
for i in range(self.K):
self._d[i] = list()
else:
self._d = [0] * self.K
for i in range(self.K):
self._d[i] = list()
for di in penalties[i]:
self._d[i].append(di)
if len(smoothed) != self.K:
self._N = [0] * self.K
for i in range(self.K):
self._N[i] = list()
else:
self._N = [0] * self.K
for i in range(self.K):
self._N[i] = list()
for Ni in smoothed[i]: # the smoothed penalties come from `smoothed`, not `penalties`
self._N[i].append(Ni)
if len(prox) != self.K:
self._p = [0] * self.K
for i in range(self.K):
self._p[i] = list()
else:
self._p = prox
if len(constraints) != self.K:
self._c = [0] * self.K
for i in range(self.K):
self._c[i] = list()
else:
self._c = constraints
self.reset()
def reset(self):
for fi in self._f:
for fij in fi:
for fijk in fij:
fijk.reset()
for di in self._d:
for dik in di:
dik.reset()
for Ni in self._N:
for Nik in Ni:
Nik.reset()
for pi in self._p:
for pik in pi:
pik.reset()
for ci in self._c:
for cik in ci:
cik.reset()
def set_params(self, **kwargs):
"""Set the given input parameters in the corresponding function.
"""
for k in kwargs:
if k in self._param_map:
param_map = self._param_map[k]
param = dict()
param[param_map[1]] = kwargs[k]
param_map[0].set_params(param)
else:
self.__setattr__(k, kwargs[k])
def _accept_params(self, function, accepts_params):
if accepts_params is not None:
if isinstance(accepts_params, tuple):
accepts_params = [accepts_params]
for param in accepts_params:
self._param_map[param[0]] = (function, param[1])
def _accept_methods(self, function, accepts_methods):
if accepts_methods is not None:
if isinstance(accepts_methods, tuple):
accepts_methods = [accepts_methods]
for method in accepts_methods:
if not hasattr(function, method[1]):
raise AttributeError("Target function does not have an %s "
"attribute!" % (method[1],))
else:
if method[0] in self._method_map:
self._method_map[method[0]].append((function, method[1]))
else:
self._method_map[method[0]] = [(function, method[1])]
# def __getattribute__(self, name):
# mmap = super(CombinedMultiblockFunction,
# self).__getattribute__("_method_map")
# if name in mmap:
# mm = mmap[name]
# fun = getattr(mm[0], mm[1])
# return fun
# else:
# return super(CombinedMultiblockFunction,
# self).__getattribute__(name)
def __getattr__(self, name):
if name == "_method_map":
if name not in self.__dict__:
self.__dict__["_method_map"] = dict()
else:
return self.__dict__["_method_map"] # Never run ...
mmap = self._method_map
if name in mmap:
mms = mmap[name] # A list of function-name pairs
funs = []
for mm in mms:
fun = getattr(mm[0], mm[1])
funs.append(fun)
if len(funs) > 1:
def function(*args, **kwargs):
results = []
for fun in funs:
result = fun(*args, **kwargs)
results.append(result)
return results
return function
else:
return funs[0]
else:
return super(CombinedMultiblockFunction,
self).__getattribute__(name)
def add_loss(self, function, i, j,
accepts_params=None, accepts_methods=None):
"""Add a loss function that connects blocks i and j.
Parameters
----------
function : Function or MultiblockFunction
A loss function that connects block i and block j.
i : int
Non-negative integer. Index of the first block. Zero based, so 0
is the first block.
j : int
Non-negative integer. Index of the second block. Zero based, so 0
is the first block.
accepts_params : 2-tuple or list of 2-tuples
The outer function will accept parameters with the name of the
first element of any tuple, and map them to this function with the
name of the second element of the tuple.
accepts_methods : 2-tuple or list of 2-tuples
The outer function will accept methods with the name of the
first element of any of the tuples, and map them to this function
with the method name of the second element of the tuple.
"""
if not isinstance(function, properties.Gradient):
if not isinstance(function, mb_properties.MultiblockGradient):
raise ValueError("Loss functions must have gradients.")
self._f[i][j].append(function)
self._accept_params(function, accepts_params)
self._accept_methods(function, accepts_methods)
@utils.deprecated("add_loss")
def add_function(self, function, i, j, accepts_params=None):
return self.add_loss(function, i, j, accepts_params=accepts_params)
def add_penalty(self, penalty, i, accepts_params=None):
"""Add a penalty, i.e. a constraint on the Lagrange form, for block i.
Parameters
----------
penalty : Penalty
A function that penalises the objective function.
i : int
Non-negative integer. Index of the block to penalise. Zero based,
so 0 is the first block.
accepts_params : 2-tuple or list of 2-tuples
The outer function will accept parameters with the name of the
first element of any tuple, and map them to this function with the
name of the second element of the tuple.
"""
if not isinstance(penalty, properties.Penalty):
raise ValueError("Not a penalty.")
elif isinstance(penalty, properties.Gradient):
self._d[i].append(penalty)
elif isinstance(penalty, properties.ProximalOperator):
self._p[i].append(penalty)
elif isinstance(penalty, properties.NesterovFunction):
self._N[i].append(penalty)
else:
raise ValueError("The penalty is not smooth, nor smoothed, and it "
"does not have a proximal operator.")
self._accept_params(penalty, accepts_params)
def add_smoothed(self, penalty, i, accepts_params=None):
"""Add a smoothed penalty, i.e. a smoothed constraint on the Lagrange
form, for block i.
Parameters
----------
penalty : Penalty
A function that penalises the objective function.
i : int
Non-negative integer. Index of the block to penalise. Zero based,
so 0 is the first block.
accepts_params : 2-tuple or list of 2-tuples
The outer function will accept parameters with the name of the
first element of any tuple, and map them to this function with the
name of the second element of the tuple.
"""
if isinstance(penalty, properties.NesterovFunction):
self._N[i].append(penalty)
else:
raise ValueError("Not a smoothed function.")
self._accept_params(penalty, accepts_params)
def add_prox(self, penalty, i, accepts_params=None):
"""Add a penalty for block i that has a known or computable proximal
operator.
Parameters
----------
penalty : ProximalOperator
A function that penalises the objective function.
i : int
Non-negative integer. Index of the block to penalise. Zero based,
so 0 is the first block.
accepts_params : 2-tuple or list of 2-tuples
The outer function will accept parameters with the name of the
first element of any tuple, and map them to this function with the
name of the second element of the tuple.
"""
if isinstance(penalty, properties.ProximalOperator):
self._p[i].append(penalty)
else:
raise ValueError("Not a proximal operator.")
self._accept_params(penalty, accepts_params)
def add_constraint(self, constraint, i, accepts_params=None):
"""Add a constraint for block i.
Parameters
----------
constraint : Constraint
A function that constrains the possible solutions of the objective
function.
i : int
Non-negative integer. Index of the block to penalise. Zero based,
so 0 is the first block.
accepts_params : 2-tuple or list of 2-tuples
The outer function will accept parameters with the name of the
first element of any tuple, and map them to this function with the
name of the second element of the tuple.
"""
if not isinstance(constraint, properties.Constraint):
raise ValueError("Not a constraint.")
elif not isinstance(constraint, properties.ProjectionOperator):
raise ValueError("Constraints must have projection operators.")
else:
self._c[i].append(constraint)
self._accept_params(constraint, accepts_params)
def has_nesterov_function(self, index):
return len(self._N[index]) > 0
def _only_f(self, w):
val = 0.0
for i in range(len(self._f)):
fi = self._f[i]
for j in range(len(fi)):
fij = self._f[i][j]
for k in range(len(fij)):
if isinstance(fij[k], mb_properties.MultiblockFunction):
val += fij[k].f([w[i], w[j]])
else:
val += fij[k].f(w[i])
return val
def _non_f(self, w):
val = 0.0
for i in range(len(self._d)):
di = self._d[i]
for k in range(len(di)):
val += di[k].f(w[i])
for i in range(len(self._N)):
Ni = self._N[i]
for k in range(len(Ni)):
val += Ni[k].f(w[i])
for i in range(len(self._p)):
pi = self._p[i]
for k in range(len(pi)):
val += pi[k].f(w[i])
return val
def f(self, w):
"""Function value.
Parameters
----------
w : list of numpy arrays
The parameter vectors at which to evaluate the function.
"""
val = self._only_f(w) + self._non_f(w)
return val
def fmu(self, w):
"""Function value of smoothed function.
Parameters
----------
w : list of numpy arrays
The parameter vectors at which to evaluate the function.
"""
val = self._only_f(w)
for i in range(len(self._d)):
di = self._d[i]
for k in range(len(di)):
val += di[k].f(w[i])
for i in range(len(self._N)):
Ni = self._N[i]
for k in range(len(Ni)):
val += Ni[k].fmu(w[i])
for i in range(len(self._p)):
pi = self._p[i]
for k in range(len(pi)):
val += pi[k].f(w[i])
return val
def _grad_only_f(self, w, index):
grad = np.zeros(w[index].shape)
# Add gradients from the loss functions (row):
fi = self._f[index]
for j in range(len(fi)):
fij = fi[j]
for k in range(len(fij)):
fijk = fij[k]
if isinstance(fijk, properties.Gradient):
grad += fijk.grad(w[index])
elif isinstance(fijk, mb_properties.MultiblockGradient):
grad += fijk.grad([w[index], w[j]], 0)
# Add gradients from the loss functions (column):
for i in range(len(self._f)):
fij = self._f[i][index]
if i != index: # Do not count these twice.
for k in range(len(fij)):
fijk = fij[k]
if isinstance(fijk, properties.Gradient):
# We shouldn't do anything here, right? This means e.g.
# that this (block i) is the y of a logistic regression
# model.
pass
# grad += fij.grad(w[i])
elif isinstance(fijk, mb_properties.MultiblockGradient):
grad += fijk.grad([w[i], w[index]], 1)
return grad
def _grad_non_f(self, w, index):
grad = np.zeros(w[index].shape)
# Add gradients from the penalties:
di = self._d[index]
for k in range(len(di)):
grad += di[k].grad(w[index])
# Add gradients from the smoothed penalties:
Ni = self._N[index]
for k in range(len(Ni)):
grad += Ni[k].grad(w[index])
return grad
def grad(self, w, index):
"""Gradient of the differentiable part of the function.
From the interface "MultiblockGradient".
Parameters
----------
w : list of numpy arrays
The weight vectors, w[index] is the point at which to evaluate the
gradient.
index : int
Non-negative integer. Which parameter vector (block) the gradient
is computed with respect to.
"""
grad = self._grad_only_f(w, index) + self._grad_non_f(w, index)
return grad
def prox(self, w, index, factor=1.0, eps=consts.TOLERANCE, max_iter=100):
"""The proximal operator of the non-differentiable part of the
function with the given index.
From the interface "MultiblockProximalOperator".
Parameters
----------
w : list of numpy arrays
The parameter vectors at which to compute the proximal operator.
index : int
Non-negative integer. The variable for which to compute the
proximal operator.
factor : float
Positive float. A factor by which the Lagrange multiplier is
scaled. This is usually the step size.
"""
prox = self._p[index]
proj = self._c[index]
# We have no penalties with proximal operators and no constraints:
if len(prox) == 0 and len(proj) == 0:
prox_w = w[index] # Do nothing!
# There is one proximal operator and no constraints:
elif len(prox) == 1 and len(proj) == 0:
prox_w = prox[0].prox(w[index], factor=factor,
eps=consts.TOLERANCE, max_iter=100)
# There are two proximal operators, and no constraints:
elif len(prox) == 2 and len(proj) == 0:
from parsimony.algorithms.proximal import DykstrasProximalAlgorithm
prox_combo = DykstrasProximalAlgorithm(eps=eps, max_iter=max_iter)
prox_w = prox_combo.run(prox, w[index], factor=factor)
# There are no proximal operators, but one or two constraints:
elif len(prox) == 0 and (len(proj) == 1 or len(proj) == 2):
prox_w = self.proj(w, index, eps=eps, max_iter=max_iter)
# There are at least one proximal operator and at least one constraint:
else:
from parsimony.algorithms.proximal \
import ParallelDykstrasProximalAlgorithm
combo = ParallelDykstrasProximalAlgorithm(eps=eps,
max_iter=max_iter,
min_iter=1)
prox_w = combo.run(w[index], prox=prox, proj=proj, factor=factor)
return prox_w
def proj(self, w, index, eps=consts.TOLERANCE, max_iter=100):
"""The projection operator of a constraint that corresponds to the
function with the given index.
From the interface "MultiblockProjectionOperator".
Parameters
----------
w : list of numpy arrays
The weight vectors.
index : int
Non-negative integer. Which variable the projection is for.
"""
prox = self._p[index]
proj = self._c[index]
# We have no penalties with projection operators:
if len(prox) == 0 and len(proj) == 0:
proj_w = w[index] # Do nothing!
# There is one projection operator and no proximal operators:
elif len(proj) == 1 and len(prox) == 0:
proj_w = proj[0].proj(w[index], eps=eps, max_iter=max_iter)
# There are two projection operators and no proximal operators:
elif len(proj) == 2 and len(prox) == 0:
from parsimony.algorithms.proximal \
import DykstrasProjectionAlgorithm
combo = DykstrasProjectionAlgorithm(eps=eps,
max_iter=max_iter, min_iter=1)
proj_w = combo.run(proj, w[index])
# There are no constraints, but one or two proximal operators, or any
# number of constraints and any number of proximal oeprators:
else:
proj_w = self.prox(w, index, eps=eps, max_iter=max_iter)
return proj_w
def step(self, w, index):
"""The step size to use in descent methods.
From the interface "StepSize".
Parameters
----------
w : list of numpy arrays
The point at which to determine the step size.
index : int
Non-negative integer. The variable which the step is for.
"""
all_lipschitz = True
L = 0.0
# Add Lipschitz constants from the loss functions.
fi = self._f[index]
for j in range(len(fi)):
fij = fi[j]
for k in range(len(fij)):
fijk = fij[k]
if isinstance(fijk, properties.Gradient):
if not isinstance(fijk,
properties.LipschitzContinuousGradient):
all_lipschitz = False
break
else:
L += fijk.L(w[index])
elif isinstance(fijk, mb_properties.MultiblockGradient):
if not isinstance(fijk,
mb_properties.MultiblockLipschitzContinuousGradient):
all_lipschitz = False
break
else:
L += fijk.L([w[index], w[j]], 0)
if not all_lipschitz:
break
for i in range(len(self._f)):
fij = self._f[i][index]
if i != index: # Do not visit these twice.
for k in range(len(fij)):
fijk = fij[k]
if isinstance(fijk, properties.Gradient):
# We shouldn't do anything here, right? This means that
# this (block i) is e.g. the y in a logistic
# regression.
pass
elif isinstance(fijk, mb_properties.MultiblockGradient):
if not isinstance(fijk,
mb_properties.MultiblockLipschitzContinuousGradient):
all_lipschitz = False
break
else:
L += fijk.L([w[i], w[index]], 1)
# Add Lipschitz constants from the penalties.
di = self._d[index]
for k in range(len(di)):
if not isinstance(di[k], properties.LipschitzContinuousGradient):
all_lipschitz = False
break
else:
L += di[k].L() # w[index])
Ni = self._N[index]
for k in range(len(Ni)):
if not isinstance(Ni[k], properties.LipschitzContinuousGradient):
all_lipschitz = False
break
else:
L += Ni[k].L() # w[index])
step = 0.0
if all_lipschitz and L >= consts.TOLERANCE:
step = 1.0 / L
else:
# If all functions did not have Lipschitz continuous gradients,
# try to find the step size through backtracking line search.
class F(properties.Function,
properties.Gradient):
def __init__(self, func, w, index):
self.func = func
self.w = w
self.index = index
def f(self, x):
# Temporarily replace the index:th variable with x.
w_old = self.w[self.index]
self.w[self.index] = x
f = self.func.f(w)
self.w[self.index] = w_old
return f
def grad(self, x):
# Temporarily replace the index:th variable with x.
w_old = self.w[self.index]
self.w[self.index] = x
g = self.func.grad(w, index)
self.w[self.index] = w_old
return g
func = F(self, w, index)
p = -self.grad(w, index)
from parsimony.algorithms.utils import BacktrackingLineSearch
import parsimony.functions.penalties as penalties
line_search = BacktrackingLineSearch(
condition=penalties.SufficientDescentCondition, max_iter=30)
a = np.sqrt(1.0 / self.X[index].shape[1]) # Arbitrarily "small".
step = line_search.run(func, w[index], p, rho=0.5, a=a,
condition_params={"c": 1e-4})
return step
class MultiblockFunctionWrapper(properties.CompositeFunction,
properties.Gradient,
properties.StepSize,
properties.ProximalOperator):
def __init__(self, function, w, index):
self.function = function
self.w = w
self.index = index
def f(self, w):
"""Function value.
From the interface "Function".
Parameters
----------
w : Numpy array (p-by-1). The point at which to evaluate the function.
"""
return self.function.f(self.w[:self.index] +
[w] +
self.w[self.index + 1:])
def grad(self, w):
"""Gradient of the function.
Parameters
----------
w : Numpy array (p-by-1). The point at which to evaluate the gradient.
"""
return self.function.grad(self.w[:self.index] +
[w] +
self.w[self.index + 1:],
index=self.index)
def prox(self, w, factor=1.0, eps=consts.TOLERANCE, max_iter=100):
"""The proximal operator corresponding to the function.
Parameters
----------
w : Numpy array (p-by-1). The point at which to apply the proximal
operator.
factor : Positive float. A factor by which the Lagrange multiplier is
scaled. This is usually the step size.
"""
return self.function.prox(self.w[:self.index] +
[w] +
self.w[self.index + 1:],
self.index, factor=factor,
eps=eps, max_iter=max_iter)
def step(self, w, index=0, **kwargs):
"""The step size to use in descent methods.
Parameters
----------
w : Numpy array. The point at which to determine the step size.
"""
return self.function.step(self.w[:self.index] +
[w] +
self.w[self.index + 1:],
index=self.index,
**kwargs)
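# Usage sketch (illustrative): the wrapper freezes every block except `index`
# so that single-block solvers can be reused. Assuming `mb_func` is a
# CombinedMultiblockFunction and `w` a list of weight vectors:
#
# fw = MultiblockFunctionWrapper(mb_func, w, index=0)
# t = fw.step(w[0])
# w[0] = fw.prox(w[0] - t * fw.grad(w[0]), factor=t) # one proximal gradient step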
class MultiblockNesterovFunctionWrapper(MultiblockFunctionWrapper,
properties.NesterovFunction,
properties.Continuation):
def __init__(self, function, w, index):
super(MultiblockNesterovFunctionWrapper, self).__init__(function,
w,
index)
def get_params(self, *args):
Ni = self.function._N[self.index]
ret = dict()
for k in args:
params = []
for N in Ni:
value = getattr(N, k)
params.append(value)
ret[k] = params
return ret
def fmu(self, beta, mu=None):
"""Returns the smoothed function value.
From the interface "NesterovFunction".
Parameters
----------
beta : Numpy array. A weight vector.
mu : Non-negative float. The regularisation constant for the smoothing.
"""
Ni = self.function._N[self.index]
f = 0.0
for N in Ni:
f += N.fmu(beta, mu=mu)
return f
def phi(self, alpha, beta):
""" Function value with known alpha.
From the interface "NesterovFunction".
"""
raise NotImplementedError('Abstract method "phi" must be '
'specialised!')
def get_mu(self):
"""Returns the regularisation constant for the smoothing.
From the interface "NesterovFunction".
"""
Ni = self.function._N[self.index]
if len(Ni) == 0:
raise ValueError("No penalties are Nesterov functions.")
return Ni[0].get_mu()
def set_mu(self, mu):
"""Sets the regularisation constant for the smoothing.
From the interface "NesterovFunction".
Parameters
----------
mu : Non-negative float. The regularisation constant for the smoothing
to use from now on.
Returns
-------
old_mu : Non-negative float. The old regularisation constant for the
smoothing that was overwritten and no longer is used.
"""
old_mu = self.get_mu()
Ni = self.function._N[self.index]
for N in Ni:
N.set_mu(mu)
return old_mu
def alpha(self, beta):
""" Dual variable of the Nesterov function.
From the interface "NesterovFunction".
Parameters
----------
beta : Numpy array (p-by-1). The variable for which to compute the dual
variable alpha.
"""
Ni = self.function._N[self.index]
alpha = []
for N in Ni:
alpha += N.alpha(beta)
return alpha
def A(self):
""" Linear operator of the Nesterov function.
From the interface "NesterovFunction".
"""
Ni = self.function._N[self.index]
A = []
for N in Ni:
A += N.A()
return A
def Aa(self, alpha):
""" Compute A'*alpha.
From the interface "NesterovFunction".
Parameters
----------
alpha : Numpy array (x-by-1). The dual variable alpha.
"""
A = self.A()
Aa = A[0].T.dot(alpha[0])
for i in range(1, len(A)):
Aa += A[i].T.dot(alpha[i])
return Aa
def project(self, alpha):
""" Projection onto the compact space of the Nesterov function.
From the interface "NesterovFunction".
Parameters
----------
alpha : Numpy array (x-by-1). The not-yet-projected dual variable
alpha.
"""
Ni = self.function._N[self.index]
a = []
i = 0
for N in Ni:
A = N.A()
a += N.project(alpha[i:i + len(A)])
i += len(A)
return a
def M(self):
""" The maximum value of the regularisation of the dual variable. We
have
M = max_{alpha in K} 0.5*|alpha|²_2.
From the interface "NesterovFunction".
"""
Ni = self.function._N[self.index]
M = 0.0
for N in Ni:
M += N.M()
return M
def estimate_mu(self, beta):
""" Compute a "good" value of mu with respect to the given beta.
From the interface "NesterovFunction".
Parameters
----------
beta : Numpy array (p-by-1). The primal variable at which to compute a
feasible value of mu.
"""
Ni = self.function._N[self.index]
mu = consts.TOLERANCE
for N in Ni:
mu = max(mu, N.estimate_mu(beta))
return mu
def mu_opt(self, eps):
"""The optimal value of mu given epsilon.
Parameters
----------
eps : Positive float. The desired precision.
Returns
-------
mu : Positive float. The optimal regularisation parameter.
From the interface "Continuation".
"""
raise NotImplementedError('Abstract method "mu_opt" must be '
'specialised!')
def eps_opt(self, mu):
"""The optimal value of epsilon given mu.
Parameters
----------
mu : Positive float. The regularisation constant of the smoothing.
Returns
-------
eps : Positive float. The optimal precision.
From the interface "Continuation".
"""
raise NotImplementedError('Abstract method "eps_opt" must be '
'specialised!')
def eps_max(self, mu):
"""The maximum value of epsilon.
From the interface "Continuation".
Parameters
----------
mu : Positive float. The regularisation constant of the smoothing.
Returns
-------
eps : Positive float. The upper limit, the maximum, precision.
"""
Ni = self.function._N[self.index]
gM = 0.0
for N in Ni:
gM += N.l * N.M()
return float(mu) * gM
def mu_max(self, eps):
"""The maximum value of mu.
From the interface "Continuation".
Parameters
----------
eps : Positive float. The maximum precision of the smoothing.
Returns
-------
mu : Positive float. The upper limit, the maximum, of the
regularisation constant of the smoothing.
"""
Ni = self.function._N[self.index]
gM = 0.0
for N in Ni:
gM += N.l * N.M()
return float(eps) / gM
class LatentVariableCovariance(mb_properties.MultiblockFunction,
mb_properties.MultiblockGradient,
mb_properties.MultiblockLipschitzContinuousGradient):
"""Represents
Cov(X.w, Y.c) = (K / (n - 1)) * w'.X'.Y.c,
where X.w and Y.c are latent variables.
Parameters
----------
X : List with two numpy arrays. The two blocks.
unbiased : bool
Whether or not to use biased or unbiased sample covariance. Default is
True, the unbiased sample covariance is used.
scalar_multiple : float
Must be non-negative. Default is 1.0. A scalar multiple of the
function. Useful when the covariance is used as a "penalty".
"""
def __init__(self, X, unbiased=True, scalar_multiple=1.0):
self.X = X
if unbiased:
self.n = float(X[0].shape[0] - 1.0)
else:
self.n = float(X[0].shape[0])
self.K = max(0.0, float(scalar_multiple))
self.reset()
def reset(self):
self._lambda_max = None
def f(self, w):
"""Function value.
From the interface "Function".
"""
wX = np.dot(self.X[0], w[0]).T
Yc = np.dot(self.X[1], w[1])
wXYc = np.dot(wX, Yc)
return -wXYc[0, 0] * (self.K / self.n)
def grad(self, w, index):
"""Gradient of the function.
From the interface "MultiblockGradient".
Parameters
----------
w : List of numpy arrays. The weight vectors, w[index] is the point at
which to evaluate the gradient.
index : Non-negative integer. Which variable the gradient is for.
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.multiblock.losses import LatentVariableCovariance
>>>
>>> np.random.seed(42)
>>> X = np.random.rand(100, 150)
>>> Y = np.random.rand(100, 50)
>>> w = np.random.rand(150, 1)
>>> c = np.random.rand(50, 1)
>>> cov = LatentVariableCovariance([X, Y])
>>> grad = cov.grad([w, c], 0)
>>> approx_grad = cov.approx_grad([w, c], 0)
>>> np.allclose(grad, approx_grad)
True
"""
index = int(index)
grad = -np.dot(self.X[index].T,
np.dot(self.X[1 - index], w[1 - index]))
return grad * (self.K / self.n)
def L(self, w, index):
"""Lipschitz constant of the gradient with given index.
From the interface "MultiblockLipschitzContinuousGradient".
"""
# Any positive real number suffices, but a small one will give a larger
# step in e.g. proximal gradient descent.
return np.sqrt(consts.TOLERANCE)
class LatentVariableCovarianceSquared(mb_properties.MultiblockFunction,
mb_properties.MultiblockGradient,
mb_properties.MultiblockLipschitzContinuousGradient):
"""Represents
Cov(X.w, Y.c)² = ((1 / (n - 1)) * w'.X'.Y.c)²,
where X.w and Y.c are latent variables.
Parameters
----------
X : List with two numpy arrays. The two blocks.
unbiased : Boolean. Whether or not to use biased or unbiased sample
covariance. Default is True, the unbiased sample covariance is
used.
"""
def __init__(self, X, unbiased=True):
self.X = X
if unbiased:
self.n = float(X[0].shape[0] - 1.0)
else:
self.n = float(X[0].shape[0])
self.reset()
def reset(self):
pass
def f(self, w):
"""Function value.
From the interface "Function".
Parameters
----------
w : Numpy array (p-by-1). The point at which to evaluate the function.
"""
wX = np.dot(self.X[0], w[0]).T
Yc = np.dot(self.X[1], w[1])
wXYc = np.dot(wX, Yc)[0, 0]
return -((wXYc / self.n) ** 2)
def grad(self, w, index):
"""Gradient of the function.
From the interface "MultiblockGradient".
Parameters
----------
w : List of numpy arrays. The weight vectors, w[index] is the point at
which to evaluate the gradient.
index : Non-negative integer. Which variable the gradient is for.
Examples
--------
>>> import numpy as np
>>> from parsimony.functions.multiblock.losses import LatentVariableCovarianceSquared
>>>
>>> np.random.seed(42)
>>> X = np.random.rand(100, 150)
>>> Y = np.random.rand(100, 50)
>>> w = np.random.rand(150, 1)
>>> c = np.random.rand(50, 1)
>>> cov = LatentVariableCovarianceSquared([X, Y])
>>> grad = cov.grad([w, c], 0)
>>> approx_grad = cov.approx_grad([w, c], 0)
>>> np.allclose(grad, approx_grad)
True
"""
wX = np.dot(self.X[0], w[0]).T
Yc = np.dot(self.X[1], w[1])
wXYc = np.dot(wX, Yc)[0, 0]
index = int(index)
grad = np.dot(self.X[index].T,
np.dot(self.X[1 - index], w[1 - index])) \
* ((2.0 * wXYc) / (self.n * self.n))
return -grad
def L(self, w, index):
"""Lipschitz constant of the gradient with given index.
From the interface "MultiblockLipschitzContinuousGradient".
"""
index = int(index)
grad = np.dot(self.X[index].T,
np.dot(self.X[1 - index], w[1 - index])) \
* (1.0 / self.n)
return 2.0 * maths.norm(grad) ** 2
class GeneralisedMultiblock(mb_properties.MultiblockFunction,
mb_properties.MultiblockGradient,
# mb_properties.MultiblockProximalOperator,
mb_properties.MultiblockProjectionOperator,
properties.StepSize,
# LipschitzContinuousGradient,
# NesterovFunction, Continuation, DualFunction
):
def __init__(self, X, functions):
self.X = X
self.functions = functions
self.reset()
def reset(self):
for i in range(len(self.functions)):
for j in range(len(self.functions[i])):
if i == j:
for k in range(len(self.functions[i][j])):
self.functions[i][j][k].reset()
else:
if self.functions[i][j] is not None:
self.functions[i][j].reset()
def f(self, w):
"""Function value.
"""
val = 0.0
for i in range(len(self.functions)):
fi = self.functions[i]
for j in range(len(fi)):
fij = fi[j]
if i == j and isinstance(fij, (list, tuple)):
for k in range(len(fij)):
# print "Diag: ", i
val += fij[k].f(w[i])
else:
# print "f(w[%d], w[%d])" % (i, j)
if fij is not None:
val += fij.f([w[i], w[j]])
# TODO: Check instead if it is a numpy array.
if not isinstance(val, numbers.Number):
return val[0, 0]
else:
return val
def grad(self, w, index):
"""Gradient of the differentiable part of the function.
From the interface "MultiblockGradient".
"""
grad = 0.0
fi = self.functions[index]
for j in range(len(fi)):
fij = fi[j]
if index != j:
if isinstance(fij, properties.Gradient):
grad += fij.grad(w[index])
elif isinstance(fij, mb_properties.MultiblockGradient):
grad += fij.grad([w[index], w[j]], 0)
for i in range(len(self.functions)):
fij = self.functions[i][index]
if i != index:
if isinstance(fij, properties.Gradient):
# We shouldn't do anything here, right? This means e.g.
# that this (block i) is the y of a logistic regression.
pass
# grad += fij.grad(w)
elif isinstance(fij, mb_properties.MultiblockGradient):
grad += fij.grad([w[i], w[index]], 1)
fii = self.functions[index][index]
for k in range(len(fii)):
if isinstance(fii[k], properties.Gradient):
grad += fii[k].grad(w[index])
return grad
# def prox(self, w, index, factor=1.0):
# """The proximal operator corresponding to the function with the index.
#
# From the interface "MultiblockProximalOperator".
# """
## # Find a proximal operator.
## fii = self.functions[index][index]
## for k in xrange(len(fii)):
## if isinstance(fii[k], ProximalOperator):
## w[index] = fii[k].prox(w[index], factor)
## break
## # If no proximal operator was found, we will just return the same
## # vectors again. The proximal operator of the zero function returns
## # the vector itself.
#
# return w
def proj(self, w, index):
"""The projection operator corresponding to the function with the
index.
From the interface "MultiblockProjectionOperator".
"""
# Find a projection operators.
# fii = self.functions[index][index]
f = self.get_constraints(index)
for k in range(len(f)):
if isinstance(f[k], properties.ProjectionOperator):
w[index] = f[k].proj(w[index])
break
# If no projection operator was found, we will just return the same
# vectors again.
return w
def step(self, w, index):
# return 0.0001
all_lipschitz = True
# Add the Lipschitz constants.
L = 0.0
fi = self.functions[index]
for j in range(len(fi)):
if j != index and fi[j] is not None:
fij = fi[j]
if isinstance(fij, properties.LipschitzContinuousGradient):
L += fij.L()
elif isinstance(fij,
mb_properties.MultiblockLipschitzContinuousGradient):
L += fij.L(w, index)
else:
all_lipschitz = False
break
if all_lipschitz:
fii = self.functions[index][index]
for k in range(len(fii)):
if fii[k] is None:
continue
if isinstance(fii[k], properties.LipschitzContinuousGradient):
L += fii[k].L()
elif isinstance(fii[k],
mb_properties.MultiblockLipschitzContinuousGradient):
L += fii[k].L(w, index)
else:
all_lipschitz = False
break
if all_lipschitz and L > 0.0:
t = 1.0 / L
else:
# If all functions did not have Lipschitz continuous gradients,
# try to find the step size through backtracking line search.
class F(properties.Function,
properties.Gradient):
def __init__(self, func, w, index):
self.func = func
self.w = w
self.index = index
def f(self, x):
# Temporarily replace the index:th variable with x.
w_old = self.w[self.index]
self.w[self.index] = x
f = self.func.f(w)
self.w[self.index] = w_old
return f
def grad(self, x):
# Temporarily replace the index:th variable with x.
w_old = self.w[self.index]
self.w[self.index] = x
g = self.func.grad(w, index)
self.w[self.index] = w_old
return g
func = F(self, w, index)
p = -self.grad(w, index)
from parsimony.algorithms.utils import BacktrackingLineSearch
import parsimony.functions.penalties as penalties
line_search = BacktrackingLineSearch(
condition=penalties.SufficientDescentCondition, max_iter=30)
a = np.sqrt(1.0 / self.X[index].shape[1]) # Arbitrarily "small".
t = line_search.run(func, w[index], p, rho=0.5, a=a,
condition_params={"c": 1e-4})
return t
| 2.140625 | 2 |
python/opentrons_ot3_firmware/messages/messages.py | Opentrons/ot3-firmware | 3 | 70532 | """Message types."""
from functools import lru_cache
from typing import Union, Optional, Type
from typing_extensions import get_args
from . import message_definitions as defs
from ..constants import MessageId
MessageDefinition = Union[
defs.HeartbeatRequest,
defs.HeartbeatResponse,
defs.DeviceInfoRequest,
defs.DeviceInfoResponse,
defs.StopRequest,
defs.GetStatusRequest,
defs.GetStatusResponse,
defs.EnableMotorRequest,
defs.DisableMotorRequest,
defs.MoveRequest,
defs.SetupRequest,
defs.WriteToEEPromRequest,
defs.ReadFromEEPromRequest,
defs.ReadFromEEPromResponse,
defs.AddLinearMoveRequest,
defs.GetMoveGroupRequest,
defs.GetMoveGroupResponse,
defs.ExecuteMoveGroupRequest,
defs.ClearAllMoveGroupsRequest,
defs.MoveCompleted,
defs.SetMotionConstraints,
defs.GetMotionConstraintsRequest,
defs.GetMotionConstraintsResponse,
defs.WriteMotorDriverRegister,
defs.ReadMotorDriverRequest,
defs.ReadMotorDriverResponse,
defs.ReadPresenceSensingVoltageRequest,
defs.ReadPresenceSensingVoltageResponse,
]
@lru_cache(maxsize=None)
def get_definition(message_id: MessageId) -> Optional[Type[MessageDefinition]]:
"""Get the message type for a message id.
Args:
message_id: A message id
Returns: The message definition for a type
"""
# Dumb linear search, but the result is memoized.
for i in get_args(MessageDefinition):
if i.message_id == message_id:
# get args returns Tuple[Any...]
return i # type: ignore[no-any-return]
return None
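# Usage sketch (illustrative; `incoming_id` stands for a MessageId value
# parsed off the wire, and message constructor arguments are omitted):
#
# defn = get_definition(incoming_id)
# if defn is None:
# raise ValueError("unknown message id")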
| 1.671875 | 2 |
examples/5_custom_auth.py | jsdelivrbot/canister | 35 | 70660 | <reponame>jsdelivrbot/canister<gh_stars>10-100
#!/usr/bin/env python3
# ab -n 1000 -c 10 http://localhost:8080/hello/world
# ab -n 2 -c 2 http://127.0.0.1:8080/hello/world
import sys
sys.path.insert(0, '..')
import canister
import time
import bottle
from canister import session
app = bottle.Bottle()
app.install(canister.Canister())
@app.get('/')
def index():
return '''
<pre>
Session sid: %s
Session user: %s
</pre>
<form target="/login">
<a href="/login?username">My private area</a> (username: alice, password: <PASSWORD>)</a>
''' % (session.sid, session.user)
@app.get('/login')
def login(username, password):
session.user = username
return 'Welcome %s! <a href="/">Go back</a> <a href="/logout">Log out</a>' % session.user
@app.get('/logout')
def logout():
session.user = None
return 'Bye! <a href="/">Go back</a>'
app.run(host='0.0.0.0') | 1.414063 | 1 |
Feature/Feature.py | NunoXu/UnbabelChallenge2016 | 0 | 70788 | <reponame>NunoXu/UnbabelChallenge2016<filename>Feature/Feature.py
from abc import ABCMeta, abstractmethod
class Feature(metaclass=ABCMeta):
@abstractmethod
def evaluate(self, sentence):
pass
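# Illustrative concrete feature (an assumption, not part of the original file):
#
# class WordCountFeature(Feature):
# def evaluate(self, sentence):
# return len(sentence.split())
#
# WordCountFeature().evaluate("a simple test") # -> 3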
| 0.792969 | 1 |
software/multifluids_icferst/tests/swe_mms_p2p1_quadratic_drag/swe_mms_p2p1_quadratic_drag_tools.py | msc-acse/acse-9-independent-research-project-Wade003 | 2 | 70916 | from math import sin, cos, tanh, pi, sqrt
def u(x):
return cos(x[1])*sin(x[0])
def v(x):
return -cos(x[0])*sin(x[1])
def h(x):
return sin(x[0])*sin(x[1])
def forcing_u(x):
return cos(x[0])*cos(x[1])**2*sin(x[0]) + cos(x[0])*sin(x[0])*sin(x[1])**2 + 1.20*cos(x[1])*sin(x[0]) + 9.80*cos(x[0])*sin(x[1]) + 0.00250*sqrt(cos(x[1])**2*sin(x[0])**2 + cos(x[0])**2*sin(x[1])**2)*cos(x[1])*sin(x[0])/(sin(x[0])*sin(x[1]) + 20.0)
def forcing_v(x):
return cos(x[0])**2*cos(x[1])*sin(x[1]) + cos(x[1])*sin(x[0])**2*sin(x[1]) + 9.80*cos(x[1])*sin(x[0]) - 1.20*cos(x[0])*sin(x[1]) - 0.00250*sqrt(cos(x[1])**2*sin(x[0])**2 + cos(x[0])**2*sin(x[1])**2)*cos(x[0])*sin(x[1])/(sin(x[0])*sin(x[1]) + 20.0)
def velocity(x):
return [u(x), v(x)]
def forcing_velocity(x):
return [forcing_u(x), forcing_v(x)]
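# These expressions follow the method of manufactured solutions: u, v and h
# are prescribed analytically and forcing_u/forcing_v are the source terms
# that make them an exact solution of the shallow-water equations. Reading
# the constants as gravity (9.80), a Coriolis-like parameter (1.20) and a
# quadratic drag coefficient (0.00250) over a mean depth of 20.0 is an
# interpretation suggested by the test name, not stated in the file.
# Quick check: velocity([pi / 4.0, pi / 4.0]) -> [0.5, -0.5]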
| 2.21875 | 2 |
test_non-equivalent-colourings.py | jcgwt/foobar | 0 | 71044 | <reponame>jcgwt/foobar
import unittest
import importlib
colourings = importlib.import_module('non-equivalent-colourings')
class TestCases(unittest.TestCase):
def test_solution(self):
self.assertEqual(colourings.solution(1,1,1),
1)
self.assertEqual(colourings.solution(10,10,1),
1)
self.assertEqual(colourings.solution(1,1,1000),
1000)
self.assertEqual(colourings.solution(7,3,1000),
33068783763227519014651588965450182424626554362604265581000)
self.assertEqual(colourings.solution(10,10,1000),
75940584281266233059295963476813407690465303634028067643120115987638768360299350432189628371332144284588102338919580926586742819469270026586348030871938085197343689280790904580324483625413736702601041419903161751121037817588203800360996035121655697677217564930154927270848934180754000000)
if __name__ == '__main__':
unittest.main()
| 1.085938 | 1 |
src/eos/transaction.py | Remmeauth/gimmeremmetokensbot | 3 | 71172 | """
Provide implementation of transaction.
"""
import os
from eosiopy.eosioparams import EosioParams
from eosiopy.nodenetwork import NodeNetwork
from eosiopy.rawinputparams import RawinputParams
from eosiopy import eosio_config
MASTER_WALLET_PRIVATE_KEY = os.environ.get('MASTER_WALLET_PRIVATE_KEY')
NODEOS_HOST = os.environ.get('NODEOS_HOST')
NODEOS_PORT = os.environ.get('NODEOS_PORT')
eosio_config.url = f'https://{NODEOS_HOST}'
eosio_config.port = int(NODEOS_PORT)
class Transaction:
"""
Transaction implementation.
"""
def send(self, account_from_name, account_to_name, amount, symbol) -> str:
"""
Send transaction.
"""
raw_input_params = RawinputParams('transfer', {
'from': account_from_name,
'memo': 'Remme Protocol transaction.',
'quantity': f'{amount}.0000 {symbol}',
'to': account_to_name,
}, 'eosio.token', f'{account_from_name}@active')
eosio_params = EosioParams(raw_input_params.params_actions_list, MASTER_WALLET_PRIVATE_KEY)
transaction = NodeNetwork.push_transaction(eosio_params.trx_json)
return transaction.get('transaction_id')
| 1.507813 | 2 |
src/trunk/apps/python/bindings2cfg.py | kbouk/seiscomp3 | 94 | 71300 | <reponame>kbouk/seiscomp3
#!/usr/bin/env seiscomp-python
# -*- coding: utf-8 -*-
############################################################################
# Copyright (C) by gempa GmbH #
# Author: <NAME> <<EMAIL>> #
# #
# You can redistribute and/or modify this program under the #
# terms of the SeisComP Public License. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# SeisComP Public License for more details. #
############################################################################
import seiscomp3.bindings2cfg
import sys
sys.exit(seiscomp3.bindings2cfg.main())
| 0.777344 | 1 |
topics/DynamicProgramming/Climbing_Stairs_70/[1_DP_rolling_vars]_Climbing_Stairs_70.py | DmitryNaimark/leetcode-solutions-python | 1 | 71428 | # https://leetcode.com/problems/climbing-stairs/
# ---------------------------------------------------
# Runtime Complexity: O(n)
# Space Complexity: O(1)
class Solution:
def climbStairs(self, n: int) -> int:
if n <= 2:
return n
prev_prev = 1
prev = 2
cur = 0
for i in range(3, n + 1):
cur = prev_prev + prev
prev_prev, prev = prev, cur
return cur
# ---------------------------------------------------
# Test Cases
# ---------------------------------------------------
solution = Solution()
# 0
print(solution.climbStairs(0))
# 1
print(solution.climbStairs(1))
# 2
print(solution.climbStairs(2))
# 3
print(solution.climbStairs(3))
# 5
print(solution.climbStairs(4))
# 8
print(solution.climbStairs(5))
# 13
print(solution.climbStairs(6)) | 2.796875 | 3 |
tests/test_rotateWord.py | clara0/learn-python | 0 | 71556 | import unittest
import rotate_word
class TestRotateWord(unittest.TestCase):
def test_rotateWord(self):
self.assertEqual(rotate_word.rotate('abc', 1), 'bcd')
self.assertEqual(rotate_word.rotate('abcz', 2), 'cdeb')
self.assertEqual(rotate_word.rotate('abc', 27), 'bcd')
self.assertEqual(rotate_word.rotate('abc', -1), 'zab')
self.assertEqual(rotate_word.rotate('ABC', 1), 'bcd')
| 1.609375 | 2 |
privatise.py | seykuyinu/privatise-spotify-playlists | 1 | 71684 | <reponame>seykuyinu/privatise-spotify-playlists
import sys
import spotipy
import spotipy.util as util
def filter_public(playlist_items):
'''
Returns all public playlsits present in the given playlist array
'''
public_playlists = list(filter(lambda playlist: playlist['public'] == True, playlist_items))
return public_playlists
def pretty_print_playlists(playlists):
list(map(lambda playlist: print(playlist['name']), playlists))
def make_playlists_private(sp: spotipy.Spotify, username, playlists):
list(map(lambda playlist: sp.user_playlist_change_details(username, playlist['id'], public=False), playlists))
def main():
scope = 'user-library-read playlist-modify-public'
if len(sys.argv) > 1:
username = sys.argv[1]
else:
print(f"Usage: {sys.argv[0]} username")
sys.exit()
token = util.prompt_for_user_token(username, scope)
if token:
sp = spotipy.Spotify(auth=token)
offset = 0
limit = 50
count = 0
while True:
playlists = sp.current_user_playlists(offset=offset, limit=limit)
public_playlists = filter_public(playlists['items'])
if len(public_playlists) > 0:
print("Privatising the following public playlists.. \n")
pretty_print_playlists(public_playlists)
make_playlists_private(sp, username, public_playlists)
count += len(public_playlists)
next_page = playlists['next']
if next_page:
offset += limit
else:
if count == 0:
print("No public playlists were found.")
else:
print(f"{count} public playlists have been made private.")
break
else:
print ("Can't get token for", username)
if __name__ == "__main__":
main()
| 2.265625 | 2 |
train_procgen/graph_util.py | tuthoang/train-procgen | 146 | 71812 | import csv
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from math import ceil
from constants import ENV_NAMES
import seaborn # sets some style parameters automatically
COLORS = [(57, 106, 177), (218, 124, 48)]
def switch_to_outer_plot(fig):
ax0 = fig.add_subplot(111, frame_on=False)
ax0.set_xticks([])
ax0.set_yticks([])
return ax0
def ema(data_in, smoothing=0):
data_out = np.zeros_like(data_in)
curr = np.nan
for i in range(len(data_in)):
x = data_in[i]
if np.isnan(curr):
curr = x
else:
curr = (1 - smoothing) * x + smoothing * curr
data_out[i] = curr
return data_out
def plot_data_mean_std(ax, data_y, color_idx=0, data_x=None, x_scale=1, smoothing=0, first_valid=0, label=None):
color = COLORS[color_idx]
hexcolor = '#%02x%02x%02x' % color
data_y = data_y[:,first_valid:]
nx, num_datapoint = np.shape(data_y)
if smoothing > 0:
for i in range(nx):
data_y[i,...] = ema(data_y[i,...], smoothing)
if data_x is None:
data_x = (np.array(range(num_datapoint)) + first_valid) * x_scale
data_mean = np.mean(data_y, axis=0)
data_std = np.std(data_y, axis=0, ddof=1)
ax.plot(data_x, data_mean, color=hexcolor, label=label, linestyle='solid', alpha=1, rasterized=True)
ax.fill_between(data_x, data_mean - data_std, data_mean + data_std, color=hexcolor, alpha=.25, linewidth=0.0, rasterized=True)
def read_csv(filename, key_name):
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
key_index = -1
values = []
for line_num, row in enumerate(csv_reader):
row = [x.lower() for x in row]
if line_num == 0:
idxs = [i for i, val in enumerate(row) if val == key_name]
key_index = idxs[0]
else:
values.append(row[key_index])
return np.array(values, dtype=np.float32)
def plot_values(ax, all_values, title=None, max_x=0, label=None, **kwargs):
if max_x > 0:
all_values = all_values[...,:max_x]
if ax is not None:
plot_data_mean_std(ax, all_values, label=label, **kwargs)
ax.set_title(title)
return all_values
def plot_experiment(run_directory_prefix, titles=None, suffixes=[''], normalization_ranges=None, key_name='eprewmean', **kwargs):
run_folders = [f'{run_directory_prefix}{x}' for x in range(3)]
num_envs = len(ENV_NAMES)
will_normalize_and_reduce = normalization_ranges is not None
if will_normalize_and_reduce:
num_visible_plots = 1
f, axarr = plt.subplots()
else:
num_visible_plots = num_envs
dimx = dimy = ceil(np.sqrt(num_visible_plots))
f, axarr = plt.subplots(dimx, dimy, sharex=True)
for suffix_idx, suffix in enumerate(suffixes):
all_values = []
game_weights = [1] * num_envs
for env_idx in range(num_envs):
env_name = ENV_NAMES[env_idx]
label = suffix if env_idx == 0 else None # only label the first graph to avoid legend duplicates
print(f'loading results from {env_name}...')
if num_visible_plots == 1:
ax = axarr
else:
dimy = len(axarr[0])
ax = axarr[env_idx // dimy][env_idx % dimy]
csv_files = [f"results/{resid}/progress-{env_name}{'-' if len(suffix) > 0 else ''}{suffix}.csv" for resid in run_folders]
curr_ax = None if will_normalize_and_reduce else ax
raw_data = np.array([read_csv(file, key_name) for file in csv_files])
values = plot_values(curr_ax, raw_data, title=env_name, color_idx=suffix_idx, label=label, **kwargs)
if will_normalize_and_reduce:
game_range = normalization_ranges[env_name]
game_min = game_range[0]
game_max = game_range[1]
game_delta = game_max - game_min
sub_values = game_weights[env_idx] * (np.array(values) - game_min) / (game_delta)
all_values.append(sub_values)
if will_normalize_and_reduce:
normalized_data = np.sum(all_values, axis=0)
normalized_data = normalized_data / np.sum(game_weights)
title = 'Mean Normalized Score'
plot_values(ax, normalized_data, title=None, color_idx=suffix_idx, label=suffix, **kwargs)
if len(suffixes) > 1:
if num_visible_plots == 1:
ax.legend(loc='lower right')
else:
f.legend(loc='lower right', bbox_to_anchor=(.5, 0, .5, 1))
return f, axarr | 2.171875 | 2 |
leetcode/string/165.py | 1lch2/PythonExercise | 1 | 71940 | # 比较两个版本号 version1 和 version2。
# 如果 version1 > version2 返回 1,如果 version1 < version2 返回 -1, 除此之外返回 0。
# 你可以假设版本字符串非空,并且只包含数字和 . 字符。
# . 字符不代表小数点,而是用于分隔数字序列。
# 例如,2.5 不是“两个半”,也不是“差一半到三”,而是第二版中的第五个小版本。
# 你可以假设版本号的每一级的默认修订版号为 0。
# 例如,版本号 3.4 的第一级(大版本)和第二级(小版本)修订号分别为 3 和 4。其第三级和第四级修订号均为 0。
#
# 示例 1:
# 输入: version1 = "0.1", version2 = "1.1"
# 输出: -1
# 示例 2:
# 输入: version1 = "1.0.1", version2 = "1"
# 输出: 1
# 示例 3:
# 输入: version1 = "7.5.2.4", version2 = "7.5.3"
# 输出: -1
# 示例 4:
# 输入:version1 = "1.01", version2 = "1.001"
# 输出:0
# 解释:忽略前导零,“01” 和 “001” 表示相同的数字 “1”。
# 示例 5:
# 输入:version1 = "1.0", version2 = "1.0.0"
# 输出:0
# 解释:version1 没有第三级修订号,这意味着它的第三级修订号默认为 “0”。
class Solution:
def compareVersion(self, version1: str, version2: str) -> int:
version1 = version1.split(".")
version2 = version2.split(".")
l1 = len(version1)
l2 = len(version2)
# 补充 0 使两列表长度相等
if l1 > l2:
version2.extend(['0' for _ in range(l1 - l2)])
elif l2 > l1:
version1.extend(['0' for _ in range(l2 - l1)])
# 按规则比较
for i1, i2 in zip(version1, version2):
if int(i1) > int(i2):
return 1
elif int(i1) < int(i2):
return -1
return 0
| 2.078125 | 2 |
tracing_tool/strace_module.py | ganeshutah/FPChecker | 19 | 72068 | import os
import shutil
import subprocess
import sys
import re
import glob
from colors import prGreen,prCyan,prRed
TRACES_DIR = './.fpchecker/traces'
TRACES_FILES = TRACES_DIR+'/'+'trace'
STRACE = 'strace'
SUPPORTED_COMPILERS = set([
'nvcc',
'c++',
'cc',
'gcc',
'g++',
'xlc',
'xlC',
'xlc++',
'xlc_r',
'xlc++_r',
'mpic',
'mpic++',
'mpicxx',
'mpicc',
'mpixlc',
'mpixlC',
'mpixlf',
'mpif77',
'mpif90',
'clang',
'clang++',
'gfortran',
'xlf',
'xlf-gpu',
'xlf2003',
'xlf2003-gpu',
'xlf2003_r',
'xlf2003_r-gpu',
'xlf2008',
'xlf2008-gpu',
'xlf2008_r',
'xlf2008_r-gpu',
'xlf90',
'xlf90-gpu',
'xlf90_r',
'xlf90_r-gpu',
'xlf95',
'xlf95-gpu',
'xlf95_r',
'xlf95_r-gpu',
'xlf_r',
'xlf_r-gpu'
])
SUPPORTED_TOOLS = set([
'ar',
'ranlib',
'bin2c'
])
# Examples of top commands
# [pid 83362] execve("/usr/tce/packages/cuda/cuda-9.2.148/bin/nvcc",
# [pid 63885] execve("/bin/sh", ["/bin/sh", "-c", "cd /usr/workspace/wsa/laguna/fpchecker/FPChecker/tests/tracing_tool/dynamic/test_cmake_simple/build/src/util && /usr/tcetmp/bin/c++ -o CMakeFiles/util.dir/util.cpp.o -c /usr/workspace/wsa/laguna/fpchecker/FPChecker/tests/tracing_tool/dynamic/test_cmake_simple/src/util/util.cpp"]
# Saves Compilation commands
class CommandsTracing:
#open("/usr/tcetmp/packages/spack/opt/spack/linux-redhat7-ppc64le/gcc-4.8.5/gcc-4.9.3-3clrxj5wz2i54h
#[pid 8690] execve("/usr/tcetmp/bin/c++", ["/usr/tcetmp/bin/c++", "CMakeFiles/main.dir/src/main.cpp.o", "-o", "main"]
pidPattern = re.compile("^\[pid\s+[0-9]+\] ")
# clone(child_stack=NULL, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0x200000044f60) = 55734
# vfork() = 55729
childSpawn_clone = re.compile("^clone\(.+=\s+[0-9]+")
childSpawn_fork = re.compile("^vfork\(\).+=\s+[0-9]+")
# Chdir call
# chdir("/usr/workspace/wsa/laguna/fpchecker/clang_tool/wrapper/apps/RAJA_perf/RAJAPerf/build_ilaguna_build/tpl/RAJA") = 0
chdirPattern = re.compile("^chdir\(.+\s+=\s+[0-9]+")
# Fork from root:
# vfork(strace: Process 22625 attached
# Other forks:
# [pid 95927] stat("/usr/gapps/resmpi/llvm/ppc64le/llvm-openmp-trunk-install/lib/tls/power9/altivec", strace: Process 95932 attached
# [pid 22631] vfork(strace: Process 22634 attached
# [pid 78391] clone(child_stack=NULL, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0x200000044f60) = 78392
# [pid 86430] clone(strace: Process 86431 attached
#attachPattern1 = re.compile("vfork\(strace\:\s+Process\s+[0-9]+\s+attached")
#attachPattern_clone = re.compile("clone\(.+=\s+[0-9]+")
#attachPattern_attach = re.compile("Process\s+[0-9]+\s+attached")
# Process creation patterns:
# We trace vfork() and clone()
#[pid 69813] clone(child_stack=NULL, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0x200000044f60) = 69814
# [pid 129570] <... clone resumed>child_stack=NULL, flags=CLONE_CHILD_CLEARTID|CLONE_CHILD_SETTID|SIGCHLD, child_tidptr=0x200000044f60) = 129601
#[pid 69807] <... vfork resumed>) = 69808
childCreationPattern_clone_1 = re.compile("^\[pid\s+[0-9]+\] clone\(.+=\s+[0-9]+")
childCreationPattern_clone_2 = re.compile("^\[pid\s+[0-9]+\] \<\.\.\. clone resumed\>.+=\s+[0-9]+")
childCreationPattern_fork = re.compile("^\[pid\s+[0-9]+\] .+vfork.+=\s+[0-9]+")
readPattern = re.compile("^\[pid\s+[0-9]+\] read\(")
writePattern = re.compile("^\[pid\s+[0-9]+\] write\(")
def __init__(self, make_command):
self.traced_commands = []
self.make_command = make_command
self.childTree = {}
self.parentTree = {}
self.tracedPIDs = set([])
def getTracesDir(self):
return TRACES_DIR
def isChildSpawn(self, line):
child_fork = self.childSpawn_fork.search(line)
child_clone = self.childSpawn_clone.search(line)
pid = None
if child_fork != None or child_clone != None:
pid = line.split()[-1:][0]
return pid
def isMakeCommand(self, line):
ret = False
if "execve(\"" in line:
# execve("/usr/tcetmp/bin/make", ["make", "-j"], 0x7fffffffb780 /* 128 vars */) = 0
cmd = line.split(', [')[1].split('], ')[0]
cmd = cmd.replace('"','')
cmd = cmd.replace(',','')
cmd = cmd.split()
#print(cmd, self.make_command)
if cmd == self.make_command:
return True
return ret
def getRootFile(self):
# Find root file
files = glob.glob(TRACES_DIR+'/trace.*')
root_file = ''
for f in files:
#print('Checking', f)
with open(f) as fd:
first_line = fd.readline()
if self.isMakeCommand(first_line):
root_file = f
break
#print('Root file', root_file)
if root_file == '':
prRed('Error: root file not found')
exit(-1)
return root_file
# Check if it is a chdir() system call
# chdir("/usr/workspace/wsa/laguna/fpchecker/clang_tool/wrapper/apps/RAJA_perf/RAJAPerf/build_ilaguna_build/tpl/RAJA") = 0
def isChangeDir(self, line):
chdir_found = self.chdirPattern.search(line)
newDir = None
if chdir_found != None:
if line.split()[2] == '0': # check it ends with 0
newDir = line
return newDir
# Old implementation of recursive search
# It has a bug on the cwd (it's kept for any recent process)
# We want to unset the cwd once the process examination exits
#
# def recursiveTreeTraversal(self, fileName):
# with open(fileName) as fd:
# for line in fd:
# # Save current dir
# cwd = self.isChangeDir(line)
# if cwd != None:
# print('Found chdir: ', cwd, 'file:', fileName)
# self.currentWorkingDir = cwd
#
# # Check if it's a top command, and it if so
# topCmd = self.isTopCommand(line)
# if topCmd != None:
# # Add CWD and command
# print('Adding:')
# print('self.currentWorkingDir: ', self.currentWorkingDir)
# print('line:', line)
# self.traced_commands.append((self.currentWorkingDir, line))
# return
#
# # Check if child is created
# childPID = self.isChildSpawn(line)
# if childPID != None:
# childFileName = TRACES_DIR + '/trace.' + childPID
# self.recursiveTreeTraversal(childFileName)
def recursiveTreeTraversal(self, fileName, chdirCmd):
lastSeenCHDIR = chdirCmd
with open(fileName) as fd:
for line in fd:
# Save current dir
cwd = self.isChangeDir(line)
if cwd != None:
lastSeenCHDIR = cwd
# Check if it's a top command, and it if so
topCmd = self.isTopCommand(line)
if topCmd != None:
# Add CWD and command
self.traced_commands.append((lastSeenCHDIR, line))
return
# Check if child is created
childPID = self.isChildSpawn(line)
if childPID != None:
childFileName = TRACES_DIR + '/trace.' + childPID
self.recursiveTreeTraversal(childFileName, lastSeenCHDIR)
def analyzeTraces(self):
#prveTreeTraversal(root_file)
prGreen('Searching root PID...')
root_file = self.getRootFile()
print('root:', root_file)
prGreen('Analyzing traces...')
self.recursiveTreeTraversal(root_file, '')
def getProcessID(self, line):
p = self.pidPattern.match(line)
#print('match', p)
if p != None:
pid = line.split()[1].split(']')[0]
else:
pid = 'root'
return pid
def buildChildTree(self, line):
pid = self.getProcessID(line)
child = None
child_clone_1 = self.childCreationPattern_clone_1.search(line)
child_clone_2 = self.childCreationPattern_clone_2.search(line)
child_fork = self.childCreationPattern_fork.search(line)
read_pattern = self.readPattern.search(line)
write_pattern = self.writePattern.search(line)
if child_clone_1 != None and read_pattern == None and write_pattern == None:
child = line.split()[-1:][0]
elif child_clone_2 != None and read_pattern == None and write_pattern == None:
child = line.split()[-1:][0]
elif child_fork != None and read_pattern == None and write_pattern == None:
child = line.split()[-1:][0]
if child != None: # found child creation
if pid not in self.childTree:
self.childTree[pid] = [child]
else:
self.childTree[pid].append(child)
self.parentTree[child] = pid
if pid in self.tracedPIDs:
self.tracedPIDs.add(child)
def isASupportedCompiler(self, line):
for compiler in SUPPORTED_COMPILERS:
if line.endswith('/'+compiler): #or line == compiler:
return True
for tool in SUPPORTED_TOOLS:
if line.endswith('/'+tool):
return True
return False
# If it's a top command we do not trace their child commands
def isTopCommand(self, line):
baseExecutable = None
if "execve(\"" in line:
strCmd = line.split('execve(')[1].split(',')[0]
# Shell command
if strCmd.endswith('/sh"'):
cmd = line.split('["')[1].split(']')[0]
cmd = cmd.replace(', ','')
cmd = cmd.replace('"', '')
tokens = cmd.split()
for t in tokens:
if self.isASupportedCompiler(t):
baseExecutable = ' '.join(tokens)
strCmd = strCmd.replace('"', '')
if self.isASupportedCompiler(strCmd):
baseExecutable = strCmd
return baseExecutable
# [pid 78395] write(1, "[ 33%] Linking CXX static library libutil.a\n", 44[ 33%] Linking CXX static library libutil.a
def printStdOut(self, line):
if 'write(1' in line:
if 'Building' in line or 'Linking' in line:
l = line.split(', ')[1].replace('"','')
prGreen(l)
def saveCompilingCommands(self, l):
#l = line.decode('utf-8')
pid = self.getProcessID(l)
cmd = self.isTopCommand(l)
if cmd != None:
if pid not in self.tracedPIDs:
self.tracedPIDs.add(pid)
self.traced_commands.append(l)
#print('-->', cmd)
self.buildChildTree(l)
self.printStdOut(l)
# Check if the command invokes chaning directories
# If not, we change to the CWD
def commandIvokesChangeDir(self, line):
tokens = line.split()
if 'cd' in tokens:
idx = tokens.index('cd')
path = tokens[idx+1]
if os.path.exists(path):
return True
return False
def formatCommandForExecution(self, cwd, line):
if line.startswith('execve('):
line = line.split(', [')[1:]
line = ' '.join(line).split(']')[0]
line = line.replace(', ',' ')
line = line.replace('"', '')
line = line.replace('\\', '')
# Split commands if needed
allCommands = re.split('\&\&|\;', line)
newCommand = []
for cmd in allCommands:
if '/sh -c' in cmd:
cmd = ' '.join(cmd.split()[2:]) # remove /bin/sh -c
if '-E ' in cmd: # Remove commands that only run the preprocessor with -E
continue
if not self.commandIvokesChangeDir(line):
cmd = 'cd ' + cwd + ' && ' + cmd
newCommand.append(cmd)
line = ' && '.join(newCommand)
return line
def writeToFile(self):
fileNameRaw = TRACES_DIR + '/raw_traces.txt'
prGreen('Saving raw traces in '+fileNameRaw)
fd = open(fileNameRaw, 'w')
for line in self.traced_commands:
fd.write(str(line)+'\n')
fd.close()
fileNameExec = TRACES_DIR + '/executable_traces.txt'
prGreen('Saving executable traces in '+fileNameExec)
fd = open(fileNameExec, 'w')
for l in self.traced_commands:
#line = l[1]
cwd, line = l
#if l[0] != '':
# cwd = l[0].split('"')[1]
#else:
# cwd = '.'
if cwd != '':
cwd = cwd.split('"')[1]
else:
cwd = '.'
line = self.formatCommandForExecution(cwd, line)
fd.write(line+'\n')
fd.close()
def replayTraces(self, fileName):
fd = open(fileName, 'r')
for line in fd:
self.saveCompilingCommands(line)
fd.close()
def createTracesDir(self):
if os.path.exists(TRACES_DIR):
shutil.rmtree(TRACES_DIR)
os.makedirs(TRACES_DIR)
def startTracing(self):
self.createTracesDir()
trace_command = [STRACE, '-o', TRACES_FILES, '-ff', '-s', '9999'] + self.make_command
process = subprocess.Popen(trace_command, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Poll process for new output until finished
c = 0
while True:
nextline = process.stdout.readline()
#nextline = process.stderr.readline()
if process.poll() is not None or nextline.decode('utf-8') == '':
break
l = nextline.decode('utf-8')[:-1]
print(l)
#self.saveCompilingCommands(l)
#fd.write(l)
(stdout_data, stderr_data) = process.communicate()
exitCode = process.returncode
if (exitCode == 0):
return (stdout_data, stderr_data)
else:
sys.exit('Error in input: ' + str(self.make_command))
if __name__ == '__main__':
#l = 'execve("/usr/tce/packages/cuda/cuda-9.2.148/bin/nvcc", ["/usr/tce/packages/cuda/cuda-9.2.148/bin/nvcc", "-ccbin=clang++", "-restrict", "-gencode=arch=compute_70,code=sm_70", "-O3", "--expt-extended-lambda", "-Xcompiler=-fPIC", "-Wno-deprecated-gpu-targets", "-shared", "-dlink", "CMakeFiles/kripke.exe.dir/src/kripke.cpp.o", "-o", "CMakeFiles/kripke.exe.dir/cmake_device_link.o", "-L/usr/tce/packages/cuda/cuda-9.2.148/nvidia/targets/ppc64le-linux/lib/stubs", "-L/usr/tce/packages/cuda/cuda-9.2.148/nvidia/targets/ppc64le-linux/lib", "lib/libchai.a", "lib/libRAJA.a", "/usr/tce/packages/cuda/cuda-9.2.148/lib64/libcudart_static.a", "-lpthread", "-ldl", "lib/libkripke.a", "lib/libumpire.a", "-lcudadevrt", "-lcudart_static", "-lrt"], 0x7fffffffb8b8 /* 129 vars */) = 0\n'
#strace = CommandsTracing(['make', '-j'])
#ret = strace.isTopCommand(l)
#print(l)
#print('ret:', ret)
#exit()
#strace.analyzeTraces()
#strace.traced_commands.append(('', l))
#strace.writeToFile()
#exit()
cmd = sys.argv[1:]
strace = CommandsTracing(cmd)
#strace.startTracing()
strace.analyzeTraces()
strace.writeToFile()
| 0.914063 | 1 |
sk6502/c64/image.py | skoolkid/sk6502 | 7 | 72196 | <gh_stars>1-10
from skoolkit.image import (ImageWriter, TRANSPARENT, BLACK, BLUE, RED, GREEN,
CYAN, YELLOW, WHITE, PNG_ENABLE_ANIMATION)
PURPLE = 'PURPLE'
ORANGE = 'ORANGE'
BROWN = 'BROWN'
LIGHT_RED = 'LIGHT_RED'
DARK_GREY = 'DARK_GREY'
GREY = 'GREY'
LIGHT_GREEN = 'LIGHT_GREEN'
LIGHT_BLUE = 'LIGHT_BLUE'
LIGHT_GREY = 'LIGHT_GREY'
class C64ImageWriter(ImageWriter):
def __init__(self, config=None, palette=None):
config[PNG_ENABLE_ANIMATION] = 0
super().__init__(config, palette)
def get_default_colours(self):
return (
(TRANSPARENT, (0, 255, 0)),
(BLACK, (0, 0, 0)),
(WHITE, (255, 255, 255)),
(RED, (136, 0, 0)),
(CYAN, (170, 255, 238)),
(PURPLE, (204, 68, 204)),
(GREEN, (0, 204, 85)),
(BLUE, (0, 0, 170)),
(YELLOW, (238, 238, 119)),
(ORANGE, (221, 136, 85)),
(BROWN, (102, 68, 0)),
(LIGHT_RED, (255, 119, 119)),
(DARK_GREY, (51, 51, 51)),
(GREY, (119, 119, 119)),
(LIGHT_GREEN, (170, 255, 102)),
(LIGHT_BLUE, (0, 136, 255)),
(LIGHT_GREY, (187, 187, 187))
)
def get_attr_map(self):
return {c: (1 + (c & 0x0f), 1 + c // 16) for c in range(256)}
| 1.867188 | 2 |
tests/data/expected/main/main_openapi_enum_models_one/output.py | tomercagan/datamodel-code-generator | 0 | 72324 | # generated by datamodel-codegen:
# filename: enum_models.yaml
# timestamp: 2019-07-26T00:00:00+00:00
from __future__ import annotations
from enum import Enum
from typing import List, Literal, Optional, Union
from pydantic import BaseModel
class Kind(Enum):
dog = 'dog'
cat = 'cat'
class Pet(BaseModel):
id: int
name: str
tag: Optional[str] = None
kind: Optional[Kind] = None
type: Optional[Literal['animal']] = None
class Pets(BaseModel):
__root__: List[Pet]
class Kind1(Enum):
snake = 'snake'
rabbit = 'rabbit'
class Animal(BaseModel):
kind: Optional[Kind1] = None
class Error(BaseModel):
code: int
message: str
class Type(Enum):
a = 'a'
b = 'b'
class EnumObject(BaseModel):
type: Optional[Type] = None
class EnumRoot(Enum):
a = 'a'
b = 'b'
class IntEnum(Enum):
number_1 = 1
number_2 = 2
class AliasEnum(Enum):
a = 1
b = 2
c = 3
class MultipleTypeEnum(Enum):
red = 'red'
amber = 'amber'
green = 'green'
NoneType_None = None
int_42 = 42
class SingleEnum(Enum):
pet = 'pet'
class ArrayEnum(BaseModel):
__root__: List[Union[Literal['cat'], Literal['dog']]]
| 1.828125 | 2 |
extra/moderation/firewall.py | NiumXp/sloth-bot | 0 | 72452 | import discord
from discord.ext import commands
from mysqldb import the_database
class ModerationFirewallTable(commands.Cog):
""" Category for the Firewall system and its commands and methods. """
def __init__(self, client) -> None:
self.client = client
@commands.command(hidden=True)
@commands.has_permissions(administrator=True)
async def create_table_firewall(self, ctx) -> None:
""" (ADM) Creates the Firewall table. """
if await self.check_table_firewall_exists():
return await ctx.send("**Table __Firewall__ already exists!**")
await ctx.message.delete()
mycursor, db = await the_database()
await mycursor.execute("""CREATE TABLE Firewall (
state TINYINT(1) NOT NULL DEFAULT 0)""")
await mycursor.execute("INSERT INTO Firewall VALUES(0)")
await db.commit()
await mycursor.close()
return await ctx.send("**Table __Firewall__ created!**", delete_after=3)
@commands.command(hidden=True)
@commands.has_permissions(administrator=True)
async def drop_table_firewall(self, ctx) -> None:
""" (ADM) Creates the Firewall table """
if not await self.check_table_firewall_exists():
return await ctx.send("**Table __Firewall__ doesn't exist!**")
await ctx.message.delete()
mycursor, db = await the_database()
await mycursor.execute("DROP TABLE Firewall")
await db.commit()
await mycursor.close()
return await ctx.send("**Table __Firewall__ dropped!**", delete_after=3)
@commands.command(hidden=True)
@commands.has_permissions(administrator=True)
async def reset_table_firewall(self, ctx):
""" (ADM) Resets the Firewall table. """
if not await self.check_table_firewall_exists():
return await ctx.send("**Table __Firewall__ doesn't exist yet**")
await ctx.message.delete()
mycursor, db = await the_database()
await mycursor.execute("DELETE FROM Firewall")
await mycursor.execute("INSERT INTO Firewall VALUES(0)")
await db.commit()
await mycursor.close()
return await ctx.send("**Table __Firewall__ reset!**", delete_after=3)
async def check_table_firewall_exists(self) -> bool:
""" Checks if the MutedMember table exists """
mycursor, db = await the_database()
await mycursor.execute("SHOW TABLE STATUS LIKE 'Firewall'")
table_info = await mycursor.fetchall()
await mycursor.close()
if len(table_info) == 0:
return False
else:
return True
async def set_firewall_state(self, state: int) -> None:
""" Sets the firewall state to either true or false.
:param state: The state of the firewall to set. """
mycursor, db = await the_database()
await mycursor.execute("UPDATE Firewall SET state = %s", (state,))
await db.commit()
await mycursor.close()
async def get_firewall_state(self) -> int:
""" Gets the firewall's current state. """
mycursor, db = await the_database()
await mycursor.execute("SELECT state FROM Firewall")
fw_state = await mycursor.fetchone()
await mycursor.close()
return fw_state[0]
| 1.742188 | 2 |
route4me/address_book.py | route4me/route4me-python-sdk | 10 | 72580 | <reponame>route4me/route4me-python-sdk
# -*- coding: utf-8 -*-
import json
from .api_endpoints import ADDRESSBOOK
from .base import Base
from .exceptions import ParamValueException
class AddressBook(Base):
"""
Address Book Management
"""
REQUIRED_FIELDS = ('address_1', 'cached_lat', 'cached_lng',)
def __init__(self, api, addresses=[]):
"""
AddressBook Instance
:param api:
:return:
"""
self.json_data = {}
Base.__init__(self, api)
def create_contact(self, **kwargs):
"""
Create a contact in AddressBook using POST request
:return: API response
:raise: ParamValueException if required params are not present.
"""
self.json_data = kwargs
if self.check_required_params(self.json_data, self.REQUIRED_FIELDS):
self.response = self.api._request_post(ADDRESSBOOK,
self.params,
json=self.json_data)
return self.response.json()
else:
raise ParamValueException('params', 'Params are not complete')
def get_addressbook_contacts(self, **kwargs):
"""
Get contacts from AddressBook using GET request
:return: API response
:raise: ParamValueException if required params are not present.
"""
kwargs.update({'api_key': self.params['api_key'], })
if self.check_required_params(kwargs, ['api_key', ]):
self.response = self.api._request_get(ADDRESSBOOK,
kwargs)
return self.response.json()
else:
raise ParamValueException('params', 'Params are not complete')
def get_addressbook_contact(self, **kwargs):
"""
Get a contact from AddressBook using GET request
:return: API response
:raise: ParamValueException if required params are not present.
"""
kwargs.update({'api_key': self.params['api_key'], })
if self.check_required_params(kwargs, ['address_id', ]):
self.response = self.api._request_get(ADDRESSBOOK,
kwargs)
return self.response.json()
else:
raise ParamValueException('params', 'Params are not complete')
def update_contact(self, **kwargs):
"""
Update a contact from AddressBook using PUT request
:return: API response
:raise: ParamValueException if required params are not present.
"""
if self.check_required_params(kwargs, ['address_id', ]):
self.response = self.api._request_put(ADDRESSBOOK,
self.params,
json=kwargs)
return self.response.json()
else:
raise ParamValueException('params', 'Params are not complete')
def delete_addressbook_contact(self, **kwargs):
"""
Delete a contact from AddressBook using DELETE request
:return: API response
:raise: ParamValueException if required params are not present.
"""
if self.check_required_params(kwargs, ['address_ids', ]):
self.response = self.api._request_delete(ADDRESSBOOK,
self.params,
data=json.dumps(kwargs))
return self.response.json()
else:
raise ParamValueException('params', 'Params are not complete')
| 1.96875 | 2 |
cybox/core/observable.py | Mattlk13/python-cybox | 0 | 72708 | # Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import entities, fields, idgen
from cybox import Unicode
import cybox.bindings.cybox_core as core_binding
from cybox.common import MeasureSource, ObjectProperties, StructuredText
from cybox.core import Object, Event
def validate_operator(instance, value):
allowed = ObservableComposition.OPERATORS
if value in allowed:
return
error = "Operator must be one of {allowed}. Received '{value}'."
raise ValueError(error.format(**locals()))
def validate_object(instance, value):
if not value:
return
elif not isinstance(value, Object):
raise TypeError('value must be an Object')
elif instance.event:
raise ValueError("Observable already has an Event.")
elif instance.observable_composition:
raise ValueError("Observable already has an ObservableComposition.")
def validate_event(instance, value):
if not value:
return
elif not isinstance(value, Event):
raise TypeError("value must be an Event")
elif instance.object_:
raise ValueError("Observable already has an Object.")
elif instance.observable_composition:
raise ValueError("Observable already has an ObservableComposition.")
def validate_observable_composition(instance, value):
if not value:
return
elif not isinstance(value, ObservableComposition):
raise TypeError('value must be an ObservableComposition')
elif instance.object_:
raise ValueError("Observable already has an Object.")
elif instance.event:
raise ValueError("Observable already has an Event.")
class Keywords(entities.EntityList):
_binding = core_binding
_binding_class = core_binding.KeywordsType
_namespace = 'http://cybox.mitre.org/cybox-2'
keyword = fields.TypedField("Keyword", Unicode, multiple=True)
class Observable(entities.Entity):
"""A single Observable.
"""
_binding = core_binding
_binding_class = _binding.ObservableType
_namespace = 'http://cybox.mitre.org/cybox-2'
id_ = fields.IdField("id")
idref = fields.IdrefField("idref")
title = fields.TypedField("Title")
description = fields.TypedField("Description", StructuredText)
object_ = fields.TypedField("Object", Object, preset_hook=validate_object) # TODO: Add preset hook
event = fields.TypedField("Event", Event, preset_hook=validate_event)
observable_composition = fields.TypedField("Observable_Composition", type_="cybox.core.ObservableComposition", preset_hook=validate_observable_composition)
sighting_count = fields.TypedField("sighting_count")
observable_source = fields.TypedField("Observable_Source", MeasureSource, multiple=True)
keywords = fields.TypedField("Keywords", Keywords)
pattern_fidelity = fields.TypedField("Pattern_Fidelity", type_="cybox.core.PatternFidelity")
def __init__(self, item=None, id_=None, idref=None, title=None, description=None):
"""Create an Observable out of 'item'.
`item` can be any of:
- an Object
- an Event
- an ObservableComposition
- any subclass of ObjectProperties.
In the first three cases, the appropriate property of the Observable
will be set. In the last cases, an Object will be built automatically
to ensure the correct hierarchy is created.
"""
super(Observable, self).__init__()
self.id_ = id_ or idgen.create_id(prefix="Observable")
self.idref = idref
self.title = title
self.description = description
self.keywords = Keywords()
if item is None:
return
elif isinstance(item, Object):
self.object_ = item
elif isinstance(item, ObservableComposition):
self.observable_composition = item
elif isinstance(item, Event):
self.event = item
elif isinstance(item, ObjectProperties):
if item.parent:
self.object_ = item.parent
else:
self.object_ = Object(item)
else:
msg = ("item must be an Object, Event, ObservableComposition, or "
"subclass of ObjectProperties. Received an %s" % type(item))
raise TypeError(msg)
def add_keyword(self, value):
self.keywords.append(value)
class Observables(entities.EntityList):
"""The root CybOX Observables object.
"""
_binding = core_binding
_binding_class = _binding.ObservablesType
_namespace = 'http://cybox.mitre.org/cybox-2'
observable_package_source = fields.TypedField("Observable_Package_Source", MeasureSource)
observables = fields.TypedField("Observable", Observable, multiple=True, key_name="observables")
pools = fields.TypedField("Pools", type_="cybox.core.pool.Pools")
def __init__(self, observables=None):
super(Observables, self).__init__(observables)
# Assume major_verion and minor_version are immutable for now
self._major_version = 2
self._minor_version = 1
self._update_version = 0
def add(self, object_):
from cybox.core.pool import Pools
if not object_:
return
elif isinstance(object_, MeasureSource):
self.observable_package_source = object_
return
elif isinstance(object_, Pools):
self.pools = object_
return
elif not isinstance(object_, Observable):
object_ = Observable(object_)
self.observables.append(object_)
def to_obj(self, ns_info=None):
observables_obj = super(Observables, self).to_obj(ns_info=ns_info)
observables_obj.cybox_major_version = self._major_version
observables_obj.cybox_minor_version = self._minor_version
observables_obj.cybox_update_version = self._update_version
return observables_obj
def to_dict(self):
observables_dict = super(Observables, self).to_dict()
observables_dict['major_version'] = self._major_version
observables_dict['minor_version'] = self._minor_version
observables_dict['update_version'] = self._update_version
return observables_dict
class ObservableComposition(entities.EntityList):
"""The ObservableCompositionType entity defines a logical compositions of
CybOX Observables. The combinatorial behavior is derived from the operator
property."""
_binding = core_binding
_binding_class = _binding.ObservableCompositionType
_namespace = 'http://cybox.mitre.org/cybox-2'
OPERATOR_AND = 'AND'
OPERATOR_OR = 'OR'
OPERATORS = (OPERATOR_AND, OPERATOR_OR)
operator = fields.TypedField("operator", preset_hook=validate_operator)
observables = fields.TypedField("Observable", Observable, multiple=True, key_name="observables")
def __init__(self, operator='AND', observables=None):
super(ObservableComposition, self).__init__(observables)
self.operator = operator
def add(self, observable):
if not observable:
raise ValueError("'observable' must not be None")
self.append(observable)
| 1.71875 | 2 |
tests/api/test_serializer.py | Metabaron1/app | 1 | 72836 | from flask import url_for
from app.api.serializer import get_alias_infos_with_pagination_v3
from app.config import PAGE_LIMIT
from app.extensions import db
from app.models import User, ApiKey, Alias, Contact, EmailLog, Mailbox
def test_get_alias_infos_with_pagination_v3(flask_client):
user = User.create(
email="[email protected]",
password="password",
name="Test User",
activated=True,
commit=True,
)
# user has 1 alias that's automatically created when the account is created
alias_infos = get_alias_infos_with_pagination_v3(user)
assert len(alias_infos) == 1
alias_info = alias_infos[0]
alias = Alias.query.first()
assert alias_info.alias == alias
assert alias_info.mailbox == user.default_mailbox
assert alias_info.mailboxes == [user.default_mailbox]
assert alias_info.nb_forward == 0
assert alias_info.nb_blocked == 0
assert alias_info.nb_reply == 0
assert alias_info.latest_email_log is None
assert alias_info.latest_contact is None
def test_get_alias_infos_with_pagination_v3_query_alias_email(flask_client):
"""test the query on the alias email"""
user = User.create(
email="[email protected]",
password="password",
name="Test User",
activated=True,
commit=True,
)
alias = Alias.query.first()
alias_infos = get_alias_infos_with_pagination_v3(user, query=alias.email)
assert len(alias_infos) == 1
alias_infos = get_alias_infos_with_pagination_v3(user, query="no match")
assert len(alias_infos) == 0
def test_get_alias_infos_with_pagination_v3_query_alias_mailbox(flask_client):
"""test the query on the alias mailbox email"""
user = User.create(
email="[email protected]",
password="password",
name="Test User",
activated=True,
commit=True,
)
alias = Alias.query.first()
alias_infos = get_alias_infos_with_pagination_v3(user, query=alias.mailbox.email)
assert len(alias_infos) == 1
def test_get_alias_infos_with_pagination_v3_query_alias_mailboxes(flask_client):
"""test the query on the alias additional mailboxes"""
user = User.create(
email="[email protected]",
password="password",
name="Test User",
activated=True,
commit=True,
)
alias = Alias.query.first()
mb = Mailbox.create(user_id=user.id, email="<EMAIL>")
alias._mailboxes.append(mb)
db.session.commit()
alias_infos = get_alias_infos_with_pagination_v3(user, query=mb.email)
assert len(alias_infos) == 1
alias_infos = get_alias_infos_with_pagination_v3(user, query=alias.email)
assert len(alias_infos) == 1
def test_get_alias_infos_with_pagination_v3_query_alias_note(flask_client):
"""test the query on the alias note"""
user = User.create(
email="[email protected]",
password="password",
name="Test User",
activated=True,
commit=True,
)
alias = Alias.query.first()
alias.note = "test note"
db.session.commit()
alias_infos = get_alias_infos_with_pagination_v3(user, query="test note")
assert len(alias_infos) == 1
def test_get_alias_infos_with_pagination_v3_query_alias_name(flask_client):
"""test the query on the alias name"""
user = User.create(
email="[email protected]",
password="password",
name="Test User",
activated=True,
commit=True,
)
alias = Alias.query.first()
alias.name = "Test Name"
db.session.commit()
alias_infos = get_alias_infos_with_pagination_v3(user, query="test name")
assert len(alias_infos) == 1
def test_get_alias_infos_with_pagination_v3_no_duplicate(flask_client):
"""When an alias belongs to multiple mailboxes, make sure get_alias_infos_with_pagination_v3
returns no duplicates
"""
user = User.create(
email="[email protected]",
password="password",
name="Test User",
activated=True,
commit=True,
)
alias = Alias.query.first()
mb = Mailbox.create(user_id=user.id, email="<EMAIL>")
alias._mailboxes.append(mb)
db.session.commit()
alias_infos = get_alias_infos_with_pagination_v3(user)
assert len(alias_infos) == 1
| 1.6875 | 2 |
plottr/utils/misc.py | koubitlabuiuc/plottr | 19 | 72964 | <gh_stars>10-100
"""misc.py
Various utility functions.
"""
from enum import Enum
from typing import List, Tuple, TypeVar, Optional, Sequence, Any
def reorder_indices(lst: Sequence[str], target: Sequence[str]) -> Tuple[int, ...]:
"""
Determine how to bring a list with unique entries to a different order.
Supports only lists of strings.
:param lst: input list
:param target: list in the desired order
:return: the indices that will reorder the input to obtain the target.
:raises: ``ValueError`` for invalid inputs.
"""
if set([type(i) for i in lst]) != {str}:
raise ValueError('Only lists of strings are supported')
if len(set(lst)) < len(lst):
raise ValueError('Input list elements are not unique.')
if set(lst) != set(target) or len(lst) != len(target):
raise ValueError('Contents of input and target do not match.')
idxs = []
for elt in target:
idxs.append(lst.index(elt))
return tuple(idxs)
def reorder_indices_from_new_positions(lst: List[str], **pos: int) \
-> Tuple[int, ...]:
"""
Determine how to bring a list with unique entries to a different order.
:param lst: input list (of strings)
:param pos: new positions in the format ``element = new_position``.
non-specified elements will be adjusted automatically.
:return: the indices that will reorder the input to obtain the target.
:raises: ``ValueError`` for invalid inputs.
"""
if set([type(i) for i in lst]) != {str}:
raise ValueError('Only lists of strings are supported')
if len(set(lst)) < len(lst):
raise ValueError('Input list elements are not unique.')
target = lst.copy()
for item, newidx in pos.items():
oldidx = target.index(item)
del target[oldidx]
target.insert(newidx, item)
return reorder_indices(lst, target)
T = TypeVar('T')
def unwrap_optional(val: Optional[T]) -> T:
"""Covert a variable of type Optional[T] to T
If the variable has value None a ValueError will be raised
"""
if val is None:
raise ValueError("Expected a not None value but got a None value.")
return val
class AutoEnum(Enum):
"""Enum that with automatically incremented integer values.
Allows to pass additional arguments in the class variables to the __init__
method of the instances.
See: https://stackoverflow.com/questions/19330460/how-do-i-put-docstrings-on-enums/19330461#19330461
"""
def __new__(cls, *args: Any) -> "AutoEnum":
"""creating a new instance.
:param args: will be passed to __init__.
"""
value = len(cls) + 1
obj = object.__new__(cls)
obj._value_ = value
return obj
class LabeledOptions(AutoEnum):
"""Enum with a label for each element. We can find the name from the label
using :meth:`.fromLabel`.
Example::
>>> class Color(LabeledOptions):
... red = 'Red'
... blue = 'Blue'
Here, ``Color.blue`` has value ``2`` and ``Color.fromLabel('Blue')`` returns
``Color.blue``.
"""
def __init__(self, label: str) -> None:
self.label = label
@classmethod
def fromLabel(cls, label: str) -> Optional["LabeledOptions"]:
"""Find enum element from label."""
for k in cls:
if k.label.lower() == label.lower():
return k
return None
| 3.09375 | 3 |
fm/models/oidalias.py | xUndero/noc | 1 | 73092 | <filename>fm/models/oidalias.py<gh_stars>1-10
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# OIDAlias model
# ---------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ---------------------------------------------------------------------
# Third-party modules
import six
from mongoengine.document import Document
from mongoengine.fields import StringField, UUIDField
# NOC modules
from noc.core.prettyjson import to_json
@six.python_2_unicode_compatible
class OIDAlias(Document):
meta = {
"collection": "noc.oidaliases",
"strict": False,
"auto_create_index": False,
"json_collection": "fm.oidaliases",
"json_unique_fields": ["rewrite_oid"],
}
rewrite_oid = StringField(unique=True)
to_oid = StringField()
description = StringField(required=False)
uuid = UUIDField(binary=True)
# Lookup cache
cache = None
def __str__(self):
return "%s -> %s" % (self.rewrite_oid, self.to_oid)
@classmethod
def rewrite(cls, oid):
"""
Rewrite OID with alias if any
"""
if cls.cache is None:
# Initialize cache
cls.cache = dict((a.rewrite_oid, a.to_oid.split(".")) for a in cls.objects.all())
# Lookup
l_oid = oid.split(".")
rest = []
while l_oid:
c_oid = ".".join(l_oid)
try:
a_oid = cls.cache[c_oid]
# Found
return ".".join(a_oid + rest)
except KeyError:
rest = [l_oid.pop()] + rest
# Not found
return oid
def get_json_path(self):
return "%s.json" % self.rewrite_oid
def to_json(self):
r = {
"rewrite_oid": self.rewrite_oid,
"to_oid": self.to_oid,
"uuid": self.uuid,
"$collection": self._meta["json_collection"],
}
if self.description:
r["description"] = self.description
return to_json(r, order=["$collection", "rewrite_oid", "to_oid", "uuid"])
| 1.484375 | 1 |
cloudasr/monitor/test_lib.py | oplatek/cloud-asr | 62 | 73220 | import unittest
from lib import Monitor
from cloudasr.test_doubles import PollerSpy
from cloudasr.messages.helpers import *
class TestMonitor(unittest.TestCase):
def setUp(self):
self.poller = PollerSpy()
self.scale_workers = ScaleWorkersSpy()
self.create_poller = lambda: self.poller
self.monitor = Monitor(self.create_poller, self.emit, self.scale_workers, self.poller.has_next_message)
self.emmited_messages = []
def test_monitor_forwards_messages_to_socketio(self):
messages = [
createWorkerStatusMessage("tcp://127.0.0.1:1", "en-GB", "STARTED", 1).SerializeToString(),
createWorkerStatusMessage("tcp://127.0.0.1:1", "en-GB", "WORKING", 2).SerializeToString(),
]
self.run_monitor(messages)
expected_messages = [
{"address": "tcp://127.0.0.1:1", "model": "en-GB", "status": "STARTED", "time": 1},
{"address": "tcp://127.0.0.1:1", "model": "en-GB", "status": "WORKING", "time": 2},
]
self.assertThatMonitorForwardedMessages(expected_messages)
def test_monitor_saves_worker_statuses(self):
messages = [
createWorkerStatusMessage("tcp://127.0.0.1:1", "en-GB", "STARTED", 1).SerializeToString(),
createWorkerStatusMessage("tcp://127.0.0.1:2", "en-GB", "WORKING", 2).SerializeToString(),
]
self.run_monitor(messages)
expected_messages = [
{"address": "tcp://127.0.0.1:1", "model": "en-GB", "status": "STARTED", "time": 1},
{"address": "tcp://127.0.0.1:2", "model": "en-GB", "status": "WORKING", "time": 2},
]
self.assertEqual(expected_messages, self.monitor.get_statuses())
def test_monitor_will_add_new_workers_when_all_workers_are_working(self):
messages = [
createWorkerStatusMessage("tcp://127.0.0.1:1", "en-GB", "WORKING", 1).SerializeToString()
]
self.run_monitor(messages)
expected_messages = [
{"en-GB": +1}
]
self.assertEqual(expected_messages, self.scale_workers.scaling_history)
def test_monitor_will_not_add_new_workers_when_it_is_currently_adding_new_workers(self):
messages = [
createWorkerStatusMessage("tcp://127.0.0.1:1", "en-GB", "WORKING", 1).SerializeToString(),
createWorkerStatusMessage("tcp://127.0.0.1:1", "en-GB", "WORKING", 2).SerializeToString()
]
self.run_monitor(messages)
expected_messages = [{"en-GB": +1}, {}]
self.assertEqual(expected_messages, self.scale_workers.scaling_history)
def test_monitor_will_add_new_workers_when_it_finished_scaling_and_it_needs_new_workers(self):
messages = [
createWorkerStatusMessage("tcp://127.0.0.1:1", "en-GB", "WORKING", 1).SerializeToString(),
createWorkerStatusMessage("tcp://127.0.0.1:1", "en-GB", "WORKING", 2).SerializeToString(),
createWorkerStatusMessage("tcp://127.0.0.1:2", "en-GB", "STARTED", 3).SerializeToString(),
createWorkerStatusMessage("tcp://127.0.0.1:1", "en-GB", "WORKING", 4).SerializeToString(),
createWorkerStatusMessage("tcp://127.0.0.1:2", "en-GB", "WORKING", 5).SerializeToString()
]
self.run_monitor(messages)
expected_messages = [{"en-GB": +1}, {}, {}, {}, {"en-GB": +1}]
self.assertEqual(expected_messages, self.scale_workers.scaling_history)
def run_monitor(self, messages):
self.poller.add_messages([{"master": message} for message in messages])
self.monitor.run()
def assertThatMonitorForwardedMessages(self, messages):
forwarded_messages = self.emmited_messages
self.assertEqual(messages, forwarded_messages)
def emit(self, message):
self.emmited_messages.append(message)
class ScaleWorkersSpy:
def __init__(self):
self.scaling_history = []
def __call__(self, commands):
self.scaling_history.append(commands)
| 1.625 | 2 |
libcst/_parser/conversions/statement.py | jschavesr/LibCST | 1 | 73348 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
from typing import Any, Dict, List, Optional, Sequence, Tuple, Type
from libcst._exceptions import PartialParserSyntaxError
from libcst._maybe_sentinel import MaybeSentinel
from libcst._nodes.expression import (
Annotation,
Arg,
Asynchronous,
Attribute,
Call,
From,
LeftParen,
Name,
Param,
Parameters,
RightParen,
)
from libcst._nodes.op import (
AddAssign,
AssignEqual,
BaseAugOp,
BitAndAssign,
BitOrAssign,
BitXorAssign,
Comma,
DivideAssign,
Dot,
FloorDivideAssign,
ImportStar,
LeftShiftAssign,
MatrixMultiplyAssign,
ModuloAssign,
MultiplyAssign,
PowerAssign,
RightShiftAssign,
Semicolon,
SubtractAssign,
)
from libcst._nodes.statement import (
AnnAssign,
AsName,
Assert,
Assign,
AssignTarget,
AugAssign,
Break,
ClassDef,
Continue,
Decorator,
Del,
Else,
ExceptHandler,
Expr,
Finally,
For,
FunctionDef,
Global,
If,
Import,
ImportAlias,
ImportFrom,
IndentedBlock,
NameItem,
Nonlocal,
Pass,
Raise,
Return,
SimpleStatementLine,
SimpleStatementSuite,
Try,
While,
With,
WithItem,
)
from libcst._nodes.whitespace import EmptyLine, SimpleWhitespace
from libcst._parser.custom_itertools import grouper
from libcst._parser.production_decorator import with_production
from libcst._parser.types.config import ParserConfig
from libcst._parser.types.partials import (
AnnAssignPartial,
AssignPartial,
AugAssignPartial,
DecoratorPartial,
ExceptClausePartial,
FuncdefPartial,
ImportPartial,
ImportRelativePartial,
SimpleStatementPartial,
WithLeadingWhitespace,
)
from libcst._parser.types.token import Token
from libcst._parser.whitespace_parser import (
parse_empty_lines,
parse_parenthesizable_whitespace,
parse_simple_whitespace,
)
AUGOP_TOKEN_LUT: Dict[str, Type[BaseAugOp]] = {
"+=": AddAssign,
"-=": SubtractAssign,
"*=": MultiplyAssign,
"@=": MatrixMultiplyAssign,
"/=": DivideAssign,
"%=": ModuloAssign,
"&=": BitAndAssign,
"|=": BitOrAssign,
"^=": BitXorAssign,
"<<=": LeftShiftAssign,
">>=": RightShiftAssign,
"**=": PowerAssign,
"//=": FloorDivideAssign,
}
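# Illustrative lookup (sketch): AUGOP_TOKEN_LUT["+="] is the AddAssign class;
# convert_augassign below instantiates the matched class with the whitespace
# parsed around the operator token.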
@with_production("stmt_input", "stmt ENDMARKER")
def convert_stmt_input(config: ParserConfig, children: Sequence[Any]) -> Any:
(child, endmarker) = children
return child
@with_production("stmt", "simple_stmt_line | compound_stmt")
def convert_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
(child,) = children
return child
@with_production("simple_stmt_partial", "small_stmt (';' small_stmt)* [';'] NEWLINE")
def convert_simple_stmt_partial(config: ParserConfig, children: Sequence[Any]) -> Any:
*statements, trailing_whitespace = children
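    # ``statements`` interleaves small statements with their ';' tokens, so
    # grouper(..., 2) below yields (stmt, semicolon-or-None) pairs. True
    # division is deliberate: with no trailing ';' the child count is odd,
    # ``last_stmt`` is fractional, and the trailing-semicolon branch can
    # never match.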
last_stmt = len(statements) / 2
body = []
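    # Whitespace ownership, illustratively: in ``a = 1 ; b = 2 ;`` the middle
    # ';' owns the spaces on both sides, while the final ';' owns only the
    # space before it (the rest belongs to the trailing whitespace node).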
for i, (stmt_body, semi) in enumerate(grouper(statements, 2)):
if semi is not None:
if i == (last_stmt - 1):
# Trailing semicolons only own the whitespace before.
semi = Semicolon(
whitespace_before=parse_simple_whitespace(
config, semi.whitespace_before
),
whitespace_after=SimpleWhitespace(""),
)
else:
# Middle semicolons own the whitespace before and after.
semi = Semicolon(
whitespace_before=parse_simple_whitespace(
config, semi.whitespace_before
),
whitespace_after=parse_simple_whitespace(
config, semi.whitespace_after
),
)
else:
semi = MaybeSentinel.DEFAULT
body.append(stmt_body.value.with_changes(semicolon=semi))
return SimpleStatementPartial(
body,
whitespace_before=statements[0].whitespace_before,
trailing_whitespace=trailing_whitespace,
)
@with_production("simple_stmt_line", "simple_stmt_partial")
def convert_simple_stmt_line(config: ParserConfig, children: Sequence[Any]) -> Any:
"""
This function is similar to convert_simple_stmt_suite, but yields a different type
"""
(partial,) = children
return SimpleStatementLine(
partial.body,
leading_lines=parse_empty_lines(config, partial.whitespace_before),
trailing_whitespace=partial.trailing_whitespace,
)
@with_production("simple_stmt_suite", "simple_stmt_partial")
def convert_simple_stmt_suite(config: ParserConfig, children: Sequence[Any]) -> Any:
"""
This function is similar to convert_simple_stmt_line, but yields a different type
"""
(partial,) = children
return SimpleStatementSuite(
partial.body,
leading_whitespace=parse_simple_whitespace(config, partial.whitespace_before),
trailing_whitespace=partial.trailing_whitespace,
)
@with_production(
"small_stmt",
(
"expr_stmt | del_stmt | pass_stmt | break_stmt | continue_stmt | return_stmt"
+ "| raise_stmt | yield_stmt | import_stmt | global_stmt | nonlocal_stmt"
+ "| assert_stmt"
),
)
def convert_small_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
    # The child conversion has already built the small statement node, but
    # without its semicolon, because we can't know about semicolons yet.
    # convert_simple_stmt_partial attaches them when assembling the list.
(small_stmt_body,) = children
return small_stmt_body
@with_production(
"expr_stmt",
"testlist_star_expr (annassign | augassign | assign* )",
version=">=3.6",
)
@with_production(
"expr_stmt", "testlist_star_expr (augassign | assign* )", version="<=3.5"
)
@with_production("yield_stmt", "yield_expr")
def convert_expr_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
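    # Child shapes, illustratively: ``foo()`` -> [expr]; ``x: int = 1`` ->
    # [x, AnnAssignPartial]; ``x += 1`` -> [x, AugAssignPartial]; and
    # ``a = b = 1`` -> [a, AssignPartial, AssignPartial].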
if len(children) == 1:
# This is an unassigned expr statement (like a function call)
(test_node,) = children
return WithLeadingWhitespace(
Expr(value=test_node.value), test_node.whitespace_before
)
elif len(children) == 2:
lhs, rhs = children
if isinstance(rhs, AnnAssignPartial):
return WithLeadingWhitespace(
AnnAssign(
target=lhs.value,
annotation=rhs.annotation,
equal=MaybeSentinel.DEFAULT if rhs.equal is None else rhs.equal,
value=rhs.value,
),
lhs.whitespace_before,
)
elif isinstance(rhs, AugAssignPartial):
return WithLeadingWhitespace(
AugAssign(target=lhs.value, operator=rhs.operator, value=rhs.value),
lhs.whitespace_before,
)
# The only thing it could be at this point is an assign with one or more targets.
# So, walk the children moving the equals ownership back one and constructing a
# list of AssignTargets.
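    # Illustratively, ``a = b = 1`` arrives as [a, (=, b), (=, 1)]: each '='
    # was parsed with the expression that follows it, so target i borrows the
    # equal sign carried by child i + 1, and the last child supplies the value.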
targets = []
for i in range(len(children) - 1):
target = children[i].value
equal = children[i + 1].equal
targets.append(
AssignTarget(
target=target,
whitespace_before_equal=equal.whitespace_before,
whitespace_after_equal=equal.whitespace_after,
)
)
return WithLeadingWhitespace(
Assign(targets=tuple(targets), value=children[-1].value),
children[0].whitespace_before,
)
@with_production("annassign", "':' test ['=' test]", version=">=3.6,<3.8")
@with_production(
"annassign", "':' test ['=' (yield_expr|testlist_star_expr)]", version=">=3.8"
)
def convert_annassign(config: ParserConfig, children: Sequence[Any]) -> Any:
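    # Two shapes reach here, illustratively: ``x: int`` gives
    # [':', int] and ``x: int = 5`` gives [':', int, '=', 5].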
if len(children) == 2:
# Variable annotation only
colon, annotation = children
annotation = annotation.value
equal = None
value = None
elif len(children) == 4:
# Variable annotation and assignment
colon, annotation, equal, value = children
annotation = annotation.value
value = value.value
equal = AssignEqual(
whitespace_before=parse_simple_whitespace(config, equal.whitespace_before),
whitespace_after=parse_simple_whitespace(config, equal.whitespace_after),
)
else:
raise Exception("Invalid parser state!")
return AnnAssignPartial(
annotation=Annotation(
whitespace_before_indicator=parse_simple_whitespace(
config, colon.whitespace_before
),
whitespace_after_indicator=parse_simple_whitespace(
config, colon.whitespace_after
),
annotation=annotation,
),
equal=equal,
value=value,
)
@with_production(
"augassign",
(
"('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | "
+ "'>>=' | '**=' | '//=') (yield_expr | testlist)"
),
version=">=3.5",
)
@with_production(
"augassign",
(
"('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | '<<=' | "
+ "'>>=' | '**=' | '//=') (yield_expr | testlist)"
),
version="<3.5",
)
def convert_augassign(config: ParserConfig, children: Sequence[Any]) -> Any:
op, expr = children
if op.string not in AUGOP_TOKEN_LUT:
raise Exception(f"Unexpected token '{op.string}'!")
return AugAssignPartial(
# pyre-ignore Pyre seems to think that the value of this LUT is CSTNode
operator=AUGOP_TOKEN_LUT[op.string](
whitespace_before=parse_simple_whitespace(config, op.whitespace_before),
whitespace_after=parse_simple_whitespace(config, op.whitespace_after),
),
value=expr.value,
)
@with_production("assign", "'=' (yield_expr|testlist_star_expr)")
def convert_assign(config: ParserConfig, children: Sequence[Any]) -> Any:
equal, expr = children
return AssignPartial(
equal=AssignEqual(
whitespace_before=parse_simple_whitespace(config, equal.whitespace_before),
whitespace_after=parse_simple_whitespace(config, equal.whitespace_after),
),
value=expr.value,
)
@with_production("pass_stmt", "'pass'")
def convert_pass_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
(name,) = children
return WithLeadingWhitespace(Pass(), name.whitespace_before)
@with_production("del_stmt", "'del' exprlist")
def convert_del_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
(del_name, exprlist) = children
return WithLeadingWhitespace(
Del(
target=exprlist.value,
whitespace_after_del=parse_simple_whitespace(
config, del_name.whitespace_after
),
),
del_name.whitespace_before,
)
@with_production("continue_stmt", "'continue'")
def convert_continue_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
(name,) = children
return WithLeadingWhitespace(Continue(), name.whitespace_before)
@with_production("break_stmt", "'break'")
def convert_break_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
(name,) = children
return WithLeadingWhitespace(Break(), name.whitespace_before)
@with_production("return_stmt", "'return' [testlist]", version="<=3.7")
@with_production("return_stmt", "'return' [testlist_star_expr]", version=">=3.8")
def convert_return_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
if len(children) == 1:
(keyword,) = children
return WithLeadingWhitespace(
Return(whitespace_after_return=SimpleWhitespace("")),
keyword.whitespace_before,
)
else:
(keyword, testlist) = children
return WithLeadingWhitespace(
Return(
value=testlist.value,
whitespace_after_return=parse_simple_whitespace(
config, keyword.whitespace_after
),
),
keyword.whitespace_before,
)
@with_production("import_stmt", "import_name | import_from")
def convert_import_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
(child,) = children
return child
@with_production("import_name", "'import' dotted_as_names")
def convert_import_name(config: ParserConfig, children: Sequence[Any]) -> Any:
importtoken, names = children
return WithLeadingWhitespace(
Import(
names=names.names,
whitespace_after_import=parse_simple_whitespace(
config, importtoken.whitespace_after
),
),
importtoken.whitespace_before,
)
@with_production("import_relative", "('.' | '...')* dotted_name | ('.' | '...')+")
def convert_import_relative(config: ParserConfig, children: Sequence[Any]) -> Any:
dots = []
dotted_name = None
for child in children:
if isinstance(child, Token):
# Special case for "...", which is part of the grammar
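            # (e.g. ``from ...pkg import x`` delivers a single "..." token
            # rather than three "." tokens, so it expands to three Dot nodes)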
if child.string == "...":
dots.extend(
[
Dot(),
Dot(),
Dot(
whitespace_after=parse_simple_whitespace(
config, child.whitespace_after
)
),
]
)
else:
dots.append(
Dot(
whitespace_after=parse_simple_whitespace(
config, child.whitespace_after
)
)
)
else:
            # This should be the dotted name, and we can't get more than
            # one, but let's be sure anyway
if dotted_name is not None:
raise Exception("Logic error!")
dotted_name = child
return ImportRelativePartial(relative=tuple(dots), module=dotted_name)
@with_production(
"import_from",
"'from' import_relative 'import' ('*' | '(' import_as_names ')' | import_as_names)",
)
def convert_import_from(config: ParserConfig, children: Sequence[Any]) -> Any:
fromtoken, import_relative, importtoken, *importlist = children
if len(importlist) == 1:
(possible_star,) = importlist
if isinstance(possible_star, Token):
            # It's a "*" import, so we must construct this node.
names = ImportStar()
else:
            # It's an import as names partial, grab the names from that.
names = possible_star.names
lpar = None
rpar = None
else:
        # It's an import as names partial with parens
lpartoken, namespartial, rpartoken = importlist
lpar = LeftParen(
whitespace_after=parse_parenthesizable_whitespace(
config, lpartoken.whitespace_after
)
)
names = namespartial.names
rpar = RightParen(
whitespace_before=parse_parenthesizable_whitespace(
config, rpartoken.whitespace_before
)
)
# If we have a relative-only import, then we need to relocate the space
# after the final dot to be owned by the import token.
if len(import_relative.relative) > 0 and import_relative.module is None:
whitespace_before_import = import_relative.relative[-1].whitespace_after
relative = (
*import_relative.relative[:-1],
import_relative.relative[-1].with_changes(
whitespace_after=SimpleWhitespace("")
),
)
else:
whitespace_before_import = parse_simple_whitespace(
config, importtoken.whitespace_before
)
relative = import_relative.relative
return WithLeadingWhitespace(
ImportFrom(
whitespace_after_from=parse_simple_whitespace(
config, fromtoken.whitespace_after
),
relative=relative,
module=import_relative.module,
whitespace_before_import=whitespace_before_import,
whitespace_after_import=parse_simple_whitespace(
config, importtoken.whitespace_after
),
lpar=lpar,
names=names,
rpar=rpar,
),
fromtoken.whitespace_before,
)
@with_production("import_as_name", "NAME ['as' NAME]")
def convert_import_as_name(config: ParserConfig, children: Sequence[Any]) -> Any:
if len(children) == 1:
(dotted_name,) = children
return ImportAlias(name=Name(dotted_name.string), asname=None)
else:
dotted_name, astoken, name = children
return ImportAlias(
name=Name(dotted_name.string),
asname=AsName(
whitespace_before_as=parse_simple_whitespace(
config, astoken.whitespace_before
),
whitespace_after_as=parse_simple_whitespace(
config, astoken.whitespace_after
),
name=Name(name.string),
),
)
@with_production("dotted_as_name", "dotted_name ['as' NAME]")
def convert_dotted_as_name(config: ParserConfig, children: Sequence[Any]) -> Any:
if len(children) == 1:
(dotted_name,) = children
return ImportAlias(name=dotted_name, asname=None)
else:
dotted_name, astoken, name = children
return ImportAlias(
name=dotted_name,
asname=AsName(
whitespace_before_as=parse_parenthesizable_whitespace(
config, astoken.whitespace_before
),
whitespace_after_as=parse_parenthesizable_whitespace(
config, astoken.whitespace_after
),
name=Name(name.string),
),
)
@with_production("import_as_names", "import_as_name (',' import_as_name)* [',']")
def convert_import_as_names(config: ParserConfig, children: Sequence[Any]) -> Any:
return _gather_import_names(config, children)
@with_production("dotted_as_names", "dotted_as_name (',' dotted_as_name)*")
def convert_dotted_as_names(config: ParserConfig, children: Sequence[Any]) -> Any:
return _gather_import_names(config, children)
def _gather_import_names(
config: ParserConfig, children: Sequence[Any]
) -> ImportPartial:
names = []
for name, comma in grouper(children, 2):
if comma is None:
names.append(name)
else:
names.append(
name.with_changes(
comma=Comma(
whitespace_before=parse_parenthesizable_whitespace(
config, comma.whitespace_before
),
whitespace_after=parse_parenthesizable_whitespace(
config, comma.whitespace_after
),
)
)
)
return ImportPartial(names=names)
@with_production("dotted_name", "NAME ('.' NAME)*")
def convert_dotted_name(config: ParserConfig, children: Sequence[Any]) -> Any:
left, *rest = children
node = Name(left.string)
for dot, right in grouper(rest, 2):
node = Attribute(
value=node,
dot=Dot(
whitespace_before=parse_parenthesizable_whitespace(
config, dot.whitespace_before
),
whitespace_after=parse_parenthesizable_whitespace(
config, dot.whitespace_after
),
),
attr=Name(right.string),
)
return node
@with_production("raise_stmt", "'raise' [test ['from' test]]")
def convert_raise_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
if len(children) == 1:
(raise_token,) = children
whitespace_after_raise = MaybeSentinel.DEFAULT
exc = None
cause = None
elif len(children) == 2:
(raise_token, test) = children
whitespace_after_raise = parse_simple_whitespace(config, test.whitespace_before)
exc = test.value
cause = None
elif len(children) == 4:
(raise_token, test, from_token, source) = children
whitespace_after_raise = parse_simple_whitespace(config, test.whitespace_before)
exc = test.value
cause = From(
whitespace_before_from=parse_simple_whitespace(
config, from_token.whitespace_before
),
whitespace_after_from=parse_simple_whitespace(
config, source.whitespace_before
),
item=source.value,
)
else:
raise Exception("Logic error!")
return WithLeadingWhitespace(
Raise(whitespace_after_raise=whitespace_after_raise, exc=exc, cause=cause),
raise_token.whitespace_before,
)
def _construct_nameitems(config: ParserConfig, names: Sequence[Any]) -> List[NameItem]:
nameitems: List[NameItem] = []
for name, maybe_comma in grouper(names, 2):
if maybe_comma is None:
nameitems.append(NameItem(Name(name.string)))
else:
nameitems.append(
NameItem(
Name(name.string),
comma=Comma(
whitespace_before=parse_simple_whitespace(
config, maybe_comma.whitespace_before
),
whitespace_after=parse_simple_whitespace(
config, maybe_comma.whitespace_after
),
),
)
)
return nameitems
@with_production("global_stmt", "'global' NAME (',' NAME)*")
def convert_global_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
(global_token, *names) = children
return WithLeadingWhitespace(
Global(
names=tuple(_construct_nameitems(config, names)),
whitespace_after_global=parse_simple_whitespace(
config, names[0].whitespace_before
),
),
global_token.whitespace_before,
)
@with_production("nonlocal_stmt", "'nonlocal' NAME (',' NAME)*")
def convert_nonlocal_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
(nonlocal_token, *names) = children
return WithLeadingWhitespace(
Nonlocal(
names=tuple(_construct_nameitems(config, names)),
whitespace_after_nonlocal=parse_simple_whitespace(
config, names[0].whitespace_before
),
),
nonlocal_token.whitespace_before,
)
@with_production("assert_stmt", "'assert' test [',' test]")
def convert_assert_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
if len(children) == 2:
(assert_token, test) = children
assert_node = Assert(
whitespace_after_assert=parse_simple_whitespace(
config, test.whitespace_before
),
test=test.value,
msg=None,
)
else:
(assert_token, test, comma_token, msg) = children
assert_node = Assert(
whitespace_after_assert=parse_simple_whitespace(
config, test.whitespace_before
),
test=test.value,
comma=Comma(
whitespace_before=parse_simple_whitespace(
config, comma_token.whitespace_before
),
whitespace_after=parse_simple_whitespace(config, msg.whitespace_before),
),
msg=msg.value,
)
return WithLeadingWhitespace(assert_node, assert_token.whitespace_before)
@with_production(
"compound_stmt",
("if_stmt | while_stmt | asyncable_stmt | try_stmt | classdef | decorated"),
)
def convert_compound_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
(stmt,) = children
return stmt
@with_production(
"if_stmt", "'if' test ':' suite [if_stmt_elif|if_stmt_else]", version="<=3.7"
)
@with_production(
"if_stmt",
"'if' namedexpr_test ':' suite [if_stmt_elif|if_stmt_else]",
version=">=3.8",
)
def convert_if_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
if_tok, test, colon_tok, suite, *tail = children
if len(tail) > 0:
(orelse,) = tail
else:
orelse = None
return If(
leading_lines=parse_empty_lines(config, if_tok.whitespace_before),
whitespace_before_test=parse_simple_whitespace(config, if_tok.whitespace_after),
test=test.value,
whitespace_after_test=parse_simple_whitespace(
config, colon_tok.whitespace_before
),
body=suite,
orelse=orelse,
)
@with_production(
"if_stmt_elif", "'elif' test ':' suite [if_stmt_elif|if_stmt_else]", version="<=3.7"
)
@with_production(
"if_stmt_elif",
"'elif' namedexpr_test ':' suite [if_stmt_elif|if_stmt_else]",
version=">=3.8",
)
def convert_if_stmt_elif(config: ParserConfig, children: Sequence[Any]) -> Any:
# this behaves exactly the same as `convert_if_stmt`, except that the leading token
# has a different string value.
return convert_if_stmt(config, children)
@with_production("if_stmt_else", "'else' ':' suite")
def convert_if_stmt_else(config: ParserConfig, children: Sequence[Any]) -> Any:
else_tok, colon_tok, suite = children
return Else(
leading_lines=parse_empty_lines(config, else_tok.whitespace_before),
whitespace_before_colon=parse_simple_whitespace(
config, colon_tok.whitespace_before
),
body=suite,
)
@with_production(
"while_stmt", "'while' test ':' suite ['else' ':' suite]", version="<=3.7"
)
@with_production(
"while_stmt", "'while' namedexpr_test ':' suite ['else' ':' suite]", version=">=3.8"
)
def convert_while_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
while_token, test, while_colon_token, while_suite, *else_block = children
if len(else_block) > 0:
(else_token, else_colon_token, else_suite) = else_block
orelse = Else(
leading_lines=parse_empty_lines(config, else_token.whitespace_before),
whitespace_before_colon=parse_simple_whitespace(
config, else_colon_token.whitespace_before
),
body=else_suite,
)
else:
orelse = None
return While(
leading_lines=parse_empty_lines(config, while_token.whitespace_before),
whitespace_after_while=parse_simple_whitespace(
config, while_token.whitespace_after
),
test=test.value,
whitespace_before_colon=parse_simple_whitespace(
config, while_colon_token.whitespace_before
),
body=while_suite,
orelse=orelse,
)
@with_production(
"for_stmt", "'for' exprlist 'in' testlist ':' suite ['else' ':' suite]"
)
def convert_for_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
(
for_token,
expr,
in_token,
test,
for_colon_token,
for_suite,
*else_block,
) = children
if len(else_block) > 0:
(else_token, else_colon_token, else_suite) = else_block
orelse = Else(
leading_lines=parse_empty_lines(config, else_token.whitespace_before),
whitespace_before_colon=parse_simple_whitespace(
config, else_colon_token.whitespace_before
),
body=else_suite,
)
else:
orelse = None
return WithLeadingWhitespace(
For(
whitespace_after_for=parse_simple_whitespace(
config, for_token.whitespace_after
),
target=expr.value,
whitespace_before_in=parse_simple_whitespace(
config, in_token.whitespace_before
),
whitespace_after_in=parse_simple_whitespace(
config, in_token.whitespace_after
),
iter=test.value,
whitespace_before_colon=parse_simple_whitespace(
config, for_colon_token.whitespace_before
),
body=for_suite,
orelse=orelse,
),
for_token.whitespace_before,
)
@with_production(
"try_stmt",
"('try' ':' suite ((except_clause ':' suite)+ ['else' ':' suite] ['finally' ':' suite] | 'finally' ':' suite))",
)
def convert_try_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
trytoken, try_colon_token, try_suite, *rest = children
handlers: List[ExceptHandler] = []
orelse: Optional[Else] = None
finalbody: Optional[Finally] = None
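    # Each remaining clause arrives as a (clause, colon, suite) triple, e.g.
    # ``try: ... except ValueError: ... finally: ...`` yields one
    # ExceptClausePartial triple and one 'finally' token triple here.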
for clause, colon_token, suite in grouper(rest, 3):
if isinstance(clause, Token):
if clause.string == "else":
if orelse is not None:
raise Exception("Logic error!")
orelse = Else(
leading_lines=parse_empty_lines(config, clause.whitespace_before),
whitespace_before_colon=parse_simple_whitespace(
config, colon_token.whitespace_before
),
body=suite,
)
elif clause.string == "finally":
if finalbody is not None:
raise Exception("Logic error!")
finalbody = Finally(
leading_lines=parse_empty_lines(config, clause.whitespace_before),
whitespace_before_colon=parse_simple_whitespace(
config, colon_token.whitespace_before
),
body=suite,
)
else:
raise Exception("Logic error!")
elif isinstance(clause, ExceptClausePartial):
handlers.append(
ExceptHandler(
body=suite,
type=clause.type,
name=clause.name,
leading_lines=clause.leading_lines,
whitespace_after_except=clause.whitespace_after_except,
whitespace_before_colon=parse_simple_whitespace(
config, colon_token.whitespace_before
),
)
)
else:
raise Exception("Logic error!")
return Try(
leading_lines=parse_empty_lines(config, trytoken.whitespace_before),
whitespace_before_colon=parse_simple_whitespace(
config, try_colon_token.whitespace_before
),
body=try_suite,
handlers=tuple(handlers),
orelse=orelse,
finalbody=finalbody,
)
@with_production("except_clause", "'except' [test ['as' NAME]]")
def convert_except_clause(config: ParserConfig, children: Sequence[Any]) -> Any:
if len(children) == 1:
(except_token,) = children
whitespace_after_except = SimpleWhitespace("")
test = None
name = None
elif len(children) == 2:
(except_token, test_node) = children
whitespace_after_except = parse_simple_whitespace(
config, except_token.whitespace_after
)
test = test_node.value
name = None
else:
(except_token, test_node, as_token, name_token) = children
whitespace_after_except = parse_simple_whitespace(
config, except_token.whitespace_after
)
test = test_node.value
name = AsName(
whitespace_before_as=parse_simple_whitespace(
config, as_token.whitespace_before
),
whitespace_after_as=parse_simple_whitespace(
config, as_token.whitespace_after
),
name=Name(name_token.string),
)
return ExceptClausePartial(
leading_lines=parse_empty_lines(config, except_token.whitespace_before),
whitespace_after_except=whitespace_after_except,
type=test,
name=name,
)
@with_production(
"with_stmt", "'with' with_item (',' with_item)* ':' suite", version=">=3.1"
)
@with_production("with_stmt", "'with' with_item ':' suite", version="<3.1")
def convert_with_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
(with_token, *items, colon_token, suite) = children
item_nodes: List[WithItem] = []
for with_item, maybe_comma in grouper(items, 2):
if maybe_comma is not None:
item_nodes.append(
with_item.with_changes(
comma=Comma(
whitespace_before=parse_parenthesizable_whitespace(
config, maybe_comma.whitespace_before
),
whitespace_after=parse_parenthesizable_whitespace(
config, maybe_comma.whitespace_after
),
)
)
)
else:
item_nodes.append(with_item)
return WithLeadingWhitespace(
With(
whitespace_after_with=parse_simple_whitespace(
config, with_token.whitespace_after
),
items=tuple(item_nodes),
whitespace_before_colon=parse_simple_whitespace(
config, colon_token.whitespace_before
),
body=suite,
),
with_token.whitespace_before,
)
@with_production("with_item", "test ['as' expr]")
def convert_with_item(config: ParserConfig, children: Sequence[Any]) -> Any:
if len(children) == 3:
(test, as_token, expr_node) = children
test_node = test.value
asname = AsName(
whitespace_before_as=parse_simple_whitespace(
config, as_token.whitespace_before
),
whitespace_after_as=parse_simple_whitespace(
config, as_token.whitespace_after
),
name=expr_node.value,
)
else:
(test,) = children
test_node = test.value
asname = None
return WithItem(item=test_node, asname=asname)
def _extract_async(
config: ParserConfig, children: Sequence[Any]
) -> Tuple[List[EmptyLine], Optional[Asynchronous], Any]:
if len(children) == 1:
(stmt,) = children
whitespace_before = stmt.whitespace_before
asyncnode = None
else:
asynctoken, stmt = children
whitespace_before = asynctoken.whitespace_before
asyncnode = Asynchronous(
whitespace_after=parse_simple_whitespace(
config, asynctoken.whitespace_after
)
)
return (parse_empty_lines(config, whitespace_before), asyncnode, stmt.value)
@with_production("asyncable_funcdef", "[ASYNC] funcdef", version=">=3.5")
@with_production("asyncable_funcdef", "funcdef", version="<3.5")
def convert_asyncable_funcdef(config: ParserConfig, children: Sequence[Any]) -> Any:
leading_lines, asyncnode, funcdef = _extract_async(config, children)
return funcdef.with_changes(
asynchronous=asyncnode, leading_lines=leading_lines, lines_after_decorators=()
)
@with_production("funcdef", "'def' NAME parameters [funcdef_annotation] ':' suite")
def convert_funcdef(config: ParserConfig, children: Sequence[Any]) -> Any:
defnode, namenode, param_partial, *annotation, colon, suite = children
    # If the trailing parameter doesn't have a comma, then it owns the trailing
    # whitespace before the rpar. Otherwise, the comma owns it (and will have
    # already parsed it). We don't check/update ParamStar because if it exists
    # then we are guaranteed to have at least one kwonly_param.
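    # Illustrative example: in ``def f(a, b )`` the space before the ``)``
    # becomes ``b``'s whitespace_after_param below, whereas in ``def f(a, b, )``
    # the trailing comma has already claimed it.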
parameters = param_partial.params
if parameters.star_kwarg is not None:
if parameters.star_kwarg.comma == MaybeSentinel.DEFAULT:
parameters = parameters.with_changes(
star_kwarg=parameters.star_kwarg.with_changes(
whitespace_after_param=param_partial.rpar.whitespace_before
)
)
elif parameters.kwonly_params:
if parameters.kwonly_params[-1].comma == MaybeSentinel.DEFAULT:
parameters = parameters.with_changes(
kwonly_params=(
*parameters.kwonly_params[:-1],
parameters.kwonly_params[-1].with_changes(
whitespace_after_param=param_partial.rpar.whitespace_before
),
)
)
elif isinstance(parameters.star_arg, Param):
if parameters.star_arg.comma == MaybeSentinel.DEFAULT:
parameters = parameters.with_changes(
star_arg=parameters.star_arg.with_changes(
whitespace_after_param=param_partial.rpar.whitespace_before
)
)
elif parameters.params:
if parameters.params[-1].comma == MaybeSentinel.DEFAULT:
parameters = parameters.with_changes(
params=(
*parameters.params[:-1],
parameters.params[-1].with_changes(
whitespace_after_param=param_partial.rpar.whitespace_before
),
)
)
return WithLeadingWhitespace(
FunctionDef(
whitespace_after_def=parse_simple_whitespace(
config, defnode.whitespace_after
),
name=Name(namenode.string),
whitespace_after_name=parse_simple_whitespace(
config, namenode.whitespace_after
),
whitespace_before_params=param_partial.lpar.whitespace_after,
params=parameters,
returns=None if not annotation else annotation[0],
whitespace_before_colon=parse_simple_whitespace(
config, colon.whitespace_before
),
body=suite,
),
defnode.whitespace_before,
)
@with_production("parameters", "'(' [typedargslist] ')'")
def convert_parameters(config: ParserConfig, children: Sequence[Any]) -> Any:
lpar, *paramlist, rpar = children
return FuncdefPartial(
lpar=LeftParen(
whitespace_after=parse_parenthesizable_whitespace(
config, lpar.whitespace_after
)
),
params=Parameters() if not paramlist else paramlist[0],
rpar=RightParen(
whitespace_before=parse_parenthesizable_whitespace(
config, rpar.whitespace_before
)
),
)
@with_production("funcdef_annotation", "'->' test")
def convert_funcdef_annotation(config: ParserConfig, children: Sequence[Any]) -> Any:
arrow, typehint = children
return Annotation(
whitespace_before_indicator=parse_parenthesizable_whitespace(
config, arrow.whitespace_before
),
whitespace_after_indicator=parse_parenthesizable_whitespace(
config, arrow.whitespace_after
),
annotation=typehint.value,
)
@with_production("classdef", "'class' NAME ['(' [arglist] ')'] ':' suite")
def convert_classdef(config: ParserConfig, children: Sequence[Any]) -> Any:
classdef, name, *arglist, colon, suite = children
# First, parse out the comments and empty lines before the statement.
leading_lines = parse_empty_lines(config, classdef.whitespace_before)
# Compute common whitespace and nodes
whitespace_after_class = parse_simple_whitespace(config, classdef.whitespace_after)
namenode = Name(name.string)
whitespace_after_name = parse_simple_whitespace(config, name.whitespace_after)
# Now, construct the classdef node itself
if not arglist:
# No arglist, so no arguments to this class
return ClassDef(
leading_lines=leading_lines,
lines_after_decorators=(),
whitespace_after_class=whitespace_after_class,
name=namenode,
whitespace_after_name=whitespace_after_name,
body=suite,
)
else:
        # Unwrap arglist partial, because it's valid not to have any
lpar, *args, rpar = arglist
args = args[0].args if args else []
bases: List[Arg] = []
keywords: List[Arg] = []
current_arg = bases
for arg in args:
if arg.star == "**" or arg.keyword is not None:
current_arg = keywords
# Some quick validation
if current_arg is keywords and (
arg.star == "*" or (arg.star == "" and arg.keyword is None)
):
raise PartialParserSyntaxError(
"Positional argument follows keyword argument."
)
current_arg.append(arg)
return ClassDef(
leading_lines=leading_lines,
lines_after_decorators=(),
whitespace_after_class=whitespace_after_class,
name=namenode,
whitespace_after_name=whitespace_after_name,
lpar=LeftParen(
whitespace_after=parse_parenthesizable_whitespace(
config, lpar.whitespace_after
)
),
bases=bases,
keywords=keywords,
rpar=RightParen(
whitespace_before=parse_parenthesizable_whitespace(
config, rpar.whitespace_before
)
),
whitespace_before_colon=parse_simple_whitespace(
config, colon.whitespace_before
),
body=suite,
)
@with_production("decorator", "'@' dotted_name [ '(' [arglist] ')' ] NEWLINE")
def convert_decorator(config: ParserConfig, children: Sequence[Any]) -> Any:
atsign, name, *arglist, newline = children
if not arglist:
# This is either a name or an attribute node, so just extract it.
decoratornode = name
else:
# This needs to be converted into a call node, and we have the
# arglist partial.
lpar, *args, rpar = arglist
args = args[0].args if args else []
# If the trailing argument doesn't have a comma, then it owns the
# trailing whitespace before the rpar. Otherwise, the comma owns
# it.
if len(args) > 0 and args[-1].comma == MaybeSentinel.DEFAULT:
args[-1] = args[-1].with_changes(
whitespace_after_arg=parse_parenthesizable_whitespace(
config, rpar.whitespace_before
)
)
decoratornode = Call(
func=name,
whitespace_after_func=parse_simple_whitespace(
config, lpar.whitespace_before
),
whitespace_before_args=parse_parenthesizable_whitespace(
config, lpar.whitespace_after
),
args=tuple(args),
)
return Decorator(
leading_lines=parse_empty_lines(config, atsign.whitespace_before),
whitespace_after_at=parse_simple_whitespace(config, atsign.whitespace_after),
decorator=decoratornode,
trailing_whitespace=newline,
)
@with_production("decorators", "decorator+")
def convert_decorators(config: ParserConfig, children: Sequence[Any]) -> Any:
return DecoratorPartial(decorators=children)
@with_production("decorated", "decorators (classdef | asyncable_funcdef)")
def convert_decorated(config: ParserConfig, children: Sequence[Any]) -> Any:
partial, class_or_func = children
# First, split up the spacing on the first decorator
leading_lines = partial.decorators[0].leading_lines
# Now, redistribute ownership of the whitespace
decorators = (
partial.decorators[0].with_changes(leading_lines=()),
*partial.decorators[1:],
)
# Now, modify the original function or class to add the decorators.
return class_or_func.with_changes(
leading_lines=leading_lines,
# pyre-fixme[60]: Concatenation not yet support for multiple variadic
# tuples: `*class_or_func.leading_lines,
# *class_or_func.lines_after_decorators`.
# pyre-fixme[60]: Expected to unpack an iterable, but got `unknown`.
lines_after_decorators=(
*class_or_func.leading_lines,
*class_or_func.lines_after_decorators,
),
decorators=decorators,
)
@with_production(
"asyncable_stmt", "[ASYNC] (funcdef | with_stmt | for_stmt)", version=">=3.5"
)
@with_production("asyncable_stmt", "funcdef | with_stmt | for_stmt", version="<3.5")
def convert_asyncable_stmt(config: ParserConfig, children: Sequence[Any]) -> Any:
leading_lines, asyncnode, stmtnode = _extract_async(config, children)
if isinstance(stmtnode, FunctionDef):
return stmtnode.with_changes(
asynchronous=asyncnode,
leading_lines=leading_lines,
lines_after_decorators=(),
)
elif isinstance(stmtnode, With):
return stmtnode.with_changes(
asynchronous=asyncnode, leading_lines=leading_lines
)
elif isinstance(stmtnode, For):
return stmtnode.with_changes(
asynchronous=asyncnode, leading_lines=leading_lines
)
else:
raise Exception("Logic error!")
@with_production("suite", "simple_stmt_suite | indented_suite")
def convert_suite(config: ParserConfig, children: Sequence[Any]) -> Any:
(suite,) = children
return suite
@with_production("indented_suite", "NEWLINE INDENT stmt+ DEDENT")
def convert_indented_suite(config: ParserConfig, children: Sequence[Any]) -> Any:
newline, indent, *stmts, dedent = children
return IndentedBlock(
header=newline,
indent=(
None
if indent.relative_indent == config.default_indent
else indent.relative_indent
),
body=stmts,
# We want to be able to only keep comments in the footer that are actually for
# this IndentedBlock. We do so by assuming that lines which are indented to the
# same level as the block itself are comments that go at the footer of the
# block. Comments that are indented to less than this indent are assumed to
# belong to the next line of code. We override the indent here because the
# dedent node's absolute indent is the resulting indentation after the dedent
        # is performed. It's this way because the whitespace state for both the dedent's
# whitespace_after and the next BaseCompoundStatement's whitespace_before is
# shared. This allows us to partially parse here and parse the rest of the
# whitespace and comments on the next line, effectively making sure that
# comments are attached to the correct node.
footer=parse_empty_lines(
config,
dedent.whitespace_after,
override_absolute_indent=indent.whitespace_before.absolute_indent,
),
)
| 1.164063 | 1 |
aioopenssl/__init__.py | freundTech/aioopenssl | 11 | 73476 | """ # NOQA
:mod:`aioopenssl` --- A transport for asyncio using :mod:`OpenSSL`
##################################################################
This package provides a socket-based :class:`asyncio.Transport` which uses
:mod:`OpenSSL` to create a TLS connection. Optionally, the TLS handshake can be
deferred and performed later using :meth:`STARTTLSTransport.starttls`.
.. note::
Use this module at your own risk. It has lower test coverage than I’d like
it to have; it has been exported from aioxmpp on request, where it undergoes
implicit testing. If you find bugs, please report them. If possible, add
regression tests while you’re at it.
If you find security-critical bugs, please follow the procedure announced in
the `aioxmpp readme <https://github.com/horazont/aioxmpp>`_.
The following function can be used to create a connection using the
:class:`STARTTLSTransport`, which itself is documented below:
.. autofunction:: create_starttls_connection
The transport implementation is documented below:
.. autoclass:: STARTTLSTransport(loop, rawsock, protocol, ssl_context_factory, [waiter=None], [use_starttls=False], [post_handshake_callback=None], [peer_hostname=None], [server_hostname=None])
:members:
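A minimal usage sketch (hypothetical host, port and context factory, shown for
illustration only)::
    import asyncio
    import OpenSSL.SSL
    import aioopenssl
    def context_factory(transport):
        # assumption: a plain client context with library defaults
        return OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)
    async def main():
        transport, _protocol = await aioopenssl.create_starttls_connection(
            asyncio.get_event_loop(),
            asyncio.Protocol,
            host="example.com",   # hypothetical host
            port=5222,            # hypothetical port
            ssl_context_factory=context_factory,
            use_starttls=True,    # connect in the clear, upgrade later
        )
        await transport.starttls()  # perform the deferred TLS handshake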
"""
import asyncio
import logging
import socket
import typing
from enum import Enum
from .version import __version__, version_info, version # noqa:F401
from .utils import SendWrap
import OpenSSL.SSL
logger = logging.getLogger(__name__)
class _State(Enum):
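    # Bit layout (inferred from the properties below): 0x0001 = EOF received,
    # 0x0002 = no longer writable/open, 0x0100 = TLS started,
    # 0x0200 = TLS handshake in progress.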
RAW_OPEN = 0x0000 # noqa:E221
RAW_EOF_RECEIVED = 0x0001 # noqa:E221
TLS_HANDSHAKING = 0x0300 # noqa:E221
TLS_OPEN = 0x0100 # noqa:E221
TLS_EOF_RECEIVED = 0x0101 # noqa:E221
TLS_SHUTTING_DOWN = 0x0102 # noqa:E221
TLS_SHUT_DOWN = 0x0103 # noqa:E221
CLOSED = 0x0003 # noqa:E221
@property
def eof_received(self) -> bool:
return bool(self.value & 0x0001)
@property
def tls_started(self) -> bool:
return bool(self.value & 0x0100)
@property
def tls_handshaking(self) -> bool:
return bool(self.value & 0x0200)
@property
def is_writable(self) -> bool:
return not bool(self.value & 0x0002)
@property
def is_open(self) -> bool:
return (self.value & 0x3) == 0
SSLContextFactory = typing.Callable[[asyncio.Transport], OpenSSL.SSL.Context]
PostHandshakeCallback = typing.Callable[
["STARTTLSTransport"],
typing.Coroutine[typing.Any, typing.Any, None],
]
class STARTTLSTransport(asyncio.Transport):
"""
Create a new :class:`asyncio.Transport` which supports TLS and the deferred
starting of TLS using the :meth:`starttls` method.
`loop` must be a :class:`asyncio.BaseEventLoop` with support for
    :meth:`BaseEventLoop.add_reader` as well as the corresponding removal and
    writer methods.
`rawsock` must be a :class:`socket.socket` which will be used as the socket
for the transport. `protocol` must be a :class:`asyncio.Protocol` which
will be fed the data the transport receives.
`ssl_context_factory` must be a callable accepting a single positional
argument which returns a :class:`OpenSSL.SSL.Context`. The transport will
be passed as the argument to the factory. The returned context will be used
to create the :class:`OpenSSL.SSL.Connection` when TLS is enabled on the
transport. If the callable is :data:`None`, a `ssl_context` must be
supplied to :meth:`starttls` and `use_starttls` must be true.
`use_starttls` must be a boolean value. If it is true, TLS is not enabled
immediately. Instead, the user must call :meth:`starttls` to enable TLS on
the transport. Until that point, the transport is unencrypted. If it is
false, the TLS handshake is started immediately. This is roughly equivalent
to calling :meth:`starttls` immediately.
`peer_hostname` must be either a :class:`str` or :data:`None`. It may be
used by certificate validators and must be the host name this transport
actually connected to. That might be (e.g. in the case of XMPP) different
from the actual domain name the transport communicates with (and for which
the service must have a valid certificate). This host name may be used by
certificate validators implementing e.g. DANE.
`server_hostname` must be either a :class:`str` or :data:`None`. It may be
    used by certificate validators and must be the host name for which the
peer must have a valid certificate (if host name based certificate
validation is performed). `server_hostname` is also passed via the TLS
Server Name Indication (SNI) extension if it is given.
If host names are to be converted to :class:`bytes` by the transport, they
are encoded using the ``utf-8`` codec.
If `waiter` is not :data:`None`, it must be a
    :class:`asyncio.Future`. After the stream has been established, the future's
    result is set to a value of :data:`None`. If any errors occur, the
exception is set on the future.
If `use_starttls` is true, the future is fulfilled immediately after
construction, as there is no blocking process which needs to take place. If
`use_starttls` is false and thus TLS negotiation starts right away, the
future is fulfilled when TLS negotiation is complete.
`post_handshake_callback` may be a coroutine or :data:`None`. If it is not
:data:`None`, it is called asynchronously after the TLS handshake and
blocks the completion of the TLS handshake until it returns.
It can be used to perform blocking post-handshake certificate verification,
e.g. using DANE. The coroutine must not return a value. If it encounters an
error, an appropriate exception should be raised, which will propagate out
    of :meth:`starttls` and/or be passed to the `waiter` future.
"""
MAX_SIZE = 256 * 1024
def __init__(
self,
loop: asyncio.BaseEventLoop,
rawsock: socket.socket,
protocol: asyncio.Protocol,
ssl_context_factory: typing.Optional[SSLContextFactory] = None,
waiter: typing.Optional[asyncio.Future] = None,
use_starttls: bool = False,
post_handshake_callback: typing.Optional[
PostHandshakeCallback
] = None,
peer_hostname: typing.Optional[str] = None,
server_hostname: typing.Optional[str] = None):
if not use_starttls and not ssl_context_factory:
            raise ValueError("Cannot have STARTTLS disabled (i.e. immediate "
                             "TLS connection) without an SSL context.")
super().__init__()
self._rawsock = rawsock
self._raw_fd = rawsock.fileno()
self._trace_logger = logger.getChild(
"trace.fd={}".format(self._raw_fd)
)
self._sock = rawsock # type: typing.Union[socket.socket, OpenSSL.SSL.Connection] # noqa
self._send_wrap = SendWrap(self._sock)
self._protocol = protocol
self._loop = loop
self._extra = {
"socket": rawsock,
} # type: typing.Dict[str, typing.Any]
self._waiter = waiter
self._conn_lost = 0
self._buffer = bytearray()
self._ssl_context_factory = ssl_context_factory
self._extra.update(
sslcontext=None,
ssl_object=None,
peername=self._rawsock.getpeername(),
peer_hostname=peer_hostname,
server_hostname=server_hostname
)
        # this is a set of tasks which will also be cancelled if the
# _waiter is cancelled
self._chained_pending = set() # type: typing.Set[asyncio.Future]
self._paused = False
self._closing = False
self._tls_conn = None # type: typing.Optional[OpenSSL.SSL.Connection]
self._tls_read_wants_write = False
self._tls_write_wants_read = False
self._tls_post_handshake_callback = post_handshake_callback
self._state = None # type: typing.Optional[_State]
if not use_starttls:
assert ssl_context_factory is not None
self._ssl_context = ssl_context_factory(self)
self._extra.update(
sslcontext=self._ssl_context,
)
self._initiate_tls()
else:
self._initiate_raw()
def _waiter_done(self, fut: asyncio.Future) -> None:
self._trace_logger.debug("_waiter future done (%r)", fut)
for chained in self._chained_pending:
self._trace_logger.debug("cancelling chained %r", chained)
chained.cancel()
self._chained_pending.clear()
def _invalid_transition(
self,
via: typing.Optional[str] = None,
to: typing.Optional[_State] = None) -> None:
via_text = (" via {}".format(via)) if via is not None else ""
to_text = (" to {}".format(to)) if to is not None else ""
msg = "Invalid state transition (from {}{}{})".format(
self._state,
via_text,
to_text
)
logger.error(msg)
raise RuntimeError(msg)
def _invalid_state(
self,
what: str,
exc: typing.Type[Exception] = RuntimeError,
) -> Exception:
msg = "{what} (invalid in state {state}, closing={closing})".format(
what=what,
state=self._state,
closing=self._closing)
logger.error(msg)
# raising is optional :)
return exc(msg)
def _fatal_error(
self,
exc: BaseException,
msg: str) -> None:
if not isinstance(exc, (BrokenPipeError, ConnectionResetError)):
self._loop.call_exception_handler({
"message": msg,
"exception": exc,
"transport": self,
"protocol": self._protocol
})
self._force_close(exc)
def _force_close(
self,
exc: typing.Optional[BaseException],
) -> None:
self._trace_logger.debug("_force_close called")
self._remove_rw()
if self._state == _State.CLOSED:
raise self._invalid_state("_force_close called")
self._state = _State.CLOSED
if self._buffer:
self._buffer.clear()
if self._waiter is not None and not self._waiter.done():
self._waiter.set_exception(
exc or ConnectionError("_force_close() called"),
)
self._loop.remove_reader(self._raw_fd)
self._loop.remove_writer(self._raw_fd)
self._loop.call_soon(self._call_connection_lost_and_clean_up, exc)
def _remove_rw(self) -> None:
self._trace_logger.debug("clearing readers/writers")
self._loop.remove_reader(self._raw_fd)
self._loop.remove_writer(self._raw_fd)
def _call_connection_lost_and_clean_up(
self,
exc: Exception,
) -> None:
"""
        Clean up all resources and call the protocol's connection_lost method.
"""
self._state = _State.CLOSED
try:
self._protocol.connection_lost(exc)
finally:
self._rawsock.close()
if self._tls_conn is not None:
self._tls_conn.set_app_data(None)
self._tls_conn = None
self._rawsock = None # type:ignore
self._protocol = None # type:ignore
def _initiate_raw(self) -> None:
if self._state is not None:
self._invalid_transition(via="_initiate_raw", to=_State.RAW_OPEN)
self._state = _State.RAW_OPEN
self._loop.add_reader(self._raw_fd, self._read_ready)
self._loop.call_soon(self._protocol.connection_made, self)
if self._waiter is not None:
self._loop.call_soon(self._waiter.set_result, None)
self._waiter = None
def _initiate_tls(self) -> None:
self._trace_logger.debug("_initiate_tls called")
if self._state is not None and self._state != _State.RAW_OPEN:
self._invalid_transition(via="_initiate_tls",
to=_State.TLS_HANDSHAKING)
self._tls_was_starttls = (self._state == _State.RAW_OPEN)
self._state = _State.TLS_HANDSHAKING
self._tls_conn = OpenSSL.SSL.Connection(
self._ssl_context,
self._sock)
self._tls_conn.set_connect_state()
self._tls_conn.set_app_data(self)
try:
self._tls_conn.set_tlsext_host_name(
self._extra["server_hostname"].encode("IDNA"))
except KeyError:
pass
self._sock = self._tls_conn
self._send_wrap = SendWrap(self._sock)
self._extra.update(
ssl_object=self._tls_conn
)
self._tls_do_handshake()
def _tls_do_handshake(self) -> None:
assert self._tls_conn is not None
self._trace_logger.debug("_tls_do_handshake called")
if self._state != _State.TLS_HANDSHAKING:
raise self._invalid_state("_tls_do_handshake called")
try:
self._tls_conn.do_handshake()
except OpenSSL.SSL.WantReadError:
self._trace_logger.debug(
"registering reader for _tls_do_handshake")
self._loop.add_reader(self._raw_fd, self._tls_do_handshake)
return
except OpenSSL.SSL.WantWriteError:
self._trace_logger.debug(
"registering writer for _tls_do_handshake")
self._loop.add_writer(self._raw_fd, self._tls_do_handshake)
return
except Exception as exc:
self._remove_rw()
self._fatal_error(exc, "Fatal error on tls handshake")
if self._waiter is not None:
self._waiter.set_exception(exc)
return
except BaseException as exc:
self._remove_rw()
if self._waiter is not None:
self._waiter.set_exception(exc)
raise
self._remove_rw()
# handshake complete
self._trace_logger.debug("handshake complete")
self._extra.update(
peercert=self._tls_conn.get_peer_certificate()
)
if self._tls_post_handshake_callback:
self._trace_logger.debug("post handshake scheduled via callback")
task = asyncio.ensure_future(
self._tls_post_handshake_callback(self)
)
task.add_done_callback(self._tls_post_handshake_done)
self._chained_pending.add(task)
self._tls_post_handshake_callback = None
else:
self._tls_post_handshake(None)
def _tls_post_handshake_done(
self,
task: asyncio.Future,
) -> None:
self._chained_pending.discard(task)
try:
task.result()
except asyncio.CancelledError:
# canceled due to closure or something similar
pass
except BaseException as err:
self._tls_post_handshake(err)
else:
self._tls_post_handshake(None)
def _tls_post_handshake(
self,
exc: typing.Optional[BaseException],
) -> None:
self._trace_logger.debug("_tls_post_handshake called")
if exc is not None:
if self._waiter is not None and not self._waiter.done():
self._waiter.set_exception(exc)
self._fatal_error(exc, "Fatal error on post-handshake callback")
return
self._tls_read_wants_write = False
self._tls_write_wants_read = False
self._state = _State.TLS_OPEN
self._loop.add_reader(self._raw_fd, self._read_ready)
if not self._tls_was_starttls:
self._loop.call_soon(self._protocol.connection_made, self)
if self._waiter is not None:
self._loop.call_soon(self._waiter.set_result, None)
def _tls_do_shutdown(self) -> None:
self._trace_logger.debug("_tls_do_shutdown called")
if self._state != _State.TLS_SHUTTING_DOWN:
raise self._invalid_state("_tls_do_shutdown called")
assert isinstance(self._sock, OpenSSL.SSL.Connection)
try:
self._sock.shutdown()
except OpenSSL.SSL.WantReadError:
self._trace_logger.debug("registering reader for _tls_shutdown")
self._loop.add_reader(self._raw_fd, self._tls_shutdown)
return
except OpenSSL.SSL.WantWriteError:
self._trace_logger.debug("registering writer for _tls_shutdown")
self._loop.add_writer(self._raw_fd, self._tls_shutdown)
return
except Exception as exc:
# force_close will take care of removing rw handlers
self._fatal_error(exc, "Fatal error on tls shutdown")
return
except BaseException:
self._remove_rw()
raise
self._remove_rw()
self._state = _State.TLS_SHUT_DOWN
# continue to raw shut down
self._raw_shutdown()
def _tls_shutdown(self) -> None:
self._state = _State.TLS_SHUTTING_DOWN
self._tls_do_shutdown()
def _raw_shutdown(self) -> None:
self._remove_rw()
try:
self._rawsock.shutdown(socket.SHUT_RDWR)
except OSError:
# we cannot do anything anyway if this fails
pass
self._force_close(None)
def _read_ready(self) -> None:
assert self._state is not None
if self._state.tls_started and self._tls_write_wants_read:
self._tls_write_wants_read = False
self._write_ready()
if self._buffer:
self._trace_logger.debug("_read_ready: add writer for more"
" data")
self._loop.add_writer(self._raw_fd, self._write_ready)
if self._state.eof_received:
# no further reading
return
try:
data = self._sock.recv(self.MAX_SIZE)
except (BlockingIOError, InterruptedError, OpenSSL.SSL.WantReadError):
pass
except OpenSSL.SSL.WantWriteError:
assert self._state.tls_started
self._tls_read_wants_write = True
self._trace_logger.debug("_read_ready: swap reader for writer")
self._loop.remove_reader(self._raw_fd)
self._loop.add_writer(self._raw_fd, self._write_ready)
except OpenSSL.SSL.SysCallError as exc:
if self._state in (_State.TLS_SHUT_DOWN,
_State.TLS_SHUTTING_DOWN,
_State.CLOSED):
self._trace_logger.debug(
"_read_ready: ignoring syscall exception during shutdown: "
"%s",
exc,
)
else:
self._fatal_error(exc,
"Fatal read error on STARTTLS transport")
except Exception as err:
self._fatal_error(err, "Fatal read error on STARTTLS transport")
return
else:
if data:
self._protocol.data_received(data)
else:
keep_open = False
try:
keep_open = bool(self._protocol.eof_received())
finally:
self._eof_received(keep_open)
def _write_ready(self) -> None:
assert self._state is not None
if self._tls_read_wants_write:
self._tls_read_wants_write = False
self._read_ready()
if not self._paused and not self._state.eof_received:
self._trace_logger.debug("_write_ready: add reader for more"
" data")
self._loop.add_reader(self._raw_fd, self._read_ready)
# do not send data during handshake!
if self._buffer and self._state != _State.TLS_HANDSHAKING:
try:
nsent = self._send_wrap.send(self._buffer)
except (BlockingIOError, InterruptedError,
OpenSSL.SSL.WantWriteError):
nsent = 0
except OpenSSL.SSL.WantReadError:
nsent = 0
assert self._state.tls_started
self._tls_write_wants_read = True
self._trace_logger.debug(
"_write_ready: swap writer for reader")
self._loop.remove_writer(self._raw_fd)
self._loop.add_reader(self._raw_fd, self._read_ready)
except OpenSSL.SSL.SysCallError as exc:
if self._state in (_State.TLS_SHUT_DOWN,
_State.TLS_SHUTTING_DOWN,
_State.CLOSED):
self._trace_logger.debug(
"_write_ready: ignoring syscall exception during "
"shutdown: %s",
exc,
)
else:
self._fatal_error(exc,
"Fatal write error on STARTTLS "
"transport")
except Exception as err:
self._fatal_error(err,
"Fatal write error on STARTTLS "
"transport")
return
if nsent:
del self._buffer[:nsent]
if not self._buffer:
if not self._tls_read_wants_write:
self._trace_logger.debug("_write_ready: nothing more to write,"
" removing writer")
self._loop.remove_writer(self._raw_fd)
if self._closing:
if self._state.tls_started:
self._tls_shutdown()
else:
self._raw_shutdown()
def _eof_received(self, keep_open: bool) -> None:
assert self._state is not None
self._trace_logger.debug("_eof_received: removing reader")
self._loop.remove_reader(self._raw_fd)
if self._state.tls_started:
assert self._tls_conn is not None
if self._tls_conn.get_shutdown() & OpenSSL.SSL.RECEIVED_SHUTDOWN:
# proper TLS shutdown going on
if keep_open:
self._state = _State.TLS_EOF_RECEIVED
else:
self._tls_shutdown()
else:
if keep_open:
                self._trace_logger.warning(
                    "result of eof_received() ignored as the shutdown is"
                    " improper",
)
self._fatal_error(
ConnectionError("Underlying transport closed"),
"unexpected eof_received"
)
else:
if keep_open:
self._state = _State.RAW_EOF_RECEIVED
else:
self._raw_shutdown()
# public API
def abort(self) -> None:
"""
Immediately close the stream, without sending remaining buffers or
performing a proper shutdown.
"""
if self._state == _State.CLOSED:
self._invalid_state("abort() called")
return
self._force_close(None)
def can_write_eof(self) -> bool:
"""
Return :data:`False`.
.. note::
Writing of EOF (i.e. closing the sending direction of the stream) is
theoretically possible. However, it was deemed by the author that
the case is rare enough to neglect it for the sake of implementation
simplicity.
"""
return False
def close(self) -> None:
"""
Close the stream. This performs a proper stream shutdown, except if the
stream is currently performing a TLS handshake. In that case, calling
:meth:`close` is equivalent to calling :meth:`abort`.
Otherwise, the transport waits until all buffers are transmitted.
"""
if self._state == _State.CLOSED:
self._invalid_state("close() called")
return
if self._state == _State.TLS_HANDSHAKING:
# hard-close
self._force_close(None)
elif self._state == _State.TLS_SHUTTING_DOWN:
# shut down in progress, nothing to do
pass
elif self._buffer:
# there is data to be send left, first wait for it to transmit ...
self._closing = True
elif self._state is not None and self._state.tls_started:
# normal TLS state, nothing left to transmit, shut down
self._tls_shutdown()
else:
# normal non-TLS state, nothing left to transmit, close
self._raw_shutdown()
def get_extra_info(
self,
name: str,
default: typing.Optional[typing.Any] = None,
) -> typing.Any:
"""
The following extra information is available:
* ``socket``: the underlying :mod:`socket` object
* ``sslcontext``: the :class:`OpenSSL.SSL.Context` object to use (this
may be :data:`None` until :meth:`starttls` has been called)
* ``ssl_object``: :class:`OpenSSL.SSL.Connection` object (:data:`None`
if TLS is not enabled (yet))
* ``peername``: return value of :meth:`socket.Socket.getpeername`
* ``peer_hostname``: The `peer_hostname` value passed to the
constructor.
* ``server_hostname``: The `server_hostname` value passed to the
constructor.
"""
return self._extra.get(name, default)
async def starttls(
self,
ssl_context: typing.Optional[OpenSSL.SSL.Context] = None,
post_handshake_callback: typing.Optional[
PostHandshakeCallback
] = None,
) -> None:
"""
Start a TLS stream on top of the socket. This is an invalid operation
if the stream is not in RAW_OPEN state.
If `ssl_context` is set, it overrides the `ssl_context` passed to the
constructor. If `post_handshake_callback` is set, it overrides the
`post_handshake_callback` passed to the constructor.
.. versionchanged:: 0.4
This method is now a barrier with respect to reads and writes:
before the handshake is completed (including the post handshake
callback, if any), no data is received or sent.
"""
if self._state != _State.RAW_OPEN or self._closing:
raise self._invalid_state("starttls() called")
if ssl_context is not None:
self._ssl_context = ssl_context
self._extra.update(
sslcontext=ssl_context
)
else:
assert self._ssl_context_factory is not None
self._ssl_context = self._ssl_context_factory(self)
if post_handshake_callback is not None:
self._tls_post_handshake_callback = post_handshake_callback
self._waiter = asyncio.Future()
self._waiter.add_done_callback(self._waiter_done)
self._initiate_tls()
try:
await self._waiter
finally:
self._waiter = None
def write(self, data: typing.Union[bytes, bytearray, memoryview]) -> None:
"""
Write data to the transport. This is an invalid operation if the stream
is not writable, that is, if it is closed. During TLS negotiation, the
data is buffered.
"""
if not isinstance(data, (bytes, bytearray, memoryview)):
            raise TypeError('data argument must be byte-ish (%r)'
                            % type(data))
if (self._state is None or
not self._state.is_writable or
self._closing):
raise self._invalid_state("write() called")
if not data:
return
if not self._buffer:
self._loop.add_writer(self._raw_fd, self._write_ready)
self._buffer.extend(data)
def write_eof(self) -> None:
"""
Writing the EOF has not been implemented, for the sake of simplicity.
"""
raise NotImplementedError("Cannot write_eof() on STARTTLS transport")
def can_starttls(self) -> bool:
"""
Return :data:`True`.
"""
return True
def is_closing(self) -> bool:
return (self._state == _State.TLS_SHUTTING_DOWN or
self._state == _State.CLOSED)
async def create_starttls_connection(
loop: asyncio.BaseEventLoop,
protocol_factory: typing.Callable[[], asyncio.Protocol],
host: typing.Optional[str] = None,
port: typing.Optional[int] = None,
*,
sock: typing.Optional[socket.socket] = None,
ssl_context_factory: typing.Optional[SSLContextFactory] = None,
use_starttls: bool = False,
local_addr: typing.Any = None,
**kwargs # type: typing.Any
) -> typing.Tuple[asyncio.Transport, asyncio.Protocol]:
"""
Create a connection which can later be upgraded to use TLS.
.. versionchanged:: 0.4
The `local_addr` argument was added.
:param loop: The event loop to use.
:type loop: :class:`asyncio.BaseEventLoop`
:param protocol_factory: Factory for the protocol for the connection
:param host: The host name or address to connect to
:type host: :class:`str` or :data:`None`
:param port: The port to connect to
:type port: :class:`int` or :data:`None`
:param sock: A socket to wrap (conflicts with `host` and `port`)
:type sock: :class:`socket.socket`
:param ssl_context_factory: Function which returns a
:class:`OpenSSL.SSL.Context` to use for TLS operations
:param use_starttls: Flag to control whether TLS is negotiated right away
or deferredly.
:type use_starttls: :class:`bool`
:param local_addr: Address to bind to
This is roughly a copy of the asyncio implementation of
:meth:`asyncio.BaseEventLoop.create_connection`. It returns a pair
``(transport, protocol)``, where `transport` is a newly created
:class:`STARTTLSTransport` instance. Further keyword arguments are
forwarded to the constructor of :class:`STARTTLSTransport`.
`loop` must be a :class:`asyncio.BaseEventLoop`, with support for
:meth:`asyncio.BaseEventLoop.add_reader` and the corresponding writer and
removal functions for sockets. This is typically a selector type event
loop.
`protocol_factory` must be a callable which (without any arguments) returns
a :class:`asyncio.Protocol` which will be connected to the STARTTLS
transport.
`host` and `port` must be a hostname and a port number, or both
:data:`None`. Both must be :data:`None`, if and only if `sock` is not
:data:`None`. In that case, `sock` is used instead of a newly created
socket. `sock` is put into non-blocking mode and must be a stream socket.
If `use_starttls` is :data:`True`, no TLS handshake will be performed
initially. Instead, the connection is established without any
transport-layer security. It is expected that the
:meth:`STARTTLSTransport.starttls` method is used when the application
protocol requires TLS. If `use_starttls` is :data:`False`, the TLS
handshake is initiated right away.
`local_addr` may be an address to bind this side of the socket to. If
omitted or :data:`None`, the local address is assigned by the operating
system.
This coroutine returns when the stream is established. If `use_starttls` is
:data:`False`, this means that the full TLS handshake has to be finished
    for this coroutine to return. Otherwise, no TLS handshake takes place; it
    must be initiated later using the :meth:`STARTTLSTransport.starttls`
    coroutine.
"""
if host is not None and port is not None:
host_addrs = await loop.getaddrinfo(
host, port,
type=socket.SOCK_STREAM,
)
exceptions = []
for family, type, proto, cname, address in host_addrs:
sock = None
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
if local_addr is not None:
sock.bind(local_addr)
await loop.sock_connect(sock, address)
except OSError as exc:
if sock is not None:
sock.close()
exceptions.append(exc)
else:
break
else:
if len(exceptions) == 1:
raise exceptions[0]
model = str(exceptions[0])
if all(str(exc) == model for exc in exceptions):
raise exceptions[0]
try:
from aioxmpp.errors import MultiOSError # type:ignore
except ImportError:
MultiOSError = OSError
raise MultiOSError(
"could not connect to [{}]:{}".format(host, port),
exceptions,
)
elif sock is None:
raise ValueError("sock must not be None if host and/or port are None")
else:
sock.setblocking(False)
protocol = protocol_factory()
waiter = asyncio.Future(loop=loop) # type: asyncio.Future[None]
transport = STARTTLSTransport(loop, sock, protocol,
ssl_context_factory=ssl_context_factory,
waiter=waiter,
use_starttls=use_starttls,
**kwargs)
await waiter
return transport, protocol
| 1.71875 | 2 |
pyqmc/__init__.py | maximegodin/pyqmc | 0 | 73604 | # -*- coding: utf-8 -*-
"""Top-level package for pyqmc."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.0'
| 0.46875 | 0 |
models/AlexNet_bn.py | SpiritedAwayCN/ImageNet-Classification | 4 | 73732 | '''
Hyperparameters:
    learning rate
    weight decay
Dense: fully connected
BatchNormalization: (x - mean) / sqrt{stddev^2 + epsilon} * a + b
'''
import tensorflow as tf
import constants as c
from tensorflow import keras
from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D, MaxPool2D, Flatten, Dense, Dropout
class AlexNet_BN(tf.keras.models.Model):
def __init__(self, **kwargs):
super(AlexNet_BN, self).__init__(**kwargs)
        # The 2013 ImageNet winner was a re-tuned AlexNet (ZFNet): first layer uses 7x7 kernels with 2x2 stride, other layers unchanged
self.conv1 = Conv2D(filters=96, kernel_size=(11,11), strides=(4,4), activation='relu', padding="same")
self.bn_1 = BatchNormalization(momentum=0.9, epsilon=1e-5)
self.pool1 = MaxPool2D(pool_size=(3,3), strides=(2,2), padding="same")
self.conv2 = Conv2D(filters=256, kernel_size=(5,5), strides=(2,2), activation='relu', padding="same")
self.bn_2 = BatchNormalization(momentum=0.9, epsilon=1e-5)
self.pool2 = MaxPool2D(pool_size=(3,3), strides=(2,2), padding="same")
self.conv3 = Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same")
self.conv4 = Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same")
self.conv5 = Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), activation='relu', padding="same")
self.pool3 = MaxPool2D(pool_size=(3,3), strides=(2,2), padding="same")
self.fc_1 = Dense(4096, activation='relu')
self.fc_2 = Dense(4096, activation='relu')
self.fc_3 = Dense(c.num_class, activation='softmax')
def call(self, inputs, training):
res = self.conv1(inputs)
res = self.bn_1(res, training=training)
res = self.pool1(res)
res = self.conv2(res)
res = self.bn_2(res, training=training)
res = self.pool2(res)
res = self.conv3(res)
res = self.conv4(res)
res = self.conv5(res)
res = self.pool3(res)
res = Flatten()(res)
res = self.fc_1(res)
        if training:
            res = Dropout(rate=0.5)(res, training=True) # Pass training explicitly so dropout is actually applied outside a fit loop
        res = self.fc_2(res)
        if training:
            res = Dropout(rate=0.5)(res, training=True)
outputs = self.fc_3(res)
return outputs
if __name__ == '__main__':
# img_input = keras.layers.Input(shape=(img_rows,img_cols,img_channels))
# output = alexNetInference(img_input,num_classes)
# alexnet = keras.models.Model(img_input, output)
# alexnet.save('alexNet_cifar10.h5')
    model = AlexNet_BN()
model.build((None, ) + c.input_shape)
cnt1 = cnt2 = 0
for v in model.trainable_variables:
print(v.name)
cnt1 += 1
if 'kernel' in v.name:
cnt2 += 1
print(cnt1, cnt2)
    model.summary()
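
    # Illustrative smoke test (my addition; assumes c.input_shape and
    # c.num_class are defined in constants.py):
    x = tf.random.normal((2,) + c.input_shape)
    y = model(x, training=False)
    print(y.shape)  # expected: (2, c.num_class)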
| 2.28125 | 2 |
deregister_from_elb.py | SaschaMoellering/aws-docker-scripts | 4 | 73860 | import sys
import getopt
import boto
import boto.ec2.elb
import aws_library
__author__ = 'sascha.moellering'
def main(argv):
    image = ''
    tag = ''
    elb_name = ''
    stage = ''
    region = ''
    mode = ''
try:
opts, args = getopt.getopt(argv, "hi:t:e:s:g:m:",
["image=", "tag=", "elb=", "stage=", "region=", "mode="])
except getopt.GetoptError:
print 'deregister_from_elb.py -i <image> -t <tag> -e <elb> -s <stage> -g <region> -m <mode>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
            print 'deregister_from_elb.py -i <image> -t <tag> -e <elb> -s <stage> -g <region> -m <mode>'
sys.exit()
elif opt in ("-i", "--image"):
image = arg
elif opt in ("-e", "--elb"):
elb_name = arg
elif opt in ("-s", "--stage"):
stage = arg
elif opt in ("-t", "--tag"):
tag = arg
elif opt in ("-g", "--region"):
region = arg
elif opt in ("-m", "--mode"):
mode = arg
print 'Using image {0}'.format(image)
print 'Using tag {0}'.format(tag)
print 'Using elb {0}'.format(elb_name)
print 'Using stage {0}'.format(stage)
print 'Using region {0}'.format(region)
print 'Using mode {0}'.format(mode)
deregister_from_elb(region=region, image=image, tag=tag, elb=elb_name, mode=mode)
sys.exit(0)
def deregister_from_elb(region, image, tag, elb, mode):
conn_elb = boto.ec2.elb.connect_to_region(region_name=region)
lb_list = conn_elb.get_all_load_balancers([elb])
if len(lb_list) == 0:
print "No ELB {0} found".format(elb)
sys.exit(1)
lb = lb_list[0]
aws_library.delete_instances_from_lb(image, tag, lb, region, mode)
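
# Example invocation (illustrative values only):
#   python deregister_from_elb.py -i my-image -t 1.0.0 -e my-elb -s prod -g eu-west-1 -m docker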
if __name__ == "__main__":
main(sys.argv[1:]) | 1.15625 | 1 |
lib/third_party/mcu_vendor/espressif/esp-idf/tools/tiny-test-fw/CIAssignExampleTest.py | NoMaY-tmp/ex2 | 46 | 73988 | <filename>lib/third_party/mcu_vendor/espressif/esp-idf/tools/tiny-test-fw/CIAssignExampleTest.py<gh_stars>10-100
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command line tool to assign example tests to CI test jobs.
"""
# TODO: Need to handle running examples on different chips
import os
import sys
import re
import argparse
import yaml
test_fw_path = os.getenv("TEST_FW_PATH")
if test_fw_path:
sys.path.insert(0, test_fw_path)
from Utility import CaseConfig, SearchCases, GitlabCIJob
class Group(object):
MAX_EXECUTION_TIME = 30
MAX_CASE = 15
SORT_KEYS = ["env_tag"]
def __init__(self, case):
self.execution_time = 0
self.case_list = [case]
self.filters = dict(zip(self.SORT_KEYS, [case.case_info[x] for x in self.SORT_KEYS]))
def accept_new_case(self):
"""
check if allowed to add any case to this group
:return: True or False
"""
max_time = (sum([x.case_info["execution_time"] for x in self.case_list]) < self.MAX_EXECUTION_TIME)
max_case = (len(self.case_list) < self.MAX_CASE)
return max_time and max_case
def add_case(self, case):
"""
add case to current group
:param case: test case
:return: True if add succeed, else False
"""
added = False
if self.accept_new_case():
for key in self.filters:
if case.case_info[key] != self.filters[key]:
break
else:
self.case_list.append(case)
added = True
return added
def output(self):
"""
output data for job configs
:return: {"Filter": case filter, "CaseConfig": list of case configs for cases in this group}
"""
output_data = {
"Filter": self.filters,
"CaseConfig": [{"name": x.case_info["name"]} for x in self.case_list],
}
return output_data
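
# Illustrative grouping behaviour (the stand-in case object below is my
# assumption; real cases come from SearchCases/CaseConfig):
#
#   class FakeCase(object):
#       def __init__(self, env_tag, seconds):
#           self.case_info = {"env_tag": env_tag,
#                             "execution_time": seconds,
#                             "name": "dummy"}
#
#   g = Group(FakeCase("UT_T1_1", 10))
#   g.add_case(FakeCase("UT_T1_1", 10))    # True: same env_tag, within limits
#   g.add_case(FakeCase("SSC_T2_1", 5))    # False: different env_tag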
class AssignTest(object):
"""
Auto assign tests to CI jobs.
:param test_case: path of test case file(s)
:param ci_config_file: path of ``.gitlab-ci.yml``
"""
CI_TEST_JOB_PATTERN = re.compile(r"^example_test_.+")
def __init__(self, test_case, ci_config_file):
self.test_cases = self._search_cases(test_case)
self.jobs = self._parse_gitlab_ci_config(ci_config_file)
def _parse_gitlab_ci_config(self, ci_config_file):
with open(ci_config_file, "r") as f:
            ci_config = yaml.load(f, Loader=yaml.SafeLoader)
job_list = list()
for job_name in ci_config:
if self.CI_TEST_JOB_PATTERN.search(job_name) is not None:
job_list.append(GitlabCIJob.Job(ci_config[job_name], job_name))
return job_list
@staticmethod
def _search_cases(test_case, case_filter=None):
"""
:param test_case: path contains test case folder
:param case_filter: filter for test cases
:return: filtered test case list
"""
test_methods = SearchCases.Search.search_test_cases(test_case)
return CaseConfig.filter_test_cases(test_methods, case_filter if case_filter else dict())
def _group_cases(self):
"""
separate all cases into groups according group rules. each group will be executed by one CI job.
:return: test case groups.
"""
groups = []
for case in self.test_cases:
for group in groups:
# add to current group
if group.add_case(case):
break
else:
# create new group
groups.append(Group(case))
return groups
def assign_cases(self):
"""
separate test cases to groups and assign test cases to CI jobs.
:raise AssertError: if failed to assign any case to CI job.
:return: None
"""
failed_to_assign = []
test_groups = self._group_cases()
for group in test_groups:
for job in self.jobs:
if job.match_group(group):
job.assign_group(group)
break
else:
failed_to_assign.append(group)
assert not failed_to_assign
def output_configs(self, output_path):
"""
:param output_path: path to output config files for each CI job
:return: None
"""
if not os.path.exists(output_path):
os.makedirs(output_path)
for job in self.jobs:
job.output_config(output_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("test_case",
help="test case folder or file")
parser.add_argument("ci_config_file",
help="gitlab ci config file")
parser.add_argument("output_path",
help="output path of config files")
args = parser.parse_args()
assign_test = AssignTest(args.test_case, args.ci_config_file)
assign_test.assign_cases()
assign_test.output_configs(args.output_path)
| 1.578125 | 2 |
mne/datasets/hf_sef/__init__.py | fmamashli/mne-python | 1,953 | 74116 | """HF-SEF dataset."""
from .hf_sef import data_path
| -0.030029 | 0 |
pytorch/MaskRCNN/MaskRCNN.py | markpp/object_detectors | 2 | 74244 | <reponame>markpp/object_detectors
import torch
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
import numpy as np
import cv2
def create_model(num_classes,pretrained=True):
# load an instance segmentation model pre-trained on COCO
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=pretrained)
# get the number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
# now get the number of input features for the mask classifier
in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 256
# and replace the mask predictor with a new one
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes)
return model
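
# Illustrative use (my sketch; num_classes=2 assumes background plus one
# object class):
#   model = create_model(num_classes=2)
#   model.eval()
#   with torch.no_grad():
#       preds = model([torch.rand(3, 300, 400)])  # one dict per image with boxes/labels/scores/masks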
def _evaluate_iou(target, pred):
"""
Evaluate intersection over union (IOU) for target from dataset and output prediction
from model
"""
if pred["boxes"].shape[0] == 0:
# no box detected
return torch.tensor(0.0, device=pred["boxes"].device)
return torchvision.ops.box_iou(target["boxes"], pred["boxes"]).diag().mean()
def _plot_boxes(imgs, targets, preds):
"""
Plot the target and prediction boxes
"""
dets = []
for img, tar, pred in zip(imgs, targets, preds):
out = img.cpu()
out[0] = out[0] * 0.229 + 0.485
out[1] = out[1] * 0.224 + 0.456
out[2] = out[2] * 0.225 + 0.406
out = out.mul(255).permute(1, 2, 0).byte().numpy()
for b,l in zip(tar["boxes"],tar["labels"]):
x1, y1, x2, y2 = [int(x) for x in b.tolist()]
cv2.rectangle(out,(x1, y1),(x2, y2),(255,0,0),3)
for b,l,s in zip(pred["boxes"],pred["labels"],pred["scores"]):
score = s.item()
if score > 0.25:
x1, y1, x2, y2 = [int(x) for x in b.tolist()]
cv2.rectangle(out,(x1, y1),(x2, y2),(0,0,255),2)
cv2.putText(out,"{:.2f}".format(score), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255), 2)
if len(dets):
dets = np.concatenate((dets, out), axis=1)
else:
dets = out
return dets
| 2.203125 | 2 |
tendie_expenses.py | Saumen95/kipte | 1 | 74372 | import os
import calendar
from flask import request, session
from flask_session import Session
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
from datetime import datetime
from helpers import convertSQLToDict
# Create engine object to manage connections to DB, and scoped session to separate user interactions with DB
engine = create_engine(os.getenv("DATABASE_URL"))
db = scoped_session(sessionmaker(bind=engine))
# Add expense(s) to the users expense records
# There are two entry points for this: 1) 'addexpenses' route and 2) 'index' route. #1 allows many expenses whereas #2 only allows 1 expense per POST.
def addExpenses(formData, userID):
expenses = []
expense = {"description": None, "category": None,
"date": None, "amount": None, "payer": None}
# Check if the user is submitting via 'addexpenses' or 'index' route - this determines if a user is adding 1 or potentially many expenses in a single POST
if "." not in formData[0][0]:
for key, value in formData:
# Add to dictionary
expense[key] = value.strip()
# Convert the amount from string to float for the DB
expense["amount"] = float(expense["amount"])
# Add dictionary to list (to comply with design/standard of expensed.html)
expenses.append(expense)
# User is submitting via 'addexpenses' route
else:
counter = 0
for key, value in formData:
# Keys are numbered by default in HTML form. Remove those numbers so we can use the HTML element names as keys for the dictionary.
cleanKey = key.split(".")
# Add to dictionary
expense[cleanKey[0]] = value.strip()
# Every 5 loops add the expense to the list of expenses (because there are 5 fields for an expense record)
counter += 1
if counter % 5 == 0:
# Store the amount as a float
expense["amount"] = float(expense["amount"])
# Add dictionary to list
expenses.append(expense.copy())
# Insert expenses into DB
for expense in expenses:
now = datetime.now().strftime("%m/%d/%Y %H:%M:%S")
db.execute("INSERT INTO expenses (description, category, expenseDate, amount, payer, submitTime, user_id) VALUES (:description, :category, :expenseDate, :amount, :payer, :submitTime, :usersID)",
{"description": expense["description"], "category": expense["category"], "expenseDate": expense["date"], "amount": expense["amount"], "payer": expense["payer"], "submitTime": now, "usersID": userID})
db.commit()
return expenses
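
# Illustrative formData shapes (my examples, matching the parsing above):
#   'index' route (single expense, keys without dots):
#     [("description", "Coffee"), ("category", "Food"),
#      ("date", "01/15/2021"), ("amount", "4.50"), ("payer", "Self")]
#   'addexpenses' route (many expenses, keys numbered like "description.1"):
#     [("description.1", "Coffee"), ("category.1", "Food"), ...]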
# Get and return the users lifetime expense history
def getHistory(userID):
results = db.execute("SELECT description, category, expenseDate AS date, payer, amount, submitTime FROM expenses WHERE user_id = :usersID ORDER BY submitTime ASC",
{"usersID": userID}).fetchall()
history = convertSQLToDict(results)
return history
# Get and return an existing expense record with ID from the DB
def getExpense(formData, userID):
expense = {"description": None, "category": None,
"date": None, "amount": None, "payer": None, "submitTime": None, "id": None}
expense["description"] = formData.get("oldDescription").strip()
expense["category"] = formData.get("oldCategory").strip()
expense["date"] = formData.get("oldDate").strip()
expense["amount"] = formData.get("oldAmount").strip()
expense["payer"] = formData.get("oldPayer").strip()
expense["submitTime"] = formData.get("submitTime").strip()
# Remove dollar sign and comma from the old expense so we can convert to float for the DB
expense["amount"] = float(
expense["amount"].replace("$", "").replace(",", ""))
# Query the DB for the expense unique identifier
expenseID = db.execute("SELECT id FROM expenses WHERE user_id = :usersID AND description = :oldDescription AND category = :oldCategory AND expenseDate = :oldDate AND amount = :oldAmount AND payer = :oldPayer AND submitTime = :oldSubmitTime",
{"usersID": userID, "oldDescription": expense["description"], "oldCategory": expense["category"], "oldDate": expense["date"], "oldAmount": expense["amount"], "oldPayer": expense["payer"], "oldSubmitTime": expense["submitTime"]}).fetchone()
# Make sure a record was found for the expense otherwise set as None
if expenseID:
expense["id"] = expenseID[0]
else:
expense["id"] = None
return expense
# Delete an existing expense record for the user
def deleteExpense(expense, userID):
result = db.execute("DELETE FROM expenses WHERE user_id = :usersID AND id = :oldExpenseID",
{"usersID": userID, "oldExpenseID": expense["id"]})
db.commit()
return result
# Update an existing expense record for the user
def updateExpense(oldExpense, formData, userID):
expense = {"description": None, "category": None,
"date": None, "amount": None, "payer": None}
expense["description"] = formData.get("description").strip()
expense["category"] = formData.get("category").strip()
expense["date"] = formData.get("date").strip()
expense["amount"] = formData.get("amount").strip()
expense["payer"] = formData.get("payer").strip()
# Convert the amount from string to float for the DB
expense["amount"] = float(expense["amount"])
# Make sure the user actually is submitting changes and not saving the existing expense again
hasChanges = False
for key, value in oldExpense.items():
# Exit the loop when reaching submitTime since that is not something the user provides in the form for a new expense
if key == "submitTime":
break
else:
if oldExpense[key] != expense[key]:
hasChanges = True
break
if hasChanges is False:
return None
# Update the existing record
now = datetime.now().strftime("%m/%d/%Y %H:%M:%S")
result = db.execute("UPDATE expenses SET description = :newDescription, category = :newCategory, expenseDate = :newDate, amount = :newAmount, payer = :newPayer, submitTime = :newSubmitTime WHERE id = :existingExpenseID AND user_id = :usersID",
{"newDescription": expense["description"], "newCategory": expense["category"], "newDate": expense["date"], "newAmount": expense["amount"], "newPayer": expense["payer"], "newSubmitTime": now, "existingExpenseID": oldExpense["id"], "usersID": userID}).rowcount
db.commit()
# Make sure result is not empty (indicating it could not update the expense)
if result:
# Add dictionary to list (to comply with design/standard of expensed.html)
expenses = []
expenses.append(expense)
return expenses
else:
return None
| 2.359375 | 2 |
epg/epgcpmg.py | jtamir/mri-sim-py | 5 | 74500 | <filename>epg/epgcpmg.py
#!/usr/bin/python
# EPG CPMG simulation code, based off of Matlab scripts from <NAME> <<EMAIL>>
# 2015 <NAME> <<EMAIL>>
import numpy as np
from warnings import warn
def rf(FpFmZ, alpha):
"Same as rf2, but only returns FpFmZ"""
return rf2(FpFmZ, alpha)[0]
def rf2(FpFmZ, alpha):
""" Propagate EPG states through an RF rotation of
alpha (radians). Assumes CPMG condition, i.e.
magnetization lies on the real x axis.
INPUT:
FpFmZ = 3xN vector of F+, F- and Z states.
alpha = RF pulse flip angle in radians
OUTPUT:
FpFmZ = Updated FpFmZ state.
RR = RF rotation matrix (3x3).
"""
    # -- From Weigel et al, JMRI 41(2015)266-295, Eq. 21.
if abs(alpha) > 2 * np.pi:
warn('rf2: Flip angle should be in radians! alpha=%f' % alpha)
cosa2 = np.cos(alpha/2.)**2
sina2 = np.sin(alpha/2.)**2
cosa = np.cos(alpha)
sina = np.sin(alpha)
RR = np.array([ [cosa2, sina2, sina],
[sina2, cosa2, -sina],
[-0.5 * sina, 0.5 * sina, cosa] ])
FpFmZ = np.dot(RR, FpFmZ)
return FpFmZ, RR
def rf_ex(FpFmZ, alpha):
"Same as rf2_ex, but only returns FpFmZ"""
return rf2_ex(FpFmZ, alpha)[0]
def rf2_ex(FpFmZ, alpha):
""" Propagate EPG states through an RF excitation of
alpha (radians) along the y direction, i.e. phase of pi/2.
INPUT:
FpFmZ = 3xN vector of F+, F- and Z states.
alpha = RF pulse flip angle in radians
OUTPUT:
FpFmZ = Updated FpFmZ state.
RR = RF rotation matrix (3x3).
"""
try:
alpha = alpha[0]
except:
pass
if abs(alpha) > 2 * np.pi:
warn('rf2_ex: Flip angle should be in radians! alpha=%f' % alpha)
cosa2 = np.cos(alpha/2.)**2
sina2 = np.sin(alpha/2.)**2
cosa = np.cos(alpha)
sina = np.sin(alpha)
RR = np.array([ [cosa2, -sina2, sina],
[-sina2, cosa2, sina],
[-0.5 * sina, -0.5 * sina, cosa] ])
FpFmZ = np.dot(RR, FpFmZ)
return FpFmZ, RR
def rf_prime(FpFmZ, alpha):
"""Same as rf_prime2, but only returns FpFmZ"""
return rf_prime2(FpFmZ, alpha)[0]
def rf_prime2(FpFmZ, alpha):
""" Compute the gradient of the RF rotation operator, where
alpha (radians) is the RF rotation. Assumes CPMG condition, i.e.
magnetization lies on the real x axis.
INPUT:
FpFmZ = 3xN vector of F+, F- and Z states.
alpha = RF pulse flip angle in radians
OUTPUT:
FpFmZ = Derivative of FpFmZ state w.r.t. alpha
RR = Derivative of RF rotation matrix (3x3) w.r.t. alpha
"""
if abs(alpha) > 2 * np.pi:
warn('rf_prime2: Flip angle should be in radians! alpha=%f' % alpha)
RR = np.array([ [-np.cos(alpha/2.) * np.sin(alpha/2.), np.cos(alpha/2.) * np.sin(alpha/2.), np.cos(alpha)],
[np.cos(alpha/2.) * np.sin(alpha/2.), -np.cos(alpha/2.) * np.sin(alpha/2.), -np.cos(alpha)],
[-0.5 * np.cos(alpha), 0.5 * np.cos(alpha), -np.sin(alpha)] ])
FpFmZ = np.dot(RR, FpFmZ)
return FpFmZ, RR
def rf_B1_prime(FpFmZ, alpha, B1):
"""Same as rf_B1_prime2, but only returns FpFmZ"""
return rf_B1_prime2(FpFmZ, alpha, B1)[0]
def rf_B1_prime2(FpFmZ, alpha, B1):
""" Compute the gradient of B1 inhomogeneity w.r.t. RF refocusing operator, where
alpha (radians) is the RF rotation and B1 is the B1 homogeneity (0, 2).
Assumes CPMG condition, i.e. magnetization lies on the real x axis.
INPUT:
FpFmZ = 3xN vector of F+, F- and Z states.
alpha = RF pulse flip angle in radians
B1 = B1 Homogeneity, where 1. is homogeneous
OUTPUT:
FpFmZ = Derivative of FpFmZ state w.r.t. alpha
RR = Derivative of RF rotation matrix (3x3) w.r.t. B1
"""
if abs(alpha) > 2 * np.pi:
warn('rf_B1_prime2: Flip angle should be in radians! alpha=%f' % alpha)
if B1 < 0 or B1 > 2:
warn('rf_B1_prime2: B1 Homogeneity should be a percentage between (0, 2)')
RR = np.array([ [-alpha*np.cos(B1*alpha/2.) * np.sin(B1*alpha/2.), alpha*np.cos(B1*alpha/2.) * np.sin(B1*alpha/2.), alpha*np.cos(B1*alpha)],
[alpha*np.cos(B1*alpha/2.) * np.sin(B1*alpha/2.), -alpha*np.cos(B1*alpha/2.) * np.sin(B1*alpha/2.), -alpha*np.cos(B1*alpha)],
[-0.5*alpha*np.cos(B1*alpha), 0.5*alpha*np.cos(B1*alpha), -alpha*np.sin(B1*alpha)] ])
FpFmZ = np.dot(RR, FpFmZ)
return FpFmZ, RR
def rf_ex_B1_prime(FpFmZ, alpha, B1):
"""Gradient of B1 inhomogeneity w.r.t. RF excitation operator, where
alpha (radians) is the RF rotation and B1 is the B1 honogeneity (0, 2).
Assumes CPMG condition, i.e. RF excitation in the y direction.
INPUT:
FpFmZ = 3xN vector of F+, F- and Z states.
alpha = RF pulse flip angle in radians
B1 = B1 Homogeneity, where 1. is homogeneous
OUTPUT:
FpFmZ = Derivative of FpFmZ state w.r.t. alpha
"""
if abs(alpha) > 2 * np.pi:
warn('rf_ex_B1_prime2: Flip angle should be in radians! alpha=%f' % alpha)
if B1 < 0 or B1 > 2:
warn('rf_ex_B1_prime: B1 Homogeneity should be a percentage between (0, 2)')
RR = np.array([ [-alpha*np.cos(B1*alpha/2.) * np.sin(B1*alpha/2.), alpha*np.cos(B1*alpha/2.) * np.sin(B1*alpha/2.), alpha*np.cos(B1*alpha)],
[alpha*np.cos(B1*alpha/2.) * np.sin(B1*alpha/2.), -alpha*np.cos(B1*alpha/2.) * np.sin(B1*alpha/2.), alpha*np.cos(B1*alpha)],
[-0.5*alpha*np.cos(B1*alpha), -0.5*alpha*np.cos(B1*alpha), -alpha*np.sin(B1*alpha)] ])
FpFmZ = np.dot(RR, FpFmZ)
return FpFmZ
def relax_mat(T, T1, T2):
E2 = np.exp(-T/T2)
E1 = np.exp(-T/T1)
EE = np.diag([E2, E2, E1]) # Decay of states due to relaxation alone.
return EE
def relax_mat_prime_T1(T, T1, T2):
E1_prime_T1 = T * np.exp(-T/T1) / T1**2
return np.diag([0, 0, E1_prime_T1])
def relax_mat_prime_T2(T, T1, T2):
E2_prime_T2 = T * np.exp(-T/T2) / T2**2
return np.diag([E2_prime_T2, E2_prime_T2, 0])
def relax_prime_T1(FpFmZ, T, T1, T2):
"""returns E'(T1) FpFmZ + E0'(T1)"""
EE_prime_T1 = relax_mat_prime_T1(T, T1, T2)
RR = -EE_prime_T1[2,2]
FpFmZ = np.dot(EE_prime_T1, FpFmZ)
FpFmZ[2,0] = FpFmZ[2,0] + RR
return FpFmZ
def relax_prime_T2(FpFmZ, T, T1, T2):
"""returns E'(T2) FpFmZ"""
EE_prime_T2 = relax_mat_prime_T2(T, T1, T2)
FpFmZ = np.dot(EE_prime_T2, FpFmZ)
return FpFmZ
def relax(FpFmZ, T, T1, T2):
"""Same as relax2, but only returns FpFmZ"""
return relax2(FpFmZ, T, T1, T2)[0]
def relax2(FpFmZ, T, T1, T2):
""" Propagate EPG states through a period of relaxation over
an interval T.
INPUT:
FpFmZ = 3xN vector of F+, F- and Z states.
T1, T2 = Relaxation times (same as T)
T = Time interval (same as T1,T2)
OUTPUT:
FpFmZ = updated F+, F- and Z states.
EE = decay matrix, 3x3 = diag([E2 E2 E1]);
"""
E2 = np.exp(-T/T2)
E1 = np.exp(-T/T1)
EE = np.diag([E2, E2, E1]) # Decay of states due to relaxation alone.
RR = 1 - E1 # Mz Recovery, affects only Z0 state, as
# recovered magnetization is not dephased.
FpFmZ = np.dot(EE, FpFmZ) # Apply Relaxation
FpFmZ[2,0] = FpFmZ[2,0] + RR # Recovery
return FpFmZ, EE
def grad(FpFmZ, noadd=False):
"""Propagate EPG states through a "unit" gradient. Assumes CPMG condition,
i.e. all states are real-valued.
INPUT:
FpFmZ = 3xN vector of F+, F- and Z states.
noadd = True to NOT add any higher-order states - assume
that they just go to zero. Be careful - this
speeds up simulations, but may compromise accuracy!
OUTPUT:
Updated FpFmZ state.
"""
# Gradient does not affect the Z states.
if noadd == False:
FpFmZ = np.hstack((FpFmZ, [[0],[0],[0]])) # add higher dephased state
FpFmZ[0,:] = np.roll(FpFmZ[0,:], 1) # shift Fp states
FpFmZ[1,:] = np.roll(FpFmZ[1,:], -1) # shift Fm states
FpFmZ[1,-1] = 0 # Zero highest Fm state
FpFmZ[0,0] = FpFmZ[1,0] # Fill in lowest Fp state
return FpFmZ
def FSE_TE(FpFmZ, alpha, TE, T1, T2, noadd=False, recovery=True):
""" Propagate EPG states through a full TE, i.e.
relax -> grad -> rf -> grad -> relax.
Assumes CPMG condition, i.e. all states are real-valued.
INPUT:
FpFmZ = 3xN vector of F+, F- and Z states.
alpha = RF pulse flip angle in radians
T1, T2 = Relaxation times (same as TE)
TE = Echo Time interval (same as T1, T2)
noadd = True to NOT add any higher-order states - assume
that they just go to zero. Be careful - this
speeds up simulations, but may compromise accuracy!
OUTPUT:
FpFmZ = updated F+, F- and Z states.
"""
EE = relax_mat(TE/2., T1, T2)
if recovery:
FpFmZ = relax(FpFmZ, TE/2., T1, T2)
else:
FpFmZ = np.dot(EE, FpFmZ)
FpFmZ = grad(FpFmZ, noadd)
FpFmZ = rf(FpFmZ, alpha)
FpFmZ = grad(FpFmZ, noadd)
if recovery:
FpFmZ = relax(FpFmZ, TE/2., T1, T2)
else:
FpFmZ = np.dot(EE, FpFmZ)
return FpFmZ
def FSE_TE_prime_alpha(FpFmZ, alpha, TE, T1, T2, noadd=False, recovery=True):
""" Gradient of EPG over a full TE, w.r.t. flip angle alpha, i.e.
relax -> grad -> rf_prime -> grad -> relax_hat,
where rf_prime is the derivative of the RF pulse matrix w.r.t. alpha,
and relax_hat is the relaxation without longitudinal recovery
Assumes CPMG condition, i.e. all states are real-valued.
INPUT:
FpFmZ = 3xN vector of F+, F- and Z states.
alpha = RF pulse flip angle in radians
T1, T2 = Relaxation times (same as TE)
TE = Echo Time interval (same as T1, T2)
noadd = True to NOT add any higher-order states - assume
that they just go to zero. Be careful - this
speeds up simulations, but may compromise accuracy!
recovery = True to include T1 recovery in the Z0 state.
OUTPUT:
FpFmZ = updated F+, F- and Z states.
"""
FpFmZ, EE = relax2(FpFmZ, TE/2., T1, T2)
FpFmZ = grad(FpFmZ, noadd)
FpFmZ = rf_prime(FpFmZ, alpha)
FpFmZ = grad(FpFmZ, noadd)
FpFmZ = np.dot(EE, FpFmZ)
return FpFmZ
def FSE_TE_prime1_T2(FpFmZ, alpha, TE, T1, T2, noadd=False):
""" Returns E(T2) G R G E'(T2) FpFmZ"""
EE = relax_mat(TE/2., T1, T2)
EE_prime = relax_mat_prime_T2(TE/2., T1, T2)
FpFmZ = np.dot(EE_prime, FpFmZ)
FpFmZ = grad(FpFmZ, noadd)
FpFmZ = rf(FpFmZ, alpha)
FpFmZ = grad(FpFmZ, noadd)
FpFmZ = np.dot(EE, FpFmZ)
return FpFmZ
def FSE_TE_prime2_T2(FpFmZ, alpha, TE, T1, T2, noadd=False):
""" Returns E'(T2) G R G (E(T2) FpFmZ + E0)"""
EE_prime = relax_mat_prime_T2(TE/2., T1, T2)
FpFmZ = relax(FpFmZ, TE/2., T1, T2)
FpFmZ = grad(FpFmZ, noadd)
FpFmZ = rf(FpFmZ, alpha)
FpFmZ = grad(FpFmZ, noadd)
FpFmZ = np.dot(EE_prime, FpFmZ)
return FpFmZ
def FSE_TE_prime1_T1(FpFmZ, alpha, TE, T1, T2, noadd=False):
""" Returns E(T1) G R G (E'(T1) FpFmZ + E0'(T1))"""
EE = relax_mat(TE/2., T1, T2)
FpFmZ = relax_prime_T1(FpFmZ, TE/2., T1, T2) # E'(T1) FpFmZ + E0'(T1)
FpFmZ = grad(FpFmZ, noadd)
FpFmZ = rf(FpFmZ, alpha)
FpFmZ = grad(FpFmZ, noadd)
FpFmZ = np.dot(EE, FpFmZ)
return FpFmZ
def FSE_TE_prime2_T1(FpFmZ, alpha, TE, T1, T2, noadd=False):
""" Returns E'(T1) G R G E(T1) FpFmZ + E0'(T1)"""
EE = relax_mat(TE/2., T1, T2)
FpFmZ = np.dot(EE, FpFmZ)
FpFmZ = grad(FpFmZ, noadd)
FpFmZ = rf(FpFmZ, alpha)
FpFmZ = grad(FpFmZ, noadd)
FpFmZ = relax_prime_T1(FpFmZ, TE/2., T1, T2) # E'(T1) FpFmZ + E0'(T1)
return FpFmZ
def FSE_TE_prime_B1(FpFmZ, alpha, TE, T1, T2, B1, noadd=False):
""" Gradient of EPG over a full TE, w.r.t. B1 homogeneity fraciton B1, i.e.
relax -> grad -> rf_B1_prime -> grad -> relax_hat,
where rf_B1_prime is the derivative of the RF pulse matrix w.r.t. B1,
and relax_hat is the relaxation without longitudinal recovery
Assumes CPMG condition, i.e. all states are real-valued.
INPUT:
FpFmZ = 3xN vector of F+, F- and Z states.
alpha = RF pulse flip angle in radians
T1, T2 = Relaxation times (same as TE)
TE = Echo Time interval (same as T1, T2)
B1 = fraction of B1 homogeneity (1 is fully homogeneous)
noadd = True to NOT add any higher-order states - assume
that they just go to zero. Be careful - this
speeds up simulations, but may compromise accuracy!
recovery = True to include T1 recovery in the Z0 state.
OUTPUT:
FpFmZ = updated F+, F- and Z states.
"""
FpFmZ, EE = relax2(FpFmZ, TE/2., T1, T2)
FpFmZ = grad(FpFmZ, noadd)
FpFmZ = rf_B1_prime(FpFmZ, alpha, B1)
FpFmZ = grad(FpFmZ, noadd)
FpFmZ = np.dot(EE, FpFmZ)
return FpFmZ
### Gradients of full FSE EPG function across T time points
def FSE_signal_prime_alpha_idx(angles_rad, TE, T1, T2, idx):
"""Gradient of EPG function at each time point w.r.t. RF pulse alpha_i"""
T = len(angles_rad)
zi = np.hstack((np.array([[1],[1],[0]]), np.zeros((3, T))))
z_prime = np.zeros((T, 1))
for i in range(T):
alpha = angles_rad[i]
if i < idx:
zi = FSE_TE(zi, alpha, TE, T1, T2, noadd=True)
z_prime[i] = 0
elif i == idx:
wi = FSE_TE_prime_alpha(zi, alpha, TE, T1, T2, noadd=True)
z_prime[i] = wi[0,0]
else:
wi = FSE_TE(wi, alpha, TE, T1, T2, noadd=True, recovery=False)
z_prime[i] = wi[0,0]
return z_prime
def FSE_signal_prime_T1(angles_rad, TE, T1, T2):
return FSE_signal_ex_prime_T1(np.pi/2, angles_rad, TE, T1, T2)
def FSE_signal_ex_prime_T1(angle_ex_rad, angles_rad, TE, T1, T2, B1=1.):
"""Gradient of EPG function at each time point w.r.t. T1"""
T = len(angles_rad)
try:
B1 = B1[0]
except:
pass
# since the grad doesn't depend on B1 inhomog, can just pre-scale flip angles
angle_ex_rad = B1 * np.copy(angle_ex_rad)
angles_rad = B1 * np.copy(angles_rad)
zi = np.hstack((rf_ex(np.array([[0],[0],[1]]), angle_ex_rad), np.zeros((3, T))))
z_prime = np.zeros((T, 1))
for i in range(T):
alpha = angles_rad[i]
if i == 0:
wi = np.zeros((3, T+1))
else:
wi = FSE_TE(wi, alpha, TE, T1, T2, noadd=True, recovery=False)
wi += FSE_TE_prime1_T1(zi, alpha, TE, T1, T2, noadd=True)
wi += FSE_TE_prime2_T1(zi, alpha, TE, T1, T2, noadd=True)
zi = FSE_TE(zi, alpha, TE, T1, T2, noadd=True)
z_prime[i] = wi[0,0]
return z_prime
def FSE_signal_prime_T2(angles_rad, TE, T1, T2):
return FSE_signal_ex_prime_T2(np.pi/2, angles_rad, TE, T1, T2)
def FSE_signal_ex_prime_T2(angle_ex_rad, angles_rad, TE, T1, T2, B1=1.):
"""Gradient of EPG function at each time point w.r.t. T2"""
T = len(angles_rad)
try:
B1 = B1[0]
except:
pass
# since the grad doesn't depend on B1 inhomog, can just pre-scale flip angles
angle_ex_rad = B1 * np.copy(angle_ex_rad)
angles_rad = B1 * np.copy(angles_rad)
zi = np.hstack((rf_ex(np.array([[0],[0],[1]]), angle_ex_rad), np.zeros((3, T))))
z_prime = np.zeros((T, 1))
for i in range(T):
alpha = angles_rad[i]
if i == 0:
wi = np.zeros((3, T+1))
else:
wi = FSE_TE(wi, alpha, TE, T1, T2, noadd=True, recovery=False)
wi += FSE_TE_prime1_T2(zi, alpha, TE, T1, T2, noadd=True)
wi += FSE_TE_prime2_T2(zi, alpha, TE, T1, T2, noadd=True)
zi = FSE_TE(zi, alpha, TE, T1, T2, noadd=True)
z_prime[i] = wi[0,0]
return z_prime
def FSE_signal_ex_prime_B1(angle_ex_rad, angles_rad, TE, T1, T2, B1):
"""Gradient of EPG function at each time point w.r.t. B1 Homogeneity.
Includes the excitation flip angle"""
T = len(angles_rad)
zi = np.hstack((np.array([[0],[0],[1]]), np.zeros((3, T+1))))
z_prime = np.zeros((T, 1))
wi = rf_ex_B1_prime(zi, angle_ex_rad, B1)
zi = rf_ex(zi, angle_ex_rad * B1)
for i in range(T):
alpha = angles_rad[i]
if i == 0:
xi = FSE_TE(wi, alpha * B1, TE, T1, T2, noadd=True, recovery=False)
else:
xi = FSE_TE(wi, alpha * B1, TE, T1, T2, noadd=True)
wi = FSE_TE_prime_B1(zi, alpha, TE, T1, T2, B1, noadd=True) + xi
zi = FSE_TE(zi, alpha * B1, TE, T1, T2, noadd=True)
z_prime[i] = wi[0,0]
return z_prime
### Full FSE EPG function across T time points
def FSE_signal_ex(angle_ex_rad, angles_rad, TE, T1, T2, B1=1.):
"""Same as FSE_signal2_ex, but only returns Mxy"""
return FSE_signal2_ex(angle_ex_rad, angles_rad, TE, T1, T2, B1)[0]
def FSE_signal(angles_rad, TE, T1, T2):
"""Same as FSE_signal2, but only returns Mxy"""
return FSE_signal2(angles_rad, TE, T1, T2)[0]
def FSE_signal2(angles_rad, TE, T1, T2):
"""Same as FSE_signal2_ex, but assumes excitation pulse is 90 degrees"""
return FSE_signal2_ex(np.pi/2., angles_rad, TE, T1, T2)
def FSE_signal2_ex(angle_ex_rad, angles_rad, TE, T1, T2, B1=1.):
"""Simulate Fast Spin-Echo CPMG sequence with specific flip angle train.
Prior to the flip angle train, an excitation pulse of angle_ex_rad degrees
is applied in the Y direction. The flip angle train is then applied in the X direction.
INPUT:
angles_rad = array of flip angles in radians equal to echo train length
TE = echo time/spacing
T1 = T1 value in seconds
T2 = T2 value in seconds
OUTPUT:
Mxy = Transverse magnetization at each echo time
Mz = Longitudinal magnetization at each echo time
"""
T = len(angles_rad)
Mxy = np.zeros((T,1))
Mz = np.zeros((T,1))
P = np.array([[0],[0],[1]]) # initially on Mz
try:
B1 = B1[0]
except:
pass
# pre-scale by B1 homogeneity
angle_ex_rad = B1 * np.copy(angle_ex_rad)
angles_rad = B1 * np.copy(angles_rad)
P = rf_ex(P, angle_ex_rad) # initial tip
for i in range(T):
alpha = angles_rad[i]
P = FSE_TE(P, alpha, TE, T1, T2)
Mxy[i] = P[0,0]
Mz[i] = P[2,0]
return Mxy, Mz
if __name__ == "__main__":
import matplotlib.pyplot as plt
T1 = 1000e-3
T2 = 200e-3
TE = 5e-3
N = 100
angles = 120 * np.ones((N,))
angles_rad = angles * np.pi / 180.
S = FSE_signal(angles_rad, TE, T1, T2)
S2 = abs(S)
plt.plot(TE*1000*np.arange(1, N+1), S2)
plt.xlabel('time (ms)')
plt.ylabel('signal')
plt.title('T1 = %.2f ms, T2 = %.2f ms' % (T1 * 1000, T2 * 1000))
plt.show()
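
    # Illustrative consistency check (my addition): the analytic T2
    # derivative should agree with a finite-difference estimate.
    dT2 = 1e-6
    fd = (FSE_signal(angles_rad, TE, T1, T2 + dT2) - S) / dT2
    an = FSE_signal_prime_T2(angles_rad, TE, T1, T2)
    print('max |analytic - finite diff| =', np.max(np.abs(an - fd)))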
| 1.835938 | 2 |
migrations/0.0.24/src/python/publish_migration.py | hythloday/pants | 11 | 74628 | #!/usr/bin/python
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import re
import sys
filename = sys.argv[1]
def extract_artifact(line):
splitline = line.split('%')
org = re.sub(r'^revision\.[a-z_]+\.', '', splitline[0])
name = re.sub(r'=.*', '', splitline[1].rstrip())
return (org, name)
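
# Illustrative example (input format inferred from the regexes above):
#   extract_artifact('revision.major.com.example%mylib=1.2.3\n')
#   -> ('com.example', 'mylib')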
with open(filename) as f:
base_dir = os.path.dirname(filename)
content = f.readlines()
for line in content:
# For each line get the org and name, make a directory with these
# and open the publish file.
artifact = extract_artifact(line)
(org, name) = artifact
publish_dir = os.path.join(base_dir, org, name)
if not os.path.exists(publish_dir):
os.makedirs(publish_dir)
with open(os.path.join(publish_dir, 'publish.properties'), 'a') as output:
output.write(line)
| 1.820313 | 2 |
rllib/agents/dqn/learner_thread.py | firebolt55439/ray | 3 | 74756 | <gh_stars>1-10
import queue
import threading
from ray.rllib.evaluation.metrics import get_learner_stats
from ray.rllib.policy.policy import LEARNER_STATS_KEY
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.timer import TimerStat
from ray.rllib.utils.window_stat import WindowStat
LEARNER_QUEUE_MAX_SIZE = 16
tf1, tf, tfv = try_import_tf()
class LearnerThread(threading.Thread):
"""Background thread that updates the local model from replay data.
The learner thread communicates with the main thread through Queues. This
is needed since Ray operations can only be run on the main thread. In
addition, moving heavyweight gradient ops session runs off the main thread
improves overall throughput.
"""
def __init__(self, local_worker):
threading.Thread.__init__(self)
self.learner_queue_size = WindowStat("size", 50)
self.local_worker = local_worker
self.inqueue = queue.Queue(maxsize=LEARNER_QUEUE_MAX_SIZE)
self.outqueue = queue.Queue()
self.queue_timer = TimerStat()
self.grad_timer = TimerStat()
self.overall_timer = TimerStat()
self.daemon = True
self.weights_updated = False
self.stopped = False
self.stats = {}
def run(self):
# Switch on eager mode if configured.
if self.local_worker.policy_config.get("framework") in ["tf2", "tfe"]:
tf1.enable_eager_execution()
while not self.stopped:
self.step()
def step(self):
with self.overall_timer:
with self.queue_timer:
ra, replay = self.inqueue.get()
if replay is not None:
prio_dict = {}
with self.grad_timer:
grad_out = self.local_worker.learn_on_batch(replay)
for pid, info in grad_out.items():
td_error = info.get(
"td_error",
info[LEARNER_STATS_KEY].get("td_error"))
prio_dict[pid] = (replay.policy_batches[pid].data.get(
"batch_indexes"), td_error)
self.stats[pid] = get_learner_stats(info)
self.grad_timer.push_units_processed(replay.count)
self.outqueue.put((ra, prio_dict, replay.count))
self.learner_queue_size.push(self.inqueue.qsize())
self.weights_updated = True
self.overall_timer.push_units_processed(replay and replay.count
or 0)
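
# Illustrative wiring (my sketch; `workers` and `replay_actor` follow the
# usual RLlib execution-plan conventions and are assumptions here):
#   learner_thread = LearnerThread(workers.local_worker())
#   learner_thread.start()
#   learner_thread.inqueue.put((replay_actor, replay_batch))
#   ra, prio_dict, count = learner_thread.outqueue.get()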
| 1.5 | 2 |
old_agents/qrps.py | ajmcastro/quantum-reinforcement-learning | 1 | 74884 | import time, math, cmath
import numpy as np
from functools import reduce
from qiskit import *
from qiskit.quantum_info import Statevector
from circuit_builder import CircuitBuilder
from agent import Agent
class QRPS_Agent(Agent):
def __init__(self, backend):
self.backend = backend
self.memory = {}
self.gamma = 0.0
self.n = 0.05
# def act2(self, env):
# transitions = np.array([
# [0.4, 0.2, 0.2, 0.2],
# [0.2, 0.4, 0.4, 0.4],
# [0.2, 0.2, 0.2, 0.2],
# [0.2, 0.2, 0.2, 0.2]
# ])
# flags = [1, 2]
# self.rank_two(transitions, flags)
def act(self, env):
env_state = env.state()
env_actions = env.actions()
# If only one action is available, there's nothing to learn here
if len(env_actions) == 1:
env.step(env_actions[0])
return
# Add to memory
if env_state not in self.memory:
            self.memory[env_state] = np.array([1.0] * len(env_actions)), np.arange(len(env_actions)), np.zeros(len(env_actions)) # float weights, index flags and per-action glow values
weights, flags, glow = self.memory[env_state]
sum_weights = np.sum(weights)
prob = np.array([h / sum_weights for h in weights])
print('Pr:', prob)
print('Flags:', flags)
# Quantum deliberation
max_tries = 3
action_index = None
for _ in range(max_tries):
action_index = self.rank_one(prob, flags, debug=False)
if action_index in flags:
break
reward = env.step(env_actions[action_index])
print("Action:", env_actions[action_index], reward)
self.update_values(env_state, env_actions, action_index, reward)
def update_values(self, state, actions, action_index, reward):
"Updates the weights, flags and glow values according to the received reward"
weights, flags, glows = self.memory[state]
glows = np.array([1.0 if action_index == i else (1 - self.n) * g for i, g in enumerate(glows)])
weights[action_index] = weights[action_index] - self.gamma * (weights[action_index] - 1) + glows[action_index] * reward
        flags = flags[flags != action_index] if reward < 0.0 else flags # unflag by value, not by array position
if len(flags) == 0:
            flags = np.array([i for i in range(len(actions)) if i != action_index])
self.memory[state] = weights, flags, glows
def prob_to_angles(self, prob, previous=1):
"Calculates the angles to encode the given probabilities"
def calc_angle(x):
return 2 * math.acos(math.sqrt(x))
if len(prob) == 2:
return [calc_angle(prob[0] / previous)] if previous != 0 else [0]
lhs, rhs = np.split(prob, 2)
angles = np.array([calc_angle(np.sum(lhs) / previous)])
angles = np.append(angles, self.prob_to_angles(lhs, previous=np.sum(lhs)))
angles = np.append(angles, self.prob_to_angles(rhs, previous=np.sum(rhs)))
return angles
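
    # Worked example (my addition): for the uniform distribution
    # [0.25, 0.25, 0.25, 0.25] this recursion yields [pi/2, pi/2, pi/2],
    # since 2 * acos(sqrt(0.5)) == pi/2 at every split.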
def rank_one(self, prob, flags, debug=False):
"Rank-one implementation of Reflective Projective Simulation"
num_qubits = math.ceil(math.log(len(prob), 2))
# Ensure lenght of probabilities is 2**num_qubits
if len(prob) != 2**num_qubits:
prob = np.append(prob, [0] * (2**num_qubits - len(prob)))
# Epsilon (probability of flagged actions)
epsilon = reduce(lambda e, i: e + prob[i], flags, 0.0)
# State preparation
U = CircuitBuilder().get_U(num_qubits, self.prob_to_angles(prob)).to_instruction()
# Quantum circuit
qreg = QuantumRegister(num_qubits, name='q')
circ = QuantumCircuit(qreg)
# Encode stationary distribution
circ.append(U, qreg)
k = math.floor(math.pi / (4 * math.sqrt(epsilon)))
for _ in range(k):
# Reflection around the flagged actions
circ.diagonal([-1 if i in flags else 1 for i in range(2**num_qubits)], qreg)
# Reflection around the stationary distribution
circ.append(U.inverse(), qreg)
circ.x(qreg)
if num_qubits == 1:
circ.z(qreg)
else:
circ.h(qreg[-1])
circ.mcx(qreg[:-1], qreg[-1])
circ.h(qreg[-1])
circ.x(qreg)
circ.append(U, qreg)
if debug:
print(circ.draw(fold=140))
circ.snapshot('sv')
# Sample from stationary distribution
circ.measure_all()
result = execute(circ, backend=self.backend, shots=1).result()
if debug:
resulting_sv = result.data()['snapshots']['statevector']['sv'][0]
print(Statevector(resulting_sv).probabilities_dict())
counts = result.get_counts(circ)
action_index = max(counts, key=counts.get)
return int(action_index, 2)
def rank_two(self, transitions, flags, debug=False):
eigvals = np.linalg.eigvals(transitions)
eigvals.sort()
num_qubits = int(math.log2(len(transitions)))
num_ancilla = math.ceil(math.log2(1 / math.sqrt(1 - abs(eigvals[-2])))) + 1
# Stationary distribution
S, U = np.linalg.eig(transitions)
stat_distr = np.array(U[:, np.where(np.abs(S - 1.) < 1e-8)[0][0]].flat)
stat_distr = stat_distr / np.sum(stat_distr)
print(stat_distr)
# Epsilon (probability of flagged actions)
epsilon = reduce(lambda e, i: e + stat_distr[i], flags, 0.0)
# Reverse transition matrix
rev_transitions = transitions * np.array(stat_distr)
rev_transitions = rev_transitions.transpose() / np.array(stat_distr)
# Angles
stat_angles = self.prob_to_angles(stat_distr)
angles = np.concatenate([self.prob_to_angles(transitions[:,i]) for i in range(2**num_qubits)])
rev_angles = np.concatenate([self.prob_to_angles(rev_transitions[:,i]) for i in range(2**num_qubits)])
# Quantum circuit
anc = AncillaRegister(num_ancilla, 'anc')
qreg1 = QuantumRegister(num_qubits, 'reg1')
qreg2 = QuantumRegister(num_qubits, 'reg2')
creg = ClassicalRegister(num_qubits, 'creg')
circ = QuantumCircuit(anc, qreg1, qreg2, creg)
# Encode stationary distribution
U = CircuitBuilder().get_U(num_qubits, stat_angles)
circ.append(U.to_instruction(), qreg1)
Up = CircuitBuilder().get_Up(num_qubits, angles)
circ.append(Up.to_instruction(), qreg1[:] + qreg2[:])
ARO = CircuitBuilder().get_ARO(num_qubits, num_ancilla)
k = math.floor(math.pi / (4 * math.sqrt(epsilon)))
for _ in range(k):
circ.diagonal([-1 if i in flags else 1 for i in range(2**num_qubits)], qreg1)
circ.append(ARO, anc[:] + qreg1[:] + qreg2[:])
print(circ.draw(fold=240))
# circ.snapshot('sv')
circ.measure(qreg1, creg)
# Bind transition angles
parameters = CircuitBuilder().get_parameters(num_qubits)
binds = dict(zip(parameters, np.concatenate([angles, rev_angles])))
start = time.time()
result = execute(circ, backend=self.backend, shots=2048, parameter_binds=[binds]).result()
end = time.time()
if debug:
resulting_sv = result.data()['snapshots']['statevector']['sv'][0]
print(Statevector(resulting_sv).probabilities_dict())
print("RUN took:", end - start)
print(result.get_counts(circ))
| 2.40625 | 2 |
cogbot/extensions/groups/error.py | Arcensoth/cogbot | 8 | 75012 | <gh_stars>1-10
class GroupDirectoryError(Exception):
pass
class NoSuchRoleIdError(GroupDirectoryError):
def __init__(self, *args, role_id, **kwargs):
super().__init__(*args, **kwargs)
self.role_id = role_id
class NoSuchRoleNameError(GroupDirectoryError):
def __init__(self, *args, role_name, **kwargs):
super().__init__(*args, **kwargs)
self.role_name = role_name
class GroupDirectoryGroupError(GroupDirectoryError):
def __init__(self, *args, group, **kwargs):
super().__init__(*args, **kwargs)
self.group = group
class NoSuchGroupError(GroupDirectoryGroupError):
pass
class GroupAlreadyExistsError(GroupDirectoryGroupError):
pass
| 1.585938 | 2 |
treadmill/runtime/linux/runtime.py | gaocegege/treadmill | 2 | 75140 | <reponame>gaocegege/treadmill<gh_stars>1-10
"""Linux runtime interface.
"""
import time
import logging
import os
from treadmill import appcfg
from treadmill import context
from treadmill import exc
from treadmill import presence
from treadmill import subproc
from treadmill import supervisor
from treadmill import tickets
from treadmill.appcfg import abort as app_abort
from treadmill.runtime import runtime_base
from . import _run as app_run
from . import _finish as app_finish
_LOGGER = logging.getLogger(__name__)
def _start_service_sup(container_dir):
"""Safely start services supervisor."""
sys_dir = os.path.join(container_dir, 'sys')
svc_sup_dir = os.path.join(sys_dir, 'start_container')
if not os.path.exists(os.path.join(svc_sup_dir, 'self.pid')):
supervisor.start_service(sys_dir, 'start_container', once=True)
else:
_LOGGER.info('services supervisor already started.')
def _get_tickets(appname, app, container_dir):
"""Get tickets."""
tkts_spool_dir = os.path.join(
container_dir, 'root', 'var', 'spool', 'tickets')
reply = tickets.request_tickets(context.GLOBAL.zk.conn, appname)
if reply:
tickets.store_tickets(reply, tkts_spool_dir)
# Check that all requested tickets are valid.
for princ in app.get('tickets', []):
krbcc_file = os.path.join(tkts_spool_dir, princ)
if not tickets.krbcc_ok(krbcc_file):
_LOGGER.error('Missing or expired tickets: %s, %s',
princ, krbcc_file)
raise exc.ContainerSetupError('tickets.%s' % princ)
else:
_LOGGER.info('Ticket ok: %s, %s', princ, krbcc_file)
class LinuxRuntime(runtime_base.RuntimeBase):
"""Linux Treadmill runtime."""
def __init__(self, tm_env, container_dir):
super(LinuxRuntime, self).__init__(tm_env, container_dir)
def _can_run(self, manifest):
try:
return appcfg.AppType(manifest['type']) in (
appcfg.AppType.NATIVE,
appcfg.AppType.TAR
# TODO: Add support for DOCKER
)
except ValueError:
return False
def run_timeout(self, manifest):
if appcfg.AppType(manifest['type']) is appcfg.AppType.NATIVE:
return '60s'
# Inflated to allow time to download and extract the image.
return '5m'
def _run(self, manifest, watchdog, terminated):
app_run.run(self.tm_env, self.container_dir, manifest, watchdog,
terminated)
def _finish(self, watchdog, terminated):
app_finish.finish(self.tm_env, context.GLOBAL.zk.conn,
self.container_dir, watchdog)
def _register(self, manifest, refresh_interval=None):
app_presence = presence.EndpointPresence(
context.GLOBAL.zk.conn,
manifest
)
try:
app_presence.register()
if manifest.get('tickets', None):
_get_tickets(manifest['name'], manifest, self.container_dir)
_start_service_sup(self.container_dir)
except exc.ContainerSetupError:
app_abort.abort(
self.tm_env,
manifest['name'],
reason='container_setup_error',
)
# If tickets are not ok, app will be aborted. Waiting for tickets
# in the loop is harmless way to wait for that.
#
# If tickets acquired successfully, services will start, and
# tickets will be refreshed after each interval.
tkts_spool_dir = os.path.join(
self.container_dir, 'root', 'var', 'spool', 'tickets')
while True:
time.sleep(refresh_interval)
reply = tickets.request_tickets(context.GLOBAL.zk.conn,
manifest['name'])
if reply:
tickets.store_tickets(reply, tkts_spool_dir)
else:
_LOGGER.error('Error requesting tickets.')
def _monitor(self, manifest):
svc_presence = presence.ServicePresence(
manifest,
self.container_dir,
self.tm_env.app_events_dir
)
sys_dir = os.path.join(self.container_dir, 'sys')
svc_sup_dir = os.path.join(sys_dir, 'start_container')
failed_svc = None
killed = False
        # Check that start_container was not terminated. This fixes a race
        # condition where start_container is terminated while the presence
        # process exits and is restarted.
svc_sup_ran_once = os.path.exists(os.path.join(svc_sup_dir,
'self.pid'))
_LOGGER.info('services supervisor ran once: %s', svc_sup_ran_once)
svc_sup_down = presence.is_down(svc_sup_dir)
_LOGGER.info('services supervisor down: %s', svc_sup_down)
if svc_sup_down and svc_sup_ran_once:
_LOGGER.info('services supervisor was terminated, exiting.')
else:
svc_presence.ensure_supervisors_running()
# Try to start the service, taking into account number of
# restarts.
# If the number of restarts is more than specified, delete app
# from the model, which will trigger container shutdown.
#
# In case of container shutdown (application evicted from the
# server), exit_app will not be called.
while True:
success, failed_svc = svc_presence.start_all()
if not success:
break
svc_presence.wait_for_exit(svc_sup_dir)
if presence.is_down(svc_sup_dir):
_LOGGER.info('Container services supervisor is down.')
failed_svc = None
killed = True
break
svc_presence.exit_app(failed_svc, killed=killed)
_LOGGER.info('Shutting down sys supervisor.')
subproc.call(['s6_svscanctl', '-pi', sys_dir])
| 1.570313 | 2 |
examples/modules/index.py | ventillo/cherrypy_sqlite_api | 0 | 75268 | <gh_stars>0
#!/usr/bin/python3
import cherrypy
import json
static_dir = '/templates/' # Needs to have trailing and leading slash '/'
class wellcome(object):
'''Base Index constructor and expose function'''
@cherrypy.expose
def index(self):
result = '''{
"firstName": "John",
"lastName": "Smith",
"isAlive": true,
"age": 27,
"address": {
"streetAddress": "21 2nd Street",
"city": "New York",
"state": "NY",
"postalCode": "10021-3100"
},
"phoneNumbers": [
{
"type": "home",
"number": "212 555-1234"
},
{
"type": "office",
"number": "646 555-4567"
},
{
"type": "mobile",
"number": "123 456-7890"
}
],
"children": [],
"spouse": null
}'''
return json.dumps(json.loads(result))
@cherrypy.expose
def other(self):
result = '<h1>Other</h1>'
return result
| 1.78125 | 2 |
DiscordBot.py | DaanSelen/POC_DiscordBot | 0 | 75396 | <filename>DiscordBot.py
import asyncio
import discord
import requests
import json
import time
import youtube_dl
import os
import random
from discord.ext import commands
from discord import Intents
from discord import FFmpegPCMAudio
dababyImages = ["dababy1.jpg", "dababy2.jpg", "dababy3.jpg", "dababy4.jpg"] # Array of images to pick from at random.
keys = ["REDACTED"] # The bot token used to authenticate; keep it confidential.
prefix = "?"
intents = discord.Intents.default()
intents.members = True
client = commands.Bot(command_prefix = prefix ,help_command=None, intents=intents)
#Functions
def jokeApi(): # Fetches a random joke from an API; no API key is required.
jokeUrl = "https://official-joke-api.appspot.com/random_joke"
response = requests.request("GET", jokeUrl)
jokeApiResponse = str((json.loads(response.text)['setup']) + " " + (json.loads(response.text)['punchline']))
return jokeApiResponse
#Commands
@client.event # Fires once the bot is ready for use
async def on_ready():
await client.change_presence()
print('Status: Online')
@client.command(pass_context=True) # Sends a message listing all available commands
async def help(ctx):
embed = discord.Embed(
title = 'Helping you!',
description = 'Available commands:',
colour = discord.Colour.blue()
)
embed.set_footer(text=('Requested by: ' + ctx.message.author.name))
embed.set_thumbnail(url='https://cdn.discordapp.com/avatars/852510059874877441/9ca138c0a16d7f3de8bd2c0833136b0c.webp?size=1024')
embed.add_field(name=(prefix + 'introduce'), value='Displays a short introduction of the bot.')
embed.add_field(name=(prefix + 'ping'), value='Displays response time of the bot.')
embed.add_field(name=(prefix + 'clear'), value='Removes certain amount of messages from a channel.')
embed.add_field(name=(prefix + 'joke'), value='Displays a random joke each time.')
embed.add_field(name=(prefix + 'join'), value='Joins the channel you are in.')
embed.add_field(name=(prefix + 'leave'), value='Leaves the channel the bot is in.')
embed.add_field(name=(prefix + 'play + [Youtube URL]'), value='Plays the Youtube url you requested.')
embed.add_field(name=(prefix + 'pause'), value='Pauses the audio currently playing.')
embed.add_field(name=(prefix + 'resume'), value='Resumes the audio currently playing.')
embed.add_field(name=(prefix + 'stop'), value='Stops playing audio.')
embed.add_field(name=(prefix + 'finn'), value='Random easter egg.')
embed.add_field(name=(prefix + 'dababy'), value='Random easter egg.')
await ctx.send(embed=embed)
@client.command(aliases=['Introduce']) # Makes the bot introduce itself
async def introduce(ctx):
await ctx.send("Hello, I am RedAlp! I am here to help.")
@client.command(aliases=['Ping']) # Shows the bot's response time, i.e. how long the bot takes to react
async def ping(ctx):
await ctx.send("Pong! " + str(round(client.latency * 1000))+ "ms")
@client.command(aliases=['Clear']) # Removes a given number of messages from the channel; defaults to 10 if no number is given
async def clear(ctx, amount=10):
await ctx.channel.purge(limit = amount)
@client.command(aliases=['Joke']) # Returns a random joke, using the API mentioned above
async def joke(ctx):
jokeApiResponse = jokeApi()
await ctx.send(jokeApiResponse)
@client.command(aliases=['Join']) # Checks whether you are in a voice channel and, if so, joins that channel
async def join(ctx):
if (ctx.author.voice):
channel = ctx.message.author.voice.channel
await channel.connect()
await ctx.message.add_reaction('✅')
else:
await ctx.send("You are not in a voice channel, you must be in one for me to join!")
@client.command(pass_context = True, aliases=['dc', 'Leave']) # Checks whether the bot itself is in a voice channel and, if so, leaves it
async def leave(ctx):
if (ctx.voice_client):
await ctx.guild.voice_client.disconnect()
await ctx.message.add_reaction('✅')
else:
await ctx.send("I am not in a voice channel.")
#Play audio
@client.command(pass_context = True, aliases=['p', 'P', 'Play']) # Resolves the link with the youtube_dl package, downloads the audio it finds as .mp3 and plays it through FFmpeg
async def play(ctx, url:str):
    if not (ctx.author.voice):
        await ctx.send("To request a song, you must be in a voice channel.")
        return
song_there = os.path.isfile("song.mp3")
try:
if song_there:
os.remove("song.mp3")
except PermissionError:
await ctx.send("Wait for the current playing music to end or use the 'stop' command")
return
Channel = ctx.message.author.voice.channel
if not (ctx.voice_client):
await Channel.connect()
await ctx.message.add_reaction('✅')
voice = discord.utils.get(client.voice_clients, guild=ctx.guild)
ydl_opts = {
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',
}],
}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([url])
for file in os.listdir("./"):
if file.endswith(".mp3"):
            nowPlaying = str(file)[0:-16] # Strip the "-<video id>.mp3" suffix that youtube_dl appends
os.rename(file, "song.mp3")
voice.play(discord.FFmpegPCMAudio("song.mp3"))
    songEmbed = discord.Embed( # Embed announcing which song is now playing
title = 'Music Time!',
description = 'Now playing: ' + nowPlaying,
colour = discord.Colour.blue()
)
songEmbed.set_footer(text='Author: ' + ctx.message.author.name)
songEmbed.set_thumbnail(url='https://cdn.discordapp.com/avatars/852510059874877441/9ca138c0a16d7f3de8bd2c0833136b0c.webp?size=1024')
songEmbed.add_field(name='Url: ', value=url, inline=True)
await ctx.send(embed=songEmbed)
@client.command(pass_context = True) # Pauses the media that is currently playing
async def pause(ctx):
voice = discord.utils.get(client.voice_clients,guild=ctx.guild)
    if voice.is_playing(): # Checks whether audio is currently playing
voice.pause()
await ctx.message.add_reaction('✅')
else:
await ctx.send("There is no audio playing right now.")
@client.command(pass_context = True) # Resumes the media that is currently paused
async def resume(ctx):
voice = discord.utils.get(client.voice_clients,guild=ctx.guild)
    if voice.is_paused(): # Checks whether audio is currently paused.
voice.resume()
await ctx.message.add_reaction('✅')
else:
await ctx.send("There is no audio paused right now.")
@client.command(pass_context = True) # Stops media playback entirely.
async def stop(ctx):
voice = discord.utils.get(client.voice_clients,guild=ctx.guild)
    voice.stop() # Stops playing audio
await ctx.message.add_reaction('✅')
#Easter eggs, no explanation given.
@client.command(pass_context = True, aliases=['Finn'])
async def finn(ctx):
if (ctx.voice_client):
await ctx.send("Nee, niet nu ik er al ben.")
elif (ctx.author.voice):
channel = ctx.message.author.voice.channel
voice = await channel.connect()
finnsource = FFmpegPCMAudio('wetfart.wav')
        voice.play(finnsource)
        while voice.is_playing(): # Wait for playback to finish without blocking the event loop
            await asyncio.sleep(0.5)
await ctx.guild.voice_client.disconnect()
else:
await ctx.send("Deze zemmel is gewoon niet in een voice channel.")
@client.command(pass_context = True)
async def dababy(ctx):
await ctx.send("Lezzz go, I'm dababy", file=discord.File(random.choice(dababyImages))) #Geeft een random image uit een lijst van png images
#Events
@client.event # When someone joins the guild/server, the bot sends a welcome message
async def on_member_join(member):
    for channel in member.guild.channels: # Look for the channel named "welcome"
        if str(channel) == "welcome":
            await channel.send("`Welcome `*" + str(member)[0:-5] + "*")
@client.event #Wanneer iemand de guild/server verlaat zal de bot dat zien en een vaarwelbericht sturen
async def on_member_remove(member):
    for channel in member.guild.channels: # Checks each channel for the one matching the string name.
if str(channel) == "welcome":
await channel.send("`Goodbye `*" + str(member)[0:-5] + "*")
client.run(keys[0]) # The bot token, mentioned at the top, is required here
| 1.53125 | 2 |
Agents/utils/elevation/elevation_demo.py | cambridge-cares/TheWorldAvatar | 21 | 75524 | <reponame>cambridge-cares/TheWorldAvatar
"""
Demo script to show the usage of the elevations.py script. This demo reads in a CSV with
sample locations, determines their elevations, then writes out a local CSV with the results.
Requires:
- A remote GeoServer with a valid WMS endpoint
Authors:
- mdhillman<@><EMAIL>
"""
import csv
import elevations
def outputLine(outputFile, name, lat, lon, refHeight, height, absDev, relDev):
"""
Output line in CSV file.
"""
outputFile.write(name)
outputFile.write(",")
outputFile.write(str(lat))
outputFile.write(",")
outputFile.write(str(lon))
outputFile.write(",")
outputFile.write(str(refHeight))
outputFile.write(",")
outputFile.write(str(height))
outputFile.write(",")
outputFile.write(str(absDev))
outputFile.write(",")
outputFile.write(str(relDev))
outputFile.write("\n")
# WMS Endpoint
WMS = "url-goes-here"
# Input CSV
CSV = "./inputs.csv"
# Open file handles
resultsFile = open("results.csv", "w")
resultsFile.write("Name,Latitude,Longitude,Ref height [m],Height [m],Absolute deviation [m],Relative deviation [%]\n")
# Read the CSV
with open(CSV, mode="r") as csvFile:
csvReader = csv.DictReader(csvFile)
# Read each row
for row in csvReader:
name = row["Name"]
refHeight = float(row["Height [m]"])
lat = float(row["Latitude"])
lon = float(row["Longitude"])
# Determine the elevations
height = elevations.getHeight(WMS, lat, lon)
# Calculate deviations
absDev = abs(refHeight - height)
relDev = 100.0 * abs(refHeight - height) / refHeight
# Add rows in output files
outputLine(resultsFile, name, lat, lon, refHeight, height, absDev, relDev)
# Close file and finish
csvFile.close()
resultsFile.close()
print("Elevation demo finished, please see 'results.csv' file for heights.") | 2.515625 | 3 |
examples/projectq_example.py | dstallenberg/In-Phase | 2 | 75652 | <reponame>dstallenberg/In-Phase<filename>examples/projectq_example.py<gh_stars>1-10
import numpy as np
from src.quantum_phase_estimation.util_functions import error_estimate, find_qubits_from_unitary
from src.quantum_phase_estimation.generator.generator import generate_qasm_code
from src.qasm_optimizer.optimizer import optimize
from src.qasm_error_introducer.error_introducer import introduce_error
from src.qasm_topology_mapper.mapping import map_to_topology
from src.quantum_phase_estimation.processing.classical_postprocessing import print_result, remove_degeneracy_projectq
from src.quantum_phase_estimation.plot_results import plot_results_projectq
from src.qasm_to_projectq.converter import qasm_to_projectq
if __name__ == "__main__":
# variables
unitary = np.array([[0.7071, -0.7071j], [-0.7071j, 0.7071]])
desired_bit_accuracy = 5
minimum_chance_of_success = 0.5
mu = 0
sigma = 0.01
error_toggle = False
topology = [['0', '1'],
['0', '3'],
['1', '2'],
['1', '4'],
['2', '5'],
['3', '4'],
['3', '6'],
['4', '5'],
['4', '7'],
['5', '8'],
['6', '7'],
['7', '8']]
shots = 100
# process
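    # error_estimate returns the number of ancilla qubits and the success probability required for the desired bit accuracy.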
nancillas, p_succes = error_estimate(desired_bit_accuracy, minimum_chance_of_success)
qubits, extra_empty_bits = find_qubits_from_unitary(unitary, nancillas, topology=topology)
final_qasm = generate_qasm_code(nancillas, qubits, unitary, extra_empty_bits=extra_empty_bits)
final_qasm = optimize(final_qasm, nancillas, qubits, extra_empty_bits)
if topology is not None:
final_qasm = map_to_topology(topology, final_qasm)
final_qasm = optimize(final_qasm, nancillas, qubits, extra_empty_bits)
if error_toggle:
final_qasm = introduce_error(final_qasm, mu, sigma)
final_qasm = optimize(final_qasm, nancillas, qubits, extra_empty_bits)
    projectq_code = qasm_to_projectq(final_qasm)
    with open('generated/code/generated.py', 'w') as code_file:
        code_file.write(projectq_code)
from generated.code.generated import calc_probs
result = calc_probs()
print(result)
plot_results_projectq(result, nancillas, qubits, p_succes)
# Classical postprocessing
fraction, error = print_result(remove_degeneracy_projectq(result, nancillas), desired_bit_accuracy, nancillas)
print('Fraction: ', fraction)
print('Error: ', error)
print('Correct chance: ', 1 - (1 - p_succes) ** shots)
| 2.078125 | 2 |
d_speed_size.py | nmoinvaz/pigz-bench-python | 6 | 75780 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# python3 d_speed_size.py : test speed/compression for folder 'corpus'
# python3 d_speed_size.py indir : test speed/compression for folder 'indir'
import os
import sys
import stat
import time
import shutil
import ntpath
import subprocess
import pandas as pd
def _cmp(
exe,
fnm,
lvl,
opts=' -f -k -',
):
"""
compress file 'fnm' using executable 'exe'
Parameters
----------
exe : str
name of compression executable
fnm : str
name of file to be compressed
lvl : int
compression level
opts : str
command line options for executable (default, ' -f -k -')
"""
env = os.environ
cmd = exe + opts + str(lvl) + ' "' + fnm + '"'
subprocess.call(cmd, shell=True)
def test_cmp(
exe='gzip',
indir='',
repeats=1,
ext='.gz',
opts=' -q -f -k -',
max_level=9,
):
"""
compress all files in folder 'indir' using executable 'exe'
Parameters
----------
exe : str
name of compression executable
indir : str
name of folder with files to compress
repeats : int
how many times is each file compressed. More is slower but better timing accuracy
ext : str
extension for files created by exe (default, '.gz')
opts : str
command line options for executable (default, ' -f -k -')
max_level : int
maximum compression level to test (default 9)
"""
if not os.path.exists(exe) and not shutil.which(exe):
print('Skipping test: Unable to find "' + exe + '"')
return ()
if len(indir) < 1:
indir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'corpus')
if not os.path.isdir(indir):
print('Run a_compile.py first: Unable to find "' + indir +'"')
sys.exit()
meth = ntpath.basename(exe)
print('Method\tLevel\tms\tmb/s\t%')
for lvl in range(1, max_level + 1):
t0 = time.time()
size = 0
nsize = 0
for rep in range(repeats):
for f in os.listdir(indir):
if not os.path.isfile(os.path.join(indir, f)):
continue
if f.startswith('.'):
continue
if not f.endswith('.zst') and not f.endswith('.gz') \
and not f.endswith('.bz2'):
fnm = os.path.join(indir, f)
_cmp(exe, fnm, lvl, opts)
if rep > 0:
continue
size = size + os.stat(fnm).st_size
nsize = nsize + os.stat(fnm + ext).st_size
size = size * repeats
nsize = nsize * repeats
seconds = time.time() - t0
# bytes_per_mb = 1024**2
bytes_per_mb = 1000000
speed = size / bytes_per_mb / seconds
print('{}\t{}\t{:.0f}\t{:.0f}\t{:.2f}'.format(meth, lvl,
seconds * 1000, speed, nsize / size * 100))
row_df = pd.DataFrame([[meth, nsize / size * 100, speed, lvl]])
row_df.columns = ['exe', 'size %', 'speed mb/s', 'level']
try:
df = pd.read_pickle('speed_size.pkl')
df = pd.concat([row_df, df], ignore_index=True)
except (OSError, IOError) as e:
df = row_df
df.to_pickle('speed_size.pkl')
# clean up
for f in os.listdir(indir):
if not os.path.isfile(os.path.join(indir, f)):
continue
if f.endswith('.zst') or f.endswith('.gz') or f.endswith('.bz2'
):
fnm = os.path.join(indir, f)
os.remove(fnm)
def plot(resultsFile):
"""line-plot showing how compression level impacts file size and conpression speed
Parameters
----------
resultsFile : str
name of pickle format file to plot
"""
if os.name == 'posix' and 'DISPLAY' not in os.environ:
print('Plot the results on a machine with a graphical display')
exit()
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_pickle(resultsFile)
sns.set()
ax = sns.lineplot(x='speed mb/s', y='size %', hue='exe', data=df, marker='o')
plt.show()
if __name__ == '__main__':
"""Compare speed and size for different compression tools
Parameters
----------
indir : str
folder with files to compress (default './corpus')
repeats : int
how many times is each file compressed. More (default 1)
"""
indir = ''
if len(sys.argv) > 1:
indir = sys.argv[1]
repeats = 1
if len(sys.argv) > 2:
repeats = int(sys.argv[2])
resultsFile = 'speed_size.pkl'
if os.path.exists(resultsFile):
os.remove(resultsFile)
test_cmp('pbzip2', indir, repeats, '.bz2')
test_cmp(
'zstd',
indir,
repeats,
'.zst',
' -T0 -q -f -k -',
19,
)
test_cmp('gzip', indir, repeats)
# test pigz variants
executable = stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH
exeDir = './exe'
for exe in os.listdir(exeDir):
exe = os.path.join(exeDir, exe)
if os.path.isfile(exe):
st = os.stat(exe)
mode = st.st_mode
if mode & executable:
exe = os.path.abspath(exe)
test_cmp(exe, indir, repeats)
plot(resultsFile)
| 2.34375 | 2 |
ssd/modeling/box_head/box_head.py | helmuthb/ssdlite-pytorch-mobilenext | 55 | 75908 | from torch import nn
import torch.nn.functional as F
from ssd.modeling import registry
from ssd.modeling.anchors.prior_box import PriorBox
from ssd.modeling.box_head.box_predictor import make_box_predictor
from ssd.utils import box_utils
from .inference import PostProcessor
from .loss import MultiBoxLoss, FocalLoss
@registry.BOX_HEADS.register('SSDBoxHead')
class SSDBoxHead(nn.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.predictor = make_box_predictor(cfg)
#
if self.cfg.MODEL.BOX_HEAD.LOSS == 'FocalLoss':
self.loss_evaluator = FocalLoss(0.25, 2)
else: # By default, we use MultiBoxLoss
self.loss_evaluator = MultiBoxLoss(neg_pos_ratio=cfg.MODEL.NEG_POS_RATIO)
self.post_processor = PostProcessor(cfg)
self.priors = None
def forward(self, features, targets=None):
cls_logits, bbox_pred = self.predictor(features)
if self.training:
return self._forward_train(cls_logits, bbox_pred, targets)
else:
return self._forward_test(cls_logits, bbox_pred)
def _forward_train(self, cls_logits, bbox_pred, targets):
gt_boxes, gt_labels = targets['boxes'], targets['labels']
reg_loss, cls_loss = self.loss_evaluator(cls_logits, bbox_pred, gt_labels, gt_boxes)
loss_dict = dict(
reg_loss=reg_loss,
cls_loss=cls_loss,
)
detections = (cls_logits, bbox_pred)
return detections, loss_dict
def _forward_test(self, cls_logits, bbox_pred):
if self.priors is None:
self.priors = PriorBox(self.cfg)().to(bbox_pred.device)
#
if self.cfg.MODEL.BOX_HEAD.LOSS == 'FocalLoss':
scores = cls_logits.sigmoid()
else:
scores = F.softmax(cls_logits, dim=2)
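        # Decode the predicted location offsets back into corner-form boxes using the anchor priors.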
boxes = box_utils.convert_locations_to_boxes(
bbox_pred, self.priors, self.cfg.MODEL.CENTER_VARIANCE, self.cfg.MODEL.SIZE_VARIANCE
)
boxes = box_utils.center_form_to_corner_form(boxes)
detections = (scores, boxes)
detections = self.post_processor(detections)
return detections, {}
| 1.921875 | 2 |
Platform/Intel/Tools/GenBiosId/GenBiosId.py | spbrogan/edk2-platforms | 5 | 76036 | ## @file
# Trim files preprocessed by compiler
#
# Copyright (c) 2019, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
import os
import sys
import time
import logging
import struct
import datetime
import argparse
import platform
from collections import OrderedDict
try:
from configparser import ConfigParser
except:
from ConfigParser import ConfigParser
# Config message
_BIOS_Signature = "$IBIOSI$"
_ConfigItem = {
"BOARD_ID": {'Value': '', 'Length': 7},
"BOARD_REV": {'Value': '', 'Length': 1},
"BOARD_EXT": {'Value': '', 'Length': 3},
"BUILD_TYPE": {'Value': '', 'Length': 1},
"VERSION_MAJOR": {'Value': '0000', 'Length': 4},
"VERSION_MINOR": {'Value': '00', 'Length': 2},
}
# Version message
__prog__ = 'GenBiosId'
__description__ = 'Trim files preprocessed by compiler'
__copyright__ = 'Copyright (c) 2019, Intel Corporation. All rights reserved.<BR> '
__version__ = '%s Version %s' % (__prog__, '0.1')
# ExtraData message
_Usage = "Usage: GenBiosId -i Configfile -o OutputFile [-ot OutputTextFile]"
_ConfigSectionNotDefine = "Not support the config file format, need config section"
_ErrorMessageTemplate = '\n\n%(tool)s...\n : error: %(msg)s\n\t%(extra)s'
_ErrorLogger = logging.getLogger("tool_error")
_ErrorFormatter = logging.Formatter("%(message)s")
_ConfigLenInvalid = "Config item %s length is invalid"
_ConfigItemInvalid = "Item %s is invalid"
# Error message
INFO = 20
ERRORCODE = 50
OPTION_MISSING = 'Missing option'
FORMAT_INVALID = 'Invalid syntax/format'
FILE_NOT_FOUND = 'File/directory not found in workspace'
FORMAT_UNKNOWN_ERROR = 'Unknown error in syntax/format'
FORMAT_NOT_SUPPORTED = 'Not supported syntax/format'
def SetEdkLogger():
_ErrorLogger.setLevel(INFO)
_ErrorCh = logging.StreamHandler(sys.stderr)
_ErrorCh.setFormatter(_ErrorFormatter)
_ErrorLogger.addHandler(_ErrorCh)
return _ErrorLogger
# Output the error message and exit the tool
def EdkLogger(ToolName, Message, ExtraData):
_ErrorLogger = SetEdkLogger()
TemplateDict = {"tool": ToolName, "msg": Message, "extra": ExtraData}
LogText = _ErrorMessageTemplate % TemplateDict
_ErrorLogger.log(ERRORCODE, LogText)
sys.exit(1)
# Open the file in the correct way
def FileOpen(FileName, Mode, Buffer=-1):
def LongFilePath(FileName):
FileName = os.path.normpath(FileName)
if platform.system() == 'Windows':
if FileName.startswith('\\\\?\\'):
return FileName
if FileName.startswith('\\\\'):
return '\\\\?\\UNC\\' + FileName[2:]
if os.path.isabs(FileName):
return '\\\\?\\' + FileName
return FileName
return open(LongFilePath(FileName), Mode, Buffer)
# Parse command line options
def MyOptionParser():
parser = argparse.ArgumentParser(prog=__prog__,
description=__description__ + __copyright__ + _Usage,
conflict_handler='resolve')
parser.add_argument('-v', '--version', action='version', version=__version__,
help="show program's version number and exit")
parser.add_argument('-i', '--int', metavar='FILENAME', dest='InputFile', help="Input Config file")
parser.add_argument('-o', '--out', metavar='FILENAME', dest='OutputFile', help="Output file")
parser.add_argument('-ot', '--text', metavar='FILENAME', dest='OutputTextFile', help="Output Text file")
Options = parser.parse_args()
return Options
# Check the Tool for missing variables
def CheckOptions(Options):
if len(sys.argv) != 5 and not (len(sys.argv) == 7 and Options.OutputTextFile):
EdkLogger("GenBiosId", OPTION_MISSING, ExtraData=_Usage)
elif not Options.InputFile or not Options.OutputFile:
EdkLogger("GenBiosId", OPTION_MISSING, ExtraData=_Usage)
InputFile = Options.InputFile
OutputFile = Options.OutputFile
OutputTextFile = Options.OutputTextFile
if not os.path.exists(InputFile):
EdkLogger("GenBiosId", FILE_NOT_FOUND, ExtraData="Input file not found")
return InputFile, OutputFile, OutputTextFile
# Read input file and get config
def ReadInputFile(InputFile):
InputDict = OrderedDict()
with open(InputFile) as File:
FileLines = File.readlines()
for Line in FileLines:
if Line.strip().startswith('#'):
continue
if '=' in Line:
Key, Value = Line.split('=')
InputDict[Key.strip()] = Value.strip()
return InputDict
# Parse the input file and extract the information
def ParserInputFile(InputDict):
for Item in InputDict:
if Item not in _ConfigItem:
EdkLogger("GenBiosId", FORMAT_INVALID, ExtraData=_ConfigItemInvalid % Item)
_ConfigItem[Item]['Value'] = InputDict[Item]
if len(_ConfigItem[Item]['Value']) != _ConfigItem[Item]['Length']:
EdkLogger("GenBiosId", FORMAT_INVALID, ExtraData=_ConfigLenInvalid % Item)
for Item in _ConfigItem:
if not _ConfigItem[Item]['Value']:
EdkLogger("GenBiosId", FORMAT_UNKNOWN_ERROR, ExtraData="Item %s is missing" % Item)
utcnow = datetime.datetime.utcnow()
TimeStamp = time.strftime("%y%m%d%H%M", utcnow.timetuple())
Id_Str = _ConfigItem['BOARD_ID']['Value'] + _ConfigItem['BOARD_REV']['Value'] + '.' + _ConfigItem['BOARD_EXT'][
'Value'] + '.' + _ConfigItem['VERSION_MAJOR']['Value'] + \
'.' + _ConfigItem["BUILD_TYPE"]['Value'] + _ConfigItem['VERSION_MINOR']['Value'] + '.' + TimeStamp
return Id_Str
# Output information to a file
def PrintOutputFile(OutputFile, OutputTextFile, Id_Str):
with FileOpen(OutputFile, 'wb') as FdOut:
for i in _BIOS_Signature:
FdOut.write(struct.pack('B', ord(i)))
for i in Id_Str:
FdOut.write(struct.pack('H', ord(i)))
FdOut.write(struct.pack('H', 0x00))
if OutputTextFile:
with FileOpen(OutputTextFile, 'w') as FdOut:
FdOut.write(Id_Str)
# Tool entrance method
def Main():
Options = MyOptionParser()
InputFile, OutputFile, OutputTextFile = CheckOptions(Options)
InputDict = ReadInputFile(InputFile)
Id_Str = ParserInputFile(InputDict)
PrintOutputFile(OutputFile, OutputTextFile, Id_Str)
return 0
if __name__ == '__main__':
r = Main()
## 0-127 is a safe return range, and 1 is a standard default error
if r < 0 or r > 127: r = 1
sys.exit(r)
| 1.179688 | 1 |
code/taxid2NC.py | linsalrob/PhageHosts | 17 | 76164 | '''
Convert a table with one or more columns of taxids to NC ids
'''
import sys
import os
import re
try:
resultsF = sys.argv[1]
except:
sys.exit( sys.argv[0] + " <results file (tab separated)>")
hostT = '/home3/redwards/phage/host_analysis/all_host_taxid.txt'
if not os.path.exists(hostT):
sys.exit(hostT + " does not exist. This is just a two column table of NC id and taxid\n")
taxa={}
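# Fill the lookup: column two holds the taxid, column one the NC accession.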
with open(hostT, 'r') as hin:
for line in hin:
line = line.strip()
p = line.split("\t")
taxa[p[1]] = p[0]
with open(resultsF, 'r') as rin:
for line in rin:
line = line.strip()
pieces=line.split("\t")
for i in range(len(pieces)):
pieces[i] = pieces[i].strip()
for i in range(len(pieces)):
p=pieces[i]
if p not in taxa:
sys.stderr.write("Found a taxonomy with no NC" + p + "\n")
continue
if i == 0:
sys.stdout.write(taxa[match[0]])
else:
sys.stdout.write("\t" + taxa[match[0]])
print
| 2.0625 | 2 |
CartPole/.ipynb_checkpoints/CartPole_v1-checkpoint.py | ayjabri/playGym | 0 | 76292 | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import gym
import torch
import time
import numpy as np
import torch.nn as nn
from datetime import datetime, timedelta
from torch.nn import functional as F
from collections import namedtuple
hidden_size = 128
batch_size = 100
percentile = 70
lr = 0.01
class Net(nn.Module):
def __init__(self, obs_size, hidden, num_actions):
super().__init__()
self.net = nn.Sequential(nn.Linear(obs_size, hidden),
nn.ReLU(),
nn.Linear(hidden, num_actions)
)
def forward(self,x):
return self.net(x)
Episode = namedtuple('Episode',('reward','steps'))
Steps = namedtuple('Steps',('observation','action'))
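# Episode records the total reward and the (observation, action) steps taken;
# the cross-entropy method keeps only the episodes above a reward percentile.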
@torch.no_grad()
def play(env):
state = env.reset()
r = 0
while True:
env.render()
time.sleep(0.01)
action = net(torch.FloatTensor(state)).argmax(dim=-1).item()
last_state, reward, done, _ = env.step(action)
r += reward
if done:
print(r)
break
state = last_state
env.close()
def iter_batch(env, net, batch_size):
batch = []
obs_action = []
rewards = 0
obs = env.reset()
while True:
obs_t = torch.FloatTensor([obs])
action_p_t = F.softmax(net(obs_t),dim=-1)
action_p = action_p_t.detach().numpy()[0]
action = np.random.choice(len(action_p),p=action_p)
step = Steps(obs, action)
obs_action.append(step)
next_obs,r,done,_= env.step(action)
rewards += r
if done:
e = Episode(rewards,obs_action)
batch.append(e)
obs_action = []
rewards = 0
next_obs = env.reset()
if len(batch) == batch_size:
yield batch
batch = []
obs = next_obs
def filter_batch(batch, percentile):
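    # Keep only the "elite" episodes whose total reward reaches the given percentile,
    # and flatten their steps into training tensors.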
rewards = list(map(lambda s:s.reward, batch))
reward_boundry = np.percentile(rewards, percentile)
rewards_mean = float(np.mean(rewards))
obs_v = []
act_v = []
for reward, step in batch:
if reward < reward_boundry:
continue
obs_v.extend(list(map(lambda s:s.observation, step)))
act_v.extend(list(map(lambda s:s.action, step)))
obs_v = torch.FloatTensor(obs_v)
act_v = torch.LongTensor(act_v)
return (obs_v, act_v, reward_boundry, rewards_mean)
if __name__=="__main__":
start_time = datetime.now()
env = gym.make('CartPole-v1')
obs_size = env.observation_space.shape[0]
num_actions = env.action_space.n
net = Net(obs_size, hidden_size, num_actions)
loss_fun = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
for i, batch in enumerate(iter_batch(env, net, batch_size)):
obs_v, act_v, reward_boundry, rewards_mean = \
filter_batch(batch, percentile)
optimizer.zero_grad()
output = net(obs_v)
loss = loss_fun(output, act_v)
loss.backward()
optimizer.step()
print(f'epoch:{i} loss:{loss.item():.3f} mean:{rewards_mean:.0f}')
if rewards_mean > 475:
duration = timedelta(seconds = (datetime.now()-start_time).seconds)
print(f'Solved! in {duration}')
break
| 2.234375 | 2 |
scripts/calculateKappa.py | NockLabHarvard/Valinor | 0 | 76420 | # Script to accept as input two .xlsx files and
# calculate the inter-rater kappa agreement values
#
# The input files should have only 1 sheet
# The code column should be named 'Code'
# The instance number column should be labeled 'Segment'
#
# author: <NAME>
# affliation: MIT Media Lab & Nock Lab, Harvard
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import csv
import os.path
from re import findall, split
from csv import reader
from subprocess import call
from os.path import basename
from sklearn.metrics import classification_report
from argparse import ArgumentParser
# Function to define parent level codes
def getParentCodes():
return ['BACKGROUND INFORMATION',
'MENTAL HEALTH TREATMENT',
'RISK FACTORS',
'PSYCHIATRIC SYMPTOMS',
'SELF-HARM THOUGHTS AND BEHAVIORS',
'SOCIAL COMMUNICATION / POST INFORMATION',
'SOCIAL MEDIA SPECIFIC']
#Function to parse an Excel file exported via MAXQDA annotation
def parseCSV(path):
print path
try:
call( ["xlsx2csv", path, "tmp.csv"] )
codes = {}
with open("tmp.csv", "rU") as fp:
rdr = reader(fp)
rows = [ row for row in rdr ]
codeIndex = rows[0].index('Code')
postIndex = rows[0].index('Segment')
sets = []
errors = []
rows = rows[1:]
for i, row in enumerate(rows):
if findall(r'\d+', row[postIndex]):
sets.append([ findall(r'\d+', row[postIndex])[0], row[codeIndex] ])
else:
errors.append(row)
if errors:
for i in errors:
for row in rows:
if ( i[postIndex] in row[postIndex] ) and ( findall(r'\d+', row[postIndex]) ):
sets.append( [ findall(r'\d+', row[postIndex])[0], i[codeIndex] ] )
break
for item in sets:
codes[item[0]] = []
for item in sets:
codes[item[0]].append(item[1])
call ( ["rm", "tmp.csv"] )
return codes
except Exception, e:
print "Oops: " + str(e)
#Function to get metrics for annotation agreement
def getClassificationReport(gold, coder):
y_gold = []
y_coder = []
for code in gold:
y_gold.append(gold[code])
y_coder.append(coder[code])
with open('tmp.csv','wb') as fp:
print >> fp, classification_report(y_gold, y_coder)
with open('tmp.csv','rb') as fp:
reader = csv.reader(fp)
scores = [ split(" *", ' '.join(row).replace(",","")) for row in reader if row ]
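    # Each parsed row is [label, precision, recall, f1, support]; child codes are averaged into their parent-level codes below.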
parentCodes = getParentCodes()
report = {}
for parentCode in parentCodes:
report[parentCode] = {'count': 0, 'score': [0.0, 0.0, 0.0, 0.0] }
for parentCode in parentCodes:
for score in scores[1:-1]:
if parentCode in score[0]:
report[parentCode]['score'] = [float(x) + float(y) for x, y in zip(report[parentCode]['score'], score[1:])]
report[parentCode]['count'] += 1
if report[parentCode]['count'] > 0:
report[parentCode]['score'] = [ i / report[parentCode]['count'] for i in report[parentCode]['score'] ]
report[scores[-1][0]] = {'count': 1, 'score': scores[-1][1:]}
return report
#Function to save metrics into a csv file
def putScoreCard(report, coderName):
scoreCard = [['Code','Precision','Recall','F1','Support']]
for code in sorted(report):
scoreCard.append( [code] + report[code]['score'] )
with open(coderName + '.csv', 'wb') as fp:
wr = csv.writer(fp)
wr.writerows(scoreCard)
#Function to check if the args supplied are valid files
def is_valid_file(parser, arg):
if not os.path.exists(arg):
parser.error("The file %s does not exist!" % arg)
else:
return arg
def main():
parser = ArgumentParser(description="Calculate F1 Agreement Values Between Gold & Coder Sets")
parser.add_argument("-gold",
dest="gold",
required=True,
help="Input a gold excel file",
metavar="FILE",
type=lambda x: is_valid_file(parser, x))
parser.add_argument("-coder",
dest="coder",
required=True,
help="Input a coder excel file",
metavar="FILE", type=lambda x: is_valid_file(parser, x))
args = parser.parse_args()
gold = parseCSV(args.gold)
coder = parseCSV(args.coder)
report = getClassificationReport(gold, coder)
putScoreCard(report, os.path.basename(args.coder).split('.')[0])
if __name__ == "__main__":
main()
| 2.3125 | 2 |
ansible/modules/network/nxos/nxos_ip_interface.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 1 | 76548 | <gh_stars>1-10
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_ip_interface
version_added: "2.1"
short_description: Manages L3 attributes for IPv4 and IPv6 interfaces.
description:
- Manages Layer 3 attributes for IPv4 and IPv6 interfaces.
extends_documentation_fragment: nxos
author:
- <NAME> (@jedelman8)
- <NAME> (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- Interface must already be a L3 port when using this module.
- Logical interfaces (po, loop, svi) must be created first.
- C(mask) must be inserted in decimal format (i.e. 24) for
both IPv6 and IPv4.
- A single interface can have multiple IPv6 configured.
- C(tag) is not idempotent for IPv6 addresses and I2 system image.
options:
interface:
description:
- Full name of interface, i.e. Ethernet1/1, vlan10.
required: true
addr:
description:
- IPv4 or IPv6 Address.
required: false
default: null
mask:
description:
- Subnet mask for IPv4 or IPv6 Address in decimal format.
required: false
default: null
tag:
description:
- Route tag for IPv4 or IPv6 Address in integer format.
required: false
default: 0
version_added: "2.4"
allow_secondary:
description:
- Allow to configure IPv4 secondary addresses on interface.
required: false
default: false
version_added: "2.4"
state:
description:
- Specify desired state of the resource.
required: false
default: present
choices: ['present','absent']
requirements:
- "ipaddress"
'''
EXAMPLES = '''
- name: Ensure ipv4 address is configured on Ethernet1/32
nxos_ip_interface:
interface: Ethernet1/32
transport: nxapi
version: v4
state: present
addr: 20.20.20.20
mask: 24
- name: Ensure ipv6 address is configured on Ethernet1/31
nxos_ip_interface:
interface: Ethernet1/31
transport: cli
version: v6
state: present
addr: '2001::db8:800:200c:cccb'
mask: 64
- name: Ensure ipv4 address is configured with tag
nxos_ip_interface:
interface: Ethernet1/32
transport: nxapi
version: v4
state: present
tag: 100
addr: 20.20.20.20
mask: 24
- name: Configure ipv4 address as secondary if needed
nxos_ip_interface:
interface: Ethernet1/32
transport: nxapi
version: v4
state: present
allow_secondary: true
addr: 21.21.21.21
mask: 24
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"addr": "20.20.20.20", "allow_secondary": true,
"interface": "Ethernet1/32", "mask": "24", "tag": 100}
existing:
description: k/v pairs of existing IP attributes on the interface
returned: always
type: dict
sample: {"addresses": [{"addr": "11.11.11.11", "mask": 17, "tag": 101, "secondary": false}],
"interface": "ethernet1/32", "prefixes": ["172.16.17.32/17"],
"type": "ethernet", "vrf": "default"}
end_state:
description: k/v pairs of IP attributes after module execution
returned: always
type: dict
sample: {"addresses": [{"addr": "11.11.11.11", "mask": 17, "tag": 101, "secondary": false},
{"addr": "20.20.20.20", "mask": 24, "tag": 100, "secondary": true}],
"interface": "ethernet1/32", "prefixes": ["172.16.17.32/17", "20.20.20.0/24"],
"type": "ethernet", "vrf": "default"}
commands:
description: commands sent to the device
returned: always
type: list
sample: ["interface ethernet1/32", "ip address 20.20.20.20/24 secondary tag 100"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
try:
import ipaddress
HAS_IPADDRESS = True
except ImportError:
HAS_IPADDRESS = False
from ansible.module_utils.nxos import load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def find_same_addr(existing, addr, mask, full=False, **kwargs):
for address in existing['addresses']:
if address['addr'] == addr and address['mask'] == mask:
if full:
if kwargs['version'] == 'v4' and int(address['tag']) == kwargs['tag']:
return address
elif kwargs['version'] == 'v6' and kwargs['tag'] == 0:
# Currently we don't get info about IPv6 address tag
# But let's not break idempotence for the default case
return address
else:
return address
return False
def execute_show_command(command, module):
cmd = {}
cmd['answer'] = None
cmd['command'] = command
cmd['output'] = 'text'
cmd['prompt'] = None
body = run_commands(module, [cmd])
return body
def get_interface_type(interface):
if interface.upper().startswith('ET'):
return 'ethernet'
elif interface.upper().startswith('VL'):
return 'svi'
elif interface.upper().startswith('LO'):
return 'loopback'
elif interface.upper().startswith('MG'):
return 'management'
elif interface.upper().startswith('MA'):
return 'management'
elif interface.upper().startswith('PO'):
return 'portchannel'
else:
return 'unknown'
def is_default(interface, module):
command = 'show run interface {0}'.format(interface)
try:
body = execute_show_command(command, module)[0]
if 'invalid' in body.lower():
return 'DNE'
else:
raw_list = body.split('\n')
if raw_list[-1].startswith('interface'):
return True
else:
return False
except KeyError:
return 'DNE'
def get_interface_mode(interface, intf_type, module):
command = 'show interface {0} switchport'.format(interface)
mode = 'unknown'
if intf_type in ['ethernet', 'portchannel']:
body = execute_show_command(command, module)[0]
if len(body) > 0:
if 'Switchport: Disabled' in body:
mode = 'layer3'
elif 'Switchport: Enabled' in body:
mode = "layer2"
elif intf_type == 'svi':
mode = 'layer3'
return mode
def send_show_command(interface_name, version, module):
if version == 'v4':
command = 'show ip interface {0}'.format(interface_name)
elif version == 'v6':
command = 'show ipv6 interface {0}'.format(interface_name)
body = execute_show_command(command, module)
return body
def parse_unstructured_data(body, interface_name, version, module):
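    # Scrape the configured addresses, their prefixes and the VRF name out of the raw 'show ip(v6) interface' text output.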
interface = {}
interface['addresses'] = []
interface['prefixes'] = []
vrf = None
body = body[0]
splitted_body = body.split('\n')
if version == "v6":
if "ipv6 is disabled" not in body.lower():
address_list = []
# We can have multiple IPv6 on the same interface.
# We need to parse them manually from raw output.
for index in range(0, len(splitted_body) - 1):
if "IPv6 address:" in splitted_body[index]:
first_reference_point = index + 1
elif "IPv6 subnet:" in splitted_body[index]:
last_reference_point = index
break
interface_list_table = splitted_body[first_reference_point:last_reference_point]
for each_line in interface_list_table:
address = each_line.strip().split(' ')[0]
if address not in address_list:
address_list.append(address)
interface['prefixes'].append(str(ipaddress.ip_interface(u"%s" % address).network))
if address_list:
for ipv6 in address_list:
address = {}
splitted_address = ipv6.split('/')
address['addr'] = splitted_address[0]
address['mask'] = splitted_address[1]
interface['addresses'].append(address)
else:
for index in range(0, len(splitted_body) - 1):
if "IP address" in splitted_body[index]:
regex = '.*IP\saddress:\s(?P<addr>\d{1,3}(?:\.\d{1,3}){3}),\sIP\ssubnet:' + \
'\s\d{1,3}(?:\.\d{1,3}){3}\/(?P<mask>\d+)(?:\s(?P<secondary>secondary)\s)?' + \
'(.+?tag:\s(?P<tag>\d+).*)?'
match = re.match(regex, splitted_body[index])
if match:
match_dict = match.groupdict()
if match_dict['secondary'] is None:
match_dict['secondary'] = False
else:
match_dict['secondary'] = True
if match_dict['tag'] is None:
match_dict['tag'] = 0
else:
match_dict['tag'] = int(match_dict['tag'])
interface['addresses'].append(match_dict)
prefix = str(ipaddress.ip_interface(u"%(addr)s/%(mask)s" % match_dict).network)
interface['prefixes'].append(prefix)
try:
vrf_regex = '.+?VRF\s+(?P<vrf>\S+?)\s'
match_vrf = re.match(vrf_regex, body, re.DOTALL)
vrf = match_vrf.groupdict()['vrf']
except AttributeError:
vrf = None
interface['interface'] = interface_name
interface['type'] = get_interface_type(interface_name)
interface['vrf'] = vrf
return interface
def get_ip_interface(interface_name, version, module):
body = send_show_command(interface_name, version, module)
interface = parse_unstructured_data(body, interface_name, version, module)
return interface
def get_remove_ip_config_commands(interface, addr, mask, existing, version):
commands = ['interface {0}'.format(interface)]
if version == 'v4':
# We can't just remove primary address if secondary address exists
for address in existing['addresses']:
if address['addr'] == addr:
if address['secondary']:
commands.append('no ip address {0}/{1} secondary'.format(addr, mask))
elif len(existing['addresses']) > 1:
new_primary = False
for address in existing['addresses']:
if address['addr'] != addr:
commands.append('no ip address {0}/{1} secondary'.format(address['addr'], address['mask']))
if not new_primary:
command = 'ip address {0}/{1}'.format(address['addr'], address['mask'])
new_primary = True
else:
command = 'ip address {0}/{1} secondary'.format(address['addr'], address['mask'])
if 'tag' in address and address['tag'] != 0:
command += " tag " + str(address['tag'])
commands.append(command)
else:
commands.append('no ip address {0}/{1}'.format(addr, mask))
break
else:
for address in existing['addresses']:
if address['addr'] == addr:
commands.append('no ipv6 address {0}/{1}'.format(addr, mask))
return commands
def get_config_ip_commands(delta, interface, existing, version):
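    # Build the CLI commands needed to configure the desired address, handling IPv4
    # secondary addresses, replacement of existing addresses, and route tags.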
commands = []
delta = dict(delta)
if version == 'v4':
command = 'ip address {addr}/{mask}'.format(**delta)
if len(existing['addresses']) > 0:
if delta['allow_secondary']:
for address in existing['addresses']:
if delta['addr'] == address['addr'] and address['secondary'] is False and delta['tag'] != 0:
break
else:
command += ' secondary'
else:
# Remove all existed addresses if 'allow_secondary' isn't specified
for address in existing['addresses']:
if address['secondary']:
commands.insert(0, 'no ip address {addr}/{mask} secondary'.format(**address))
else:
commands.append('no ip address {addr}/{mask}'.format(**address))
else:
if not delta['allow_secondary']:
# Remove all existed addresses if 'allow_secondary' isn't specified
for address in existing['addresses']:
commands.insert(0, 'no ipv6 address {addr}/{mask}'.format(**address))
command = 'ipv6 address {addr}/{mask}'.format(**delta)
if int(delta['tag']) > 0:
command += ' tag {tag}'.format(**delta)
elif int(delta['tag']) == 0:
# Case when we need to remove tag from an address. Just enter command like
# 'ip address ...' (without 'tag') not enough
commands += get_remove_ip_config_commands(interface, delta['addr'], delta['mask'], existing, version)
commands.append(command)
if commands[0] != 'interface {0}'.format(interface):
commands.insert(0, 'interface {0}'.format(interface))
return commands
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def validate_params(addr, interface, mask, tag, allow_secondary, version, state, intf_type, module):
if state == "present":
if addr is None or mask is None:
module.fail_json(msg="An IP address AND a mask must be provided "
"when state=present.")
elif state == "absent" and version == "v6":
if addr is None or mask is None:
module.fail_json(msg="IPv6 address and mask must be provided when "
"state=absent.")
if intf_type != "ethernet" and module.params["transport"] == "cli":
if is_default(interface, module) == "DNE":
module.fail_json(msg="That interface does not exist yet. Create "
"it first.", interface=interface)
if mask is not None:
try:
if (int(mask) < 1 or int(mask) > 32) and version == "v4":
raise ValueError
elif int(mask) < 1 or int(mask) > 128:
raise ValueError
except ValueError:
module.fail_json(msg="Warning! 'mask' must be an integer between"
" 1 and 32 when version v4 and up to 128 "
"when version v6.", version=version,
mask=mask)
if addr is not None and mask is not None:
try:
ipaddress.ip_interface(u'%s/%s' % (addr, mask))
except ValueError:
module.fail_json(msg="Warning! Invalid ip address or mask set.", addr=addr, mask=mask)
if tag is not None:
try:
            if tag < 0 or tag > 4294967295:
raise ValueError
except ValueError:
module.fail_json(msg="Warning! 'tag' must be an integer between"
" 0 (default) and 4294967295."
"To use tag you must set 'addr' and 'mask' params.", tag=tag)
if allow_secondary is not None:
try:
if addr is None or mask is None:
raise ValueError
except ValueError:
module.fail_json(msg="Warning! 'secondary' can be used only when 'addr' and 'mask' set.",
allow_secondary=allow_secondary)
def main():
argument_spec = dict(
interface=dict(required=True),
addr=dict(required=False),
version=dict(required=False, choices=['v4', 'v6'],
default='v4'),
mask=dict(type='str', required=False),
tag=dict(required=False, default=0, type='int'),
state=dict(required=False, default='present',
choices=['present', 'absent']),
allow_secondary=dict(required=False, default=False,
type='bool'),
include_defaults=dict(default=True),
config=dict(),
save=dict(type='bool', default=False)
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
if not HAS_IPADDRESS:
module.fail_json(msg="ipaddress is required for this module. Run 'pip install ipaddress' for install.")
warnings = list()
check_args(module, warnings)
addr = module.params['addr']
version = module.params['version']
mask = module.params['mask']
tag = module.params['tag']
allow_secondary = module.params['allow_secondary']
interface = module.params['interface'].lower()
state = module.params['state']
intf_type = get_interface_type(interface)
validate_params(addr, interface, mask, tag, allow_secondary, version, state, intf_type, module)
mode = get_interface_mode(interface, intf_type, module)
if mode == 'layer2':
module.fail_json(msg='That interface is a layer2 port.\nMake it '
'a layer 3 port first.', interface=interface)
existing = get_ip_interface(interface, version, module)
args = dict(addr=addr, mask=mask, tag=tag, interface=interface, allow_secondary=allow_secondary)
proposed = dict((k, v) for k, v in args.items() if v is not None)
commands = []
changed = False
end_state = existing
if state == 'absent' and existing['addresses']:
if find_same_addr(existing, addr, mask):
command = get_remove_ip_config_commands(interface, addr,
mask, existing, version)
commands.append(command)
elif state == 'present':
if not find_same_addr(existing, addr, mask, full=True, tag=tag, version=version):
command = get_config_ip_commands(proposed, interface, existing, version)
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
load_config(module, cmds)
changed = True
end_state = get_ip_interface(interface, version, module)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['commands'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
if __name__ == '__main__':
main()
| 1.304688 | 1 |
weather/services/sun.py | vyahello/async-weather-api | 2 | 76676 | <filename>weather/services/sun.py
import datetime
import time
from typing import Dict
import aiohttp
def _utc_to_local(date: str) -> datetime.datetime:
"""Converts utl to local datetime."""
now_timestamp = time.time()
return datetime.datetime.strptime(date, "%I:%M:%S %p") + (
datetime.datetime.fromtimestamp(now_timestamp)
- datetime.datetime.utcfromtimestamp(now_timestamp)
)
async def today(latitude: float, longitude: float) -> Dict[str, str]:
"""Returns sunrise/sunset for today."""
async with aiohttp.ClientSession() as session:
async with session.get(
f"https://api.sunrise-sunset.org/json?lat={latitude}&lng={longitude}"
) as response:
response.raise_for_status()
data = await response.json()
sun_data = data.get("results", {})
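            # Convert every 12-hour UTC time string (anything containing AM/PM) to local time; other fields such as day_length are left as-is.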
for key, value in tuple(sun_data.items()): # type: str, str
if "AM" not in value and "PM" not in value:
continue
sun_data[key] = datetime.datetime.strftime(
_utc_to_local(value), "%I:%M:%S %p"
)
return sun_data
| 1.929688 | 2 |