id (int64: 0-6k) | code (stringlengths: 4k-8k) | code_compressed (null)
---|---|---|
400 | # -*- coding: utf-8 -*-
from PySide2.QtCore import Qt
from PySide2.QtWidgets import QDialogButtonBox, QMessageBox, QWidget
from activity_browser.ui.widgets import (
BiosphereUpdater, SwitchComboBox, CutoffMenu, ForceInputDialog,
parameter_save_errorbox, simple_warning_box
)
# NOTE: No way of testing the BiosphereUpdater class without causing the
# ab_app fixture to flip its lid and fail to clean itself up.
def test_comparison_switch_empty(qtbot):
parent = QWidget()
parent.has_scenarios = False
qtbot.addWidget(parent)
box = SwitchComboBox(parent)
box.configure(False, False)
size = box.count()
assert size == 0
assert not box.isVisible()
def test_comparison_switch_no_scenarios(qtbot):
parent = QWidget()
parent.has_scenarios = False
qtbot.addWidget(parent)
box = SwitchComboBox(parent)
box.configure()
size = box.count()
assert size == 2
# assert box.isVisible() # Box fails to be visible, except it definitely is?
def METHOD_NAME(qtbot):
parent = QWidget()
parent.has_scenarios = True
qtbot.addWidget(parent)
box = SwitchComboBox(parent)
box.configure()
size = box.count()
assert size == 3
# assert box.isVisible() # Box fails to be visible, except it definitely is?
# Outdated: doesn't work with the new update
# def test_cutoff_menu_relative(qtbot):
# """ Simple check of all the slots on the CutoffMenu class
# """
# slider = CutoffMenu()
# qtbot.addWidget(slider)
# assert slider.cutoff_value == 0.01
# assert slider.is_relative
#
# assert slider.sliders.relative.value() == 20
# assert slider.sliders.relative.log_value == 1.8
# qtbot.mouseClick(slider.cutoff_slider_lft_btn, Qt.LeftButton)
# assert slider.sliders.relative.value() == 21
# assert slider.sliders.relative.log_value == 2.0
# qtbot.mouseClick(slider.cutoff_slider_rght_btn, Qt.LeftButton)
# assert slider.sliders.relative.value() == 20
# assert slider.sliders.relative.log_value == 1.8
#
# with qtbot.waitSignal(slider.slider_change, timeout=1600):
# slider.cutoff_slider_line.setText("0.1")
# assert slider.sliders.relative.value() == 40
# assert slider.sliders.relative.log_value == 10
def test_cutoff_slider_toggle(qtbot):
slider = CutoffMenu()
qtbot.addWidget(slider)
with qtbot.waitSignal(slider.buttons.topx.toggled, timeout=800):
slider.buttons.topx.click()
assert not slider.is_relative
assert slider.limit_type == "number"
# def test_cutoff_slider_top(qtbot):
# slider = CutoffMenu()
# qtbot.addWidget(slider)
# slider.buttons.topx.click()
#
# assert slider.sliders.topx.value() == 1
# qtbot.mouseClick(slider.cutoff_slider_rght_btn, Qt.LeftButton)
# assert slider.sliders.topx.value() == 2
# qtbot.mouseClick(slider.cutoff_slider_lft_btn, Qt.LeftButton)
# assert slider.sliders.topx.value() == 1
#
# with qtbot.waitSignal(slider.slider_change, timeout=1600):
# slider.cutoff_slider_line.setText("15")
# assert slider.sliders.topx.value() == 15
def test_input_dialog(qtbot):
""" Test the various thing about the dialog widget.
"""
parent = QWidget()
qtbot.addWidget(parent)
dialog = ForceInputDialog.get_text(
parent, "Early in the morning", "What should we do with a drunken sailor"
)
assert dialog.output == ""
assert not dialog.buttons.button(QDialogButtonBox.Ok).isEnabled()
existing = ForceInputDialog.get_text(
parent, "Existence", "is a nightmare", "and here is why"
)
assert existing.output == "and here is why"
# Text in dialog MUST be changed before Ok button is enabled.
assert not dialog.buttons.button(QDialogButtonBox.Ok).isEnabled()
with qtbot.waitSignal(dialog.input.textChanged, timeout=100):
dialog.input.setText("Now it works.")
assert dialog.buttons.button(QDialogButtonBox.Ok).isEnabled()
def test_parameter_errorbox(qtbot, monkeypatch):
""" Not truly used anymore in favour of not saving invalid values.
"""
parent = QWidget()
qtbot.addWidget(parent)
monkeypatch.setattr(QMessageBox, "exec_", lambda *args: QMessageBox.Cancel)
result = parameter_save_errorbox(parent, "got an error")
assert result == QMessageBox.Cancel
def test_simple_warning_box(qtbot, monkeypatch):
parent = QWidget()
qtbot.addWidget(parent)
monkeypatch.setattr(QMessageBox, "warning", lambda *args: QMessageBox.Ok)
result = simple_warning_box(parent, "Warning title", "This is a warning")
assert result == QMessageBox.Ok | null |
401 | # coding=utf-8
# Copyright 2023 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Script utils for generating datasets figures and dataframes."""
import concurrent.futures
import functools
import itertools
import multiprocessing
import os
import traceback
from typing import Any, Callable, List, Optional, TypeVar
from absl import app
from absl import logging
import tensorflow_datasets as tfds
from tensorflow_datasets.core.utils.lazy_imports_utils import tensorflow as tf
# pylint: disable=logging-format-interpolation,logging-not-lazy,logging-fstring-interpolation
T = TypeVar('T')
_WORKER_COUNT_DATASETS = 10
def _log_exception(fn):
"""Logs the exceptions from a `ThreadPoolExecutor`."""
@functools.wraps(fn)
def decorated(*args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception: # pylint: disable=broad-except
err_str = traceback.format_exc()
logging.error(f'Exception occurred for {args}, {kwargs}:\n' + err_str)
raise
return decorated
@_log_exception
def generate_and_save_artifact(
full_name: str,
*,
dst_dir: tfds.typing.PathLike,
overwrite: bool,
file_extension: str,
get_artifact_fn: Callable[[tf.data.Dataset, tfds.core.DatasetInfo], T],
save_artifact_fn: Callable[[str, T], None],
) -> None:
"""Builds and saves the generated artifact for the dataset in dst_dir.
Args:
full_name: Name of the dataset to build, e.g. `dataset` or `dataset/config`.
dst_dir: Destination where the dataset will be saved (as
`dataset-config-version`)
overwrite: If True, recompute the image even if it exists.
file_extension: File extension of the artifact (e.g. `.png`)
get_artifact_fn: Function which extracts the dataset artifact to save.
save_artifact_fn: Function which saves the extracted artifact.
"""
dst_filename = full_name.replace('/', '-') + file_extension
dst_path = os.path.join(dst_dir, dst_filename)
# If the file already exists, skip the generation
if not overwrite and tf.io.gfile.exists(dst_path):
logging.info(f'Skipping generation for {full_name} (already exists)')
return
logging.info(f'Generating for {full_name}...')
# Load the dataset.
builder_name, _, version = full_name.rpartition('/')
builder = tfds.builder(f'{builder_name}:{version}')
split_names = list(builder.info.splits.keys())
if not split_names:
logging.info(f'Dataset `{full_name}` not generated.')
return
elif 'train' in split_names:
split = 'train'
else:
split = split_names[0]
ds = builder.as_dataset(split=split, shuffle_files=False)
if not tf.io.gfile.exists(dst_dir):
tf.io.gfile.makedirs(dst_dir)
try:
artifact = get_artifact_fn(ds.take(10), builder.info)
except Exception: # pylint: disable=broad-except
logging.info(f'Generation not supported for dataset `{full_name}`')
return
save_artifact_fn(dst_path, artifact)
def _get_full_names(datasets: Optional[List[str]] = None) -> List[str]:
"""Lists all builder names `ds/version` and `ds/config/version` to generate.
Args:
datasets: List of datasets from which to get the builder names.
Returns:
builder_names: The builder names.
"""
if datasets is None:
return tfds.core.load.list_full_names(current_version_only=True)
else:
builder_names = list(
itertools.chain.from_iterable(
[
tfds.core.load.single_full_names(builder_name)
for builder_name in datasets
]
)
)
return builder_names
def multi_process_map(
worker_fn: Callable[..., None],
datasets: Optional[List[str]] = None,
) -> None:
"""Applies the function for each given datasets.
Args:
worker_fn: Function called on each dataset version.
datasets: List of all `dataset` names to generate. If None, visualization
for all available datasets will be generated.
"""
full_names = _get_full_names(datasets)
logging.info(f'Generate figures for {len(full_names)} builders')
with multiprocessing.Pool(_WORKER_COUNT_DATASETS) as tpool:
list(tpool.map(worker_fn, full_names))
def METHOD_NAME(
worker_fn: Callable[..., None],
datasets: Optional[List[str]] = None,
) -> None:
"""Applies the function for each given datasets.
Args:
worker_fn: Function called on each dataset version.
datasets: List of all `dataset` names to generate. If None, visualization
for all available datasets will be generated.
"""
full_names = _get_full_names(datasets)
with concurrent.futures.ThreadPoolExecutor(
max_workers=_WORKER_COUNT_DATASETS,
) as executor:
list(executor.map(worker_fn, full_names))
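# Illustrative usage sketch (not part of the original script; my_get_fn and
# my_save_fn are hypothetical module-level callables): both map helpers expect
# a one-argument worker, so the keyword-only arguments are typically bound
# up front, e.g.
#   worker = functools.partial(
#       generate_and_save_artifact,
#       dst_dir='/tmp/figures', overwrite=False, file_extension='.png',
#       get_artifact_fn=my_get_fn, save_artifact_fn=my_save_fn)
#   multi_process_map(worker, datasets=['mnist'])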
def multi_process_run(main: Callable[[Any], None]) -> None:
"""Same as `absl.app.run` but with special multiprocess flags."""
app.run(main) | null |
402 | # Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import cv2
import numpy as np
import time
from models.decode import ctdet_decode
from utils.post_process import ctdet_post_process
import nnabla as nn
import nnabla.functions as F
from .base_detector import BaseDetector
class CtdetDetector(BaseDetector):
def __init__(self, opt):
super(CtdetDetector, self).__init__(opt)
def METHOD_NAME(self, images):
""" Apply detection to input images.
Args:
images (numpy.ndarray): Input images with "NCHW" format.
Returns:
tuple: containing the following items
- outputs: Output of NN model.
- dets: Detection results with the same format of dataset.
- forward_time: Processing time.
"""
inputs = nn.Variable.from_numpy_array(images)
outputs = self.model(inputs)
hm = outputs['hm']
hm = F.sigmoid(hm)
wh = outputs['wh']
reg = outputs['reg']
if self.opt.channel_last:
hm = F.transpose(hm, (0, 3, 1, 2))
wh = F.transpose(wh, (0, 3, 1, 2))
reg = F.transpose(reg, (0, 3, 1, 2))
forward_time = time.time()
dets = ctdet_decode(hm, wh, reg=reg, K=self.opt.K)
return outputs, dets, forward_time
def post_process(self, dets, meta, scale=1):
dets = dets.reshape(1, -1, dets.shape[2])
dets = ctdet_post_process(
dets.copy(),
[meta['c']],
[meta['s']],
meta['out_height'],
meta['out_width'],
self.opt.num_classes,
)
for j in range(1, self.opt.num_classes + 1):
dets[0][j] = np.array(dets[0][j], dtype=np.float32).reshape(-1, 5)
dets[0][j][:, :4] /= scale
return dets[0]
def merge_outputs(self, detections):
"""Merge detection results.
Args:
detections (list): List of detection results, each a dictionary keyed by 1-based class index.
Returns:
dict: Merged detection results. The keys will be [1, 2, ..., num_classes].
"""
results = {}
for j in range(1, self.opt.num_classes + 1):
results[j] = np.concatenate(
[detection[j] for detection in detections], axis=0
).astype(np.float32)
scores = np.hstack(
[results[j][:, 4] for j in range(1, self.opt.num_classes + 1)]
)
if len(scores) > self.max_per_image:
kth = len(scores) - self.max_per_image
thresh = np.partition(scores, kth)[kth]
for j in range(1, self.opt.num_classes + 1):
keep_inds = results[j][:, 4] >= thresh
results[j] = results[j][keep_inds]
return results
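# Illustrative note (not in the original source): with num_classes == 2,
# `detections` is a list of dicts such as {1: (N1, 5) array, 2: (N2, 5) array},
# each row being [x1, y1, x2, y2, score]. When the combined number of scores
# exceeds self.max_per_image, np.partition finds the score threshold that
# keeps only the max_per_image highest-scoring boxes across all classes.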
def debug(self, debugger, images, dets, output, scale=1):
detection = dets.copy()
detection[:, :, :4] *= self.opt.down_ratio
hm = output['hm']
hm = F.sigmoid(hm)
if self.opt.channel_last:
hm = F.transpose(hm, (0, 3, 1, 2))
for i in range(1):
if self.opt.mixed_precision:
# Removing pad from input image for drawing
img = images[i][:, :, :3]
if not self.opt.channel_last:
img = images[i].transpose(1, 2, 0)
img = ((img * self.std + self.mean) * 255).astype(np.uint8)
pred = debugger.gen_colormap(hm[i].d)
debugger.add_blend_img(img, pred, 'pred_hm_{:.1f}'.format(scale))
debugger.add_img(img, img_id='out_pred_{:.1f}'.format(scale))
for k in range(len(dets[i])):
if detection[i, k, 4] > self.opt.vis_thresh:
debugger.add_coco_bbox(
detection[i, k, :4],
detection[i, k, -1],
detection[i, k, 4],
img_id='out_pred_{:.1f}'.format(scale),
)
for j in range(hm[i].shape[0]):
hmap = hm[i][j].d
hmap *= 255
hmap = hmap.astype('uint8')
print("max at channel {}:".format(j), np.max(hmap))
hmap = cv2.applyColorMap(hmap, cv2.COLORMAP_JET)
debugger.add_img(
hmap, img_id='heatmap_{}_{:.1f}'.format(j, scale)
)
def show_results(self, debugger, image, results):
debugger.add_img(image, img_id='ctdet')
for j in range(1, self.opt.num_classes + 1):
for bbox in results[j]:
if bbox[4] > self.opt.vis_thresh:
debugger.add_coco_bbox(
bbox[:4], j - 1, bbox[4], img_id='ctdet'
)
debugger.show_all_imgs(path=self.opt.save_dir) | null |
403 | """Handle omega (specimen rotation) metadata
* OmegaWedges class specifies omega metadata in wedges
"""
import numpy as np
from .baseclass import ImageSeries
OMEGA_KEY = 'omega'
class OmegaImageSeries(ImageSeries):
"""ImageSeries with omega metadata"""
DFLT_TOL = 1.0e-6
TAU = 360
def __init__(self, ims):
"""This class is initialized with an existing imageseries"""
# check for omega metadata
if OMEGA_KEY in ims.metadata:
self._omega = ims.metadata[OMEGA_KEY]
if len(ims) != self._omega.shape[0]:
msg = 'omega array mismatch: array has %s frames, expecting %s'
msg = msg % (self._omega.shape[0], len(ims))
raise OmegaSeriesError(msg)
else:
raise OmegaSeriesError('Imageseries has no omega metadata')
super(OmegaImageSeries, self).__init__(ims)
self._make_wedges()
def _make_wedges(self, tol=DFLT_TOL):
nf = len(self)
om = self.omega
# find the frames where the wedges break
starts = [0]
delta = om[0, 1] - om[0, 0]
omlast = om[0, 1]
for f in range(1, nf):
if delta <= 0:
raise OmegaSeriesError('omega array must be increasing')
# check whether delta changes or ranges not contiguous
d = om[f,1] - om[f,0]
if (np.abs(d - delta) > tol) or (np.abs(om[f,0] - omlast) > tol):
starts.append(f)
delta = d
omlast = om[f, 1]
starts.append(nf)
nw = len(starts) - 1
nf0 = 0
self._wedge_om = np.zeros((nw, 3))
self._wedge_f = np.zeros((nw, 2), dtype=int)
self._omegawedges = OmegaWedges(nf)
for s in range(nw):
ostart = om[starts[s], 0]
ostop = om[starts[s + 1] - 1, 1]
steps = starts[s+1] - starts[s]
self._omegawedges.addwedge(ostart, ostop, steps)
#
delta = (ostop - ostart)/steps
self._wedge_om[s, :] = (ostart, ostop, delta)
self._wedge_f[s, 0] = nf0
self._wedge_f[s, 1] = steps
nf0 += steps
assert(nf0 == nf)
@property
def omega(self):
"""return omega range array (nframes, 2)"""
return self._omega
@property
def omegawedges(self):
return self._omegawedges
@property
def nwedges(self):
return self.omegawedges.nwedges
def wedge(self, i):
"""return i'th wedge as a dictionary"""
d = self.omegawedges.METHOD_NAME[i]
delta = (d['ostop'] - d['ostart'])/d['nsteps']
d.update(delta=delta)
return d
def omega_to_frame(self, om):
"""Return frame and wedge which includes given omega, -1 if not found"""
f = -1
w = -1
for i in range(len(self._wedge_om)):
omin = self._wedge_om[i, 0]
omax = self._wedge_om[i, 1]
omcheck = omin + np.mod(om - omin, self.TAU)
if omcheck < omax:
odel = self._wedge_om[i, 2]
f = self._wedge_f[i,0] + int(np.floor((omcheck - omin)/odel))
w = i
break
return f, w
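# Worked example (illustrative, not from the source): for a single wedge
# covering 0-180 degrees in 0.25 degree steps, an input omega of 365.5
# wraps to 5.5 via omin + mod(om - omin, 360) and maps to frame
# int(floor(5.5 / 0.25)) == 22 in wedge 0.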
def omegarange_to_frames(self, omin, omax):
"""Return list of frames for range of omegas"""
noframes = ()
f0, w0 = self.omega_to_frame(omin)
if w0 < 0:
return noframes
f1, w1 = self.omega_to_frame(omax)
if w1 < 0:
return noframes
# if same wedge, require frames be increasing
if (w0 == w1) and (f1 > f0):
return list(range(f0, f1+1))
# case: adjacent wedges with 2pi jump in omega
w0max = self._wedge_om[w0, 1]
w1min = self._wedge_om[w1, 0]
if np.mod(np.abs(w1min - w0max), self.TAU) < self.DFLT_TOL:
r0 = list(range(f0, self._wedge_f[w0, 0] + self._wedge_f[w0, 1]))
r1 = list(range(self._wedge_f[w1, 0], f1 + 1))
return r0 + r1
return noframes
class OmegaWedges(object):
"""piecewise linear omega ranges"""
def __init__(self, nframes):
"""Constructor for OmegaWedge"""
self.nframes = nframes
self._wedges = []
#
# ============================== API
#
@property
def omegas(self):
"""n x 2 array of omega values, one per frame"""
if self.nframes != self.wframes:
msg = "number of frames (%s) does not match "\
"number of wedge frames (%s)" %(self.nframes, self.wframes)
raise OmegaSeriesError(msg)
oa = np.zeros((self.nframes, 2))
wstart = 0
for w in self.METHOD_NAME:
ns = w['nsteps']
wr = list(range(wstart, wstart + ns))
wa0 = np.linspace(w['ostart'], w['ostop'], ns + 1)
oa[wr, 0] = wa0[:-1]
oa[wr, 1] = wa0[1:]
wstart += ns
return oa
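# Illustrative example (assumed values, not from the source): after
#   w = OmegaWedges(3); w.addwedge(0, 90, 3)
# w.omegas is [[0, 30], [30, 60], [60, 90]] -- one (start, stop) pair per frame.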
@property
def nwedges(self):
"""number of wedges"""
return len(self._wedges)
@property
def METHOD_NAME(self):
"""list of wedges (dictionaries)"""
return self._wedges
def addwedge(self, ostart, ostop, nsteps, loc=None):
"""add wedge to list"""
d = dict(ostart=ostart, ostop=ostop, nsteps=nsteps)
if loc is None:
loc = self.nwedges
self.METHOD_NAME.insert(loc, d)
def delwedge(self, i):
"""delete wedge number i"""
self.METHOD_NAME.pop(i)
@property
def wframes(self):
"""number of frames in wedges"""
wf = [w['nsteps'] for w in self.METHOD_NAME]
return int(np.sum(wf))
def save_omegas(self, fname):
"""save omegas to text file"""
np.save(fname, self.omegas)
pass # end class
class OmegaSeriesError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value) | null |
404 | # Copyright (c) ZenML GmbH 2023. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Integration tests for step run models."""
import inspect
import random
import string
from typing import TYPE_CHECKING
from tests.integration.functional.conftest import step_with_logs
from tests.integration.functional.zen_stores.utils import (
constant_int_output_test_step,
int_plus_one_test_step,
)
from zenml.enums import ExecutionStatus
if TYPE_CHECKING:
from zenml.client import Client
from zenml.models.step_run_models import StepRunResponseModel
from zenml.pipelines.base_pipeline import BasePipeline
def test_step_run_linkage(clean_client: "Client", one_step_pipeline):
"""Integration test for `step.run` property."""
step_ = constant_int_output_test_step()
pipe: "BasePipeline" = one_step_pipeline(step_)
pipe.run()
# Non-cached run
pipeline_run = pipe.model.last_run
step_run = pipeline_run.steps["step_"]
assert step_run.run == pipeline_run
# Cached run
pipe.run()
pipeline_run_2 = pipe.model.last_run
step_run_2 = pipeline_run_2.steps["step_"]
assert step_run_2.status == ExecutionStatus.CACHED
def test_step_run_parent_steps_linkage(
clean_client: "Client", connected_two_step_pipeline
):
"""Integration test for `step.parent_steps` property."""
pipeline_instance = connected_two_step_pipeline(
step_1=constant_int_output_test_step(),
step_2=int_plus_one_test_step(),
)
pipeline_instance.run()
pipeline_run = pipeline_instance.model.last_run
step_1 = pipeline_run.steps["step_1"]
step_2 = pipeline_run.steps["step_2"]
assert step_1.parent_steps == []
assert step_2.parent_steps == [step_1]
def test_step_run_has_source_code(clean_client, connected_two_step_pipeline):
"""Test that the step run has correct source code."""
pipeline_instance = connected_two_step_pipeline(
step_1=constant_int_output_test_step(),
step_2=int_plus_one_test_step(),
)
pipeline_instance.run()
pipeline_run = clean_client.get_pipeline(
"connected_two_step_pipeline"
).runs[0]
step_1 = pipeline_run.steps["step_1"]
step_2 = pipeline_run.steps["step_2"]
assert step_1.source_code == inspect.getsource(
constant_int_output_test_step.entrypoint
)
assert step_2.source_code == inspect.getsource(
int_plus_one_test_step.entrypoint
)
def test_step_run_with_too_long_source_code_is_truncated(
clean_client, connected_two_step_pipeline, mocker
):
"""Test that the step source code gets truncated if it is too long."""
random_source = "".join(random.choices(string.ascii_uppercase, k=1000000))
mocker.patch("zenml.steps.base_step.BaseStep.source_code", random_source)
pipeline_instance = connected_two_step_pipeline(
step_1=constant_int_output_test_step(),
step_2=int_plus_one_test_step(),
)
pipeline_instance.run()
pipeline_run = clean_client.get_pipeline(
"connected_two_step_pipeline"
).runs[0]
step_1 = pipeline_run.steps["step_1"]
step_2 = pipeline_run.steps["step_2"]
assert step_1.source_code == random_source[:1000] + "..."
assert step_2.source_code == random_source[:1000] + "..."
def test_step_run_has_docstring(clean_client, connected_two_step_pipeline):
"""Test that the step run has correct docstring."""
pipeline_instance = connected_two_step_pipeline(
step_1=constant_int_output_test_step(),
step_2=int_plus_one_test_step(),
)
pipeline_instance.run()
pipeline_run = clean_client.get_pipeline(
"connected_two_step_pipeline"
).runs[0]
step_1 = pipeline_run.steps["step_1"]
step_2 = pipeline_run.steps["step_2"]
assert step_1.docstring == constant_int_output_test_step.entrypoint.__doc__
assert step_2.docstring == int_plus_one_test_step.entrypoint.__doc__
def test_step_run_with_too_long_docstring_is_truncated(
clean_client, connected_two_step_pipeline, mocker
):
"""Test that the step docstring gets truncated if it is too long."""
random_docstring = "".join(
random.choices(string.ascii_uppercase, k=1000000)
)
mocker.patch("zenml.steps.base_step.BaseStep.docstring", random_docstring)
pipeline_instance = connected_two_step_pipeline(
step_1=constant_int_output_test_step(),
step_2=int_plus_one_test_step(),
)
pipeline_instance.run()
pipeline_run = clean_client.get_pipeline(
"connected_two_step_pipeline"
).runs[0]
step_1 = pipeline_run.steps["step_1"]
step_2 = pipeline_run.steps["step_2"]
assert step_1.docstring == random_docstring[:1000] + "..."
assert step_2.docstring == random_docstring[:1000] + "..."
def test_disabling_step_logs(clean_client: "Client", one_step_pipeline):
"""Test that disabling step logs works."""
# By default, step logs should be enabled
step_ = step_with_logs()
pipe: "BasePipeline" = one_step_pipeline(step_)
pipe.configure(enable_cache=False)
pipe.run()
_assert_step_logs_enabled(pipe)
# Test disabling step logs on pipeline level
pipe.configure(enable_step_logs=False)
pipe.run()
_assert_step_logs_disabled(pipe)
pipe.configure(enable_step_logs=True)
pipe.run()
_assert_step_logs_enabled(pipe)
# Test disabling step logs on step level
# This should override the pipeline level setting
step_.configure(enable_step_logs=False)
pipe.run()
_assert_step_logs_disabled(pipe)
step_.configure(enable_step_logs=True)
pipe.run()
_assert_step_logs_enabled(pipe)
# Test disabling step logs on run level
# This should override both the pipeline and step level setting
pipe.run(enable_step_logs=False)
_assert_step_logs_disabled(pipe)
pipe.configure(enable_step_logs=False)
step_.configure(enable_step_logs=False)
pipe.run(enable_step_logs=True)
_assert_step_logs_enabled(pipe)
def _assert_step_logs_enabled(pipe: "BasePipeline"):
"""Assert that step logs were enabled in the last run."""
assert METHOD_NAME(pipe).logs
def _assert_step_logs_disabled(pipe: "BasePipeline"):
"""Assert that step logs were disabled in the last run."""
assert not METHOD_NAME(pipe).logs
def METHOD_NAME(
pipe: "BasePipeline",
) -> "StepRunResponseModel":
"""Get the output of the last run."""
return pipe.model.last_run.steps["step_"] | null |
405 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkclickhouse.endpoint import endpoint_data
class OperateLogHubRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'clickhouse', '2019-11-11', 'OperateLogHub')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_UseLorne(self): # Boolean
return self.get_query_params().get('UseLorne')
def set_UseLorne(self, UseLorne): # Boolean
self.add_query_param('UseLorne', UseLorne)
def get_DeliverName(self): # String
return self.get_query_params().get('DeliverName')
def set_DeliverName(self, DeliverName): # String
self.add_query_param('DeliverName', DeliverName)
def get_DeliverTime(self): # String
return self.get_query_params().get('DeliverTime')
def set_DeliverTime(self, DeliverTime): # String
self.add_query_param('DeliverTime', DeliverTime)
def get_DomainUrl(self): # String
return self.get_query_params().get('DomainUrl')
def set_DomainUrl(self, DomainUrl): # String
self.add_query_param('DomainUrl', DomainUrl)
def get_Password(self): # String
return self.get_query_params().get('Password')
def set_Password(self, Password): # String
self.add_query_param('Password', Password)
def get_AccessKey(self): # String
return self.get_query_params().get('AccessKey')
def set_AccessKey(self, AccessKey): # String
self.add_query_param('AccessKey', AccessKey)
def get_Create(self): # Boolean
return self.get_query_params().get('Create')
def set_Create(self, Create): # Boolean
self.add_query_param('Create', Create)
def get_TableName(self): # String
return self.get_query_params().get('TableName')
def set_TableName(self, TableName): # String
self.add_query_param('TableName', TableName)
def get_TaskId(self): # String
return self.get_query_params().get('TaskId')
def set_TaskId(self, TaskId): # String
self.add_query_param('TaskId', TaskId)
def get_ProjectName(self): # String
return self.get_query_params().get('ProjectName')
def set_ProjectName(self, ProjectName): # String
self.add_query_param('ProjectName', ProjectName)
def get_SchemaName(self): # String
return self.get_query_params().get('SchemaName')
def set_SchemaName(self, SchemaName): # String
self.add_query_param('SchemaName', SchemaName)
def get_AccessSecret(self): # String
return self.get_query_params().get('AccessSecret')
def set_AccessSecret(self, AccessSecret): # String
self.add_query_param('AccessSecret', AccessSecret)
def get_LogStoreName(self): # String
return self.get_query_params().get('LogStoreName')
def set_LogStoreName(self, LogStoreName): # String
self.add_query_param('LogStoreName', LogStoreName)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_DBClusterId(self): # String
return self.get_query_params().get('DBClusterId')
def set_DBClusterId(self, DBClusterId): # String
self.add_query_param('DBClusterId', DBClusterId)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_LogHubStoress(self): # RepeatList
return self.get_query_params().get('LogHubStores')
def set_LogHubStoress(self, LogHubStores): # RepeatList
for depth1 in range(len(LogHubStores)):
if LogHubStores[depth1].get('LogKey') is not None:
self.add_query_param('LogHubStores.' + str(depth1 + 1) + '.LogKey', LogHubStores[depth1].get('LogKey'))
if LogHubStores[depth1].get('FieldKey') is not None:
self.add_query_param('LogHubStores.' + str(depth1 + 1) + '.FieldKey', LogHubStores[depth1].get('FieldKey'))
if LogHubStores[depth1].get('Type') is not None:
self.add_query_param('LogHubStores.' + str(depth1 + 1) + '.Type', LogHubStores[depth1].get('Type'))
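# Illustrative note (derived from the loop above): a list such as
# [{'LogKey': 'k1', 'FieldKey': 'f1', 'Type': 'String'}] is flattened into the
# query parameters LogHubStores.1.LogKey=k1, LogHubStores.1.FieldKey=f1 and
# LogHubStores.1.Type=String.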
def METHOD_NAME(self): # Boolean
return self.get_query_params().get('FilterDirtyData')
def set_FilterDirtyData(self, FilterDirtyData): # Boolean
self.add_query_param('FilterDirtyData', FilterDirtyData)
def get_UserName(self): # String
return self.get_query_params().get('UserName')
def set_UserName(self, UserName): # String
self.add_query_param('UserName', UserName) | null |
406 | # Copyright 2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
def GLU(h):
nc = h.shape[1]
nc = nc // 2
return h[:, :nc] * F.sigmoid(h[:, nc:])
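# Illustrative note (not part of the original model code): GLU halves the
# channel dimension by gating one half with the sigmoid of the other, e.g.
#   x = nn.Variable((4, 64, 8, 8))   # hypothetical input
#   y = GLU(x)                       # y.shape == (4, 32, 8, 8)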
def Upsample(h, nmap_out, scope_name, scale=2):
with nn.parameter_scope(scope_name):
def sn_w(w): return PF.spectral_norm(w, dim=0)
h = F.interpolate(h, scale=(scale, scale), mode="nearest")
h = PF.convolution(h, nmap_out*2, (3, 3), pad=(1, 1),
apply_w=sn_w, with_bias=False, name="conv1")
h = PF.batch_normalization(h)
h = GLU(h)
return h
def Downsample(h, nmap_out, scope_name):
with nn.parameter_scope(scope_name):
def sn_w(w): return PF.spectral_norm(w, dim=0)
h = PF.convolution(h, nmap_out, (4, 4), stride=(
2, 2), pad=(1, 1), apply_w=sn_w, with_bias=False)
h = PF.batch_normalization(h)
h = F.leaky_relu(h, 0.2)
return h
def DownsampleComp(h, nmap_out, scope_name):
with nn.parameter_scope(scope_name):
def sn_w(w): return PF.spectral_norm(w, dim=0)
# Main
h0 = PF.convolution(h, nmap_out, (4, 4), stride=(2, 2), pad=(
1, 1), apply_w=sn_w, with_bias=False, name="main_conv1")
h0 = PF.batch_normalization(h0, name="bn_main1")
h0 = F.leaky_relu(h0, 0.2)
h0 = PF.convolution(h0, nmap_out, (3, 3), pad=(
1, 1), apply_w=sn_w, with_bias=False, name="main_conv2")
h0 = PF.batch_normalization(h0, name="bn_main2")
h0 = F.leaky_relu(h0, 0.2)
# Direct
h1 = F.average_pooling(h, (2, 2), stride=(2, 2))
h1 = PF.convolution(h1, nmap_out, (1, 1), apply_w=sn_w,
with_bias=False, name="direct_conv1")
h1 = PF.batch_normalization(h1, name="direct_bn1")
h1 = F.leaky_relu(h1, 0.2)
return (h0 + h1) / 2.0
def SLE(f_large, f_small, scope_name):
with nn.parameter_scope(scope_name):
def sn_w(w): return PF.spectral_norm(w, dim=0)
ada_pool_size = f_small.shape[2] // 4
h = F.average_pooling(f_small, (ada_pool_size, ada_pool_size), stride=(
ada_pool_size, ada_pool_size))
h = PF.convolution(
h, f_large.shape[1], (4, 4), apply_w=sn_w, with_bias=False, name="conv1")
# Following the official implementation, this implementation uses swish instead of LeakyReLU here.
h = h * F.sigmoid(h)
h = PF.convolution(
h, f_large.shape[1], (1, 1), apply_w=sn_w, with_bias=False, name="conv2")
h = F.sigmoid(h)
h = f_large * h
return h
def SimpleDecoder(fea, scope_name):
# Get number of channels
nfc_multi = {4: 16, 8: 8, 16: 4, 32: 2, 64: 2,
128: 1, 256: 0.5, 512: 0.25, 1024: 0.125}
nfc = {}
for k, v in nfc_multi.items():
nfc[k] = int(v*32)
with nn.parameter_scope(scope_name):
def sn_w(w): return PF.spectral_norm(w, dim=0)
h = Upsample(fea, nfc[16], "up8->16")
h = Upsample(h, nfc[32], "up16->32")
h = Upsample(h, nfc[64], "up32->64")
h = Upsample(h, nfc[128], "up64->128")
h = PF.convolution(h, 3, (3, 3), pad=(
1, 1), apply_w=sn_w, with_bias=False, name="conv1")
img = F.tanh(h)
return img
def METHOD_NAME(img, label="real", scope_name="Discriminator", ndf=64):
with nn.parameter_scope(scope_name):
if type(img) is not list:
img_small = F.interpolate(img, output_size=(128, 128))
else:
img_small = img[1]
img = img[0]
def sn_w(w): return PF.spectral_norm(w, dim=0)
# InitLayer: -> 256x256
with nn.parameter_scope("init"):
h = img
if img.shape[2] == 1024:
h = PF.convolution(h, ndf//8, (4, 4), stride=(2, 2),
pad=(1, 1), apply_w=sn_w, with_bias=False, name="conv1")
h = F.leaky_relu(h, 0.2)
h = PF.convolution(h, ndf//4, (4, 4), stride=(2, 2),
pad=(1, 1), apply_w=sn_w, with_bias=False, name="conv2")
h = PF.batch_normalization(h)
h = F.leaky_relu(h, 0.2)
elif img.shape[2] == 512:
h = PF.convolution(h, ndf//4, (4, 4), stride=(2, 2),
pad=(1, 1), apply_w=sn_w, with_bias=False, name="conv2")
h = F.leaky_relu(h, 0.2)
else:
h = PF.convolution(h, ndf//4, (3, 3), pad=(1, 1),
apply_w=sn_w, with_bias=False, name="conv3")
h = F.leaky_relu(h, 0.2)
# Calc base features
f_256 = h
f_128 = DownsampleComp(f_256, ndf//2, "down256->128")
f_64 = DownsampleComp(f_128, ndf*1, "down128->64")
f_32 = DownsampleComp(f_64, ndf*2, "down64->32")
# Apply SLE
f_32 = SLE(f_32, f_256, "sle256->32")
f_16 = DownsampleComp(f_32, ndf*4, "down32->16")
f_16 = SLE(f_16, f_128, "sle128->16")
f_8 = DownsampleComp(f_16, ndf*16, "down16->8")
f_8 = SLE(f_8, f_64, "sle64->8")
# Conv + BN + LeakyRely + Conv -> logits (5x5)
with nn.parameter_scope("last"):
h = PF.convolution(f_8, ndf*16, (1, 1),
apply_w=sn_w, with_bias=False, name="conv1")
h = PF.batch_normalization(h)
h = F.leaky_relu(h, 0.2)
logit_large = PF.convolution(
h, 1, (4, 4), apply_w=sn_w, with_bias=False, name="conv2")
# Another path: "down_from_small" in the official code
with nn.parameter_scope("down_from_small"):
h_s = PF.convolution(img_small, ndf//2, (4, 4), stride=(2, 2),
pad=(1, 1), apply_w=sn_w, with_bias=False, name="conv1")
h_s = F.leaky_relu(h_s, 0.2)
h_s = Downsample(h_s, ndf*1, "dfs64->32")
h_s = Downsample(h_s, ndf*2, "dfs32->16")
h_s = Downsample(h_s, ndf*4, "dfs16->8")
fea_dec_small = h_s
logit_small = PF.convolution(
h_s, 1, (4, 4), apply_w=sn_w, with_bias=False, name="conv2")
# Concatenate logits
logits = F.concatenate(logit_large, logit_small, axis=1)
# Reconstruct images
rec_img_big = SimpleDecoder(f_8, "dec_big")
rec_img_small = SimpleDecoder(fea_dec_small, "dec_small")
part_ax2 = F.rand(shape=(img.shape[0],))
part_ax3 = F.rand(shape=(img.shape[0],))
f_16_ax2 = F.where(F.greater_scalar(part_ax2, 0.5),
f_16[:, :, :8, :], f_16[:, :, 8:, :])
f_16_part = F.where(F.greater_scalar(part_ax3, 0.5),
f_16_ax2[:, :, :, :8], f_16_ax2[:, :, :, 8:])
rec_img_part = SimpleDecoder(f_16_part, "dec_part")
if label == "real":
return logits, [rec_img_big, rec_img_small, rec_img_part], [part_ax2, part_ax3]
else:
return logits | null |
407 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
"""Entity implementing the glue between the policy engine and the rest of the system."""
import traceback
from typing import TYPE_CHECKING
from .policy_local import PolicyLocal
from ..dispatch import LogAdapter, LOG_INFO, LOG_DEBUG, LOG_ERROR, LOG_WARNING
if TYPE_CHECKING:
from ..management.agent import Agent
class PolicyManager:
"""
"""
def __init__(self, agent: 'Agent') -> None:
"""
"""
self._agent = agent
self._policy_local = PolicyLocal(self)
self.log_adapter = LogAdapter("POLICY")
self._use_hostname_patterns = False
def log(self, level, text):
info = traceback.extract_stack(limit=2)[0] # Caller frame info
self.log_adapter.log(level, text, info[0], info[1])
def _log(self, level: int, text: str) -> None:
info = traceback.extract_stack(limit=3)[0] # Caller's caller frame info
self.log_adapter.log(level, text, info[0], info[1])
def log_debug(self, text):
self._log(LOG_DEBUG, text)
def log_info(self, text: str) -> None:
self._log(LOG_INFO, text)
def log_error(self, text):
self._log(LOG_ERROR, text)
def log_warning(self, text):
self._log(LOG_WARNING, text)
def get_agent(self):
return self._agent
def get_use_hostname_patterns(self):
return self._use_hostname_patterns
def set_use_hostname_patterns(self, v: bool) -> None:
self._use_hostname_patterns = v
self._policy_local.use_hostname_patterns = v
#
# Management interface to create a ruleset
#
def create_ruleset(self, attributes):
"""
Create named policy ruleset
@param[in] attributes: from config
"""
self._policy_local.create_ruleset(attributes)
#
# Management interface to delete a ruleset
#
def delete_ruleset(self, id):
"""
Delete named policy ruleset
@param[in] id: ruleset name
"""
self._policy_local.policy_delete(id)
#
# Management interface to update a ruleset
#
def update_ruleset(self, attributes):
"""
Update named policy ruleset
@param[in] id: ruleset name
"""
self._policy_local.create_ruleset(attributes)
#
# Management interface to set the default vhost
#
def set_default_vhost(self, name: str) -> None:
"""
Set default application
@param name:
@return:
"""
self._policy_local.set_default_vhost(name)
#
# Runtime query interface
#
def lookup_vhost_alias(self, vhost_in):
"""
Resolve given vhost name to vhost settings name.
If the incoming name is a vhost hostname then return the same name.
If the incoming name is a vhost alias hostname then return the containing vhost name.
If a default vhost is defined then return its name.
:param vhost_in: vhost name to test
:return: name of policy settings vhost to be applied. Or blank if not defined.
"""
return self._policy_local.lookup_vhost_alias(vhost_in)
def lookup_user(self, user, rhost, vhost, conn_name, conn_id):
"""
Lookup function called from C.
Determine if a user on host accessing app through AMQP Open is allowed
according to the policy access rules.
If allowed then return the policy settings name
@param[in] user connection authId
@param[in] rhost connection remote host numeric IP address as string
@param[in] vhost application user is accessing
@param[in] conn_name connection name for accounting purposes
@param[in] conn_id internal connection id
@return settings user-group name if allowed; "" if not allowed
"""
return self._policy_local.lookup_user(user, rhost, vhost, conn_name, conn_id)
def lookup_settings(self, vhost, name, upolicy):
"""
Given a settings name, return the aggregated policy blob.
@param[in] vhost: vhost user is accessing
@param[in] name: user group name
@param[out] upolicy: map that receives the settings
@return settings were retrieved or not
"""
return self._policy_local.lookup_settings(vhost, name, upolicy)
def close_connection(self, conn_id):
"""
The connection identified is closing. Remove it from the connection
accounting tables.
@param conn_id:
@return: none
"""
self._policy_local.close_connection(conn_id)
def METHOD_NAME(self, size: int) -> None:
"""
Policy has set global maxMessageSize.
:param size:
:return: none
"""
self._policy_local.METHOD_NAME(size)
#
#
#
def policy_lookup_vhost_alias(mgr, vhost):
"""
Look up a vhost in the policy database
Called by C code
@param mgr: policy_manager
@param vhost: Incoming vhost from an AMQP Open
@return: name of policy settings vhost to be applied or blank if lookup failed.
"""
return mgr.lookup_vhost_alias(vhost)
#
#
#
def policy_lookup_user(mgr, user, rhost, vhost, conn_name, conn_id):
"""
Look up a user in the policy database
Called by C code
@param mgr:
@param user:
@param rhost:
@param vhost:
@param conn_name:
@return:
"""
return mgr.lookup_user(user, rhost, vhost, conn_name, conn_id)
#
#
#
def policy_close_connection(mgr, conn_id):
"""
Close the connection.
Called by C code
@param mgr:
@param conn_id:
@return:
"""
mgr.close_connection(conn_id)
#
#
#
def policy_lookup_settings(mgr, vhost, name, upolicy):
"""
Return settings for <vhost, usergroup> in upolicy map
@param mgr:
@param vhost:
@param name:
@param upolicy:
@return:
"""
return mgr.lookup_settings(vhost, name, upolicy) | null |
408 | from __future__ import print_function
import IMP
import IMP.test
import sys
import IMP.em
import IMP.multifit
import os
try:
from time import process_time # needs python 3.3 or later
except ImportError:
from time import clock as process_time
class Tests(IMP.test.TestCase):
"""Tests for sampled density maps"""
def METHOD_NAME(self):
"""initialize IMP environment create particles"""
IMP.test.TestCase.METHOD_NAME(self)
# init IMP model ( the environment)
self.mdl = IMP.Model()
self.sel = IMP.atom.CAlphaPDBSelector()
self.mhs = IMP.atom.Hierarchies()
self.mhs.append(
IMP.atom.read_pdb(self.get_input_file_name("1z5s_A.pdb"),
self.mdl, self.sel))
self.mhs.append(
IMP.atom.read_pdb(self.get_input_file_name("1z5s_C1.pdb"),
self.mdl, self.sel))
self.voxel_size = 1.
self.rbs = []
for mh in self.mhs:
IMP.atom.add_radii(mh)
IMP.multifit.add_surface_index(mh, self.voxel_size)
self.rbs.append(IMP.atom.create_rigid_body(mh))
restraints = []
# set the restraint
hub = IMP.core.HarmonicUpperBound(0, 1)
sdps = IMP.core.SphereDistancePairScore(hub)
rdps = IMP.core.RigidBodyDistancePairScore(sdps,
IMP.multifit.RigidLeavesRefiner())
lsc = IMP.container.ListSingletonContainer(self.mdl, self.rbs)
self.c_r = IMP.core.ConnectivityRestraint(rdps, lsc)
restraints.append(self.c_r)
self.sf = IMP.core.RestraintsScoringFunction(restraints)
print("going to evaluate 2")
self.sf.evaluate(False)
self.wev_r = IMP.multifit.create_weighted_excluded_volume_restraint(
self.rbs[0], self.rbs[1])
restraints.append(self.wev_r)
self.sf = IMP.core.RestraintsScoringFunction(restraints)
print("going to evaluate 1")
self.sf.evaluate(False)
print("end setup")
def test_weighted_excluded_volume_restraint(self):
"""Check that weighted excluded volume restraint works"""
# IMP.set_log_level(IMP.VERBOSE)
rotations = [[0.960739, 0.177613, -0.196201, 0.0833023],
[0.98373, -0.0268444, -0.115434, -0.135015],
[0.995413, 0.0545123, -0.0635521, 0.0462946],
[0.99739, 0.0503421, -0.000272958, -0.0517479],
[0.953478, 0.148336, -0.198021, 0.17223],
[0.994239, 0.0570374, -0.0140131, 0.089658],
[0.98401, 0.148801, -0.00937242, 0.0974403],
[0.864194, -0.251682, 0.325705, -0.28938],
[0.952222, -0.155235, -0.253447, -0.0702893],
[0.881024, -0.405223, 0.154964, -0.188619]]
translations = [IMP.algebra.Vector3D(7.37269, -32.1143, -68.123),
IMP.algebra.Vector3D(-26.968, 30.7037, -18.9437),
IMP.algebra.Vector3D(-0.990538, -13.8685, -18.359),
IMP.algebra.Vector3D(-4.79513, 3.45006, -9.39788),
IMP.algebra.Vector3D(18.2439, -40.6508, -62.0047),
IMP.algebra.Vector3D(10.7605, -26.9859, -11.0509),
IMP.algebra.Vector3D(13.1573, -34.8041, -26.6549),
IMP.algebra.Vector3D(46.1903, 87.4569, 58.9899),
IMP.algebra.Vector3D(-32.0694, 24.0887, -33.828),
IMP.algebra.Vector3D(20.1398, 111.715, 60.5263)]
# move chain A and calculate weighted excluded volume
ps1 = IMP.core.get_leaves(self.mhs[0])
IMP.set_log_level(IMP.SILENT) # VERBOSE)
for i in range(10):
t = IMP.algebra.Transformation3D(
IMP.algebra.Rotation3D(
rotations[i][0],
rotations[i][1],
rotations[i][2],
rotations[i][3]),
translations[i])
xyz = IMP.core.XYZ(self.rbs[0].get_particle())
xyz.set_coordinates(t.get_transformed(xyz.get_coordinates()))
# check that when the proteins are not connected (self.c_r>0) the excluded volume
# restraint is bigger than 0
start = process_time()
# to make sure the coordinates were transformed
self.mdl.update()
end = process_time()
print("Time elapsed for PairRestraint evaluation = ", end - start, "seconds")
conn_r = self.c_r.evaluate(False)
w_exc_vol_r = self.wev_r.evaluate(False)
self.assertTrue(
((conn_r == 0.)and(w_exc_vol_r > 0.))or(
(conn_r > 0.)and(w_exc_vol_r == 0.)),
"inconsistency between connectivity and excluded volume restraint")
# print "connectivity "+str(conn_r) + " | excluded-volume " +
# str(w_exc_vol_r)
xyz.set_coordinates(
t.get_inverse().get_transformed(xyz.get_coordinates()))
if __name__ == '__main__':
IMP.test.main() | null |
409 | # -*- coding: utf-8 -*-
import os
import math
import codecs
import torch
import pyonmttok
from onmt.constants import DefaultTokens
from onmt.transforms import TransformPipe
class IterOnDevice(torch.utils.data.IterableDataset):
"""Sent items from `iterable` on `device_id` and yield."""
def __init__(self, iterable, device_id):
super(IterOnDevice, self).__init__()
self.iterable = iterable
self.device_id = device_id
# temporary as long as translation_server and scoring_preparator still use lists
if hasattr(iterable, "transforms"):
self.transform = TransformPipe.build_from(
[iterable.transforms[name] for name in iterable.transforms]
)
@staticmethod
def batch_to_device(tensor_batch, device_id):
"""Move `batch` to `device_id`, cpu if `device_id` < 0."""
device = torch.device(device_id) if device_id >= 0 else torch.device("cpu")
for key in tensor_batch.keys():
if key != "src_ex_vocab":
tensor_batch[key] = tensor_batch[key].to(device)
def __iter__(self):
for tensor_batch in self.iterable:
self.batch_to_device(tensor_batch, self.device_id)
yield tensor_batch
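# Illustrative usage (assumed, not from the original module):
#   batches_on_gpu0 = IterOnDevice(some_dynamic_iterator, device_id=0)
#   for batch in batches_on_gpu0:
#       ...  # all tensor fields except "src_ex_vocab" are now on device 0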
def build_vocab(opt, specials):
"""Build vocabs dict to be stored in the checkpoint
based on vocab files having each line [token, count]
Args:
opt: src_vocab, tgt_vocab, n_src_feats
Return:
vocabs: {'src': pyonmttok.Vocab, 'tgt': pyonmttok.Vocab,
'src_feats': [pyonmttok.Vocab, ...],
'data_task': seq2seq or lm,
'decoder_start_token': DefaultTokens.BOS}
"""
def _pad_vocab_to_multiple(vocab, multiple):
vocab_size = len(vocab)
if vocab_size % multiple == 0:
return vocab
target_size = int(math.ceil(vocab_size / multiple)) * multiple
for i in range(target_size - vocab_size):
vocab.add_token(DefaultTokens.VOCAB_PAD + str(i))
return vocab
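# Illustrative arithmetic (assumed values): with vocab_size_multiple=8, a
# 37-token vocab is padded with 3 placeholder tokens (ceil(37 / 8) * 8 == 40)
# so the embedding/softmax sizes stay aligned to the requested multiple.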
default_specials = opt.default_specials
vocabs = {}
src_vocab = _read_vocab_file(opt.src_vocab, opt.src_words_min_frequency)
src_specials = [
item for item in (default_specials + specials["src"]) if item not in src_vocab
]
if DefaultTokens.SEP in src_specials and (
"<0x0A>" in src_vocab or "Ċ" in src_vocab
):
# This is a hack: if the special separator ⦅newline⦆ is returned by the
# "docify" transform's get_specials, we don't add it when the corresponding
# newline code is already included in the sentencepiece or BPE-with-gpt2-pretok vocab.
src_specials.remove(DefaultTokens.SEP)
src_vocab = pyonmttok.build_vocab_from_tokens(
src_vocab, maximum_size=opt.src_vocab_size, special_tokens=src_specials
)
src_vocab.default_id = src_vocab[DefaultTokens.UNK]
if opt.vocab_size_multiple > 1:
src_vocab = _pad_vocab_to_multiple(src_vocab, opt.vocab_size_multiple)
vocabs["src"] = src_vocab
if opt.share_vocab:
vocabs["tgt"] = src_vocab
else:
tgt_vocab = _read_vocab_file(opt.tgt_vocab, opt.tgt_words_min_frequency)
tgt_specials = [
item
for item in (default_specials + specials["tgt"])
if item not in tgt_vocab
]
if DefaultTokens.SEP in tgt_specials and (
"<0x0A>" in tgt_vocab or "Ċ" in src_vocab
):
tgt_specials.remove(DefaultTokens.SEP)
tgt_vocab = pyonmttok.build_vocab_from_tokens(
tgt_vocab, maximum_size=opt.tgt_vocab_size, special_tokens=tgt_specials
)
tgt_vocab.default_id = tgt_vocab[DefaultTokens.UNK]
if opt.vocab_size_multiple > 1:
tgt_vocab = _pad_vocab_to_multiple(tgt_vocab, opt.vocab_size_multiple)
vocabs["tgt"] = tgt_vocab
if opt.n_src_feats > 0:
src_feats_vocabs = []
for i in range(opt.n_src_feats):
src_f_vocab = _read_vocab_file(f"{opt.src_vocab}_feat{i}", 1)
src_f_vocab = pyonmttok.build_vocab_from_tokens(
src_f_vocab,
maximum_size=0,
minimum_frequency=1,
special_tokens=default_specials,
)
src_f_vocab.default_id = src_f_vocab[DefaultTokens.UNK]
if opt.vocab_size_multiple > 1:
src_f_vocab = _pad_vocab_to_multiple(
src_f_vocab, opt.vocab_size_multiple
)
src_feats_vocabs.append(src_f_vocab)
vocabs["src_feats"] = src_feats_vocabs
vocabs["data_task"] = opt.data_task
vocabs["decoder_start_token"] = opt.decoder_start_token
return vocabs
def _read_vocab_file(vocab_path, min_count):
"""Loads a vocabulary from the given path.
Args:
vocab_path (str): Path to utf-8 text file containing vocabulary.
Each token should be on its own line, optionally followed by a count number
separated by a space if `with_count`. No extra whitespace is allowed.
min_count (int): retains only tokens with min_count frequency.
"""
if not os.path.exists(vocab_path):
raise RuntimeError("Vocabulary not found at {}".format(vocab_path))
else:
with codecs.open(vocab_path, "rb", "utf-8") as f:
lines = [line.strip("\n") for line in f if line.strip("\n")]
first_line = lines[0].split(None, 1)
has_count = len(first_line) == 2 and first_line[-1].isdigit()
if has_count:
vocab = []
for line in lines:
if int(line.split(None, 1)[1]) >= min_count:
vocab.append(line.split(None, 1)[0])
else:
vocab = lines
return vocab
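# Illustrative vocab file (assumed contents, one token per line with an
# optional count):
#   the 102394
#   , 98123
#   hello 57
# With min_count=100 only "the" and "," are retained; without counts the file
# is taken as a plain token list.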
def METHOD_NAME(vocabs):
"""
Convert a dict of pyonmttok vocabs
into a plain text dict to be saved in the checkpoint
"""
vocabs_dict = {}
vocabs_dict["src"] = vocabs["src"].ids_to_tokens
vocabs_dict["tgt"] = vocabs["tgt"].ids_to_tokens
if "src_feats" in vocabs.keys():
vocabs_dict["src_feats"] = [
feat_vocab.ids_to_tokens for feat_vocab in vocabs["src_feats"]
]
vocabs_dict["data_task"] = vocabs["data_task"]
if "decoder_start_token" in vocabs.keys():
vocabs_dict["decoder_start_token"] = vocabs["decoder_start_token"]
else:
vocabs_dict["decoder_start_token"] = DefaultTokens.BOS
return vocabs_dict
def dict_to_vocabs(vocabs_dict):
"""
Convert a dict formatted vocabs (as stored in a checkpoint)
into a dict of pyonmttok vocabs objects.
"""
vocabs = {}
vocabs["data_task"] = vocabs_dict["data_task"]
if "decoder_start_token" in vocabs_dict.keys():
vocabs["decoder_start_token"] = vocabs_dict["decoder_start_token"]
else:
vocabs["decoder_start_token"] = DefaultTokens.BOS
vocabs["src"] = pyonmttok.build_vocab_from_tokens(vocabs_dict["src"])
if vocabs_dict["src"] == vocabs_dict["tgt"]:
vocabs["tgt"] = vocabs["src"]
else:
vocabs["tgt"] = pyonmttok.build_vocab_from_tokens(vocabs_dict["tgt"])
if "src_feats" in vocabs_dict.keys():
vocabs["src_feats"] = []
for feat_vocab in vocabs_dict["src_feats"]:
vocabs["src_feats"].append(pyonmttok.build_vocab_from_tokens(feat_vocab))
return vocabs | null |
410 | from pyrokinetics.gk_code import GKOutputReaderGENE
from pyrokinetics.gk_code.gk_output import GKOutput
from pyrokinetics import template_dir
from pyrokinetics.normalisation import SimulationNormalisation as Normalisation
from pathlib import Path
import numpy as np
import pytest
import subprocess
import shutil
from .utils import array_similar
# TODO mock output tests, similar to GS2
@pytest.fixture(scope="module")
def METHOD_NAME(tmp_path_factory):
tmp_dir = tmp_path_factory.mktemp("test_gk_output_reader_gene")
return tmp_dir
@pytest.fixture
def reader():
return GKOutputReaderGENE()
@pytest.fixture
def gene_output_dir(METHOD_NAME):
mock_dir = METHOD_NAME / "mock_dir"
mock_dir.mkdir()
subprocess.run(
["cp", str(template_dir / "input.gene"), str(mock_dir / "parameters_0000")]
)
return mock_dir
@pytest.fixture
def gene_output_dir_missing_parameters(METHOD_NAME):
mock_dir = METHOD_NAME / "broken_mock_dir"
mock_dir.mkdir()
for f in [mock_dir / f for f in ["nrg_0000", "field_0000"]]:
with open(f, "w") as _:
pass
return mock_dir
@pytest.fixture
def empty_gene_dir(METHOD_NAME):
mock_dir = METHOD_NAME / "empty_dir"
mock_dir.mkdir()
return mock_dir
@pytest.fixture
def not_gene_file(METHOD_NAME):
mock_dir = METHOD_NAME / "nongene_dir"
mock_dir.mkdir()
filename = mock_dir / "hello_world.txt"
with open(filename, "w") as file:
file.write("hello world!")
return filename
def test_verify_gene_output(reader, gene_output_dir):
# Expect exception to be raised if this fails
reader.verify_file_type(gene_output_dir)
def test_verify_gene_missing_parameters(reader, gene_output_dir_missing_parameters):
with pytest.raises(Exception):
reader.verify_file_type(gene_output_dir_missing_parameters)
def test_verify_not_gene_dir(reader, empty_gene_dir):
with pytest.raises(Exception):
reader.verify_file_type(empty_gene_dir)
def test_verify_not_gene_file(reader, not_gene_file):
with pytest.raises(Exception):
reader.verify_file_type(not_gene_file)
@pytest.mark.parametrize(
"input_path",
[
Path("dir/to/parameters_0003"),
Path("dir/to/nrg_0017"),
Path("dir/to/input_file"),
Path("dir_0001/to_5102/parameters_0005"),
],
)
def test_infer_path_from_input_file_gene(input_path):
output_path = GKOutputReaderGENE.infer_path_from_input_file(input_path)
# If the last four chars are digits, expect to find "parameters_####".
# Otherwise, get the dir
last_4_chars = str(input_path)[-4:]
if last_4_chars.isdigit():
assert output_path == input_path.parent / f"parameters_{last_4_chars}"
else:
assert output_path == input_path.parent
# Golden answer tests
# Compares against results obtained using GKCode methods from commit 7d551eaa
# Update: Commit 9eae331 accounts for last time step (7d551eaa-2nd last step)
# Update: Commit 3974780 accounts for correct frequency sign
# Update: Commit d3da468c accounts for new gkoutput structure
# This data was gathered from templates/outputs/GENE_linear
reference_data_commit_hash = "d3da468c"
@pytest.fixture(scope="class")
def golden_answer_reference_data(request):
this_dir = Path(__file__).parent
cdf_path = (
this_dir
/ "golden_answers"
/ f"gene_linear_output_{reference_data_commit_hash}.netcdf4"
)
# ds = get_golden_answer_data(cdf_path)
request.cls.reference_data = GKOutput.from_netcdf(cdf_path)
@pytest.fixture(scope="class")
def golden_answer_data(request):
path = template_dir / "outputs" / "GENE_linear" / "parameters_0001"
norm = Normalisation("test_gk_output_gene")
request.cls.data = GKOutputReaderGENE().read_from_file(path, norm=norm)
@pytest.mark.usefixtures("golden_answer_reference_data", "golden_answer_data")
class TestGENEGoldenAnswers:
def test_coords(self):
"""
Ensure that all reference coords are present in data
"""
for c in self.reference_data.coords:
dtype = self.reference_data[c].dtype
if dtype == "float64" or dtype == "complex128":
assert array_similar(self.reference_data[c], self.data[c])
else:
assert np.array_equal(self.reference_data[c], self.data[c])
@pytest.mark.parametrize(
"var",
[
"phi",
"particle",
"momentum",
"heat",
"eigenvalues",
"eigenfunctions",
"growth_rate",
"mode_frequency",
],
)
def test_data_vars(self, var):
assert array_similar(self.reference_data[var], self.data[var])
@pytest.mark.parametrize(
"attr",
[
"linear",
"gk_code",
"input_file",
"attribute_units",
"title",
"growth_rate_tolerance",
],
)
def test_data_attrs(self, attr):
if isinstance(getattr(self.reference_data, attr), float):
assert np.isclose(
getattr(self.reference_data, attr), getattr(self.data, attr)
)
else:
assert getattr(self.reference_data, attr) == getattr(self.data, attr)
def test_gene_read_omega_file(tmp_path):
"""Can we read growth rate/frequency from `omega` text file"""
shutil.copytree(template_dir / "outputs/GENE_linear", tmp_path, dirs_exist_ok=True)
fields_file = tmp_path / "field_0001"
fields_file.unlink()
norm = Normalisation("test_gk_output_gene")
data = GKOutputReaderGENE().read_from_file(tmp_path / "parameters_0001", norm=norm)
assert np.allclose(
data["growth_rate"].isel(time=-1, ky=0, kx=0).data.magnitude, 1.848
)
assert np.allclose(
data["mode_frequency"].isel(time=-1, ky=0, kx=0).data.magnitude, 12.207
) | null |
411 | from __future__ import absolute_import
import unittest
from simulation import action
from simulation import event
from simulation.avatar.avatar_manager import AvatarManager
from simulation.direction import EAST
from simulation.game_state import GameState
from simulation.interactables.pickups.artefacts import YellowOrbArtefact
from simulation.location import Location
from .dummy_avatar import MoveDummy
from .maps import InfiniteMap, EmptyMap, AvatarMap, PickupMap
ORIGIN = Location(x=0, y=0)
EAST_OF_ORIGIN = Location(x=1, y=0)
NORTH_OF_ORIGIN = Location(x=0, y=1)
class TestAction(unittest.TestCase):
def setUp(self):
self.avatar = MoveDummy(1, ORIGIN, EAST)
self.other_avatar = MoveDummy(2, EAST_OF_ORIGIN, EAST)
self.avatar_manager = AvatarManager()
def test_successful_move_action(self):
# Move north
game_state = GameState(InfiniteMap(), self.avatar_manager)
action.MoveAction(self.avatar, {"x": 0, "y": 1}).process(game_state.world_map)
target_cell = game_state.world_map.get_cell(NORTH_OF_ORIGIN)
assert self.avatar.location == NORTH_OF_ORIGIN
assert self.avatar == target_cell.avatar
assert self.avatar.events == [event.MovedEvent(ORIGIN, NORTH_OF_ORIGIN)]
# Move east
self.setUp()
game_state = GameState(InfiniteMap(), self.avatar_manager)
action.MoveAction(self.avatar, {"x": 1, "y": 0}).process(game_state.world_map)
assert self.avatar.location == EAST_OF_ORIGIN
assert self.avatar.events == [event.MovedEvent(ORIGIN, EAST_OF_ORIGIN)]
def test_successful_move_east_twice_action(self):
game_state = GameState(InfiniteMap(), self.avatar_manager)
action.MoveAction(self.avatar, {"x": 1, "y": 0}).process(game_state.world_map)
action.MoveAction(self.avatar, {"x": 1, "y": 0}).process(game_state.world_map)
assert self.avatar.location == Location(2, 0)
def test_failed_move_action(self):
game_state = GameState(EmptyMap(), self.avatar_manager)
action.MoveAction(self.avatar, {"x": 0, "y": 1}).process(game_state.world_map)
assert self.avatar.location == ORIGIN
assert self.avatar.events == [event.FailedMoveEvent(ORIGIN, NORTH_OF_ORIGIN)]
def test_failed_move_towards_action(self):
game_state = GameState(EmptyMap(), self.avatar_manager)
action.MoveTowardsAction(self.avatar, {"x": 0, "y": 1}).process(game_state.world_map)
assert self.avatar.location == ORIGIN
assert self.avatar.events == [event.FailedMoveEvent(ORIGIN, NORTH_OF_ORIGIN)]
assert self.avatar.logs[-1] == action.MoveTowardsAction.REJECT_MESSAGE
def test_successful_attack_action(self):
game_state = GameState(AvatarMap(self.other_avatar), self.avatar_manager)
action.AttackAction(self.avatar, {"x": 0, "y": 1}).process(game_state.world_map)
target_location = NORTH_OF_ORIGIN
damage_dealt = 1
assert self.avatar.location == ORIGIN
assert self.other_avatar.location == EAST_OF_ORIGIN
assert self.other_avatar.times_died == 0
assert self.other_avatar.health == 4
assert self.avatar.events == [event.PerformedAttackEvent(self.other_avatar, target_location, damage_dealt)]
assert self.other_avatar.events == [event.ReceivedAttackEvent(self.avatar, damage_dealt)]
def test_successful_multiple_attack_actions(self):
game_state = GameState(AvatarMap(self.other_avatar), self.avatar_manager)
action.AttackAction(self.avatar, {"x": 0, "y": 1}).process(game_state.world_map)
assert self.other_avatar.events == [event.ReceivedAttackEvent(self.avatar, 1)]
action.AttackAction(self.avatar, {"x": 0, "y": 1}).process(game_state.world_map)
assert self.other_avatar.events == [
event.ReceivedAttackEvent(self.avatar, 1),
event.ReceivedAttackEvent(self.avatar, 1),
]
assert self.avatar.location == ORIGIN
assert self.other_avatar.location == EAST_OF_ORIGIN
assert self.other_avatar.times_died == 0
assert self.other_avatar.health == 3
def test_failed_attack_action(self):
game_state = GameState(InfiniteMap(), self.avatar_manager)
action.AttackAction(self.avatar, {"x": 0, "y": 1}).process(game_state.world_map)
target_location = NORTH_OF_ORIGIN
assert self.avatar.location == ORIGIN
assert self.other_avatar.location == EAST_OF_ORIGIN
assert self.avatar.events == [event.FailedAttackEvent(target_location)]
assert self.other_avatar.events == []
def test_avatar_dies(self):
self.other_avatar.health = 1
game_state = GameState(AvatarMap(self.other_avatar), self.avatar_manager)
action.AttackAction(self.avatar, {"x": 0, "y": 1}).process(game_state.world_map)
target_location = NORTH_OF_ORIGIN
damage_dealt = 1
assert self.avatar.events == [event.PerformedAttackEvent(self.other_avatar, target_location, damage_dealt)]
assert self.other_avatar.events == [event.ReceivedAttackEvent(self.avatar, damage_dealt)]
assert self.avatar.location == ORIGIN
assert self.other_avatar.health == 0
assert self.other_avatar.times_died == 1
assert self.other_avatar.location == Location(10, 10)
def METHOD_NAME(self):
game_state = GameState(InfiniteMap(), self.avatar_manager)
action.WaitAction(self.avatar).process(game_state.world_map)
assert self.avatar.location == ORIGIN
def test_successful_pickup_action(self):
game_state = GameState(PickupMap(YellowOrbArtefact), self.avatar_manager)
game_state.world_map.setup_cell(self.avatar.location)
artefact = game_state.world_map.get_cell(self.avatar.location).interactable
assert artefact.in_backpack == False
action.PickupAction(self.avatar).process(game_state.world_map)
assert self.avatar.events == [event.PickedUpEvent({"type": "yellow_orb"})]
assert artefact.in_backpack == True
def test_failed_pickup_action(self):
game_state = GameState(InfiniteMap(), self.avatar_manager)
action.PickupAction(self.avatar).process(game_state.world_map)
assert self.avatar.events == [event.FailedPickupEvent()]
def test_failed_pickup_action_if_backpack_full(self):
game_state = GameState(PickupMap(YellowOrbArtefact), self.avatar_manager)
game_state.world_map.setup_cell(self.avatar.location)
artefact = game_state.world_map.get_cell(self.avatar.location).interactable
self.avatar.backpack = [YellowOrbArtefact for _ in range(self.avatar.BACKPACK_SIZE)]
action.PickupAction(self.avatar).process(game_state.world_map)
assert self.avatar.events == [event.FailedPickupEvent()]
assert artefact.in_backpack == False
def test_successful_drop_action(self):
game_state = GameState(PickupMap(YellowOrbArtefact), self.avatar_manager)
game_state.world_map.setup_cell(self.avatar.location)
artefact = game_state.world_map.get_cell(self.avatar.location).interactable
self.avatar.backpack = [artefact]
action.DropAction(self.avatar, 0).process(game_state.world_map)
assert self.avatar.events == [event.DroppedEvent(index=0)]
def test_failed_drop_action(self):
game_state = GameState(InfiniteMap(), self.avatar_manager)
action.DropAction(self.avatar, 0).process(game_state.world_map)
assert self.avatar.events == [event.FailedDropEvent()] | null |
412 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from abc import abstractmethod
from typing import List
import numpy as np
from overrides import EnforceOverrides
from archai.discrete_search.api.archai_model import ArchaiModel
class DiscreteSearchSpace(EnforceOverrides):
"""Abstract class for discrete search spaces.
This class serves as a base for implementing search spaces. The class enforces
implementation of five methods: `save_arch`, `load_arch`, `save_model_weights`,
`load_model_weights` and `random_sample`.
Note:
        This class inherits from `EnforceOverrides`, and any overridden methods in the
subclass should be decorated with `@overrides` to ensure they are properly overridden.
Examples:
>>> class MyDiscreteSearchSpace(DiscreteSearchSpace):
>>> def __init__(self) -> None:
>>> super().__init__()
>>>
>>> @overrides
>>> def save_arch(self, arch, file_path) -> None:
>>> torch.save(arch, file_path)
>>>
>>> @overrides
>>> def load_arch(self, file_path) -> ArchaiModel:
>>> return torch.load(file_path)
>>>
>>> @overrides
>>> def save_model_weights(self, model, file_path) -> None:
>>> torch.save(model.state_dict(), file_path)
>>>
>>> @overrides
>>> def load_model_weights(self, model, file_path) -> None:
>>> model.load_state_dict(torch.load(file_path))
>>>
>>> @overrides
>>> def random_sample(self, config) -> ArchaiModel:
>>> return ArchaiModel(config)
"""
@abstractmethod
def save_arch(self, model: ArchaiModel, file_path: str) -> None:
"""Save an architecture to a file without saving the weights.
Args:
model: Model's architecture to save.
file_path: File path to save the architecture.
"""
pass
@abstractmethod
def load_arch(self, file_path: str) -> ArchaiModel:
"""Load from a file an architecture that was saved using `SearchSpace.save_arch()`.
Args:
file_path: File path to load the architecture.
Returns:
Loaded model.
"""
pass
@abstractmethod
def save_model_weights(self, model: ArchaiModel, file_path: str) -> None:
"""Save the weights of a model.
Args:
model: Model to save the weights.
file_path: File path to save the weights.
"""
pass
@abstractmethod
def METHOD_NAME(self, model: ArchaiModel, file_path: str) -> None:
"""Load the weights (created with `SearchSpace.save_model_weights()`) into a model
of the same architecture.
Args:
model: Model to load the weights.
file_path: File path to load the weights.
"""
pass
@abstractmethod
def random_sample(self) -> ArchaiModel:
"""Randomly sample an architecture from the search spaces.
Returns:
Sampled architecture.
"""
pass
class EvolutionarySearchSpace(DiscreteSearchSpace, EnforceOverrides):
"""Abstract class for discrete search spaces compatible with evolutionary algorithms.
The class enforces implementation of two methods: `mutate` and `crossover`.
Note:
        This class inherits from `EnforceOverrides`, and any overridden methods in the
subclass should be decorated with `@overrides` to ensure they are properly overridden.
"""
@abstractmethod
def mutate(self, arch: ArchaiModel) -> ArchaiModel:
"""Mutate an architecture from the search space.
This method should not alter the base model architecture directly,
only generate a new one.
Args:
arch: Base model.
Returns:
Mutated model.
"""
pass
@abstractmethod
def crossover(self, arch_list: List[ArchaiModel]) -> ArchaiModel:
"""Combine a list of architectures into a new one.
Args:
arch_list: List of architectures.
Returns:
Resulting model.
"""
pass
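# A minimal sketch of an EvolutionarySearchSpace subclass (illustrative only: the
# `MyDiscreteSearchSpace` base and the `perturb`/`combine` helpers are hypothetical,
# not part of this API):
#
# >>> class MyEvolutionarySearchSpace(MyDiscreteSearchSpace, EvolutionarySearchSpace):
# >>>     @overrides
# >>>     def mutate(self, arch: ArchaiModel) -> ArchaiModel:
# >>>         new_config = perturb(arch)  # derive a modified copy of the architecture
# >>>         return ArchaiModel(new_config)
# >>>
# >>>     @overrides
# >>>     def crossover(self, arch_list: List[ArchaiModel]) -> ArchaiModel:
# >>>         return ArchaiModel(combine(arch_list))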
class BayesOptSearchSpace(DiscreteSearchSpace, EnforceOverrides):
"""Abstract class for discrete search spaces compatible with Bayesian Optimization algorithms.
The class enforces implementation of a single method: `encode`.
Note:
        This class inherits from `EnforceOverrides`, and any overridden methods in the
subclass should be decorated with `@overrides` to ensure they are properly overridden.
"""
@abstractmethod
def encode(self, arch: ArchaiModel) -> np.ndarray:
"""Encode an architecture into a fixed-length vector representation.
Args:
arch: Model from the search space.
Returns:
Fixed-length vector representation of `arch`.
"""
pass | null |
413 | import datetime as dt
from random import random
from django.core.management.base import BaseCommand
from osf.metrics import (
PreprintView,
PreprintDownload,
)
from osf.models import Preprint
"""
This management command can be run to populate impact with fake
preprint metrics data.
All flags are optional with the script defaulting to 3 preprints from
your local database with metrics for the past 7 days and an average
count of 25 for preprint views/downloads per day.
--preprints: Specify preprint guids
--num_preprints: Specify the number of preprints to use from the database (if
preprint guids aren't specified)
--days: Specify the number of days to write metrics data for
--group_counts: Indicates that metric counts should be grouped
in a single record per preprint per day
--avg_counts: The average number of view/download counts to write
for each preprint per day
Example: docker-compose run --rm web python3 manage.py populate_impact_preprint_metrics --num_preprints 1 --days 5 --group_counts --avg_counts 50
"""
def METHOD_NAME(preprints, dates, avg_counts, group_counts=False):
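    # With group_counts, a single PreprintView/PreprintDownload record per preprint per day
    # carries the whole count; otherwise one record is written per individual view/download.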
for date in dates:
for preprint in preprints:
preprint_view_count = int((avg_counts * 2) * random())
preprint_download_count = int((avg_counts * 2) * random())
if group_counts:
PreprintView.record_for_preprint(
preprint=preprint,
path=preprint.primary_file.path,
timestamp=date,
count=preprint_view_count
)
PreprintDownload.record_for_preprint(
preprint=preprint,
path=preprint.primary_file.path,
timestamp=date,
count=preprint_download_count
)
else:
for count in range(preprint_view_count):
PreprintView.record_for_preprint(
preprint=preprint,
path=preprint.primary_file.path,
timestamp=date
)
for count in range(preprint_download_count):
PreprintDownload.record_for_preprint(
preprint=preprint,
path=preprint.primary_file.path,
timestamp=date
)
class Command(BaseCommand):
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--preprints',
nargs='*',
help='Specify preprints guids'
)
parser.add_argument(
'--num_preprints',
type=int,
default=3,
help='Specify number of preprints to use if not specifying preprints'
)
parser.add_argument(
'--days',
type=int,
default=7,
help='Specify number of past days to write metrics data for'
)
parser.add_argument(
'--group_counts',
action='store_true',
help='Group counts in metric records for fewer ES requests'
)
parser.add_argument(
'--avg_counts',
type=int,
default=25,
help='Average number of counts to write per day per preprint'
)
def handle(self, *args, **options):
days = options.get('days')
num_preprints = options.get('num_preprints')
group_counts = options.get('group_counts')
avg_counts = options.get('avg_counts')
if options.get('preprints'):
preprints = Preprint.objects.filter(guids___id__in=options.get('preprints'))
else:
preprints = Preprint.objects.all()[:num_preprints]
today = dt.datetime.today()
last_x_days = [(today - dt.timedelta(days=num_days)) for num_days in range(0, days)]
METHOD_NAME(preprints, last_x_days, avg_counts, group_counts) | null |
414 | #!/usr/bin/env python3
#
# Copyright (c) 2015 - 2023, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import os
import sys
import unittest
import shlex
import subprocess
import io
import json
import geopmpy.io
import geopmdpy.topo
import geopmdpy.pio
from integration.test import geopm_test_launcher
from integration.test import util
def getSystemConfig():
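    # Merge the settings from the site default and override config files reported by geopmadmin.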
settings = {}
for option in ["--config-default", "--config-override"]:
try:
proc = subprocess.Popen(shlex.split("geopmadmin {}".format(option)),
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
proc.wait()
with proc.stdout:
config_file = proc.stdout.readline().decode()
with open(config_file.strip(), "r") as infile:
settings.update(json.load(infile))
except IOError:
# config_file may not exist and will cause Popen to fail
pass
return settings
def METHOD_NAME():
ret = ''
try:
ret = getSystemConfig()['GEOPM_AGENT']
except LookupError:
pass
return ret
def getSystemConfigPolicy():
geopm_system_config = getSystemConfig()
policy = {}
with open(geopm_system_config['GEOPM_POLICY'], 'r') as infile:
policy = json.load(infile)
return policy
def skip_if_geopmadmin_check_fails():
try:
subprocess.check_call(shlex.split("geopmadmin"),
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except:
return unittest.skip("geopmadmin check failed, there is an issue with the site configuration.")
return lambda func: func
def skip_unless_freq_agent():
agent = ''
try:
agent = METHOD_NAME()
except BaseException as ex:
return unittest.skip('geopmadmin check failed: {}'.format(ex))
if agent not in ['frequency_map']:
return unittest.skip('Requires environment default/override to be configured to cap frequency.')
return lambda func: func
def skip_unless_power_agent():
agent = ''
try:
agent = METHOD_NAME()
except BaseException as ex:
return unittest.skip('geopmadmin check failed: {}'.format(ex))
if agent not in ['power_governor', 'power_balancer']:
return unittest.skip('Requires environment default/override to be configured to cap power.')
return lambda func: func
@util.skip_unless_batch()
@skip_if_geopmadmin_check_fails()
class TestIntegrationPluginStaticPolicy(unittest.TestCase):
"""Test the static policy enforcement feature of the currently
configured RM plugin.
"""
@classmethod
def setUpClass(cls):
cls._tmp_files = []
cls._geopmadminagent = METHOD_NAME()
cls._geopmadminagentpolicy = getSystemConfigPolicy()
@skip_unless_freq_agent()
def test_frequency_cap_enforced(self):
policy_name = None
if self._geopmadminagent == 'frequency_map':
policy_name = 'FREQ_MAX'
try:
test_freq = self._geopmadminagentpolicy[policy_name]
current_freq = geopmdpy.pio.read_signal("MSR::PERF_CTL:FREQ", "board", 0)
self.assertEqual(test_freq, current_freq)
except KeyError:
self.skipTest('Expected frequency cap "{}" for agent missing from policy'.format(policy_name))
@skip_unless_power_agent()
def test_power_cap_enforced(self):
num_pkg = geopmdpy.topo.num_domain('package')
policy_name = 'CPU_POWER_LIMIT'
try:
test_power = self._geopmadminagentpolicy[policy_name] / num_pkg
for pkg in range(num_pkg):
current_power = geopmdpy.pio.read_signal("MSR::PKG_POWER_LIMIT:PL1_POWER_LIMIT", "package", pkg)
self.assertEqual(test_power, current_power)
except KeyError:
self.skipTest('Expected power cap "{}" for agent missing from policy'.format(policy_name))
if __name__ == '__main__':
unittest.main() | null |
415 | ################################################################################
# Creme is a free/open-source Customer Relationship Management software
# Copyright (C) 2009-2022 Hybird
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
from __future__ import annotations
import logging
from typing import TYPE_CHECKING
from django.conf import settings
from django.db import models
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.utils.translation import pgettext_lazy
from creme.creme_core.auth.entity_credentials import EntityCredentials
from creme.creme_core.models import CremeEntity, InstanceBrickConfigItem
from .. import constants
from ..core.graph import (
AbscissaInfo,
OrdinateInfo,
abscissa_constraints,
ordinate_constraints,
)
from ..graph_fetcher_registry import graph_fetcher_registry
if TYPE_CHECKING:
from ..core.graph import ReportGraphHand
logger = logging.getLogger(__name__)
class AbstractReportGraph(CremeEntity):
Group = constants.AbscissaGroup
Aggregator = constants.OrdinateAggregator
name = models.CharField(
pgettext_lazy('reports-graphs', 'Name of the graph'), max_length=100,
)
linked_report = models.ForeignKey(
settings.REPORTS_REPORT_MODEL, editable=False, on_delete=models.CASCADE,
)
# TODO: string IDs instead of integer ?
abscissa_type = models.PositiveIntegerField(
_('X axis (grouping)'), editable=False, choices=Group.choices,
)
abscissa_cell_value = models.CharField(
_('X axis (field)'), max_length=100, editable=False,
)
abscissa_parameter = models.TextField(
_('X axis parameter'), null=True, editable=False,
)
ordinate_type = models.CharField(
_('Y axis (type)'), max_length=100, editable=False,
choices=Aggregator.choices, default='',
)
ordinate_cell_key = models.CharField(
_('Y axis (field)'), max_length=100, editable=False, default='',
)
chart = models.CharField(_('Chart type'), max_length=100, null=True)
asc = models.BooleanField('ASC order', default=True, editable=False) # TODO: not viewable ?
creation_label = _("Create a report's graph")
save_label = pgettext_lazy('reports-graphs', 'Save the graph')
abscissa_constraints = abscissa_constraints
ordinate_constraints = ordinate_constraints
fetcher_registry = graph_fetcher_registry
_hand: ReportGraphHand | None = None
class Meta:
abstract = True
app_label = 'reports'
verbose_name = _("Report's graph")
verbose_name_plural = _("Reports' graphs")
ordering = ['name']
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('reports__view_graph', args=(self.id,))
def get_related_entity(self):
return self.linked_report
def delete(self, *args, **kwargs):
# NB: we call InstanceBrickConfigItem.delete() explicitly to delete
# related BrickDetailviewLocation/BrickHomeLocation/... instances
for ibci in InstanceBrickConfigItem.objects.filter(entity=self.id):
ibci.delete()
super().delete(*args, **kwargs)
@property
def abscissa_info(self) -> AbscissaInfo | None:
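        # Rebuild the X-axis description from the stored fields, validating the stored
        # graph type against the abscissa constraints registered for the report's model.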
report = self.linked_report
assert report is not None
model = report.ct.model_class()
abscissa_constraint = self.abscissa_constraints.get_constraint_by_rgraph_type(
model=model,
rgraph_type=self.abscissa_type,
)
if not abscissa_constraint:
logger.warning(
'AbstractReportGraph.abscissa_info: '
'invalid abscissa info (model=<%s> rgraph_type=%s)',
model, self.abscissa_type,
)
return None
return AbscissaInfo(
cell=abscissa_constraint.cell_class.build(
model,
self.abscissa_cell_value,
),
graph_type=self.abscissa_type,
parameter=self.abscissa_parameter,
)
@abscissa_info.setter
def abscissa_info(self, abs_info: AbscissaInfo):
assert abs_info.cell is not None
self.abscissa_cell_value = abs_info.cell.value
self.abscissa_type = abs_info.graph_type
self.abscissa_parameter = abs_info.parameter
@property
def ordinate_info(self) -> OrdinateInfo | None:
report = self.linked_report
assert report is not None
aggr_id = self.ordinate_type
model = report.ct.model_class()
ordinate_constraint = self.ordinate_constraints.get_constraint_by_aggr_id(
model=model,
aggr_id=aggr_id,
)
if not ordinate_constraint:
logger.warning(
'AbstractReportGraph.ordinate_info: invalid ordinate info (model=<%s> aggr_id=%s)',
model, aggr_id,
)
return None
return OrdinateInfo(
aggr_id=aggr_id,
cell=ordinate_constraint.get_cell(self.ordinate_cell_key, check=False),
)
@ordinate_info.setter
def ordinate_info(self, ord_info: OrdinateInfo):
self.ordinate_type = ord_info.aggr_id
cell = ord_info.cell
self.ordinate_cell_key = cell.key if cell else ''
# TODO: use creme_core.utils.meta.Order
def fetch(self,
user,
extra_q: models.Q | None = None,
order: str = 'ASC',
) -> tuple[list[str], list]:
assert order == 'ASC' or order == 'DESC'
report = self.linked_report
entities = EntityCredentials.filter(
user=user,
queryset=report.ct.get_all_objects_for_this_type(is_deleted=False),
)
if report.filter is not None:
entities = report.filter.filter(entities)
return self.hand.fetch(entities=entities, order=order, user=user, extra_q=extra_q)
@property
def hand(self) -> ReportGraphHand:
from ..core.graph import RGRAPH_HANDS_MAP # Lazy loading
hand = self._hand
if hand is None:
self._hand = hand = RGRAPH_HANDS_MAP[self.abscissa_type](self)
return hand
@property
def model(self) -> type[CremeEntity]:
return self.linked_report.ct.model_class()
def verbose_abscissa(self):
output = self.hand.verbose_abscissa
if self.abscissa_type:
output += f' - {self.hand.verbose_name}'
if self.abscissa_parameter:
output += f' {self.abscissa_parameter}'
return output
def METHOD_NAME(self):
aggregator = self.hand.ordinate
if aggregator.cell:
return f'{aggregator.cell} - {aggregator.verbose_name}'
return aggregator.verbose_name
class ReportGraph(AbstractReportGraph):
class Meta(AbstractReportGraph.Meta):
swappable = 'REPORTS_GRAPH_MODEL' | null |
416 | # Copyright 2017-2022 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import datetime
import tempfile
import time
import xml.etree.ElementTree as ET
from pipeline.api import PipelineAPI, TaskStatus
from pipeline.log import Logger
def get_int_run_param(env_var_name, default_value):
return int(os.getenv(env_var_name, default_value))
HCS_PROCESSING_TASK_NAME = 'HCS processing'
HCS_ACTIVE_PROCESSING_TIMEOUT_MIN = get_int_run_param('HCS_PARSING_ACTIVE_PROCESSING_TIMEOUT_MIN', 360)
HCS_CLOUD_FILES_SCHEMA = os.getenv('HCS_PARSING_CLOUD_FILES_SCHEMA', 's3')
HCS_PROCESSING_OUTPUT_FOLDER = os.getenv('HCS_PARSING_OUTPUT_FOLDER')
HCS_INDEX_FILE_NAME = os.getenv('HCS_PARSING_INDEX_FILE_NAME', 'Index.xml')
HCS_IMAGE_DIR_NAME = os.getenv('HCS_PARSING_IMAGE_DIR_NAME', 'Images')
def get_list_run_param(env_var_name, delimiter=','):
param_elements = os.getenv(env_var_name, '').split(delimiter)
return filter(lambda string: string is not None and len(string.strip()) > 0, param_elements)
def get_bool_run_param(env_var_name, default='false'):
return os.getenv(env_var_name, default) == 'true'
def METHOD_NAME(message):
log_run_info(message, status=TaskStatus.SUCCESS)
def log_run_info(message, status=TaskStatus.RUNNING):
Logger.log_task_event(HCS_PROCESSING_TASK_NAME, message, status)
class HcsFileLogger:
def __init__(self, file_path):
self.file_path = file_path
def log_info(self, message, status=TaskStatus.RUNNING):
log_run_info('[{}] {}'.format(self.file_path, message), status)
class HcsParsingUtils:
@staticmethod
def extract_xml_schema(xml_info_root):
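        # An element tag like '{namespace}Plates' yields the schema prefix '{namespace}'.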
full_schema = xml_info_root.tag
return full_schema[:full_schema.rindex('}') + 1]
@staticmethod
def get_file_without_extension(file_path):
return os.path.splitext(file_path)[0]
@staticmethod
def get_basename_without_extension(file_path):
return HcsParsingUtils.get_file_without_extension(os.path.basename(file_path))
@staticmethod
def get_file_last_modification_time(file_path):
return int(os.stat(file_path).st_mtime)
@staticmethod
def extract_plate_from_hcs_xml(hcs_xml_info_root, hcs_schema_prefix=None):
if not hcs_schema_prefix:
hcs_schema_prefix = HcsParsingUtils.extract_xml_schema(hcs_xml_info_root)
plates_list = hcs_xml_info_root.find(hcs_schema_prefix + 'Plates')
plate = plates_list.find(hcs_schema_prefix + 'Plate')
return plate
@staticmethod
def build_preview_file_path(hcs_root_folder_path, with_id=False):
file_name = HcsParsingUtils.build_preview_file_name(hcs_root_folder_path)
if with_id:
file_name = file_name + '.' + hcs_root_folder_path.split('/')[-1]
preview_file_basename = HcsParsingUtils.replace_special_chars(file_name) + '.hcs'
parent_folder = HCS_PROCESSING_OUTPUT_FOLDER \
if HCS_PROCESSING_OUTPUT_FOLDER is not None \
else os.path.dirname(hcs_root_folder_path)
return os.path.join(parent_folder, preview_file_basename)
@staticmethod
def build_preview_file_name(hcs_root_folder_path):
index_file_abs_path = os.path.join(HcsParsingUtils.get_file_without_extension(hcs_root_folder_path),
HCS_IMAGE_DIR_NAME, HCS_INDEX_FILE_NAME)
hcs_xml_info_root = ET.parse(index_file_abs_path).getroot()
hcs_schema_prefix = HcsParsingUtils.extract_xml_schema(hcs_xml_info_root)
file_name = HcsParsingUtils.get_file_without_extension(hcs_root_folder_path)
name_xml_element = HcsParsingUtils.extract_plate_from_hcs_xml(hcs_xml_info_root, hcs_schema_prefix) \
.find(hcs_schema_prefix + 'Name')
if name_xml_element is not None:
file_pretty_name = name_xml_element.text
if file_pretty_name is not None:
file_name = file_pretty_name
return file_name
@staticmethod
def get_stat_active_file_name(hcs_img_path):
return HcsParsingUtils._get_service_file_name(hcs_img_path, 'hcsparser.inprog')
@staticmethod
def get_stat_file_name(hcs_img_path):
return HcsParsingUtils._get_service_file_name(hcs_img_path, 'hcsparser')
@staticmethod
def get_service_directory(hcs_img_path):
name_without_extension = HcsParsingUtils.get_basename_without_extension(hcs_img_path)
parent_dir = HCS_PROCESSING_OUTPUT_FOLDER \
if HCS_PROCESSING_OUTPUT_FOLDER is not None \
else os.path.dirname(hcs_img_path)
return os.path.join(parent_dir, '.hcsparser', name_without_extension)
@staticmethod
def generate_local_service_directory(hcs_img_path):
name_without_extension = HcsParsingUtils.get_basename_without_extension(hcs_img_path)
return tempfile.mkdtemp(prefix=name_without_extension + '.hcsparser.')
@staticmethod
def create_service_dir_if_not_exist(hcs_img_path):
directory = HcsParsingUtils.get_service_directory(hcs_img_path)
if not os.path.exists(directory):
os.makedirs(directory)
@staticmethod
def _get_service_file_name(hcs_img_path, suffix):
parent_dir = HcsParsingUtils.get_service_directory(hcs_img_path)
parser_flag_file = '.stat.{}'.format(suffix)
return os.path.join(parent_dir, parser_flag_file)
@staticmethod
def active_processing_exceed_timeout(active_stat_file):
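        # The in-progress stat file acts as a heartbeat: if it has not been modified within
        # the timeout window, the previous processing attempt is treated as stale.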
processing_stat_file_modification_date = HcsParsingUtils.get_file_last_modification_time(active_stat_file)
processing_deadline = datetime.datetime.now() - datetime.timedelta(minutes=HCS_ACTIVE_PROCESSING_TIMEOUT_MIN)
return (processing_stat_file_modification_date - time.mktime(processing_deadline.timetuple())) < 0
@staticmethod
def extract_cloud_path(file_path, cloud_scheme=HCS_CLOUD_FILES_SCHEMA):
path_chunks = file_path.split('/cloud-data/', 1)
if len(path_chunks) != 2:
raise RuntimeError('Unable to determine cloud path of [{}]'.format(file_path))
return '{}://{}'.format(cloud_scheme, path_chunks[1])
@staticmethod
def quote_string(string):
return '"{}"'.format(string)
@staticmethod
def replace_special_chars(file_path):
return file_path.replace('/', '|')
@staticmethod
def find_in_xml(element, name):
if element is None:
return None
else:
return element.find(name)
@staticmethod
def find_all_in_xml(element, name):
if element is None:
return []
else:
return element.findall(name)
@staticmethod
def get_hcs_image_folder():
return HCS_IMAGE_DIR_NAME | null |
417 | from typing import Any, Dict, List, Optional, Union
from geoalchemy2.shape import from_shape, to_shape
from shapely.geometry import Polygon
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
from shapely import Point
from src.core.security import get_password_hash, verify_password
from src.crud.base import CRUDBase
from src.db import models
from src.schemas.user import UserCreate, UserUpdate
from src.schemas.isochrone import IsochroneStartingPointCoord
class CRUDUser(CRUDBase[models.User, UserCreate, UserUpdate]):
async def create(self, db: AsyncSession, *, obj_in: UserCreate) -> models.User:
db_obj = models.User.from_orm(obj_in)
db_obj.hashed_password = get_password_hash(obj_in.password)
roles = await db.execute(select(models.Role).filter(models.Role.name.in_(obj_in.roles)))
db_obj.roles = roles.scalars().all()
# combine study_area_ids with active_study_area_id
user_study_area_ids = set()
if obj_in.active_study_area_id:
user_study_area_ids.add(obj_in.active_study_area_id)
user_study_area_ids.update(obj_in.study_areas)
study_areas = await db.execute(
select(models.StudyArea).filter(models.StudyArea.id.in_(user_study_area_ids))
)
db_obj.study_areas = study_areas.scalars().all()
db.add(db_obj)
await db.commit()
await db.refresh(db_obj)
return db_obj
async def update(
self, db: AsyncSession, *, db_obj: models.User, obj_in: Union[UserUpdate, Dict[str, Any]]
) -> models.User:
if isinstance(obj_in, dict):
update_data = obj_in
else:
update_data = obj_in.dict(exclude_unset=True)
if update_data.get("password"):
hashed_password = get_password_hash(update_data["password"])
del update_data["password"]
update_data["hashed_password"] = hashed_password
if update_data.get("roles") or update_data.get("roles") == []:
roles = await db.execute(
select(models.Role).filter(models.Role.name.in_(obj_in.roles))
)
db_obj.roles = roles.scalars().all()
del update_data["roles"]
if update_data.get("study_areas") or update_data.get("study_areas") == []:
study_areas = await db.execute(
select(models.StudyArea).filter(models.StudyArea.id.in_(obj_in.study_areas))
)
db_obj.study_areas = study_areas.scalars().all()
del update_data["study_areas"]
return await super().update(db, db_obj=db_obj, obj_in=update_data)
async def METHOD_NAME(
self, db: AsyncSession, *, email: str, password: str
) -> Optional[models.User]:
user = await self.get_by_key(db, key="email", value=email)
if not user or len(user) == 0:
return None
else:
user = user[0]
if not verify_password(password, user.hashed_password):
return None
return user
async def get_active_study_area(self, db: AsyncSession, user: models.User):
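        # Return the user's active study area with its geometry inverted against the world
        # extent (presumably so the client can shade everything outside the study area),
        # plus the bounds of the heatmap buffer geometry.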
study_area = await CRUDBase(models.StudyArea).get(db, id=user.active_study_area_id)
world_extent = Polygon([[-180, 85], [-180, -85], [180, -85], [180, 85], [-180, 85]])
study_area_geom = to_shape(study_area.geom)
buffer_geom_heatmap = to_shape(study_area.buffer_geom_heatmap)
study_area_crop = world_extent.difference(study_area_geom)
study_area.geom = from_shape(study_area_crop)
study_area_dict = dict(study_area)
study_area_dict["bounds"] = buffer_geom_heatmap.bounds
return study_area_dict
def is_active(self, user: models.User) -> bool:
return user.is_active
def is_superuser(self, user: models.User) -> bool:
role = [r for r in user.roles if r.name == "superuser"]
if len(role) > 0:
return True
return False
user = CRUDUser(models.User) | null |
418 | import logging
from typing import TYPE_CHECKING
from galaxy.tools.actions import (
DefaultToolAction,
OutputCollections,
ToolExecutionCache,
)
if TYPE_CHECKING:
from galaxy.managers.context import ProvidesUserContext
log = logging.getLogger(__name__)
class ModelOperationToolAction(DefaultToolAction):
produces_real_jobs = False
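    # Model operation tools rewrite datasets/collections in Galaxy's database rather than
    # dispatching real jobs, so the job is marked OK (or SKIPPED) below without being queued.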
def check_inputs_ready(self, tool, trans, incoming, history, execution_cache=None, collection_info=None):
if execution_cache is None:
execution_cache = ToolExecutionCache(trans)
current_user_roles = execution_cache.current_user_roles
history, inp_data, inp_dataset_collections, _, _, _ = self._collect_inputs(
tool, trans, incoming, history, current_user_roles, collection_info
)
tool.check_inputs_ready(inp_data, inp_dataset_collections)
def METHOD_NAME(
self,
tool,
trans,
incoming=None,
set_output_hid=False,
overwrite=True,
history=None,
job_params=None,
execution_cache=None,
collection_info=None,
job_callback=None,
skip=False,
**kwargs,
):
incoming = incoming or {}
trans.check_user_activation()
if execution_cache is None:
execution_cache = ToolExecutionCache(trans)
current_user_roles = execution_cache.current_user_roles
(
history,
inp_data,
inp_dataset_collections,
preserved_tags,
preserved_hdca_tags,
all_permissions,
) = self._collect_inputs(tool, trans, incoming, history, current_user_roles, collection_info)
# Build name for output datasets based on tool name and input names
on_text = self._get_on_text(inp_data)
# wrapped params are used by change_format action and by output.label; only perform this wrapping once, as needed
wrapped_params = self._wrapped_params(trans, tool, incoming)
out_data = {}
input_collections = {k: v[0][0] for k, v in inp_dataset_collections.items()}
output_collections = OutputCollections(
trans,
history,
tool=tool,
tool_action=self,
input_collections=input_collections,
dataset_collection_elements=kwargs.get("dataset_collection_elements", None),
on_text=on_text,
incoming=incoming,
params=wrapped_params.params,
job_params=job_params,
tags=preserved_tags,
hdca_tags=preserved_hdca_tags,
)
#
# Create job.
#
job, galaxy_session = self._new_job_for_session(trans, tool, history)
self._produce_outputs(
trans,
tool,
out_data,
output_collections,
incoming=incoming,
history=history,
tags=preserved_tags,
hdca_tags=preserved_hdca_tags,
skip=skip,
)
self._record_inputs(trans, tool, job, incoming, inp_data, inp_dataset_collections)
self._record_outputs(job, out_data, output_collections)
if job_callback:
job_callback(job)
if skip:
job.state = job.states.SKIPPED
else:
job.state = job.states.OK
trans.sa_session.add(job)
# Queue the job for execution
# trans.app.job_manager.job_queue.put( job.id, tool.id )
# trans.log_event( "Added database job action to the job queue, id: %s" % str(job.id), tool_id=job.tool_id )
log.info(f"Calling produce_outputs, tool is {tool}")
return job, out_data, history
def _produce_outputs(
self, trans: "ProvidesUserContext", tool, out_data, output_collections, incoming, history, tags, hdca_tags, skip
):
tag_handler = trans.tag_handler
tool.produce_outputs(
trans,
out_data,
output_collections,
incoming,
history=history,
tags=tags,
hdca_tags=hdca_tags,
tag_handler=tag_handler,
)
mapped_over_elements = output_collections.dataset_collection_elements
if mapped_over_elements:
for name, value in out_data.items():
if name in mapped_over_elements:
value.visible = False
mapped_over_elements[name].hda = value
# We probably need to mark all outputs as skipped, not just the outputs of whatever the database op tools do ?
# This is probably not exactly right, but it might also work in most cases
if skip:
for output_collection in output_collections.out_collections.values():
output_collection.mark_as_populated()
for hdca in output_collections.out_collection_instances.values():
hdca.visible = False
# Would we also need to replace the datasets with skipped datasets?
trans.sa_session.add_all(out_data.values()) | null |
419 | # The pharmpy visualization module
# Since the python visualization and plotting landscape is rapidly
# evolving and there are many different modules to choose from
# all visualization API calls should be made from this module so
# that we could start using another API more easily.
# Design considerations:
# We would like to be able to have interactive plots which currently
# means to select a package that can render to html. The two main
# contenders that can do this are the altair and the bokeh libraries.
# Bokeh seems to have a larger community, but the strength in altair
# is its use of the standard vega-lite format, which decouples the
# creation of a plot from the rendering. Altair plots (or rather vega
# plots) can be changed from the html directly via the online vega
# editor. So using altair for now, but expecting to revisit this
# decision shortly.
# Will provide base functions for creating different types of plots
# or other types of visualizations
from pharmpy.deps import altair as alt
from pharmpy.deps import pandas as pd
_chart_width = 500
_chart_height = 500
def scatter_plot_correlation(df, x, y, tooltip_columns=None, title=""):
if tooltip_columns is None:
tooltip_columns = []
chart = (
alt.Chart(df, width=_chart_width, height=_chart_height)
.mark_circle(size=100)
.encode(alt.X(x), alt.Y(y), tooltip=[x, y] + tooltip_columns)
.properties(
title=title,
)
.interactive()
)
line = (
alt.Chart(pd.DataFrame({x: [min(df[x]), max(df[x])], y: [min(df[y]), max(df[y])]}))
.mark_line()
.encode(
alt.X(x),
alt.Y(y),
)
.interactive()
)
plot = chart + line
plot = plot.configure_title(fontSize=16)
plot = plot.configure_axis(labelFontSize=12, titleFontSize=14)
return plot
def scatter_matrix(df):
"""Scatter matrix plot
Each column will be scatter plotted against all columns.
"""
base = (
alt.Chart(df)
.transform_fold(list(df.columns), as_=['key_x', 'value_x'])
.transform_fold(list(df.columns), as_=['key_y', 'value_y'])
.encode(
x=alt.X('value_y:Q', title=None, scale=alt.Scale(zero=False)),
y=alt.Y('value_x:Q', title=None, scale=alt.Scale(zero=False)),
)
.properties(width=150, height=150)
)
plot = (
alt.layer(
base.mark_circle(),
base.transform_regression('value_y', 'value_x', method='poly', order=4).mark_line(
color='red'
),
)
.facet(
column=alt.Column('key_x:N', sort=list(df.columns), title=None),
row=alt.Row('key_y:N', sort=list(reversed(df.columns)), title=None),
)
.resolve_scale(x='independent', y='independent')
.configure_header(labelFontStyle='bold')
)
return plot
def line_plot(df, x, title='', xlabel='', ylabel='', legend_title=''):
"""Line plot for multiple lines
Parameters
----------
df : pd.DataFrame
DataFrame with one x column and multiple columns with y values
x
Name of the x column
title : str
Plot title
xlabel : str
Label of the x-axis
ylabel : str
Label of the y-axis
legend_title : str
Title of the legend
"""
df = df.melt(id_vars=[x])
plot = (
alt.Chart(df)
.mark_line()
.encode(
alt.X(f'{x}:Q', title=xlabel),
alt.Y('value:Q', title=ylabel),
color=alt.Color(
'variable:N',
legend=alt.Legend(
title=legend_title,
orient='top-left',
fillColor='#EEEEEE',
padding=10,
cornerRadius=10,
),
),
)
.properties(
title=title,
width=800,
height=300,
)
.configure_legend(labelLimit=0)
)
return plot
def histogram(values, title=""):
"""Histogram with percentage on y and a rule at mean
slider for reducing the number of values used.
"""
df = pd.DataFrame({values.name: values, 'num': list(range(1, len(values) + 1))})
slider = alt.binding_range(min=1, max=len(values), step=1, name='Number of samples: ')
selection = alt.selection_point(
bind=slider,
fields=['num'],
name="num",
value=len(values),
)
base = alt.Chart(df).transform_filter('datum.num <= num_num')
plot = (
base.transform_joinaggregate(total='count(*)')
.transform_calculate(pct='1 / datum.total')
.mark_bar()
.encode(alt.X(f'{values.name}:Q', bin=True), alt.Y('sum(pct):Q', axis=alt.Axis(format='%')))
.add_params(selection)
.properties(title=title)
)
rule = base.mark_rule(color='red').encode(x=f'mean({values.name}):Q', size=alt.value(5))
return plot + rule
def METHOD_NAME(df):
"""Facet of one histogram per column with cross filter interaction"""
brush = alt.selection_interval(encodings=['x'])
base = (
alt.Chart()
.mark_bar()
.encode(
x=alt.X(alt.repeat('column'), type='quantitative', bin=alt.Bin(maxbins=20)),
y=alt.Y('count()', axis=alt.Axis(title='')),
)
.properties(width=200, height=150)
)
background = base.encode(color=alt.value('#ddd')).add_params(brush)
highlight = base.transform_filter(brush)
chart = alt.layer(background, highlight, data=df).repeat(column=list(df.columns))
return chart | null |
420 | # Standard Python modules
import os
import unittest
# External modules
from baseclasses import BaseRegTest
import numpy as np
# First party modules
from pygeo import DVConstraints, DVGeometry, geo_utils
class RegTestPyGeo(unittest.TestCase):
N_PROCS = 1
def setUp(self):
# Store the path where this current script lives
        # All paths in the script are relative to this path
# This is needed to support testflo running directories and files as inputs
self.base_path = os.path.dirname(os.path.abspath(__file__))
def make_cylinder_mesh(self, radius=1.0, height=2.0):
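        # Build a triangulated cylinder surface: returns [p0, v1, v2], where each triangle is
        # described by a vertex p0 and the two edge vectors v1 and v2 emanating from it.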
Nazimuth = 1000
Nextrude = 100
Npts = Nazimuth * Nextrude
theta = np.linspace(0, 2 * np.pi, Nazimuth)
z = np.linspace(0, height, Nextrude)
pts = np.zeros((Npts, 3))
# First populate the points
for i in range(Nextrude):
for j in range(Nazimuth):
x = radius * np.cos(theta[j])
y = radius * np.sin(theta[j])
k = i * Nazimuth + j
pts[k] = [x, y, z[i]]
p0 = []
v1 = []
v2 = []
# Now create the triangulation
for i in range(Nextrude - 1):
for j in range(Nazimuth - 1):
cur_level = i * Nazimuth
next_level = (i + 1) * Nazimuth
pA = pts[cur_level + j]
pB = pts[cur_level + j + 1]
pC = pts[next_level + j]
pD = pts[next_level + j + 1]
# Triangle 1
p0.append(pA)
v1.append(pB - pA)
v2.append(pD - pA)
# Triangle 2
p0.append(pA)
v1.append(pC - pA)
v2.append(pD - pA)
p0 = np.vstack(p0)
v1 = np.vstack(v1)
v2 = np.vstack(v2)
return [p0, v1, v2]
def METHOD_NAME(self, file_name, radius=1.0, height=2.0):
# Write duplicate of outerbox FFD
axes = ["i", "k", "j"]
r = radius
h = height
dh = 0.01
slices = np.array(
[
# Slice 1
[[[-r, -r, -dh], [r, -r, -dh]], [[-r, r, -dh], [r, r, -dh]]],
# Slice 2
[[[-r, -r, h + dh], [r, -r, h + dh]], [[-r, r, h + dh], [r, r, h + dh]]],
]
)
N0 = [5]
N1 = [2]
N2 = [2]
geo_utils.write_wing_FFD_file(file_name, slices, N0, N1, N2, axes=axes)
def train_1(self, train=True, refDeriv=True):
self.test_1(train=train, refDeriv=refDeriv)
def test_1(self, train=False, refDeriv=False):
refFile = os.path.join(self.base_path, "ref/test_Cylinder_01.ref")
with BaseRegTest(refFile, train=train) as handler:
handler.root_print("Test 1: Basic FFD, global DVs")
radius = 1.0
height = 10.0
DVCon = DVConstraints()
surf = self.make_cylinder_mesh(radius, height)
DVCon.setSurface(surf)
# DVCon.writeSurfaceTecplot('cylinder_surface.dat')
ffd_name = os.path.join(self.base_path, "../../input_files/cylinder_ffd.xyz")
self.METHOD_NAME(ffd_name, radius, height)
DVGeo = DVGeometry(ffd_name)
nAxPts = DVGeo.addRefAxis("thru", xFraction=0.5, alignIndex="i", raySize=1.0)
def scale_circle(val, geo):
for i in range(nAxPts):
geo.scale["thru"].coef[i] = val[0]
DVGeo.addGlobalDV("scale_circle", func=scale_circle, value=[1])
DVCon.setDVGeo(DVGeo)
leList = [[0, 0, 0], [-radius / 2, 0, height]]
xAxis = [-1, 0, 0]
yAxis = [0, 1, 0]
DVCon.addLERadiusConstraints(leList, nSpan=5, axis=yAxis, chordDir=xAxis, scaled=False)
# DVCon.writeTecplot('cylinder_constraints.dat')
funcs = {}
DVCon.evalFunctions(funcs)
print(funcs)
handler.root_add_dict("funcs1", funcs, rtol=1e-6, atol=1e-6)
DVGeo.setDesignVars({"scale_circle": 0.5})
funcs = {}
DVCon.evalFunctions(funcs)
handler.root_add_dict("funcs2", funcs, rtol=1e-6, atol=1e-6)
print(funcs)
funcsSens = {}
DVCon.evalFunctionsSens(funcsSens)
print(funcsSens)
handler.root_add_dict("funcsSens", funcsSens, rtol=1e-6, atol=1e-6)
print(funcsSens)
def train_spanwise_dvs(self, train=True, refDeriv=True):
self.test_spanwise_dvs(train=train, refDeriv=refDeriv)
def test_spanwise_dvs(self, train=False, refDeriv=False):
refFile = os.path.join(self.base_path, "ref/test_Cylinder_spanwise_dvs.ref")
with BaseRegTest(refFile, train=train) as handler:
handler.root_print("Test 1: Basic FFD, global DVs")
radius = 1.0
height = 10.0
DVCon = DVConstraints()
surf = self.make_cylinder_mesh(radius, height)
DVCon.setSurface(surf)
# DVCon.writeSurfaceTecplot('cylinder_surface.dat')
ffd_name = os.path.join(self.base_path, "../../input_files/cylinder_ffd.xyz")
self.METHOD_NAME(ffd_name, radius, height)
DVGeo = DVGeometry(ffd_name)
DVGeo.addSpanwiseLocalDV("shape", "i", lower=-0.5, upper=0.5, axis="y", scale=1.0)
size = DVGeo._getNDVSpanwiseLocal()
DVCon.setDVGeo(DVGeo)
leList = [[0, 0, 0], [-radius / 2, 0, height]]
xAxis = [-1, 0, 0]
yAxis = [0, 1, 0]
DVCon.addLERadiusConstraints(leList, nSpan=5, axis=yAxis, chordDir=xAxis, scaled=False)
# DVCon.writeTecplot('cylinder_constraints.dat')
funcs = {}
DVCon.evalFunctions(funcs)
print(funcs)
handler.root_add_dict("funcs1", funcs, rtol=1e-6, atol=1e-6)
np.random.seed(0)
DVGeo.setDesignVars({"shape": (np.random.rand(size) - 0.5)})
funcs = {}
DVCon.evalFunctions(funcs)
handler.root_add_dict("funcs2", funcs, rtol=1e-6, atol=1e-6)
print(funcs)
funcsSens = {}
DVCon.evalFunctionsSens(funcsSens)
print(funcsSens)
handler.root_add_dict("funcsSens", funcsSens, rtol=1e-6, atol=1e-6)
print(funcsSens)
if __name__ == "__main__":
unittest.main() | null |
421 | from shared.database.common import *
import random
import string
from shared.database.action.action import Action
TIME_WINDOW_SECONDS_MAPPER = {
'1_minute': 1 * 60,
'5_minutes': 5 * 60,
'10_minutes': 10 * 60,
'30_minutes': 30 * 60,
'1_hours': 60 * 60,
'4_hours': 60 * 60 * 4,
'12_hours': 60 * 60 * 12,
'1_days': 60 * 60 * 24,
}
class Workflow(Base):
"""
Group of actions.
    I.e. a user creates a flow where Action A happens, then Action B.
"""
__tablename__ = 'workflow'
id = Column(Integer, primary_key = True)
name = Column(String())
string_id = Column(String())
time_window = Column(String())
active = Column(Boolean) # Running / not running
archived = Column(Boolean, default = False) # Hide from list
is_new = Column(Boolean, default = True)
kind = Column(String())
trigger_type = Column(String()) # Reference types from ActionFlowTriggerEventQueue
count_events = Column(Integer)
# New Jun 18 2019 foreign key not added yet
directory_id = Column(Integer, ForeignKey('working_dir.id'))
directory = relationship("WorkingDir",
foreign_keys = [directory_id])
first_action_id = Column(BIGINT, ForeignKey('action.id'))
first_action = relationship(Action, foreign_keys = [first_action_id])
last_action_id = Column(BIGINT, ForeignKey('action.id'))
last_action = relationship(Action, foreign_keys = [last_action_id])
project_id = Column(Integer, ForeignKey('project.id'))
project = relationship("Project")
org_id = Column(Integer, ForeignKey('org.id'))
org = relationship("Org", foreign_keys = [org_id])
member_created_id = Column(Integer, ForeignKey('member.id'))
member_created = relationship("Member", foreign_keys = [member_created_id])
member_updated_id = Column(Integer, ForeignKey('member.id'))
member_updated = relationship("Member", foreign_keys = [member_updated_id])
time_created = Column(DateTime, default = datetime.datetime.utcnow)
time_updated = Column(DateTime, onupdate = datetime.datetime.utcnow)
@staticmethod
def new(
session,
project,
org,
name,
member,
trigger_type = None,
time_window = None,
):
# Else create a new one
workflow = Workflow(
active = False,
project = project,
org = org,
name = name,
trigger_type = trigger_type,
time_window = time_window,
member_created = member)
Workflow.update_string_id(
session = session,
workflow = workflow)
session.add(workflow)
session.flush()
return workflow
def has_time_trigger(self, session) -> bool:
first_action = self.METHOD_NAME(session = session)
return first_action and first_action.trigger_data.get('event_name') == 'time_trigger' and first_action.trigger_data.get('cron_expression')
def get_existing_unmodified_flow(
session,
project,
member
):
"""
        Returns an existing unmodified ("new") flow instead of creating another one.
        Relies on update methods setting is_new to False, which could cause issues,
        but it seems wasteful to keep recreating blank flows.
        Filtered by member, so two people creating one in the same project each get their own.
"""
return session.query(Workflow).filter(
Workflow.is_new == True,
Workflow.member_created == member,
Workflow.project == project).first()
def serialize(self):
# Include project id or?
return {
'id': self.id,
'string_id': self.string_id,
'name': self.name,
'trigger_type': self.trigger_type,
'time_window': self.time_window,
'active': self.active,
'time_updated': self.time_updated
}
@staticmethod
def get(session, workflow_id: int):
"""
Gets a single workflow by ID.
:param session:
:param workflow_id:
:return:
"""
return session.query(Workflow).filter(Workflow.id == workflow_id).first()
@staticmethod
def get_by_id(session,
id,
project_id = None):
"""
Must include project id for security check
(This assumes untrusted source)...
"""
return session.query(Workflow).filter(
Workflow.id == id,
Workflow.project_id == project_id).first()
def get_by_string_id(
session,
string_id):
return session.query(Workflow).filter(
Workflow.string_id == string_id).first()
@staticmethod
def list(
session,
project_id,
active_only = None,
archived = False,
trigger_type = None,
limit = 100,
return_kind = "objects"
):
"""
"""
query = session.query(Workflow).filter(
Workflow.archived == False,
Workflow.project_id == project_id)
if active_only is True:
query = query.filter(Workflow.active == True)
if trigger_type is not None:
query = query.filter(Workflow.trigger_type == trigger_type)
if return_kind == "count":
return query.limit(limit).count()
if return_kind == "objects":
return query.order_by(Workflow.time_created).limit(limit).all()
@staticmethod
def update_string_id(
session,
workflow):
# Not super happy with this here...
if workflow.name is None:
workflow.name = "Untitled flow"
workflow.string_id = safe_name(workflow.name)
workflow.string_id += f"_{create_random_string(length=20)}"
def serialize_with_actions(self, session):
data = self.serialize()
actions = Action.list(
session = session,
flow_id = self.id,
project_id = self.project_id,
limit = None
)
data['actions_list'] = [a.serialize() for a in actions]
return data
def METHOD_NAME(self, session) -> Action:
action = session.query(Action).filter(
Action.workflow_id == self.id,
Action.ordinal == 0,
Action.archived == False).first()
return action
# 'abcdefghijklmnopqrstuvwxyz0123456789'
# can add chars in front if needed ie "." etc.
valid_chars = f"{string.ascii_letters}{string.digits}"
def safe_name(name, character_limit = 10):
    # TODO review using unicode normalize, i.e.
# https://gist.github.com/wassname/1393c4a57cfcbf03641dbc31886123b8
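    # e.g. safe_name("My Workflow!") -> "myworkflo": truncate to 10 chars, lowercase,
    # then drop any character that is not an ASCII letter or digit.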
name = name[:character_limit]
name = name.lower() # Email safe no upper case
    # Want to preserve user capitalized things though
safe_name = ""
for char in name:
if char in valid_chars:
safe_name += char
return safe_name
# see auth_api_new copied from there
def create_random_string(length):
# Email safe, so no upper case
return ''.join(random.choice(
string.ascii_lowercase +
string.digits) for x in range(length)) | null |
422 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateHybridMonitorTaskRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cms', '2019-01-01', 'CreateHybridMonitorTask','cms')
self.set_method('POST')
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_TaskName(self): # String
return self.get_query_params().get('TaskName')
def set_TaskName(self, TaskName): # String
self.add_query_param('TaskName', TaskName)
def get_CollectInterval(self): # String
return self.get_query_params().get('CollectInterval')
def METHOD_NAME(self, CollectInterval): # String
self.add_query_param('CollectInterval', CollectInterval)
def get_TargetUserId(self): # String
return self.get_query_params().get('TargetUserId')
def set_TargetUserId(self, TargetUserId): # String
self.add_query_param('TargetUserId', TargetUserId)
def get_CollectTargetType(self): # String
return self.get_query_params().get('CollectTargetType')
def set_CollectTargetType(self, CollectTargetType): # String
self.add_query_param('CollectTargetType', CollectTargetType)
def get_AttachLabelss(self): # RepeatList
return self.get_query_params().get('AttachLabels')
def set_AttachLabelss(self, AttachLabels): # RepeatList
for depth1 in range(len(AttachLabels)):
if AttachLabels[depth1].get('Name') is not None:
self.add_query_param('AttachLabels.' + str(depth1 + 1) + '.Name', AttachLabels[depth1].get('Name'))
if AttachLabels[depth1].get('Value') is not None:
self.add_query_param('AttachLabels.' + str(depth1 + 1) + '.Value', AttachLabels[depth1].get('Value'))
def get_TaskType(self): # String
return self.get_query_params().get('TaskType')
def set_TaskType(self, TaskType): # String
self.add_query_param('TaskType', TaskType)
def get_GroupId(self): # String
return self.get_query_params().get('GroupId')
def set_GroupId(self, GroupId): # String
self.add_query_param('GroupId', GroupId)
def get_TargetUserIdList(self): # String
return self.get_query_params().get('TargetUserIdList')
def set_TargetUserIdList(self, TargetUserIdList): # String
self.add_query_param('TargetUserIdList', TargetUserIdList)
def get_YARMConfig(self): # String
return self.get_query_params().get('YARMConfig')
def set_YARMConfig(self, YARMConfig): # String
self.add_query_param('YARMConfig', YARMConfig)
def get_Namespace(self): # String
return self.get_query_params().get('Namespace')
def set_Namespace(self, Namespace): # String
self.add_query_param('Namespace', Namespace)
def get_SLSProcessConfig(self): # Struct
return self.get_query_params().get('SLSProcessConfig')
def set_SLSProcessConfig(self, SLSProcessConfig): # Struct
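        # Flatten the nested struct into dotted, 1-indexed query parameters,
        # e.g. SLSProcessConfig.Filter.Filters.1.SLSKeyName.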
if SLSProcessConfig.get('Filter') is not None:
if SLSProcessConfig.get('Filter').get('Filters') is not None:
for index1, value1 in enumerate(SLSProcessConfig.get('Filter').get('Filters')):
if value1.get('SLSKeyName') is not None:
self.add_query_param('SLSProcessConfig.Filter.Filters.' + str(index1 + 1) + '.SLSKeyName', value1.get('SLSKeyName'))
if value1.get('Value') is not None:
self.add_query_param('SLSProcessConfig.Filter.Filters.' + str(index1 + 1) + '.Value', value1.get('Value'))
if value1.get('Operator') is not None:
self.add_query_param('SLSProcessConfig.Filter.Filters.' + str(index1 + 1) + '.Operator', value1.get('Operator'))
if SLSProcessConfig.get('Filter').get('Relation') is not None:
self.add_query_param('SLSProcessConfig.Filter.Relation', SLSProcessConfig.get('Filter').get('Relation'))
if SLSProcessConfig.get('Express') is not None:
for index1, value1 in enumerate(SLSProcessConfig.get('Express')):
if value1.get('Alias') is not None:
self.add_query_param('SLSProcessConfig.Express.' + str(index1 + 1) + '.Alias', value1.get('Alias'))
if value1.get('Express') is not None:
self.add_query_param('SLSProcessConfig.Express.' + str(index1 + 1) + '.Express', value1.get('Express'))
if SLSProcessConfig.get('GroupBy') is not None:
for index1, value1 in enumerate(SLSProcessConfig.get('GroupBy')):
if value1.get('SLSKeyName') is not None:
self.add_query_param('SLSProcessConfig.GroupBy.' + str(index1 + 1) + '.SLSKeyName', value1.get('SLSKeyName'))
if value1.get('Alias') is not None:
self.add_query_param('SLSProcessConfig.GroupBy.' + str(index1 + 1) + '.Alias', value1.get('Alias'))
if SLSProcessConfig.get('Statistics') is not None:
for index1, value1 in enumerate(SLSProcessConfig.get('Statistics')):
if value1.get('SLSKeyName') is not None:
self.add_query_param('SLSProcessConfig.Statistics.' + str(index1 + 1) + '.SLSKeyName', value1.get('SLSKeyName'))
if value1.get('Function') is not None:
self.add_query_param('SLSProcessConfig.Statistics.' + str(index1 + 1) + '.Function', value1.get('Function'))
if value1.get('Alias') is not None:
self.add_query_param('SLSProcessConfig.Statistics.' + str(index1 + 1) + '.Alias', value1.get('Alias'))
if value1.get('Parameter2') is not None:
self.add_query_param('SLSProcessConfig.Statistics.' + str(index1 + 1) + '.Parameter2', value1.get('Parameter2'))
if value1.get('Parameter1') is not None:
self.add_query_param('SLSProcessConfig.Statistics.' + str(index1 + 1) + '.Parameter1', value1.get('Parameter1')) | null |
423 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from typing import Optional
import torch
from overrides import EnforceOverrides
from torch import Tensor, nn
from torch.utils.data import DataLoader
from archai.common import ml_utils
from archai.common.apex_utils import ApexUtils
from archai.common.config import Config
from archai.common.ordered_dict_logger import get_global_logger
from archai.supergraph.utils.metrics import Metrics
logger = get_global_logger()
class Tester(EnforceOverrides):
def __init__(self, conf_val:Config, model:nn.Module, apex:ApexUtils)->None:
self._title = conf_val['title']
self._logger_freq = conf_val['logger_freq']
conf_lossfn = conf_val['lossfn']
self.batch_chunks = conf_val['batch_chunks']
self._apex = apex
self.model = model
self._lossfn = ml_utils.get_lossfn(conf_lossfn).to(apex.device)
self._metrics = None
def test(self, test_dl: DataLoader)->Metrics:
logger.pushd(self._title)
self._metrics = self._create_metrics()
# recreate metrics for this run
self._pre_test()
self._test_epoch(test_dl)
self._post_test()
logger.popd()
return self.get_metrics() # type: ignore
def _test_epoch(self, test_dl: DataLoader)->None:
self._metrics.pre_epoch()
self.model.eval()
steps = len(test_dl)
with torch.no_grad(), logger.pushd('steps'):
for step, (x, y) in enumerate(test_dl):
# derived class might alter the mode through pre/post hooks
assert not self.model.training
logger.pushd(step)
self._pre_step(x, y, self._metrics) # pyright: ignore[reportGeneralTypeIssues]
                # divide batch into chunks if needed so it fits in GPU RAM
if self.batch_chunks > 1:
x_chunks, y_chunks = torch.chunk(x, self.batch_chunks), torch.chunk(y, self.batch_chunks)
else:
x_chunks, y_chunks = (x,), (y,)
logits_chunks = []
loss_sum, loss_count = 0.0, 0
for xc, yc in zip(x_chunks, y_chunks):
xc, yc = xc.to(self.METHOD_NAME(), non_blocking=True), yc.to(self.METHOD_NAME(), non_blocking=True)
logits_c = self.model(xc)
                    tupled_out = isinstance(logits_c, tuple) and len(logits_c) >= 2
if tupled_out:
logits_c = logits_c[0]
loss_c = self._lossfn(logits_c, yc)
loss_sum += loss_c.item() * len(logits_c)
loss_count += len(logits_c)
logits_chunks.append(logits_c.detach().cpu()) # pyright: ignore[reportGeneralTypeIssues]
self._post_step(x, y,
ml_utils.join_chunks(logits_chunks),
torch.tensor(loss_sum/loss_count),
steps, self._metrics) # pyright: ignore[reportGeneralTypeIssues]
                # TODO: we possibly need to sync so all replicas are up to date
self._apex.sync_devices()
logger.popd()
self._metrics.post_epoch() # no "val" dataset for the test phase
def get_metrics(self)->Optional[Metrics]:
return self._metrics
def state_dict(self)->dict:
return {
'metrics': self._metrics.state_dict()
}
def METHOD_NAME(self):
return self._apex.device
def load_state_dict(self, state_dict:dict)->None:
self._metrics.load_state_dict(state_dict['metrics'])
def _pre_test(self)->None:
self._metrics.pre_run()
def _post_test(self)->None:
self._metrics.post_run()
def _pre_step(self, x:Tensor, y:Tensor, metrics:Metrics)->None:
metrics.pre_step(x, y)
def _post_step(self, x:Tensor, y:Tensor, logits:Tensor, loss:Tensor,
steps:int, metrics:Metrics)->None:
metrics.post_step(x, y, logits, loss, steps)
def _create_metrics(self)->Metrics:
return Metrics(self._title, self._apex, logger_freq=self._logger_freq)
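# --- Illustrative usage sketch (not part of the original file) ---
# Rough flow for the Tester above. Only the keys read in __init__ ('title',
# 'logger_freq', 'lossfn', 'batch_chunks') are taken from this file; the
# config paths and ApexUtils construction below are assumptions, not archai
# documentation.
#
#   conf_val = conf['nas']['eval']['validation']   # hypothetical config section
#   apex = ApexUtils(conf['common']['apex'])       # hypothetical constructor call
#   tester = Tester(conf_val, model, apex)
#   metrics = tester.test(test_dl)                 # one evaluation pass over test_dl
#   checkpoint = tester.state_dict()               # {'metrics': ...}, restorable via load_state_dict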
| null |
424 | import asyncio
import unittest
from decimal import Decimal
from hummingbot.connector.gateway.gateway_in_flight_order import GatewayInFlightOrder
from hummingbot.core.data_type.common import OrderType, TradeType
from hummingbot.core.data_type.in_flight_order import OrderState, OrderUpdate
s_decimal_0 = Decimal("0")
class GatewayInFlightOrderUnitTests(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.ev_loop = asyncio.get_event_loop()
cls.base_asset = "COINALPHA"
cls.quote_asset = "HBOT"
cls.trading_pair = f"{cls.base_asset}-{cls.quote_asset}"
cls.client_order_id = "someClientOrderId"
cls.exchange_order_id = "someTxHash"
cls.nonce = 1
def test_order_life_cycle_of_token_approval_requests(self):
order: GatewayInFlightOrder = GatewayInFlightOrder(
client_order_id=self.client_order_id,
trading_pair=self.quote_asset,
order_type=OrderType.LIMIT,
trade_type=TradeType.BUY,
creation_timestamp=1652324823,
initial_state=OrderState.PENDING_APPROVAL,
)
        # Assert that the order is in fact an Approval Request
self.assertTrue(order.is_approval_request)
self.assertTrue(order.is_pending_approval)
order_update: OrderUpdate = OrderUpdate(
trading_pair=order.trading_pair,
update_timestamp=1652324824,
new_state=OrderState.APPROVED,
client_order_id=order.client_order_id,
exchange_order_id=self.exchange_order_id,
)
order.update_with_order_update(order_update=order_update)
self.assertFalse(order.is_pending_approval)
def METHOD_NAME(self):
order: GatewayInFlightOrder = GatewayInFlightOrder(
client_order_id=self.client_order_id,
trading_pair=self.quote_asset,
order_type=OrderType.LIMIT,
trade_type=TradeType.BUY,
price=Decimal("1"),
amount=Decimal("1000"),
creation_timestamp=1652324823,
initial_state=OrderState.PENDING_CREATE,
)
# Nonce is not provided upon creation
self.assertEqual(order.nonce, -1)
# Exchange Order Id for GatewayInFlightOrder is only assigned after a TradeUpdate
self.assertIsNone(order.exchange_order_id)
# CancelTxHash is not initialized on creation
self.assertIsNone(order.cancel_tx_hash)
def test_update_creation_transaction_hash_with_order_update(self):
order: GatewayInFlightOrder = GatewayInFlightOrder(
client_order_id=self.client_order_id,
trading_pair=self.quote_asset,
order_type=OrderType.LIMIT,
trade_type=TradeType.BUY,
price=Decimal("1"),
amount=Decimal("1000"),
creation_timestamp=1652324823,
initial_state=OrderState.PENDING_CREATE,
creation_transaction_hash=None,
)
self.assertIsNone(order.creation_transaction_hash)
desired_creation_transaction_hash = "someTransactionHash"
order_update = OrderUpdate(
trading_pair=self.trading_pair,
update_timestamp=1652324823 + 1,
new_state=OrderState.OPEN,
client_order_id=self.client_order_id,
exchange_order_id="someExchangeOrderID",
misc_updates={
"creation_transaction_hash": desired_creation_transaction_hash,
}
)
order.update_with_order_update(order_update=order_update)
self.assertEqual(desired_creation_transaction_hash, order.creation_transaction_hash)
def test_update_cancelation_transaction_hash_with_order_update(self):
order: GatewayInFlightOrder = GatewayInFlightOrder(
client_order_id=self.client_order_id,
trading_pair=self.quote_asset,
order_type=OrderType.LIMIT,
trade_type=TradeType.BUY,
price=Decimal("1"),
amount=Decimal("1000"),
creation_timestamp=1652324823,
initial_state=OrderState.PENDING_CREATE,
)
self.assertIsNone(order.creation_transaction_hash)
desired_cancelation_transaction_hash = "someTransactionHash"
order_update = OrderUpdate(
trading_pair=self.trading_pair,
update_timestamp=1652324823 + 1,
new_state=OrderState.OPEN,
client_order_id=self.client_order_id,
exchange_order_id="someExchangeOrderID",
misc_updates={
"cancelation_transaction_hash": desired_cancelation_transaction_hash,
}
)
order.update_with_order_update(order_update=order_update)
self.assertEqual(desired_cancelation_transaction_hash, order.cancel_tx_hash)
def test_to_and_from_json(self):
base_order = GatewayInFlightOrder(
client_order_id=self.client_order_id,
trading_pair=self.quote_asset,
order_type=OrderType.LIMIT,
trade_type=TradeType.BUY,
price=Decimal("1"),
amount=Decimal("1000"),
creation_timestamp=1652324823,
initial_state=OrderState.PENDING_CREATE,
)
base_order.last_update_timestamp = 1652324824
order_json = base_order.to_json()
derived_order = GatewayInFlightOrder.from_json(order_json)
self.assertEqual(base_order, derived_order) | null |
425 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class AllocateEipAddressRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'AllocateEipAddress','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def METHOD_NAME(self): # String
return self.get_query_params().get('IpAddress')
def set_IpAddress(self, IpAddress): # String
self.add_query_param('IpAddress', IpAddress)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_PublicIpAddressPoolId(self): # String
return self.get_query_params().get('PublicIpAddressPoolId')
def set_PublicIpAddressPoolId(self, PublicIpAddressPoolId): # String
self.add_query_param('PublicIpAddressPoolId', PublicIpAddressPoolId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_ISP(self): # String
return self.get_query_params().get('ISP')
def set_ISP(self, ISP): # String
self.add_query_param('ISP', ISP)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_Zone(self): # String
return self.get_query_params().get('Zone')
def set_Zone(self, Zone): # String
self.add_query_param('Zone', Zone)
def get_Netmode(self): # String
return self.get_query_params().get('Netmode')
def set_Netmode(self, Netmode): # String
self.add_query_param('Netmode', Netmode)
def get_InstanceChargeType(self): # String
return self.get_query_params().get('InstanceChargeType')
def set_InstanceChargeType(self, InstanceChargeType): # String
self.add_query_param('InstanceChargeType', InstanceChargeType)
def get_Period(self): # Integer
return self.get_query_params().get('Period')
def set_Period(self, Period): # Integer
self.add_query_param('Period', Period)
def get_AutoPay(self): # Boolean
return self.get_query_params().get('AutoPay')
def set_AutoPay(self, AutoPay): # Boolean
self.add_query_param('AutoPay', AutoPay)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_Bandwidth(self): # String
return self.get_query_params().get('Bandwidth')
def set_Bandwidth(self, Bandwidth): # String
self.add_query_param('Bandwidth', Bandwidth)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_ActivityId(self): # Long
return self.get_query_params().get('ActivityId')
def set_ActivityId(self, ActivityId): # Long
self.add_query_param('ActivityId', ActivityId)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_InternetChargeType(self): # String
return self.get_query_params().get('InternetChargeType')
def set_InternetChargeType(self, InternetChargeType): # String
self.add_query_param('InternetChargeType', InternetChargeType)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_SecurityProtectionTypess(self): # RepeatList
return self.get_query_params().get('SecurityProtectionTypes')
def set_SecurityProtectionTypess(self, SecurityProtectionTypes): # RepeatList
for depth1 in range(len(SecurityProtectionTypes)):
self.add_query_param('SecurityProtectionTypes.' + str(depth1 + 1), SecurityProtectionTypes[depth1])
def get_PricingCycle(self): # String
return self.get_query_params().get('PricingCycle')
def set_PricingCycle(self, PricingCycle): # String
self.add_query_param('PricingCycle', PricingCycle) | null |
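# --- Illustrative usage sketch (not part of the original request class) ---
# Typical aliyun-python-sdk flow: populate query parameters with the setters
# above and send the request through an AcsClient. Credentials, region, and
# the chosen parameter values are placeholders.
#
#   from aliyunsdkcore.client import AcsClient
#
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = AllocateEipAddressRequest()
#   request.set_Bandwidth('5')
#   request.set_InternetChargeType('PayByTraffic')
#   request.set_InstanceChargeType('PostPaid')
#   response = client.do_action_with_exception(request)   # raw JSON bytes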
426 | import pytest
from osf.management.commands.populate_initial_schema_responses import populate_initial_schema_responses
from osf.models import SchemaResponse, SchemaResponseBlock
from osf.utils.workflows import ApprovalStates, RegistrationModerationStates as RegStates
from osf_tests.factories import ProjectFactory, RegistrationFactory
from osf_tests.utils import get_default_test_schema
DEFAULT_RESPONSES = {
'q1': 'An answer', 'q2': 'Another answer', 'q3': 'A', 'q4': ['E'], 'q5': '', 'q6': [],
}
@pytest.fixture
def control_registration():
return RegistrationFactory()
@pytest.fixture
def test_registration():
registration = RegistrationFactory(schema=get_default_test_schema())
registration.schema_responses.clear()
registration.registration_responses = dict(DEFAULT_RESPONSES)
registration.save()
return registration
@pytest.fixture
def nested_registration(test_registration):
registration = RegistrationFactory(
project=ProjectFactory(parent=test_registration.registered_from),
parent=test_registration
)
registration.schema_responses.clear()
return registration
@pytest.mark.django_db
class TestPopulateInitialSchemaResponses:
def test_schema_response_created(self, test_registration):
assert not test_registration.schema_responses.exists()
count = populate_initial_schema_responses()
assert count == 1
assert test_registration.schema_responses.count() == 1
schema_response = test_registration.schema_responses.get()
assert schema_response.schema == test_registration.registration_schema
assert schema_response.all_responses == test_registration.registration_responses
@pytest.mark.parametrize(
'registration_state, schema_response_state',
[
(RegStates.INITIAL, ApprovalStates.UNAPPROVED),
(RegStates.PENDING, ApprovalStates.PENDING_MODERATION),
(RegStates.ACCEPTED, ApprovalStates.APPROVED),
(RegStates.EMBARGO, ApprovalStates.APPROVED),
(RegStates.PENDING_EMBARGO_TERMINATION, ApprovalStates.APPROVED),
(RegStates.PENDING_WITHDRAW_REQUEST, ApprovalStates.APPROVED),
(RegStates.PENDING_WITHDRAW, ApprovalStates.APPROVED),
(RegStates.WITHDRAWN, ApprovalStates.APPROVED),
(RegStates.REVERTED, ApprovalStates.UNAPPROVED),
(RegStates.REJECTED, ApprovalStates.PENDING_MODERATION),
]
)
def test_schema_response_state(
self, test_registration, registration_state, schema_response_state):
test_registration.moderation_state = registration_state.db_name
test_registration.save()
populate_initial_schema_responses()
schema_response = test_registration.schema_responses.get()
assert schema_response.state == schema_response_state
def test_errors_from_invalid_keys_are_ignored(self, test_registration):
test_registration.registration_responses.update({'invalid_key': 'lolol'})
test_registration.save()
populate_initial_schema_responses()
schema_response = test_registration.schema_responses.get()
assert schema_response.all_responses == DEFAULT_RESPONSES
def test_populate_responses_is_atomic_per_registration(self, test_registration):
invalid_registration = RegistrationFactory()
invalid_registration.schema_responses.clear()
invalid_registration.registered_schema.clear()
count = populate_initial_schema_responses()
assert count == 1
assert test_registration.schema_responses.exists()
assert not invalid_registration.schema_responses.exists()
def test_dry_run(self, test_registration):
        # confirm that the delete works even if the schema_response isn't IN_PROGRESS
test_registration.moderation_state = RegStates.ACCEPTED.db_name
test_registration.save()
with pytest.raises(RuntimeError):
populate_initial_schema_responses(dry_run=True)
assert not test_registration.schema_responses.exists()
assert not SchemaResponse.objects.exists()
assert not SchemaResponseBlock.objects.exists()
def test_batch_size(self):
for _ in range(5):
r = RegistrationFactory()
r.schema_responses.clear()
assert not SchemaResponse.objects.exists()
count = populate_initial_schema_responses(batch_size=3)
assert count == 3
assert SchemaResponse.objects.count() == 3
def test_schema_response_not_created_for_registration_with_response(self, control_registration):
control_registration_response = control_registration.schema_responses.get()
count = populate_initial_schema_responses()
assert count == 0
assert control_registration.schema_responses.get() == control_registration_response
def METHOD_NAME(self, nested_registration):
count = populate_initial_schema_responses()
assert count == 1 # parent registration
assert not nested_registration.schema_responses.exists() | null |
427 | import os
from datetime import datetime
from tempfile import NamedTemporaryFile
from xlrd import XLRDError
from creme.creme_core.tests.base import CremeTestCase
from creme.creme_core.utils.xlrd_utils import XlrdReader
from creme.creme_core.utils.xlwt_utils import XlwtWriter
class XLSUtilsTestCase(CremeTestCase):
files = ('data-xls5.0-95.xls',
'data-xls97-2003.xls',
'data-xlsx.xlsx'
)
current_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
data = [
['Prénom', 'Nom', 'Taille1', 'Taille2', 'Send_Date'],
['Gérard', 'Bouchard', 0.5, 0.5, datetime(2014, 8, 6, 20, 57, 32)],
['Hugo', 'Smett', 122, 122, ''],
['Rémy', 'Rakic', 12, 12, datetime(2014, 8, 6, 19, 48, 32)],
['Florian', 'Fabre', 0.004, 0.004, '51/08/2014 00:00:00'],
['Jean-Michel', 'Armand', 42, 42, datetime(2014, 8, 6, 19, 48, 32)],
['Guillaume', 'Englert', 50, 50, datetime(2014, 8, 6, 19, 48, 32)],
['Jonathan', 'Caruana', -50, -50, datetime(2014, 8, 6, 20, 57, 32)],
]
def get_file_path(self, filename):
return os.path.join(self.current_path, filename)
def test_unknown_filename(self):
with self.assertRaises(IOError):
XlrdReader(filedata=self.get_file_path('unknown.xls'))
def METHOD_NAME(self):
with self.assertRaises(XLRDError) as error:
XlrdReader(filedata=self.get_file_path('data-invalid.xls'))
self.assertEqual(
str(error.exception),
"Unsupported format, or corrupt file: Expected BOF record; found b'this is '"
)
def test_sheet(self):
rd = XlrdReader(filedata=self.get_file_path(self.files[0]))
self.assertIsNotNone(rd.book)
self.assertIsNotNone(rd.sheet)
self.assertEqual(rd.sheet.nrows, len(self.data))
def test_read_next(self):
for filename in self.files:
rd = XlrdReader(filedata=self.get_file_path(filename))
for element in self.data:
self.assertEqual(element, next(rd))
def test_as_list(self):
for filename in self.files:
rd = XlrdReader(filedata=self.get_file_path(filename))
self.assertListEqual(self.data, [*rd])
def test_open_file(self):
for filename in self.files:
with open(self.get_file_path(filename), mode='rb') as file_obj:
file_content = file_obj.read()
rd = XlrdReader(file_contents=file_content)
self.assertEqual([*rd], self.data)
def test_write_and_read(self):
file = NamedTemporaryFile(suffix=".xls")
wt = XlwtWriter()
writerow = wt.writerow
for element in self.data:
writerow(element)
wt.save(file.name)
rd = XlrdReader(filedata=file.name)
self.assertEqual([*rd], self.data)
with open(file.name, mode='rb') as file_obj:
file_content = file_obj.read()
rd = XlrdReader(file_contents=file_content)
self.assertEqual([*rd], self.data)
def test_truncate(self):
content = """Lôrèm ipsum dolor sit amet, consectetur adipiscing elit. Proin ac odio libero.
Praesent sollicitudin, mauris non sagittis tincidunt, magna libero malesuada lectus,
sit amet dictum nulla mi ac justo.
Vivamus laoreet metus eu purus tincidunt, et consectetur justo mattis.
Phasellus egestas a lacus nec pulvinar.
Sed a lectus eleifend, hendrerit ligula nec, aliquet sem.
Quisque nec tortor nec ante pharetra cursus sed facilisis lorem.
Praesent blandit pharetra nulla, id ultrices diam molestie sed.
""" * 100
self.assertGreater(len(content), 32767)
file = NamedTemporaryFile(suffix='.xls')
wt = XlwtWriter()
wt.writerow([content])
with self.assertNoException():
wt.save(file.name)
row = self.get_alone_element(XlrdReader(filedata=file.name))
elt = self.get_alone_element(row)
self.assertEqual(32767, len(elt)) | null |
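# --- Illustrative usage sketch (not part of the original test case) ---
# The write/read round trip exercised by test_write_and_read above, shown in
# isolation; the file name and rows are placeholders.
#
#   writer = XlwtWriter()
#   writer.writerow(['Prénom', 'Taille'])
#   writer.writerow(['Hugo', 122])
#   writer.save('/tmp/example.xls')
#   rows = [*XlrdReader(filedata='/tmp/example.xls')]   # [['Prénom', 'Taille'], ['Hugo', 122]]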
428 | from shared.database.common import *
from sqlalchemy_serializer import SerializerMixin
from sqlalchemy import desc
from sqlalchemy import nullslast
from shared.shared_logger import get_shared_logger
logger = get_shared_logger()
from sqlalchemy.dialects.postgresql import JSONB
class UI_Schema(Base, SerializerMixin):
"""
"""
__tablename__ = 'ui_schema'
id = Column(BIGINT, primary_key = True)
name = Column(String)
note = Column(String)
version = Column(Integer, default = 0)
created_time = Column(DateTime, default = datetime.datetime.utcnow)
last_updated_time = Column(DateTime, onupdate = datetime.datetime.utcnow)
client_created_time = Column(DateTime, nullable = True)
creation_ref_id = Column(String(), nullable = True)
deleted_time = Column(DateTime, nullable = True)
archived = Column(Boolean)
is_visible = Column(Boolean, default=True)
is_public = Column(Boolean, default=False)
deletion_type = Column(String, nullable = True)
change_source = Column(String, nullable = True)
project_id = Column(Integer, ForeignKey('project.id'), index=True)
project = relationship("Project")
member_created_id = Column(Integer, ForeignKey('member.id'))
member_created = relationship("Member", foreign_keys = [member_created_id])
member_updated_id = Column(Integer, ForeignKey('member.id'))
member_updated = relationship("Member", foreign_keys = [member_updated_id])
allowed_instance_type_list = Column(ARRAY(String()))
allowed_instance_template_id_list = Column(ARRAY(Integer()))
# {visible: bool,
# url: example,
# style: example}
global_theme = Column(MutableDict.as_mutable(JSONB))
logo = Column(MutableDict.as_mutable(JSONB))
home = Column(MutableDict.as_mutable(JSONB))
task_list = Column(MutableDict.as_mutable(JSONB))
undo = Column(MutableDict.as_mutable(JSONB))
redo = Column(MutableDict.as_mutable(JSONB))
complete = Column(MutableDict.as_mutable(JSONB))
defer = Column(MutableDict.as_mutable(JSONB))
zoom = Column(MutableDict.as_mutable(JSONB))
label_selector = Column(MutableDict.as_mutable(JSONB))
instance_selector = Column(MutableDict.as_mutable(JSONB))
edit_instance_template = Column(MutableDict.as_mutable(JSONB))
draw_edit = Column(MutableDict.as_mutable(JSONB))
save = Column(MutableDict.as_mutable(JSONB))
next_task = Column(MutableDict.as_mutable(JSONB))
previous_task = Column(MutableDict.as_mutable(JSONB))
guide = Column(MutableDict.as_mutable(JSONB))
brightness_contrast_filters = Column(MutableDict.as_mutable(JSONB))
hotkeys = Column(MutableDict.as_mutable(JSONB))
overflow_menu = Column(MutableDict.as_mutable(JSONB))
settings = Column(MutableDict.as_mutable(JSONB))
attributes = Column(MutableDict.as_mutable(JSONB))
instances = Column(MutableDict.as_mutable(JSONB))
userscripts = Column(MutableDict.as_mutable(JSONB))
nav_bar = Column(MutableDict.as_mutable(JSONB))
left_bar = Column(MutableDict.as_mutable(JSONB))
main_canvas = Column(MutableDict.as_mutable(JSONB))
label_settings = Column(MutableDict.as_mutable(JSONB))
allow_actions = Column(MutableDict.as_mutable(JSONB))
block_actions = Column(MutableDict.as_mutable(JSONB))
time_tracking = Column(MutableDict.as_mutable(JSONB))
custom_buttons = Column(MutableDict.as_mutable(JSONB))
# example actions
#allow_instance_delete
#allow_instance_move
#allow_new_instance_creation
#allow_new_instance_creation
#allow_label_change
#allow_attribute_change
#allow_copy_paste
#allow_new_template_creation
#allow_history_access
#allow_edit_of_complete_task = Column(Boolean)
# These should be rolled into label settings maybe?
#default_to_view_only_mode
#default_to_qa_slideshow
def serialize(self):
# https://github.com/n0nSmoker/SQLAlchemy-serializer
return self.to_dict(rules=(
'-member_created',
'-member_updated',
'-project'))
@staticmethod
def get_by_id(session,
id: int):
return session.query(UI_Schema).filter(
UI_Schema.id == id).first()
@staticmethod
def get(session,
id: int,
project_id: int):
query = session.query(UI_Schema)
query = query.filter(UI_Schema.id == id)
query = query.filter(or_(
UI_Schema.project_id == project_id,
UI_Schema.is_public == True
))
return query.first()
@staticmethod
def METHOD_NAME(
session,
project_id=None,
org=None,
limit=100,
return_kind="objects",
archived = False,
date_to = None, # datetime
date_from = None, # datetime
date_to_string: str = None,
date_from_string: str = None,
name: str = None,
name_match_type: str = "ilike", # substring and helps if case Aa is off
order_by_class_and_attribute = None,
order_by_direction = desc,
public_only = False
):
"""
"""
query = session.query(UI_Schema)
# Assume we must either have public or project id
if public_only is True:
query = query.filter(UI_Schema.is_public == True)
else:
query = query.filter(UI_Schema.project_id == project_id)
if name:
if name_match_type == "ilike":
name_search = f"%{name}%"
query = query.filter(UI_Schema.name.ilike(name_search))
else:
query = query.filter(UI_Schema.name == name)
if date_from or date_to:
if date_from:
query = query.filter(UI_Schema.created_time >= date_from)
if date_to:
query = query.filter(UI_Schema.created_time <= date_to)
elif date_from_string or date_to_string:
query = regular_methods.regular_query(
query=query,
date_from_string=date_from_string,
date_to_string=date_to_string,
base_class=UI_Schema,
created_time_string='last_updated_time'
)
if archived is False:
query = query.filter(or_(
UI_Schema.archived == None,
UI_Schema.archived == False))
if order_by_class_and_attribute:
query = query.order_by(
nullslast(order_by_direction(order_by_class_and_attribute)))
if return_kind == "count":
return query.limit(limit).count()
if return_kind == "objects":
return query.limit(limit).all()
@staticmethod
def new(
member_created: 'Member' = None,
project: 'Project' = None,
client_created_time = None,
creation_ref_id = None,
name = None
) -> 'UI_Schema':
return UI_Schema(**locals()) | null |
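# --- Illustrative usage sketch (not part of the original model) ---
# Minimal create-and-fetch flow for UI_Schema; `session`, `project`, and
# `member` are assumed to come from the surrounding codebase (a regular
# SQLAlchemy session and existing rows).
#
#   schema = UI_Schema.new(member_created=member, project=project, name='Default review UI')
#   session.add(schema)
#   session.flush()
#   fetched = UI_Schema.get(session=session, id=schema.id, project_id=project.id)
#   payload = fetched.serialize()   # plain dict, safe to return from an API endpoint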
429 | import logging
import json
from pymisp import MISPAttribute, MISPEvent, MISPTag, MISPObject
from . import check_input_attribute, checking_error, standard_error_message
from qintel_helper import search_qsentry
logger = logging.getLogger('qintel_qsentry')
logger.setLevel(logging.DEBUG)
moduleinfo = {
'version': '1.0',
'author': 'Qintel, LLC',
'description': 'Query Qintel QSentry for ip intelligence',
'module-type': ['hover', 'expansion']
}
moduleconfig = ['token', 'remote']
misperrors = {'error': 'Error'}
mispattributes = {
'input': ['ip-src', 'ip-dst'],
'output': ['ip-src', 'ip-dst', 'AS', 'freetext'],
'format': 'misp_standard'
}
TAG_COLOR = {
'benign': '#27ae60',
'suspicious': '#e6a902',
'malicious': '#c0392b'
}
CLIENT_HEADERS = {
'User-Agent': f"MISP/{moduleinfo['version']}",
}
def _return_error(message):
misperrors['error'] = message
return misperrors
def _make_tags(enriched_attr, result):
for tag in result['tags']:
color = TAG_COLOR['suspicious']
if tag == 'criminal':
color = TAG_COLOR['malicious']
t = MISPTag()
t.from_dict(**{
'name': f'qintel:tag="{tag}"',
'colour': color
})
enriched_attr.add_tag(**t)
return enriched_attr
def _make_enriched_attr(event, result, orig_attr):
enriched_object = MISPObject('Qintel Threat Enrichment')
enriched_object.add_reference(orig_attr.uuid, 'related-to')
enriched_attr = MISPAttribute()
enriched_attr.from_dict(**{
'value': orig_attr.value,
'type': orig_attr.type,
'distribution': 0,
'object_relation': 'enriched-attr',
'to_ids': orig_attr.to_ids
})
enriched_attr = _make_tags(enriched_attr, result)
enriched_object.add_attribute(**enriched_attr)
comment_attr = MISPAttribute()
comment_attr.from_dict(**{
'value': '\n'.join(result.get('descriptions', [])),
'type': 'text',
'object_relation': 'descriptions',
'distribution': 0
})
enriched_object.add_attribute(**comment_attr)
last_seen = MISPAttribute()
last_seen.from_dict(**{
'value': result.get('last_seen'),
'type': 'datetime',
'object_relation': 'last-seen',
'distribution': 0
})
enriched_object.add_attribute(**last_seen)
event.add_attribute(**orig_attr)
event.add_object(**enriched_object)
return event
def _make_asn_attr(event, result, orig_attr):
asn_object = MISPObject('asn')
asn_object.add_reference(orig_attr.uuid, 'related-to')
asn_attr = MISPAttribute()
asn_attr.from_dict(**{
'type': 'AS',
'value': result.get('asn'),
'object_relation': 'asn',
'distribution': 0
})
asn_object.add_attribute(**asn_attr)
org_attr = MISPAttribute()
org_attr.from_dict(**{
'type': 'text',
'value': result.get('asn_name', 'unknown').title(),
'object_relation': 'description',
'distribution': 0
})
asn_object.add_attribute(**org_attr)
event.add_object(**asn_object)
return event
def _format_hover(event, result):
enriched_object = event.get_objects_by_name('Qintel Threat Enrichment')[0]
tags = ', '.join(result.get('tags'))
enriched_object.add_attribute('Tags', type='text', value=tags)
return event
def _format_result(attribute, result):
event = MISPEvent()
orig_attr = MISPAttribute()
orig_attr.from_dict(**attribute)
event = _make_enriched_attr(event, result, orig_attr)
event = _make_asn_attr(event, result, orig_attr)
return event
def _check_config(config):
if not config:
return False
if not isinstance(config, dict):
return False
if config.get('token', '') == '':
return False
return True
def METHOD_NAME(request):
if not request.get('attribute'):
return f'{standard_error_message}, {checking_error}'
check_reqs = ('type', 'value')
if not check_input_attribute(request['attribute'],
requirements=check_reqs):
return f'{standard_error_message}, {checking_error}'
if request['attribute']['type'] not in mispattributes['input']:
return 'Unsupported attribute type'
def handler(q=False):
if not q:
return False
request = json.loads(q)
config = request.get('config')
if not _check_config(config):
return _return_error('Missing Qintel token')
check_request_error = METHOD_NAME(request)
if check_request_error:
return _return_error(check_request_error)
search_args = {
'token': config['token'],
'remote': config.get('remote')
}
try:
result = search_qsentry(request['attribute']['value'], **search_args)
except Exception as e:
return _return_error(str(e))
event = _format_result(request['attribute'], result)
if not request.get('event_id'):
event = _format_hover(event, result)
event = json.loads(event.to_json())
ret_result = {key: event[key] for key in ('Attribute', 'Object') if key
in event}
return {'results': ret_result}
def introspection():
return mispattributes
def version():
moduleinfo['config'] = moduleconfig
return moduleinfo | null |
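# --- Illustrative usage sketch (not part of the original module) ---
# How a MISP expansion module like this is typically invoked: `q` is a JSON
# string carrying the attribute to enrich plus the module config. The token is
# a placeholder, and the call reaches out to QSentry through search_qsentry.
#
#   query = json.dumps({
#       'attribute': {'type': 'ip-src', 'value': '198.51.100.7'},
#       'config': {'token': '<qsentry-api-token>'},
#       'event_id': '1',
#   })
#   print(handler(q=query))   # {'results': {'Attribute': [...], 'Object': [...]}}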
430 | import mdp
from mdp import PreserveDimNode, numx, VariadicCumulator
import operator
class ClassifierNode(PreserveDimNode):
"""A ClassifierNode can be used for classification tasks that should not
interfere with the normal execution flow. A reason for that is that the
labels used for classification do not form a vector space, and so they don't
make much sense in a flow.
"""
def __init__(self, execute_method=None,
input_dim=None, output_dim=None, dtype=None):
"""Initialize classifier.
execute_method -- Set to string value 'label', 'rank', or 'prob' to
force the corresponding classification method being used instead
of the standard identity execution (which is used when
execute_method has the default value None). This can be used when
the node is last in a flow, the return value from Flow.execute
will then consist of the classification results.
"""
self.execute_method = execute_method
super(ClassifierNode, self).__init__(input_dim=input_dim,
output_dim=output_dim,
dtype=dtype)
### Methods to be implemented by the subclasses
def _label(self, x, *args, **kargs):
raise NotImplementedError
def _prob(self, x, *args, **kargs):
raise NotImplementedError
### User interface to the overwritten methods
def METHOD_NAME(self, x, *args, **kwargs):
"""Returns an array with best class labels.
        By default, subclasses should override _label to implement
        their labelling. The docstring of the '_label' method
        overrides this docstring.
"""
self._pre_execution_checks(x)
return self._label(self._refcast(x), *args, **kwargs)
def prob(self, x, *args, **kwargs):
"""Returns the probability for each datapoint and label
(e.g., [{1:0.1, 2:0.0, 3:0.9}, {1:1.0, 2:0.0, 3:0.0}, ...])
        By default, subclasses should override _prob to implement
        their probability estimate. The docstring of the '_prob' method
        overrides this docstring.
"""
self._pre_execution_checks(x)
return self._prob(self._refcast(x), *args, **kwargs)
def rank(self, x, threshold=None):
"""Returns ordered list with all labels ordered according to prob(x)
(e.g., [[3 1 2], [2 1 3], ...]).
The optional threshold parameter is used to exclude labels having equal
or less probability. E.g. threshold=0 excludes all labels with zero
probability.
"""
all_ranking = []
prob = self.prob(x)
for p in prob:
if threshold is None:
ranking = list(p.items())
else:
ranking = ((k, v) for k, v in list(p.items()) if v > threshold)
result = [k for k, v in
sorted(ranking, key=operator.itemgetter(1), reverse=True)]
all_ranking.append(result)
return all_ranking
def _execute(self, x):
if not self.execute_method:
return x
elif self.execute_method == "label":
return self.METHOD_NAME(x)
elif self.execute_method == "rank":
return self.rank(x)
elif self.execute_method == "prob":
return self.prob(x)
# XXX are the _train and _stop_training functions necessary anymore?
class ClassifierCumulator(VariadicCumulator('data', 'labels'), ClassifierNode):
"""A ClassifierCumulator is a Node whose training phase simply collects
all input data and labels. In this way it is possible to easily implement
batch-mode learning.
The data is accessible in the attribute 'self.data' after
the beginning of the '_stop_training' phase. 'self.tlen' contains
the number of data points collected.
'self.labels' contains the assigned label to each data point.
"""
def __init__(self, input_dim=None, output_dim=None, dtype=None):
super(ClassifierCumulator, self).__init__(input_dim=input_dim,
output_dim=output_dim,
dtype=dtype)
def _check_train_args(self, x, labels):
super(ClassifierCumulator, self)._check_train_args(x, labels)
if (isinstance(labels, (list, tuple, numx.ndarray)) and
len(labels) != x.shape[0]):
msg = ("The number of labels must be equal to the number of "
"datapoints (%d != %d)" % (len(labels), x.shape[0]))
raise mdp.TrainingException(msg)
def _train(self, x, labels):
"""Cumulate all input data in a one dimensional list."""
self.tlen += x.shape[0]
self.data.extend(x.ravel().tolist())
# if labels is a number, all x's belong to the same class
        if not isinstance(labels, (list, tuple, numx.ndarray)):
            labels = [labels] * x.shape[0]
        self.labels.extend(numx.array(labels).ravel().tolist())
def _stop_training(self, *args, **kwargs):
"""Transform the data and labels lists to array objects and reshape them."""
self.data = numx.array(self.data, dtype=self.dtype)
self.data.shape = (self.tlen, self.input_dim)
self.labels = numx.array(self.labels)
        self.labels.shape = (self.tlen,)
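# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical subclass showing how the ClassifierNode API above is
# meant to be used: implement _label/_prob, then call label(), prob(), or
# rank(). ThresholdClassifier is a made-up example, not an mdp built-in.
class ThresholdClassifier(ClassifierNode):
    @staticmethod
    def is_trainable():
        return False
    def _label(self, x):
        # label 1 if the first feature is positive, else 0
        return numx.where(x[:, 0] > 0, 1, 0)
    def _prob(self, x):
        p = 1.0 / (1.0 + numx.exp(-x[:, 0]))
        return [{0: 1.0 - pi, 1: pi} for pi in p]
# clf = ThresholdClassifier()
# clf.label(numx.array([[0.5], [-2.0]]))   # -> array([1, 0])
# clf.rank(numx.array([[0.5], [-2.0]]))    # -> [[1, 0], [0, 1]]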
| null |
431 | #!/usr/bin/env python
# Build the project on Travis CI.
from __future__ import print_function
import errno, os, shutil, subprocess, sys, urllib
from subprocess import call, check_call, Popen, PIPE, STDOUT
def rmtree_if_exists(dir):
try:
shutil.rmtree(dir)
except OSError as e:
if e.errno == errno.ENOENT:
pass
def makedirs_if_not_exist(dir):
try:
os.makedirs(dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def METHOD_NAME():
branch = os.environ['TRAVIS_BRANCH']
if branch != 'master':
print('Branch: ' + branch)
exit(0) # Ignore non-master branches
check_call('curl -s https://deb.nodesource.com/gpgkey/nodesource.gpg.key ' +
'| sudo apt-key add -', shell=True)
check_call('echo "deb https://deb.nodesource.com/node_0.10 precise main" ' +
'| sudo tee /etc/apt/sources.list.d/nodesource.list', shell=True)
check_call(['sudo', 'apt-get', 'update'])
check_call(['sudo', 'apt-get', 'install', 'python-virtualenv', 'nodejs'])
check_call(['sudo', 'npm', 'install', '-g', '[email protected]', 'less-plugin-clean-css'])
deb_file = 'doxygen_1.8.6-2_amd64.deb'
urllib.urlretrieve('http://mirrors.kernel.org/ubuntu/pool/main/d/doxygen/' +
deb_file, deb_file)
check_call(['sudo', 'dpkg', '-i', deb_file])
fmt_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
build = os.environ['BUILD']
if build == 'Doc':
travis = 'TRAVIS' in os.environ
if travis:
METHOD_NAME()
sys.path.insert(0, os.path.join(fmt_dir, 'doc'))
import build
build.create_build_env()
html_dir = build.build_docs()
repo = 'fmtlib.github.io'
if travis and 'KEY' not in os.environ:
# Don't update the repo if building on Travis from an account that
# doesn't have push access.
print('Skipping update of ' + repo)
exit(0)
# Clone the fmtlib.github.io repo.
rmtree_if_exists(repo)
git_url = 'https://github.com/' if travis else '[email protected]:'
check_call(['git', 'clone', git_url + 'fmtlib/{}.git'.format(repo)])
# Copy docs to the repo.
target_dir = os.path.join(repo, 'dev')
rmtree_if_exists(target_dir)
shutil.copytree(html_dir, target_dir, ignore=shutil.ignore_patterns('.*'))
if travis:
check_call(['git', 'config', '--global', 'user.name', 'amplbot'])
check_call(['git', 'config', '--global', 'user.email', '[email protected]'])
# Push docs to GitHub pages.
check_call(['git', 'add', '--all'], cwd=repo)
if call(['git', 'diff-index', '--quiet', 'HEAD'], cwd=repo):
check_call(['git', 'commit', '-m', 'Update documentation'], cwd=repo)
cmd = 'git push'
if travis:
cmd += ' https://[email protected]/fmtlib/fmtlib.github.io.git master'
p = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT, cwd=repo)
# Print the output without the key.
print(p.communicate()[0].replace(os.environ['KEY'], '$KEY'))
if p.returncode != 0:
raise subprocess.CalledProcessError(p.returncode, cmd)
exit(0)
standard = os.environ['STANDARD']
install_dir = os.path.join(fmt_dir, "_install")
build_dir = os.path.join(fmt_dir, "_build")
test_build_dir = os.path.join(fmt_dir, "_build_test")
# Configure library.
makedirs_if_not_exist(build_dir)
cmake_flags = [
'-DCMAKE_INSTALL_PREFIX=' + install_dir, '-DCMAKE_BUILD_TYPE=' + build,
'-DCMAKE_CXX_STANDARD=' + standard
]
check_call(['cmake', '-DFMT_DOC=OFF', '-DFMT_PEDANTIC=ON', '-DFMT_WERROR=ON', fmt_dir] +
cmake_flags, cwd=build_dir)
# Build library.
check_call(['make', '-j4'], cwd=build_dir)
# Test library.
env = os.environ.copy()
env['CTEST_OUTPUT_ON_FAILURE'] = '1'
if call(['make', 'test'], env=env, cwd=build_dir):
with open(os.path.join(build_dir, 'Testing', 'Temporary', 'LastTest.log'), 'r') as f:
print(f.read())
sys.exit(-1)
# Install library.
check_call(['make', 'install'], cwd=build_dir)
# Test installation.
makedirs_if_not_exist(test_build_dir)
check_call(['cmake', os.path.join(fmt_dir, "test", "find-package-test")] +
cmake_flags, cwd=test_build_dir)
check_call(['make', '-j4'], cwd=test_build_dir) | null |
432 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkr_kvstore.endpoint import endpoint_data
class DescribeSlowLogRecordsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'R-kvstore', '2015-01-01', 'DescribeSlowLogRecords','redisa')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_StartTime(self): # String
return self.get_query_params().get('StartTime')
def set_StartTime(self, StartTime): # String
self.add_query_param('StartTime', StartTime)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def METHOD_NAME(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_NodeId(self): # String
return self.get_query_params().get('NodeId')
def set_NodeId(self, NodeId): # String
self.add_query_param('NodeId', NodeId)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_QueryKeyword(self): # String
return self.get_query_params().get('QueryKeyword')
def set_QueryKeyword(self, QueryKeyword): # String
self.add_query_param('QueryKeyword', QueryKeyword)
def get_EndTime(self): # String
return self.get_query_params().get('EndTime')
def set_EndTime(self, EndTime): # String
self.add_query_param('EndTime', EndTime)
def get_OrderBy(self): # String
return self.get_query_params().get('OrderBy')
def set_OrderBy(self, OrderBy): # String
self.add_query_param('OrderBy', OrderBy)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_SlowLogRecordType(self): # String
return self.get_query_params().get('SlowLogRecordType')
def set_SlowLogRecordType(self, SlowLogRecordType): # String
self.add_query_param('SlowLogRecordType', SlowLogRecordType)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_DBName(self): # String
return self.get_query_params().get('DBName')
def set_DBName(self, DBName): # String
self.add_query_param('DBName', DBName)
def get_OrderType(self): # String
return self.get_query_params().get('OrderType')
def set_OrderType(self, OrderType): # String
self.add_query_param('OrderType', OrderType) | null |
433 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DescribeSnapshotGroupsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeSnapshotGroups','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_NextToken(self): # String
return self.get_query_params().get('NextToken')
def set_NextToken(self, NextToken): # String
self.add_query_param('NextToken', NextToken)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_SnapshotGroupIds(self): # RepeatList
return self.get_query_params().get('SnapshotGroupId')
def set_SnapshotGroupIds(self, SnapshotGroupId): # RepeatList
for depth1 in range(len(SnapshotGroupId)):
self.add_query_param('SnapshotGroupId.' + str(depth1 + 1), SnapshotGroupId[depth1])
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_AdditionalAttributess(self): # RepeatList
return self.get_query_params().get('AdditionalAttributes')
def set_AdditionalAttributess(self, AdditionalAttributes): # RepeatList
for depth1 in range(len(AdditionalAttributes)):
self.add_query_param('AdditionalAttributes.' + str(depth1 + 1), AdditionalAttributes[depth1])
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def METHOD_NAME(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_MaxResults(self): # Integer
return self.get_query_params().get('MaxResults')
def set_MaxResults(self, MaxResults): # Integer
self.add_query_param('MaxResults', MaxResults)
def get_Statuss(self): # RepeatList
return self.get_query_params().get('Status')
def set_Statuss(self, Status): # RepeatList
for depth1 in range(len(Status)):
self.add_query_param('Status.' + str(depth1 + 1), Status[depth1]) | null |
434 | # coding=utf-8
# Copyright 2023 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for pi_utils."""
from absl.testing import parameterized
import tensorflow as tf
import pi_utils # local file import from baselines.privileged_information
class PiUtilsTest(parameterized.TestCase, tf.test.TestCase):
num_dataset_annotators = 5
num_annotators_per_example = 3
annotator_feature_length = 2
num_classes = 10
batch_size = 4
@property
def METHOD_NAME(self):
return {
'pi_features': {
'annotator_ids':
tf.random.uniform(
[self.batch_size, self.num_annotators_per_example],
minval=0,
maxval=self.num_dataset_annotators,
dtype=tf.int32),
'annotator_features':
tf.random.normal([
self.batch_size, self.num_annotators_per_example,
self.annotator_feature_length
]),
'annotator_confidences':
tf.random.normal(
[self.batch_size, self.num_annotators_per_example]),
'annotator_labels':
tf.random.normal([
self.batch_size, self.num_annotators_per_example,
self.num_classes
]),
},
'clean_labels': tf.range(self.batch_size)
}
@property
def example_with_incorrect_labels(self):
return {
'pi_features': {
'annotator_ids': tf.reshape(tf.range(4), [2, 2]),
'annotator_labels': tf.constant([[[0], [1]], [[0], [0]]])
},
'clean_labels': tf.zeros((2,), dtype=tf.int32)
}
@parameterized.parameters(
(('annotator_labels', 'annotator_ids', 'annotator_features',
'annotator_confidences'), (
4,
3,
(10 + 5 + 2 + 1),
)),
(('annotator_labels',), (
4,
3,
10,
)),
)
def test_pi_generation(self, pi_subset, expected_pi_shape):
def annotator_id_encoding_fn(example):
return tf.one_hot(example['pi_features']['annotator_ids'],
self.num_dataset_annotators)
encoding_fn_dict = {
'annotator_ids':
annotator_id_encoding_fn,
'annotator_features':
lambda e: e['pi_features']['annotator_features'],
'annotator_confidences':
lambda e: e['pi_features']['annotator_confidences'],
'annotator_labels':
lambda e: e['pi_features']['annotator_labels'],
}
privileged_information_fn = pi_utils.get_privileged_information_fn(
pi_subset=pi_subset, encoding_fn_dict=encoding_fn_dict)
privileged_information = privileged_information_fn(
self.METHOD_NAME)
self.assertEqual(privileged_information.shape, expected_pi_shape)
def test_feature_repetition(self):
num_annotators_per_example = 2
labels = tf.constant([0, 1, 2])
repeated_labels = pi_utils.repeat_across_annotators(
labels, num_annotators_per_example)
self.assertAllEqual(repeated_labels,
tf.constant([[[0], [0]], [[1], [1]], [[2], [2]]]))
labels_one_hot = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
repeated_labels = pi_utils.repeat_across_annotators(
labels_one_hot, num_annotators_per_example)
self.assertAllEqual(
repeated_labels,
tf.constant([[[1, 0, 0], [1, 0, 0]], [[0, 1, 0], [0, 1, 0]],
[[0, 0, 1], [0, 0, 1]]]))
def test_flatten_annotator_axis(self):
annotator_labels = tf.constant([[[1, 0, 0], [1, 0, 0]],
[[0, 1, 0], [0, 1, 0]],
[[0, 0, 1], [0, 0, 1]]])
flattened_labels = pi_utils.flatten_annotator_axis(annotator_labels)
self.assertAllEqual(
flattened_labels,
tf.constant([[1, 0, 0], [1, 0, 0], [0, 1, 0], [0, 1, 0], [0, 0, 1],
[0, 0, 1]]))
@parameterized.parameters(True, False)
def test_find_noisy_annotators(self, flatten_annotators):
example = {
'pi_features': {
'annotator_ids': tf.reshape(tf.range(4), [2, 2]),
'annotator_labels': tf.constant([[[0], [1]], [[0], [0]]])
},
'clean_labels': tf.zeros((2,), dtype=tf.int32)
}
is_correct_mask = pi_utils.find_noisy_annotators(example,
flatten_annotators)
# The second annotator of the first example is wrong
ground_truth = tf.constant([[0, 1], [0, 0]])
if flatten_annotators:
ground_truth = tf.reshape(ground_truth, [-1])
self.assertAllEqual(is_correct_mask, ground_truth)
def test_annotator_label_if_incorrect(self):
annotator_label_if_incorrect = (
pi_utils.annotator_label_if_incorrect_encoding_fn(
self.example_with_incorrect_labels, label_encoding_fn=None
)
)
self.assertAllClose(
tf.constant([[[-1], [1]], [[-1], [-1]]]), annotator_label_if_incorrect)
def test_annotator_ids_encoding(self):
annotator_ids = pi_utils.annotator_ids_encoding_fn(
self.METHOD_NAME,
num_dataset_annotators=self.num_dataset_annotators)
self.assertAllEqual(
tf.shape(annotator_ids),
tf.constant([
self.batch_size, self.num_annotators_per_example,
self.num_dataset_annotators
]))
def test_clean_labels_encoding_fn(self):
def label_encoding_fn(labels):
return tf.one_hot(
tf.cast(labels, dtype=tf.int32), self.num_classes, dtype=tf.float32)
clean_labels = pi_utils.clean_labels_encoding_fn(
self.METHOD_NAME,
num_annotators_per_example=self.num_annotators_per_example,
label_encoding_fn=label_encoding_fn)
self.assertAllEqual(
tf.shape(clean_labels),
tf.constant([
self.batch_size, self.num_annotators_per_example, self.num_classes
]))
if __name__ == '__main__':
tf.test.main() | null |
435 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdomain.endpoint import endpoint_data
class QueryAdvancedDomainListRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain', '2018-01-29', 'QueryAdvancedDomainList')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ProductDomainType(self): # String
return self.get_query_params().get('ProductDomainType')
def set_ProductDomainType(self, ProductDomainType): # String
self.add_query_param('ProductDomainType', ProductDomainType)
def get_PageNum(self): # Integer
return self.get_query_params().get('PageNum')
def set_PageNum(self, PageNum): # Integer
self.add_query_param('PageNum', PageNum)
def get_Excluded(self): # String
return self.get_query_params().get('Excluded')
def set_Excluded(self, Excluded): # String
self.add_query_param('Excluded', Excluded)
def get_StartLength(self): # Integer
return self.get_query_params().get('StartLength')
def set_StartLength(self, StartLength): # Integer
self.add_query_param('StartLength', StartLength)
def get_ExcludedSuffix(self): # Boolean
return self.get_query_params().get('ExcludedSuffix')
def set_ExcludedSuffix(self, ExcludedSuffix): # Boolean
self.add_query_param('ExcludedSuffix', ExcludedSuffix)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_ExcludedPrefix(self): # Boolean
return self.get_query_params().get('ExcludedPrefix')
def set_ExcludedPrefix(self, ExcludedPrefix): # Boolean
self.add_query_param('ExcludedPrefix', ExcludedPrefix)
def get_KeyWord(self): # String
return self.get_query_params().get('KeyWord')
def set_KeyWord(self, KeyWord): # String
self.add_query_param('KeyWord', KeyWord)
def METHOD_NAME(self): # Boolean
return self.get_query_params().get('ProductDomainTypeSort')
def set_ProductDomainTypeSort(self, ProductDomainTypeSort): # Boolean
self.add_query_param('ProductDomainTypeSort', ProductDomainTypeSort)
def get_EndExpirationDate(self): # Long
return self.get_query_params().get('EndExpirationDate')
def set_EndExpirationDate(self, EndExpirationDate): # Long
self.add_query_param('EndExpirationDate', EndExpirationDate)
def get_Suffixs(self): # String
return self.get_query_params().get('Suffixs')
def set_Suffixs(self, Suffixs): # String
self.add_query_param('Suffixs', Suffixs)
def get_DomainNameSort(self): # Boolean
return self.get_query_params().get('DomainNameSort')
def set_DomainNameSort(self, DomainNameSort): # Boolean
self.add_query_param('DomainNameSort', DomainNameSort)
def get_ExpirationDateSort(self): # Boolean
return self.get_query_params().get('ExpirationDateSort')
def set_ExpirationDateSort(self, ExpirationDateSort): # Boolean
self.add_query_param('ExpirationDateSort', ExpirationDateSort)
def get_StartExpirationDate(self): # Long
return self.get_query_params().get('StartExpirationDate')
def set_StartExpirationDate(self, StartExpirationDate): # Long
self.add_query_param('StartExpirationDate', StartExpirationDate)
def get_DomainStatus(self): # Integer
return self.get_query_params().get('DomainStatus')
def set_DomainStatus(self, DomainStatus): # Integer
self.add_query_param('DomainStatus', DomainStatus)
def get_DomainGroupId(self): # Long
return self.get_query_params().get('DomainGroupId')
def set_DomainGroupId(self, DomainGroupId): # Long
self.add_query_param('DomainGroupId', DomainGroupId)
def get_KeyWordSuffix(self): # Boolean
return self.get_query_params().get('KeyWordSuffix')
def set_KeyWordSuffix(self, KeyWordSuffix): # Boolean
self.add_query_param('KeyWordSuffix', KeyWordSuffix)
def get_KeyWordPrefix(self): # Boolean
return self.get_query_params().get('KeyWordPrefix')
def set_KeyWordPrefix(self, KeyWordPrefix): # Boolean
self.add_query_param('KeyWordPrefix', KeyWordPrefix)
def get_TradeType(self): # Integer
return self.get_query_params().get('TradeType')
def set_TradeType(self, TradeType): # Integer
self.add_query_param('TradeType', TradeType)
def get_EndRegistrationDate(self): # Long
return self.get_query_params().get('EndRegistrationDate')
def set_EndRegistrationDate(self, EndRegistrationDate): # Long
self.add_query_param('EndRegistrationDate', EndRegistrationDate)
def get_Form(self): # Integer
return self.get_query_params().get('Form')
def set_Form(self, Form): # Integer
self.add_query_param('Form', Form)
def get_UserClientIp(self): # String
return self.get_query_params().get('UserClientIp')
def set_UserClientIp(self, UserClientIp): # String
self.add_query_param('UserClientIp', UserClientIp)
def get_RegistrationDateSort(self): # Boolean
return self.get_query_params().get('RegistrationDateSort')
def set_RegistrationDateSort(self, RegistrationDateSort): # Boolean
self.add_query_param('RegistrationDateSort', RegistrationDateSort)
def get_StartRegistrationDate(self): # Long
return self.get_query_params().get('StartRegistrationDate')
def set_StartRegistrationDate(self, StartRegistrationDate): # Long
self.add_query_param('StartRegistrationDate', StartRegistrationDate)
def get_EndLength(self): # Integer
return self.get_query_params().get('EndLength')
def set_EndLength(self, EndLength): # Integer
self.add_query_param('EndLength', EndLength) | null |
436 | import os
import pytest
from puppetboard import docker_settings
from importlib import reload as reload
@pytest.fixture(scope='function')
def cleanup_env(request):
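    # Drop every UPPER_CASE docker_settings name from the environment and
    # reload the module so each test starts from the default configuration.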
for env_var in dir(docker_settings):
if (env_var.startswith('__') or env_var.startswith('_') or
env_var.islower()):
continue
if env_var in os.environ:
del os.environ[env_var]
reload(docker_settings)
return
def test_default_host_port(cleanup_env):
assert docker_settings.PUPPETDB_HOST == 'puppetdb'
assert docker_settings.PUPPETDB_PORT == 8080
def test_set_host_port(cleanup_env):
os.environ['PUPPETDB_HOST'] = 'puppetdb2'
os.environ['PUPPETDB_PORT'] = '9081'
reload(docker_settings)
assert docker_settings.PUPPETDB_HOST == 'puppetdb2'
assert docker_settings.PUPPETDB_PORT == 9081
def test_set_proto(cleanup_env):
os.environ['PUPPETDB_PROTO'] = 'https'
reload(docker_settings)
assert docker_settings.PUPPETDB_PROTO == 'https'
def test_cert_true_test(cleanup_env):
os.environ['PUPPETDB_SSL_VERIFY'] = 'True'
reload(docker_settings)
assert docker_settings.PUPPETDB_SSL_VERIFY is True
os.environ['PUPPETDB_SSL_VERIFY'] = 'true'
reload(docker_settings)
assert docker_settings.PUPPETDB_SSL_VERIFY is True
def test_cert_false_test(cleanup_env):
os.environ['PUPPETDB_SSL_VERIFY'] = 'False'
reload(docker_settings)
assert docker_settings.PUPPETDB_SSL_VERIFY is False
os.environ['PUPPETDB_SSL_VERIFY'] = 'false'
reload(docker_settings)
assert docker_settings.PUPPETDB_SSL_VERIFY is False
def test_cert_path(cleanup_env):
ca_file = '/usr/ssl/path/ca.pem'
os.environ['PUPPETDB_SSL_VERIFY'] = ca_file
reload(docker_settings)
assert docker_settings.PUPPETDB_SSL_VERIFY == ca_file
def test_cert_to_file(cleanup_env):
import tempfile
cert_string = '-----BEGIN CERTIFICATE-----\nMIIFkjCCA3qgAwf'
os.environ['PUPPETDB_KEY'] = cert_string
reload(docker_settings)
assert docker_settings.PUPPETDB_KEY.startswith(tempfile.gettempdir())
with open(docker_settings.PUPPETDB_KEY) as test_cert_file:
assert test_cert_file.read() == '-----BEGIN CERTIFICATE-----\nMIIFkjCCA3qgAwf'
# Clean up the generated file
os.unlink(docker_settings.PUPPETDB_KEY)
def test_cert_to_file_base64(cleanup_env):
import tempfile
cert_string = 'LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZrakNDQTNxZ0F3SUI='
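    # base64 encoding of '-----BEGIN CERTIFICATE-----\nMIIFkjCCA3qgAwIB'
    # (the decoded file contents are checked against that string below)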
os.environ['PUPPETDB_KEY'] = cert_string
reload(docker_settings)
assert docker_settings.PUPPETDB_KEY.startswith(tempfile.gettempdir())
with open(docker_settings.PUPPETDB_KEY) as test_cert_file:
assert test_cert_file.read() == '-----BEGIN CERTIFICATE-----\nMIIFkjCCA3qgAwIB'
# Clean up the generated file
os.unlink(docker_settings.PUPPETDB_KEY)
def validate_facts(facts):
assert isinstance(facts, list)
assert len(facts) > 0
for map in facts:
assert isinstance(map, tuple)
assert len(map) == 2
def test_inventory_facts_default(cleanup_env):
validate_facts(docker_settings.INVENTORY_FACTS)
def test_inventory_facts_custom(cleanup_env):
os.environ['INVENTORY_FACTS'] = "A, B, C, D"
reload(docker_settings)
validate_facts(docker_settings.INVENTORY_FACTS)
def test_inventory_fact_templates_default(cleanup_env):
assert isinstance(docker_settings.INVENTORY_FACT_TEMPLATES, dict)
assert len(docker_settings.INVENTORY_FACT_TEMPLATES) == 3
def test_inventory_fact_templates_custom(cleanup_env):
os.environ['INVENTORY_FACT_TEMPLATES'] = """{"os": "{{ fact_os_detection(value) }}"}"""
reload(docker_settings)
assert isinstance(docker_settings.INVENTORY_FACT_TEMPLATES, dict)
assert len(docker_settings.INVENTORY_FACT_TEMPLATES) == 1
def test_graph_facts_default(cleanup_env):
facts = docker_settings.GRAPH_FACTS
assert isinstance(facts, list)
assert 'puppetversion' in facts
def test_graph_facts_custom(cleanup_env):
os.environ['GRAPH_FACTS'] = "architecture, puppetversion, extra"
reload(docker_settings)
facts = docker_settings.GRAPH_FACTS
assert isinstance(facts, list)
assert len(facts) == 3
assert 'puppetversion' in facts
assert 'architecture' in facts
assert 'extra' in facts
def test_default_table_selector(cleanup_env):
assert [10, 20, 50, 100, 500] == docker_settings.TABLE_COUNT_SELECTOR
def test_env_table_selector(cleanup_env):
os.environ['TABLE_COUNT_SELECTOR'] = '5,15,25'
reload(docker_settings)
assert [5, 15, 25] == docker_settings.TABLE_COUNT_SELECTOR
def test_env_column_options(cleanup_env):
os.environ['DISPLAYED_METRICS'] = 'resources.total, events.failure'
reload(docker_settings)
assert ['resources.total',
'events.failure'] == docker_settings.DISPLAYED_METRICS
def test_enable_class_default(cleanup_env):
assert False == docker_settings.ENABLE_CLASS
def test_enable_class_true(cleanup_env):
os.environ['ENABLE_CLASS'] = 'True'
reload(docker_settings)
assert docker_settings.ENABLE_CLASS is True
os.environ['ENABLE_CLASS'] = 'true'
reload(docker_settings)
assert docker_settings.ENABLE_CLASS is True
def test_enable_class_false(cleanup_env):
os.environ['ENABLE_CLASS'] = 'False'
reload(docker_settings)
assert docker_settings.ENABLE_CLASS is False
os.environ['ENABLE_CLASS'] = 'false'
reload(docker_settings)
assert docker_settings.ENABLE_CLASS is False
def METHOD_NAME(cleanup_env):
assert 3600 == docker_settings.CACHE_DEFAULT_TIMEOUT
def test_cache_type_default(cleanup_env):
assert 'SimpleCache' == docker_settings.CACHE_TYPE
def test_cache_memcached_servers(cleanup_env):
os.environ['CACHE_TYPE'] = 'MemcachedCache'
reload(docker_settings)
assert ['memcached:11211'] == docker_settings.CACHE_MEMCACHED_SERVERS
def test_class_events_status_columns_default(cleanup_env):
assert [('failure', 'Failure'),
('success', 'Success'),
('noop', 'Noop')] == docker_settings.CLASS_EVENTS_STATUS_COLUMNS
def test_scheduler_enabled_true(cleanup_env):
os.environ['SCHEDULER_ENABLED'] = 'True'
reload(docker_settings)
assert docker_settings.SCHEDULER_ENABLED is True
os.environ['SCHEDULER_ENABLED'] = 'true'
reload(docker_settings)
assert docker_settings.SCHEDULER_ENABLED is True
def test_scheduler_enabled_false(cleanup_env):
os.environ['SCHEDULER_ENABLED'] = 'False'
reload(docker_settings)
assert docker_settings.SCHEDULER_ENABLED is False
os.environ['SCHEDULER_ENABLED'] = 'false'
reload(docker_settings)
assert docker_settings.SCHEDULER_ENABLED is False
def test_scheduler_jobs_default(cleanup_env):
assert [{'func': 'puppetboard.schedulers.classes:build_async_cache',
'id': 'do_build_async_cache_1',
'seconds': 300,
'trigger': 'interval'}] == docker_settings.SCHEDULER_JOBS
def test_scheduler_jobs_custom(cleanup_env):
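    # SCHEDULER_JOBS is supplied as a flat, comma-separated sequence of
    # key,value pairs that docker_settings parses back into a single job dict.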
os.environ['SCHEDULER_JOBS'] = "id,do_build_async_cache_1,func,puppetboard.schedulers.classes:build_async_cache,trigger,interval,seconds,600"
reload(docker_settings)
jobs = docker_settings.SCHEDULER_JOBS
assert isinstance(jobs, list)
assert len(jobs) == 1
for job in jobs:
assert isinstance(job, dict)
assert len(job) == 4
assert 'id' in job
assert 'func' in job
assert 'trigger' in job
assert 'seconds' in job
assert 600 == job['seconds'] | null |
437 | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import Optional
from pydantic import Extra, BaseModel, Field, constr, validator
from lightly.openapi_generated.swagger_client.models.s3_region import S3Region
class DatasourceConfigS3DelegatedAccessAllOf(BaseModel):
"""
DatasourceConfigS3DelegatedAccessAllOf
"""
s3_region: S3Region = Field(..., alias="s3Region")
s3_external_id: constr(strict=True, min_length=10) = Field(..., alias="s3ExternalId", description="The external ID specified when creating the role.")
s3_arn: constr(strict=True, min_length=12) = Field(..., alias="s3ARN", description="The ARN of the role you created")
s3_server_side_encryption_kms_key: Optional[constr(strict=True, min_length=1)] = Field(None, alias="s3ServerSideEncryptionKMSKey", description="If set, Lightly Worker will automatically set the headers to use server side encryption https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html with this value as the appropriate KMS key arn. This will encrypt the files created by Lightly (crops, frames, thumbnails) in the S3 bucket. ")
__properties = ["s3Region", "s3ExternalId", "s3ARN", "s3ServerSideEncryptionKMSKey"]
@validator('s3_external_id')
def s3_external_id_validate_regular_expression(cls, value):
"""Validates the regular expression"""
if not re.match(r"^[a-zA-Z0-9_+=,.@:\/-]+$", value):
raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9_+=,.@:\/-]+$/")
return value
@validator('s3_arn')
def s3_arn_validate_regular_expression(cls, value):
"""Validates the regular expression"""
if not re.match(r"^arn:aws:iam::[0-9]{12}:role.+$", value):
raise ValueError(r"must validate the regular expression /^arn:aws:iam::[0-9]{12}:role.+$/")
return value
@validator('s3_server_side_encryption_kms_key')
def s3_server_side_encryption_kms_key_validate_regular_expression(cls, value):
"""Validates the regular expression"""
if value is None:
return value
if not re.match(r"^arn:aws:kms:[a-zA-Z0-9-]*:[0-9]{12}:key.+$", value):
raise ValueError(r"must validate the regular expression /^arn:aws:kms:[a-zA-Z0-9-]*:[0-9]{12}:key.+$/")
return value
class Config:
"""Pydantic configuration"""
allow_population_by_field_name = True
validate_assignment = True
use_enum_values = True
extra = Extra.forbid
def METHOD_NAME(self, by_alias: bool = False) -> str:
"""Returns the string representation of the model"""
return pprint.pformat(self.dict(by_alias=by_alias))
def to_json(self, by_alias: bool = False) -> str:
"""Returns the JSON representation of the model"""
return json.dumps(self.to_dict(by_alias=by_alias))
@classmethod
def from_json(cls, json_str: str) -> DatasourceConfigS3DelegatedAccessAllOf:
"""Create an instance of DatasourceConfigS3DelegatedAccessAllOf from a JSON string"""
return cls.from_dict(json.loads(json_str))
def to_dict(self, by_alias: bool = False):
"""Returns the dictionary representation of the model"""
_dict = self.dict(by_alias=by_alias,
exclude={
},
exclude_none=True)
return _dict
@classmethod
def from_dict(cls, obj: dict) -> DatasourceConfigS3DelegatedAccessAllOf:
"""Create an instance of DatasourceConfigS3DelegatedAccessAllOf from a dict"""
if obj is None:
return None
if not isinstance(obj, dict):
return DatasourceConfigS3DelegatedAccessAllOf.parse_obj(obj)
# raise errors for additional fields in the input
for _key in obj.keys():
if _key not in cls.__properties:
raise ValueError("Error due to additional fields (not defined in DatasourceConfigS3DelegatedAccessAllOf) in the input: " + str(obj))
_obj = DatasourceConfigS3DelegatedAccessAllOf.parse_obj({
"s3_region": obj.get("s3Region"),
"s3_external_id": obj.get("s3ExternalId"),
"s3_arn": obj.get("s3ARN"),
"s3_server_side_encryption_kms_key": obj.get("s3ServerSideEncryptionKMSKey")
})
return _obj
| null |
438 | from abc import ABC
from typing import Any, Iterable, Set, Sized
from boa3.internal.model.type.collection.icollection import ICollectionType
from boa3.internal.model.type.itype import IType
from boa3.internal.neo.vm.type.AbiType import AbiType
from boa3.internal.neo.vm.type.StackItem import StackItemType
class MappingType(ICollectionType, ABC):
"""
An interface used to represent Python mapping type
"""
def __init__(self, identifier: str, keys_type: Set[IType], values_type: Set[IType]):
super().__init__(identifier, keys_type=keys_type, values_type=values_type)
@property
def identifier(self) -> str:
return '{0}[{1}, {2}]'.format(self._identifier, self.key_type.identifier, self.value_type.identifier)
@property
def value_type(self) -> IType:
return self.item_type
@property
def default_value(self) -> Any:
return {}
def is_type_of(self, value: Any) -> bool:
if self._is_type_of(value):
if isinstance(value, MappingType):
return (self.key_type.is_type_of(value.key_type)
and self.value_type.is_type_of(value.value_type))
return True
return False
@property
def abi_type(self) -> AbiType:
return AbiType.Map
@property
def METHOD_NAME(self) -> StackItemType:
return StackItemType.Map
def is_valid_key(self, key_type: IType) -> bool:
return self.valid_key.is_type_of(key_type)
@property
def valid_key(self) -> IType:
return self.key_type
@classmethod
def filter_types(cls, values_type) -> Set[IType]:
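        # Normalize values_type into a set; when every member is a MappingType,
        # collapse them into one mapping whose key and value types cover all of
        # the originals, otherwise fall back to the base collection filter.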
if values_type is None:
values_type = set()
elif not isinstance(values_type, set):
if isinstance(values_type, Iterable):
values_type = set(values_type)
else:
values_type = {values_type}
if len(values_type) > 1 and all(isinstance(x, MappingType) for x in values_type):
first_item: MappingType = list(values_type)[0]
mapping_type = type(first_item) # first mapping type
k_types = set(value.key_type for value in values_type)
v_types = set(value.value_type for value in values_type)
if all(isinstance(x, mapping_type) for x in values_type):
values_type = {mapping_type(keys_type=k_types, values_type=v_types)}
else:
from boa3.internal.model.type.type import Type
generic_type: IType = Type.get_generic_type(*values_type)
if isinstance(generic_type, MappingType):
values_type = {generic_type.build_collection(k_types, v_types)}
return values_type
# if any value is not a map, call the collection filter
return super().filter_types(values_type)
@classmethod
def build(cls, value: Any) -> IType:
if cls._is_type_of(value):
# value is an instance of mapping
if isinstance(value, dict):
keys = list(value.keys())
values = list(value.values())
else:
keys = value.key_type
values = value.value_type
keys_types: Set[IType] = cls.get_types(keys)
values_types: Set[IType] = cls.get_types(values)
return cls(keys_types, values_types)
elif isinstance(value, Sized) and len(value) == 2:
            # value is a tuple with two lists of types for constructing the map
keys_type, values_type = value
if not isinstance(keys_type, Iterable):
keys_type = {keys_type}
else:
keys_type = set(keys_type)
if not isinstance(values_type, Iterable):
values_types = {values_type}
else:
values_types = set(values_type)
if all(isinstance(k, IType) for k in keys_type) and all(isinstance(v, IType) for v in values_types):
return cls(keys_type, values_type)
return super(MappingType, cls).build(value) | null |
439 | #!/usr/bin/env python
from __future__ import print_function
import math
import IMP.multifit
import IMP.atom
import IMP.em
from IMP import ArgumentParser
import os
import sys
__doc__ = "Fit subunits into a density map with FFT."
multiproc_exception = None
try:
from multiprocessing import Pool
# Detect whether we are running Windows Python via Wine. Wine does not
# currently support some named pipe functions which the multiprocessing
# module needs: http://bugs.winehq.org/show_bug.cgi?id=17273
if sys.platform == 'win32' and 'WINELOADERNOEXEC' in os.environ:
multiproc_exception = "Wine does not currently support multiprocessing"
except ImportError as detail:
multiproc_exception = str(detail)
class Fitter(object):
def __init__(
self,
em_map,
spacing,
resolution,
origin,
density_threshold,
pdb,
fits_fn,
angle,
num_fits,
angles_per_voxel,
ref_pdb=''):
self.em_map = em_map
self.spacing = spacing
self.resolution = resolution
self.threshold = density_threshold
self.originx = origin[0]
self.originy = origin[1]
self.originz = origin[2]
self.pdb = pdb
self.fits_fn = fits_fn
self.angle = angle
self.num_fits = num_fits
self.angles_per_voxel = angles_per_voxel
self.ref_pdb = ref_pdb
def run(self):
print("resolution is:", self.resolution)
dmap = IMP.em.read_map(self.em_map)
dmap.get_header().set_resolution(self.resolution)
dmap.update_voxel_size(self.spacing)
dmap.set_origin(IMP.algebra.Vector3D(self.originx,
self.originy,
self.originz))
dmap.set_was_used(True)
dmap.get_header().show()
mdl = IMP.Model()
mol2fit = IMP.atom.read_pdb(self.pdb, mdl)
mh_xyz = IMP.core.XYZs(IMP.core.get_leaves(mol2fit))
rb = IMP.atom.create_rigid_body(mol2fit)
ff = IMP.multifit.FFTFitting()
ff.set_was_used(True)
fits = ff.do_global_fitting(dmap, self.threshold, mol2fit,
self.angle / 180.0 * math.pi,
self.num_fits, self.spacing, 0.5,
True, self.angles_per_voxel)
fits.set_was_used(True)
final_fits = fits.best_fits_
if self.ref_pdb != '':
ref_mh = IMP.atom.read_pdb(self.ref_pdb, mdl)
ref_mh_xyz = IMP.core.XYZs(IMP.core.get_leaves(ref_mh))
cur_low = [1e4, 0]
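            # cur_low holds [lowest RMSD to the reference seen so far, index of that fit]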
for i, fit in enumerate(final_fits):
fit.set_index(i)
if self.ref_pdb != '':
trans = fit.get_fit_transformation()
IMP.atom.transform(mol2fit, trans)
rmsd = IMP.atom.get_rmsd(mh_xyz, ref_mh_xyz)
if rmsd < cur_low[0]:
cur_low[0] = rmsd
cur_low[1] = i
fit.set_rmsd_to_reference(rmsd)
IMP.atom.transform(mol2fit, trans.get_inverse())
if self.ref_pdb != '':
print('from all fits, lowest rmsd to ref:', cur_low)
IMP.multifit.write_fitting_solutions(self.fits_fn, final_fits)
def do_work(f):
f.run()
def parse_args():
desc = """Fit subunits into a density map with FFT."""
p = ArgumentParser(description=desc)
p.add_argument("-c", "--cpu", dest="cpus", type=int, default=1,
help="number of cpus to use (default 1)")
p.add_argument("-a", "--angle", dest="angle", type=float, default=30,
help="angle delta (degrees) for FFT rotational "
"search (default 30)")
p.add_argument("-n", "--num", dest="num", type=int,
default=100, help="Number of fits to report (default 100)")
p.add_argument("-v", "--angle_voxel", dest="angle_voxel", type=int,
default=10,
help="Number of angles to keep per voxel (default 10)")
p.add_argument("assembly_file", help="assembly file name")
# p.add_argument("-n", "--num", dest="num", type="int",
# default=100,
# help="Number of fits to report"
# "(default 100)")
return p.parse_args()
def run(asmb_fn, options):
if multiproc_exception is None and options.cpus > 1:
work_units = []
asmb_input = IMP.multifit.read_settings(asmb_fn)
asmb_input.set_was_used(True)
em_map = asmb_input.get_assembly_header().get_dens_fn()
resolution = asmb_input.get_assembly_header().get_resolution()
spacing = asmb_input.get_assembly_header().get_spacing()
origin = asmb_input.get_assembly_header().get_origin()
for i in range(asmb_input.get_number_of_component_headers()):
fits_fn = asmb_input.get_component_header(i).get_transformations_fn()
pdb_fn = asmb_input.get_component_header(i).get_filename()
f = Fitter(
em_map,
spacing,
resolution,
origin,
asmb_input.get_assembly_header().get_threshold(),
pdb_fn,
fits_fn,
options.angle,
options.num,
options.angle_voxel)
if multiproc_exception is None and options.cpus > 1:
work_units.append(f)
else:
if options.cpus > 1:
options.cpus = 1
print("""
The Python 'multiprocessing' module (available in Python 2.6 and later) is
needed to run on multiple CPUs, and could not be found
(Python error: '%s').
Running on a single processor.""" % multiproc_exception, file=sys.stderr)
f.run()
if multiproc_exception is None and options.cpus > 1:
# No point in spawning more processes than components
nproc = min(options.cpus, asmb_input.get_number_of_component_headers())
p = Pool(processes=nproc)
out = list(p.imap_unordered(do_work, work_units))
def METHOD_NAME():
args = parse_args()
run(args.assembly_file, args)
if __name__ == "__main__":
METHOD_NAME() | null |
440 | # Drakkar-Software OctoBot-Interfaces
# Copyright (c) Drakkar-Software, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import threading
import octobot.api as octobot_api
import octobot.constants as octobot_constants
import octobot_commons.logging as bot_logging
import octobot_commons.tentacles_management as tentacles_management
import octobot_commons.time_frame_manager as time_frame_manager
import octobot_evaluators.evaluators as evaluators
import octobot_evaluators.api as evaluators_api
import octobot_services.interfaces.util as interfaces_util
import tentacles.Services.Interfaces.web_interface as web_interface_root
import tentacles.Services.Interfaces.web_interface.constants as constants
import tentacles.Evaluator.Strategies as TentaclesStrategies
LOGGER = bot_logging.get_logger(__name__)
def get_strategies_list(trading_mode):
try:
return trading_mode.get_required_strategies_names_and_count(interfaces_util.get_startup_tentacles_config())[0]
except Exception:
return []
def get_time_frames_list(strategy_name):
if strategy_name:
strategy_class = tentacles_management.get_class_from_string(strategy_name, evaluators.StrategyEvaluator,
TentaclesStrategies,
tentacles_management.evaluator_parent_inspection)
return [tf.value for tf in strategy_class.get_required_time_frames(
interfaces_util.get_global_config(),
interfaces_util.get_bot_api().get_tentacles_setup_config())]
else:
return []
def get_evaluators_list(strategy_name):
if strategy_name:
strategy_class = tentacles_management.get_class_from_string(strategy_name, evaluators.StrategyEvaluator,
TentaclesStrategies,
tentacles_management.evaluator_parent_inspection)
found_evaluators = evaluators_api.get_relevant_TAs_for_strategy(
strategy_class, interfaces_util.get_bot_api().get_tentacles_setup_config())
return set(evaluator.get_name() for evaluator in found_evaluators)
else:
return []
def get_risks_list():
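    # Risk values from 1.0 down to 0.1 in steps of 0.1.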
return [i / 10 for i in range(10, 0, -1)]
def cancel_optimizer():
tools = web_interface_root.WebInterface.tools
optimizer = tools[constants.BOT_TOOLS_STRATEGY_OPTIMIZER]
if optimizer is None:
return False, "No optimizer is running"
octobot_api.cancel_strategy_optimizer(optimizer)
return True, "Optimizer is being cancelled"
def start_optimizer(strategy, time_frames, evaluators, risks):
if not octobot_constants.ENABLE_BACKTESTING:
return False, "Backtesting is disabled"
try:
tools = web_interface_root.WebInterface.tools
optimizer = tools[constants.BOT_TOOLS_STRATEGY_OPTIMIZER]
if optimizer is not None and octobot_api.is_optimizer_computing(optimizer):
return False, "Optimizer already running"
independent_backtesting = tools[constants.BOT_TOOLS_BACKTESTING]
if independent_backtesting and octobot_api.is_independent_backtesting_in_progress(independent_backtesting):
return False, "A backtesting is already running"
formatted_time_frames = time_frame_manager.parse_time_frames(time_frames)
float_risks = [float(risk) for risk in risks]
temp_independent_backtesting = octobot_api.create_independent_backtesting(
interfaces_util.get_global_config(), None, [])
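        # This independent backtesting instance is only created to derive the
        # configuration handed to the strategy optimizer below.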
optimizer_config = interfaces_util.run_in_bot_async_executor(
octobot_api.initialize_independent_backtesting_config(temp_independent_backtesting)
)
optimizer = octobot_api.create_strategy_optimizer(optimizer_config,
interfaces_util.get_bot_api().get_edited_tentacles_config(),
strategy)
tools[constants.BOT_TOOLS_STRATEGY_OPTIMIZER] = optimizer
thread = threading.Thread(target=octobot_api.find_optimal_configuration,
args=(optimizer, evaluators, formatted_time_frames, float_risks),
name=f"{optimizer.get_name()}-WebInterface-runner")
thread.start()
return True, "Optimizer started"
except Exception as e:
LOGGER.exception(e, True, f"Error when starting optimizer: {e}")
raise e
def METHOD_NAME():
optimizer = web_interface_root.WebInterface.tools[constants.BOT_TOOLS_STRATEGY_OPTIMIZER]
if optimizer:
results = octobot_api.METHOD_NAME(optimizer)
return [result.get_result_dict(i) for i, result in enumerate(results)]
else:
return []
def get_optimizer_report():
if get_optimizer_status()[0] == "finished":
optimizer = web_interface_root.WebInterface.tools[constants.BOT_TOOLS_STRATEGY_OPTIMIZER]
return octobot_api.get_optimizer_report(optimizer)
else:
return []
def get_current_run_params():
params = {
"strategy_name": [],
"time_frames": [],
"evaluators": [],
"risks": [],
"trading_mode": []
}
if web_interface_root.WebInterface.tools[constants.BOT_TOOLS_STRATEGY_OPTIMIZER]:
optimizer = web_interface_root.WebInterface.tools[constants.BOT_TOOLS_STRATEGY_OPTIMIZER]
params = {
"strategy_name": [octobot_api.get_optimizer_strategy(optimizer).get_name()],
"time_frames": [tf.value for tf in octobot_api.get_optimizer_all_time_frames(optimizer)],
"evaluators": octobot_api.get_optimizer_all_TAs(optimizer),
"risks": octobot_api.get_optimizer_all_risks(optimizer),
"trading_mode": [octobot_api.get_optimizer_trading_mode(optimizer)]
}
return params
def get_optimizer_status():
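    # Returns a (status, current test suite progress, overall progress,
    # remaining time, errors description) tuple.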
optimizer = web_interface_root.WebInterface.tools[constants.BOT_TOOLS_STRATEGY_OPTIMIZER]
if optimizer:
if octobot_api.is_optimizer_computing(optimizer):
overall_progress, remaining_time =\
interfaces_util.run_in_bot_async_executor(octobot_api.get_optimizer_overall_progress(optimizer))
return "computing", octobot_api.get_optimizer_current_test_suite_progress(optimizer), \
overall_progress, remaining_time, \
octobot_api.get_optimizer_errors_description(optimizer)
else:
status = "finished" if octobot_api.is_optimizer_finished(optimizer) else "starting"
return status, 100, 100, 0, octobot_api.get_optimizer_errors_description(optimizer)
else:
return "not started", 0, 0, 0, None | null |
441 | from __future__ import print_function
import ihm.format
import IMP.test
import IMP.pmi.topology
import IMP.pmi.mmcif
import sys
if sys.version_info[0] >= 3:
from io import StringIO
else:
from io import BytesIO as StringIO
class MockMsgPack(object):
@staticmethod
def pack(data, fh, use_bin_type=True):
fh.data = data
class MockFh(object):
pass
class Tests(IMP.test.TestCase):
def assign_entity_asym_ids(self, system):
"""Assign IDs to all Entities and AsymUnits in the system"""
d = ihm.dumper._EntityDumper()
d.finalize(system)
d = ihm.dumper._StructAsymDumper()
d.finalize(system)
def assign_range_ids(self, system):
"""Assign IDs to all Entity/AsymUnit segments in the system"""
d = ihm.dumper._EntityPolySegmentDumper()
d.finalize(system)
def test_component_mapper(self):
"""Test ComponentMapper with PMI2 topology"""
m = IMP.Model()
s = IMP.pmi.topology.System(m)
po = IMP.pmi.mmcif.ProtocolOutput()
s.add_protocol_output(po)
state = s.create_state()
# Should be OK with a multi-character chain ID
nup84 = state.create_molecule("Nup84", "MELS", "AA")
nup84.add_representation(resolutions=[1])
hier = s.build()
c = IMP.pmi.mmcif._ComponentMapper(hier)
r = IMP.atom.get_by_type(hier, IMP.atom.RESIDUE_TYPE)[1]
self.assertEqual(c[r], 'Nup84.0')
def test_hier_system_mapping(self):
"""Test mapping from Hierarchy back to System"""
m = IMP.Model()
s = IMP.pmi.topology.System(m)
po = IMP.pmi.mmcif.ProtocolOutput()
s.add_protocol_output(po)
state = s.create_state()
nup84 = state.create_molecule("Nup84", "MELS", "A")
nup84.add_representation(resolutions=[1])
hier = s.build()
# Check mapping from top-level Hierarchy back to System
self.assertEqual(IMP.pmi.tools._get_system_for_hier(hier), s)
# Invalid particle
self.assertEqual(IMP.pmi.tools._get_system_for_hier(None), None)
# Particle not set up by System
p = IMP.Particle(m)
self.assertEqual(IMP.pmi.tools._get_system_for_hier(p), None)
h = IMP.atom.Hierarchy.setup_particle(p)
self.assertEqual(IMP.pmi.tools._get_system_for_hier(h), None)
# Child particles should be OK
child = hier.get_child(0).get_child(0).get_child(0).get_child(0)
self.assertEqual(IMP.pmi.tools._get_system_for_hier(child), s)
child = child.get_child(3)
self.assertEqual(IMP.pmi.tools._get_system_for_hier(child), s)
# Check mapping from Hierarchy to ProtocolOutput
pos = list(IMP.pmi.tools._all_protocol_outputs(hier))
# Should be a list of (ProtocolOuput, State) tuples
self.assertEqual(len(pos), 1)
self.assertEqual(len(pos[0]), 2)
self.assertEqual(pos[0][0], po)
def METHOD_NAME(self):
"""Test ProtocolOutput.finalize() and mmCIF output"""
m = IMP.Model()
s = IMP.pmi.topology.System(m)
po = IMP.pmi.mmcif.ProtocolOutput()
s.add_protocol_output(po)
po.finalize()
fh = StringIO()
ihm.dumper.write(fh, [po.system])
val = fh.getvalue()
# Work with both latest stable ihm and that bundled with IMP
if '_struct.pdbx_model_details' in val:
self.assertEqual(
val.split('\n')[:5],
['data_model', '_entry.id model', '_struct.entry_id model',
'_struct.pdbx_model_details .',
'_struct.pdbx_structure_determination_methodology '
'integrative'])
else:
self.assertEqual(
val.split('\n')[:5],
['data_model', '_entry.id model', '_struct.entry_id model',
'_struct.pdbx_structure_determination_methodology '
'integrative',
'_struct.title .'])
def test_finalize_write_bcif(self):
"""Test ProtocolOutput.finalize() and BinaryCIF output"""
m = IMP.Model()
s = IMP.pmi.topology.System(m)
sys.modules['msgpack'] = MockMsgPack
po = IMP.pmi.mmcif.ProtocolOutput()
s.add_protocol_output(po)
po.finalize()
fh = MockFh()
ihm.dumper.write(fh, [po.system], format='BCIF')
self.assertEqual(fh.data['dataBlocks'][0]['categories'][0]['name'],
'_entry')
def test_entity(self):
"""Test EntityDump with PMI2-style init"""
m = IMP.Model()
s = IMP.pmi.topology.System(m)
po = IMP.pmi.mmcif.ProtocolOutput()
s.add_protocol_output(po)
state = s.create_state()
nup84 = state.create_molecule("Nup84", "MELS", "A")
nup84.add_representation(resolutions=[1])
hier = s.build()
fh = StringIO()
w = ihm.format.CifWriter(fh)
d = ihm.dumper._EntityDumper()
d.finalize(po.system)
d.dump(po.system, w)
out = fh.getvalue()
self.assertEqual(out, """#
loop_
_entity.id
_entity.type
_entity.src_method
_entity.pdbx_description
_entity.formula_weight
_entity.pdbx_number_of_molecules
_entity.details
1 polymer man Nup84 532.606 1 .
#
""")
def test_model_representation(self):
"""Test ModelRepresentationDumper with PMI2-style init"""
m = IMP.Model()
s = IMP.pmi.topology.System(m)
po = IMP.pmi.mmcif.ProtocolOutput()
s.add_protocol_output(po)
state = s.create_state()
nup84 = state.create_molecule("Nup84", "MELS", "A")
nup84.add_structure(self.get_input_file_name('test.nup84.pdb'), 'A')
nup84.add_representation(resolutions=[1])
hier = s.build()
fh = StringIO()
w = ihm.format.CifWriter(fh)
self.assign_entity_asym_ids(po.system)
self.assign_range_ids(po.system)
# Assign starting model IDs
d = ihm.dumper._StartingModelDumper()
d.finalize(po.system)
d = ihm.dumper._ModelRepresentationDumper()
d.finalize(po.system)
d.dump(po.system, w)
r, = po.system.orphan_representations
self.assertEqual([f.asym_unit.seq_id_range for f in r], [(1,2), (3,4)])
out = fh.getvalue()
self.assertEqual(out, """#
loop_
_ihm_model_representation.id
_ihm_model_representation.name
_ihm_model_representation.details
1 'Default representation' .
#
#
loop_
_ihm_model_representation_details.id
_ihm_model_representation_details.representation_id
_ihm_model_representation_details.entity_id
_ihm_model_representation_details.entity_description
_ihm_model_representation_details.entity_asym_id
_ihm_model_representation_details.entity_poly_segment_id
_ihm_model_representation_details.model_object_primitive
_ihm_model_representation_details.starting_model_id
_ihm_model_representation_details.model_mode
_ihm_model_representation_details.model_granularity
_ihm_model_representation_details.model_object_count
_ihm_model_representation_details.description
1 1 1 Nup84 A 1 sphere 1 flexible by-residue . .
2 1 1 Nup84 A 2 sphere . flexible by-feature 2 .
#
""")
if __name__ == '__main__':
IMP.test.main() | null |
442 | import numpy as np
import pytest
import torch
from lhotse import AudioSource, CutSet, MultiCut, Recording, SupervisionSegment
from lhotse.audio import RecordingSet
from lhotse.cut import PaddingCut
from lhotse.utils import fastcopy
@pytest.fixture
def recording():
return Recording.from_file("test/fixtures/libri/libri-1088-134315-0000_8ch.wav")
@pytest.fixture
def mono_rir():
return Recording.from_file("test/fixtures/rir/sim_1ch.wav")
@pytest.fixture
def multi_channel_rir():
return Recording.from_file("test/fixtures/rir/real_8ch.wav")
@pytest.fixture
def cut_with_supervision(recording, cut_channels=None, sup_channels=None):
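    # When used as a fixture the channel arguments keep their defaults, so both
    # the cut and its supervision cover all 8 channels of the recording.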
if cut_channels is None:
cut_channels = [0, 1, 2, 3, 4, 5, 6, 7]
if sup_channels is None:
sup_channels = [0, 1, 2, 3, 4, 5, 6, 7]
return MultiCut(
id="cut",
start=0.0,
duration=1.0,
channel=cut_channels,
supervisions=[
SupervisionSegment(
id="sup",
recording_id="rec",
start=0.0,
duration=1.0,
channel=sup_channels,
)
],
recording=recording,
)
def test_cut_perturb_speed11(cut_with_supervision):
cut_sp = cut_with_supervision.perturb_speed(1.1)
assert cut_sp.start == 0.0
assert cut_sp.duration == 0.9090625
assert cut_sp.end == 0.9090625
assert cut_sp.num_samples == 14545
assert cut_sp.recording.duration == 14.5818125
assert cut_sp.recording.num_samples == 233309
assert cut_sp.supervisions[0].start == 0.0
assert cut_sp.supervisions[0].duration == 0.9090625
assert cut_sp.supervisions[0].end == 0.9090625
cut_samples = cut_sp.load_audio()
assert cut_samples.shape[0] == 8
assert cut_samples.shape[1] == 14545
recording_samples = cut_sp.recording.load_audio()
assert recording_samples.shape[0] == 8
assert recording_samples.shape[1] == 233309
def test_cut_perturb_speed09(cut_with_supervision):
cut_sp = cut_with_supervision.perturb_speed(0.9)
assert cut_sp.start == 0.0
assert cut_sp.duration == 1.111125
assert cut_sp.end == 1.111125
assert cut_sp.num_samples == 17778
assert cut_sp.recording.duration == 17.82225
assert cut_sp.recording.num_samples == 285156
assert cut_sp.supervisions[0].start == 0.0
assert cut_sp.supervisions[0].duration == 1.111125
assert cut_sp.supervisions[0].end == 1.111125
cut_samples = cut_sp.load_audio()
assert cut_samples.shape[0] == 8
assert cut_samples.shape[1] == 17778
recording_samples = cut_sp.recording.load_audio()
assert recording_samples.shape[0] == 8
assert recording_samples.shape[1] == 285156
def test_cut_perturb_tempo09(cut_with_supervision):
cut_tp = cut_with_supervision.perturb_tempo(0.9)
assert cut_tp.start == 0.0
assert cut_tp.duration == 1.111125
assert cut_tp.end == 1.111125
assert cut_tp.num_samples == 17778
assert cut_tp.recording.duration == 17.82225
assert cut_tp.recording.num_samples == 285156
assert cut_tp.supervisions[0].start == 0.0
assert cut_tp.supervisions[0].duration == 1.111125
assert cut_tp.supervisions[0].end == 1.111125
cut_samples = cut_tp.load_audio()
assert cut_samples.shape[0] == 8
assert cut_samples.shape[1] == 17778
recording_samples = cut_tp.recording.load_audio()
assert recording_samples.shape[0] == 8
assert recording_samples.shape[1] == 285156
def test_cut_perturb_tempo11(cut_with_supervision):
cut_tp = cut_with_supervision.perturb_tempo(1.1)
assert cut_tp.start == 0.0
assert cut_tp.duration == 0.9090625
assert cut_tp.end == 0.9090625
assert cut_tp.num_samples == 14545
assert cut_tp.recording.duration == 14.5818125
assert cut_tp.recording.num_samples == 233309
assert cut_tp.supervisions[0].start == 0.0
assert cut_tp.supervisions[0].duration == 0.9090625
assert cut_tp.supervisions[0].end == 0.9090625
cut_samples = cut_tp.load_audio()
assert cut_samples.shape[0] == 8
assert cut_samples.shape[1] == 14545
recording_samples = cut_tp.recording.load_audio()
assert recording_samples.shape[0] == 8
assert recording_samples.shape[1] == 233309
def METHOD_NAME(cut_with_supervision):
resampled = cut_with_supervision.resample(8000)
assert cut_with_supervision.sampling_rate == 16000
assert resampled.sampling_rate == 8000
assert cut_with_supervision.num_samples == 2 * resampled.num_samples
samples = resampled.load_audio()
assert samples.shape[1] == resampled.num_samples
@pytest.mark.parametrize("scale", [0.125, 2.0])
def test_cut_perturb_volume(cut_with_supervision, scale):
cut_vp = cut_with_supervision.perturb_volume(scale)
assert cut_vp.start == cut_with_supervision.start
assert cut_vp.duration == cut_with_supervision.duration
assert cut_vp.end == cut_with_supervision.end
assert cut_vp.num_samples == cut_with_supervision.num_samples
assert cut_vp.recording.duration == cut_with_supervision.recording.duration
assert cut_vp.recording.num_samples == cut_with_supervision.recording.num_samples
assert cut_vp.supervisions[0].start == cut_with_supervision.supervisions[0].start
assert (
cut_vp.supervisions[0].duration == cut_with_supervision.supervisions[0].duration
)
assert cut_vp.supervisions[0].end == cut_with_supervision.supervisions[0].end
assert cut_vp.load_audio().shape == cut_with_supervision.load_audio().shape
assert (
cut_vp.recording.load_audio().shape
== cut_with_supervision.recording.load_audio().shape
)
np.testing.assert_array_almost_equal(
cut_vp.load_audio(), cut_with_supervision.load_audio() * scale
)
np.testing.assert_array_almost_equal(
cut_vp.recording.load_audio(),
cut_with_supervision.recording.load_audio() * scale,
)
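# A mono RIR (or a single channel selected from a multi-channel RIR) is applied
# to every channel of the 8-channel cut; RIR channel selections that neither
# match the cut's channels nor reduce to a single channel are expected to fail.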
@pytest.mark.parametrize(
"rir, rir_channels, expected_channels",
[
("mono_rir", [0], [0, 1, 2, 3, 4, 5, 6, 7]),
pytest.param("mono_rir", [1], None, marks=pytest.mark.xfail),
("multi_channel_rir", [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]),
("multi_channel_rir", [0], [0, 1, 2, 3, 4, 5, 6, 7]),
("multi_channel_rir", [1], [0, 1, 2, 3, 4, 5, 6, 7]),
pytest.param("multi_channel_rir", [0, 1], None, marks=pytest.mark.xfail),
],
)
def test_cut_reverb_rir(
cut_with_supervision, rir, rir_channels, expected_channels, request
):
rir = request.getfixturevalue(rir)
cut = cut_with_supervision
cut_rvb = cut.reverb_rir(rir, rir_channels=rir_channels)
print(cut_rvb.channel)
assert cut_rvb.start == cut.start
assert cut_rvb.duration == cut.duration
assert cut_rvb.end == cut.end
assert cut_rvb.num_samples == cut.num_samples
assert cut_rvb.recording.duration == cut.recording.duration
assert cut_rvb.recording.num_samples == cut.recording.num_samples
assert cut_rvb.supervisions[0].start == cut.supervisions[0].start
assert cut_rvb.supervisions[0].duration == cut.supervisions[0].duration
assert cut_rvb.supervisions[0].end == cut.supervisions[0].end
assert cut_rvb.load_audio().shape == cut.load_audio().shape
assert cut_rvb.recording.load_audio().shape == cut.recording.load_audio().shape
assert cut_rvb.channel == expected_channels
def test_cut_reverb_fast_rir(cut_with_supervision):
cut = cut_with_supervision
with pytest.raises(AssertionError):
cut_rvb = cut.reverb_rir(rir_recording=None) | null |
443 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhbase.endpoint import endpoint_data
class RelateDbForHBaseHaRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'HBase', '2019-01-01', 'RelateDbForHBaseHa','hbase')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_HaMigrateType(self):
return self.get_query_params().get('HaMigrateType')
def set_HaMigrateType(self,HaMigrateType):
self.add_query_param('HaMigrateType',HaMigrateType)
def get_HaActiveHdfsUri(self):
return self.get_query_params().get('HaActiveHdfsUri')
def set_HaActiveHdfsUri(self,HaActiveHdfsUri):
self.add_query_param('HaActiveHdfsUri',HaActiveHdfsUri)
def get_HaStandbyVersion(self):
return self.get_query_params().get('HaStandbyVersion')
def set_HaStandbyVersion(self,HaStandbyVersion):
self.add_query_param('HaStandbyVersion',HaStandbyVersion)
def get_IsStandbyStandard(self):
return self.get_query_params().get('IsStandbyStandard')
def set_IsStandbyStandard(self,IsStandbyStandard):
self.add_query_param('IsStandbyStandard',IsStandbyStandard)
def get_HaActiveClusterKey(self):
return self.get_query_params().get('HaActiveClusterKey')
def set_HaActiveClusterKey(self,HaActiveClusterKey):
self.add_query_param('HaActiveClusterKey',HaActiveClusterKey)
def get_HaStandbyPassword(self):
return self.get_query_params().get('HaStandbyPassword')
def set_HaStandbyPassword(self,HaStandbyPassword):
self.add_query_param('HaStandbyPassword',HaStandbyPassword)
def get_HaStandbyClusterKey(self):
return self.get_query_params().get('HaStandbyClusterKey')
def set_HaStandbyClusterKey(self,HaStandbyClusterKey):
self.add_query_param('HaStandbyClusterKey',HaStandbyClusterKey)
def get_HaStandbyHbaseFsDir(self):
return self.get_query_params().get('HaStandbyHbaseFsDir')
def set_HaStandbyHbaseFsDir(self,HaStandbyHbaseFsDir):
self.add_query_param('HaStandbyHbaseFsDir',HaStandbyHbaseFsDir)
def get_HaActiveHbaseFsDir(self):
return self.get_query_params().get('HaActiveHbaseFsDir')
def set_HaActiveHbaseFsDir(self,HaActiveHbaseFsDir):
self.add_query_param('HaActiveHbaseFsDir',HaActiveHbaseFsDir)
def get_HaActiveDBType(self):
return self.get_query_params().get('HaActiveDBType')
def set_HaActiveDBType(self,HaActiveDBType):
self.add_query_param('HaActiveDBType',HaActiveDBType)
def get_HaActivePassword(self):
return self.get_query_params().get('HaActivePassword')
def set_HaActivePassword(self,HaActivePassword):
self.add_query_param('HaActivePassword',HaActivePassword)
def get_IsActiveStandard(self):
return self.get_query_params().get('IsActiveStandard')
def METHOD_NAME(self,IsActiveStandard):
self.add_query_param('IsActiveStandard',IsActiveStandard)
def get_HaStandbyUser(self):
return self.get_query_params().get('HaStandbyUser')
def set_HaStandbyUser(self,HaStandbyUser):
self.add_query_param('HaStandbyUser',HaStandbyUser)
def get_HaActive(self):
return self.get_query_params().get('HaActive')
def set_HaActive(self,HaActive):
self.add_query_param('HaActive',HaActive)
def get_HaStandby(self):
return self.get_query_params().get('HaStandby')
def set_HaStandby(self,HaStandby):
self.add_query_param('HaStandby',HaStandby)
def get_HaStandbyHdfsUri(self):
return self.get_query_params().get('HaStandbyHdfsUri')
def set_HaStandbyHdfsUri(self,HaStandbyHdfsUri):
self.add_query_param('HaStandbyHdfsUri',HaStandbyHdfsUri)
def get_HaActiveVersion(self):
return self.get_query_params().get('HaActiveVersion')
def set_HaActiveVersion(self,HaActiveVersion):
self.add_query_param('HaActiveVersion',HaActiveVersion)
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_HaStandbyDBType(self):
return self.get_query_params().get('HaStandbyDBType')
def set_HaStandbyDBType(self,HaStandbyDBType):
self.add_query_param('HaStandbyDBType',HaStandbyDBType)
def get_HaTables(self):
return self.get_query_params().get('HaTables')
def set_HaTables(self,HaTables):
self.add_query_param('HaTables',HaTables)
def get_HaActiveUser(self):
return self.get_query_params().get('HaActiveUser')
def set_HaActiveUser(self,HaActiveUser):
		self.add_query_param('HaActiveUser',HaActiveUser) | null
444 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkedas.endpoint import endpoint_data
class StartK8sAppPrecheckRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'Edas', '2017-08-01', 'StartK8sAppPrecheck','Edas')
self.set_uri_pattern('/pop/v5/k8s/app_precheck')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ComponentIds(self): # String
return self.get_query_params().get('ComponentIds')
def set_ComponentIds(self, ComponentIds): # String
self.add_query_param('ComponentIds', ComponentIds)
def get_Replicas(self): # Integer
return self.get_query_params().get('Replicas')
def set_Replicas(self, Replicas): # Integer
self.add_query_param('Replicas', Replicas)
def get_RequestsEphemeralStorage(self): # Integer
return self.get_query_params().get('RequestsEphemeralStorage')
def set_RequestsEphemeralStorage(self, RequestsEphemeralStorage): # Integer
self.add_query_param('RequestsEphemeralStorage', RequestsEphemeralStorage)
def get_Envs(self): # String
return self.get_query_params().get('Envs')
def set_Envs(self, Envs): # String
self.add_query_param('Envs', Envs)
def METHOD_NAME(self): # String
return self.get_query_params().get('Annotations')
def set_Annotations(self, Annotations): # String
self.add_query_param('Annotations', Annotations)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_RequestsMem(self): # Integer
return self.get_query_params().get('RequestsMem')
def set_RequestsMem(self, RequestsMem): # Integer
self.add_query_param('RequestsMem', RequestsMem)
def get_LocalVolume(self): # String
return self.get_query_params().get('LocalVolume')
def set_LocalVolume(self, LocalVolume): # String
self.add_query_param('LocalVolume', LocalVolume)
def get_EnvFroms(self): # String
return self.get_query_params().get('EnvFroms')
def set_EnvFroms(self, EnvFroms): # String
self.add_query_param('EnvFroms', EnvFroms)
def get_Labels(self): # String
return self.get_query_params().get('Labels')
def set_Labels(self, Labels): # String
self.add_query_param('Labels', Labels)
def get_LimitMem(self): # Integer
return self.get_query_params().get('LimitMem')
def set_LimitMem(self, LimitMem): # Integer
self.add_query_param('LimitMem', LimitMem)
def get_LimitEphemeralStorage(self): # Integer
return self.get_query_params().get('LimitEphemeralStorage')
def set_LimitEphemeralStorage(self, LimitEphemeralStorage): # Integer
self.add_query_param('LimitEphemeralStorage', LimitEphemeralStorage)
def get_LimitmCpu(self): # Integer
return self.get_query_params().get('LimitmCpu')
def set_LimitmCpu(self, LimitmCpu): # Integer
self.add_query_param('LimitmCpu', LimitmCpu)
def get_ConfigMountDescs(self): # String
return self.get_query_params().get('ConfigMountDescs')
def set_ConfigMountDescs(self, ConfigMountDescs): # String
self.add_query_param('ConfigMountDescs', ConfigMountDescs)
def get_AppName(self): # String
return self.get_query_params().get('AppName')
def set_AppName(self, AppName): # String
self.add_query_param('AppName', AppName)
def get_PackageUrl(self): # String
return self.get_query_params().get('PackageUrl')
def set_PackageUrl(self, PackageUrl): # String
self.add_query_param('PackageUrl', PackageUrl)
def get_AppId(self): # String
return self.get_query_params().get('AppId')
def set_AppId(self, AppId): # String
self.add_query_param('AppId', AppId)
def get_EmptyDirs(self): # String
return self.get_query_params().get('EmptyDirs')
def set_EmptyDirs(self, EmptyDirs): # String
self.add_query_param('EmptyDirs', EmptyDirs)
def get_PvcMountDescs(self): # String
return self.get_query_params().get('PvcMountDescs')
def set_PvcMountDescs(self, PvcMountDescs): # String
self.add_query_param('PvcMountDescs', PvcMountDescs)
def get_ImageUrl(self): # String
return self.get_query_params().get('ImageUrl')
def set_ImageUrl(self, ImageUrl): # String
self.add_query_param('ImageUrl', ImageUrl)
def get_Namespace(self): # String
return self.get_query_params().get('Namespace')
def set_Namespace(self, Namespace): # String
self.add_query_param('Namespace', Namespace)
def get_RequestsmCpu(self): # Integer
return self.get_query_params().get('RequestsmCpu')
def set_RequestsmCpu(self, RequestsmCpu): # Integer
self.add_query_param('RequestsmCpu', RequestsmCpu)
def get_JavaStartUpConfig(self): # String
return self.get_query_params().get('JavaStartUpConfig')
def set_JavaStartUpConfig(self, JavaStartUpConfig): # String
self.add_query_param('JavaStartUpConfig', JavaStartUpConfig) | null |
445 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ListInstancesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'cloudphone', '2020-12-30', 'ListInstances')
self.set_method('POST')
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ImageId(self): # String
return self.get_query_params().get('ImageId')
def set_ImageId(self, ImageId): # String
self.add_query_param('ImageId', ImageId)
def get_ShowWebRtcToken(self): # Boolean
return self.get_query_params().get('ShowWebRtcToken')
def set_ShowWebRtcToken(self, ShowWebRtcToken): # Boolean
self.add_query_param('ShowWebRtcToken', ShowWebRtcToken)
def get_KeyPairName(self): # String
return self.get_query_params().get('KeyPairName')
def set_KeyPairName(self, KeyPairName): # String
self.add_query_param('KeyPairName', KeyPairName)
def get_Resolution(self): # String
return self.get_query_params().get('Resolution')
def set_Resolution(self, Resolution): # String
self.add_query_param('Resolution', Resolution)
def get_NextToken(self): # String
return self.get_query_params().get('NextToken')
def set_NextToken(self, NextToken): # String
self.add_query_param('NextToken', NextToken)
def get_InstanceType(self): # String
return self.get_query_params().get('InstanceType')
def set_InstanceType(self, InstanceType): # String
self.add_query_param('InstanceType', InstanceType)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def METHOD_NAME(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InstanceName(self): # String
return self.get_query_params().get('InstanceName')
def set_InstanceName(self, InstanceName): # String
self.add_query_param('InstanceName', InstanceName)
def get_InstanceIds(self): # RepeatList
return self.get_query_params().get('InstanceId')
def set_InstanceIds(self, InstanceId): # RepeatList
for depth1 in range(len(InstanceId)):
self.add_query_param('InstanceId.' + str(depth1 + 1), InstanceId[depth1])
def get_MaxResults(self): # Integer
return self.get_query_params().get('MaxResults')
def set_MaxResults(self, MaxResults): # Integer
self.add_query_param('MaxResults', MaxResults)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_ChargeType(self): # String
return self.get_query_params().get('ChargeType')
def set_ChargeType(self, ChargeType): # String
self.add_query_param('ChargeType', ChargeType)
def get_Status(self): # String
return self.get_query_params().get('Status')
def set_Status(self, Status): # String
self.add_query_param('Status', Status) | null |
446 | """HDF5 adapter class
"""
import h5py
import warnings
import numpy as np
from . import ImageSeriesAdapter
from ..imageseriesiter import ImageSeriesIterator
class HDF5ImageSeriesAdapter(ImageSeriesAdapter):
"""collection of images in HDF5 format"""
format = 'hdf5'
def __init__(self, fname, **kwargs):
"""
Constructor for HDF5 ImageSeries
Parameters
----------
fname : str or h5py.File object
filename of the HDF5 file, or an open h5py file. Note that this
class will close the h5py.File when finished.
**kwargs : Keyword arguments
See below.
Keyword Arguments
-----------------
path : str, required
The path to the HDF dataset containing the image data
dataname : str, optional
The name of the HDF dataset containing the 2-d or 3-d image data.
The default value is 'images'.
Returns
-------
None.
"""
if isinstance(fname, h5py.File):
self.__h5name = fname.filename
self.__h5file = fname
else:
self.__h5name = fname
self.__h5file = h5py.File(self.__h5name, 'r')
self.__path = kwargs['path']
self.__dataname = kwargs.pop('dataname', 'images')
self.__images = '/'.join([self.__path, self.__dataname])
self.METHOD_NAME()
self._meta = self._getmeta()
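# Usage sketch (hypothetical file and dataset names, shown only to illustrate
# the keyword arguments documented above):
#
#     ims = HDF5ImageSeriesAdapter('scan.h5', path='/entry/data',
#                                  dataname='images')
#     frame0 = ims[0]       # first 2-d frame
#     nframes = len(ims)    # 1 for a 2-d dataset, N for an (N, rows, cols) stack
#     ims.close()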
def close(self):
self.__image_dataset = None
self.__data_group = None
self.__h5file.close()
self.__h5file = None
def __del__(self):
# !!! Note this is not ideal, as the use of __del__ is problematic.
# However, it is highly unlikely that the usage of a ImageSeries
# would pose a problem. A warning will (hopefully) be emitted if
# an issue arises at some point
try:
self.close()
except Exception:
warnings.warn("HDF5ImageSeries could not close h5 file")
pass
def __getitem__(self, key):
if self._ndim == 2:
if key != 0:
raise IndexError(
f'key {key} is out of range for imageseries with length 1'
)
# !!! necessary when not returning a slice
return np.asarray(self.__image_dataset)
else:
return self.__image_dataset[key]
def __iter__(self):
return ImageSeriesIterator(self)
def __len__(self):
if self._ndim == 2:
return 1
else:
# !!! must be 3-d; exception handled in load_data()
return len(self.__image_dataset)
def __getstate__(self):
# Remove any non-pickleable attributes
to_remove = [
'__h5file',
'__image_dataset',
'__data_group',
]
# Prefix them with the private prefix
prefix = f'_{self.__class__.__name__}'
to_remove = [f'{prefix}{x}' for x in to_remove]
# Make a copy of the dict to modify
state = self.__dict__.copy()
# Remove them
for attr in to_remove:
state.pop(attr)
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.__h5file = h5py.File(self.__h5name, 'r')
self.METHOD_NAME()
def METHOD_NAME(self):
self.__image_dataset = self.__h5file[self.__images]
self._ndim = self.__image_dataset.ndim
if self._ndim not in [2, 3]:
raise RuntimeError(
f'Image data must be a 2-d or 3-d array; yours is {self._ndim}'
)
self.__data_group = self.__h5file[self.__path]
def _getmeta(self):
mdict = {}
for k, v in list(self.__data_group.attrs.items()):
mdict[k] = v
return mdict
@property
def metadata(self):
"""(read-only) Image sequence metadata
note: metadata loaded on open and allowed to be modified
"""
return self._meta
@property
def dtype(self):
return self.__image_dataset.dtype
@property
def shape(self):
if self._ndim == 2:
return self.__image_dataset.shape
else:
return self.__image_dataset.shape[1:]
pass # end class | null |
447 | # coding=utf-8
# Copyright 2023 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for (Fashion) MNIST."""
import numpy as np
import scipy
def one_hot(a, num_classes):
return np.squeeze(np.eye(num_classes)[a.reshape(-1)])
def brier_score(y, p):
"""Compute the Brier score.
Brier Score: see
https://www.stat.washington.edu/raftery/Research/PDF/Gneiting2007jasa.pdf,
page 363, Example 1
Args:
y: one-hot encoding of the true classes, size (?, num_classes)
p: numpy array, size (?, num_classes)
containing the output predicted probabilities
Returns:
bs: Brier score.
"""
return np.mean(np.power(p - y, 2))
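# Worked example (illustrative values only): for a single 2-class sample with
# one-hot label y = [0, 1], a perfect prediction p = [0, 1] gives
# mean((p - y)^2) = 0, while a uniform prediction p = [0.5, 0.5] gives
# mean([0.25, 0.25]) = 0.25.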
def METHOD_NAME(y, p_mean, num_bins=10):
"""Compute the calibration.
References:
https://arxiv.org/abs/1706.04599
https://arxiv.org/abs/1807.00263
Args:
y: one-hot encoding of the true classes, size (?, num_classes)
p_mean: numpy array, size (?, num_classes)
containing the mean output predicted probabilities
num_bins: number of bins
Returns:
ece: Expected Calibration Error
mce: Maximum Calibration Error
"""
# Compute for every test sample x, the predicted class.
class_pred = np.argmax(p_mean, axis=1)
# and the confidence (probability) associated with it.
conf = np.max(p_mean, axis=1)
# Convert y from one-hot encoding to the number of the class
y = np.argmax(y, axis=1)
# Storage
acc_tab = np.zeros(num_bins) # empirical (true) confidence
mean_conf = np.zeros(num_bins) # predicted confidence
nb_items_bin = np.zeros(num_bins) # number of items in the bins
tau_tab = np.linspace(0, 1, num_bins+1) # confidence bins
for i in np.arange(num_bins): # iterate over the bins
# select the items where the predicted max probability falls in the bin
# [tau_tab[i], tau_tab[i + 1)]
sec = (tau_tab[i + 1] > conf) & (conf >= tau_tab[i])
nb_items_bin[i] = np.sum(sec) # Number of items in the bin
# select the predicted classes, and the true classes
class_pred_sec, y_sec = class_pred[sec], y[sec]
# average of the predicted max probabilities
mean_conf[i] = np.mean(conf[sec]) if nb_items_bin[i] > 0 else np.nan
# compute the empirical confidence
acc_tab[i] = np.mean(
class_pred_sec == y_sec) if nb_items_bin[i] > 0 else np.nan
# Cleaning
mean_conf = mean_conf[nb_items_bin > 0]
acc_tab = acc_tab[nb_items_bin > 0]
nb_items_bin = nb_items_bin[nb_items_bin > 0]
# Expected Calibration Error
ece = np.average(
np.absolute(mean_conf - acc_tab),
weights=nb_items_bin.astype(float) / np.sum(nb_items_bin))
# Maximum Calibration Error
mce = np.max(np.absolute(mean_conf - acc_tab))
return ece, mce
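# Usage sketch (hypothetical arrays): with one-hot labels y and predicted
# probabilities p_mean, both of shape (n_samples, num_classes),
#     ece, mce = METHOD_NAME(y, p_mean, num_bins=10)
# returns the bin-weighted mean |confidence - accuracy| (ECE) and the largest
# such gap over the occupied bins (MCE), as computed above.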
def ensemble_metrics(x,
y,
model,
log_likelihood_fn,
n_samples=1,
weight_files=None):
"""Evaluate metrics of an ensemble.
Args:
x: numpy array of inputs
y: numpy array of labels
model: tf.keras.Model.
log_likelihood_fn: keras function of log likelihood. For classification
tasks, log_likelihood_fn(...)[1] should return the logits
n_samples: number of Monte Carlo samples to draw per ensemble member (each
weight file).
weight_files: to draw samples from multiple weight sets, specify a list of
weight files to load. These files must have been generated through
keras's model.save_weights(...).
Returns:
metrics_dict: dictionary containing the metrics
"""
if weight_files is None:
ensemble_logprobs = [log_likelihood_fn([x, y])[0] for _ in range(n_samples)]
metric_values = [model.evaluate(x, y, verbose=0)
for _ in range(n_samples)]
ensemble_logits = [log_likelihood_fn([x, y])[1] for _ in range(n_samples)]
else:
ensemble_logprobs = []
metric_values = []
ensemble_logits = []
for filename in weight_files:
model.load_weights(filename)
ensemble_logprobs.extend([log_likelihood_fn([x, y])[0]
for _ in range(n_samples)])
ensemble_logits.extend([log_likelihood_fn([x, y])[1]
for _ in range(n_samples)])
metric_values.extend([model.evaluate(x, y, verbose=0)
for _ in range(n_samples)])
metric_values = np.mean(np.array(metric_values), axis=0)
results = {}
for m, name in zip(metric_values, model.metrics_names):
results[name] = m
ensemble_logprobs = np.array(ensemble_logprobs)
probabilistic_log_likelihood = np.mean(
scipy.special.logsumexp(
np.sum(ensemble_logprobs, axis=2)
if len(ensemble_logprobs.shape) > 2 else ensemble_logprobs,
b=1. / ensemble_logprobs.shape[0],
axis=0),
axis=0)
results['probabilistic_log_likelihood'] = probabilistic_log_likelihood
ensemble_logits = np.array(ensemble_logits)
probs = np.mean(scipy.special.softmax(ensemble_logits, axis=2), axis=0)
class_pred = np.argmax(probs, axis=1)
probabilistic_accuracy = np.mean(np.equal(y, class_pred))
results['probabilistic_accuracy'] = probabilistic_accuracy
results['ece'], results['mce'] = METHOD_NAME(
one_hot(y, probs.shape[1]), probs)
results['brier_score'] = brier_score(one_hot(y, probs.shape[1]), probs)
return results | null |
448 | # Copyright 2017-2023 Posit Software, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import logging
import os
import re
import sys
import time
from guild import batch_util
from guild import cli
from guild import op_util
from guild import run_util
from guild import tfevent
from guild import util
from . import runs_impl
log = logging.getLogger("guild")
DEFAULT_PREPARE_THRESHOLD = 5 # seconds
def main(args):
if args.export_scalars:
METHOD_NAME(args)
else:
_run_tensorboard(args)
def METHOD_NAME(args):
with _open_file(args.export_scalars) as f:
out = csv.writer(f)
out.writerow(["run", "path", "tag", "value", "step"])
for run in _list_runs_cb(args)():
for path, _digest, reader in tfevent.scalar_readers(run.dir):
subpath = os.path.relpath(path, run.dir)
for tag, value, step in reader:
out.writerow([run.id, subpath, tag, value, step])
def _open_file(path):
if path == "-":
return util.StdIOContextManager(sys.stdout)
util.ensure_dir(os.path.dirname(path))
try:
return open(path, "w")
except (OSError, IOError) as e:
cli.error(f"error opening {path}: {e}")
def _run_tensorboard(args):
from guild import tensorboard
tensorboard.setup_logging()
with util.TempDir("guild-tensorboard-", keep=_keep_logdir(args)) as tmp:
logdir = tmp.path
(log.info if args.keep_logdir else log.debug)("Using logdir %s", logdir)
tensorboard_options = _tensorboard_options(args)
monitor = tensorboard.RunsMonitor(
logdir,
_list_runs_cb(args),
interval=args.refresh_interval,
log_images=not args.skip_images,
log_hparams=not args.skip_hparams,
run_name_cb=_run_name_cb(args),
)
t0 = time.time()
cli.out("Preparing runs for TensorBoard")
monitor.run_once(exit_on_error=True)
if args.test_logdir:
cli.out(f"Initialized log dir {logdir}")
return
_maybe_log_prepare_time(t0)
monitor.start()
try:
tensorboard.serve_forever(
logdir=logdir,
host=(args.host or "0.0.0.0"),
port=(args.port or util.free_port()),
reload_interval=args.refresh_interval,
tensorboard_options=tensorboard_options,
middleware=_maybe_tb_middleware(args),
ready_cb=_open_cb(args),
)
except tensorboard.TensorboardError as e:
cli.error(str(e))
finally:
log.debug("Stopping")
monitor.stop()
if not args.keep_logdir:
# Removal of temp logdir occurs when context manager
# exits.
log.debug("Removing logdir %s", logdir)
else:
print(f"TensorBoard logs saved in {logdir}")
if util.get_platform() != "Windows":
cli.out()
def _keep_logdir(args):
return args.keep_logdir or args.test_logdir
def _maybe_log_prepare_time(t0):
prepare_time = time.time() - t0
if prepare_time > _prepare_threshold():
log.warning(
"Guild took %0.2f seconds to prepare runs. To reduce startup time, "
"try running with '--skip-images' or '--skip-hparams' options "
"or reduce the number of runs with filters. Try 'guild tensorboard "
"--help' for filter options.",
prepare_time,
)
def _prepare_threshold():
try:
return float(os.environ["PREPARE_THRESHOLD"])
except (KeyError, ValueError):
return DEFAULT_PREPARE_THRESHOLD
def _tensorboard_options(args):
name_vals = [_parse_tensorboard_opt(opt) for opt in args.tensorboard_options]
return dict(name_vals)
def _parse_tensorboard_opt(opt):
parts = opt.split("=", 1)
if len(parts) != 2:
cli.error(f"invalid TensorBoard option {opt!r} - must be OPTION=VALUE")
return parts
def _run_name_cb(args):
if args.run_name_flags is None:
return None
label_template = _run_label_template(args.run_name_flags)
def f(run):
flags = run.get("flags")
return run_util.run_name(run, _run_label(label_template, flags))
return f
def _run_label(label_template, flags):
if not label_template:
return ""
return op_util.run_label(label_template, flags)
def _run_label_template(flags_arg):
flags = _split_flags(flags_arg)
return " ".join([f"{flag}=${{{flag}}}" for flag in flags])
def _split_flags(flags_arg):
return [arg.strip() for arg in re.split(r"[ ,]", flags_arg) if arg]
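# Worked example (hypothetical flags argument): given the string "lr, epochs",
# _split_flags returns ["lr", "epochs"] and _run_label_template builds
# "lr=${lr} epochs=${epochs}"; _run_label then passes that template and each
# run's flags to op_util.run_label when naming TensorBoard runs.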
def _list_runs_cb(args):
def f():
return (
_runs_for_infile(args.runs_infile)
if args.runs_infile else _runs_for_args(args)
)
return f
def _runs_for_infile(path):
run_ids = _read_infile(path).split()
maybe_runs = [run_util.run_for_id(id) for id in run_ids]
return [run for run in maybe_runs if run.opref]
def _read_infile(path):
try:
f = open(path)
except FileNotFoundError:
return ""
else:
with f:
return f.read()
def _runs_for_args(args):
runs = runs_impl.runs_for_args(args)
return runs if args.include_batch else _strip_batch_runs(runs)
def _strip_batch_runs(runs):
return [run for run in runs if not batch_util.is_batch(run)]
def _maybe_tb_middleware(args):
if args.dark_mode:
from guild import tensorboard_util
return tensorboard_util.dark_mode_middleware()
return None
def _open_cb(args):
if args.no_open:
return None
def f(url):
if args.tab:
url += "#" + args.tab
util.open_url(url)
return f | null |
449 | import requests
from grants.sync.helpers import is_txn_done_recently, record_contribution_activity, txn_already_used
def find_txn_on_zcash_explorer(contribution):
subscription = contribution.subscription
grant = subscription.grant
token_symbol = subscription.token_symbol
if subscription.tenant != 'ZCASH':
return None
if token_symbol != 'ZEC':
return None
to_address = grant.zcash_payout_address
from_address = subscription.contributor_address
amount = subscription.amount_per_period
url = f'https://sochain.com/api/v2/address/ZEC/{from_address}'
response = requests.get(url).json()
# Check contributors txn history
if response['status'] == 'success' and response['data'] and response['data']['txs']:
txns = response['data']['txs']
for txn in txns:
if txn.get('outgoing') and txn['outgoing']['outputs']:
for output in txn['outgoing']['outputs']:
if contribution.tx_id and contribution.tx_id != '0x0':
if txn['txid'] == contribution.tx_id:
if (
output['address'] == to_address and
float(output['value']) == float(amount) and
is_txn_done_recently(txn['time'])
):
return txn['txid']
else:
if (
output['address'] == to_address and
response['data']['address'] == from_address and
float(output['value']) == float(amount) and
is_txn_done_recently(txn['time']) and
not txn_already_used(txn['txid'], token_symbol)
):
return txn['txid']
url = f'https://sochain.com/api/v2/address/ZEC/{to_address}'
response = requests.get(url).json()
# Check funders txn history
# if response['status'] == 'success' and response['data'] and response['data']['txs']:
# txns = response['data']['txs']
# for txn in txns:
# if txn.get('incoming') and txn['incoming']['inputs']:
# for input_tx in txn['incoming']['inputs']:
# if (
# input_tx['address'] == from_address and
# response['data']['address'] == to_address and
# is_txn_done_recently(txn['time']) and
# not txn_already_used(txn['txid'], token_symbol)
# ):
# return txn['txid']
return None
def METHOD_NAME(txnid):
if not txnid:
return None
url = f'https://sochain.com/api/v2/is_tx_confirmed/ZEC/{txnid}'
response = requests.get(url).json()
if (
response['status'] == 'success' and
response['data'] and
response['data']['is_confirmed']
):
return True
return None
def is_valid_zcash_txn(contribution):
subscription = contribution.subscription
grant = subscription.grant
txn_id = contribution.tx_id
to_address = grant.zcash_payout_address
amount = subscription.amount_per_period
token_symbol = subscription.token_symbol
if not txn_id or txn_id == '0x0':
return None
url = f'https://sochain.com/api/v2/tx/ZEC/{txn_id}'
response = requests.get(url).json()
if (
response['status'] == 'success' and
response['data'] and
response['data']['outputs']
):
for txn in response['data']['outputs']:
if (
txn['address'] == to_address and
float(txn['value']) == float(amount) and
is_txn_done_recently(response['data']['time']) and
not txn_already_used(txn_id, token_symbol)
):
return True
return None
def sync_zcash_payout(contribution):
is_successfull_txn = False
if not contribution.tx_id or contribution.tx_id == '0x0':
# user entered t-addr.
txn = find_txn_on_zcash_explorer(contribution)
if txn:
contribution.tx_id = txn
contribution.save()
is_successfull_txn = METHOD_NAME(contribution.tx_id)
else:
# user entered txn-id or txn-id picked up by cron.
is_successfull_txn = is_valid_zcash_txn(contribution)
if is_successfull_txn:
contribution.success = True
contribution.tx_cleared = True
contribution.checkout_type = 'zcash_std'
record_contribution_activity(contribution)
contribution.save() | null |
450 | """
Copyright (c) 2022, NVIDIA CORPORATION.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import tensorflow as tf
import horovod.tensorflow as hvd
from sparse_operation_kit import experiment as sok
if __name__ == "__main__":
physical_devices = tf.config.list_physical_devices("GPU")
for gpu_instance in physical_devices:
tf.config.experimental.set_memory_growth(gpu_instance, True)
dim = 128
vocab_size = 1024 * 128
batch = 8192
sok_var = sok.DynamicVariable(dimension=dim)
sok_optimizer = sok.SGD(lr=1.0)
indices_val = [idx for idx in range(vocab_size)]
table_val = tf.nn.embedding_lookup(sok_var, indices_val)
tf_var = tf.Variable(table_val)
tf_optimizer = tf.optimizers.SGD(learning_rate=1.0)
def sok_step(indices, weight, var):
with tf.GradientTape() as tape:
emb = tf.nn.embedding_lookup(var, indices)
emb_mul = emb * weight
loss = tf.reduce_sum(emb_mul)
grads = tape.gradient(loss, [var])
sok_optimizer.apply_gradients(zip(grads, [var]))
return loss
@tf.function
def tf_step(indices, weight, var):
with tf.GradientTape() as tape:
emb = tf.nn.embedding_lookup(var, indices)
emb_mul = emb * weight
loss = tf.reduce_sum(emb_mul)
grads = tape.gradient(loss, [var])
tf_optimizer.apply_gradients(zip(grads, [var]))
return loss
num = np.random.randint(1, batch + 1, 1)[0]
for i in range(100):
print("---------------------Iter %d---------------------" % i)
indices_val = np.random.randint(0, vocab_size, num).astype(np.int64)
indices_val = tf.convert_to_tensor(indices_val, dtype=tf.int64)
weight_val = np.random.rand(num, dim).astype(np.float32)
weight_val = tf.convert_to_tensor(weight_val, dtype=tf.float32)
sok_loss = sok_step(indices_val, weight_val, sok_var)
tf_loss = tf_step(indices_val, weight_val, tf_var)
print(sok_loss, tf_loss)
indices_val = [idx for idx in range(vocab_size)]
table_val = tf.nn.embedding_lookup(sok_var, indices_val)
diff = tf.reduce_mean((table_val - tf_var) ** 2.0)
assert diff < 1e-6
print("[SOK INFO] Test variable with sok.SGD successfully")
# ----------------------------Test eager mode----------------------------
def sok_step_eager(indices, weight, var):
with tf.GradientTape() as tape:
emb = tf.nn.embedding_lookup(var, indices)
emb_mul = emb * weight
loss = tf.reduce_sum(emb_mul)
grads = tape.gradient(loss, [var])
sok_optimizer.apply_gradients(zip(grads, [var]))
return loss
def METHOD_NAME(indices, weight, var):
with tf.GradientTape() as tape:
emb = tf.nn.embedding_lookup(var, indices)
emb_mul = emb * weight
loss = tf.reduce_sum(emb_mul)
grads = tape.gradient(loss, [var])
tf_optimizer.apply_gradients(zip(grads, [var]))
return loss
for i in range(100):
num = np.random.randint(1, batch + 1, 1)[0]
print("---------------------Iter %d---------------------" % i)
indices_val = np.random.randint(0, vocab_size, num).astype(np.int64)
indices_val = tf.convert_to_tensor(indices_val, dtype=tf.int64)
weight_val = np.random.rand(num, dim).astype(np.float32)
weight_val = tf.convert_to_tensor(weight_val, dtype=tf.float32)
sok_loss = sok_step_eager(indices_val, weight_val, sok_var)
tf_loss = METHOD_NAME(indices_val, weight_val, tf_var)
print(sok_loss, tf_loss)
indices_val = [idx for idx in range(vocab_size)]
table_val = tf.nn.embedding_lookup(sok_var, indices_val)
diff = tf.reduce_mean((table_val - tf_var) ** 2.0)
assert diff < 1e-6
print("[SOK INFO] Test variable with sok.SGD and eager mode successfully") | null |
451 | # Copyright 2021 Camptocamp SA
# @author Iván Todorovich <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.addons.shopinvader.tests.common import ProductCommonCase
class ProductCase(ProductCommonCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.env = cls.env(context=dict(cls.env.context, test_queue_job_no_delay=True))
cls.backend = cls.backend.with_context(test_queue_job_no_delay=True)
def _assertDictContains(self, source, values, msg=None):
if msg is None:
msg = ""
for key, value in values.items():
self.assertEqual(source[key], value, f"key='{key}' {msg}")
def test_product_price(self):
self._assertDictContains(
self.shopinvader_variant.price["default"],
{
"value_taxed": 750.0,
"value_untaxed": 652.17,
"original_value_taxed": 750.0,
"original_value_untaxed": 652.17,
},
)
def test_product_get_price(self):
# self.base_pricelist doesn't define a tax mapping. We are tax included
fiscal_position_fr = self.env.ref("shopinvader.fiscal_position_0")
price = self.shopinvader_variant._get_price(
pricelist=self.base_pricelist, fposition=fiscal_position_fr
)
self._assertDictContains(
price,
{
"value_taxed": 750.0,
"value_untaxed": 652.17,
"original_value_taxed": 750.0,
"original_value_untaxed": 652.17,
},
)
# the promotion pricelist defines a discount of 20% on all products
promotion_price_list = self.env.ref("shopinvader.pricelist_1")
price = self.shopinvader_variant._get_price(
pricelist=promotion_price_list, fposition=fiscal_position_fr
)
self._assertDictContains(
price,
{
"value_taxed": 600.0,
"value_untaxed": 521.74,
"original_value_taxed": 600.0,
"original_value_untaxed": 521.74,
},
)
# use a fiscal position defining a mapping from tax included to tax
# excluded
tax_exclude_fiscal_position = self.env.ref("shopinvader.fiscal_position_1")
price = self.shopinvader_variant._get_price(
pricelist=self.base_pricelist, fposition=tax_exclude_fiscal_position
)
self._assertDictContains(
price,
{
"value_taxed": 750.0,
"value_untaxed": 652.17,
"original_value_taxed": 750.0,
"original_value_untaxed": 652.17,
},
)
price = self.shopinvader_variant._get_price(
pricelist=promotion_price_list, fposition=tax_exclude_fiscal_position
)
self._assertDictContains(
price,
{
"value_taxed": 600.0,
"value_untaxed": 521.74,
"original_value_taxed": 600.0,
"original_value_untaxed": 521.74,
},
)
def METHOD_NAME(self):
# Define a promotion price for the product with min_qty = 10
fposition = self.env.ref("shopinvader.fiscal_position_0")
pricelist = self.base_pricelist
self.env["product.pricelist.item"].create(
{
"name": "Discount on Product when Qty >= 10",
"pricelist_id": pricelist.id,
"base": "list_price",
"compute_price": "percentage",
"percent_price": "20",
"applied_on": "0_product_variant",
"product_id": self.shopinvader_variant.record_id.id,
"min_quantity": 10.0,
}
)
# Case 1 (qty = 1.0). No discount is applied
price = self.shopinvader_variant._get_price(
qty=1.0, pricelist=pricelist, fposition=fposition
)
self._assertDictContains(
price,
{
"value_taxed": 750.0,
"value_untaxed": 652.17,
"original_value_taxed": 750.0,
"original_value_untaxed": 652.17,
},
)
# Case 2 (qty = 10.0). Discount is applied
# the promotion pricelist defines a discount of 20% on all products
price = self.shopinvader_variant._get_price(
qty=10.0, pricelist=pricelist, fposition=fposition
)
self._assertDictContains(
price,
{
"value_taxed": 600.0,
"value_untaxed": 521.74,
"original_value_taxed": 600.0,
"original_value_untaxed": 521.74,
},
)
def test_product_get_price_discount_policy(self):
# Ensure that discount is with 2 digits
self.env.ref("product.decimal_discount").digits = 2
# self.base_pricelist doesn't define a tax mapping. We are tax included
# we modify the discount_policy
self.base_pricelist.discount_policy = "without_discount"
fiscal_position_fr = self.env.ref("shopinvader.fiscal_position_0")
price = self.shopinvader_variant._get_price(
pricelist=self.base_pricelist, fposition=fiscal_position_fr
)
self._assertDictContains(
price,
{
"value_taxed": 750.0,
"value_untaxed": 652.17,
"original_value_taxed": 750.0,
"original_value_untaxed": 652.17,
},
)
# the promotion pricelist defines a discount of 20% on all products
# we modify the discount_policy
promotion_price_list = self.env.ref("shopinvader.pricelist_1")
promotion_price_list.discount_policy = "without_discount"
price = self.shopinvader_variant._get_price(
pricelist=promotion_price_list, fposition=fiscal_position_fr
)
self._assertDictContains(
price,
{
"value_taxed": 600.0,
"value_untaxed": 521.74,
"original_value_taxed": 750.0,
"original_value_untaxed": 652.17,
},
)
# use the fiscal position defining a mapping from tax included to tax
# excluded
# Tax mapping should not impact the computation of the discount and
# the original value
tax_exclude_fiscal_position = self.env.ref("shopinvader.fiscal_position_1")
price = self.shopinvader_variant._get_price(
pricelist=self.base_pricelist, fposition=tax_exclude_fiscal_position
)
self._assertDictContains(
price,
{
"value_taxed": 750.0,
"value_untaxed": 652.17,
"original_value_taxed": 750.0,
"original_value_untaxed": 652.17,
},
)
price = self.shopinvader_variant._get_price(
pricelist=promotion_price_list, fposition=tax_exclude_fiscal_position
)
self._assertDictContains(
price,
{
"value_taxed": 600.0,
"value_untaxed": 521.74,
"original_value_taxed": 750.0,
"original_value_untaxed": 652.17,
},
) | null |
452 | #!/usr/bin/python
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from Utils.WAAgentUtil import waagent
from AbstractPatching import AbstractPatching
class redhatPatching(AbstractPatching):
def __init__(self, hutil):
super(redhatPatching,self).__init__(hutil)
self.cron_restart_cmd = 'service crond restart'
self.check_cmd = 'yum -q check-update'
self.check_security_cmd = 'yum -q --security check-update'
self.clean_cmd = 'yum clean packages'
self.download_cmd = 'yum -q -y --downloadonly update'
self.patch_cmd = 'yum -y update'
self.status_cmd = 'yum -q info'
self.pkg_query_cmd = 'repoquery -l'
self.cache_dir = '/var/cache/yum/'
def install(self):
"""
Install dependencies.
"""
# For yum --downloadonly option
waagent.Run('yum -y install yum-downloadonly', False)
# For yum --security option
retcode = waagent.Run('yum -y install yum-plugin-security')
if retcode > 0:
self.hutil.error("Failed to install yum-plugin-security")
# For package-cleanup, needs-restarting, repoquery
retcode = waagent.Run('yum -y install yum-utils')
if retcode > 0:
self.hutil.error("Failed to install yum-utils")
# For lsof
retcode = waagent.Run('yum -y install lsof')
if retcode > 0:
self.hutil.error("Failed to install lsof")
# Install missing dependencies
missing_dependency_list = self.check_missing_dependencies()
for pkg in missing_dependency_list:
retcode = waagent.Run('yum -y install ' + pkg)
if retcode > 0:
self.hutil.error("Failed to install missing dependency: " + pkg)
def METHOD_NAME(self, category):
"""
Check for available updates.
Return the list of packages to download and upgrade.
"""
if category == self.category_all:
check_cmd = self.check_cmd
elif category == self.category_required:
check_cmd = self.check_security_cmd
to_download = []
retcode,output = waagent.RunGetOutput(check_cmd, chk_err=False)
if retcode == 0:
return 0, to_download
elif retcode == 100:
lines = output.strip().split('\n')
for line in lines:
line = re.split(r'\s+', line.strip())
if len(line) != 3:
break
to_download.append(line[0])
return 0, to_download
elif retcode == 1:
return 1, to_download
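# Illustrative note: 'yum -q check-update' exits with 100 when updates are
# available and prints one line per package, e.g. (hypothetical output)
#     openssl.x86_64    1.0.2k-26.el7    updates
# The three whitespace-separated fields are package, version and repository;
# the parser above keeps only the package field for the download list.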
def download_package(self, package):
retcode = waagent.Run(self.download_cmd + ' ' + package, chk_err=False)
# Yum's exit code can be non-zero even on success, so check that the package rpm exists to verify the download succeeded.
return self.check_download(package)
def patch_package(self, package):
return waagent.Run(self.patch_cmd + ' ' + package)
def check_reboot(self):
retcode,last_kernel = waagent.RunGetOutput("rpm -q --last kernel")
last_kernel = last_kernel.split()[0][7:]
retcode,current_kernel = waagent.RunGetOutput('uname -r')
current_kernel = current_kernel.strip()
self.reboot_required = (last_kernel != current_kernel)
def report(self):
"""
TODO: Report the detailed status of patching
"""
for package_patched in self.patched:
self.info_pkg(package_patched)
def info_pkg(self, pkg_name):
"""
Return details about a package
"""
retcode,output = waagent.RunGetOutput(self.status_cmd + ' ' + pkg_name)
if retcode != 0:
self.hutil.error(output)
return None
installed_pkg_info_list = output.rpartition('Available Packages')[0].strip().split('\n')
available_pkg_info_list = output.rpartition('Available Packages')[-1].strip().split('\n')
pkg_info = dict()
pkg_info['installed'] = dict()
pkg_info['available'] = dict()
for item in installed_pkg_info_list:
if item.startswith('Name'):
pkg_info['installed']['name'] = item.split(':')[-1].strip()
elif item.startswith('Arch'):
pkg_info['installed']['arch'] = item.split(':')[-1].strip()
elif item.startswith('Version'):
pkg_info['installed']['version'] = item.split(':')[-1].strip()
elif item.startswith('Release'):
pkg_info['installed']['release'] = item.split(':')[-1].strip()
for item in available_pkg_info_list:
if item.startswith('Name'):
pkg_info['available']['name'] = item.split(':')[-1].strip()
elif item.startswith('Arch'):
pkg_info['available']['arch'] = item.split(':')[-1].strip()
elif item.startswith('Version'):
pkg_info['available']['version'] = item.split(':')[-1].strip()
elif item.startswith('Release'):
pkg_info['available']['release'] = item.split(':')[-1].strip()
return pkg_info
def check_download(self, pkg_name):
pkg_info = self.info_pkg(pkg_name)
name = pkg_info['available']['name']
arch = pkg_info['available']['arch']
version = pkg_info['available']['version']
release = pkg_info['available']['release']
package = '.'.join(['-'.join([name, version, release]), arch, 'rpm'])
retcode,output = waagent.RunGetOutput('cd ' + self.cache_dir + ';find . -name "'+ package + '"')
if retcode != 0:
self.hutil.error("Unable to check whether the download succeeded")
else:
if output == '':
return 1
else:
return 0
def check_missing_dependencies(self):
retcode, output = waagent.RunGetOutput('package-cleanup --problems', chk_err=False)
missing_dependency_list = []
for line in output.split('\n'):
if 'requires' not in line:
continue
words = line.split()
missing_dependency = words[words.index('requires') + 1]
if missing_dependency not in missing_dependency_list:
missing_dependency_list.append(missing_dependency)
return missing_dependency_list | null |
453 | from typing import (
Any,
Dict,
List,
Optional,
Set,
Union,
)
from typing_extensions import (
Literal,
TypedDict,
)
BadgeSourceT = Literal["admin", "galaxy"]
# Badges that can be explicitly set by admins.
AdminBadgeT = Literal[
"faster",
"slower",
"short_term",
"backed_up",
"not_backed_up",
"more_secure",
"less_secure",
"more_stable",
"less_stable",
]
# All badges - so AdminBadgeT plus Galaxy specifiable badges.
BadgeT = Union[
AdminBadgeT,
Literal[
"cloud",
"quota",
"no_quota",
"restricted",
],
]
class BadgeSpecDict(TypedDict):
"""Describe badges that can be set by Galaxy admins."""
type: AdminBadgeT
conflicts: List[AdminBadgeT]
BADGE_SPECIFICATION: List[BadgeSpecDict] = [
{"type": "faster", "conflicts": ["slower"]},
{"type": "slower", "conflicts": ["faster"]},
{"type": "short_term", "conflicts": []},
{"type": "backed_up", "conflicts": ["not_backed_up"]},
{"type": "not_backed_up", "conflicts": ["backed_up"]},
{"type": "more_secure", "conflicts": ["less_secure"]},
{"type": "less_secure", "conflicts": ["more_secure"]},
{"type": "more_stable", "conflicts": ["less_stable"]},
{"type": "less_stable", "conflicts": ["more_stable"]},
]
KNOWN_BADGE_TYPES: List[AdminBadgeT] = [s["type"] for s in BADGE_SPECIFICATION]
BADGE_SPECIFICATION_BY_TYPE: Dict[AdminBadgeT, BadgeSpecDict] = {s["type"]: s for s in BADGE_SPECIFICATION}
class BadgeDict(TypedDict):
type: BadgeT
message: Optional[str]
source: BadgeSourceT
class StoredBadgeDict(TypedDict):
type: AdminBadgeT
message: Optional[str]
def read_badges(config_dict: Dict[str, Any]) -> List[StoredBadgeDict]:
raw_badges = config_dict.get("badges", [])
badges: List[StoredBadgeDict] = []
badge_types: Set[str] = set()
badge_conflicts: Dict[str, str] = {}
for badge in raw_badges:
# when recovering serialized badges, skip ones that are set by Galaxy
badge_source = badge.get("source")
if badge_source and badge_source != "admin":
continue
assert "type" in badge
badge_type = badge["type"]
if badge_type not in KNOWN_BADGE_TYPES:
raise Exception(f"badge_type {badge_type} unimplemented/unknown {badge}")
message = badge.get("message", None)
badges.append({"type": badge_type, "message": message})
badge_types.add(badge_type)
if badge_type in badge_conflicts:
conflicting_badge_type = badge_conflicts[badge_type]
raise Exception(
f"Conflicting badge to [{badge_type}] defined on the object store [{conflicting_badge_type}]."
)
conflicts = BADGE_SPECIFICATION_BY_TYPE[badge_type]["conflicts"]
for conflict in conflicts:
badge_conflicts[conflict] = badge_type
return badges
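# Usage sketch (hypothetical object store configuration): given
#     {"badges": [{"type": "faster", "message": "NVMe scratch"}]}
# read_badges returns [{"type": "faster", "message": "NVMe scratch"}]. Adding a
# second entry of type "slower" would raise, because "slower" conflicts with
# "faster" in BADGE_SPECIFICATION above.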
def METHOD_NAME(stored_badges: List[StoredBadgeDict], quota_enabled: bool, private: bool) -> List[BadgeDict]:
"""Produce blended, unified list of badges for target object store entity.
Combine more free form admin information stored about badges with Galaxy tracked
information (quota and access restriction information) to produce a unified list
of badges to be consumed via the API.
"""
badge_dicts: List[BadgeDict] = []
for badge in stored_badges:
badge_dict: BadgeDict = {
"source": "admin",
"type": badge["type"],
"message": badge["message"],
}
badge_dicts.append(badge_dict)
quota_badge_dict: BadgeDict
if quota_enabled:
quota_badge_dict = {
"type": "quota",
"message": None,
"source": "galaxy",
}
else:
quota_badge_dict = {
"type": "no_quota",
"message": None,
"source": "galaxy",
}
badge_dicts.append(quota_badge_dict)
if private:
restricted_badge_dict: BadgeDict = {
"type": "restricted",
"message": None,
"source": "galaxy",
}
badge_dicts.append(restricted_badge_dict)
return badge_dicts | null |
454 | from http import HTTPStatus
from typing import Any, Dict, Optional, Union
import httpx
from ... import errors
from ...client import AuthenticatedClient, Client
from ...models.error_response import ErrorResponse
from ...models.pipeline import Pipeline
from ...types import Response
def _get_kwargs(
pipeline_id: str,
) -> Dict[str, Any]:
pass
return {
"method": "get",
"url": "/pipelines/{pipeline_id}".format(
pipeline_id=pipeline_id,
),
}
def _parse_response(
*, client: Union[AuthenticatedClient, Client], response: httpx.Response
) -> Optional[Union[ErrorResponse, Pipeline]]:
if response.status_code == HTTPStatus.OK:
response_200 = Pipeline.from_dict(response.json())
return response_200
if response.status_code == HTTPStatus.NOT_FOUND:
response_404 = ErrorResponse.from_dict(response.json())
return response_404
if client.raise_on_unexpected_status:
raise errors.UnexpectedStatus(response.status_code, response.content)
else:
return None
def _build_response(
*, client: Union[AuthenticatedClient, Client], response: httpx.Response
) -> Response[Union[ErrorResponse, Pipeline]]:
return Response(
status_code=HTTPStatus(response.status_code),
content=response.content,
headers=response.headers,
parsed=_parse_response(client=client, response=response),
)
def sync_detailed(
pipeline_id: str,
*,
client: Union[AuthenticatedClient, Client],
) -> Response[Union[ErrorResponse, Pipeline]]:
"""Fetch a pipeline by ID.
Fetch a pipeline by ID.
Args:
pipeline_id (str):
Raises:
errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
httpx.TimeoutException: If the request takes longer than Client.timeout.
Returns:
Response[Union[ErrorResponse, Pipeline]]
"""
kwargs = _get_kwargs(
pipeline_id=pipeline_id,
)
response = client.get_httpx_client().request(
**kwargs,
)
return _build_response(client=client, response=response)
def sync(
pipeline_id: str,
*,
client: Union[AuthenticatedClient, Client],
) -> Optional[Union[ErrorResponse, Pipeline]]:
"""Fetch a pipeline by ID.
Fetch a pipeline by ID.
Args:
pipeline_id (str):
Raises:
errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
httpx.TimeoutException: If the request takes longer than Client.timeout.
Returns:
Union[ErrorResponse, Pipeline]
"""
return sync_detailed(
pipeline_id=pipeline_id,
client=client,
).parsed
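# Usage sketch (assumed base URL and pipeline id, shown only to illustrate the
# generated-client pattern):
#     client = Client(base_url="http://localhost:8080/v0")
#     pipeline = sync(pipeline_id="example-pipeline", client=client)
# sync_detailed returns the full Response (status code, headers, raw content),
# while sync returns only the parsed Pipeline or ErrorResponse.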
async def asyncio_detailed(
pipeline_id: str,
*,
client: Union[AuthenticatedClient, Client],
) -> Response[Union[ErrorResponse, Pipeline]]:
"""Fetch a pipeline by ID.
Fetch a pipeline by ID.
Args:
pipeline_id (str):
Raises:
errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
httpx.TimeoutException: If the request takes longer than Client.timeout.
Returns:
Response[Union[ErrorResponse, Pipeline]]
"""
kwargs = _get_kwargs(
pipeline_id=pipeline_id,
)
response = await client.get_async_httpx_client().request(**kwargs)
return _build_response(client=client, response=response)
async def METHOD_NAME(
pipeline_id: str,
*,
client: Union[AuthenticatedClient, Client],
) -> Optional[Union[ErrorResponse, Pipeline]]:
"""Fetch a pipeline by ID.
Fetch a pipeline by ID.
Args:
pipeline_id (str):
Raises:
errors.UnexpectedStatus: If the server returns an undocumented status code and Client.raise_on_unexpected_status is True.
httpx.TimeoutException: If the request takes longer than Client.timeout.
Returns:
Union[ErrorResponse, Pipeline]
"""
return (
await asyncio_detailed(
pipeline_id=pipeline_id,
client=client,
)
).parsed | null |
455 | # coding=utf-8
# Copyright 2023 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pretraining utils."""
from typing import Any, Dict
from absl import logging
import flax
import ml_collections
import numpy as np
from scenic.train_lib_deprecated import train_utils
from tensorflow.io import gfile
def load_bb_config(
config: ml_collections.ConfigDict) -> ml_collections.ConfigDict:
"""Temporary toy bb config.
Args:
config: model config.
Returns:
restored_model_cfg: mock model config
"""
del config
restored_model_cfg = ml_collections.ConfigDict()
restored_model_cfg.classifier = "token"
return restored_model_cfg
def convert_from_pytorch(model: Dict[str, Any],
config: ml_collections.ConfigDict) -> Dict[str, Any]:
"""Update the variable names from the pytorch convention to the jax convention.
Args:
model: Dictionary of segmenter parameters as numpy arrays from pytorch model
in https://github.com/rstrudel/segmenter
config: model configuration
Returns:
jax_state: Dictionary of segmenter parameters as numpy arrays with updated
names to match segmenter-ub model.
"""
# TODO(kellybuchanan): add configuration files to compute dims of qkv.
del config
jax_state = dict(model)
for key, tensor in model.items():
# Decoder
if "decoder.head." in key:
del jax_state[key]
key = key.replace("decoder.head.", "output_projection/")
if "weight" in key:
key = key.replace("weight", "kernel")
tensor = np.transpose(tensor)
# Encoder
elif "encoder.head." in key:
del jax_state[key]
key = key.replace("encoder.head.", "backbone/Transformer/head/")
elif "encoder.norm." in key:
del jax_state[key]
key = key.replace("encoder.norm.", "backbone/Transformer/encoder_norm/")
if "weight" in key:
key = key.replace("weight", "scale")
elif "encoder.patch_embed.proj." in key:
del jax_state[key]
key = key.replace("encoder.patch_embed.proj.", "backbone/embedding/")
if "weight" in key:
key = key.replace("weight", "kernel")
# mapping tf -> torch in timm torch model:
# w.transpose([3,2,0,1])
tensor = np.transpose(tensor, [2, 3, 1, 0])
elif "encoder.pos_embed" in key:
del jax_state[key]
key = key.replace("encoder.pos_embed",
"backbone/Transformer/posembed_input/pos_embedding")
elif "encoder.cls_token" in key:
del jax_state[key]
key = key.replace("encoder.cls_token", "backbone/cls")
elif "encoder.blocks" in key:
del jax_state[key]
key = key.replace("encoder.", "backbone/Transformer/")
key = key.replace("blocks.", "encoderblock_")
key = key.replace(".norm1.", "/LayerNorm_0/")
key = key.replace(".attn.", "/MultiHeadDotProductAttention_1/")
key = key.replace(".norm2.", "/LayerNorm_2/")
key = key.replace(".mlp.fc1.", "/MlpBlock_3/Dense_0/")
key = key.replace(".mlp.fc2.", "/MlpBlock_3/Dense_1/")
if "LayerNorm" in key:
key = key.replace("weight", "scale")
else:
key = key.replace("weight", "kernel")
if "Dense_" in key:
tensor = np.transpose(tensor)
elif "qkv" in key:
# slice query key and value
dims = tensor.shape[0] // 3
key1 = key.replace("qkv.", "query/")
key2 = key.replace("qkv.", "key/")
key3 = key.replace("qkv.", "value/")
# mapping tf -> torch in timm torch model:
# cat[x.flatten(1).T for x in qkv] \in (3072,1024)
# where q, k, v \in (1024, 16, 64)
tensor_masks = [
np.arange(dims),
np.arange(dims, dims * 2),
np.arange(dims * 2, dims * 3)
]
tensor_keys = [key1, key2, key3]
for key_, tensor_m in zip(tensor_keys, tensor_masks):
tensor_tmp = tensor[tensor_m].reshape(16, 64, -1).squeeze()
if tensor_tmp.ndim == 3:
tensor_tmp = tensor_tmp.transpose([2, 0, 1])
jax_state[key_] = tensor_tmp
continue
elif "proj." in key:
key = key.replace("proj.", "out/")
# mapping tf -> torch in timm torch model:
# w.transpose([2,0,1]) + flatten(1) \in (1024, 1024)
# where w \in (16, 64, 1024)
tensor = tensor.reshape(-1, 16, 64).squeeze()
if tensor.ndim == 3:
tensor = tensor.transpose([1, 2, 0])
else:
tensor = tensor.flatten()
else:
raise NotImplementedError(
"Key {} doesn\'t exist in encoder".format(key))
else:
raise NotImplementedError("Key {} doesn\'t exist in model".format(key))
jax_state[key] = tensor
return jax_state
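# Illustrative mapping (derived from the branches above): a pytorch entry such
# as "decoder.head.weight" becomes "output_projection/kernel" with the tensor
# transposed, "encoder.norm.weight" becomes
# "backbone/Transformer/encoder_norm/scale" unchanged, and each fused "qkv"
# weight is split into separate query/key/value tensors with a per-head
# reshape.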
def METHOD_NAME(
checkpoint_path: str,
config: ml_collections.ConfigDict) -> train_utils.TrainState:
"""Converts a segmenter model checkpoint to a scenic train state.
The model weights are extracted.
Args:
checkpoint_path: Path to checkpoint.
config: config of pretrained model.
Returns:
restored_train_state: Scenic train state with model weights, global step
and accumulated training time.
"""
logging.info("Loading torch/numpy checkpoint from %s", checkpoint_path)
checkpoint_data = np.load(
gfile.GFile(checkpoint_path, "rb"), allow_pickle=True)[()]
restored_params = convert_from_pytorch(checkpoint_data, config)
# Construct tree
restored_params = flax.traverse_util.unflatten_dict(
{tuple(k.split("/")[:]): v for k, v in restored_params.items()})
train_state = train_utils.TrainState()
# pytype: disable=wrong-arg-types
restored_train_state = train_state.replace( # pytype: disable=attribute-error
optimizer={"target": restored_params},)
# pytype: enable=wrong-arg-types
# free memory
del restored_params
return restored_train_state | null |
456 | import numpy as np
import nibabel as nib
import skimage
from sklearn.neighbors import KernelDensity
from sklearn import mixture
def piecewise_linear_normalize(in_img_data, ref_img_data):
"""Function to piecewise linearly scale image intensities to training data landmarks"""
in_img_flat = np.ravel(in_img_data, 'C')
in_img_fg = in_img_flat[in_img_flat > 0].reshape(-1, 1)
clf_in = mixture.GaussianMixture(n_components=3, covariance_type='full')
clf_in.fit(in_img_fg)
ref_img_flat = np.ravel(ref_img_data, 'C')
ref_img_fg = ref_img_flat[ref_img_flat > 0].reshape(-1, 1)
clf_ref = mixture.GaussianMixture(n_components=3, covariance_type='full')
clf_ref.fit(ref_img_fg)
in_landmarks = np.asarray(sorted(clf_in.means_.squeeze()))
in_wm_std = np.sqrt(clf_in.covariances_[np.argmax(clf_in.means_)])
in_wm_threshold = in_landmarks[2] + 2*in_wm_std[0]
in_landmarks = np.append(np.asarray([0]), np.append(in_landmarks, in_wm_threshold))
ref_landmarks = np.asanyarray(sorted(clf_ref.means_.squeeze()))
ref_wm_std = np.sqrt(clf_in.covariances_[np.argmax(clf_in.means_)])
ref_wm_threshold = 255
ref_landmarks = np.append(np.asarray([0]), np.append(ref_landmarks, ref_wm_threshold))
print(ref_landmarks)
print(in_landmarks)
out_img_data = np.zeros(in_img_data.shape)
# map intensities using these landmarks
for i in range(len(in_landmarks)-1):
m = (ref_landmarks[i+1] - ref_landmarks[i])/(in_landmarks[i+1] - in_landmarks[i])
c = (in_landmarks[i+1]*ref_landmarks[i] - in_landmarks[i]*ref_landmarks[i+1])/(in_landmarks[i+1] - in_landmarks[i])
out_img_data[(in_img_data > in_landmarks[i]) & (in_img_data <= in_landmarks[i+1])] = \
m*in_img_data[(in_img_data > in_landmarks[i]) & (in_img_data <= in_landmarks[i+1])] + c
out_img_data[(in_img_data > in_landmarks[-1])] = 255
return out_img_data
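# Worked example (toy landmark values): with input landmarks [0, 10, 20] mapped
# to reference landmarks [0, 100, 255], a voxel of intensity 15 falls in the
# second segment, where m = (255 - 100) / (20 - 10) = 15.5 and
# c = (20 * 100 - 10 * 255) / (20 - 10) = -55, so it is remapped to
# 15.5 * 15 - 55 = 177.5.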
def wm_peak_normalize(in_img_data):
"""Function to scale image intensities by setting wm peak to 200"""
in_img_flat = np.ravel(in_img_data, 'C')
in_img_fg = in_img_flat[in_img_flat > 0].reshape(-1, 1)
p95 = np.percentile(in_img_fg, q=95)
in_img_fg = in_img_fg[in_img_fg < p95]
in_img_fg = in_img_fg.reshape(-1,1)
# clf = mixture.GMM(n_components=3, covariance_type='full')
#
clf = mixture.GaussianMixture(n_components=3, covariance_type='full')
clf.fit(in_img_fg)
# max of means is the wm centroid for t1w images
wm_peak_intensity = clf.means_.max()
wm_scaling = 200.0 / wm_peak_intensity
out_img_data = in_img_data * wm_scaling
return out_img_data
def wm_peak_normalize_t2w(in_img_data):
"""Function to scale image intensities by setting the wm peak to 0.3"""
in_img_flat = np.ravel(in_img_data, 'C')
in_img_fg = in_img_flat[in_img_flat > 0].reshape(-1, 1)
p90 = np.percentile(in_img_fg, q=90)
p10 = np.percentile(in_img_fg, q=10)
in_img_fg = in_img_fg[(in_img_fg < p90) & (in_img_fg > p10)]
in_img_fg = in_img_fg.reshape(-1,1)
# clf = mixture.GMM(n_components=3, covariance_type='full')
#
clf = mixture.GaussianMixture(n_components=2, covariance_type='full')
clf.fit(in_img_fg)
print('GMM centroids are ')
print(sorted(clf.means_))
wm_peak_intensity = sorted(clf.means_)[0]
#
#
# h, bin_edges = np.histogram(in_img_fg, 500)
# max_bin = np.argmax(h)
# mode_h = max_bin * (bin_edges[1] - bin_edges[0])
# max of means is the wm centroid for t1w images
# wm_peak_intensity = mode_h
wm_scaling = 0.3 / wm_peak_intensity
out_img_data = in_img_data * wm_scaling
return out_img_data
def METHOD_NAME(in_img_data):
in_img_flat = np.ravel(in_img_data, 'C')
in_img_fg = in_img_flat[in_img_flat > 0].reshape(-1, 1)
p01 = np.percentile(in_img_fg, q=1)
p999 = np.percentile(in_img_fg, q=99)
# set p99 to 255
scaling = 255.0 / (p999 - p01)
in_img_data[(in_img_data < p01) & (in_img_data > 0)] = p01
out_img_data = (in_img_data) * scaling
return out_img_data
def max_normalize(in_img_data):
in_img_flat = np.ravel(in_img_data, 'C')
in_img_fg = in_img_flat[in_img_flat > 0].reshape(-1, 1)
p01 = np.percentile(in_img_fg, q=1)
p999 = np.percentile(in_img_fg, q=99)
# set p99 to 255
scaling = 255.0 / (p999 - p01)
in_img_data[in_img_data < p01] = p01
out_img_data = (in_img_data) * scaling
return out_img_data
def histmatch(in_img_data, ref_img_data):
# in_img_data = wm_peak_normalize(in_img_data)
# ref_img_data = wm_peak_normalize(ref_img_data)
in_img_data_flat = in_img_data.flatten()
in_img_fg = in_img_data_flat[in_img_data_flat > 0] # foreground is > 0
ref_img_data_flat = ref_img_data.flatten()
ref_img_fg = ref_img_data_flat[ref_img_data_flat > 0] # foreground is > 0
# plot histograms
# plt.figure()
bins_in = np.linspace(0, 1, 255)
bins_ref = np.linspace(0, 1, 255)
hist_in = np.histogram(in_img_fg, bins=bins_in, range=(bins_in.min(), bins_in.max()))
n_in = hist_in[0]
bins_in = hist_in[1]
hist_ref = np.histogram(ref_img_fg, bins=bins_ref, range=(bins_ref.min(), bins_ref.max()))
n_ref = hist_ref[0]
bins_ref = hist_ref[1]
cdf_in_img = np.float64(np.cumsum(n_in))
cdf_in_img = np.divide(cdf_in_img, cdf_in_img[-1])
cdf_ref_img = np.float64(np.cumsum(n_ref))
cdf_ref_img = np.divide(cdf_ref_img, cdf_ref_img[-1])
interp_ref_values = np.interp(cdf_in_img, cdf_ref_img, bins_ref[1:])
bins_in_z = np.append(0, bins_in)
out_img_data = np.copy(in_img_data)
for i in range(1, len(bins_in)):
out_img_data[(in_img_data > bins_in_z[i - 1]) & (in_img_data <= bins_in_z[i])] = interp_ref_values[i - 1]
# print(i)
return out_img_data, bins_in_z, interp_ref_values | null |
457 | # Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla as nn
import numpy as np
from nnabla.utils.data_iterator import data_iterator
from nnabla.utils.data_source import DataSource
from nnabla.utils import image_utils
import os
import glob
import imageio
from helper import load_K_Rt_from_P, generate_raydir_camloc
class DTUMVSDataSource(DataSource):
'''
Load DTUMVS dataset from the zip file created by the author of "Multiview Neural Surface Reconstruction by Disentangling Geometry and Appearance".
'''
def _get_data(self, position):
img_idx = self._img_indices[position]
image = self._images[img_idx]
mask = self._masks[img_idx]
intrinsic = self._intrinsics[img_idx]
pose = self._poses[img_idx]
H, W, _ = image.shape
R = H * W
color = image.reshape((R, 3))[self.pixel_idx]
mask = mask.reshape((R, 1))[self.pixel_idx]
xy = self._xy[self.pixel_idx]
return color, mask, intrinsic, pose, xy
def _load_dtumvs(self, path):
# Images
image_files = sorted(glob.glob(os.path.join(path, "image", "*")))
METHOD_NAME = np.asarray([image_utils.imread(f) for f in image_files])
METHOD_NAME = METHOD_NAME * (1.0 / 127.5) - 1.0
# Masks
mask_files = sorted(glob.glob(os.path.join(path, "mask", "*")))
masks = np.asarray([imageio.imread(f, as_gray=True)[:, :, np.newaxis] > 127.5
for f in mask_files]) * 1.0
# Camera projection matrix and scale matrix for special correctness
cameras = np.load(os.path.join(path, "cameras.npz"))
world_mats = [cameras['world_mat_%d' % idx].astype(
np.float32) for idx in range(len(METHOD_NAME))]
scale_mats = [cameras['scale_mat_%d' % idx].astype(
np.float32) for idx in range(len(METHOD_NAME))]
intrinsics, poses = [], []
for W, S in zip(world_mats, scale_mats):
P = W @ S
P = P[:3, :4]
intrinsic, pose = load_K_Rt_from_P(P)
intrinsics.append(intrinsic[:3, :3])
poses.append(pose)
# return images[0:1, ...], masks[0:1, ...], np.asarray(intrinsics)[0:1, ...], np.asarray(poses)[0:1, ...]
return METHOD_NAME, masks, np.asarray(intrinsics), np.asarray(poses)
def __init__(self, path, n_rays, train=True, shuffle=False, rng=None):
super(DTUMVSDataSource, self).__init__(shuffle=shuffle)
self._n_rays = n_rays
self._train = train
self._images, self._masks, self._intrinsics, self._poses = self._load_dtumvs(
path)
# assume all images have same resolution
H, W, _ = self._images[0].shape
x = np.arange(W)
y = np.arange(H)
xx, yy = np.meshgrid(x, y)
self._xy = np.asarray([xx.flatten(), yy.flatten()]).T
self._size = len(self._images)
self._pixels = H * W
self._variables = ('image', 'mask', 'intrinsic', 'pose', 'xy')
if rng is None:
rng = np.random.RandomState(313)
self.rng = rng
self.reset()
self.pixel_idx = self._sampling_idx()
dname = os.path.split(path.rstrip("/"))[-1]
nn.logger.info(f"--- Finish loading DTU MVS dataset ({dname}). ---")
nn.logger.info(f"Num. of images = {self._size}")
nn.logger.info(f"Num. of pixels (H x W) = {self._pixels} ({H} x {W})")
nn.logger.info(f"Num. of random rays = {self._n_rays}")
def reset(self):
if self._shuffle:
self._img_indices = self.rng.permutation(self._size)
else:
self._img_indices = np.arange(self._size)
super(DTUMVSDataSource, self).reset()
@property
def METHOD_NAME(self):
"""Get copy of whole data with a shape of (B, H, W, 3)."""
return self._images.copy()
@property
def poses(self):
return self._poses.copy()
@property
def intrinsics(self):
return self._intrinsics.copy()
@property
def masks(self):
return self._masks.copy()
def change_sampling_idx(self):
self.pixel_idx = self._sampling_idx()
def _sampling_idx(self):
return self.rng.randint(0, self._pixels, self._n_rays)
def data_iterator_dtumvs(data_source,
batch_size,
rng=None,
with_memory_cache=False,
with_file_cache=False):
'''
Provide DataIterator with :py:class:`DTUMVSDataSource`
with_memory_cache and with_file_cache option's default value is all False,
because :py:class:`DTUMVSDataSource` is able to store all data into memory.
'''
return data_iterator(data_source,
batch_size,
rng,
with_memory_cache,
with_file_cache)
def main(args):
# Data Iterator
ds = DTUMVSDataSource(args.path, args.n_rays, shuffle=True)
di = data_iterator_dtumvs(ds, 1)
for i in range(args.iters):
pcolor, mask, intrinsic, pose, xy = di.next()
print(f"pcolor.shape = {pcolor.shape}")
print(f"mask.shape = {mask.shape}")
print(f"intrinsic.shape = {intrinsic.shape}")
print(f"pose.shape = {pose.shape}")
print(f"xy.shape = {xy.shape}")
print(f"Pcolor (min, max) = ({pcolor.min()}, {pcolor.max()})")
print(f"Mask (min, max) = ({mask.min()}, {mask.max()})")
# Generate rays
raydir, camloc = generate_raydir_camloc(pose, intrinsic, xy)
print(f"raydir.shape = {raydir.shape}")
np.testing.assert_allclose(
np.sum(raydir ** 2, axis=-1) ** 0.5, 1.0, atol=1e-6)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description="DTU MVS Dataset.")
parser.add_argument('--path', type=str, required=True,
help="Path to scale directory, Ex, DTU/scan24.")
parser.add_argument('--batch-size', '-b', type=int, default=4)
parser.add_argument('--n-rays', '-n', type=int, default=800)
parser.add_argument('--iters', '-i', type=int, default=100)
args = parser.parse_args()
main(args) | null |
458 | import asyncio
import copy
import hashlib
import hmac
import json
import unittest
from typing import Awaitable
from urllib.parse import urlencode
import hummingbot.connector.derivative.phemex_perpetual.phemex_perpetual_constants as CONSTANTS
from hummingbot.connector.derivative.phemex_perpetual.phemex_perpetual_auth import PhemexPerpetualAuth
from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest, WSJSONRequest
class PhemexPerpetualAuthUnitTests(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
cls.ev_loop = asyncio.get_event_loop()
cls.api_key = "TEST_API_KEY"
cls.secret_key = "TEST_SECRET_KEY"
def setUp(self) -> None:
super().setUp()
self.emulated_time = 1640001112.223
self.path = "/TEST_PATH_URL"
self.test_params = {
"test_param": "test_input",
"timestamp": int(self.emulated_time),
}
self.auth = PhemexPerpetualAuth(
api_key=self.api_key,
api_secret=self.secret_key,
time_provider=self)
def _get_test_payload(self, is_get: bool = True):
payload = ""
if is_get is True:
payload += (
self.path
+ urlencode(dict(copy.deepcopy(self.test_params)))
+ str(int(self.emulated_time) + CONSTANTS.ONE_MINUTE))
else:
payload += (
self.path
+ str(int(self.emulated_time) + CONSTANTS.ONE_MINUTE)
+ json.dumps(copy.deepcopy(self.test_params))
)
return payload
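    # Illustrative rendering of the payload built above (values follow the
    # fixtures in setUp; the exact strings are spelled out here only for
    # readability and are not asserted anywhere):
    #   GET : "/TEST_PATH_URL" + "test_param=test_input&timestamp=1640001112" + "<expiry>"
    #   POST: "/TEST_PATH_URL" + "<expiry>" + '{"test_param": "test_input", "timestamp": 1640001112}'
    # where <expiry> is int(self.emulated_time) + CONSTANTS.ONE_MINUTE.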
def _get_signature_from_test_payload(self, is_get: bool = True):
return hmac.new(
bytes(self.auth._api_secret.encode("utf-8")), self._get_test_payload(is_get=is_get).encode("utf-8"), hashlib.sha256
).hexdigest()
def async_run_with_timeout(self, coroutine: Awaitable, timeout: float = 1):
ret = self.ev_loop.run_until_complete(asyncio.wait_for(coroutine, timeout))
return ret
def time(self):
# Implemented to emulate a TimeSynchronizer
return self.emulated_time
def test_generate_signature_from_payload(self):
payload = self._get_test_payload()
signature = self.auth.generate_signature_from_payload(payload)
self.assertEqual(signature, self._get_signature_from_test_payload())
def test_rest_authenticate_parameters_provided(self):
request: RESTRequest = RESTRequest(
method=RESTMethod.GET, url=self.path, params=copy.deepcopy(self.test_params), is_auth_required=True
)
signed_request: RESTRequest = self.async_run_with_timeout(self.auth.rest_authenticate(request))
self.assertIn("x-phemex-access-token", signed_request.headers)
self.assertEqual(signed_request.headers["x-phemex-access-token"], self.api_key)
self.assertIn("x-phemex-request-signature", signed_request.headers)
self.assertEqual(signed_request.headers["x-phemex-request-signature"], self._get_signature_from_test_payload())
def test_rest_authenticate_data_provided(self):
request: RESTRequest = RESTRequest(
method=RESTMethod.POST, url=self.path, data=json.dumps(self.test_params), is_auth_required=True
)
signed_request: RESTRequest = self.async_run_with_timeout(self.auth.rest_authenticate(request))
self.assertIn("x-phemex-access-token", signed_request.headers)
self.assertEqual(signed_request.headers["x-phemex-access-token"], self.api_key)
self.assertIn("x-phemex-request-signature", signed_request.headers)
self.assertEqual(
signed_request.headers["x-phemex-request-signature"],
self._get_signature_from_test_payload(is_get=False)
)
def test_ws_authenticate(self):
request: WSJSONRequest = WSJSONRequest(
payload={"TEST": "SOME_TEST_PAYLOAD"}, throttler_limit_id="TEST_LIMIT_ID", is_auth_required=True
)
signed_request: WSJSONRequest = self.async_run_with_timeout(self.auth.ws_authenticate(request))
self.assertEqual(request, signed_request)
def METHOD_NAME(self):
auth_payload = self.auth.get_ws_auth_payload()
payload = f"{self.api_key}{int(self.emulated_time) + 2}"
signature = hmac.new(self.secret_key.encode("utf-8"), payload.encode("utf-8"), hashlib.sha256).hexdigest()
target_auth_payload = {
"method": "user.auth",
"params": [
"API",
self.api_key,
signature,
int(self.emulated_time) + 2,
],
"id": 0,
}
self.assertEqual(target_auth_payload, auth_payload) | null |
459 | from dataclasses import asdict
from functools import partial
from unittest import TestCase, main as unittest_main
import numpy as np
from gbstats.frequentist.tests import (
FrequentistConfig,
SequentialConfig,
SequentialTwoSidedTTest,
TwoSidedTTest,
)
from gbstats.shared.models import (
FrequentistTestResult,
ProportionStatistic,
RegressionAdjustedStatistic,
SampleMeanStatistic,
Uplift,
)
DECIMALS = 5
round_ = partial(np.round, decimals=DECIMALS)
def _round_result_dict(result_dict):
for k, v in result_dict.items():
if k == "uplift":
v = {
kk: round_(vv) if isinstance(vv, float) else vv for kk, vv in v.items()
}
else:
v = [round_(x) for x in v] if isinstance(v, list) else round_(v)
result_dict[k] = v
return result_dict
class TestTwoSidedTTest(TestCase):
def test_two_sided_ttest(self):
stat_a = SampleMeanStatistic(sum=1396.87, sum_squares=52377.9767, n=3407)
stat_b = SampleMeanStatistic(sum=2422.7, sum_squares=134698.29, n=3461)
result_dict = asdict(TwoSidedTTest(stat_a, stat_b).compute_result())
expected_rounded_dict = asdict(
FrequentistTestResult(
expected=round_((0.7 - 0.41) / 0.41),
ci=[-0.03526, 1.44989],
uplift=Uplift("normal", 0.70732, 0.37879),
p_value=0.06191,
)
)
self.assertDictEqual(_round_result_dict(result_dict), expected_rounded_dict)
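        # (The hard-coded 0.41 and 0.7 above are roughly the per-variation
        # means, i.e. sum / n: 1396.87 / 3407 ≈ 0.41 and 2422.7 / 3461 ≈ 0.70,
        # so `expected` is the relative uplift (mean_b - mean_a) / mean_a.)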
def test_two_sided_ttest_binom(self):
stat_a = ProportionStatistic(sum=14, n=28)
stat_b = ProportionStatistic(sum=16, n=30)
result_dict = asdict(TwoSidedTTest(stat_a, stat_b).compute_result())
expected_rounded_dict = asdict(
FrequentistTestResult(
expected=round_((16 / 30 - 0.5) / 0.5),
ci=[-0.47767, 0.61101],
uplift=Uplift("normal", 0.06667, 0.2717),
p_value=0.80707,
)
)
self.assertDictEqual(_round_result_dict(result_dict), expected_rounded_dict)
def test_two_sided_ttest_missing_variance(self):
stat_a = SampleMeanStatistic(sum=1396.87, sum_squares=52377.9767, n=2)
stat_b = SampleMeanStatistic(sum=2422.7, sum_squares=134698.29, n=3461)
default_output = TwoSidedTTest(stat_a, stat_b)._default_output()
result_output = TwoSidedTTest(stat_a, stat_b).compute_result()
self.assertEqual(default_output, result_output)
class TestSequentialTTest(TestCase):
def test_sequential_test_runs(self):
stat_a = SampleMeanStatistic(sum=1396.87, sum_squares=52377.9767, n=3000)
stat_b = SampleMeanStatistic(sum=2422.7, sum_squares=134698.29, n=3461)
config = SequentialConfig(sequential_tuning_parameter=1000)
result_dict = asdict(
SequentialTwoSidedTTest(stat_a, stat_b, config).compute_result()
)
expected_dict = asdict(
FrequentistTestResult(
expected=0.50336,
ci=[-0.55844, 1.56516],
uplift=Uplift("normal", 0.50336, 0.33341),
p_value=1,
)
)
self.assertEqual(_round_result_dict(result_dict), expected_dict)
def METHOD_NAME(self):
stat_a = ProportionStatistic(sum=1396, n=3000)
stat_b = ProportionStatistic(sum=2422, n=3461)
result_dict = asdict(SequentialTwoSidedTTest(stat_a, stat_b).compute_result())
expected_dict = asdict(
FrequentistTestResult(
expected=0.50386,
ci=[0.40098, 0.60675],
uplift=Uplift("normal", 0.50386, 0.03386),
p_value=0.0,
)
)
self.assertEqual(_round_result_dict(result_dict), expected_dict)
def test_sequential_test_runs_ra(self):
stat_a_pre = SampleMeanStatistic(sum=16.87, sum_squares=527.9767, n=3000)
stat_b_pre = SampleMeanStatistic(sum=22.7, sum_squares=1348.29, n=3461)
stat_a_post = SampleMeanStatistic(sum=1396.87, sum_squares=52377.9767, n=3000)
stat_b_post = SampleMeanStatistic(sum=2422.7, sum_squares=134698.29, n=3461)
stat_a_ra = RegressionAdjustedStatistic(
n=3000,
post_statistic=stat_a_post,
pre_statistic=stat_a_pre,
post_pre_sum_of_products=12525,
theta=0.5,
)
stat_b_ra = RegressionAdjustedStatistic(
n=3461,
post_statistic=stat_b_post,
pre_statistic=stat_b_pre,
post_pre_sum_of_products=3333,
theta=0.5,
)
result_dict = asdict(
SequentialTwoSidedTTest(stat_a_ra, stat_b_ra).compute_result()
)
expected_dict = asdict(
FrequentistTestResult(
expected=0.50236,
ci=[-0.43745, 1.44217],
uplift=Uplift("normal", 0.50236, 0.3093),
p_value=1,
)
)
self.assertEqual(_round_result_dict(result_dict), expected_dict)
def test_sequential_test_tuning_as_expected(self):
stat_a = SampleMeanStatistic(sum=1396.87, sum_squares=52377.9767, n=3000)
stat_b = SampleMeanStatistic(sum=2422.7, sum_squares=134698.29, n=3461)
config_below_n = SequentialConfig(sequential_tuning_parameter=10)
result_below = SequentialTwoSidedTTest(
stat_a, stat_b, config_below_n
).compute_result()
config_near_n = SequentialConfig(sequential_tuning_parameter=6461)
result_near = SequentialTwoSidedTTest(
stat_a, stat_b, config_near_n
).compute_result()
config_above_n = SequentialConfig(sequential_tuning_parameter=10000)
result_above = SequentialTwoSidedTTest(
stat_a, stat_b, config_above_n
).compute_result()
# Way underestimating should be worse here
self.assertTrue(
(result_below.ci[0] < result_above.ci[0])
and (result_below.ci[1] > result_above.ci[1])
)
        # And estimating well should give tighter intervals than both
self.assertTrue(
(result_below.ci[0] < result_near.ci[0])
and (result_below.ci[1] > result_near.ci[1])
)
self.assertTrue(
(result_above.ci[0] < result_near.ci[0])
and (result_above.ci[1] > result_near.ci[1])
)
if __name__ == "__main__":
unittest_main() | null |
460 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class StoreMaterialTemporarilyRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Trademark', '2018-07-24', 'StoreMaterialTemporarily','trademark')
def get_ContactEmail(self):
return self.get_query_params().get('ContactEmail')
def set_ContactEmail(self,ContactEmail):
self.add_query_param('ContactEmail',ContactEmail)
def get_ContactAddress(self):
return self.get_query_params().get('ContactAddress')
def set_ContactAddress(self,ContactAddress):
self.add_query_param('ContactAddress',ContactAddress)
def get_EAddress(self):
return self.get_query_params().get('EAddress')
def set_EAddress(self,EAddress):
self.add_query_param('EAddress',EAddress)
def get_Country(self):
return self.get_query_params().get('Country')
def set_Country(self,Country):
self.add_query_param('Country',Country)
def get_LegalNoticeOssKey(self):
return self.get_query_params().get('LegalNoticeOssKey')
def set_LegalNoticeOssKey(self,LegalNoticeOssKey):
self.add_query_param('LegalNoticeOssKey',LegalNoticeOssKey)
def get_Address(self):
return self.get_query_params().get('Address')
def set_Address(self,Address):
self.add_query_param('Address',Address)
def get_Town(self):
return self.get_query_params().get('Town')
def set_Town(self,Town):
self.add_query_param('Town',Town)
def get_ContactNumber(self):
return self.get_query_params().get('ContactNumber')
def set_ContactNumber(self,ContactNumber):
self.add_query_param('ContactNumber',ContactNumber)
def get_City(self):
return self.get_query_params().get('City')
def set_City(self,City):
self.add_query_param('City',City)
def get_IdCardOssKey(self):
return self.get_query_params().get('IdCardOssKey')
def set_IdCardOssKey(self,IdCardOssKey):
self.add_query_param('IdCardOssKey',IdCardOssKey)
def get_Type(self):
return self.get_query_params().get('Type')
def set_Type(self,Type):
self.add_query_param('Type',Type)
def get_ContactName(self):
return self.get_query_params().get('ContactName')
def set_ContactName(self,ContactName):
self.add_query_param('ContactName',ContactName)
def get_PassportOssKey(self):
return self.get_query_params().get('PassportOssKey')
def set_PassportOssKey(self,PassportOssKey):
self.add_query_param('PassportOssKey',PassportOssKey)
def METHOD_NAME(self):
return self.get_query_params().get('ContactZipcode')
def set_ContactZipcode(self,ContactZipcode):
self.add_query_param('ContactZipcode',ContactZipcode)
def get_EName(self):
return self.get_query_params().get('EName')
def set_EName(self,EName):
self.add_query_param('EName',EName)
def get_Province(self):
return self.get_query_params().get('Province')
def set_Province(self,Province):
self.add_query_param('Province',Province)
def get_BusinessLicenceOssKey(self):
return self.get_query_params().get('BusinessLicenceOssKey')
def set_BusinessLicenceOssKey(self,BusinessLicenceOssKey):
self.add_query_param('BusinessLicenceOssKey',BusinessLicenceOssKey)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_CardNumber(self):
return self.get_query_params().get('CardNumber')
def set_CardNumber(self,CardNumber):
self.add_query_param('CardNumber',CardNumber)
def get_Region(self):
return self.get_query_params().get('Region')
def set_Region(self,Region):
self.add_query_param('Region',Region)
def get_LoaOssKey(self):
return self.get_query_params().get('LoaOssKey')
def set_LoaOssKey(self,LoaOssKey):
		self.add_query_param('LoaOssKey',LoaOssKey) | null
461 | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://github.com/openapi-json-schema-tools/openapi-json-schema-generator
"""
from __future__ import annotations
from petstore_api.shared_imports.schema_imports import * # pyright: ignore [reportWildcardImportFromLibrary]
ClassName: typing_extensions.TypeAlias = schemas.StrSchema
@dataclasses.dataclass(frozen=True)
class Color(
schemas.StrSchema
):
types: typing.FrozenSet[typing.Type] = frozenset({
str,
})
default: typing.Literal["red"] = "red"
Properties = typing.TypedDict(
'Properties',
{
"className": typing.Type[ClassName],
"color": typing.Type[Color],
}
)
class AnimalDict(schemas.immutabledict[str, str]):
__required_keys__: typing.FrozenSet[str] = frozenset({
"className",
})
__optional_keys__: typing.FrozenSet[str] = frozenset({
"color",
})
def __new__(
cls,
*,
className: str,
color: typing.Union[
str,
schemas.Unset
] = schemas.unset,
configuration_: typing.Optional[schema_configuration.SchemaConfiguration] = None,
**kwargs: schemas.INPUT_TYPES_ALL,
):
arg_: typing.Dict[str, typing.Any] = {
"className": className,
}
for key, val in (
("color", color),
):
if isinstance(val, schemas.Unset):
continue
arg_[key] = val
arg_.update(kwargs)
used_arg_ = typing.cast(AnimalDictInput, arg_)
return Animal.validate(used_arg_, configuration=configuration_)
@staticmethod
def from_dict_(
arg: typing.Union[
AnimalDictInput,
AnimalDict
],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> AnimalDict:
return Animal.validate(arg, configuration=configuration)
@property
def className(self) -> str:
return typing.cast(
str,
self.__getitem__("className")
)
@property
def color(self) -> typing.Union[str, schemas.Unset]:
val = self.get("color", schemas.unset)
if isinstance(val, schemas.Unset):
return val
return typing.cast(
str,
val
)
def METHOD_NAME(self, name: str) -> typing.Union[schemas.OUTPUT_BASE_TYPES, schemas.Unset]:
schemas.raise_if_key_known(name, self.__required_keys__, self.__optional_keys__)
return self.get(name, schemas.unset)
AnimalDictInput = typing.Mapping[str, schemas.INPUT_TYPES_ALL]
@dataclasses.dataclass(frozen=True)
class Animal(
schemas.Schema[AnimalDict, tuple]
):
"""NOTE: This class is auto generated by OpenAPI JSON Schema Generator.
Ref: https://github.com/openapi-json-schema-tools/openapi-json-schema-generator
Do not edit the class manually.
"""
types: typing.FrozenSet[typing.Type] = frozenset({schemas.immutabledict})
required: typing.FrozenSet[str] = frozenset({
"className",
})
discriminator: typing.Mapping[str, typing.Mapping[str, typing.Type[schemas.Schema]]] = dataclasses.field(
default_factory=lambda: {
'className': {
'Cat': cat.Cat,
'Dog': dog.Dog,
}
}
)
properties: Properties = dataclasses.field(default_factory=lambda: schemas.typed_dict_to_instance(Properties)) # type: ignore
type_to_output_cls: typing.Mapping[
typing.Type,
typing.Type
] = dataclasses.field(
default_factory=lambda: {
schemas.immutabledict: AnimalDict
}
)
@classmethod
def validate(
cls,
arg: typing.Union[
AnimalDictInput,
AnimalDict,
],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> AnimalDict:
return super().validate_base(
arg,
configuration=configuration,
)
from petstore_api.components.schema import cat
from petstore_api.components.schema import dog | null |
462 | # Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import functools
import os
import pykube
from azure.common.client_factory import get_client_from_auth_file, get_client_from_cli_profile
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.network import NetworkManagementClient
from azure.mgmt.compute import ComputeManagementClient
RUN_ID_LABEL = 'runid'
CLOUD_REGION_LABEL = 'cloud_region'
auth_file = os.environ.get('AZURE_AUTH_LOCATION', None)
if auth_file:
res_client = get_client_from_auth_file(ResourceManagementClient, auth_path=auth_file)
network_client = get_client_from_auth_file(NetworkManagementClient, auth_path=auth_file)
compute_client = get_client_from_auth_file(ComputeManagementClient, auth_path=auth_file)
else:
res_client = get_client_from_cli_profile(ResourceManagementClient)
network_client = get_client_from_cli_profile(NetworkManagementClient)
compute_client = get_client_from_cli_profile(ComputeManagementClient)
resource_group_name = os.environ["AZURE_RESOURCE_GROUP"]
def find_instance(run_id):
for resource in res_client.resources.list(filter="tagName eq 'Name' and tagValue eq '" + run_id + "'"):
if str(resource.type).split('/')[-1].lower() == "virtualmachines":
return resource.name
elif str(resource.type).split('/')[-1].lower() == "virtualmachinescalesets":
instance_name, _ = get_instance_name_and_private_ip_from_vmss(resource.name)
return instance_name
return None
def run_id_filter(run_id):
return {
'Name': 'tag:Name',
'Values': [run_id]
}
def get_instance_name_and_private_ip_from_vmss(scale_set_name):
vm_vmss_id = None
for vm in compute_client.virtual_machine_scale_set_vms.list(resource_group_name, scale_set_name):
vm_vmss_id = vm.instance_id
break
instance_name = compute_client.virtual_machine_scale_set_vms \
.get_instance_view(resource_group_name, scale_set_name, vm_vmss_id) \
.additional_properties["computerName"]
private_ip = network_client.network_interfaces. \
get_virtual_machine_scale_set_ip_configuration(resource_group_name, scale_set_name, vm_vmss_id,
scale_set_name + "-nic", scale_set_name + "-ip") \
.private_ip_address
return instance_name, private_ip
def verify_regnode(ins_id, api):
if find_node(api, ins_id):
return ins_id
raise RuntimeError("Failed to find Node {}".format(ins_id))
def find_node(api, node_name):
node = pykube.Node.objects(api).filter(field_selector={'metadata.name': node_name})
if len(node.response['items']) > 0:
return node_name
else:
return ''
def delete_kube_node(nodename, run_id, api):
if nodename is None:
nodes = pykube.Node.objects(api).filter(selector={RUN_ID_LABEL: run_id})
if len(nodes.response['items']) > 0:
node = nodes.response['items'][0]
nodename = node['metadata']['name']
if nodename is not None:
obj = {
"apiVersion": "v1",
"kind": "Node",
"metadata": {
"name": nodename,
"labels": {
"runid": run_id
}
}
}
pykube.Node(api, obj).delete()
def get_cloud_region(api, run_id):
nodes = pykube.Node.objects(api).filter(selector={RUN_ID_LABEL: run_id})
if len(nodes.response['items']) == 0:
raise RuntimeError('Cannot find node matching RUN ID %s' % run_id)
node = nodes.response['items'][0]
labels = node['metadata']['labels']
if CLOUD_REGION_LABEL not in labels:
raise RuntimeError('Node %s is not labeled with Azure Region' % node['metadata']['name'])
return labels[CLOUD_REGION_LABEL]
def get_kube_api():
try:
api = pykube.HTTPClient(pykube.KubeConfig.from_service_account())
except Exception as e:
api = pykube.HTTPClient(pykube.KubeConfig.from_file("~/.kube/config"))
api.session.verify = False
return api
def METHOD_NAME(resource):
""" This method retrieves the latest non-preview api version for
the given resource (unless the preview version is the only available
api version) """
provider = res_client.providers.get(resource.id.split('/')[6])
rt = next((t for t in provider.resource_types
if t.resource_type.lower() == '/'.join(resource.type.split('/')[1:]).lower()), None)
if rt and 'api_versions' in rt.__dict__:
api_version = [v for v in rt.__dict__['api_versions'] if 'preview' not in v.lower()]
return api_version[0] if api_version else rt.__dict__['api_versions'][0]
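# Illustrative sketch (hypothetical version strings, not taken from the script
# above): for a resource type whose api_versions are
# ['2019-12-01-preview', '2019-07-01'], the helper filters out '-preview'
# entries and returns '2019-07-01'; only when every listed version is a
# preview does it fall back to the first entry.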
def azure_resource_type_cmp(r1, r2):
if str(r1.type).split('/')[-1].lower().startswith("virtualmachine"):
return -1
elif str(r1.type).split('/')[-1].lower() == "networkinterfaces" and not str(r2.type).split('/')[-1].lower().startswith("virtualmachine"):
return -1
return 0
def delete_resources_by_tag(run_id):
resources = []
for resource in res_client.resources.list(filter="tagName eq 'Name' and tagValue eq '" + run_id + "'"):
resources.append(resource)
    # We need to sort resources to be sure that the VM and NIC are deleted first, because they have attached resources (disks and IPs)
resources.sort(key=functools.cmp_to_key(azure_resource_type_cmp))
for resource in resources:
res_client.resources.delete(
resource_group_name=resource.id.split('/')[4],
resource_provider_namespace=resource.id.split('/')[6],
parent_resource_path='',
resource_type=str(resource.type).split('/')[-1],
resource_name=resource.name,
api_version=METHOD_NAME(resource),
parameters=resource
).wait()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--run_id", "-kid", type=str, required=True)
parser.add_argument("--ins_id", "-id", type=str, required=False) # do we need?
args, unknown = parser.parse_known_args()
run_id = args.run_id
api = get_kube_api()
try:
ins_id = find_instance(run_id)
except Exception:
ins_id = None
if ins_id is None:
delete_kube_node(None, run_id, api)
delete_resources_by_tag(run_id)
else:
try:
nodename = verify_regnode(ins_id, api)
except Exception:
nodename = None
delete_kube_node(nodename, run_id, api)
delete_resources_by_tag(run_id)
if __name__ == '__main__':
main() | null |
463 | # Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import requests
import time
class CloudType:
S3 = 'S3'
GS = 'GS'
def __init__(self):
pass
class TemporaryCredentials:
def __init__(self):
self.access_key_id = None
self.secret_key = None
self.session_token = None
self.expiration = None
self.region = None
@classmethod
def load(cls, json):
instance = cls()
instance.access_key_id = json['keyID'] if 'keyID' in json else None
instance.secret_key = json['accessKey']
instance.session_token = json['token']
instance.expiration = json['expiration']
instance.region = json['region'] if 'region' in json else None
return instance
class StorageLifecycle:
def __init__(self):
self.path = None
self.status = None
self.restored_till = None
@classmethod
def load(cls, json):
instance = StorageLifecycle()
if 'path' in json:
instance.path = json['path']
if 'status' in json:
instance.status = json['status']
if 'restoredTill' in json:
instance.restored_till = json['restoredTill']
return instance
def is_restored(self):
return self.status == 'SUCCEEDED'
class DataStorage:
_READ_MASK = 1
_WRITE_MASK = 1 << 1
def __init__(self):
self.id = None
self.path = None
self.root = None
self.mask = None
self.sensitive = False
self.ro = False
self.type = None
self.region_name = None
@classmethod
def load(cls, json, region_info=[]):
instance = DataStorage()
instance.id = json['id']
instance.path = json['path']
instance.root = json['root']
instance.mask = json['mask']
instance.sensitive = json['sensitive']
instance.type = json['type']
instance.region_name = cls._find_region_code(json.get('regionId', 0), region_info)
return instance
@staticmethod
def _find_region_code(region_id, region_data):
for region in region_data:
if int(region.get('id', 0)) == int(region_id):
return region.get('regionId', None)
return None
def is_read_allowed(self):
return self.METHOD_NAME(self._READ_MASK)
def is_write_allowed(self):
return not self.ro and self.METHOD_NAME(self._WRITE_MASK)
def METHOD_NAME(self, mask):
return self.mask & mask == mask
class ServerError(RuntimeError):
pass
class HTTPError(ServerError):
pass
class APIError(ServerError):
pass
class CloudPipelineClient:
def __init__(self, api, token):
self._api = api.strip('/')
self._token = token
self.__headers__ = {'Content-Type': 'application/json',
'Authorization': 'Bearer {}'.format(self._token)}
self.__attempts__ = 3
self.__timeout__ = 5
self.__connection_timeout__ = 10
def init_bucket_object(self, name):
storage_payload = self.get_storage(name)
regions_payload = self.get_regions()
bucket = DataStorage.load(storage_payload, regions_payload)
        # When a regular bucket is mounted inside a sensitive run, the only way
        # to check whether an actual write will be allowed is to request write
        # credentials from the API server and parse the response
if bucket.is_write_allowed():
bucket.ro = not self._is_write_allowed(bucket)
return bucket
def _is_write_allowed(self, bucket):
try:
self.get_temporary_credentials(bucket)
return True
except RuntimeError as e:
if 'Write operations are forbidden' in str(e):
return False
else:
raise e
def get_storage(self, name):
logging.info('Getting data storage %s...' % name)
return self._retryable_call('GET', 'datastorage/findByPath?id={}'.format(name)) or {}
def get_regions(self):
logging.info('Getting regions...')
return self._retryable_call('GET', 'cloud/region/info') or []
def get_temporary_credentials(self, bucket):
logging.info('Getting temporary credentials for data storage #%s...' % bucket.id)
data = [{
'id': bucket.id,
'read': bucket.is_read_allowed(),
'write': bucket.is_write_allowed()
}]
payload = self._retryable_call('POST', 'datastorage/tempCredentials/', data=data) or {}
return TemporaryCredentials.load(payload)
def get_storage_lifecycle(self, bucket, path, is_file=False):
logging.info('Getting storage lifecycle for data storage #%s...' % bucket.id)
request_url = 'datastorage/%s/lifecycle/restore/effectiveHierarchy?path=%s&pathType=%s' \
% (str(bucket.id), path, 'FILE' if is_file else 'FOLDER&recursive=false')
payload = self._retryable_call('GET', request_url) or []
return [StorageLifecycle.load(lifecycles_json) for lifecycles_json in payload]
def create_system_logs(self, entries):
self._retryable_call('POST', 'log', data=entries)
def whoami(self):
return self._retryable_call('GET', 'whoami') or {}
def _retryable_call(self, http_method, endpoint, data=None):
url = '{}/{}'.format(self._api, endpoint)
count = 0
exceptions = []
while count < self.__attempts__:
count += 1
try:
response = requests.request(method=http_method, url=url, data=json.dumps(data),
headers=self.__headers__, verify=False,
timeout=self.__connection_timeout__)
if response.status_code != 200:
raise HTTPError('API responded with http status %s.' % str(response.status_code))
response_data = response.json()
status = response_data.get('status') or 'ERROR'
message = response_data.get('message') or 'No message'
if status != 'OK':
raise APIError('%s: %s' % (status, message))
return response_data.get('payload')
except APIError as e:
raise e
except Exception as e:
exceptions.append(e)
time.sleep(self.__timeout__)
raise exceptions[-1] | null |
464 | # coding=utf-8
# Copyright 2018-2023 EvaDB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from test.util import get_evadb_for_testing
from unittest.mock import patch
import pytest
from evadb.catalog.models.utils import DatabaseCatalogEntry
from evadb.server.command_handler import execute_query_fetch_all
class NativeQueryResponse:
def __init__(self):
self.error = None
self.data = None
@pytest.mark.notparallel
class SQLiteNativeStorageEngineTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def get_sqlite_params(self):
return {
"database": "evadb.db",
}
def setUp(self):
connection_params = self.get_sqlite_params()
self.evadb = get_evadb_for_testing()
# Create all class level patches
self.get_database_catalog_entry_patcher = patch(
"evadb.catalog.catalog_manager.CatalogManager.get_database_catalog_entry"
)
self.get_database_catalog_entry_mock = (
self.get_database_catalog_entry_patcher.start()
)
self.execute_native_query_patcher = patch(
"evadb.third_party.databases.sqlite.sqlite_handler.SQLiteHandler.execute_native_query"
)
self.execute_native_query_mock = self.execute_native_query_patcher.start()
self.connect_patcher = patch(
"evadb.third_party.databases.sqlite.sqlite_handler.SQLiteHandler.connect"
)
self.connect_mock = self.connect_patcher.start()
self.disconnect_patcher = patch(
"evadb.third_party.databases.sqlite.sqlite_handler.SQLiteHandler.disconnect"
)
self.disconnect_mock = self.disconnect_patcher.start()
# set return values
self.execute_native_query_mock.return_value = NativeQueryResponse()
self.get_database_catalog_entry_mock.return_value = DatabaseCatalogEntry(
name="test_data_source", engine="sqlite", params=connection_params, row_id=1
)
def tearDown(self):
self.get_database_catalog_entry_patcher.stop()
self.execute_native_query_patcher.stop()
self.connect_patcher.stop()
self.disconnect_patcher.stop()
def METHOD_NAME(self):
execute_query_fetch_all(
self.evadb,
"""USE test_data_source {
SELECT * FROM test_table
}""",
)
self.connect_mock.assert_called_once()
self.execute_native_query_mock.assert_called_once()
self.get_database_catalog_entry_mock.assert_called_once()
self.disconnect_mock.assert_called_once()
def test_execute_sqlite_insert_query(self):
execute_query_fetch_all(
self.evadb,
"""USE test_data_source {
INSERT INTO test_table (
name, age, comment
) VALUES (
'val', 5, 'testing'
)
}""",
)
self.connect_mock.assert_called_once()
self.execute_native_query_mock.assert_called_once()
self.get_database_catalog_entry_mock.assert_called_once()
self.disconnect_mock.assert_called_once()
def test_execute_sqlite_update_query(self):
execute_query_fetch_all(
self.evadb,
"""USE test_data_source {
UPDATE test_table
SET comment = 'update'
WHERE age > 5
}""",
)
self.connect_mock.assert_called_once()
self.execute_native_query_mock.assert_called_once()
self.get_database_catalog_entry_mock.assert_called_once()
self.disconnect_mock.assert_called_once()
def test_execute_sqlite_delete_query(self):
execute_query_fetch_all(
self.evadb,
"""USE test_data_source {
DELETE FROM test_table
WHERE age < 5
}""",
)
self.connect_mock.assert_called_once()
self.execute_native_query_mock.assert_called_once()
self.get_database_catalog_entry_mock.assert_called_once()
self.disconnect_mock.assert_called_once() | null |
465 | ######################################################################
# BioSimSpace: Making biomolecular simulation a breeze!
#
# Copyright: 2017-2023
#
# Authors: Lester Hedges <[email protected]>
#
# BioSimSpace is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BioSimSpace is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BioSimSpace. If not, see <http://www.gnu.org/licenses/>.
#####################################################################
"""
A thin wrapper around Sire.Mol.Atom. This is an internal package and should
not be directly exposed to the user.
"""
__author__ = "Lester Hedges"
__email__ = "[email protected]"
__all__ = ["Atom"]
from sire.legacy import Mol as _SireMol
from ..Types import Coordinate as _Coordinate
from ..Types import Length as _Length
from ._sire_wrapper import SireWrapper as _SireWrapper
class Atom(_SireWrapper):
"""A class for storing an atom."""
def __init__(self, atom):
"""
Constructor.
Parameters
----------
atom : Sire.Mol.Atom, :class:`Atom <BioSimSpace._SireWrappers.Atom>`
A Sire or BioSimSpace Atom object.
"""
# Check that the atom is valid.
# A Sire Atom object.
if isinstance(atom, _SireMol._Mol.Atom):
sire_object = atom
# Another BioSimSpace Atom object.
elif isinstance(atom, Atom):
sire_object = atom._sire_object
# Invalid type.
else:
raise TypeError(
"'atom' must be of type 'Sire.Mol.Atom' "
"or 'BioSimSpace._SireWrappers.Atom'."
)
# Call the base class constructor.
super().__init__(sire_object)
def __str__(self):
"""Return a human readable string representation of the object."""
return "<BioSimSpace.Atom: name=%r, molecule=%d, index=%d>" % (
self.name(),
self.moleculeNumber(),
self.index(),
)
def __repr__(self):
"""Return a string showing how to instantiate the object."""
return "<BioSimSpace.Atom: name=%r, molecule=%d, index=%d>" % (
self.name(),
self.moleculeNumber(),
self.index(),
)
def name(self):
"""
Return the name of the atom.
Returns
-------
name : str
The name of the atom.
"""
return self._sire_object.name().value()
def index(self):
"""
Return the index of the atom.
Returns
-------
index : int
The index of the atom.
"""
return self._sire_object.index().value()
def moleculeNumber(self):
"""
Return the number of the molecule to which this atom belongs.
Returns
-------
number : int
The number of the molecule to which the atom belongs.
"""
return self._sire_object.molecule().number().value()
def coordinates(self, property_map={}):
"""
Return the coordinates of the atom.
Parameters
----------
property_map : dict
A dictionary that maps system "properties" to their user defined
values. This allows the user to refer to properties with their
own naming scheme, e.g. { "charge" : "my-charge" }
Returns
-------
coordinates : class:`Coordinate <BioSimSpace.Types.Coordinate>`
The coordinates of the atom.
"""
prop = property_map.get("coordinates", "coordinates")
# Get the "coordinates" property from the atom.
try:
sire_coord = self._sire_object.property(prop)
coordinates = _Coordinate(
_Length(sire_coord[0], "Angstrom"),
_Length(sire_coord[1], "Angstrom"),
_Length(sire_coord[2], "Angstrom"),
)
except:
return None
# Return the coordinates.
return coordinates
def METHOD_NAME(self, property_map={}):
"""
Return the element.
Parameters
----------
property_map : dict
A dictionary that maps system "properties" to their user defined
values. This allows the user to refer to properties with their
own naming scheme, e.g. { "charge" : "my-charge" }
Returns
-------
element : str
The element.
"""
prop = property_map.get("element", "element")
# Get the element property from the atom.
try:
METHOD_NAME = self._sire_object.property(prop).toString()
except:
METHOD_NAME = ""
# Return the element.
return METHOD_NAME
def toMolecule(self):
"""
Convert a single Atom to a Molecule.
Returns
-------
        molecule : :class:`Molecule <BioSimSpace._SireWrappers.Molecule>`
            The atom converted to a Molecule.
"""
return _Molecule(
_SireMol.PartialMolecule(self._sire_object).extract().molecule()
)
# Import at bottom of module to avoid circular dependency.
from ._molecule import Molecule as _Molecule | null |
466 | import unittest
import numpy as np
import scipy.sparse as sp
from Orange.data import DiscreteVariable
from Orange.preprocess.transformation import \
Transformation, _Indicator, Normalizer, Lookup, Indicator, Indicator1, \
MappingTransform
class TestTransformEquality(unittest.TestCase):
def METHOD_NAME(self):
self.disc1 = DiscreteVariable("d1", values=tuple("abc"))
self.disc1a = DiscreteVariable("d1", values=tuple("abc"))
self.disc2 = DiscreteVariable("d2", values=tuple("abc"))
assert self.disc1 == self.disc1a
def test_transformation(self):
t1 = Transformation(self.disc1)
t1a = Transformation(self.disc1a)
t2 = Transformation(self.disc2)
self.assertEqual(t1, t1)
self.assertEqual(t1, t1a)
self.assertNotEqual(t1, t2)
self.assertEqual(hash(t1), hash(t1a))
self.assertNotEqual(hash(t1), hash(t2))
def test_indicator(self):
t1 = _Indicator(self.disc1, 0)
t1a = _Indicator(self.disc1a, 0)
t2 = _Indicator(self.disc2, 0)
self.assertEqual(t1, t1)
self.assertEqual(t1, t1a)
self.assertNotEqual(t1, t2)
self.assertEqual(hash(t1), hash(t1a))
self.assertNotEqual(hash(t1), hash(t2))
t1 = _Indicator(self.disc1, 0)
t1a = _Indicator(self.disc1a, 1)
self.assertNotEqual(t1, t1a)
self.assertNotEqual(hash(t1), hash(t1a))
def test_normalizer(self):
t1 = Normalizer(self.disc1, 0, 1)
t1a = Normalizer(self.disc1a, 0, 1)
t2 = Normalizer(self.disc2, 0, 1)
self.assertEqual(t1, t1)
self.assertEqual(t1, t1a)
self.assertNotEqual(t1, t2)
self.assertEqual(hash(t1), hash(t1a))
self.assertNotEqual(hash(t1), hash(t2))
t1 = Normalizer(self.disc1, 0, 1)
t1a = Normalizer(self.disc1a, 1, 1)
self.assertNotEqual(t1, t1a)
self.assertNotEqual(hash(t1), hash(t1a))
t1 = Normalizer(self.disc1, 0, 1)
t1a = Normalizer(self.disc1a, 0, 2)
self.assertNotEqual(t1, t1a)
self.assertNotEqual(hash(t1), hash(t1a))
def test_lookup(self):
t1 = Lookup(self.disc1, np.array([0, 2, 1]), 1)
t1a = Lookup(self.disc1a, np.array([0, 2, 1]), 1)
t2 = Lookup(self.disc2, np.array([0, 2, 1]), 1)
self.assertEqual(t1, t1)
self.assertEqual(t1, t1a)
self.assertNotEqual(t1, t2)
self.assertEqual(hash(t1), hash(t1a))
self.assertNotEqual(hash(t1), hash(t2))
t1 = Lookup(self.disc1, np.array([0, 2, 1]), 1)
t1a = Lookup(self.disc1a, np.array([1, 2, 0]), 1)
self.assertNotEqual(t1, t1a)
self.assertNotEqual(hash(t1), hash(t1a))
t1 = Lookup(self.disc1, np.array([0, 2, 1]), 1)
t1a = Lookup(self.disc1a, np.array([0, 2, 1]), 2)
self.assertNotEqual(t1, t1a)
self.assertNotEqual(hash(t1), hash(t1a))
def test_mapping(self):
def test_equal(a, b):
self.assertEqual(a, b)
self.assertEqual(hash(a), hash(b))
t1 = MappingTransform(self.disc1, {"a": "1", "b": "2", "c":"3"})
t1a = MappingTransform(self.disc1a, {"a": "1", "b": "2", "c":"3"})
t2 = MappingTransform(self.disc2, {"a": "1", "b": "2", "c":"3"},
unknown="")
test_equal(t1, t1a)
self.assertNotEqual(t1, t2)
t1 = MappingTransform(self.disc1, {"a": 1, "b": 2, "c": float("nan")},
unknown=float("nan"))
t1_ = MappingTransform(self.disc1, {"a": 1, "b": 2, "c": float("nan")},
unknown=float("nan"))
test_equal(t1, t1_)
t1_ = MappingTransform(self.disc1, {"a": 1, "b": float("nan"), "c": 2},
unknown=float("nan"))
self.assertNotEqual(t1, t1_)
t1_ = MappingTransform(self.disc1, {}, unknown=float("nan"))
self.assertNotEqual(t1, t1_)
t1_ = MappingTransform(self.disc1, {"f": 4, "k": 2, "j": 10},
unknown=float("nan"))
self.assertNotEqual(t1, t1_)
with self.assertRaises(ValueError):
MappingTransform(self.disc1, {float("nan"): 1})
class TestIndicator(unittest.TestCase):
def test_nan(self):
var = DiscreteVariable("d", tuple("abcde"))
col = np.array([1.0, 4, 2, np.nan, 2, 0])
transform = Indicator(var, 2).transform
expected = [0, 0, 1, np.nan, 1, 0]
np.testing.assert_equal(transform(col), expected)
sparse = transform(sp.csr_matrix(col))
self.assertTrue(sp.issparse(sparse))
np.testing.assert_equal(sparse.toarray().ravel(), expected)
self.assertEqual(transform(1), 0)
self.assertEqual(transform(2), 1)
self.assertTrue(np.isnan(transform(np.nan)))
transform = Indicator(var, 0).transform
expected = [0, 0, 0, np.nan, 0, 1]
np.testing.assert_equal(transform(col), expected)
sparse = transform(sp.csr_matrix(col))
        # Currently, this always returns a dense array
assert not sp.issparse(sparse)
np.testing.assert_equal(sparse, expected)
self.assertEqual(transform(1), 0)
self.assertEqual(transform(0), 1)
self.assertTrue(np.isnan(transform(np.nan)))
transform = Indicator1(var, 2).transform
expected = [-1, -1, 1, np.nan, 1, -1]
np.testing.assert_equal(transform(col), expected)
np.testing.assert_equal(transform(sp.csr_matrix(col).toarray().ravel()),
expected)
self.assertEqual(transform(1), -1)
self.assertEqual(transform(2), 1)
self.assertTrue(np.isnan(transform(np.nan)))
if __name__ == '__main__':
unittest.main() | null |
467 | from collections import UserDict
import functools
from falcon import errors
from falcon.constants import MEDIA_JSON
from falcon.constants import MEDIA_MULTIPART
from falcon.constants import MEDIA_URLENCODED
from falcon.constants import PYPY
from falcon.media.json import JSONHandler
from falcon.media.multipart import MultipartFormHandler
from falcon.media.multipart import MultipartParseOptions
from falcon.media.urlencoded import URLEncodedFormHandler
from falcon.util import deprecation
from falcon.util import misc
from falcon.vendor import mimeparse
class MissingDependencyHandler:
"""Placeholder handler that always raises an error.
This handler is used by the framework for media types that require an
external dependency that can not be found.
"""
def __init__(self, handler: str, library: str):
self._msg = ('The {} requires the {} library, which is not installed.').format(
handler, library
)
def _raise(self, *args, **kwargs):
raise RuntimeError(self._msg)
# TODO(kgriffs): Add support for async later if needed.
serialize = deserialize = _raise
class Handlers(UserDict):
"""A :class:`dict`-like object that manages Internet media type handlers."""
def __init__(self, initial=None):
self._resolve = self._create_resolver()
handlers = initial or {
MEDIA_JSON: JSONHandler(),
MEDIA_MULTIPART: MultipartFormHandler(),
MEDIA_URLENCODED: URLEncodedFormHandler(),
}
# NOTE(jmvrbanac): Directly calling UserDict as it's not inheritable.
# Also, this results in self.update(...) being called.
UserDict.__init__(self, handlers)
def __setitem__(self, key, value):
super().__setitem__(key, value)
# NOTE(kgriffs): When the mapping changes, we do not want to use a
# cached handler from the previous mapping, in case it was
# replaced.
self._resolve.cache_clear()
def __delitem__(self, key):
super().__delitem__(key)
# NOTE(kgriffs): Similar to __setitem__(), we need to avoid resolving
# to a cached handler that was removed.
self._resolve.cache_clear()
def _create_resolver(self):
# PERF(kgriffs): Under PyPy the LRU is relatively expensive as compared
# to the common case of the self.data lookup succeeding. Using
# _lru_cache_for_simple_logic() takes this into account by essentially
# creating a nop but also decorating the method with a dummy
# cache_clear().
# PERF(kgriffs): Most apps will probably only use one or two media handlers,
# but we use maxsize=64 to give us some wiggle room just in case someone
# is using versioned media types or something, and to cover various
# combinations of the method args. We may need to tune this later.
@misc._lru_cache_for_simple_logic(maxsize=64)
def resolve(media_type, default, raise_not_found=True):
if media_type == '*/*' or not media_type:
media_type = default
# PERF(kgriffs): Under CPython we do not need this shortcut to
# improve performance since most calls will be resolved by the
# LRU cache on resolve(). On the other hand, it doesn't hurt,
# and it certainly makes a difference under PyPy.
try:
handler = self.data[media_type]
except KeyError:
handler = None
if not handler:
# PERF(kgriffs): We just do this slower check every time, rather
# than trying to first check the dict directly, since the result
# will almost always be cached anyway.
# NOTE(kgriffs): Wrap keys in a tuple to make them hashable.
matched_type = METHOD_NAME(media_type, tuple(self.data.keys()))
if not matched_type:
if raise_not_found:
raise errors.HTTPUnsupportedMediaType(
description='{0} is an unsupported media type.'.format(
media_type
)
)
return None, None, None
handler = self.data[matched_type]
return (
handler,
getattr(handler, '_serialize_sync', None),
getattr(handler, '_deserialize_sync', None),
)
return resolve
@deprecation.deprecated(
'This undocumented method is no longer supported as part of the public '
'interface and will be removed in a future release.'
)
def find_by_media_type(self, media_type, default, raise_not_found=True):
# PERF(jmvrbanac): Check via a quick methods first for performance
if media_type == '*/*' or not media_type:
media_type = default
try:
return self.data[media_type]
except KeyError:
pass
# PERF(jmvrbanac): Fallback to the slower method.
# NOTE(kgriffs): Wrap keys in a tuple to make them hashable.
resolved = METHOD_NAME(media_type, tuple(self.data.keys()))
if not resolved:
if raise_not_found:
raise errors.HTTPUnsupportedMediaType(
description='{0} is an unsupported media type.'.format(media_type)
)
return None
return self.data[resolved]
def METHOD_NAME(media_type, all_media_types):
result = None
try:
# NOTE(jmvrbanac): Mimeparse will return an empty string if it can
# parse the media type, but cannot find a suitable type.
result = mimeparse.best_match(all_media_types, media_type)
except ValueError:
pass
return result
if PYPY:
# NOTE(kgriffs): The most common case for resolve() is that the
# direct self.data shortcut will succeed. In this case, the LRU
# lookup for resolve() is actually slower under PyPy than just
# executing the method's body each time.
#
# However, if the shortcut does not succeed, invoking best_match()
# is relatively expensive, so it does make sense to use an LRU
# in that case.
METHOD_NAME = functools.lru_cache(maxsize=64)(METHOD_NAME) # pragma: nocover
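# Illustrative example (sample media types only, not part of falcon's public
# docs): the helper above delegates content negotiation to python-mimeparse,
# so a call such as
#   METHOD_NAME('application/json; charset=utf-8',
#               ('application/json', 'multipart/form-data'))
# is expected to resolve to 'application/json', while an unparseable or
# unmatched header yields a falsy result (None or an empty string).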
# NOTE(vytas): An ugly way to work around circular imports.
MultipartParseOptions._DEFAULT_HANDLERS = Handlers(
{
MEDIA_JSON: JSONHandler(),
MEDIA_URLENCODED: URLEncodedFormHandler(),
}
) # type: ignore | null |
468 | import os
import ssl
import sys
import signal
import tornado.web
import tornado.process
import tornado.httpserver
import tornado.autoreload
from DIRAC import gLogger, S_OK
from WebAppDIRAC.Core.HandlerMgr import HandlerMgr
from WebAppDIRAC.Core.TemplateLoader import TemplateLoader
from WebAppDIRAC.Lib.SessionData import SessionData
from WebAppDIRAC.Lib import Conf
# If we are running with python3, Tornado will use asyncio,
# and we have to convince it to let us run in a different thread
# Doing this ensures a consistent behavior between py2 and py3
# see https://www.tornadoweb.org/en/stable/asyncio.html#tornado.platform.asyncio.AnyThreadEventLoopPolicy
import asyncio
from tornado.platform.asyncio import AnyThreadEventLoopPolicy
asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
class App:
def __init__(self, handlersLoc="WebApp.handler"):
self.__handlerMgr = HandlerMgr(handlersLoc, Conf.rootURL())
self.__servers = {}
self.log = gLogger.getSubLogger("Web")
def _logRequest(self, handler):
status = handler.get_status()
if status < 400:
logm = self.log.notice
elif status < 500:
logm = self.log.warn
else:
logm = self.log.error
request_time = 1000.0 * handler.request.request_time()
logm("%d %s %.2fms" % (status, handler._request_summary(), request_time))
def __reloadAppCB(self):
gLogger.notice("\n !!!!!! Reloading web app...\n")
def stopChildProcesses(self, sig, frame):
"""
It is used to properly stop tornado when more than one process is used.
In principle this is doing the job of runsv....
:param int sig: the signal sent to the process
:param object frame: execution frame which contains the child processes
"""
# tornado.ioloop.IOLoop.instance().add_timeout(time.time()+5, sys.exit)
for child in frame.f_locals.get("children", []):
gLogger.info("Stopping child processes: %d" % child)
os.kill(child, signal.SIGTERM)
# tornado.ioloop.IOLoop.instance().stop()
# gLogger.info('exit success')
sys.exit(0)
def METHOD_NAME(self, port=None):
"""Load Web portals
:return: S_OK(dict)/S_ERROR()
"""
app = {"port": port or Conf.HTTPSPort()}
# Calculating routes
result = self.__handlerMgr.getRoutes()
if not result["OK"]:
return result
app["routes"] = result["Value"]
# Initialize the session data
SessionData.setHandlers(self.__handlerMgr.getHandlers()["Value"])
# Create the app
tLoader = TemplateLoader(self.__handlerMgr.getPaths("template"))
app["settings"] = dict(
debug=Conf.devMode(),
template_loader=tLoader,
cookie_secret=str(Conf.cookieSecret()),
log_function=self._logRequest,
)
return S_OK(app)
def bootstrap(self):
"""
Configure and create web app
"""
self.log.always("\n ====== Starting DIRAC web app ====== \n")
# Calculating routes
result = self.__handlerMgr.getRoutes()
if not result["OK"]:
return result
routes = result["Value"]
# Initialize the session data
SessionData.setHandlers(self.__handlerMgr.getHandlers()["Value"])
# Create the app
tLoader = TemplateLoader(self.__handlerMgr.getPaths("template"))
kw = dict(
debug=Conf.devMode(),
template_loader=tLoader,
cookie_secret=str(Conf.cookieSecret()),
log_function=self._logRequest,
autoreload=Conf.numProcesses() < 2,
)
        # Please do not move these lines. They must come before fork_processes
signal.signal(signal.SIGTERM, self.stopChildProcesses)
signal.signal(signal.SIGINT, self.stopChildProcesses)
        # Check processes if we're under a load balancer
if Conf.balancer() and Conf.numProcesses() not in (0, 1):
tornado.process.fork_processes(Conf.numProcesses(), max_restarts=0)
kw["debug"] = False
# Debug mode?
if kw["debug"]:
self.log.info("Configuring in developer mode...")
# Configure tornado app
self.__app = tornado.web.Application(routes, **kw)
port = Conf.HTTPPort()
self.log.notice(f"Configuring HTTP on port {port}")
# Create the web servers
srv = tornado.httpserver.HTTPServer(self.__app, xheaders=True)
srv.listen(port)
self.__servers[("http", port)] = srv
Conf.generateRevokedCertsFile() # it is used by nginx....
if Conf.HTTPS():
self.log.notice(f"Configuring HTTPS on port {Conf.HTTPSPort()}")
sslops = dict(
certfile=Conf.HTTPSCert(),
keyfile=Conf.HTTPSKey(),
cert_reqs=ssl.CERT_OPTIONAL,
ca_certs=Conf.generateCAFile(),
ssl_version="tls",
)
sslprotocol = str(Conf.SSLProtocol())
aviableProtocols = [i for i in dir(ssl) if i.find("PROTOCOL") == 0]
if sslprotocol and sslprotocol != "":
if sslprotocol in aviableProtocols:
sslops["ssl_version"] = getattr(ssl, sslprotocol)
else:
message = f"{sslprotocol} protocol is not provided."
message += f"The following protocols are provided: {str(aviableProtocols)}"
gLogger.warn(message)
self.log.debug(" - %s" % "\n - ".join([f"{k} = {sslops[k]}" for k in sslops]))
srv = tornado.httpserver.HTTPServer(self.__app, ssl_options=sslops, xheaders=True)
port = Conf.HTTPSPort()
srv.listen(port)
self.__servers[("https", port)] = srv
else:
            # When NGINX is used, Conf.HTTPS() returns False, meaning tornado
            # does not have to be configured to listen on port 443
            Conf.generateCAFile()  # if we use Nginx we have to generate the CAs as well...
return result
def run(self):
"""
Start web servers
"""
bu = Conf.rootURL().strip("/")
urls = []
for proto, port in self.__servers:
urls.append(f"{proto}://0.0.0.0:{port}/{bu}/")
self.log.always(f"Listening on {' and '.join(urls)}")
tornado.autoreload.add_reload_hook(self.__reloadAppCB)
tornado.ioloop.IOLoop.instance().start() | null |
469 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
class DescribeDBInstancesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'DescribeDBInstances')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ConnectionString(self): # String
return self.get_query_params().get('ConnectionString')
def set_ConnectionString(self, ConnectionString): # String
self.add_query_param('ConnectionString', ConnectionString)
def get_EngineVersion(self): # String
return self.get_query_params().get('EngineVersion')
def set_EngineVersion(self, EngineVersion): # String
self.add_query_param('EngineVersion', EngineVersion)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_proxyId(self): # String
return self.get_query_params().get('proxyId')
def set_proxyId(self, proxyId): # String
self.add_query_param('proxyId', proxyId)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_DBInstanceType(self): # String
return self.get_query_params().get('DBInstanceType')
def set_DBInstanceType(self, DBInstanceType): # String
self.add_query_param('DBInstanceType', DBInstanceType)
def get_DBInstanceClass(self): # String
return self.get_query_params().get('DBInstanceClass')
def set_DBInstanceClass(self, DBInstanceClass): # String
self.add_query_param('DBInstanceClass', DBInstanceClass)
def get_Tags(self): # String
return self.get_query_params().get('Tags')
def set_Tags(self, Tags): # String
self.add_query_param('Tags', Tags)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_MaxResults(self): # Integer
return self.get_query_params().get('MaxResults')
def set_MaxResults(self, MaxResults): # Integer
self.add_query_param('MaxResults', MaxResults)
def get_InstanceNetworkType(self): # String
return self.get_query_params().get('InstanceNetworkType')
def set_InstanceNetworkType(self, InstanceNetworkType): # String
self.add_query_param('InstanceNetworkType', InstanceNetworkType)
def get_ConnectionMode(self): # String
return self.get_query_params().get('ConnectionMode')
def set_ConnectionMode(self, ConnectionMode): # String
self.add_query_param('ConnectionMode', ConnectionMode)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_InstanceLevel(self): # Integer
return self.get_query_params().get('InstanceLevel')
def set_InstanceLevel(self, InstanceLevel): # Integer
self.add_query_param('InstanceLevel', InstanceLevel)
def get_SearchKey(self): # String
return self.get_query_params().get('SearchKey')
def set_SearchKey(self, SearchKey): # String
self.add_query_param('SearchKey', SearchKey)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_Expired(self): # String
return self.get_query_params().get('Expired')
def set_Expired(self, Expired): # String
self.add_query_param('Expired', Expired)
def get_Engine(self): # String
return self.get_query_params().get('Engine')
def set_Engine(self, Engine): # String
self.add_query_param('Engine', Engine)
def get_NextToken(self): # String
return self.get_query_params().get('NextToken')
def set_NextToken(self, NextToken): # String
self.add_query_param('NextToken', NextToken)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_DBInstanceStatus(self): # String
return self.get_query_params().get('DBInstanceStatus')
def set_DBInstanceStatus(self, DBInstanceStatus): # String
self.add_query_param('DBInstanceStatus', DBInstanceStatus)
def get_DBInstanceId(self): # String
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self, DBInstanceId): # String
self.add_query_param('DBInstanceId', DBInstanceId)
def get_DedicatedHostGroupId(self): # String
return self.get_query_params().get('DedicatedHostGroupId')
def set_DedicatedHostGroupId(self, DedicatedHostGroupId): # String
self.add_query_param('DedicatedHostGroupId', DedicatedHostGroupId)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_DedicatedHostId(self): # String
return self.get_query_params().get('DedicatedHostId')
def set_DedicatedHostId(self, DedicatedHostId): # String
self.add_query_param('DedicatedHostId', DedicatedHostId)
def get_Filter(self): # String
return self.get_query_params().get('Filter')
def set_Filter(self, Filter): # String
self.add_query_param('Filter', Filter)
def METHOD_NAME(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_Category(self): # String
return self.get_query_params().get('Category')
def set_Category(self, Category): # String
self.add_query_param('Category', Category)
def get_PayType(self): # String
return self.get_query_params().get('PayType')
def set_PayType(self, PayType): # String
self.add_query_param('PayType', PayType) | null |
470 | # Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring, protected-access
from unittest import skip
from unittest.mock import patch, Mock
import numpy as np
from scipy import sparse
from Orange.data import Table, Domain, ContinuousVariable, DiscreteVariable
from Orange.widgets.tests.base import WidgetTest
from Orange.widgets.tests.utils import simulate, possible_duplicate_table
from Orange.widgets.unsupervised.owmanifoldlearning import OWManifoldLearning
class TestOWManifoldLearning(WidgetTest):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.iris = Table("iris")
def setUp(self):
self.widget = self.create_widget(
OWManifoldLearning, stored_settings={"auto_apply": False}) # type: OWManifoldLearning
def click_apply(self):
self.widget.apply_button.button.clicked.emit()
def test_input_data(self):
"""Check widget's data"""
self.assertEqual(self.widget.data, None)
self.send_signal(self.widget.Inputs.data, self.iris)
self.assertEqual(self.widget.data, self.iris)
self.send_signal(self.widget.Inputs.data, None)
self.assertEqual(self.widget.data, None)
def test_output_data(self):
"""Check if data is on output after apply"""
self.assertIsNone(self.get_output(self.widget.Outputs.transformed_data))
self.send_signal(self.widget.Inputs.data, self.iris)
self.click_apply()
self.assertIsInstance(self.get_output(self.widget.Outputs.transformed_data), Table)
self.send_signal(self.widget.Inputs.data, None)
self.click_apply()
self.assertIsNone(self.get_output(self.widget.Outputs.transformed_data))
def test_n_components(self):
"""Check the output for various numbers of components"""
self.send_signal(self.widget.Inputs.data, self.iris)
for i in range(self.widget.n_components_spin.minimum(),
self.widget.n_components_spin.maximum()):
self.assertEqual(self.widget.data, self.iris)
self.widget.n_components_spin.setValue(i)
self.widget.n_components_spin.onEnter()
self.click_apply()
self._compare_tables(self.get_output(self.widget.Outputs.transformed_data), i)
def test_manifold_methods(self):
"""Check output for various manifold methods"""
self.send_signal(self.widget.Inputs.data, self.iris)
n_comp = self.widget.n_components
for i in range(len(self.widget.MANIFOLD_METHODS)):
self.assertEqual(self.widget.data, self.iris)
self.widget.manifold_methods_combo.activated.emit(i)
self.click_apply()
self._compare_tables(self.get_output(self.widget.Outputs.transformed_data), n_comp)
def _compare_tables(self, _output, n_components):
"""Helper function for table comparison"""
self.assertEqual((len(self.iris), n_components), _output.X.shape)
np.testing.assert_array_equal(self.iris.Y, _output.Y)
np.testing.assert_array_equal(self.iris.metas, _output.metas)
def test_sparse_data(self):
data = Table("iris").to_sparse()
self.assertTrue(sparse.issparse(data.X))
def __callback():
# Send sparse data to input
self.send_signal(self.widget.Inputs.data, data)
self.click_apply()
self.assertTrue(self.widget.Error.sparse_not_supported.is_shown())
# Clear input
self.send_signal(self.widget.Inputs.data, None)
self.click_apply()
self.assertFalse(self.widget.Error.sparse_not_supported.is_shown())
simulate.combobox_run_through_all(
self.widget.manifold_methods_combo, callback=__callback,
)
def test_metrics(self):
# Select t-SNE method, which is the only method that supports metrics
simulate.combobox_activate_item(self.widget.manifold_methods_combo, "t-SNE")
def __callback():
# Send data to input
self.send_signal(self.widget.Inputs.data, self.iris)
self.click_apply()
self.assertFalse(self.widget.Error.manifold_error.is_shown())
# Clear input
self.send_signal(self.widget.Inputs.data, None)
self.click_apply()
self.assertFalse(self.widget.Error.manifold_error.is_shown())
simulate.combobox_run_through_all(
self.widget.tsne_editor.controls.metric_index, callback=__callback,
)
def METHOD_NAME(self):
simulate.combobox_activate_item(self.widget.manifold_methods_combo, "MDS")
data = possible_duplicate_table('C0', class_var=True)
self.send_signal(self.widget.Inputs.data, data)
self.click_apply()
out = self.get_output(self.widget.Outputs.transformed_data)
        self.assertEqual(str(out.domain.attributes[0]), 'C0 (1)')
@skip
def test_singular_matrices(self):
"""
Handle singular matrices.
GH-2228
TODO: This test makes sense with the ``Mahalanobis`` distance metric
which is currently not supported by tSNE. In case it is ever
re-introduced, this test is very much required.
"""
table = Table(
Domain(
[ContinuousVariable("a"), ContinuousVariable("b")],
class_vars=DiscreteVariable("c", values=("0", "1"))),
list(zip(
[1, 1, 1],
[0, 1, 2],
[0, 1, 1]))
)
self.send_signal(self.widget.Inputs.data, table)
self.widget.manifold_methods_combo.activated.emit(0) # t-SNE
self.widget.tsne_editor.metric_combo.activated.emit(4) # Mahalanobis
self.assertFalse(self.widget.Error.manifold_error.is_shown())
self.click_apply()
self.assertTrue(self.widget.Error.manifold_error.is_shown())
def test_out_of_memory(self):
"""
Show error message when out of memory.
GH-2441
"""
table = Table("iris")
with patch("Orange.projection.manifold.MDS.__call__", Mock()) as mock:
mock.side_effect = MemoryError
self.send_signal("Data", table)
self.widget.manifold_methods_combo.activated.emit(1)
self.click_apply()
self.assertTrue(self.widget.Error.out_of_memory.is_shown())
def test_unconditional_commit_on_new_signal(self):
with patch.object(self.widget.commit, 'now') as apply:
self.widget.auto_apply = False
apply.reset_mock()
self.send_signal(self.widget.Inputs.data, self.iris)
apply.assert_called()
@patch("Orange.widgets.unsupervised.owmanifoldlearning.OWManifoldLearning.report_items")
def test_report(self, mocked_report: Mock):
for i in range(len(self.widget.MANIFOLD_METHODS)):
self.send_signal(self.widget.Inputs.data, self.iris)
self.widget.manifold_methods_combo.activated.emit(i)
self.wait_until_finished()
self.widget.send_report()
mocked_report.assert_called()
self.assertEqual(mocked_report.call_count, 3)
mocked_report.reset_mock()
self.send_signal(self.widget.Inputs.data, None)
self.widget.send_report()
self.assertEqual(mocked_report.call_count, 2)
mocked_report.reset_mock() | null |
471 | # coding=utf-8
# Copyright 2018-2023 EvaDB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from evadb.constants import UNDEFINED_GROUP_ID
from evadb.database import EvaDBDatabase
from evadb.optimizer.cost_model import CostModel
from evadb.optimizer.group_expression import GroupExpression
from evadb.optimizer.memo import Memo
from evadb.optimizer.operators import Dummy, Operator
from evadb.optimizer.optimizer_task_stack import OptimizerTaskStack
from evadb.optimizer.rules.rules_manager import RulesManager
class OptimizerContext:
"""
Maintain context information for the optimizer
Arguments:
        _task_stack (OptimizerTaskStack):
            stack to keep track of outstanding tasks
"""
def __init__(
self,
db: EvaDBDatabase,
cost_model: CostModel,
rules_manager: RulesManager = None,
):
self._db = db
self._task_stack = OptimizerTaskStack()
self._memo = Memo()
self._cost_model = cost_model
self._rules_manager = rules_manager or RulesManager(db.config)
@property
def db(self):
return self._db
@property
def rules_manager(self):
return self._rules_manager
@property
def cost_model(self):
return self._cost_model
@property
def METHOD_NAME(self):
return self._task_stack
@property
def memo(self):
return self._memo
def _xform_opr_to_group_expr(self, opr: Operator) -> GroupExpression:
"""
        Note: internal function. Generates a group expression from a
        logical operator tree. The caller is responsible for assigning
        the group to the returned GroupExpression.
"""
# Go through the children first.
child_ids = []
for child_opr in opr.children:
if isinstance(child_opr, Dummy):
child_ids.append(child_opr.group_id)
else:
child_expr = self._xform_opr_to_group_expr(opr=child_opr)
# add the expr to memo
# handles duplicates and assigns group id
memo_expr = self.memo.add_group_expr(child_expr)
child_ids.append(memo_expr.group_id)
        # The group expression only needs the operator content. Remove
        # the operator's children, as the parent-child relationship is
        # captured by the group expressions.
        # Hack: shallow-copy all the content except the children and
        # manually clear the children, as we don't need the dependency.
        # A better fix is to rewrite the operator class to expose only
        # the content.
opr_copy = copy.copy(opr)
opr_copy.clear_children()
expr = GroupExpression(opr=opr_copy, children=child_ids)
return expr
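    # Illustrative flow (sketch only; ``logical_plan_root`` and the driver that
    # schedules optimizer tasks are assumed, not defined in this module):
    #
    #   ctx = OptimizerContext(db, CostModel())
    #   root_expr = ctx.add_opr_to_group(logical_plan_root)
    #   # tasks pushed on the task stack then read and extend ctx.memo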
def replace_expression(self, opr: Operator, group_id: int):
"""
Removes all the expressions from the specified group and
        creates a new expression. This is called by rewrite rules. The
new expr gets assigned a new group id
"""
self.memo.erase_group(group_id)
new_expr = self._xform_opr_to_group_expr(opr)
new_expr = self.memo.add_group_expr(new_expr, group_id)
return new_expr
def add_opr_to_group(self, opr: Operator, group_id: int = UNDEFINED_GROUP_ID):
"""
Convert operator to group_expression and add to the group
"""
grp_expr = self._xform_opr_to_group_expr(opr)
grp_expr = self.memo.add_group_expr(grp_expr, group_id)
return grp_expr | null |
472 | from typing import Any, Dict, List, Optional
from boa3.internal import constants
from boa3.internal.model.callable import Callable
from boa3.internal.model.method import Method
from boa3.internal.model.property import Property
from boa3.internal.model.symbol import ISymbol
from boa3.internal.model.type.classes.classarraytype import ClassArrayType
from boa3.internal.model.type.classes.classscope import ClassScope
from boa3.internal.model.type.classes.classtype import ClassType
from boa3.internal.model.type.itype import IType
from boa3.internal.model.variable import Variable
class UserClass(ClassArrayType):
def __init__(self, identifier: str, decorators: List[Callable] = None,
bases: List[ClassType] = None):
super(ClassArrayType, self).__init__(identifier, decorators, bases)
self._static_methods: Dict[str, Method] = {}
self._class_variables: Dict[str, Variable] = {}
self._class_methods: Dict[str, Method] = {}
self._instance_variables: Dict[str, Variable] = {}
self._instance_methods: Dict[str, Method] = {}
self._properties: Dict[str, Property] = {}
self.imported_symbols = {}
@property
def shadowing_name(self) -> str:
return 'class'
@property
def class_variables(self) -> Dict[str, Variable]:
class_vars = super().class_variables
class_vars.update(self._class_variables)
return class_vars
@property
def instance_variables(self) -> Dict[str, Variable]:
instance_vars = super().instance_variables
instance_vars.update(self._instance_variables)
return instance_vars
@property
def properties(self) -> Dict[str, Property]:
props = super().properties
props.update(self._properties)
return props
@property
def static_methods(self) -> Dict[str, Method]:
static_funcs = super().static_methods
static_funcs.update(self._static_methods)
return static_funcs
@property
def class_methods(self) -> Dict[str, Method]:
class_funcs = super().class_methods
class_funcs.update(self._class_methods)
return class_funcs
@property
def instance_methods(self) -> Dict[str, Method]:
instance_funcs = super().instance_methods
instance_funcs.update(self._instance_methods)
return instance_funcs
def include_variable(self, var_id: str, var: Variable, is_instance: bool):
"""
        Includes a variable in the class or instance variable list
        :param var_id: variable identifier
        :param var: variable to be included
        :param is_instance: whether it is an instance variable or a class variable
"""
if not is_instance:
self._class_variables[var_id] = var
else:
self._instance_variables[var_id] = var
def METHOD_NAME(self, prop_id: str, prop: Property):
"""
Includes a property into the list of properties
:param prop_id: property identifier
:param prop: property to be included
"""
self._properties[prop_id] = prop
def include_callable(self, method_id: str, method: Callable, scope: ClassScope = ClassScope.INSTANCE) -> bool:
"""
Includes a method into the scope of the class
:param method_id: method identifier
:param method: method to be included
        :param scope: which class scope this method should be included in
        :return: whether the method was included
        """
from boa3.internal.model.builtin.builtin import Builtin
if isinstance(method, Method):
if Builtin.ClassMethodDecorator in method.decorators or scope is ClassScope.CLASS:
methods_map = self._class_methods
elif Builtin.StaticMethodDecorator in method.decorators or scope is ClassScope.STATIC:
methods_map = self._static_methods
else:
methods_map = self._instance_methods
if method_id not in methods_map:
methods_map[method_id] = method
return True
return False
def include_symbol(self, symbol_id: str, symbol: ISymbol, scope: ClassScope = ClassScope.INSTANCE):
"""
        Includes a symbol into the scope of the class
        :param symbol_id: symbol identifier
        :param symbol: symbol to be included
        :param scope: which class scope this symbol should be included in
"""
if symbol_id not in self.symbols:
if isinstance(symbol, Variable):
self.include_variable(symbol_id, symbol, scope == ClassScope.INSTANCE)
elif isinstance(symbol, Property):
self.METHOD_NAME(symbol_id, symbol)
elif isinstance(symbol, Callable):
self.include_callable(symbol_id, symbol, scope)
else:
self.imported_symbols[symbol_id] = symbol
def constructor_method(self) -> Optional[Method]:
if constants.INIT_METHOD_ID not in self._class_methods:
            # TODO: create a generic __init__ for instantiating classes that don't have it declared
from boa3.internal.model.type.classes.classinitmethoddefault import ClassInitMethod
self._class_methods[constants.INIT_METHOD_ID] = ClassInitMethod(self)
return self._class_methods[constants.INIT_METHOD_ID]
def is_type_of(self, value: Any) -> bool:
if value is self:
return True
return any(base.is_type_of(value) for base in self.bases)
@classmethod
def _is_type_of(cls, value: Any) -> bool:
return isinstance(value, UserClass)
@classmethod
def build(cls, value: Any) -> IType:
return cls()
_EMPTY_CLASS = UserClass('-internal_use') | null |
473 | #!/usr/bin/env python
import asyncio
import logging
import websockets
import json
from hummingbot.core.api_throttler.async_throttler import AsyncThrottler
from hummingbot.core.utils.async_utils import safe_ensure_future
from typing import (
Any,
AsyncIterable,
Dict,
List,
Optional,
)
from websockets.exceptions import ConnectionClosed
from hummingbot.logger import HummingbotLogger
from hummingbot.connector.exchange.altmarkets.altmarkets_constants import Constants
from hummingbot.connector.exchange.altmarkets.altmarkets_auth import AltmarketsAuth
from hummingbot.connector.exchange.altmarkets.altmarkets_utils import RequestId
# reusable websocket class
# ToDo: We should eventually remove this class, and instantiate web socket connection normally (see Binance for example)
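# Rough usage sketch (illustrative only; the stream name below is a placeholder and
# the tracker loop that normally drives this class is assumed, not shown here):
#
#   ws = AltmarketsWebsocket(throttler=AsyncThrottler(Constants.RATE_LIMITS))
#   await ws.connect()
#   await ws.subscribe({"trade": ["btcusdt.trades"]})
#   async for msg in ws.on_message():
#       if msg is not None:
#           process(msg)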
class AltmarketsWebsocket(RequestId):
_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._logger is None:
cls._logger = logging.getLogger(__name__)
return cls._logger
def __init__(self,
auth: Optional[AltmarketsAuth] = None,
throttler: Optional[AsyncThrottler] = None):
self._auth: Optional[AltmarketsAuth] = auth
        self._isPrivate = self._auth is not None
self._WS_URL = Constants.WS_PRIVATE_URL if self._isPrivate else Constants.WS_PUBLIC_URL
self._client: Optional[websockets.WebSocketClientProtocol] = None
self._is_subscribed = False
self._throttler = throttler or AsyncThrottler(Constants.RATE_LIMITS)
@property
def is_connected(self):
return self._client.open if self._client is not None else False
@property
def is_subscribed(self):
return self._is_subscribed
# connect to exchange
async def connect(self):
extra_headers = self._auth.get_headers() if self._isPrivate else {"User-Agent": Constants.USER_AGENT}
self._client = await websockets.connect(self._WS_URL, extra_headers=extra_headers)
return self._client
# disconnect from exchange
async def disconnect(self):
if self._client is None:
return
await self._client.close()
# receive & parse messages
async def _messages(self) -> AsyncIterable[Any]:
try:
while True:
try:
raw_msg_str: str = await asyncio.wait_for(self._client.recv(), timeout=Constants.MESSAGE_TIMEOUT)
try:
msg = json.loads(raw_msg_str)
if "ping" in msg:
payload = {"op": "pong", "timestamp": str(msg["ping"])}
safe_ensure_future(self._client.send(json.dumps(payload)))
yield None
elif "success" in msg:
ws_method: str = msg.get('success', {}).get('message')
if ws_method in ['subscribed', 'unsubscribed']:
if ws_method == 'subscribed' and len(msg['success']['streams']) > 0:
self._is_subscribed = True
yield None
elif ws_method == 'unsubscribed':
self._is_subscribed = False
yield None
else:
yield msg
except ValueError:
continue
except asyncio.TimeoutError:
await asyncio.wait_for(self._client.ping(), timeout=Constants.PING_TIMEOUT)
        except asyncio.TimeoutError:
            self.logger().warning("WebSocket ping timed out. Going to reconnect...")
            return
        except ConnectionClosed:
            return
finally:
await self.disconnect()
# emit messages
async def METHOD_NAME(self, method: str, data: Optional[Dict[str, Any]] = {}, no_id: bool = False) -> int:
async with self._throttler.execute_task(method):
id = self.generate_request_id()
payload = {
"id": id,
"event": method,
}
await self._client.send(json.dumps({**payload, **data}))
return id
# request via websocket
async def request(self, method: str, data: Optional[Dict[str, Any]] = {}) -> int:
return await self.METHOD_NAME(method, data)
# subscribe to a method
async def subscribe(self,
streams: Optional[Dict[str, List]] = {}) -> int:
return await self.request(Constants.WS_EVENT_SUBSCRIBE, {"streams": streams})
# unsubscribe to a method
async def unsubscribe(self,
streams: Optional[Dict[str, List]] = {}) -> int:
return await self.request(Constants.WS_EVENT_UNSUBSCRIBE, {"streams": streams})
# listen to messages by method
async def on_message(self) -> AsyncIterable[Any]:
async for msg in self._messages():
if msg is None:
yield None
yield msg | null |
474 | ## @file
# Parse an INF file and get data for specified sections.
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
## Import Modules
#
import Common.EdkLogger as EdkLogger
from Common.BuildToolError import *
from Common.DataType import *
class InfSectionParser():
def __init__(self, FilePath):
self._FilePath = FilePath
self._FileSectionDataList = []
self._ParserInf()
def _ParserInf(self):
FileLinesList = []
UserExtFind = False
FindEnd = True
FileLastLine = False
SectionLine = ''
SectionData = []
try:
with open(self._FilePath, "r") as File:
FileLinesList = File.readlines()
except BaseException:
EdkLogger.error("build", AUTOGEN_ERROR, 'File %s is opened failed.' % self._FilePath)
for Index in range(0, len(FileLinesList)):
line = str(FileLinesList[Index]).strip()
if Index + 1 == len(FileLinesList):
FileLastLine = True
NextLine = ''
else:
NextLine = str(FileLinesList[Index + 1]).strip()
            if UserExtFind and not FindEnd:
if line:
SectionData.append(line)
if line.startswith(TAB_SECTION_START) and line.endswith(TAB_SECTION_END):
SectionLine = line
UserExtFind = True
FindEnd = False
if (NextLine != '' and NextLine[0] == TAB_SECTION_START and \
NextLine[-1] == TAB_SECTION_END) or FileLastLine:
UserExtFind = False
FindEnd = True
self._FileSectionDataList.append({SectionLine: SectionData[:]})
del SectionData[:]
SectionLine = ''
# Get user extension TianoCore data
#
    # @return: a list of dictionaries whose key is the section name and whose value is a list containing all section data.
def METHOD_NAME(self):
UserExtensionTianoCore = []
if not self._FileSectionDataList:
return UserExtensionTianoCore
for SectionDataDict in self._FileSectionDataList:
for key in SectionDataDict:
if key.lower().startswith("[userextensions") and key.lower().find('.tianocore.') > -1:
SectionLine = key.lstrip(TAB_SECTION_START).rstrip(TAB_SECTION_END)
SubSectionList = [SectionLine]
if str(SectionLine).find(TAB_COMMA_SPLIT) > -1:
SubSectionList = str(SectionLine).split(TAB_COMMA_SPLIT)
for SubSection in SubSectionList:
if SubSection.lower().find('.tianocore.') > -1:
UserExtensionTianoCore.append({SubSection: SectionDataDict[key]})
return UserExtensionTianoCore
# Get depex expression
#
    # @return: a list of dictionaries whose key is the section name and whose value is a list containing all section data.
def GetDepexExpresionList(self):
DepexExpressionList = []
if not self._FileSectionDataList:
return DepexExpressionList
for SectionDataDict in self._FileSectionDataList:
for key in SectionDataDict:
if key.lower() == "[depex]" or key.lower().startswith("[depex."):
SectionLine = key.lstrip(TAB_SECTION_START).rstrip(TAB_SECTION_END)
SubSectionList = [SectionLine]
if str(SectionLine).find(TAB_COMMA_SPLIT) > -1:
SubSectionList = str(SectionLine).split(TAB_COMMA_SPLIT)
for SubSection in SubSectionList:
SectionList = SubSection.split(TAB_SPLIT)
SubKey = ()
if len(SectionList) == 1:
SubKey = (TAB_ARCH_COMMON, TAB_ARCH_COMMON)
elif len(SectionList) == 2:
SubKey = (SectionList[1], TAB_ARCH_COMMON)
elif len(SectionList) == 3:
SubKey = (SectionList[1], SectionList[2])
else:
EdkLogger.error("build", AUTOGEN_ERROR, 'Section %s is invalid.' % key)
DepexExpressionList.append({SubKey: SectionDataDict[key]})
return DepexExpressionList
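# Minimal usage sketch (the INF path below is a placeholder, not a file in this tree):
#
#   parser = InfSectionParser('MyPkg/MyDriver.inf')
#   for depex in parser.GetDepexExpresionList():
#       print(depex)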
| null |
475 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmse.endpoint import endpoint_data
import json
class AddGatewayRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'mse', '2019-05-31', 'AddGateway','mse')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_InternetSlbSpec(self): # String
return self.get_query_params().get('InternetSlbSpec')
def set_InternetSlbSpec(self, InternetSlbSpec): # String
self.add_query_param('InternetSlbSpec', InternetSlbSpec)
def get_EnableXtrace(self): # Boolean
return self.get_query_params().get('EnableXtrace')
def set_EnableXtrace(self, EnableXtrace): # Boolean
self.add_query_param('EnableXtrace', EnableXtrace)
def get_Replica(self): # Integer
return self.get_query_params().get('Replica')
def set_Replica(self, Replica): # Integer
self.add_query_param('Replica', Replica)
def get_EnableHardwareAcceleration(self): # Boolean
return self.get_query_params().get('EnableHardwareAcceleration')
def set_EnableHardwareAcceleration(self, EnableHardwareAcceleration): # Boolean
self.add_query_param('EnableHardwareAcceleration', EnableHardwareAcceleration)
def get_EnableSls(self): # Boolean
return self.get_query_params().get('EnableSls')
def set_EnableSls(self, EnableSls): # Boolean
self.add_query_param('EnableSls', EnableSls)
def get_Spec(self): # String
return self.get_query_params().get('Spec')
def set_Spec(self, Spec): # String
self.add_query_param('Spec', Spec)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_RequestPars(self): # String
return self.get_query_params().get('RequestPars')
def set_RequestPars(self, RequestPars): # String
self.add_query_param('RequestPars', RequestPars)
def get_EnterpriseSecurityGroup(self): # Boolean
return self.get_query_params().get('EnterpriseSecurityGroup')
def set_EnterpriseSecurityGroup(self, EnterpriseSecurityGroup): # Boolean
self.add_query_param('EnterpriseSecurityGroup', EnterpriseSecurityGroup)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def METHOD_NAME(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_SlbSpec(self): # String
return self.get_query_params().get('SlbSpec')
def set_SlbSpec(self, SlbSpec): # String
self.add_query_param('SlbSpec', SlbSpec)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_Region(self): # String
return self.get_query_params().get('Region')
def set_Region(self, Region): # String
self.add_query_param('Region', Region)
def get_ZoneInfo(self): # Array
return self.get_query_params().get('ZoneInfo')
def set_ZoneInfo(self, ZoneInfo): # Array
self.add_query_param("ZoneInfo", json.dumps(ZoneInfo))
def get_XtraceRatio(self): # String
return self.get_query_params().get('XtraceRatio')
def set_XtraceRatio(self, XtraceRatio): # String
self.add_query_param('XtraceRatio', XtraceRatio)
def get_VSwitchId2(self): # String
return self.get_query_params().get('VSwitchId2')
def set_VSwitchId2(self, VSwitchId2): # String
self.add_query_param('VSwitchId2', VSwitchId2)
def get_Vpc(self): # String
return self.get_query_params().get('Vpc')
def set_Vpc(self, Vpc): # String
self.add_query_param('Vpc', Vpc)
def get_AcceptLanguage(self): # String
return self.get_query_params().get('AcceptLanguage')
def set_AcceptLanguage(self, AcceptLanguage): # String
self.add_query_param('AcceptLanguage', AcceptLanguage)
def get_ChargeType(self): # String
return self.get_query_params().get('ChargeType')
def set_ChargeType(self, ChargeType): # String
self.add_query_param('ChargeType', ChargeType) | null |
476 | from shlex import quote
from typing import Iterable
from pcs.cli.common.output import (
INDENT_STEP,
bool_to_cli_value,
pairs_to_cmd,
)
from pcs.cli.reports.output import warn
from pcs.common.pacemaker.constraint import (
CibConstraintOrderAttributesDto,
CibConstraintOrderDto,
CibConstraintOrderSetDto,
)
from pcs.common.str_tools import (
format_optional,
indent,
pairs_to_text,
)
from . import set as _set
def METHOD_NAME(
attributes_dto: CibConstraintOrderAttributesDto,
) -> list[tuple[str, str]]:
pairs = []
if attributes_dto.symmetrical is not None:
pairs.append(
("symmetrical", bool_to_cli_value(attributes_dto.symmetrical))
)
if attributes_dto.require_all is not None:
pairs.append(
("require-all", bool_to_cli_value(attributes_dto.require_all))
)
if attributes_dto.score:
pairs.append(("score", attributes_dto.score))
if attributes_dto.kind:
pairs.append(("kind", attributes_dto.kind))
return pairs
def _attributes_to_text(
attributes_dto: CibConstraintOrderAttributesDto,
) -> list[str]:
return pairs_to_text(METHOD_NAME(attributes_dto))
def plain_constraint_to_text(
constraint_dto: CibConstraintOrderDto,
with_id: bool,
) -> list[str]:
result = [
"{first_action}resource '{first_resource}' then {then_action}resource '{then_resource}'".format(
first_action=format_optional(constraint_dto.first_action),
first_resource=constraint_dto.first_resource_id,
then_action=format_optional(constraint_dto.then_action),
then_resource=constraint_dto.then_resource_id,
)
]
if with_id:
result[0] += f" (id: {constraint_dto.attributes.constraint_id})"
result.extend(
indent(
_attributes_to_text(constraint_dto.attributes),
indent_step=INDENT_STEP,
)
)
return result
def set_constraint_to_text(
constraint_dto: CibConstraintOrderSetDto,
with_id: bool,
) -> list[str]:
return _set.set_constraint_to_text(
constraint_dto.attributes.constraint_id,
_attributes_to_text(constraint_dto.attributes),
constraint_dto.resource_sets,
with_id,
)
def constraints_to_text(
plain_dtos: Iterable[CibConstraintOrderDto],
set_dtos: Iterable[CibConstraintOrderSetDto],
with_id: bool,
) -> list[str]:
result = []
if plain_dtos:
result.append("Order Constraints:")
for constraint_dto in plain_dtos:
result.extend(
indent(
plain_constraint_to_text(constraint_dto, with_id),
indent_step=INDENT_STEP,
)
)
if set_dtos:
result.append("Order Set Constraints:")
for set_constraint_dto in set_dtos:
result.extend(
indent(
set_constraint_to_text(set_constraint_dto, with_id),
indent_step=INDENT_STEP,
)
)
return result
def _attributes_to_cmd_pairs(
attributes_dto: CibConstraintOrderAttributesDto,
) -> list[tuple[str, str]]:
return [("id", attributes_dto.constraint_id)] + METHOD_NAME(
attributes_dto
)
def plain_constraint_to_cmd(
constraint_dto: CibConstraintOrderDto,
) -> list[str]:
if (
constraint_dto.first_resource_instance is not None
or constraint_dto.then_resource_instance is not None
):
warn(
"Resource instance(s) detected in constraint "
f"'{constraint_dto.attributes.constraint_id}' but not supported by "
"this command."
" Command for creating the constraint is omitted."
)
return []
result = [
"pcs -- constraint order {first_action}{first_resource_id} then {then_action}{then_resource_id}".format(
first_action=format_optional(constraint_dto.first_action),
first_resource_id=quote(constraint_dto.first_resource_id),
then_action=format_optional(constraint_dto.then_action),
then_resource_id=quote(constraint_dto.then_resource_id),
)
]
params = pairs_to_cmd(_attributes_to_cmd_pairs(constraint_dto.attributes))
if params:
result.extend(indent([params], indent_step=INDENT_STEP))
return result
def set_constraint_to_cmd(
constraint_dto: CibConstraintOrderSetDto,
) -> list[str]:
result = ["pcs -- constraint order"]
for resource_set in constraint_dto.resource_sets:
set_cmd_part = _set.resource_set_to_cmd(resource_set)
if not set_cmd_part:
return []
result.extend(indent(set_cmd_part, indent_step=INDENT_STEP))
pairs = []
for pair in _attributes_to_cmd_pairs(constraint_dto.attributes):
# this list is based on pcs.lib.cib.constraint.order.ATTRIB
if pair[0] not in ("symmetrical", "kind", "id"):
warn(
f"Option '{pair[0]}' detected in constraint "
f"'{constraint_dto.attributes.constraint_id}' but not "
"supported by this command."
" Command for creating the constraint is omitted."
)
return []
pairs.append(pair)
if pairs:
result.extend(
indent(
[f"setoptions {pairs_to_cmd(pairs)}"], indent_step=INDENT_STEP
)
)
return result | null |
477 | import pytest
from unittest.mock import (
Mock,
)
from web3.providers.eth_tester.middleware import (
async_default_transaction_fields_middleware,
default_transaction_fields_middleware,
)
from web3.types import (
BlockData,
)
SAMPLE_ADDRESS_LIST = [
"0x0000000000000000000000000000000000000001",
"0x0000000000000000000000000000000000000002",
"0x0000000000000000000000000000000000000003",
]
SAMPLE_ADDRESS = "0x0000000000000000000000000000000000000004"
@pytest.mark.parametrize("block_number", {0, "0x0", "earliest"})
def test_get_transaction_count_formatters(w3, block_number):
tx_counts = w3.eth.get_transaction_count(w3.eth.accounts[-1], block_number)
assert tx_counts == 0
def METHOD_NAME(w3):
all_block_keys = BlockData.__annotations__.keys()
all_non_poa_block_keys = set(
[k for k in all_block_keys if k != "proofOfAuthorityData"]
)
latest_block = w3.eth.get_block("latest")
latest_block_keys = set(latest_block.keys())
assert all_non_poa_block_keys == latest_block_keys
@pytest.mark.parametrize(
"w3_accounts, w3_coinbase, method, from_field_added, from_field_value",
(
(SAMPLE_ADDRESS_LIST, SAMPLE_ADDRESS, "eth_call", True, SAMPLE_ADDRESS),
(
SAMPLE_ADDRESS_LIST,
SAMPLE_ADDRESS,
"eth_estimateGas",
True,
SAMPLE_ADDRESS,
),
(
SAMPLE_ADDRESS_LIST,
SAMPLE_ADDRESS,
"eth_sendTransaction",
True,
SAMPLE_ADDRESS,
),
(SAMPLE_ADDRESS_LIST, SAMPLE_ADDRESS, "eth_gasPrice", False, None),
(SAMPLE_ADDRESS_LIST, SAMPLE_ADDRESS, "eth_blockNumber", False, None),
(SAMPLE_ADDRESS_LIST, SAMPLE_ADDRESS, "meow", False, None),
(SAMPLE_ADDRESS_LIST, None, "eth_call", True, SAMPLE_ADDRESS_LIST[0]),
(SAMPLE_ADDRESS_LIST, None, "eth_estimateGas", True, SAMPLE_ADDRESS_LIST[0]),
(
SAMPLE_ADDRESS_LIST,
None,
"eth_sendTransaction",
True,
SAMPLE_ADDRESS_LIST[0],
),
(SAMPLE_ADDRESS_LIST, None, "eth_gasPrice", False, None),
(SAMPLE_ADDRESS_LIST, None, "eth_blockNumber", False, None),
(SAMPLE_ADDRESS_LIST, None, "meow", False, None),
(None, SAMPLE_ADDRESS, "eth_call", True, SAMPLE_ADDRESS),
(None, SAMPLE_ADDRESS, "eth_estimateGas", True, SAMPLE_ADDRESS),
(None, SAMPLE_ADDRESS, "eth_sendTransaction", True, SAMPLE_ADDRESS),
(None, SAMPLE_ADDRESS, "eth_gasPrice", False, SAMPLE_ADDRESS),
(None, SAMPLE_ADDRESS, "eth_blockNumber", False, SAMPLE_ADDRESS),
(None, SAMPLE_ADDRESS, "meow", False, SAMPLE_ADDRESS),
(None, None, "eth_call", True, None),
(None, None, "eth_estimateGas", True, None),
(None, None, "eth_sendTransaction", True, None),
(None, None, "eth_gasPrice", False, None),
(None, None, "eth_blockNumber", False, None),
(None, None, "meow", False, None),
),
)
def test_default_transaction_fields_middleware(
w3_accounts, w3_coinbase, method, from_field_added, from_field_value
):
def mock_request(_method, params):
return params
mock_w3 = Mock()
mock_w3.eth.accounts = w3_accounts
mock_w3.eth.coinbase = w3_coinbase
middleware = default_transaction_fields_middleware(mock_request, mock_w3)
base_params = {"chainId": 5}
filled_transaction = middleware(method, [base_params])
filled_params = filled_transaction[0]
assert ("from" in filled_params.keys()) == from_field_added
if "from" in filled_params.keys():
assert filled_params["from"] == from_field_value
filled_transaction[0].pop("from", None)
assert filled_transaction[0] == base_params
# -- async -- #
@pytest.mark.parametrize(
"w3_accounts, w3_coinbase, method, from_field_added, from_field_value",
(
(SAMPLE_ADDRESS_LIST, SAMPLE_ADDRESS, "eth_call", True, SAMPLE_ADDRESS),
(
SAMPLE_ADDRESS_LIST,
SAMPLE_ADDRESS,
"eth_estimateGas",
True,
SAMPLE_ADDRESS,
),
(
SAMPLE_ADDRESS_LIST,
SAMPLE_ADDRESS,
"eth_sendTransaction",
True,
SAMPLE_ADDRESS,
),
(SAMPLE_ADDRESS_LIST, SAMPLE_ADDRESS, "eth_gasPrice", False, None),
(SAMPLE_ADDRESS_LIST, SAMPLE_ADDRESS, "eth_blockNumber", False, None),
(SAMPLE_ADDRESS_LIST, SAMPLE_ADDRESS, "meow", False, None),
(SAMPLE_ADDRESS_LIST, None, "eth_call", True, SAMPLE_ADDRESS_LIST[0]),
(SAMPLE_ADDRESS_LIST, None, "eth_estimateGas", True, SAMPLE_ADDRESS_LIST[0]),
(
SAMPLE_ADDRESS_LIST,
None,
"eth_sendTransaction",
True,
SAMPLE_ADDRESS_LIST[0],
),
(SAMPLE_ADDRESS_LIST, None, "eth_gasPrice", False, None),
(SAMPLE_ADDRESS_LIST, None, "eth_blockNumber", False, None),
(SAMPLE_ADDRESS_LIST, None, "meow", False, None),
(None, SAMPLE_ADDRESS, "eth_call", True, SAMPLE_ADDRESS),
(None, SAMPLE_ADDRESS, "eth_estimateGas", True, SAMPLE_ADDRESS),
(None, SAMPLE_ADDRESS, "eth_sendTransaction", True, SAMPLE_ADDRESS),
(None, SAMPLE_ADDRESS, "eth_gasPrice", False, SAMPLE_ADDRESS),
(None, SAMPLE_ADDRESS, "eth_blockNumber", False, SAMPLE_ADDRESS),
(None, SAMPLE_ADDRESS, "meow", False, SAMPLE_ADDRESS),
(None, None, "eth_call", True, None),
(None, None, "eth_estimateGas", True, None),
(None, None, "eth_sendTransaction", True, None),
(None, None, "eth_gasPrice", False, None),
(None, None, "eth_blockNumber", False, None),
(None, None, "meow", False, None),
),
)
@pytest.mark.asyncio
async def test_async_default_transaction_fields_middleware(
w3_accounts,
w3_coinbase,
method,
from_field_added,
from_field_value,
):
async def mock_request(_method, params):
return params
async def mock_async_accounts():
return w3_accounts
async def mock_async_coinbase():
return w3_coinbase
mock_w3 = Mock()
mock_w3.eth.accounts = mock_async_accounts()
mock_w3.eth.coinbase = mock_async_coinbase()
middleware = await async_default_transaction_fields_middleware(
mock_request, mock_w3
)
base_params = {"chainId": 5}
filled_transaction = await middleware(method, [base_params])
filled_params = filled_transaction[0]
assert ("from" in filled_params.keys()) == from_field_added
if "from" in filled_params.keys():
assert filled_params["from"] == from_field_value
filled_transaction[0].pop("from", None)
assert filled_transaction[0] == base_params
# clean up
mock_w3.eth.accounts.close()
mock_w3.eth.coinbase.close() | null |
478 | import pytest
from web3.exceptions import (
Web3ValidationError,
)
def test_contract_estimate_gas(w3, math_contract, estimate_gas, transact):
gas_estimate = estimate_gas(
contract=math_contract, contract_function="incrementCounter"
)
txn_hash = transact(contract=math_contract, contract_function="incrementCounter")
txn_receipt = w3.eth.wait_for_transaction_receipt(txn_hash)
gas_used = txn_receipt.get("gasUsed")
assert abs(gas_estimate - gas_used) < 21000
def test_contract_fallback_estimate_gas(w3, fallback_function_contract):
gas_estimate = fallback_function_contract.fallback.estimate_gas()
txn_hash = fallback_function_contract.fallback.transact()
txn_receipt = w3.eth.wait_for_transaction_receipt(txn_hash)
gas_used = txn_receipt.get("gasUsed")
assert abs(gas_estimate - gas_used) < 21000
def test_contract_estimate_gas_with_arguments(
w3, math_contract, estimate_gas, transact
):
gas_estimate = estimate_gas(
contract=math_contract, contract_function="add", func_args=[5, 6]
)
txn_hash = transact(
contract=math_contract, contract_function="add", func_args=[5, 6]
)
txn_receipt = w3.eth.wait_for_transaction_receipt(txn_hash)
gas_used = txn_receipt.get("gasUsed")
assert abs(gas_estimate - gas_used) < 21000
def test_estimate_gas_not_sending_ether_to_nonpayable_function(
w3, payable_tester_contract, estimate_gas, transact
):
gas_estimate = estimate_gas(
contract=payable_tester_contract, contract_function="doNoValueCall"
)
txn_hash = transact(
contract=payable_tester_contract, contract_function="doNoValueCall"
)
txn_receipt = w3.eth.wait_for_transaction_receipt(txn_hash)
gas_used = txn_receipt.get("gasUsed")
assert abs(gas_estimate - gas_used) < 21000
def test_estimate_gas_sending_ether_to_nonpayable_function(
w3, payable_tester_contract, estimate_gas
):
with pytest.raises(Web3ValidationError):
estimate_gas(
contract=payable_tester_contract,
contract_function="doNoValueCall",
tx_params={"value": 1},
)
def test_estimate_gas_accepts_latest_block(w3, math_contract, transact):
gas_estimate = math_contract.functions.counter().estimate_gas(
block_identifier="latest"
)
txn_hash = transact(contract=math_contract, contract_function="incrementCounter")
txn_receipt = w3.eth.wait_for_transaction_receipt(txn_hash)
gas_used = txn_receipt.get("gasUsed")
assert abs(gas_estimate - gas_used) < 21000
def test_estimate_gas_block_identifier_unique_estimates(w3, math_contract, transact):
txn_hash = transact(contract=math_contract, contract_function="incrementCounter")
w3.eth.wait_for_transaction_receipt(txn_hash)
latest_gas_estimate = math_contract.functions.counter().estimate_gas(
block_identifier="latest"
)
earliest_gas_estimate = math_contract.functions.counter().estimate_gas(
block_identifier="earliest"
)
assert latest_gas_estimate != earliest_gas_estimate
@pytest.mark.asyncio
async def test_async_contract_estimate_gas(
async_w3, async_math_contract, async_estimate_gas, async_transact
):
gas_estimate = await async_estimate_gas(
contract=async_math_contract, contract_function="incrementCounter"
)
txn_hash = await async_transact(
contract=async_math_contract, contract_function="incrementCounter"
)
txn_receipt = await async_w3.eth.wait_for_transaction_receipt(txn_hash)
gas_used = txn_receipt.get("gasUsed")
assert abs(gas_estimate - gas_used) < 21000
@pytest.mark.asyncio
async def METHOD_NAME(
async_w3, async_fallback_function_contract
):
gas_estimate = await async_fallback_function_contract.fallback.estimate_gas()
txn_hash = await async_fallback_function_contract.fallback.transact()
txn_receipt = await async_w3.eth.wait_for_transaction_receipt(txn_hash)
gas_used = txn_receipt.get("gasUsed")
assert abs(gas_estimate - gas_used) < 21000
@pytest.mark.asyncio
async def test_async_contract_estimate_gas_with_arguments(
async_w3, async_math_contract, async_estimate_gas, async_transact
):
gas_estimate = await async_estimate_gas(
contract=async_math_contract, contract_function="add", func_args=[5, 6]
)
txn_hash = await async_transact(
contract=async_math_contract, contract_function="add", func_args=[5, 6]
)
txn_receipt = await async_w3.eth.wait_for_transaction_receipt(txn_hash)
gas_used = txn_receipt.get("gasUsed")
assert abs(gas_estimate - gas_used) < 21000
@pytest.mark.asyncio
async def test_async_estimate_gas_not_sending_ether_to_nonpayable_function(
async_w3, async_payable_tester_contract, async_estimate_gas, async_transact
):
gas_estimate = await async_estimate_gas(
contract=async_payable_tester_contract, contract_function="doNoValueCall"
)
txn_hash = await async_transact(
contract=async_payable_tester_contract, contract_function="doNoValueCall"
)
txn_receipt = await async_w3.eth.wait_for_transaction_receipt(txn_hash)
gas_used = txn_receipt.get("gasUsed")
assert abs(gas_estimate - gas_used) < 21000
@pytest.mark.asyncio
async def test_async_estimate_gas_sending_ether_to_nonpayable_function(
async_w3, async_payable_tester_contract, async_estimate_gas
):
with pytest.raises(Web3ValidationError):
await async_estimate_gas(
contract=async_payable_tester_contract,
contract_function="doNoValueCall",
tx_params={"value": 1},
)
@pytest.mark.asyncio
async def test_async_estimate_gas_accepts_latest_block(
async_w3, async_math_contract, async_transact
):
gas_estimate = await async_math_contract.functions.counter().estimate_gas(
block_identifier="latest"
)
txn_hash = await async_transact(
contract=async_math_contract, contract_function="incrementCounter"
)
txn_receipt = await async_w3.eth.wait_for_transaction_receipt(txn_hash)
gas_used = txn_receipt.get("gasUsed")
assert abs(gas_estimate - gas_used) < 21000
@pytest.mark.asyncio
async def test_async_estimate_gas_block_identifier_unique_estimates(
async_w3, async_math_contract, async_transact
):
txn_hash = await async_transact(
contract=async_math_contract, contract_function="incrementCounter"
)
await async_w3.eth.wait_for_transaction_receipt(txn_hash)
latest_gas_estimate = await async_math_contract.functions.counter().estimate_gas(
block_identifier="latest"
)
earliest_gas_estimate = await async_math_contract.functions.counter().estimate_gas(
block_identifier="earliest"
)
assert latest_gas_estimate != earliest_gas_estimate | null |
479 | # Copyright (c) ZenML GmbH 2022. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""GCP secrets manager flavor."""
import re
from typing import TYPE_CHECKING, ClassVar, Optional, Type
from zenml.integrations.gcp import GCP_SECRETS_MANAGER_FLAVOR
from zenml.secrets_managers import (
BaseSecretsManagerConfig,
BaseSecretsManagerFlavor,
)
from zenml.secrets_managers.base_secrets_manager import SecretsManagerScope
if TYPE_CHECKING:
from zenml.integrations.gcp.secrets_manager import GCPSecretsManager
def validate_gcp_secret_name_or_namespace(name: str) -> None:
"""Validate a secret name or namespace.
A Google secret ID is a string with a maximum length of 255 characters
and can contain uppercase and lowercase letters, numerals, and the
hyphen (-) and underscore (_) characters. For scoped secrets, we have to
limit the size of the name and namespace even further to allow space for
both in the Google secret ID.
    Given that we also save secret names and namespaces as labels, we are
    also bound by the limits that Google imposes on label values: at most
    63 characters, containing only lowercase letters, numerals,
    and the hyphen (-) and underscore (_) characters.
Args:
name: the secret name or namespace
Raises:
ValueError: if the secret name or namespace is invalid
"""
if not re.fullmatch(r"[a-z0-9_\-]+", name):
raise ValueError(
f"Invalid secret name or namespace '{name}'. Must contain "
f"only lowercase alphanumeric characters and the hyphen (-) and "
f"underscore (_) characters."
)
if name and len(name) > 63:
raise ValueError(
f"Invalid secret name or namespace '{name}'. The length is "
f"limited to maximum 63 characters."
)
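# Illustrative checks based on the rules enforced above (comments only, not tests):
#
#   validate_gcp_secret_name_or_namespace("my-secret_01")  # passes silently
#   validate_gcp_secret_name_or_namespace("MySecret")      # raises ValueError (uppercase)
#   validate_gcp_secret_name_or_namespace("x" * 64)        # raises ValueError (longer than 63)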
class GCPSecretsManagerConfig(BaseSecretsManagerConfig):
"""Configuration for the GCP Secrets Manager.
Attributes:
project_id: This is necessary to access the correct GCP project.
The project_id of your GCP project space that contains the Secret
Manager.
"""
SUPPORTS_SCOPING: ClassVar[bool] = True
project_id: str
@classmethod
def _validate_scope(
cls,
scope: SecretsManagerScope,
namespace: Optional[str],
) -> None:
"""Validate the scope and namespace value.
Args:
scope: Scope value.
namespace: Optional namespace value.
"""
if namespace:
validate_gcp_secret_name_or_namespace(namespace)
class GCPSecretsManagerFlavor(BaseSecretsManagerFlavor):
"""Class for the `GCPSecretsManagerFlavor`."""
@property
def name(self) -> str:
"""Name of the flavor.
Returns:
The name of the flavor.
"""
return GCP_SECRETS_MANAGER_FLAVOR
@property
def docs_url(self) -> Optional[str]:
"""A url to point at docs explaining this flavor.
Returns:
A flavor docs url.
"""
return self.generate_default_docs_url()
@property
def sdk_docs_url(self) -> Optional[str]:
"""A url to point at SDK docs explaining this flavor.
Returns:
A flavor SDK docs url.
"""
return self.generate_default_sdk_docs_url()
@property
def METHOD_NAME(self) -> str:
"""A url to represent the flavor in the dashboard.
Returns:
The flavor logo.
"""
return "https://public-flavor-logos.s3.eu-central-1.amazonaws.com/secrets_managers/gcp.png"
@property
def config_class(self) -> Type[GCPSecretsManagerConfig]:
"""Returns GCPSecretsManagerConfig config class.
Returns:
The config class.
"""
return GCPSecretsManagerConfig
@property
def implementation_class(self) -> Type["GCPSecretsManager"]:
"""Implementation class for this flavor.
Returns:
The implementation class.
"""
from zenml.integrations.gcp.secrets_manager import GCPSecretsManager
return GCPSecretsManager | null |
480 | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://github.com/openapi-json-schema-tools/openapi-json-schema-generator
"""
from __future__ import annotations
from petstore_api.shared_imports.schema_imports import * # pyright: ignore [reportWildcardImportFromLibrary]
AdditionalProperties: typing_extensions.TypeAlias = schemas.NotAnyTypeSchema
from petstore_api.paths.fake.get.parameters.parameter_0 import schema as schema_2
from petstore_api.paths.fake.get.parameters.parameter_1 import schema
Properties = typing.TypedDict(
'Properties',
{
"enum_header_string": typing.Type[schema.Schema],
"enum_header_string_array": typing.Type[schema_2.Schema],
}
)
class HeaderParametersDict(schemas.immutabledict[str, schemas.OUTPUT_BASE_TYPES]):
__required_keys__: typing.FrozenSet[str] = frozenset({
})
__optional_keys__: typing.FrozenSet[str] = frozenset({
"enum_header_string",
"enum_header_string_array",
})
def __new__(
cls,
*,
enum_header_string: typing.Union[
typing.Literal[
"_abc",
"-efg",
"(xyz)"
],
schemas.Unset
] = schemas.unset,
enum_header_string_array: typing.Union[
schema_2.SchemaTupleInput,
schema_2.SchemaTuple,
schemas.Unset
] = schemas.unset,
configuration_: typing.Optional[schema_configuration.SchemaConfiguration] = None,
):
arg_: typing.Dict[str, typing.Any] = {}
for key, val in (
("enum_header_string", enum_header_string),
("enum_header_string_array", enum_header_string_array),
):
if isinstance(val, schemas.Unset):
continue
arg_[key] = val
used_arg_ = typing.cast(HeaderParametersDictInput, arg_)
return HeaderParameters.METHOD_NAME(used_arg_, configuration=configuration_)
@staticmethod
def from_dict_(
arg: typing.Union[
HeaderParametersDictInput,
HeaderParametersDict
],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> HeaderParametersDict:
return HeaderParameters.METHOD_NAME(arg, configuration=configuration)
@property
def enum_header_string(self) -> typing.Union[typing.Literal["_abc", "-efg", "(xyz)"], schemas.Unset]:
val = self.get("enum_header_string", schemas.unset)
if isinstance(val, schemas.Unset):
return val
return typing.cast(
typing.Literal["_abc", "-efg", "(xyz)"],
val
)
@property
def enum_header_string_array(self) -> typing.Union[schema_2.SchemaTuple, schemas.Unset]:
val = self.get("enum_header_string_array", schemas.unset)
if isinstance(val, schemas.Unset):
return val
return typing.cast(
schema_2.SchemaTuple,
val
)
HeaderParametersDictInput = typing.TypedDict(
'HeaderParametersDictInput',
{
"enum_header_string": typing.Literal[
"_abc",
"-efg",
"(xyz)"
],
"enum_header_string_array": typing.Union[
schema_2.SchemaTupleInput,
schema_2.SchemaTuple
],
},
total=False
)
@dataclasses.dataclass(frozen=True)
class HeaderParameters(
schemas.Schema[HeaderParametersDict, tuple]
):
types: typing.FrozenSet[typing.Type] = frozenset({schemas.immutabledict})
properties: Properties = dataclasses.field(default_factory=lambda: schemas.typed_dict_to_instance(Properties)) # type: ignore
additional_properties: typing.Type[AdditionalProperties] = dataclasses.field(default_factory=lambda: AdditionalProperties) # type: ignore
type_to_output_cls: typing.Mapping[
typing.Type,
typing.Type
] = dataclasses.field(
default_factory=lambda: {
schemas.immutabledict: HeaderParametersDict
}
)
@classmethod
def METHOD_NAME(
cls,
arg: typing.Union[
HeaderParametersDictInput,
HeaderParametersDict,
],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> HeaderParametersDict:
return super().validate_base(
arg,
configuration=configuration,
)
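    # Example construction (sketch; the header value comes from the enum schema above):
    #
    #   params = HeaderParametersDict(enum_header_string="_abc")
    #   params.enum_header_string  # -> "_abc"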
| null |
481 | #!/usr/bin/env python3
# Copyright (c) 2018-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the deriveaddresses rpc call."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.descriptors import descsum_create
from test_framework.util import assert_equal, assert_raises_rpc_error
class DeriveaddressesTest(BitcoinTestFramework):
def METHOD_NAME(self):
self.num_nodes = 1
def run_test(self):
assert_raises_rpc_error(-5, "Missing checksum", self.nodes[0].deriveaddresses, "a")
descriptor = "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)#t6wfjs64"
address = "ert1qjqmxmkpmxt80xz4y3746zgt0q3u3ferrfpgxn5"
assert_equal(self.nodes[0].deriveaddresses(descriptor), [address])
descriptor = descriptor[:-9]
assert_raises_rpc_error(-5, "Missing checksum", self.nodes[0].deriveaddresses, descriptor)
descriptor_pubkey = "wpkh(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/0)#s9ga3alw"
address = "ert1qjqmxmkpmxt80xz4y3746zgt0q3u3ferrfpgxn5"
assert_equal(self.nodes[0].deriveaddresses(descriptor_pubkey), [address])
ranged_descriptor = "wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)#kft60nuy"
assert_equal(self.nodes[0].deriveaddresses(ranged_descriptor, [1, 2]), ["ert1qhku5rq7jz8ulufe2y6fkcpnlvpsta7rqdpq5ny", "ert1qpgptk2gvshyl0s9lqshsmx932l9ccsv2zq7jrq"])
assert_equal(self.nodes[0].deriveaddresses(ranged_descriptor, 2), [address, "ert1qhku5rq7jz8ulufe2y6fkcpnlvpsta7rqdpq5ny", "ert1qpgptk2gvshyl0s9lqshsmx932l9ccsv2zq7jrq"])
assert_raises_rpc_error(-8, "Range should not be specified for an un-ranged descriptor", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)"), [0, 2])
assert_raises_rpc_error(-8, "Range must be specified for a ranged descriptor", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"))
assert_raises_rpc_error(-8, "End of range is too high", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"), 10000000000)
assert_raises_rpc_error(-8, "Range is too large", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"), [1000000000, 2000000000])
assert_raises_rpc_error(-8, "Range specified as [begin,end] must not have begin after end", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"), [2, 0])
assert_raises_rpc_error(-8, "Range should be greater or equal than 0", self.nodes[0].deriveaddresses, descsum_create("wpkh(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)"), [-1, 0])
combo_descriptor = descsum_create("combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)")
assert_equal(self.nodes[0].deriveaddresses(combo_descriptor), ["2dnaGtwYgBhXYQGTArxKKapi52Mkf3KTQhb", "2dnaGtwYgBhXYQGTArxKKapi52Mkf3KTQhb", address, "XY2Fo8bxL1EViXjWrZ5iZrb5thmfPvWJxw"])
hardened_without_privkey_descriptor = descsum_create("wpkh(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1'/1/0)")
assert_raises_rpc_error(-5, "Cannot derive script without private keys", self.nodes[0].deriveaddresses, hardened_without_privkey_descriptor)
bare_multisig_descriptor = descsum_create("multi(1,tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/0,tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/1)")
assert_raises_rpc_error(-5, "Descriptor does not have a corresponding address", self.nodes[0].deriveaddresses, bare_multisig_descriptor)
if __name__ == '__main__':
DeriveaddressesTest().main() | null |
482 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdas.endpoint import endpoint_data
class CreateCloudBenchTasksRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'DAS', '2020-01-16', 'CreateCloudBenchTasks')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClientType(self): # String
return self.get_query_params().get('ClientType')
def set_ClientType(self, ClientType): # String
self.add_query_param('ClientType', ClientType)
def get_DstPort(self): # String
return self.get_query_params().get('DstPort')
def set_DstPort(self, DstPort): # String
self.add_query_param('DstPort', DstPort)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_RequestStartTime(self): # String
return self.get_query_params().get('RequestStartTime')
def set_RequestStartTime(self, RequestStartTime): # String
self.add_query_param('RequestStartTime', RequestStartTime)
def get_DstConnectionString(self): # String
return self.get_query_params().get('DstConnectionString')
def set_DstConnectionString(self, DstConnectionString): # String
self.add_query_param('DstConnectionString', DstConnectionString)
def get_DstSuperPassword(self): # String
return self.get_query_params().get('DstSuperPassword')
def set_DstSuperPassword(self, DstSuperPassword): # String
self.add_query_param('DstSuperPassword', DstSuperPassword)
def get_DstSuperAccount(self): # String
return self.get_query_params().get('DstSuperAccount')
def set_DstSuperAccount(self, DstSuperAccount): # String
self.add_query_param('DstSuperAccount', DstSuperAccount)
def get_DstInstanceId(self): # String
return self.get_query_params().get('DstInstanceId')
def set_DstInstanceId(self, DstInstanceId): # String
self.add_query_param('DstInstanceId', DstInstanceId)
def get_Rate(self): # String
return self.get_query_params().get('Rate')
def set_Rate(self, Rate): # String
self.add_query_param('Rate', Rate)
def get_RequestDuration(self): # String
return self.get_query_params().get('RequestDuration')
def set_RequestDuration(self, RequestDuration): # String
self.add_query_param('RequestDuration', RequestDuration)
def get_DtsJobId(self): # String
return self.get_query_params().get('DtsJobId')
def set_DtsJobId(self, DtsJobId): # String
self.add_query_param('DtsJobId', DtsJobId)
def get_RequestEndTime(self): # String
return self.get_query_params().get('RequestEndTime')
def set_RequestEndTime(self, RequestEndTime): # String
self.add_query_param('RequestEndTime', RequestEndTime)
def get_Amount(self): # String
return self.get_query_params().get('Amount')
def set_Amount(self, Amount): # String
self.add_query_param('Amount', Amount)
def get_TaskType(self): # String
return self.get_query_params().get('TaskType')
def set_TaskType(self, TaskType): # String
self.add_query_param('TaskType', TaskType)
def get_EndState(self): # String
return self.get_query_params().get('EndState')
def METHOD_NAME(self, EndState): # String
self.add_query_param('EndState', EndState)
def get_BackupId(self): # String
return self.get_query_params().get('BackupId')
def set_BackupId(self, BackupId): # String
self.add_query_param('BackupId', BackupId)
def get_SrcSuperPassword(self): # String
return self.get_query_params().get('SrcSuperPassword')
def set_SrcSuperPassword(self, SrcSuperPassword): # String
self.add_query_param('SrcSuperPassword', SrcSuperPassword)
def get_BackupTime(self): # String
return self.get_query_params().get('BackupTime')
def set_BackupTime(self, BackupTime): # String
self.add_query_param('BackupTime', BackupTime)
def get_GatewayVpcIp(self): # String
return self.get_query_params().get('GatewayVpcIp')
def set_GatewayVpcIp(self, GatewayVpcIp): # String
self.add_query_param('GatewayVpcIp', GatewayVpcIp)
def get_WorkDir(self): # String
return self.get_query_params().get('WorkDir')
def set_WorkDir(self, WorkDir): # String
self.add_query_param('WorkDir', WorkDir)
def get_DtsJobClass(self): # String
return self.get_query_params().get('DtsJobClass')
def set_DtsJobClass(self, DtsJobClass): # String
self.add_query_param('DtsJobClass', DtsJobClass)
def get_SrcPublicIp(self): # String
return self.get_query_params().get('SrcPublicIp')
def set_SrcPublicIp(self, SrcPublicIp): # String
self.add_query_param('SrcPublicIp', SrcPublicIp)
def get_SrcInstanceId(self): # String
return self.get_query_params().get('SrcInstanceId')
def set_SrcInstanceId(self, SrcInstanceId): # String
self.add_query_param('SrcInstanceId', SrcInstanceId)
def get_DstType(self): # String
return self.get_query_params().get('DstType')
def set_DstType(self, DstType): # String
self.add_query_param('DstType', DstType)
def get_SrcSuperAccount(self): # String
return self.get_query_params().get('SrcSuperAccount')
def set_SrcSuperAccount(self, SrcSuperAccount): # String
self.add_query_param('SrcSuperAccount', SrcSuperAccount)
def get_GatewayVpcId(self): # String
return self.get_query_params().get('GatewayVpcId')
def set_GatewayVpcId(self, GatewayVpcId): # String
self.add_query_param('GatewayVpcId', GatewayVpcId)
def get_SmartPressureTime(self): # String
return self.get_query_params().get('SmartPressureTime')
def set_SmartPressureTime(self, SmartPressureTime): # String
self.add_query_param('SmartPressureTime', SmartPressureTime) | null |
483 | # Copyright 2013 by Rackspace Hosting, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous deprecation utilities.
This module provides decorators to mark functions and classes as deprecated.
"""
import functools
from typing import Any
from typing import Callable
from typing import Optional
import warnings
__all__ = (
'AttributeRemovedError',
'DeprecatedWarning',
'deprecated',
'deprecated_args',
)
class AttributeRemovedError(AttributeError):
"""A deprecated attribute, class, or function has been subsequently removed."""
# NOTE(kgriffs): We don't want our deprecations to be ignored by default,
# so create our own type.
#
# TODO(kgriffs): Revisit this decision if users complain.
class DeprecatedWarning(UserWarning):
pass
def deprecated(
instructions: str, is_property: bool = False, method_name: Optional[str] = None
) -> Callable[[Callable[..., Any]], Any]:
"""Flag a method as deprecated.
This function returns a decorator which can be used to mark deprecated
functions. Applying this decorator will result in a warning being
emitted when the function is used.
Args:
instructions (str): Specific guidance for the developer, e.g.:
'Please migrate to add_proxy(...)'.
is_property (bool): If the deprecated object is a property. It
will omit the ``(...)`` from the generated documentation.
method_name (str, optional): Set to override the name of the
deprecated function or property in the generated
documentation (default ``None``). This is useful when
decorating an alias that carries the target's ``__name__``.
"""
def decorator(func: Callable[..., Any]) -> Callable[[Callable[..., Any]], Any]:
object_name = 'property' if is_property else 'function'
post_name = '' if is_property else '(...)'
message = 'Call to deprecated {} {}{}. {}'.format(
object_name, method_name or func.__name__, post_name, instructions
)
@functools.wraps(func)
def METHOD_NAME(*args: Any, **kwargs: Any) -> Callable[..., Any]:
warnings.warn(message, category=DeprecatedWarning, stacklevel=2)
return func(*args, **kwargs)
return METHOD_NAME
return decorator
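# NOTE: illustrative usage sketch only, assuming a hypothetical add_header()/add_headers()
# pair rather than any real API:
#
#     @deprecated('Please migrate to add_header(...)')
#     def add_headers(self, headers):
#         return self.add_header(headers)
#
# Calling add_headers(...) then emits a DeprecatedWarning pointing the caller at
# add_header(...).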
def deprecated_args(
*, allowed_positional: int, is_method: bool = True
) -> Callable[..., Callable[..., Any]]:
"""Flag a method call with positional args as deprecated.
Keyword Args:
allowed_positional (int): Number of allowed positional arguments
is_method (bool, optional): The decorated function is a method. Will
add one to the number of allowed positional args to account for
``self``. Defaults to True.
"""
template = (
'Calls to {{fn}}(...) with{arg_text} positional args are deprecated.'
' Please specify them as keyword arguments instead.'
)
text = ' more than {}'.format(allowed_positional) if allowed_positional else ''
warn_text = template.format(arg_text=text)
if is_method:
allowed_positional += 1
def deprecated_args(fn: Callable[..., Any]) -> Callable[..., Callable[..., Any]]:
@functools.wraps(fn)
def wraps(*args: Any, **kwargs: Any) -> Callable[..., Any]:
if len(args) > allowed_positional:
warnings.warn(
warn_text.format(fn=fn.__qualname__),
DeprecatedWarning,
stacklevel=2,
)
return fn(*args, **kwargs)
return wraps
return deprecated_args | null |
484 | import pytest
from simulation.errors import NoNearbyArtefactsError
from simulation.location import Location
from simulation.utils import NearbyArtefactsList
from simulation.world_map import ARTEFACT_TYPES, WorldMapCreator
@pytest.fixture
def METHOD_NAME():
return {"location": {"x": 0, "y": 0}, "backpack": [{"type": "key"}]}
def _generate_cells(columns=3, rows=3):
cells = [
{
"location": {"x": x, "y": y},
"habitable": True,
"avatar": None,
"interactable": None,
}
for x in range(-columns // 2 + 1, 1 + columns // 2)
for y in range(-rows // 2 + 1, 1 + rows // 2)
]
return cells
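# NOTE: with the default columns=3 and rows=3 the ranges above evaluate to range(-1, 2),
# so the grid is centred on (0, 0) and the corner cells used in the tests below are
# (-1, -1) and (1, 1).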
def assertGridSize(map, expected_rows, expected_columns=None):
if expected_columns is None:
expected_columns = expected_rows
assert len(list(map.all_cells())) == expected_rows * expected_columns
def assertLocationsEqual(actual_cells, expected_locations):
actual_cells = list(actual_cells)
actual = frozenset(cell.location for cell in actual_cells)
assert actual == frozenset(expected_locations)
assert len(actual_cells) == len(list(expected_locations))
def test_grid_size():
map = WorldMapCreator.generate_world_map_from_cells_data(_generate_cells(1, 3))
assertGridSize(map, 1, 3)
def test_all_cells():
map = WorldMapCreator.generate_world_map_from_cells_data(_generate_cells())
assertLocationsEqual(
map.all_cells(),
[Location(x, y) for x in range(-1, 2) for y in range(-1, 2)],
)
def test_score_cells():
cells = _generate_cells()
cells[0]["interactable"] = {"type": "score"}
cells[8]["interactable"] = {"type": "score"}
map = WorldMapCreator.generate_world_map_from_cells_data(cells)
assertLocationsEqual(map.score_cells(), (Location(-1, -1), Location(1, 1)))
def test_interactable_cells():
cells = _generate_cells()
cells[0]["interactable"] = {"type": "health"}
cells[8]["interactable"] = {"type": "damage_boost"}
map = WorldMapCreator.generate_world_map_from_cells_data(cells)
assertLocationsEqual(map.interactable_cells(), (Location(-1, -1), Location(1, 1)))
def test_artefact_cell():
cells = _generate_cells()
cells[0]["interactable"] = {"type": ARTEFACT_TYPES[0]}
map = WorldMapCreator.generate_world_map_from_cells_data(cells)
assert map.get_cell(Location(-1, -1)).has_artefact() == True
def test_location_is_visible():
map = WorldMapCreator.generate_world_map_from_cells_data(_generate_cells())
for x in (0, 1):
for y in (0, 1):
assert map.is_visible(Location(x, y)) == True
def test_x_off_map_is_not_visible():
map = WorldMapCreator.generate_world_map_from_cells_data(_generate_cells())
for y in (0, 1):
assert map.is_visible(Location(-2, y)) == False
assert map.is_visible(Location(2, y)) == False
def test_y_off_map_is_not_visible():
map = WorldMapCreator.generate_world_map_from_cells_data(_generate_cells())
for x in (0, 1):
assert map.is_visible(Location(x, -2)) == False
assert map.is_visible(Location(x, 2)) == False
def test_get_valid_cell():
map = WorldMapCreator.generate_world_map_from_cells_data(_generate_cells())
for x in (0, 1):
for y in (0, 1):
location = Location(x, y)
assert map.get_cell(location).location == location
def test_get_x_off_map():
map = WorldMapCreator.generate_world_map_from_cells_data(_generate_cells())
for y in (0, 1):
with pytest.raises(KeyError):
map.get_cell(Location(-2, y))
with pytest.raises(KeyError):
map.get_cell(Location(2, y))
def test_get_y_off_map():
map = WorldMapCreator.generate_world_map_from_cells_data(_generate_cells())
for x in (0, 1):
with pytest.raises(KeyError):
map.get_cell(Location(x, -2))
with pytest.raises(KeyError):
map.get_cell(Location(x, 2))
def test_can_move_to():
map = WorldMapCreator.generate_world_map_from_cells_data(_generate_cells())
target = Location(1, 1)
assert map.can_move_to(target) == True
def test_cannot_move_to_cell_off_grid():
map = WorldMapCreator.generate_world_map_from_cells_data(_generate_cells())
target = Location(4, 1)
assert map.can_move_to(target) == False
def test_cannot_move_to_uninhabitable_cell():
cells = _generate_cells()
cells[0]["obstacle"] = {"location": {"x": -1, "y": -1}}
map = WorldMapCreator.generate_world_map_from_cells_data(cells)
assert map.can_move_to(Location(-1, -1)) == False
def test_cannot_move_to_inhabited_cell(METHOD_NAME):
cells = _generate_cells()
cells[1]["avatar"] = METHOD_NAME
map = WorldMapCreator.generate_world_map_from_cells_data(cells)
assert map.can_move_to(Location(-1, 0)) == False
def test_scan_nearby(METHOD_NAME, capsys):
cells = _generate_cells(5, 5)
cells[0]["avatar"] = METHOD_NAME
cells[2]["obstacle"] = {"location": {"x": 0, "y": 0}}
cells[4]["interactable"] = {"type": ARTEFACT_TYPES[-1]}
map = WorldMapCreator.generate_world_map_from_cells_data(cells)
artefacts = map.scan_nearby(Location(-1, 0))
assert type(artefacts) == NearbyArtefactsList
assert len(artefacts) == 1
with pytest.raises(IndexError):
artefacts[1]
# Test NoNearbyArtefactsError
artefacts = map.scan_nearby(Location(5, 5), radius=1)
assert type(artefacts) == NearbyArtefactsList
assert len(artefacts) == 0
artefacts[0]
captured = capsys.readouterr()
# check the print statement matches
assert captured.out == "There aren't any nearby artefacts, you need to move closer!\n" | null |
485 | from __future__ import print_function
from threading import Thread
from time import sleep
from acq4.drivers.sensapex import UMP
from acq4.util import Qt
from .PressureControl import PressureControl, PressureControlWidget
from ..util.debug import logExc
class SensapexPressureControl(PressureControl):
"""Pressure control device driven by Sensapex analog/digital channels. User and
Atmosphere are the same port for this device.
Additional config options::
deviceId : int
address : str
group : int
pressureChannel : int
pollInterval : float
"""
sigMeasuredPressureChanged = Qt.Signal(object, object) # self, pressure
def __init__(self, manager, config, name):
self.devId = config.get('deviceId')
address = config.pop('address', None)
group = config.pop('group', None)
self._pollInterval = config.get('pollInterval', 1)
ump = UMP.get_ump(address=address, group=group)
self.dev = ump.get_device(self.devId)
config.setdefault("maximum", 7e4)
config.setdefault("minimum", -7e4)
PressureControl.__init__(self, manager, config, name)
self.pressureChannel = config.pop('pressureChannel')
self._valveValueBySource = {"regulator": 1, "atmosphere": 0, "user": 0}
self.sources = tuple(self._valveValueBySource.keys())
self._busy = self.dev.is_busy()
self._measurement = self.dev.measure_pressure(self.pressureChannel)
# 'user' and 'atmosphere' are the same channel on this device, so
# we remember what channel was requested rather than relying entirely on the valve state
self._source = None
self.source = self.METHOD_NAME()
self.pressure = self.getPressure()
self._pollThread = Thread(target=self._poll)
self._pollThread.daemon = True
self._pollThread.start()
def _poll(self):
while True:
try:
self.getBusyStatus()
self.measurePressure()
except Exception:
logExc("Pressure poller thread hit an error; retrying")
finally:
sleep(self._pollInterval)
def _setPressure(self, p):
self.dev.set_pressure(self.pressureChannel, p / 1000.)
def getPressure(self):
return self.dev.get_pressure(self.pressureChannel) * 1000
def measurePressure(self):
pressure = self.dev.measure_pressure(self.pressureChannel)
if pressure != self._measurement:
self._measurement = pressure
self.sigMeasuredPressureChanged.emit(self, pressure)
return pressure
def METHOD_NAME(self):
valveIsReg = self.dev.get_valve(self.pressureChannel) == 1
if valveIsReg:
return "regulator"
else:
return self._source or "atmosphere"
def _setSource(self, source):
self._source = source
self.dev.set_valve(self.pressureChannel, self._valveValueBySource[source])
def calibrate(self):
self.dev.calibrate_pressure(self.pressureChannel)
def getBusyStatus(self):
busy = self.dev.is_busy()
if busy != self._busy:
self._busy = busy
self.sigBusyChanged.emit(self, busy)
return busy
def deviceInterface(self, win):
return SensapexPressureControlWidget(dev=self)
class SensapexPressureControlWidget(Qt.QWidget):
"""Supports measured pressure display and calibration"""
def __init__(self, dev):
super(SensapexPressureControlWidget, self).__init__()
self.dev = dev
self.layout = Qt.QGridLayout()
self.setLayout(self.layout)
self.layout.setContentsMargins(0, 0, 0, 0)
self.calibrateButton = Qt.QPushButton("Calibrate")
self.calibrateButton.clicked.connect(self.dev.calibrate)
self.layout.addWidget(self.calibrateButton, 0, 0)
self.controlWidget = PressureControlWidget(self, dev)
self.layout.addWidget(self.controlWidget, 0, 1)
self.measurement = Qt.QLineEdit()
self.measurement.setPlaceholderText("-")
self.measurement.setReadOnly(True)
self.measurement.setTextMargins(20, 4, 20, 4)
self.layout.addWidget(self.measurement, 0, 2)
self._measurementChanged(dev, dev.measurePressure())
dev.sigMeasuredPressureChanged.connect(self._measurementChanged)
self._busyChanged(dev, dev.getBusyStatus())
dev.sigBusyChanged.connect(self._busyChanged)
def _measurementChanged(self, dev, pressure):
self.measurement.setText(f"{pressure:+.04f} kPa")
def _busyChanged(self, dev, isBusy):
self.calibrateButton.setEnabled(not isBusy) | null |
486 | # Support for a manual controlled stepper
#
# Copyright (C) 2019-2021 Kevin O'Connor <[email protected]>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
import stepper, chelper
from . import force_move
class ManualStepper:
def __init__(self, config):
self.printer = config.get_printer()
if config.get('endstop_pin', None) is not None:
self.can_home = True
self.rail = stepper.PrinterRail(
config, need_position_minmax=False, default_position_endstop=0.)
self.steppers = self.rail.get_steppers()
else:
self.can_home = False
self.rail = stepper.PrinterStepper(config)
self.steppers = [self.rail]
self.velocity = config.getfloat('velocity', 5., above=0.)
self.accel = self.homing_accel = config.getfloat('accel', 0., minval=0.)
self.next_cmd_time = 0.
# Setup iterative solver
ffi_main, ffi_lib = chelper.get_ffi()
self.trapq = ffi_main.gc(ffi_lib.trapq_alloc(), ffi_lib.trapq_free)
self.trapq_append = ffi_lib.trapq_append
self.trapq_finalize_moves = ffi_lib.trapq_finalize_moves
self.rail.setup_itersolve('cartesian_stepper_alloc', b'x')
self.rail.set_trapq(self.trapq)
# Register commands
stepper_name = config.get_name().split()[1]
gcode = self.printer.lookup_object('gcode')
gcode.register_mux_command('MANUAL_STEPPER', "STEPPER",
stepper_name, self.cmd_MANUAL_STEPPER,
desc=self.cmd_MANUAL_STEPPER_help)
def METHOD_NAME(self):
toolhead = self.printer.lookup_object('toolhead')
print_time = toolhead.get_last_move_time()
if self.next_cmd_time > print_time:
toolhead.dwell(self.next_cmd_time - print_time)
else:
self.next_cmd_time = print_time
def do_enable(self, enable):
self.METHOD_NAME()
stepper_enable = self.printer.lookup_object('stepper_enable')
if enable:
for s in self.steppers:
se = stepper_enable.lookup_enable(s.get_name())
se.motor_enable(self.next_cmd_time)
else:
for s in self.steppers:
se = stepper_enable.lookup_enable(s.get_name())
se.motor_disable(self.next_cmd_time)
self.METHOD_NAME()
def do_set_position(self, setpos):
self.rail.set_position([setpos, 0., 0.])
def do_move(self, movepos, speed, accel, sync=True):
self.METHOD_NAME()
cp = self.rail.get_commanded_position()
dist = movepos - cp
axis_r, accel_t, cruise_t, cruise_v = force_move.calc_move_time(
dist, speed, accel)
self.trapq_append(self.trapq, self.next_cmd_time,
accel_t, cruise_t, accel_t,
cp, 0., 0., axis_r, 0., 0.,
0., cruise_v, accel)
self.next_cmd_time = self.next_cmd_time + accel_t + cruise_t + accel_t
self.rail.generate_steps(self.next_cmd_time)
self.trapq_finalize_moves(self.trapq, self.next_cmd_time + 99999.9)
toolhead = self.printer.lookup_object('toolhead')
toolhead.note_kinematic_activity(self.next_cmd_time)
if sync:
self.METHOD_NAME()
def do_homing_move(self, movepos, speed, accel, triggered, check_trigger):
if not self.can_home:
raise self.printer.command_error(
"No endstop for this manual stepper")
self.homing_accel = accel
pos = [movepos, 0., 0., 0.]
endstops = self.rail.get_endstops()
phoming = self.printer.lookup_object('homing')
phoming.manual_home(self, endstops, pos, speed,
triggered, check_trigger)
cmd_MANUAL_STEPPER_help = "Command a manually configured stepper"
def cmd_MANUAL_STEPPER(self, gcmd):
enable = gcmd.get_int('ENABLE', None)
if enable is not None:
self.do_enable(enable)
setpos = gcmd.get_float('SET_POSITION', None)
if setpos is not None:
self.do_set_position(setpos)
speed = gcmd.get_float('SPEED', self.velocity, above=0.)
accel = gcmd.get_float('ACCEL', self.accel, minval=0.)
homing_move = gcmd.get_int('STOP_ON_ENDSTOP', 0)
if homing_move:
movepos = gcmd.get_float('MOVE')
self.do_homing_move(movepos, speed, accel,
homing_move > 0, abs(homing_move) == 1)
elif gcmd.get_float('MOVE', None) is not None:
movepos = gcmd.get_float('MOVE')
sync = gcmd.get_int('SYNC', 1)
self.do_move(movepos, speed, accel, sync)
elif gcmd.get_int('SYNC', 0):
self.METHOD_NAME()
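    # NOTE: illustrative G-Code only, assuming a hypothetical [manual_stepper my_stepper]
    # config section; these commands map onto the parameters parsed above:
    #   MANUAL_STEPPER STEPPER=my_stepper ENABLE=1
    #   MANUAL_STEPPER STEPPER=my_stepper SET_POSITION=0
    #   MANUAL_STEPPER STEPPER=my_stepper MOVE=10 SPEED=5 ACCEL=100
    #   MANUAL_STEPPER STEPPER=my_stepper MOVE=-5 SPEED=2 STOP_ON_ENDSTOP=1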
# Toolhead wrappers to support homing
def flush_step_generation(self):
self.METHOD_NAME()
def get_position(self):
return [self.rail.get_commanded_position(), 0., 0., 0.]
def set_position(self, newpos, homing_axes=()):
self.do_set_position(newpos[0])
def get_last_move_time(self):
self.METHOD_NAME()
return self.next_cmd_time
def dwell(self, delay):
self.next_cmd_time += max(0., delay)
def drip_move(self, newpos, speed, drip_completion):
self.do_move(newpos[0], speed, self.homing_accel)
def get_kinematics(self):
return self
def get_steppers(self):
return self.steppers
def calc_position(self, stepper_positions):
return [stepper_positions[self.rail.get_name()], 0., 0.]
def load_config_prefix(config):
return ManualStepper(config) | null |
487 | '''
Copyright (C) 2017-2023 Bryant Moscon - [email protected]
Please see the LICENSE file for the terms and conditions
associated with this software.
'''
import asyncio
from decimal import Decimal
import logging
from yapic import json
from cryptofeed.defines import BALANCES, BUY, CANCELLED, CANCEL_ORDER, FILLED, FILL_OR_KILL, IMMEDIATE_OR_CANCEL, L2_BOOK, LIMIT, MAKER_OR_CANCEL, OPEN, ORDER_STATUS, PARTIAL, PLACE_ORDER, SELL, TICKER, TRADES, TRADE_HISTORY
from cryptofeed.exchange import RestExchange
from cryptofeed.types import OrderBook
LOG = logging.getLogger('feedhandler')
class GeminiRestMixin(RestExchange):
api = "https://api.gemini.com"
sandbox_api = "https://api.sandbox.gemini.com"
rest_channels = (
TRADES, TICKER, L2_BOOK, ORDER_STATUS, CANCEL_ORDER, PLACE_ORDER, BALANCES, TRADE_HISTORY
)
order_options = {
LIMIT: 'exchange limit',
FILL_OR_KILL: 'fill-or-kill',
IMMEDIATE_OR_CANCEL: 'immediate-or-cancel',
MAKER_OR_CANCEL: 'maker-or-cancel',
}
    async def _order_status(self, data):
status = PARTIAL
if data['is_cancelled']:
status = CANCELLED
elif Decimal(data['remaining_amount']) == 0:
status = FILLED
elif Decimal(data['executed_amount']) == 0:
status = OPEN
price = Decimal(data['price']) if Decimal(data['avg_execution_price']) == 0 else Decimal(data['avg_execution_price'])
return {
'order_id': data['order_id'],
'symbol': self.exchange_symbol_to_std_symbol(data['symbol'].upper()), # Gemini uses lowercase symbols for REST and uppercase for WS
'side': BUY if data['side'] == 'buy' else SELL,
'order_type': LIMIT,
'price': price,
'total': Decimal(data['original_amount']),
'executed': Decimal(data['executed_amount']),
'pending': Decimal(data['remaining_amount']),
'timestamp': data['timestampms'] / 1000,
'order_status': status
}
async def _get(self, command: str, retry_count, retry_delay, params=''):
api = self.api if not self.sandbox else self.sandbox_api
resp = await self.http_conn.read(f"{api}{command}{params}", retry_count=retry_count, retry_delay=retry_delay)
return json.loads(resp, parse_float=Decimal)
async def _post(self, command: str, payload=None):
headers = self.generate_token(command, payload=payload)
headers['Content-Type'] = "text/plain"
headers['Content-Length'] = "0"
headers['Cache-Control'] = "no-cache"
api = self.api if not self.sandbox else self.sandbox_api
api = f"{api}{command}"
resp = await self.http_conn.write(api, header=headers)
return json.loads(resp, parse_float=Decimal)
# Public Routes
async def ticker(self, symbol: str, retry_count=1, retry_delay=60):
sym = self.std_symbol_to_exchange_symbol(symbol)
data = await self._get(f"/v1/pubticker/{sym}", retry_count, retry_delay)
return {'symbol': symbol,
'feed': self.id,
'bid': Decimal(data['bid']),
'ask': Decimal(data['ask'])
}
async def l2_book(self, symbol: str, retry_count=1, retry_delay=60):
ret = OrderBook(self.id, symbol)
sym = self.std_symbol_to_exchange_symbol(symbol)
data = await self._get(f"/v1/book/{sym}", retry_count, retry_delay)
ret.book.bids = {Decimal(u['price']): Decimal(u['amount']) for u in data['bids']}
ret.book.asks = {Decimal(u['price']): Decimal(u['amount']) for u in data['asks']}
return ret
async def trades(self, symbol: str, start=None, end=None, retry_count=1, retry_delay=60):
sym = self.std_symbol_to_exchange_symbol(symbol)
start, end = self._interval_normalize(start, end)
params = "&limit_trades=500"
if start:
end_ts = int(end * 1000)
params += f"&since={int(start * 1000)}"
def METHOD_NAME(trade):
return {
'feed': self.id,
'order_id': trade['tid'],
'symbol': self.exchange_symbol_to_std_symbol(sym),
'side': trade['type'],
'amount': Decimal(trade['amount']),
'price': Decimal(trade['price']),
'timestamp': trade['timestampms'] / 1000.0
}
while True:
data = reversed(await self._get(f"/v1/trades/{sym}?", retry_count, retry_delay, params=params))
if end:
data = [METHOD_NAME(d) for d in data if d['timestampms'] <= end_ts]
else:
data = [METHOD_NAME(d) for d in data]
yield data
if start:
                params = f"&limit_trades=500&since={int(data[-1]['timestamp'] * 1000) + 1}"
if len(data) < 500 or not start:
break
await asyncio.sleep(1 / self.request_limit)
# Trading APIs
async def place_order(self, symbol: str, side: str, order_type: str, amount: Decimal, price=None, client_order_id=None, options=None):
if not price:
raise ValueError('Gemini only supports limit orders, must specify price')
ot = self.normalize_order_options(order_type)
sym = self.std_symbol_to_exchange_symbol(symbol)
parameters = {
'type': ot,
'symbol': sym,
'side': side,
'amount': str(amount),
'price': str(price),
'options': [self.normalize_order_options(o) for o in options] if options else []
}
if client_order_id:
parameters['client_order_id'] = client_order_id
data = await self._post("/v1/order/new", parameters)
return await self._order_status(data)
async def cancel_order(self, order_id: str):
data = await self._post("/v1/order/cancel", {'order_id': int(order_id)})
return await self._order_status(data)
async def order_status(self, order_id: str):
data = await self._post("/v1/order/status", {'order_id': int(order_id)})
return await self._order_status(data)
async def orders(self):
data = await self._post("/v1/orders")
return [await self._order_status(d) for d in data]
async def trade_history(self, symbol: str, start=None, end=None):
sym = self.std_symbol_to_exchange_symbol(symbol)
params = {
'symbol': sym,
'limit_trades': 500
}
if start:
params['timestamp'] = self._datetime_normalize(start) * 1000
data = await self._post("/v1/mytrades", params)
return [
{
'price': Decimal(trade['price']),
'amount': Decimal(trade['amount']),
'timestamp': trade['timestampms'] / 1000,
'side': BUY if trade['type'].lower() == 'buy' else SELL,
'fee_currency': trade['fee_currency'],
'fee_amount': trade['fee_amount'],
'trade_id': trade['tid'],
'order_id': trade['order_id']
}
for trade in data
]
async def balances(self):
data = await self._post("/v1/balances")
return {
entry['currency']: {
'total': Decimal(entry['amount']),
'available': Decimal(entry['available'])
} for entry in data} | null |
488 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
r"""Tests the text output of Google C++ Mocking Framework.
To update the golden file:
gmock_output_test.py --build_dir=BUILD/DIR --gengolden
where BUILD/DIR contains the built gmock_output_test_ file.
gmock_output_test.py --gengolden
gmock_output_test.py
"""
from io import open # pylint: disable=redefined-builtin, g-importing-member
import os
import re
import sys
from googlemock.test import gmock_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_output_test_')
COMMAND = [PROGRAM_PATH, '--gtest_stack_trace_depth=0', '--gtest_print_time=0']
GOLDEN_NAME = 'gmock_output_test_golden.txt'
GOLDEN_PATH = os.path.join(gmock_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
"""Changes all Windows/Mac line endings in s to UNIX line endings."""
return s.replace('\r\n', '\n').replace('\r', '\n')
def RemoveReportHeaderAndFooter(output):
"""Removes Google Test result report's header and footer from the output."""
output = re.sub(r'.*gtest_main.*\n', '', output)
output = re.sub(r'\[.*\d+ tests.*\n', '', output)
output = re.sub(r'\[.* test environment .*\n', '', output)
output = re.sub(r'\[=+\] \d+ tests .* ran.*', '', output)
output = re.sub(r'.* FAILED TESTS\n', '', output)
return output
def RemoveLocations(output):
"""Removes all file location info from a Google Test program's output.
Args:
output: the output of a Google Test program.
Returns:
output with all file location info (in the form of
'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
'FILE:#: '.
"""
return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\:', 'FILE:#:', output)
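# NOTE: given the substitution above, a line such as 'src/foo/bar_test.cc:123: Failure'
# or 'src\foo\bar_test.cc(123): Failure' is normalized to 'FILE:#: Failure'.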
def METHOD_NAME(output):
"""Normalizes the error marker, which is different on Windows vs on Linux."""
return re.sub(r' error: ', ' Failure\n', output)
def RemoveMemoryAddresses(output):
"""Removes memory addresses from the test output."""
return re.sub(r'@\w+', '@0x#', output)
def RemoveTestNamesOfLeakedMocks(output):
"""Removes the test names of leaked mock objects from the test output."""
return re.sub(r'\(used in test .+\) ', '', output)
def GetLeakyTests(output):
"""Returns a list of test names that leak mock objects."""
# findall() returns a list of all matches of the regex in output.
# For example, if '(used in test FooTest.Bar)' is in output, the
# list will contain 'FooTest.Bar'.
return re.findall(r'\(used in test (.+)\)', output)
def GetNormalizedOutputAndLeakyTests(output):
"""Normalizes the output of gmock_output_test_.
Args:
output: The test output.
Returns:
A tuple (the normalized test output, the list of test names that have
leaked mocks).
"""
output = ToUnixLineEnding(output)
output = RemoveReportHeaderAndFooter(output)
output = METHOD_NAME(output)
output = RemoveLocations(output)
output = RemoveMemoryAddresses(output)
return (RemoveTestNamesOfLeakedMocks(output), GetLeakyTests(output))
def GetShellCommandOutput(cmd):
"""Runs a command in a sub-process, and returns its STDOUT in a string."""
return gmock_test_utils.Subprocess(cmd, capture_stderr=False).output
def GetNormalizedCommandOutputAndLeakyTests(cmd):
"""Runs a command and returns its normalized output and a list of leaky tests.
Args:
cmd: the shell command.
"""
# Disables exception pop-ups on Windows.
os.environ['GTEST_CATCH_EXCEPTIONS'] = '1'
return GetNormalizedOutputAndLeakyTests(GetShellCommandOutput(cmd))
class GMockOutputTest(gmock_test_utils.TestCase):
def testOutput(self):
(output, leaky_tests) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
golden_file = open(GOLDEN_PATH, 'rb')
golden = golden_file.read().decode('utf-8')
golden_file.close()
# The normalized output should match the golden file.
self.assertEqual(golden, output)
# The raw output should contain 2 leaked mock object errors for
# test GMockOutputTest.CatchesLeakedMocks.
self.assertEqual(['GMockOutputTest.CatchesLeakedMocks',
'GMockOutputTest.CatchesLeakedMocks'],
leaky_tests)
if __name__ == '__main__':
if sys.argv[1:] == [GENGOLDEN_FLAG]:
(output, _) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
golden_file = open(GOLDEN_PATH, 'wb')
    golden_file.write(output.encode('utf-8'))
golden_file.close()
# Suppress the error "googletest was imported but a call to its main()
# was never detected."
os._exit(0)
else:
gmock_test_utils.Main() | null |
489 | #! /usr/bin/env python3
import asyncio
import json
import logging
import os
import re
import smtplib
import subprocess
import weakref
from email.mime.text import MIMEText
_send_mail_tasks = weakref.WeakSet()
_rx_term_escapes = re.compile(r"(\x9b|\x1b\[)[0-?]*[ -\/]*[@-~]")
log = logging.getLogger("ai.backend.manager.monitor")
SMTP_HOST = "127.0.0.1"
SMTP_PORT = 25
SENDER_EMAIL = "[email protected]"
async def monitor_events():
# This script assumes that .env is already configured as a super-admin account with API-mode access.
args = [
"backend.ai",
"session",
"events",
"*", # monitor all session events
]
try:
while True:
log.info("(re)starting 'session events' command")
proc = await asyncio.create_subprocess_exec(
*args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env={**os.environb, b"PYTHONUNBUFFERED": b"1"},
)
try:
while True:
line = await proc.stdout.readline()
if not line: # terminated
break
event_raw_name, event_raw_data = line.split(b" ", maxsplit=1)
event_name = _rx_term_escapes.sub("", event_raw_name.decode())
try:
event_data = json.loads(event_raw_data)
task = asyncio.create_task(METHOD_NAME(event_name, event_data))
_send_mail_tasks.add(task)
except Exception:
log.exception("ooops")
finally:
await proc.wait()
finally:
# cancel any ongoing send_email() task
remaining_tasks = {*_send_mail_tasks}
cancelled_tasks = []
for task in remaining_tasks:
if not task.done() and not task.cancelled():
task.cancel()
cancelled_tasks.append(task)
await asyncio.gather(*cancelled_tasks, return_exceptions=True)
async def METHOD_NAME(event_name, event_data):
# reference for event_name: https://github.com/lablup/backend.ai/blob/f5bb6c1/src/ai/backend/common/events.py
# reference for event_data: https://github.com/lablup/backend.ai/blob/f5bb6c1/src/ai/backend/manager/api/events.py#L149-L155
session_name = event_data["sessionName"]
session_id = event_data["sessionId"]
match event_name:
case "session_terminated" | "session_success" | "session_failure":
user_email = await extract_user_email(event_data["ownerAccessKey"])
reason = event_data["reason"]
exit_code = event_data["exitCode"]
if user_email is None:
log.info(
f"{event_name} for {session_name} -> skipped due to the missing user email"
)
return
else:
log.info(f"{event_name} for {session_name} -> notifying to {user_email}")
await send_mail(
SENDER_EMAIL,
[user_email],
f"[Backend.AI] {event_name} in your session {session_name}",
(
f"This is a notification for a lifecycle event of your session.\n\n"
f"Session ID: {session_id}\n"
f"Session Name: {session_name}\n"
f"Reason: {reason}\n"
f"Exit Code: {exit_code}\n"
),
)
case _:
log.debug(f"{event_name} for {session_name} -> skipped")
async def extract_user_email(access_key):
args = [
"backend.ai",
"--output",
"json",
"admin",
"keypair",
"list",
"--filter",
f'access_key == "{access_key}"',
]
proc = await asyncio.create_subprocess_exec(*args, stdout=subprocess.PIPE)
data = json.loads(await proc.stdout.read())
try:
return data["items"][0]["user_id"]
except (IndexError, KeyError):
return None
async def send_mail(from_addr, to_addrs, subject, body):
def _send_mail(from_addr, to_addrs, subject, body):
# To use SSL, replace smtplib.SMTP with smtplib.SMTP_SSL
server = smtplib.SMTP(SMTP_HOST, SMTP_PORT)
# server.set_debuglevel(1) # for diagnosis with smtp issues
msg = MIMEText(body)
msg["From"] = from_addr
msg["To"] = ", ".join(to_addrs)
msg["Subject"] = subject
server.sendmail(from_addr, to_addrs, msg.as_string())
server.quit()
loop = asyncio.get_running_loop()
await loop.run_in_executor(None, _send_mail, from_addr, to_addrs, subject, body)
async def main():
try:
log.info("starting monitoring of session events...")
await monitor_events()
finally:
await asyncio.sleep(0)
log.info("terminated")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)s %(name)s %(message)s")
try:
asyncio.run(main())
except (KeyboardInterrupt, SystemExit):
pass
# vim: sts=4 sw=4 et | null |
490 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkoutboundbot.endpoint import endpoint_data
class CreateTaskExportTaskRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'OutboundBot', '2019-12-26', 'CreateTaskExportTask')
self.set_method('GET')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_HasAnswered(self): # Boolean
return self.get_query_params().get('HasAnswered')
def set_HasAnswered(self, HasAnswered): # Boolean
self.add_query_param('HasAnswered', HasAnswered)
def get_ActualTimeLte(self): # Long
return self.get_query_params().get('ActualTimeLte')
def set_ActualTimeLte(self, ActualTimeLte): # Long
self.add_query_param('ActualTimeLte', ActualTimeLte)
def get_OtherId(self): # String
return self.get_query_params().get('OtherId')
def set_OtherId(self, OtherId): # String
self.add_query_param('OtherId', OtherId)
def get_TaskCreateTimeLte(self): # Long
return self.get_query_params().get('TaskCreateTimeLte')
def set_TaskCreateTimeLte(self, TaskCreateTimeLte): # Long
self.add_query_param('TaskCreateTimeLte', TaskCreateTimeLte)
def get_JobId(self): # String
return self.get_query_params().get('JobId')
def set_JobId(self, JobId): # String
self.add_query_param('JobId', JobId)
def get_TaskCreateTimeGte(self): # Long
return self.get_query_params().get('TaskCreateTimeGte')
def set_TaskCreateTimeGte(self, TaskCreateTimeGte): # Long
self.add_query_param('TaskCreateTimeGte', TaskCreateTimeGte)
def get_CalledNumber(self): # String
return self.get_query_params().get('CalledNumber')
def set_CalledNumber(self, CalledNumber): # String
self.add_query_param('CalledNumber', CalledNumber)
def get_UserIdMatch(self): # String
return self.get_query_params().get('UserIdMatch')
def set_UserIdMatch(self, UserIdMatch): # String
self.add_query_param('UserIdMatch', UserIdMatch)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_ScriptNameQuery(self): # String
return self.get_query_params().get('ScriptNameQuery')
def set_ScriptNameQuery(self, ScriptNameQuery): # String
self.add_query_param('ScriptNameQuery', ScriptNameQuery)
def get_PageIndex(self): # Integer
return self.get_query_params().get('PageIndex')
def set_PageIndex(self, PageIndex): # Integer
self.add_query_param('PageIndex', PageIndex)
def get_SortOrder(self): # String
return self.get_query_params().get('SortOrder')
def set_SortOrder(self, SortOrder): # String
self.add_query_param('SortOrder', SortOrder)
def get_TaskStatusStringList(self): # String
return self.get_query_params().get('TaskStatusStringList')
def set_TaskStatusStringList(self, TaskStatusStringList): # String
self.add_query_param('TaskStatusStringList', TaskStatusStringList)
def METHOD_NAME(self): # String
return self.get_query_params().get('JobGroupNameQuery')
def set_JobGroupNameQuery(self, JobGroupNameQuery): # String
self.add_query_param('JobGroupNameQuery', JobGroupNameQuery)
def get_TaskId(self): # String
return self.get_query_params().get('TaskId')
def set_TaskId(self, TaskId): # String
self.add_query_param('TaskId', TaskId)
def get_HasHangUpByRejection(self): # Boolean
return self.get_query_params().get('HasHangUpByRejection')
def set_HasHangUpByRejection(self, HasHangUpByRejection): # Boolean
self.add_query_param('HasHangUpByRejection', HasHangUpByRejection)
def get_HasReachedEndOfFlow(self): # Boolean
return self.get_query_params().get('HasReachedEndOfFlow')
def set_HasReachedEndOfFlow(self, HasReachedEndOfFlow): # Boolean
self.add_query_param('HasReachedEndOfFlow', HasReachedEndOfFlow)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_RecordingDurationGte(self): # Long
return self.get_query_params().get('RecordingDurationGte')
def set_RecordingDurationGte(self, RecordingDurationGte): # Long
self.add_query_param('RecordingDurationGte', RecordingDurationGte)
def get_CallDurationLte(self): # Long
return self.get_query_params().get('CallDurationLte')
def set_CallDurationLte(self, CallDurationLte): # Long
self.add_query_param('CallDurationLte', CallDurationLte)
def get_JobGroupId(self): # String
return self.get_query_params().get('JobGroupId')
def set_JobGroupId(self, JobGroupId): # String
self.add_query_param('JobGroupId', JobGroupId)
def get_SortBy(self): # String
return self.get_query_params().get('SortBy')
def set_SortBy(self, SortBy): # String
self.add_query_param('SortBy', SortBy)
def get_JobStatusStringList(self): # String
return self.get_query_params().get('JobStatusStringList')
def set_JobStatusStringList(self, JobStatusStringList): # String
self.add_query_param('JobStatusStringList', JobStatusStringList)
def get_ActualTimeGte(self): # Long
return self.get_query_params().get('ActualTimeGte')
def set_ActualTimeGte(self, ActualTimeGte): # Long
self.add_query_param('ActualTimeGte', ActualTimeGte)
def get_CallDurationGte(self): # Long
return self.get_query_params().get('CallDurationGte')
def set_CallDurationGte(self, CallDurationGte): # Long
self.add_query_param('CallDurationGte', CallDurationGte)
def get_RecordingDurationLte(self): # Long
return self.get_query_params().get('RecordingDurationLte')
def set_RecordingDurationLte(self, RecordingDurationLte): # Long
self.add_query_param('RecordingDurationLte', RecordingDurationLte) | null |
491 | # -*- coding: utf-8 -*-
import pytest
from django.utils.timezone import now
from api.base.settings.defaults import API_BASE
from api_tests.registrations.filters.test_filters import RegistrationListFilteringMixin
from osf_tests.factories import (
AuthUserFactory,
CollectionFactory,
ProjectFactory,
RegistrationFactory,
OSFGroupFactory
)
from osf.utils import permissions
from tests.base import ApiTestCase
from website.views import find_bookmark_collection
@pytest.mark.django_db
class TestUserRegistrations:
@pytest.fixture()
def user_one(self):
user_one = AuthUserFactory()
user_one.social['twitter'] = 'rheisendennis'
user_one.save()
return user_one
@pytest.fixture()
def user_two(self):
return AuthUserFactory()
@pytest.fixture()
def group_member(self):
return AuthUserFactory()
@pytest.fixture()
def METHOD_NAME(self, group_member):
return OSFGroupFactory(creator=group_member)
@pytest.fixture()
def project_public_user_one(self, user_one):
return ProjectFactory(
title='Public Project User One',
is_public=True,
creator=user_one)
@pytest.fixture()
def project_private_user_one(self, user_one):
return ProjectFactory(
title='Private Project User One',
is_public=False,
creator=user_one)
@pytest.fixture()
def project_public_user_two(self, user_two):
return ProjectFactory(
title='Public Project User Two',
is_public=True,
creator=user_two)
@pytest.fixture()
def project_private_user_two(self, user_two):
return ProjectFactory(
title='Private Project User Two',
is_public=False,
creator=user_two)
@pytest.fixture()
def project_private_group_member(self, user_one, METHOD_NAME):
project = ProjectFactory(
title='Private Project Group Member',
is_public=False,
creator=user_one
)
project.add_osf_group(METHOD_NAME, permissions.ADMIN)
return project
@pytest.fixture()
def project_deleted_user_one(self, user_one):
return CollectionFactory(
title='Deleted Project User One',
is_public=False,
creator=user_one,
deleted=now())
@pytest.fixture()
def folder(self):
return CollectionFactory()
@pytest.fixture()
def folder_deleted(self, user_one):
return CollectionFactory(
title='Deleted Folder User One',
is_public=False,
creator=user_one,
deleted=now())
@pytest.fixture()
def bookmark_collection(self, user_one):
return find_bookmark_collection(user_one)
@pytest.fixture()
def reg_project_public_user_one(self, user_one, project_public_user_one):
return RegistrationFactory(
project=project_public_user_one,
creator=user_one,
is_public=True)
@pytest.fixture()
def reg_project_private_user_one(self, user_one, project_private_user_one):
return RegistrationFactory(
project=project_private_user_one,
creator=user_one,
is_private=True)
@pytest.fixture()
def reg_project_public_user_two(self, user_two, project_public_user_two):
return RegistrationFactory(
project=project_public_user_two,
creator=user_two,
is_public=True)
@pytest.fixture()
def reg_project_private_user_two(self, user_two, project_private_user_two):
return RegistrationFactory(
project=project_private_user_two,
creator=user_two,
is_private=True)
@pytest.fixture()
def reg_project_private_group_member(self, user_one, project_private_group_member):
return RegistrationFactory(
project=project_private_group_member,
creator=user_one,
is_private=True)
def test_user_registrations(
self, app, user_one, user_two, group_member,
reg_project_public_user_one,
reg_project_public_user_two,
reg_project_private_user_one,
reg_project_private_user_two,
reg_project_private_group_member,
folder, folder_deleted,
project_deleted_user_one):
# test_authorized_in_gets_200
url = '/{}users/{}/registrations/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_one.auth)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
# test_anonymous_gets_200
url = '/{}users/{}/registrations/'.format(API_BASE, user_one._id)
res = app.get(url)
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
# test_get_registrations_logged_in
url = '/{}users/{}/registrations/'.format(API_BASE, user_one._id)
res = app.get(url, auth=user_one.auth)
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert reg_project_public_user_one._id in ids
assert reg_project_private_user_one._id in ids
assert reg_project_public_user_two._id not in ids
assert reg_project_private_user_two._id not in ids
assert folder._id not in ids
assert folder_deleted._id not in ids
assert project_deleted_user_one._id not in ids
# test_get_registrations_not_logged_in
url = '/{}users/{}/registrations/'.format(API_BASE, user_one._id)
res = app.get(url)
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert reg_project_public_user_one._id in ids
assert reg_project_private_user_one._id not in ids
assert reg_project_public_user_two._id not in ids
assert reg_project_private_user_two._id not in ids
assert folder._id not in ids
assert folder_deleted._id not in ids
assert project_deleted_user_one._id not in ids
# test_get_registrations_logged_in_as_different_user
url = '/{}users/{}/registrations/'.format(API_BASE, user_two._id)
res = app.get(url, auth=user_one.auth)
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert reg_project_public_user_one._id not in ids
assert reg_project_private_user_one._id not in ids
assert reg_project_public_user_two._id in ids
assert reg_project_private_user_two._id not in ids
assert folder._id not in ids
assert folder_deleted._id not in ids
assert project_deleted_user_one._id not in ids
# test_get_registrations_logged_in_group_member
url = '/{}users/{}/registrations/'.format(API_BASE, group_member._id)
res = app.get(url, auth=group_member.auth)
node_json = res.json['data']
ids = [each['id'] for each in node_json]
assert reg_project_public_user_one._id not in ids
assert reg_project_private_user_one._id not in ids
assert reg_project_public_user_two._id not in ids
assert reg_project_private_user_two._id not in ids
assert folder._id not in ids
assert folder_deleted._id not in ids
assert project_deleted_user_one._id not in ids
# project group members not copied to registration.
assert reg_project_private_group_member not in ids
class TestRegistrationListFiltering(
RegistrationListFilteringMixin,
ApiTestCase):
url = '/{}users/me/registrations/?'.format(API_BASE) | null |
492 | # Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""Collection of all deploy related views
"""
from deploy_board.settings import SITE_METRICS_CONFIGS, TELETRAAN_DISABLE_CREATE_ENV_PAGE, TELETRAAN_REDIRECT_CREATE_ENV_PAGE_URL
from django.middleware.csrf import get_token
import json
import logging
from django.shortcuts import render
from django.views.generic import View
from django.template.loader import render_to_string
from django.http import HttpResponse
from helpers import builds_helper, deploys_helper, environs_helper, tags_helper
log = logging.getLogger(__name__)
DEFAULT_PAGE_SIZE = 30
DEFAULT_ONGOING_DEPLOY_SIZE = 10
def _get_running_deploys_count(request):
deploy_states = ["RUNNING"] # current running deploys
page_size = 1 # only need to return 1 detail, the return structure has the "total" value
deployResult = deploys_helper.get_all(request, deployState=deploy_states, pageSize=page_size)
return deployResult['total']
def _get_sidecars(request):
# returns a list of env id for sidecars which are identified by having a system priority
envs = environs_helper.get_all_sidecar_envs(request)
env_ids = []
for env in envs:
env_ids.append(env['id'])
return env_ids
def _get_ongoing_sidecar_deploys(request):
deploy_summaries = []
env_ids = _get_sidecars(request)
if env_ids:
deploy_states = ["RUNNING", "FAILING"]
deployResult = deploys_helper.get_all(request, envId=env_ids, deployState=deploy_states)
for deploy in deployResult['deploys']:
env = environs_helper.get(request, deploy['envId'])
build = builds_helper.get_build(request, deploy['buildId'])
summary = {}
summary['deploy'] = deploy
summary['env'] = env
summary['build'] = build
deploy_summaries.append(summary)
return deploy_summaries
def METHOD_NAME(request, index, size):
    # ongoing deploys are defined as deploys in one of the following states:
deploy_states = ["RUNNING", "FAILING"]
deployResult = deploys_helper.get_all(request, deployState=deploy_states,
pageIndex=index, pageSize=size)
deploy_summaries = []
for deploy in deployResult['deploys']:
env = environs_helper.get(request, deploy['envId'])
build = builds_helper.get_build(request, deploy['buildId'])
summary = {}
summary['deploy'] = deploy
summary['env'] = env
summary['build'] = build
deploy_summaries.append(summary)
return deploy_summaries
def get_landing_page(request):
envs_tag = tags_helper.get_latest_by_target_id(request, 'TELETRAAN')
metrics = SITE_METRICS_CONFIGS
return render(request, 'landing.html', {
"metrics": metrics,
'envs_tag': envs_tag,
"disable_create_env_page": TELETRAAN_DISABLE_CREATE_ENV_PAGE,
"redirect_create_env_page_url": TELETRAAN_REDIRECT_CREATE_ENV_PAGE_URL
})
def get_ongoing_sidecar_deploys(request):
    deploy_summaries = _get_ongoing_sidecar_deploys(request)
html = render_to_string('deploys/ongoing_deploys.tmpl', {
"deploy_summaries": deploy_summeries,
"pageIndex": 1,
"pageSize": 100,
"disablePrevious": True,
"disableNext": True,
})
return HttpResponse(html)
def get_ongoing_deploys(request):
index = int(request.GET.get('page_index', '1'))
size = int(request.GET.get('page_size', DEFAULT_ONGOING_DEPLOY_SIZE))
    deploy_summaries = METHOD_NAME(request, index, size)
    html = render_to_string('deploys/ongoing_deploys.tmpl', {
        "deploy_summaries": deploy_summaries,
        "pageIndex": index,
        "pageSize": size,
        "disablePrevious": index <= 1,
        "disableNext": len(deploy_summaries) < DEFAULT_ONGOING_DEPLOY_SIZE,
})
return HttpResponse(html)
def get_daily_deploy_count(request):
daily_deploy_count = deploys_helper.get_daily_deploy_count(request)
running_deploy_count = _get_running_deploys_count(request)
html = render_to_string('deploys/daily_deploy_count.tmpl', {
"daily_deploy_count": daily_deploy_count,
"running_deploy_count": running_deploy_count
})
return HttpResponse(html)
def get_duplicate_commit_deploy_message(request, name, stage, buildId):
env = environs_helper.get_env_by_stage(request, name, stage)
if env.get('deployId') is None:
return HttpResponse('')
current_deploy = deploys_helper.get_current(request, name, stage)
current_build = builds_helper.get_build(request, current_deploy['buildId'])
current_commit = current_build['commit']
next_build = builds_helper.get_build(request, buildId)
next_commit = next_build['commit']
if current_commit == next_commit:
        return render(request, 'deploys/duplicate_commit_deploy_message.tmpl', {
            "commit": next_build['commitShort']})
return HttpResponse('')
class DeployView(View):
def get(self, request, deploy_id):
deploy = deploys_helper.get(request, deploy_id)
build = builds_helper.get_build(request, deploy['buildId'])
env = None
if deploy.get('envId'):
env = environs_helper.get(request, deploy['envId'])
return render(request, 'deploys/deploy_details.html', {
"deploy": deploy,
"build": build,
"csrf_token": get_token(request),
"env": env,
})
def inline_update(request):
query_dict = request.POST
name = query_dict["name"]
value = query_dict["value"]
deploy_id = query_dict["deploy_id"]
if name == "description":
deploys_helper.update(request, deploy_id, {"description": value})
else:
log.error("Unsupport deploy update on field " + name)
return HttpResponse(json.dumps({'html': ''}), content_type="application/json") | null |
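# Illustrative sketch (values are hypothetical): inline_update expects a
# form-encoded POST carrying "name", "value" and "deploy_id"; only the
# "description" field is currently updatable, any other field is just logged.
_EXAMPLE_INLINE_UPDATE_PAYLOAD = {
    "name": "description",
    "value": "Deploy for weekly release",
    "deploy_id": "d-123",
}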
493 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateLoadBalancerHTTPListenerRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ens', '2017-11-10', 'CreateLoadBalancerHTTPListener','ens')
self.set_method('POST')
def get_ListenerForward(self): # String
return self.get_query_params().get('ListenerForward')
def set_ListenerForward(self, ListenerForward): # String
self.add_query_param('ListenerForward', ListenerForward)
def get_HealthCheckTimeout(self): # Integer
return self.get_query_params().get('HealthCheckTimeout')
def set_HealthCheckTimeout(self, HealthCheckTimeout): # Integer
self.add_query_param('HealthCheckTimeout', HealthCheckTimeout)
def get_XForwardedFor(self): # String
return self.get_query_params().get('XForwardedFor')
def set_XForwardedFor(self, XForwardedFor): # String
self.add_query_param('XForwardedFor', XForwardedFor)
def get_HealthCheckURI(self): # String
return self.get_query_params().get('HealthCheckURI')
def set_HealthCheckURI(self, HealthCheckURI): # String
self.add_query_param('HealthCheckURI', HealthCheckURI)
def get_HealthCheck(self): # String
return self.get_query_params().get('HealthCheck')
def set_HealthCheck(self, HealthCheck): # String
self.add_query_param('HealthCheck', HealthCheck)
def get_HealthCheckMethod(self): # String
return self.get_query_params().get('HealthCheckMethod')
def set_HealthCheckMethod(self, HealthCheckMethod): # String
self.add_query_param('HealthCheckMethod', HealthCheckMethod)
def get_HealthCheckDomain(self): # String
return self.get_query_params().get('HealthCheckDomain')
def set_HealthCheckDomain(self, HealthCheckDomain): # String
self.add_query_param('HealthCheckDomain', HealthCheckDomain)
def get_RequestTimeout(self): # Integer
return self.get_query_params().get('RequestTimeout')
def set_RequestTimeout(self, RequestTimeout): # Integer
self.add_query_param('RequestTimeout', RequestTimeout)
def get_LoadBalancerId(self): # String
return self.get_query_params().get('LoadBalancerId')
def set_LoadBalancerId(self, LoadBalancerId): # String
self.add_query_param('LoadBalancerId', LoadBalancerId)
def get_HealthCheckInterval(self): # Integer
return self.get_query_params().get('HealthCheckInterval')
def set_HealthCheckInterval(self, HealthCheckInterval): # Integer
self.add_query_param('HealthCheckInterval', HealthCheckInterval)
def METHOD_NAME(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_UnhealthyThreshold(self): # Integer
return self.get_query_params().get('UnhealthyThreshold')
def set_UnhealthyThreshold(self, UnhealthyThreshold): # Integer
self.add_query_param('UnhealthyThreshold', UnhealthyThreshold)
def get_HealthyThreshold(self): # Integer
return self.get_query_params().get('HealthyThreshold')
def set_HealthyThreshold(self, HealthyThreshold): # Integer
self.add_query_param('HealthyThreshold', HealthyThreshold)
def get_Scheduler(self): # String
return self.get_query_params().get('Scheduler')
def set_Scheduler(self, Scheduler): # String
self.add_query_param('Scheduler', Scheduler)
def get_ForwardPort(self): # Integer
return self.get_query_params().get('ForwardPort')
def set_ForwardPort(self, ForwardPort): # Integer
self.add_query_param('ForwardPort', ForwardPort)
def get_ListenerPort(self): # Integer
return self.get_query_params().get('ListenerPort')
def set_ListenerPort(self, ListenerPort): # Integer
self.add_query_param('ListenerPort', ListenerPort)
def get_IdleTimeout(self): # Integer
return self.get_query_params().get('IdleTimeout')
def set_IdleTimeout(self, IdleTimeout): # Integer
self.add_query_param('IdleTimeout', IdleTimeout)
def get_HealthCheckConnectPort(self): # Integer
return self.get_query_params().get('HealthCheckConnectPort')
def set_HealthCheckConnectPort(self, HealthCheckConnectPort): # Integer
self.add_query_param('HealthCheckConnectPort', HealthCheckConnectPort)
def get_HealthCheckHttpCode(self): # String
return self.get_query_params().get('HealthCheckHttpCode')
def set_HealthCheckHttpCode(self, HealthCheckHttpCode): # String
self.add_query_param('HealthCheckHttpCode', HealthCheckHttpCode) | null |
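# Illustrative sketch: a request like the one above is normally sent through
# aliyunsdkcore's AcsClient. The client argument is assumed to be an
# authenticated AcsClient for an ENS-enabled region; all identifiers and
# parameter values below are placeholders, not real resources.
def _example_create_http_listener(client):
    request = CreateLoadBalancerHTTPListenerRequest()
    request.set_LoadBalancerId('lb-xxxxxxxx')
    request.set_ListenerPort(80)
    request.set_HealthCheck('on')
    request.set_HealthCheckURI('/healthcheck')
    # do_action_with_exception returns the raw response body
    return client.do_action_with_exception(request)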
494 | # Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
from deploy_board.webapp.helpers.rodimus_client import RodimusClient
rodimus_client = RodimusClient()
MAX_BASE_IMAGE_UPDATE_EVENTS = 1e4
def promote_image(request, image_id, tag):
params = [('stage', tag)]
return rodimus_client.put("/base_images/%s/golden" % image_id, request.teletraan_user_id.token, params=params)
def demote_image(request, image_id):
return rodimus_client.delete("/base_images/%s/golden" % image_id, request.teletraan_user_id.token)
def cancel_image_update(request, image_id):
return rodimus_client.put("/base_images/%s/golden/cancel" % image_id, request.teletraan_user_id.token)
def get_image_tag_by_id(request, image_id):
return rodimus_client.get("/base_images/%s/tags" % image_id, request.teletraan_user_id.token)
def create_base_image(request, base_image_info):
return rodimus_client.post("/base_images", request.teletraan_user_id.token, data=base_image_info)
def get_all(request, index, size):
params = [('pageIndex', index), ('pageSize', size)]
return rodimus_client.get("/base_images", request.teletraan_user_id.token, params=params)
def get_all_with_acceptance(request, index, size):
base_images = get_all(request, index, size)
fetched_names = set()
golden = dict()
name_acceptance_map = {}
for img in base_images:
name = img['abstract_name']
cell = img['cell_name']
if name not in fetched_names and name.startswith('cmp_base'):
fetched_names.add(name)
base_image_infos = get_acceptance_by_name(request, name,
img.get('cell', None))
for img_info in base_image_infos:
                provider_name = img_info['baseImage']['provider_name']
                name_acceptance_map[provider_name] = img_info.get('acceptance') or 'UNKNOWN'
        img['acceptance'] = name_acceptance_map.get(img['provider_name'], 'N/A')
if name.startswith('cmp_base'):
key = (name, cell)
if key not in golden:
golden_image = get_current_golden_image(request, name, cell)
golden[key] = golden_image['id'] if golden_image else None
if img['id'] == golden[key]:
img['current_golden'] = True
return base_images
def get_image_names(request, provider, cell_name):
params = [('provider', provider), ('cellName', cell_name)]
return rodimus_client.get("/base_images/names", request.teletraan_user_id.token, params=params)
def METHOD_NAME(request, provider, cell_name, arch_name):
params = [('provider', provider), ('cellName', cell_name), ('archName', arch_name)]
return rodimus_client.get("/base_images/names", request.teletraan_user_id.token, params=params)
def get_all_by(request, provider, cell_name):
if cell_name:
return rodimus_client.get("/base_images/cell/%s" % cell_name, request.teletraan_user_id.token)
params = [('provider', provider)]
return rodimus_client.get("/base_images", request.teletraan_user_id.token, params=params)
def get_by_name(request, name, cell_name):
params = [('cellName', cell_name)]
return rodimus_client.get("/base_images/names/%s" % name, request.teletraan_user_id.token, params=params)
def get_acceptance_by_name(request, name, cell_name):
params = [('cellName', cell_name)]
return rodimus_client.get("/base_images/acceptances/%s" % name, request.teletraan_user_id.token, params=params)
def get_current_golden_image(request, name, cell):
return rodimus_client.get("/base_images/names/%s/cells/%s/golden" % (name, cell), request.teletraan_user_id.token)
def get_by_provider_name(request, name):
return rodimus_client.get("/base_images/provider_names/%s" % name, request.teletraan_user_id.token)
def get_by_id(request, image_id):
return rodimus_client.get("/base_images/%s" % image_id, request.teletraan_user_id.token)
def get_all_providers(request):
return rodimus_client.get("/base_images/provider", request.teletraan_user_id.token)
def get_image_update_events_by_new_id(request, image_id):
events = rodimus_client.get("/base_images/updates/%s" % image_id, request.teletraan_user_id.token)
for event in events:
event['status'] = generate_image_update_event_status(event)
return events
# Heuristic way to get the latest update events batch
# TODO: update rodimus for better update events batching
def get_latest_image_update_events(events):
if not events:
return events
    # Group update events into batches by create_time.
    # Events are sorted by create_time.
    # create_time is a millisecond timestamp and gets increased by 1 per cluster,
    # so the total number of clusters is assumed to stay well below 10K.
    latest_timestamp = events[0]['create_time']
    latest_events = [event for event in events if abs(
        event['create_time'] - latest_timestamp) < MAX_BASE_IMAGE_UPDATE_EVENTS]
return latest_events
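# Illustrative sketch of the heuristic above: with the 1e4 ms window, only
# events created within ~10 seconds of the first (newest) event are kept as
# one update batch; the timestamps below are made up.
def _example_latest_update_events():
    events = [
        {'create_time': 1700000010000},
        {'create_time': 1700000010002},
        {'create_time': 1690000000000},  # older batch, filtered out
    ]
    return get_latest_image_update_events(events)  # keeps the first two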
def get_image_update_events_by_cluster(request, cluster_name):
events = rodimus_client.get("/base_images/updates/cluster/%s" % cluster_name, request.teletraan_user_id.token)
for event in events:
event['status'] = generate_image_update_event_status(event)
return events
def generate_image_update_event_status(event):
if event['state'] == 'INIT':
if event['start_time']:
return 'UPDATING'
else:
return 'INIT'
elif event['state'] == 'COMPLETED':
if event['error_message']:
return 'FAILED'
else:
return 'SUCCEEDED'
return event['state']
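# Illustrative sketch of the mapping above: INIT events with a start time are
# shown as UPDATING, and COMPLETED events without an error become SUCCEEDED.
def _example_event_statuses():
    updating = generate_image_update_event_status(
        {'state': 'INIT', 'start_time': 1700000000000})
    succeeded = generate_image_update_event_status(
        {'state': 'COMPLETED', 'error_message': None})
    return updating, succeeded  # -> ('UPDATING', 'SUCCEEDED')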
def get_base_image_update_progress(events):
if not events:
return None
total = len(events)
succeeded = len([event for event in events if event['status'] == 'SUCCEEDED'])
state = 'COMPLETED' if all(event["state"] == 'COMPLETED' for event in events) else 'IN PROGRESS'
success_rate = succeeded * 100 / total
return {
'state': state,
'total': total,
'succeeded': succeeded,
        'progressTip': 'Among {} total clusters, {} updated successfully and {} failed or are pending.'.format(
            total, succeeded, total - succeeded),
'successRatePercentage': success_rate,
'successRate': '{}% ({}/{})'.format(success_rate, succeeded, total),
} | null |
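# Illustrative sketch: two of three clusters succeeded, so the success rate is
# about 66% and the overall state stays IN PROGRESS until every event is
# COMPLETED; the event dicts below are minimal made-up examples.
def _example_update_progress():
    events = [
        {'state': 'COMPLETED', 'status': 'SUCCEEDED'},
        {'state': 'COMPLETED', 'status': 'SUCCEEDED'},
        {'state': 'INIT', 'status': 'UPDATING'},
    ]
    return get_base_image_update_progress(events)  # state == 'IN PROGRESS'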
495 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdts.endpoint import endpoint_data
class DescribeDtsJobsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Dts', '2020-01-01', 'DescribeDtsJobs','dts')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_WithoutDbList(self): # Boolean
return self.get_query_params().get('WithoutDbList')
def set_WithoutDbList(self, WithoutDbList): # Boolean
self.add_query_param('WithoutDbList', WithoutDbList)
def get_OrderDirection(self): # String
return self.get_query_params().get('OrderDirection')
def set_OrderDirection(self, OrderDirection): # String
self.add_query_param('OrderDirection', OrderDirection)
def get_DedicatedClusterId(self): # String
return self.get_query_params().get('DedicatedClusterId')
def set_DedicatedClusterId(self, DedicatedClusterId): # String
self.add_query_param('DedicatedClusterId', DedicatedClusterId)
def get_Type(self): # String
return self.get_query_params().get('Type')
def set_Type(self, Type): # String
self.add_query_param('Type', Type)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_OrderColumn(self): # String
return self.get_query_params().get('OrderColumn')
def set_OrderColumn(self, OrderColumn): # String
self.add_query_param('OrderColumn', OrderColumn)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_DtsBisLabel(self): # String
return self.get_query_params().get('DtsBisLabel')
def set_DtsBisLabel(self, DtsBisLabel): # String
self.add_query_param('DtsBisLabel', DtsBisLabel)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_DtsJobId(self): # String
return self.get_query_params().get('DtsJobId')
def set_DtsJobId(self, DtsJobId): # String
self.add_query_param('DtsJobId', DtsJobId)
def get_GroupId(self): # String
return self.get_query_params().get('GroupId')
def set_GroupId(self, GroupId): # String
self.add_query_param('GroupId', GroupId)
def get_Params(self): # String
return self.get_query_params().get('Params')
def set_Params(self, Params): # String
self.add_query_param('Params', Params)
def get_OwnerId(self): # String
return self.get_query_params().get('OwnerId')
def METHOD_NAME(self, OwnerId): # String
self.add_query_param('OwnerId', OwnerId)
def get_JobType(self): # String
return self.get_query_params().get('JobType')
def set_JobType(self, JobType): # String
self.add_query_param('JobType', JobType)
def get_Tags(self): # String
return self.get_query_params().get('Tags')
def set_Tags(self, Tags): # String
self.add_query_param('Tags', Tags)
def get_Region(self): # String
return self.get_query_params().get('Region')
def set_Region(self, Region): # String
self.add_query_param('Region', Region)
def get_DtsInstanceId(self): # String
return self.get_query_params().get('DtsInstanceId')
def set_DtsInstanceId(self, DtsInstanceId): # String
self.add_query_param('DtsInstanceId', DtsInstanceId)
def get_Status(self): # String
return self.get_query_params().get('Status')
def set_Status(self, Status): # String
self.add_query_param('Status', Status) | null |
496 | import unittest
from unittest import TestCase
import pytest
import torch
import torch.nn.functional as F
from pytest_mock import MockerFixture
from torch import distributed as dist
from torch import nn
from torch.optim import SGD
from lightly.loss import msn_loss
from lightly.loss.msn_loss import MSNLoss
from lightly.models.modules.heads import MSNProjectionHead
class TestMSNLoss:
def test__gather_distributed(self, mocker: MockerFixture) -> None:
mock_is_available = mocker.patch.object(dist, "is_available", return_value=True)
MSNLoss(gather_distributed=True)
mock_is_available.assert_called_once()
def test__gather_distributed_dist_not_available(
self, mocker: MockerFixture
) -> None:
mock_is_available = mocker.patch.object(
dist, "is_available", return_value=False
)
with pytest.raises(ValueError):
MSNLoss(gather_distributed=True)
mock_is_available.assert_called_once()
class TestMSNLossUnitTest(TestCase):
# Old tests in unittest style, please add new tests to TestMSNLoss using pytest.
def METHOD_NAME(self) -> None:
MSNLoss(temperature=1.0)
with self.assertRaises(ValueError):
MSNLoss(temperature=0.0)
with self.assertRaises(ValueError):
MSNLoss(temperature=-1.0)
def test__init__sinkhorn_iterations(self) -> None:
MSNLoss(sinkhorn_iterations=0)
with self.assertRaises(ValueError):
MSNLoss(sinkhorn_iterations=-1)
def test__init__me_max_weight(self) -> None:
criterion = MSNLoss(regularization_weight=0.0, me_max_weight=0.5)
assert criterion.regularization_weight == 0.5
    def test_prototype_probability(self) -> None:
torch.manual_seed(0)
queries = F.normalize(torch.rand((8, 10)), dim=1)
prototypes = F.normalize(torch.rand((4, 10)), dim=1)
prob = msn_loss.prototype_probabilities(queries, prototypes, temperature=0.5)
self.assertEqual(prob.shape, (8, 4))
self.assertLessEqual(prob.max(), 1.0)
self.assertGreater(prob.min(), 0.0)
# verify sharpening
prob1 = msn_loss.prototype_probabilities(queries, prototypes, temperature=0.1)
# same prototypes should be assigned regardless of temperature
self.assertTrue(torch.all(prob.argmax(dim=1) == prob1.argmax(dim=1)))
# probabilities of selected prototypes should be higher for lower temperature
self.assertTrue(torch.all(prob.max(dim=1)[0] < prob1.max(dim=1)[0]))
def test_sharpen(self) -> None:
torch.manual_seed(0)
prob = torch.rand((8, 10))
p0 = msn_loss.sharpen(prob, temperature=0.5)
p1 = msn_loss.sharpen(prob, temperature=0.1)
# indices of max probabilities should be the same regardless of temperature
self.assertTrue(torch.all(p0.argmax(dim=1) == p1.argmax(dim=1)))
# max probabilities should be higher for lower temperature
self.assertTrue(torch.all(p0.max(dim=1)[0] < p1.max(dim=1)[0]))
def test_sinkhorn(self) -> None:
torch.manual_seed(0)
prob = torch.rand((8, 10))
out = msn_loss.sinkhorn(prob)
self.assertTrue(torch.all(prob != out))
def test_sinkhorn_no_iter(self) -> None:
torch.manual_seed(0)
prob = torch.rand((8, 10))
out = msn_loss.sinkhorn(prob, iterations=0)
self.assertTrue(torch.all(prob == out))
def test_forward(self) -> None:
torch.manual_seed(0)
for num_target_views in range(1, 4):
with self.subTest(num_views=num_target_views):
criterion = MSNLoss()
anchors = torch.rand((8 * num_target_views, 10))
targets = torch.rand((8, 10))
prototypes = torch.rand((4, 10), requires_grad=True)
criterion(anchors, targets, prototypes)
@unittest.skipUnless(torch.cuda.is_available(), "cuda not available")
def test_forward_cuda(self) -> None:
torch.manual_seed(0)
criterion = MSNLoss()
anchors = torch.rand((8 * 2, 10)).cuda()
targets = torch.rand((8, 10)).cuda()
prototypes = torch.rand((4, 10), requires_grad=True).cuda()
criterion(anchors, targets, prototypes)
def test_backward(self) -> None:
torch.manual_seed(0)
head = MSNProjectionHead(5, 16, 6)
criterion = MSNLoss()
optimizer = SGD(head.parameters(), lr=0.1)
anchors = torch.rand((8 * 4, 5))
targets = torch.rand((8, 5))
prototypes = nn.Linear(6, 4).weight # 4 prototypes with dim 6
optimizer.zero_grad()
anchors = head(anchors)
with torch.no_grad():
targets = head(targets)
loss = criterion(anchors, targets, prototypes)
loss.backward()
weights_before = head.layers[0].weight.data.clone()
optimizer.step()
weights_after = head.layers[0].weight.data
# backward pass should update weights
self.assertTrue(torch.any(weights_before != weights_after))
@unittest.skipUnless(torch.cuda.is_available(), "cuda not available")
def test_backward_cuda(self) -> None:
torch.manual_seed(0)
head = MSNProjectionHead(5, 16, 6)
head.to("cuda")
criterion = MSNLoss()
optimizer = SGD(head.parameters(), lr=0.1)
anchors = torch.rand((8 * 4, 5)).cuda()
targets = torch.rand((8, 5)).cuda()
prototypes = nn.Linear(6, 4).weight.cuda() # 4 prototypes with dim 6
optimizer.zero_grad()
anchors = head(anchors)
with torch.no_grad():
targets = head(targets)
loss = criterion(anchors, targets, prototypes)
loss.backward()
weights_before = head.layers[0].weight.data.clone()
optimizer.step()
weights_after = head.layers[0].weight.data
# backward pass should update weights
self.assertTrue(torch.any(weights_before != weights_after)) | null |
497 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class CreateFullNatEntryRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'CreateFullNatEntry','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_FullNatEntryDescription(self): # String
return self.get_query_params().get('FullNatEntryDescription')
def set_FullNatEntryDescription(self, FullNatEntryDescription): # String
self.add_query_param('FullNatEntryDescription', FullNatEntryDescription)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_AccessIp(self): # String
return self.get_query_params().get('AccessIp')
def set_AccessIp(self, AccessIp): # String
self.add_query_param('AccessIp', AccessIp)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_NatIpPort(self): # String
return self.get_query_params().get('NatIpPort')
def set_NatIpPort(self, NatIpPort): # String
self.add_query_param('NatIpPort', NatIpPort)
def get_FullNatTableId(self): # String
return self.get_query_params().get('FullNatTableId')
def set_FullNatTableId(self, FullNatTableId): # String
self.add_query_param('FullNatTableId', FullNatTableId)
def get_AccessPort(self): # String
return self.get_query_params().get('AccessPort')
def set_AccessPort(self, AccessPort): # String
self.add_query_param('AccessPort', AccessPort)
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_IpProtocol(self): # String
return self.get_query_params().get('IpProtocol')
def set_IpProtocol(self, IpProtocol): # String
self.add_query_param('IpProtocol', IpProtocol)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_FullNatEntryName(self): # String
return self.get_query_params().get('FullNatEntryName')
def set_FullNatEntryName(self, FullNatEntryName): # String
self.add_query_param('FullNatEntryName', FullNatEntryName)
def get_NatIp(self): # String
return self.get_query_params().get('NatIp')
def set_NatIp(self, NatIp): # String
self.add_query_param('NatIp', NatIp)
def METHOD_NAME(self): # String
return self.get_query_params().get('NetworkInterfaceId')
def set_NetworkInterfaceId(self, NetworkInterfaceId): # String
self.add_query_param('NetworkInterfaceId', NetworkInterfaceId) | null |
498 | import datetime
import logging
import re
from flask_sqlalchemy import (
_QueryProperty,
DefaultMeta,
get_state,
SessionBase,
SignallingSession,
SQLAlchemy,
)
from sqlalchemy import orm
try:
from sqlalchemy.ext.declarative import as_declarative
except ImportError:
from sqlalchemy.ext.declarative.api import as_declarative
try:
from sqlalchemy.orm.util import identity_key # noqa
has_identity_key = True
except ImportError:
has_identity_key = False
log = logging.getLogger(__name__)
_camelcase_re = re.compile(r"([A-Z]+)(?=[a-z0-9])")
class CustomSignallingSession(SignallingSession):
"""
    Custom signalling session to support SQLAlchemy>=1.4 with flask-sqlalchemy 2.X
https://github.com/pallets/flask-sqlalchemy/issues/953
"""
def get_bind(self, mapper=None, *args, **kwargs):
"""Return the engine or connection for a given model or
table, using the ``__bind_key__`` if it is set.
Patch from https://github.com/pallets/flask-sqlalchemy/pull/1001
"""
# mapper is None if someone tries to just get a connection
if mapper is not None:
try:
# SA >= 1.3
persist_selectable = mapper.persist_selectable
except AttributeError:
# SA < 1.3
persist_selectable = mapper.mapped_table
info = getattr(persist_selectable, "info", {})
bind_key = info.get("bind_key")
if bind_key is not None:
state = get_state(self.app)
return state.db.get_engine(self.app, bind=bind_key)
return SessionBase.get_bind(self, mapper, *args, **kwargs)
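# Illustrative sketch (not used anywhere in the package): a model that routes
# to a secondary database. It assumes the Flask app config defines
# SQLALCHEMY_BINDS = {'reports': '<database uri>'}; because the declarative
# metaclass copies __bind_key__ into the table's info dict, get_bind() above
# sends queries for this table to that engine. Model is the declarative base
# defined later in this module; the class and bind names are hypothetical.
def _example_bind_key_model():
    from sqlalchemy import Column, Integer
    class ReportEntry(Model):
        __bind_key__ = 'reports'
        __tablename__ = 'report_entry'
        id = Column(Integer, primary_key=True)
    return ReportEntry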
class SQLA(SQLAlchemy):
"""
    This is a child class of flask_SQLAlchemy.
    Its purpose is to override the declarative base of the original
    package, so that it is bound to the F.A.B. Model class. This lets your
    models share the namespace of the security tables (and others)
    and use mixins such as AuditMixin.
    Use it and configure it just like flask_SQLAlchemy.
"""
def make_declarative_base(self, model, metadata=None):
base = Model
base.query = _QueryProperty(self)
return base
def get_tables_for_bind(self, bind=None):
"""Returns a list of all tables relevant for a bind."""
result = []
tables = Model.metadata.tables
for key in tables:
if tables[key].info.get("bind_key") == bind:
result.append(tables[key])
return result
def METHOD_NAME(self, options):
"""
        Custom session factory to support SQLAlchemy>=1.4 with flask-sqlalchemy 2.X
https://github.com/pallets/flask-sqlalchemy/issues/953
:param options: dict of keyword arguments passed to session class
"""
return orm.sessionmaker(class_=CustomSignallingSession, db=self, **options)
class ModelDeclarativeMeta(DefaultMeta):
"""
    Base declarative metaclass for all Model definitions.
    Sets up bind_keys to support multiple databases.
    Derives the table name from the camelcase class name.
"""
@as_declarative(name="Model", metaclass=ModelDeclarativeMeta)
class Model(object):
"""
    Use this class as the base for your models;
    it will define your table names automatically.
    MyModel will be called my_model in the database.
::
from sqlalchemy import Integer, String
from flask_appbuilder import Model
class MyModel(Model):
id = Column(Integer, primary_key=True)
name = Column(String(50), unique = True, nullable=False)
"""
__table_args__ = {"extend_existing": True}
def to_json(self):
result = dict()
for key in self.__mapper__.c.keys():
col = getattr(self, key)
if isinstance(col, datetime.datetime) or isinstance(col, datetime.date):
col = col.isoformat()
result[key] = col
return result
"""
This is kept for backwards compatibility
"""
Base = Model | null |
499 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DescribeSecurityGroupsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeSecurityGroups','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_FuzzyQuery(self): # Boolean
return self.get_query_params().get('FuzzyQuery')
def set_FuzzyQuery(self, FuzzyQuery): # Boolean
self.add_query_param('FuzzyQuery', FuzzyQuery)
def get_SecurityGroupId(self): # String
return self.get_query_params().get('SecurityGroupId')
def set_SecurityGroupId(self, SecurityGroupId): # String
self.add_query_param('SecurityGroupId', SecurityGroupId)
def get_IsQueryEcsCount(self): # Boolean
return self.get_query_params().get('IsQueryEcsCount')
def set_IsQueryEcsCount(self, IsQueryEcsCount): # Boolean
self.add_query_param('IsQueryEcsCount', IsQueryEcsCount)
def get_NetworkType(self): # String
return self.get_query_params().get('NetworkType')
def set_NetworkType(self, NetworkType): # String
self.add_query_param('NetworkType', NetworkType)
def get_SecurityGroupName(self): # String
return self.get_query_params().get('SecurityGroupName')
def set_SecurityGroupName(self, SecurityGroupName): # String
self.add_query_param('SecurityGroupName', SecurityGroupName)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_NextToken(self): # String
return self.get_query_params().get('NextToken')
def set_NextToken(self, NextToken): # String
self.add_query_param('NextToken', NextToken)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def METHOD_NAME(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_SecurityGroupIds(self): # String
return self.get_query_params().get('SecurityGroupIds')
def set_SecurityGroupIds(self, SecurityGroupIds): # String
self.add_query_param('SecurityGroupIds', SecurityGroupIds)
def get_SecurityGroupType(self): # String
return self.get_query_params().get('SecurityGroupType')
def set_SecurityGroupType(self, SecurityGroupType): # String
self.add_query_param('SecurityGroupType', SecurityGroupType)
def get_VpcId(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_MaxResults(self): # Integer
return self.get_query_params().get('MaxResults')
def set_MaxResults(self, MaxResults): # Integer
self.add_query_param('MaxResults', MaxResults) | null |
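# Illustrative sketch: set_Tags expands a list of {'Key': ..., 'Value': ...}
# dicts into the flattened Tag.N.Key / Tag.N.Value query parameters the ECS
# API expects; the tag key and value here are placeholders.
def _example_tag_expansion():
    request = DescribeSecurityGroupsRequest()
    request.set_Tags([{'Key': 'env', 'Value': 'prod'}])
    # query params now include Tag.1.Key=env and Tag.1.Value=prod
    return request.get_query_params()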