label (string, 1-61 chars) | code (string, 4k-8k chars) | code_compressed (null)
---|---|---|
bump sdk version | import glob
import os
import re
import sys
from lxml import etree
from functions import replace_files
class VersionBumper:
module_list = [
"AWSAPIGateway",
"AWSAutoScaling",
"AWSChimeSDKIdentity",
"AWSChimeSDKMessaging",
"AWSCloudWatch",
"AWSCognitoAuth",
"AWSCognitoIdentityProvider",
"AWSCognitoIdentityProviderASF",
"AWSComprehend",
"AWSConnect",
"AWSConnectParticipant",
"AWSCore",
"AWSDynamoDB",
"AWSEC2",
"AWSElasticLoadBalancing",
"AWSIoT",
"AWSKMS",
"AWSKinesis",
"AWSKinesisVideo",
"AWSKinesisVideoArchivedMedia",
"AWSKinesisVideoSignaling",
"AWSKinesisVideoWebRTCStorage",
"AWSLambda",
"AWSLex",
"AWSLocation",
"AWSLocationXCF",
"AWSLogs",
"AWSMachineLearning",
"AWSPinpoint",
"AWSPolly",
"AWSRekognition",
"AWSS3",
"AWSSES",
"AWSSNS",
"AWSSQS",
"AWSSageMakerRuntime",
"AWSSimpleDB",
"AWSTextract",
"AWSTranscribe",
"AWSTranscribeStreaming",
"AWSTranslate",
"AWSAuthSDK/Sources/AWSAuthCore",
"AWSAuthSDK/Sources/AWSAuthUI",
"AWSAuthSDK/Sources/AWSAppleSignIn",
"AWSAuthSDK/Sources/AWSFacebookSignIn",
"AWSAuthSDK/Sources/AWSGoogleSignIn",
"AWSAuthSDK/Sources/AWSMobileClient",
"AWSAuthSDK/Sources/AWSMobileClientXCF",
"AWSAuthSDK/Sources/AWSUserPoolsSignIn",
]
def __init__(self, root, new_sdk_version):
self._root = root
self._new_sdk_version = new_sdk_version
def METHOD_NAME(self):
self.bump_plists()
self.bump_services()
self.bump_podspecs()
self.bump_changelog()
self.bump_generate_docs()
def bump_plists(self):
for module in VersionBumper.module_list:
filename = os.path.join(self._root, module, "Info.plist")
self.bump_plist(filename)
def bump_plist(self, filename):
tree = etree.parse(filename)
root_node = tree.getroot()
namespaces = root_node.nsmap
dict_node = root_node.find("./dict", namespaces)
set_version = False
for child in dict_node:
if child.tag == "key" and child.text == "CFBundleShortVersionString":
set_version = True
else:
if set_version:
child.text = self._new_sdk_version
break
plist_string = VersionBumper.format_plist(tree)
plist = open(filename, "w")
plist.write(plist_string)
plist.close()
@staticmethod
def format_plist(tree):
"""
Adjusts lxml's pretty-printed XML format to match Xcode's default and avoid
semantically uninteresting diffs
:param tree: the lxml tree to format
:return: a pretty-printed string matching Xcode's plist format
"""
plist_string = VersionBumper.tree_to_string(tree)
flags = re.MULTILINE | re.IGNORECASE
# Prepend XML prolog. This could be partially done with `lxml.tostring`'s
# xml_declaration option, but that returns single-quoted attributes
formatted_plist = '<?xml version="1.0" encoding="UTF-8"?>\n' + plist_string
# Replace self-closing '<string/>' tags with explicitly closed tags
formatted_plist = re.sub(r"<string/>", "<string></string>", formatted_plist, flags=flags)
# Adjust lxml's default space-based indentation to Xcode's tab-based. Use the
# multiline flag and match beginning of each line.
formatted_plist = re.sub(r"^\s+<", "\t<", formatted_plist, flags=flags)
return formatted_plist
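# For example (illustrative only): a line like '    <string/>' produced by lxml becomes
# '\t<string></string>' after the substitutions above, matching Xcode's tab-indented plist output.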
@staticmethod
def tree_to_string(tree):
plist_bytes = etree.tostring(
tree, pretty_print=True, xml_declaration=False, encoding="UTF-8"
)
plist_string = plist_bytes.decode("utf-8")
return plist_string
def bump_services(self):
service_pattern = {
"match": r'(NSString[[:space:]]+\*const[[:space:]]+AWS.+SDKVersion[[:space:]]*=[[:space:]]+@")[0-9]+\.[0-9]+\.[0-9]+"', # noqa: E501
"replace": r'\1{version}"'.format(version=self._new_sdk_version),
"files": [],
}
# Add files for each module
for module in VersionBumper.module_list:
path = "{0}/{0}Service.m".format(module)
if os.path.isfile(os.path.join(self._root, path)):
service_pattern["files"].append(path)
# Add files for special modules
service_pattern["files"].extend(
[
"AWSAPIGateway/AWSAPIGatewayClient.m",
"AWSCognitoAuth/AWSCognitoAuth.m",
"AWSCore/Service/AWSService.m",
"AWSIoT/AWSIoTDataService.m",
"AWSKinesis/AWSFirehoseService.m",
"AWSLex/AWSLexInteractionKit.m",
"AWSPinpoint/AWSPinpointTargeting/AWSPinpointTargetingService.m",
"AWSPolly/AWSPollySynthesizeSpeechURLBuilder.m",
"AWSS3/AWSS3PreSignedURL.m",
]
)
replace_files(self._root, service_pattern)
def bump_podspecs(self):
podspec_pattern1 = {
"match": r"(dependency[[:space:]]+'AWS.+'[[:space:]]*,[[:space:]]*')[0-9]+\.[0-9]+\.[0-9]+(')", # noqa: E501
"replace": r"\1{version}\2".format(version=self._new_sdk_version),
"files": [],
}
podspec_pattern2 = {
"match": r"(s\.version[[:space:]]+=[[:space:]]*')[0-9]+\.[0-9]+\.[0-9]+(')",
"replace": r"\1{version}\2".format(version=self._new_sdk_version),
"files": [],
}
for file in glob.glob(os.path.join(self._root, "*.podspec")):
podspec_pattern1["files"].append(file)
podspec_pattern2["files"].append(file)
replace_files(self._root, podspec_pattern1)
replace_files(self._root, podspec_pattern2)
def bump_changelog(self):
changelog_pattern = {
"match": r"## Unreleased",
"replace": "## Unreleased\\\n\\\n-Features for next release\\\n\\\n## {version}".format(
version=self._new_sdk_version
),
"files": ["CHANGELOG.md"],
}
replace_files(self._root, changelog_pattern)
def bump_generate_docs(self):
generate_documentation_pattern = {
"match": r'VERSION="[0-9]+\.[0-9]+\.[0-9]+"',
"replace": r'VERSION="{version}"'.format(version=self._new_sdk_version),
"files": ["CircleciScripts/generate_documentation.sh"],
}
replace_files(self._root, generate_documentation_pattern)
if __name__ == "__main__":
root = sys.argv[1]
new_sdk_version = sys.argv[2]
bumper = VersionBumper(root, new_sdk_version)
bumper.METHOD_NAME() | null |
prepare request | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class EnvironmentTypeOperations:
"""EnvironmentTypeOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~dev_center_dataplane_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
top: Optional[int] = None,
**kwargs
) -> AsyncIterable["models.EnvironmentTypeListResult"]:
"""Lists all environment types configured for a project.
:param top: The maximum number of resources to return from the operation. Example: 'top=10'.
:type top: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EnvironmentTypeListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~dev_center_dataplane_client.models.EnvironmentTypeListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.EnvironmentTypeListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2023-04-01"
accept = "application/json"
def METHOD_NAME(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'projectName': self._serialize.url("self._config.project_name", self._config.project_name, 'str', max_length=63, min_length=3, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9-_.]{2,62}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if top is not None:
query_parameters['top'] = self._serialize.query("top", top, 'int')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'projectName': self._serialize.url("self._config.project_name", self._config.project_name, 'str', max_length=63, min_length=3, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9-_.]{2,62}$'),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('EnvironmentTypeListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = METHOD_NAME(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/projects/{projectName}/environmentTypes'} # type: ignore | null |
swap32 | import struct
from PIL import Image
def METHOD_NAME(i):
return struct.unpack("<I", struct.pack(">I", i))[0]
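# Illustrative note (not part of the original sample): this swaps the byte order of a
# 32-bit unsigned value, e.g. 0x12345678 -> 0x78563412.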
def _clamp(n, smallest, largest):
return max(smallest, min(n, largest))
def yuv422_to_rgb(bytesequence):
img = []
for i in range(len(bytesequence) // 4):
offset = i * 4
byte1 = bytesequence[offset + 0]
byte2 = bytesequence[offset + 1]
byte3 = bytesequence[offset + 2]
byte4 = bytesequence[offset + 3]
Y = byte1
U = byte2
Y1 = byte3
V = byte4
R1 = Y + 1.4075 * (V - 128)
G1 = Y - 0.3455 * (U - 128) - (0.7169 * (V - 128))
B1 = Y + 1.7790 * (U - 128)
R2 = Y1 + 1.4075 * (V - 128)
G2 = Y1 - 0.3455 * (U - 128) - (0.7169 * (V - 128))
B2 = Y1 + 1.7790 * (U - 128)
img.append(_clamp(int(R1), 0, 255))
img.append(_clamp(int(G1), 0, 255))
img.append(_clamp(int(B1), 0, 255))
img.append(_clamp(int(R2), 0, 255))
img.append(_clamp(int(G2), 0, 255))
img.append(_clamp(int(B2), 0, 255))
return img
def yuv422_to_blackAndWhite(bytesequence):
img = []
for i in range(len(bytesequence) // 4):
offset = i * 4
byte1 = bytesequence[offset + 0]
byte2 = bytesequence[offset + 1]
byte3 = bytesequence[offset + 2]
byte4 = bytesequence[offset + 3]
Y = byte1
U = byte2
Y1 = byte3
V = byte4
r = Y
g = Y
b = Y
img.append(r)
img.append(g)
img.append(b)
r = Y1
g = Y1
b = Y1
img.append(r)
img.append(g)
img.append(b)
return img
def rgb888_to_rgb(bytesequence):
img = []
for i in range(len(bytesequence) // 4):
offset = i * 4
byte1 = bytesequence[offset + 0]
byte2 = bytesequence[offset + 1]
byte3 = bytesequence[offset + 2]
r = byte1
g = byte2
b = byte3
img.append(r)
img.append(g)
img.append(b)
return img
def blackAndWhite_to_rgb(bytesequence):
img = []
for i in range(len(bytesequence)):
byte1 = bytesequence[i]
r = byte1
g = byte1
b = byte1
img.append(r)
img.append(g)
img.append(b)
return img
def rgb565_to_rgb(bytesequence):
img = []
for i in range(len(bytesequence) // 4):
offset = i * 4
byte1 = bytesequence[offset + 2]
byte2 = bytesequence[offset + 3]
byte3 = bytesequence[offset + 0]
byte4 = bytesequence[offset + 1]
pixel1 = byte1 * 0x100 + byte2
r1 = (pixel1 >> 11) & 0x1f
g1 = (pixel1 >> 5) & 0x3f
b1 = (pixel1 >> 0) & 0x1f
r1 = (r1 * 255) / 31
g1 = (g1 * 255) / 63
b1 = (b1 * 255) / 31
pixel2 = byte3 * 0x100 + byte4
r2 = (pixel2 >> 11) & 0x1f
g2 = (pixel2 >> 5) & 0x3f
b2 = (pixel2 >> 0) & 0x1f
r2 = (r2 * 255) / 31
g2 = (g2 * 255) / 63
b2 = (b2 * 255) / 31
img.append(int(r2))
img.append(int(g2))
img.append(int(b2))
img.append(int(r1))
img.append(int(g1))
img.append(int(b1))
return img
def rgb555_to_rgb(bytesequence):
img = []
for i in range(len(bytesequence) // 4):
offset = i * 4
byte1 = bytesequence[offset + 0]
byte2 = bytesequence[offset + 1]
byte3 = bytesequence[offset + 2]
byte4 = bytesequence[offset + 3]
value = byte1 * 0x100 + byte2
r = (value & 0x7C00) >> 10
g = (value & 0x03e0) >> 5
b = (value & 0x001f) >> 0
img.append(r)
img.append(g)
img.append(b)
value = byte3 * 0x100 + byte4
r = (value & 0x7C00) >> 10
g = (value & 0x03e0) >> 5
b = (value & 0x001f) >> 0
img.append(r)
img.append(g)
img.append(b)
return img
#
# generate_img
#
def generate_img(output, color, resolution):
# img = Image.open(backdrop)
img = Image.new("RGB", resolution, color)
img.save(output, quality=100)
return img
def convert(bytesequence, outputfile, xres, yres, pixelformat):
image = []
if (pixelformat == "YUV422"):
imagepixels = yuv422_to_rgb(bytesequence)
elif (pixelformat == "RGB555"):
imagepixels = rgb555_to_rgb(bytesequence)
elif (pixelformat == "RGB565"):
imagepixels = rgb565_to_rgb(bytesequence)
elif (pixelformat == "RGB888"):
imagepixels = rgb888_to_rgb(bytesequence)
elif (pixelformat == "GRAYSCALE"): #Black and white yuv422
imagepixels = blackAndWhite_to_rgb(bytesequence)
elif (pixelformat == "BAYER"): #Black and white raw
imagepixels = blackAndWhite_to_rgb(bytesequence)
offset = 0
for i in range(yres):
line = []
offset = (xres * 3) * i
for j in range(xres * 3):
line.append(imagepixels[j + offset])
image.append(line)
print("Output image to file xres {}, yres {}".format(xres,yres), flush=True)
g_pil_image = generate_img(outputfile, (0, 0, 0), (xres, yres))
x = 0
y = 0
for i in range(int(len(imagepixels) / 3)):
color_r = imagepixels[i * 3 + 0]
color_g = imagepixels[i * 3 + 1]
color_b = imagepixels[i * 3 + 2]
g_pil_image.putpixel( (x, y), (color_r, color_g, color_b, 255))
x = x + 1
if x > (xres - 1):
x = 0
y = y + 1
if y > (yres - 1):
break
g_pil_image.save(outputfile)
| null |
optimizer config | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch.optim import Adagrad
from fairseq.optim import LegacyFairseqOptimizer, register_optimizer
@register_optimizer("adagrad_with_grad_clip")
class FairseqAdagradWithGradClip(LegacyFairseqOptimizer):
def __init__(self, args, params):
super().__init__(args)
self._optimizer = AdagradWithGradClip(params, **self.METHOD_NAME)
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
# fmt: off
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
parser.add_argument('--adagrad-clip', default=0.0, type=float, metavar='D',
help='internal grad clip')
# fmt: on
@property
def METHOD_NAME(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
"lr": self.args.lr[0],
"weight_decay": self.args.weight_decay,
"grad_clip": self.args.adagrad_clip,
}
@property
def supports_flat_params(self):
return False
def _clip_grad(clr, grad, group_grad_clip):
if group_grad_clip > 0:
norm = grad.norm(2).item()
if norm > group_grad_clip:
clr *= group_grad_clip / (norm + 1e-10)
return clr
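# Worked example (a sketch, not from the original source): with clr=0.1, a gradient norm
# of 20 and group_grad_clip=10, clr is scaled by 10/20 to 0.05, so the step size shrinks
# instead of the gradient itself being truncated.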
class AdagradWithGradClip(Adagrad):
"""Adagrad algorithm with custom gradient clipping"""
def __init__(
self,
params,
lr=1e-2,
lr_decay=0,
weight_decay=0,
initial_accumulator_value=0,
grad_clip=0,
):
Adagrad.__init__(
self,
params,
lr=lr,
lr_decay=lr_decay,
weight_decay=weight_decay,
initial_accumulator_value=initial_accumulator_value,
)
self.defaults["grad_clip"] = grad_clip
self.param_groups[0].setdefault("grad_clip", grad_clip)
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
state = self.state[p]
state["step"] += 1
if group["weight_decay"] != 0:
if p.grad.data.is_sparse:
raise RuntimeError(
"weight_decay option is "
"not compatible with sparse "
"gradients"
)
grad = grad.add(group["weight_decay"], p.data)
clr = group["lr"] / (1 + (state["step"] - 1) * group["lr_decay"])
# clip
clr = _clip_grad(clr=clr, grad=grad, group_grad_clip=group["grad_clip"])
if grad.is_sparse:
# the update is non-linear so indices must be unique
grad = grad.coalesce()
grad_indices = grad._indices()
grad_values = grad._values()
size = grad.size()
def make_sparse(values):
constructor = grad.new
if grad_indices.dim() == 0 or values.dim() == 0:
return constructor().resize_as_(grad)
return constructor(grad_indices, values, size)
state["sum"].add_(make_sparse(grad_values.pow(2)))
std = state["sum"]._sparse_mask(grad)
std_values = std._values().sqrt_().add_(1e-10)
p.data.add_(-clr, make_sparse(grad_values / std_values))
else:
state["sum"].addcmul_(1, grad, grad)
std = state["sum"].sqrt().add_(1e-10)
p.data.addcdiv_(-clr, grad, std)
return loss | null |
merge informative pair | from typing import Optional
import pandas as pd
from freqtrade.exchange import timeframe_to_minutes
def METHOD_NAME(dataframe: pd.DataFrame, informative: pd.DataFrame,
timeframe: str, timeframe_inf: str, ffill: bool = True,
append_timeframe: bool = True,
date_column: str = 'date',
suffix: Optional[str] = None) -> pd.DataFrame:
"""
Correctly merge informative samples to the original dataframe, avoiding lookahead bias.
Since dates are candle open dates, naively merging a 15m candle that starts at 15:00 with a
1h candle that starts at 15:00 would let all 15m candles know the 1h close at 16:00,
which they should not know.
Moves the date of the informative pair by 1 time interval forward.
This way, the 14:00 1h candle is merged to the 15:00 15m candle, since the 14:00 1h candle is
the last 1h candle that has already closed at 15:00, 15:15, 15:30 or 15:45.
Assuming inf_tf = '1d' - then the resulting columns will be:
date_1d, open_1d, high_1d, low_1d, close_1d, rsi_1d
:param dataframe: Original dataframe
:param informative: Informative pair, most likely loaded via dp.get_pair_dataframe
:param timeframe: Timeframe of the original pair sample.
:param timeframe_inf: Timeframe of the informative pair sample.
:param ffill: Forwardfill missing values - optional but usually required
:param append_timeframe: Rename columns by appending timeframe.
:param date_column: A custom date column name.
:param suffix: A string suffix to add at the end of the informative columns. If specified,
append_timeframe must be false.
:return: Merged dataframe
:raise: ValueError if the secondary timeframe is shorter than the dataframe timeframe
"""
minutes_inf = timeframe_to_minutes(timeframe_inf)
minutes = timeframe_to_minutes(timeframe)
if minutes == minutes_inf:
# No need to forwardshift if the timeframes are identical
informative['date_merge'] = informative[date_column]
elif minutes < minutes_inf:
# Subtract "small" timeframe so merging is not delayed by 1 small candle
# Detailed explanation in https://github.com/freqtrade/freqtrade/issues/4073
informative['date_merge'] = (
informative[date_column] + pd.to_timedelta(minutes_inf, 'm') -
pd.to_timedelta(minutes, 'm')
)
else:
raise ValueError("Tried to merge a faster timeframe to a slower timeframe."
"This would create new rows, and can throw off your regular indicators.")
# Rename columns to be unique
date_merge = 'date_merge'
if suffix and append_timeframe:
raise ValueError("You can not specify `append_timeframe` as True and a `suffix`.")
elif append_timeframe:
date_merge = f'date_merge_{timeframe_inf}'
informative.columns = [f"{col}_{timeframe_inf}" for col in informative.columns]
elif suffix:
date_merge = f'date_merge_{suffix}'
informative.columns = [f"{col}_{suffix}" for col in informative.columns]
# Combine the 2 dataframes
# all indicators on the informative sample MUST be calculated before this point
if ffill:
# https://pandas.pydata.org/docs/user_guide/merging.html#timeseries-friendly-merging
# merge_ordered - ffill method is 2.5x faster than separate ffill()
dataframe = pd.merge_ordered(dataframe, informative, fill_method="ffill", left_on='date',
right_on=date_merge, how='left')
else:
dataframe = pd.merge(dataframe, informative, left_on='date',
right_on=date_merge, how='left')
dataframe = dataframe.drop(date_merge, axis=1)
# if ffill:
# dataframe = dataframe.ffill()
return dataframe
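# Hedged usage sketch (the pair name and timeframes below are assumptions, not part of the
# original module): merging a 1h informative dataframe into a 15m strategy dataframe so that
# each 15m candle only sees 1h candles that have already closed.
#
#   informative_1h = dp.get_pair_dataframe(pair='BTC/USDT', timeframe='1h')
#   dataframe = METHOD_NAME(dataframe, informative_1h, '15m', '1h', ffill=True)
#   # informative columns arrive suffixed: open_1h, high_1h, low_1h, close_1h, ...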
def stoploss_from_open(
open_relative_stop: float,
current_profit: float,
is_short: bool = False,
leverage: float = 1.0
) -> float:
"""
Given the current profit, and a desired stop loss value relative to the trade entry price,
return a stop loss value that is relative to the current price, and which can be
returned from `custom_stoploss`.
The requested stop can be positive for a stop above the open price, or negative for
a stop below the open price. The return value is always >= 0.
`open_relative_stop` will be considered as adjusted for leverage if leverage is provided.
Returns 0 if the resulting stop price would be above/below (longs/shorts) the current price
:param open_relative_stop: Desired stop loss percentage, relative to the open price,
adjusted for leverage
:param current_profit: The current profit percentage
:param is_short: When true, perform the calculation for short instead of long
:param leverage: Leverage to use for the calculation
:return: Stop loss value relative to current price
"""
# formula is undefined for current_profit -1 (longs) or 1 (shorts), return maximum value
_current_profit = current_profit / leverage
if (_current_profit == -1 and not is_short) or (is_short and _current_profit == 1):
return 1
if is_short is True:
stoploss = -1 + ((1 - open_relative_stop / leverage) / (1 - _current_profit))
else:
stoploss = 1 - ((1 + open_relative_stop / leverage) / (1 + _current_profit))
# negative stoploss values indicate the requested stop price is higher/lower
# (long/short) than the current price
return max(stoploss * leverage, 0.0)
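# Worked example (illustrative, based on the docstring above): for a long at +10% profit
# with a desired stop 7% above the open price and leverage 1:
#   1 - (1 + 0.07) / (1 + 0.10) ~= 0.0273, i.e. a stop roughly 2.7% below the current rate.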
def stoploss_from_absolute(stop_rate: float, current_rate: float, is_short: bool = False,
leverage: float = 1.0) -> float:
"""
Given current price and desired stop price, return a stop loss value that is relative to current
price.
The requested stop can be positive for a stop above the open price, or negative for
a stop below the open price. The return value is always >= 0.
Returns 0 if the resulting stop price would be above the current price.
:param stop_rate: Stop loss price.
:param current_rate: Current asset price.
:param is_short: When true, perform the calculation for short instead of long
:param leverage: Leverage to use for the calculation
:return: Positive stop loss value relative to current price
"""
# formula is undefined for current_rate 0, return maximum value
if current_rate == 0:
return 1
stoploss = 1 - (stop_rate / current_rate)
if is_short:
stoploss = -stoploss
# negative stoploss values indicate the requested stop price is higher/lower
# (long/short) than the current price
# shorts can yield stoploss values higher than 1, so limit that as well
return max(min(stoploss, 1.0), 0.0) * leverage | null |
test area padding logx | from holoviews.element import Area, Overlay
import pandas as pd
import numpy as np
from ...utils import LoggingComparisonTestCase
from .test_plot import TestMPLPlot, mpl_renderer
class TestAreaPlot(LoggingComparisonTestCase, TestMPLPlot):
def test_area_padding_square(self):
area = Area([(1, 1), (2, 2), (3, 3)]).opts(padding=0.1)
plot = mpl_renderer.get_plot(area)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0.8)
self.assertEqual(x_range[1], 3.2)
self.assertEqual(y_range[0], 0)
self.assertEqual(y_range[1], 3.2)
def test_area_padding_square_per_axis(self):
area = Area([(1, 1), (2, 2), (3, 3)]).opts(padding=((0, 0.1), (0.1, 0.2)))
plot = mpl_renderer.get_plot(area)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 1)
self.assertEqual(x_range[1], 3.2)
self.assertEqual(y_range[0], 0)
self.assertEqual(y_range[1], 3.4)
def test_area_with_lower_vdim(self):
area = Area([(1, 0.5, 1), (2, 1.5, 2), (3, 2.5, 3)], vdims=['y', 'y2']).opts(padding=0.1)
plot = mpl_renderer.get_plot(area)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0.8)
self.assertEqual(x_range[1], 3.2)
self.assertEqual(y_range[0], 0.25)
self.assertEqual(y_range[1], 3.25)
def test_area_padding_negative(self):
area = Area([(1, -1), (2, -2), (3, -3)]).opts(padding=0.1)
plot = mpl_renderer.get_plot(area)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0.8)
self.assertEqual(x_range[1], 3.2)
self.assertEqual(y_range[0], -3.2)
self.assertEqual(y_range[1], 0)
def test_area_padding_mixed(self):
area = Area([(1, 1), (2, -2), (3, 3)]).opts(padding=0.1)
plot = mpl_renderer.get_plot(area)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0.8)
self.assertEqual(x_range[1], 3.2)
self.assertEqual(y_range[0], -2.5)
self.assertEqual(y_range[1], 3.5)
def test_area_padding_hard_range(self):
area = Area([(1, 1), (2, 2), (3, 3)]).redim.range(y=(0, 4)).opts(padding=0.1)
plot = mpl_renderer.get_plot(area)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0.8)
self.assertEqual(x_range[1], 3.2)
self.assertEqual(y_range[0], 0)
self.assertEqual(y_range[1], 4)
def test_area_padding_soft_range(self):
area = Area([(1, 1), (2, 2), (3, 3)]).redim.soft_range(y=(0, 3.5)).opts(padding=0.1)
plot = mpl_renderer.get_plot(area)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0.8)
self.assertEqual(x_range[1], 3.2)
self.assertEqual(y_range[0], 0)
self.assertEqual(y_range[1], 3.5)
def test_area_padding_nonsquare(self):
area = Area([(1, 1), (2, 2), (3, 3)]).opts(padding=0.1, aspect=2)
plot = mpl_renderer.get_plot(area)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0.9)
self.assertEqual(x_range[1], 3.1)
self.assertEqual(y_range[0], 0)
self.assertEqual(y_range[1], 3.2)
def METHOD_NAME(self):
area = Area([(1, 1), (2, 2), (3, 3)]).opts(padding=0.1, logx=True)
plot = mpl_renderer.get_plot(area)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0.89595845984076228)
self.assertEqual(x_range[1], 3.3483695221017129)
self.assertEqual(y_range[0], 0)
self.assertEqual(y_range[1], 3.2)
def test_area_padding_logy(self):
area = Area([(1, 1), (2, 2), (3, 3)]).opts(padding=0.1, logy=True)
plot = mpl_renderer.get_plot(area)
x_range, y_range = plot.handles['axis'].get_xlim(), plot.handles['axis'].get_ylim()
self.assertEqual(x_range[0], 0.8)
self.assertEqual(x_range[1], 3.2)
self.assertEqual(y_range[0], 0.03348369522101712)
self.assertEqual(y_range[1], 3.3483695221017129)
self.log_handler.assertContains('WARNING', 'Logarithmic axis range encountered value less than')
def test_area_stack_vdims(self):
df = pd.DataFrame({'x': [1, 2, 3], 'y_1': [1, 2, 3], 'y_2': [6, 4, 2], 'y_3': [8, 1, 2]})
overlay = Overlay([Area(df, kdims='x', vdims=col, label=col) for col in ['y_1', 'y_2', 'y_3']])
plot = Area.stack(overlay)
baselines = [np.array([0, 0, 0]), np.array([1., 2., 3.]), np.array([7., 6., 5.])]
for n, baseline in zip(plot.data, baselines):
self.assertEqual(plot.data[n].data.Baseline.to_numpy(), baseline) | null |
update notification to cancelled | from app.notify_client import NotifyAdminAPIClient, _attach_current_user
class NotificationApiClient(NotifyAdminAPIClient):
def get_notifications_for_service(
self,
service_id,
job_id=None,
template_type=None,
status=None,
page=None,
page_size=None,
count_pages=None,
limit_days=None,
include_jobs=None,
include_from_test_key=None,
format_for_csv=None,
to=None,
include_one_off=None,
):
# TODO: if "to" is included, this should be a POST
params = {
"page": page,
"page_size": page_size,
"template_type": template_type,
"status": status,
"include_jobs": include_jobs,
"include_from_test_key": include_from_test_key,
"format_for_csv": format_for_csv,
"to": to,
"include_one_off": include_one_off,
"count_pages": count_pages,
}
params = {k: v for k, v in params.items() if v is not None}
if job_id:
return self.get(
url="/service/{}/job/{}/notifications".format(service_id, job_id),
params=params,
)
else:
if limit_days is not None:
params["limit_days"] = limit_days
return self.get(url="/service/{}/notifications".format(service_id), params=params)
def send_notification(self, service_id, *, template_id, recipient, personalisation, sender_id):
data = {
"template_id": template_id,
"to": recipient,
"personalisation": personalisation,
}
if sender_id:
data["sender_id"] = sender_id
data = _attach_current_user(data)
return self.post(url="/service/{}/send-notification".format(service_id), data=data)
def get_notification(self, service_id, notification_id):
return self.get(url="/service/{}/notifications/{}".format(service_id, notification_id))
def get_api_notifications_for_service(self, service_id):
ret = self.get_notifications_for_service(
service_id,
include_jobs=True,
include_from_test_key=True,
include_one_off=False,
count_pages=False,
)
ret["notifications"] = [n for n in ret["notifications"] if n.get("api_key") is not None]
return self.map_letters_to_accepted(ret)
@staticmethod
def map_letters_to_accepted(notifications):
for notification in notifications["notifications"]:
if notification["notification_type"] == "letter":
if notification["status"] in ("created", "sending"):
notification["status"] = "accepted"
if notification["status"] in ("delivered", "returned-letter"):
notification["status"] = "received"
return notifications
def get_notification_letter_preview(self, service_id, notification_id, file_type, page=None):
get_url = "/service/{}/template/preview/{}/{}{}".format(
service_id,
notification_id,
file_type,
"?page={}".format(page) if page else "",
)
return self.get(url=get_url)
def get_notification_letter_preview_with_overlay(self, service_id, notification_id, file_type, page=None):
get_url = "/service/{}/template/preview/{}/{}{}{}".format(
service_id,
notification_id,
file_type,
"?overlay=1",
"&page={}".format(page) if page else "",
)
return self.get(url=get_url)
def METHOD_NAME(self, service_id, notification_id):
return self.post(
url="/service/{}/notifications/{}/cancel".format(service_id, notification_id),
data={},
)
def get_notification_status_by_service(self, start_date, end_date):
return self.get(
url="service/monthly-data-by-service",
params={
"start_date": str(start_date),
"end_date": str(end_date),
},
)
notification_api_client = NotificationApiClient() | null |
num color blocks | # Authors: see git history
#
# Copyright (c) 2010 Authors
# Licensed under the GNU GPL version 3.0 or later. See the file LICENSE for details.
from sys import exit
from typing import List
from inkex import errormsg
from ..i18n import _
from ..svg import PIXELS_PER_MM
from ..utils.geometry import Point
from ..utils.threading import check_stop_flag
from .color_block import ColorBlock
def stitch_groups_to_stitch_plan(stitch_groups, collapse_len=None, min_stitch_len=0.1, disable_ties=False): # noqa: C901
"""Convert a collection of StitchGroups to a StitchPlan.
* applies instructions embedded in the StitchGroup such as trim_after and stop_after
* adds tie-ins and tie-offs
* adds jump-stitches between stitch_group if necessary
"""
if not stitch_groups:
errormsg(_("There is no selected stitchable element. Please run "
"Extensions > Ink/Stitch > Troubleshoot > Troubleshoot objects in case you have expected a stitchout."))
exit(1)
if collapse_len is None:
collapse_len = 3.0
collapse_len = collapse_len * PIXELS_PER_MM
stitch_plan = StitchPlan()
color_block = stitch_plan.new_color_block(color=stitch_groups[0].color)
previous_stitch_group = None
need_tie_in = True
for stitch_group in stitch_groups:
check_stop_flag()
if not stitch_group.stitches:
continue
if color_block.color != stitch_group.color:
# add a lock stitch to the last element of the previous group
if not need_tie_in:
lock_stitches = previous_stitch_group.get_lock_stitches("end", disable_ties)
if lock_stitches:
color_block.add_stitches(stitches=lock_stitches)
need_tie_in = True
# end the previous block with a color change
color_block.add_stitch(color_change=True)
# make a new block of our color
color_block = stitch_plan.new_color_block(color=stitch_group.color)
else:
if (len(color_block) and not need_tie_in and
((stitch_group.stitches[0] - color_block.stitches[-1]).length() > collapse_len or
previous_stitch_group.force_lock_stitches)):
lock_stitches = previous_stitch_group.get_lock_stitches("end", disable_ties)
if lock_stitches:
color_block.add_stitches(stitches=lock_stitches)
need_tie_in = True
if need_tie_in is True:
lock_stitches = stitch_group.get_lock_stitches("start", disable_ties)
if lock_stitches:
color_block.add_stitch(lock_stitches[0], jump=True)
color_block.add_stitches(stitches=lock_stitches)
else:
color_block.add_stitch(stitch_group.stitches[0], jump=True)
need_tie_in = False
color_block.add_stitches(stitches=stitch_group.stitches)
if stitch_group.trim_after or stitch_group.stop_after:
lock_stitches = stitch_group.get_lock_stitches("end", disable_ties)
if lock_stitches:
color_block.add_stitches(stitches=lock_stitches)
need_tie_in = True
if stitch_group.trim_after:
color_block.add_stitch(trim=True)
if stitch_group.stop_after:
color_block.add_stitch(stop=True)
previous_stitch_group = stitch_group
if not need_tie_in:
# tie off at the end if we haven't already
lock_stitches = stitch_group.get_lock_stitches("end", disable_ties)
if lock_stitches:
color_block.add_stitches(stitches=lock_stitches)
if len(color_block) == 0:
# last block ended in a stop, so now we have an empty block
del stitch_plan.color_blocks[-1]
stitch_plan.filter_duplicate_stitches(min_stitch_len)
return stitch_plan
class StitchPlan(object):
"""Holds a set of color blocks, each containing stitches."""
def __init__(self):
self.color_blocks = []
def new_color_block(self, *args, **kwargs):
color_block = ColorBlock(*args, **kwargs)
self.color_blocks.append(color_block)
return color_block
def delete_empty_color_blocks(self):
color_blocks = []
for color_block in self.color_blocks:
if len(color_block) > 0:
color_blocks.append(color_block)
self.color_blocks = color_blocks
def add_color_block(self, color_block):
self.color_blocks.append(color_block)
def filter_duplicate_stitches(self, min_stitch_len):
for color_block in self:
color_block.filter_duplicate_stitches(min_stitch_len)
def __iter__(self):
return iter(self.color_blocks)
def __len__(self):
return len(self.color_blocks)
def __repr__(self):
return "StitchPlan(%s)" % ", ".join(repr(cb) for cb in self.color_blocks)
def __json__(self):
return dict(color_blocks=self.color_blocks,
num_stops=self.num_stops,
num_trims=self.num_trims,
num_stitches=self.num_stitches,
bounding_box=self.bounding_box,
estimated_thread=self.estimated_thread
)
@property
def num_colors(self):
"""Number of unique colors in the stitch plan."""
return len({block.color for block in self})
@property
def METHOD_NAME(self):
return len(self.color_blocks)
@property
def num_stops(self):
return sum(1 for block in self if block.stop_after)
@property
def num_trims(self):
return sum(block.num_trims for block in self)
@property
def num_stitches(self):
return sum(block.num_stitches for block in self)
@property
def bounding_box(self):
color_block_bounding_boxes = [cb.bounding_box for cb in self]
minx = min(bb[0] for bb in color_block_bounding_boxes)
miny = min(bb[1] for bb in color_block_bounding_boxes)
maxx = max(bb[2] for bb in color_block_bounding_boxes)
maxy = max(bb[3] for bb in color_block_bounding_boxes)
return minx, miny, maxx, maxy
@property
def estimated_thread(self):
thread_meter = sum(block.estimated_thread for block in self) / PIXELS_PER_MM / 1000
return round(thread_meter, 2)
@property
def dimensions(self):
minx, miny, maxx, maxy = self.bounding_box
return (maxx - minx, maxy - miny)
@property
def extents(self):
minx, miny, maxx, maxy = self.bounding_box
return max(-minx, maxx), max(-miny, maxy)
@property
def dimensions_mm(self):
dimensions = self.dimensions
return (dimensions[0] / PIXELS_PER_MM, dimensions[1] / PIXELS_PER_MM)
@property
def last_color_block(self):
if self.color_blocks:
return self.color_blocks[-1]
else:
return None
def make_offsets(self, offsets: List[Point]):
out = StitchPlan()
out.color_blocks = [block.make_offsets(offsets) for block in self]
return out | null |
conditional decorator | # -*- coding: utf-8 -*-
"""
mslib.utils
~~~~~~~~~~~~~~
Collection of utility routines for the Mission Support System.
This file is part of MSS.
:copyright: Copyright 2008-2014 Deutsches Zentrum fuer Luft- und Raumfahrt e.V.
:copyright: Copyright 2011-2014 Marc Rautenhaus (mr)
:copyright: Copyright 2016-2023 by the MSS team, see AUTHORS.
:license: APACHE-2.0, see LICENSE for details.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
import subprocess
def subprocess_startupinfo():
"""
config options to hide windows terminals on subprocess call
"""
startupinfo = None
if os.name == 'nt':
# thx to https://gist.github.com/nitely/3862493
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = subprocess.CREATE_NEW_CONSOLE | subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
return startupinfo
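# Typical call site (an assumed example, not from this module):
#   subprocess.Popen(cmd, startupinfo=subprocess_startupinfo())
# so that no console window flashes up on Windows.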
class FatalUserError(Exception):
def __init__(self, error_string):
logging.debug("%s", error_string)
def setup_logging(args):
logger = logging.getLogger()
# this is necessary as "someone" has already initialized logging, preventing basicConfig from doing stuff
for ch in logger.handlers:
logger.removeHandler(ch)
debug_formatter = logging.Formatter("%(asctime)s (%(module)s.%(funcName)s:%(lineno)s): %(levelname)s: %(message)s")
default_formatter = logging.Formatter("%(levelname)s: %(message)s")
# Console handler (suppress DEBUG by default)
ch = logging.StreamHandler()
if args.debug:
logger.setLevel(logging.DEBUG)
ch.setLevel(logging.DEBUG)
ch.setFormatter(debug_formatter)
else:
logger.setLevel(logging.INFO)
ch.setLevel(logging.INFO)
ch.setFormatter(default_formatter)
logger.addHandler(ch)
# File handler (always on DEBUG level)
# TODO: Change this to write to a rotating log handler (so that the file size
# is kept constant). (mr, 2011-02-25)
if args.logfile:
logfile = args.logfile
try:
fh = logging.FileHandler(logfile, "w")
except (OSError, IOError) as ex:
logger.error("Could not open logfile '%s': %s %s", logfile, type(ex), ex)
else:
logger.setLevel(logging.DEBUG)
fh.setLevel(logging.DEBUG)
fh.setFormatter(debug_formatter)
logger.addHandler(fh)
# modified Version from minidom, https://github.com/python/cpython/blob/2.7/Lib/xml/dom/minidom.py
# MSS needed to change all writings as unicode not str
from xml.dom.minidom import _write_data, Node
# Copyright © 2001-2018 Python Software Foundation. All rights reserved.
# Copyright © 2000 BeOpen.com. All rights reserved.
def writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent + "<" + self.tagName)
attrs = self._get_attributes()
for a_name in sorted(attrs.keys()):
writer.write(" %s=\"" % a_name)
_write_data(writer, attrs[a_name].value)
writer.write("\"")
if self.childNodes:
writer.write(">")
if (len(self.childNodes) == 1 and self.childNodes[0].nodeType == Node.TEXT_NODE):
self.childNodes[0].writexml(writer, '', '', '')
else:
writer.write(newl)
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write(indent)
writer.write("</%s>%s" % (self.tagName, newl))
else:
writer.write("/>%s" % (newl))
def METHOD_NAME(dec, condition):
def decorator(func):
if not condition:
# Return the function unchanged, not decorated.
return func
return dec(func)
return decorator
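# Minimal usage sketch (assumed example; the decorator and flag names are hypothetical):
# apply a decorator only when a condition is met.
#
#   @METHOD_NAME(some_profiling_decorator, condition=DEBUG)
#   def expensive_call():
#       ...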
def prefix_route(route_function, prefix='', mask='{0}{1}'):
'''
https://stackoverflow.com/questions/18967441/add-a-prefix-to-all-flask-routes/18969161#18969161
Defines a new route function with a prefix.
The mask argument is a `format string` formatted with, in that order:
prefix, route
'''
def newroute(route, *args, **kwargs):
''' prefix route '''
return route_function(mask.format(prefix, route), *args, **kwargs)
return newroute | null |
test supported single version | import pytest
from pip._vendor.packaging.tags import Tag
from pip._internal.exceptions import InvalidWheelFilename
from pip._internal.models.wheel import Wheel
from pip._internal.utils import compatibility_tags
class TestWheelFile:
def test_std_wheel_pattern(self) -> None:
w = Wheel("simple-1.1.1-py2-none-any.whl")
assert w.name == "simple"
assert w.version == "1.1.1"
assert w.pyversions == ["py2"]
assert w.abis == ["none"]
assert w.plats == ["any"]
def test_wheel_pattern_multi_values(self) -> None:
w = Wheel("simple-1.1-py2.py3-abi1.abi2-any.whl")
assert w.name == "simple"
assert w.version == "1.1"
assert w.pyversions == ["py2", "py3"]
assert w.abis == ["abi1", "abi2"]
assert w.plats == ["any"]
def test_wheel_with_build_tag(self) -> None:
# pip doesn't do anything with build tags, but theoretically, we might
# see one, in this case the build tag = '4'
w = Wheel("simple-1.1-4-py2-none-any.whl")
assert w.name == "simple"
assert w.version == "1.1"
assert w.pyversions == ["py2"]
assert w.abis == ["none"]
assert w.plats == ["any"]
def test_single_digit_version(self) -> None:
w = Wheel("simple-1-py2-none-any.whl")
assert w.version == "1"
def test_non_pep440_version(self) -> None:
w = Wheel("simple-_invalid_-py2-none-any.whl")
assert w.version == "-invalid-"
def test_missing_version_raises(self) -> None:
with pytest.raises(InvalidWheelFilename):
Wheel("Cython-cp27-none-linux_x86_64.whl")
def test_invalid_filename_raises(self) -> None:
with pytest.raises(InvalidWheelFilename):
Wheel("invalid.whl")
def METHOD_NAME(self) -> None:
"""
Test single-version wheel is known to be supported
"""
w = Wheel("simple-0.1-py2-none-any.whl")
assert w.supported(tags=[Tag("py2", "none", "any")])
def test_supported_multi_version(self) -> None:
"""
Test multi-version wheel is known to be supported
"""
w = Wheel("simple-0.1-py2.py3-none-any.whl")
assert w.supported(tags=[Tag("py3", "none", "any")])
def test_not_supported_version(self) -> None:
"""
Test unsupported wheel is known to be unsupported
"""
w = Wheel("simple-0.1-py2-none-any.whl")
assert not w.supported(tags=[Tag("py1", "none", "any")])
def test_supported_osx_version(self) -> None:
"""
Wheels built for macOS 10.6 are supported on 10.9
"""
tags = compatibility_tags.get_supported(
"27", platforms=["macosx_10_9_intel"], impl="cp"
)
w = Wheel("simple-0.1-cp27-none-macosx_10_6_intel.whl")
assert w.supported(tags=tags)
w = Wheel("simple-0.1-cp27-none-macosx_10_9_intel.whl")
assert w.supported(tags=tags)
def test_not_supported_osx_version(self) -> None:
"""
Wheels built for macOS 10.9 are not supported on 10.6
"""
tags = compatibility_tags.get_supported(
"27", platforms=["macosx_10_6_intel"], impl="cp"
)
w = Wheel("simple-0.1-cp27-none-macosx_10_9_intel.whl")
assert not w.supported(tags=tags)
def test_supported_multiarch_darwin(self) -> None:
"""
Multi-arch wheels (intel) are supported on components (i386, x86_64)
"""
universal = compatibility_tags.get_supported(
"27", platforms=["macosx_10_5_universal"], impl="cp"
)
intel = compatibility_tags.get_supported(
"27", platforms=["macosx_10_5_intel"], impl="cp"
)
x64 = compatibility_tags.get_supported(
"27", platforms=["macosx_10_5_x86_64"], impl="cp"
)
i386 = compatibility_tags.get_supported(
"27", platforms=["macosx_10_5_i386"], impl="cp"
)
ppc = compatibility_tags.get_supported(
"27", platforms=["macosx_10_5_ppc"], impl="cp"
)
ppc64 = compatibility_tags.get_supported(
"27", platforms=["macosx_10_5_ppc64"], impl="cp"
)
w = Wheel("simple-0.1-cp27-none-macosx_10_5_intel.whl")
assert w.supported(tags=intel)
assert w.supported(tags=x64)
assert w.supported(tags=i386)
assert not w.supported(tags=universal)
assert not w.supported(tags=ppc)
assert not w.supported(tags=ppc64)
w = Wheel("simple-0.1-cp27-none-macosx_10_5_universal.whl")
assert w.supported(tags=universal)
assert w.supported(tags=intel)
assert w.supported(tags=x64)
assert w.supported(tags=i386)
assert w.supported(tags=ppc)
assert w.supported(tags=ppc64)
def test_not_supported_multiarch_darwin(self) -> None:
"""
Single-arch wheels (x86_64) are not supported on multi-arch (intel)
"""
universal = compatibility_tags.get_supported(
"27", platforms=["macosx_10_5_universal"], impl="cp"
)
intel = compatibility_tags.get_supported(
"27", platforms=["macosx_10_5_intel"], impl="cp"
)
w = Wheel("simple-0.1-cp27-none-macosx_10_5_i386.whl")
assert not w.supported(tags=intel)
assert not w.supported(tags=universal)
w = Wheel("simple-0.1-cp27-none-macosx_10_5_x86_64.whl")
assert not w.supported(tags=intel)
assert not w.supported(tags=universal)
def test_support_index_min(self) -> None:
"""
Test results from `support_index_min`
"""
tags = [
Tag("py2", "none", "TEST"),
Tag("py2", "TEST", "any"),
Tag("py2", "none", "any"),
]
w = Wheel("simple-0.1-py2-none-any.whl")
assert w.support_index_min(tags=tags) == 2
w = Wheel("simple-0.1-py2-none-TEST.whl")
assert w.support_index_min(tags=tags) == 0
def test_support_index_min__none_supported(self) -> None:
"""
Test a wheel not supported by the given tags.
"""
w = Wheel("simple-0.1-py2-none-any.whl")
with pytest.raises(ValueError):
w.support_index_min(tags=[])
def test_version_underscore_conversion(self) -> None:
"""
Test that we convert '_' to '-' for versions parsed out of wheel
filenames
"""
w = Wheel("simple-0.1_1-py2-none-any.whl")
assert w.version == "0.1-1" | null |
is defined in parent | import json
import re
from pathlib import Path
from typing import Iterator
from ..diagnostic import Diagnostic
from .linter import Linter
from ..replacement import Replacement
class Definition(Linter):
""" Finds issues in definition files, such as overriding default parameters """
def __init__(self, file: Path, settings: dict) -> None:
super().__init__(file, settings)
self._definitions = {}
self._loadDefinitionFiles(file)
self._content = self._file.read_text()
self._loadBasePrinterSettings()
@property
def base_def(self):
if "fdmextruder" in self._definitions:
return "fdmextruder"
return "fdmprinter"
def check(self) -> Iterator[Diagnostic]:
if self._settings["checks"].get("diagnostic-definition-redundant-override", False):
for check in self.checkRedefineOverride():
yield check
# Add other which will yield Diagnostic's
# TODO: A check to determine if the user-set value is within the min and max values defined in the parent and doesn't trigger a warning
# TODO: A check if the key exists in the first place
# TODO: Check if the model platform exists
yield
def checkRedefineOverride(self) -> Iterator[Diagnostic]:
""" Checks if definition file overrides its parents settings with the same value. """
definition_name = list(self._definitions.keys())[0]
definition = self._definitions[definition_name]
if "overrides" in definition and definition_name not in ("fdmprinter", "fdmextruder"):
for key, value_dict in definition["overrides"].items():
is_redefined, child_key, child_value, parent = self.METHOD_NAME(key, value_dict, definition['inherits'])
if is_redefined:
redefined = re.compile(r'.*(\"' + key + r'\"[\s\:\S]*?)\{[\s\S]*?\},?')
found = redefined.search(self._content)
# TODO: Figure out a way to support multiline fixes in the PR review GH Action, for now suggest no fix to ensure no ill-formed json are created
# see: https://github.com/platisd/clang-tidy-pr-comments/issues/37
if len(found.group().splitlines()) > 1:
replacements = []
else:
replacements = [Replacement(
file = self._file,
offset = found.span(1)[0],
length = len(found.group()),
replacement_text = "")]
yield Diagnostic(
file = self._file,
diagnostic_name = "diagnostic-definition-redundant-override",
message = f"Overriding {key} with the same value ({child_key}: {child_value}) as defined in parent definition: {definition['inherits']}",
level = "Warning",
offset = found.span(0)[0],
replacements = replacements
)
def _loadDefinitionFiles(self, definition_file) -> None:
""" Loads definition file contents into self._definitions. Also load parent definition if it exists. """
definition_name = Path(definition_file.stem).stem
if not definition_file.exists() or definition_name in self._definitions:
return
# Load definition file into dictionary
self._definitions[definition_name] = json.loads(definition_file.read_text())
# Load parent definition if it exists
if "inherits" in self._definitions[definition_name]:
if self._definitions[definition_name]['inherits'] in ("fdmextruder", "fdmprinter"):
parent_file = definition_file.parent.parent.joinpath("definitions", f"{self._definitions[definition_name]['inherits']}.def.json")
else:
parent_file = definition_file.parent.joinpath(f"{self._definitions[definition_name]['inherits']}.def.json")
self._loadDefinitionFiles(parent_file)
def METHOD_NAME(self, key, value_dict, inherits_from):
if self._ignore(key, "diagnostic-definition-redundant-override"):
return False, None, None, None
if "overrides" not in self._definitions[inherits_from]:
return self.METHOD_NAME(key, value_dict, self._definitions[inherits_from]["inherits"])
parent = self._definitions[inherits_from]["overrides"]
if key not in self._definitions[self.base_def]["overrides"]:
is_number = False
else:
is_number = self._definitions[self.base_def]["overrides"][key]["type"] in ("float", "int")
for child_key, child_value in value_dict.items():
if key in parent:
if child_key in ("default_value", "value"):
check_values = [cv for cv in [parent[key].get("default_value", None), parent[key].get("value", None)] if cv is not None]
else:
check_values = [parent[key].get(child_key, None)]
for check_value in check_values:
if is_number and child_key in ("default_value", "value"):
try:
v = str(float(child_value))
except:
v = child_value
try:
cv = str(float(check_value))
except:
cv = check_value
else:
v = child_value
cv = check_value
if v == cv:
return True, child_key, child_value, parent
if "inherits" in parent:
return self.METHOD_NAME(key, value_dict, parent["inherits"])
return False, None, None, None
def _loadBasePrinterSettings(self):
settings = {}
for k, v in self._definitions[self.base_def]["settings"].items():
self._getSetting(k, v, settings)
self._definitions[self.base_def] = {"overrides": settings}
def _getSetting(self, name, setting, settings) -> None:
if "children" in setting:
for childname, child in setting["children"].items():
self._getSetting(childname, child, settings)
settings |= {name: setting}
def _ignore(self, key: dict, type_of_check: str) -> bool:
if f"{type_of_check}-ignore" in self._settings:
filters = [re.compile(f) for f in self._settings[f"{type_of_check}-ignore"]]
for f in filters:
if f.match(key):
return True
return False | null |
endheaders | import email.message
import io
import ssl
import sys
import types
from _typeshed import ReadableBuffer, SupportsRead, WriteableBuffer
from collections.abc import Callable, Iterable, Iterator, Mapping
from socket import socket
from typing import Any, BinaryIO, TypeVar, overload
from typing_extensions import Self, TypeAlias
__all__ = [
"HTTPResponse",
"HTTPConnection",
"HTTPException",
"NotConnected",
"UnknownProtocol",
"UnknownTransferEncoding",
"UnimplementedFileMode",
"IncompleteRead",
"InvalidURL",
"ImproperConnectionState",
"CannotSendRequest",
"CannotSendHeader",
"ResponseNotReady",
"BadStatusLine",
"LineTooLong",
"RemoteDisconnected",
"error",
"responses",
"HTTPSConnection",
]
_DataType: TypeAlias = SupportsRead[bytes] | Iterable[ReadableBuffer] | ReadableBuffer
_T = TypeVar("_T")
HTTP_PORT: int
HTTPS_PORT: int
CONTINUE: int
SWITCHING_PROTOCOLS: int
PROCESSING: int
OK: int
CREATED: int
ACCEPTED: int
NON_AUTHORITATIVE_INFORMATION: int
NO_CONTENT: int
RESET_CONTENT: int
PARTIAL_CONTENT: int
MULTI_STATUS: int
IM_USED: int
MULTIPLE_CHOICES: int
MOVED_PERMANENTLY: int
FOUND: int
SEE_OTHER: int
NOT_MODIFIED: int
USE_PROXY: int
TEMPORARY_REDIRECT: int
BAD_REQUEST: int
UNAUTHORIZED: int
PAYMENT_REQUIRED: int
FORBIDDEN: int
NOT_FOUND: int
METHOD_NOT_ALLOWED: int
NOT_ACCEPTABLE: int
PROXY_AUTHENTICATION_REQUIRED: int
REQUEST_TIMEOUT: int
CONFLICT: int
GONE: int
LENGTH_REQUIRED: int
PRECONDITION_FAILED: int
REQUEST_ENTITY_TOO_LARGE: int
REQUEST_URI_TOO_LONG: int
UNSUPPORTED_MEDIA_TYPE: int
REQUESTED_RANGE_NOT_SATISFIABLE: int
EXPECTATION_FAILED: int
UNPROCESSABLE_ENTITY: int
LOCKED: int
FAILED_DEPENDENCY: int
UPGRADE_REQUIRED: int
PRECONDITION_REQUIRED: int
TOO_MANY_REQUESTS: int
REQUEST_HEADER_FIELDS_TOO_LARGE: int
INTERNAL_SERVER_ERROR: int
NOT_IMPLEMENTED: int
BAD_GATEWAY: int
SERVICE_UNAVAILABLE: int
GATEWAY_TIMEOUT: int
HTTP_VERSION_NOT_SUPPORTED: int
INSUFFICIENT_STORAGE: int
NOT_EXTENDED: int
NETWORK_AUTHENTICATION_REQUIRED: int
responses: dict[int, str]
class HTTPMessage(email.message.Message):
def getallmatchingheaders(self, name: str) -> list[str]: ... # undocumented
def parse_headers(fp: io.BufferedIOBase, _class: Callable[[], email.message.Message] = ...) -> HTTPMessage: ...
class HTTPResponse(io.BufferedIOBase, BinaryIO): # type: ignore[misc] # incompatible method definitions in the base classes
msg: HTTPMessage
headers: HTTPMessage
version: int
debuglevel: int
fp: io.BufferedReader
closed: bool
status: int
reason: str
chunked: bool
chunk_left: int | None
length: int | None
will_close: bool
# url is set on instances of the class in urllib.request.AbstractHTTPHandler.do_open
# to match urllib.response.addinfourl's interface.
# It's not set in HTTPResponse.__init__ or any other method on the class
url: str
def __init__(self, sock: socket, debuglevel: int = 0, method: str | None = None, url: str | None = None) -> None: ...
def peek(self, n: int = -1) -> bytes: ...
def read(self, amt: int | None = None) -> bytes: ...
def read1(self, n: int = -1) -> bytes: ...
def readinto(self, b: WriteableBuffer) -> int: ...
def readline(self, limit: int = -1) -> bytes: ... # type: ignore[override]
@overload
def getheader(self, name: str) -> str | None: ...
@overload
def getheader(self, name: str, default: _T) -> str | _T: ...
def getheaders(self) -> list[tuple[str, str]]: ...
def isclosed(self) -> bool: ...
def __iter__(self) -> Iterator[bytes]: ...
def __enter__(self) -> Self: ...
def __exit__(
self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: types.TracebackType | None
) -> None: ...
def info(self) -> email.message.Message: ...
def geturl(self) -> str: ...
def getcode(self) -> int: ...
def begin(self) -> None: ...
class HTTPConnection:
auto_open: int # undocumented
debuglevel: int
default_port: int # undocumented
response_class: type[HTTPResponse] # undocumented
timeout: float | None
host: str
port: int
sock: socket | Any # can be `None` if `.connect()` was not called
def __init__(
self,
host: str,
port: int | None = None,
timeout: float | None = ...,
source_address: tuple[str, int] | None = None,
blocksize: int = 8192,
) -> None: ...
def request(
self,
method: str,
url: str,
body: _DataType | str | None = None,
headers: Mapping[str, str] = {},
*,
encode_chunked: bool = False,
) -> None: ...
def getresponse(self) -> HTTPResponse: ...
def set_debuglevel(self, level: int) -> None: ...
def set_tunnel(self, host: str, port: int | None = None, headers: Mapping[str, str] | None = None) -> None: ...
def connect(self) -> None: ...
def close(self) -> None: ...
def putrequest(self, method: str, url: str, skip_host: bool = False, skip_accept_encoding: bool = False) -> None: ...
def putheader(self, header: str, *argument: str) -> None: ...
def METHOD_NAME(self, message_body: _DataType | None = None, *, encode_chunked: bool = False) -> None: ...
def send(self, data: _DataType | str) -> None: ...
class HTTPSConnection(HTTPConnection):
# Can be `None` if `.connect()` was not called:
sock: ssl.SSLSocket | Any
if sys.version_info >= (3, 12):
def __init__(
self,
host: str,
port: str | None = None,
*,
timeout: float | None = ...,
source_address: tuple[str, int] | None = None,
context: ssl.SSLContext | None = None,
blocksize: int = 8192,
) -> None: ...
else:
def __init__(
self,
host: str,
port: int | None = None,
key_file: str | None = None,
cert_file: str | None = None,
timeout: float | None = ...,
source_address: tuple[str, int] | None = None,
*,
context: ssl.SSLContext | None = None,
check_hostname: bool | None = None,
blocksize: int = 8192,
) -> None: ...
class HTTPException(Exception): ...
error = HTTPException
class NotConnected(HTTPException): ...
class InvalidURL(HTTPException): ...
class UnknownProtocol(HTTPException):
def __init__(self, version: str) -> None: ...
class UnknownTransferEncoding(HTTPException): ...
class UnimplementedFileMode(HTTPException): ...
class IncompleteRead(HTTPException):
def __init__(self, partial: bytes, expected: int | None = None) -> None: ...
partial: bytes
expected: int | None
class ImproperConnectionState(HTTPException): ...
class CannotSendRequest(ImproperConnectionState): ...
class CannotSendHeader(ImproperConnectionState): ...
class ResponseNotReady(ImproperConnectionState): ...
class BadStatusLine(HTTPException):
def __init__(self, line: str) -> None: ...
class LineTooLong(HTTPException):
def __init__(self, line_type: str) -> None: ...
class RemoteDisconnected(ConnectionResetError, BadStatusLine): ... | null |
ci client | import io
import pytest
from typing import Iterator
from dlt.common.schema import Schema
from dlt.common.configuration.container import Container
from dlt.common.configuration.specs.config_section_context import ConfigSectionContext
from dlt.common.utils import uniq_id
from dlt.destinations import weaviate
from dlt.destinations.weaviate.exceptions import PropertyNameConflict
from dlt.destinations.weaviate.weaviate_client import WeaviateClient
from dlt.common.storages.file_storage import FileStorage
from dlt.common.schema.utils import new_table
from tests.load.utils import TABLE_ROW_ALL_DATA_TYPES, TABLE_UPDATE, TABLE_UPDATE_COLUMNS_SCHEMA, expect_load_file, write_dataset
from tests.utils import TEST_STORAGE_ROOT
from .utils import drop_active_pipeline_data
@pytest.fixture(autouse=True)
def drop_weaviate_schema() -> None:
yield
drop_active_pipeline_data()
def get_client_instance(schema: Schema) -> WeaviateClient:
config = weaviate.spec()()
config.dataset_name = "ClientTest" + uniq_id()
with Container().injectable_context(ConfigSectionContext(sections=('destination', 'weaviate'))):
return weaviate.client(schema, config) # type: ignore[return-value]
@pytest.fixture(scope='function')
def client() -> Iterator[WeaviateClient]:
yield from make_client("naming")
@pytest.fixture(scope='function')
def METHOD_NAME() -> Iterator[WeaviateClient]:
yield from make_client("ci_naming")
def make_client(naming_convention: str) -> Iterator[WeaviateClient]:
schema = Schema('test_schema', {
'names': f"dlt.destinations.weaviate.{naming_convention}",
'json': None
})
_client = get_client_instance(schema)
try:
yield _client
finally:
_client.drop_storage()
@pytest.fixture
def file_storage() -> FileStorage:
return FileStorage(TEST_STORAGE_ROOT, file_type="b", makedirs=True)
@pytest.mark.parametrize('write_disposition', ["append", "replace", "merge"])
def test_all_data_types(client: WeaviateClient, write_disposition: str, file_storage: FileStorage) -> None:
class_name = "AllTypes"
# we should have identical content with all disposition types
client.schema.update_schema(new_table(class_name, write_disposition=write_disposition, columns=TABLE_UPDATE))
client.schema.bump_version()
client.update_stored_schema()
# write row
with io.BytesIO() as f:
write_dataset(client, f, [TABLE_ROW_ALL_DATA_TYPES], TABLE_UPDATE_COLUMNS_SCHEMA)
query = f.getvalue().decode()
expect_load_file(client, file_storage, query, class_name)
_, table_columns = client.get_storage_table("AllTypes")
# for now check if all columns are there
assert len(table_columns) == len(TABLE_UPDATE_COLUMNS_SCHEMA)
for col_name in table_columns:
assert col_name in TABLE_UPDATE_COLUMNS_SCHEMA
if TABLE_UPDATE_COLUMNS_SCHEMA[col_name]["data_type"] in ["decimal", "complex", "time"]:
# no native representation
assert table_columns[col_name]["data_type"] == "text"
elif TABLE_UPDATE_COLUMNS_SCHEMA[col_name]["data_type"] == "wei":
assert table_columns[col_name]["data_type"] == "double"
elif TABLE_UPDATE_COLUMNS_SCHEMA[col_name]["data_type"] == "date":
assert table_columns[col_name]["data_type"] == "timestamp"
else:
assert table_columns[col_name]["data_type"] == TABLE_UPDATE_COLUMNS_SCHEMA[col_name]["data_type"]
def test_case_sensitive_properties_create(client: WeaviateClient) -> None:
class_name = "col_class"
# we have two properties which will map to the same name in Weaviate
table_create = [
{
"name": "col1",
"data_type": "bigint",
"nullable": False
},
{
"name": "coL1",
"data_type": "double",
"nullable": False
},
]
client.schema.update_schema(client.schema.normalize_table_identifiers(new_table(class_name, columns=table_create)))
client.schema.bump_version()
with pytest.raises(PropertyNameConflict):
client.update_stored_schema()
def test_case_insensitive_properties_create(METHOD_NAME: WeaviateClient) -> None:
class_name = "col_class"
# we have two properties which will map to the same name in Weaviate
table_create = [
{
"name": "col1",
"data_type": "bigint",
"nullable": False
},
{
"name": "coL1",
"data_type": "double",
"nullable": False
},
]
METHOD_NAME.schema.update_schema(METHOD_NAME.schema.normalize_table_identifiers(new_table(class_name, columns=table_create)))
METHOD_NAME.schema.bump_version()
METHOD_NAME.update_stored_schema()
_, table_columns = METHOD_NAME.get_storage_table("ColClass")
    # the later column overwrites the earlier one, so the resulting type is double
assert table_columns == {'col1': {'name': 'col1', 'data_type': 'double'}}
def test_case_sensitive_properties_add(client: WeaviateClient) -> None:
class_name = "col_class"
# we have two properties which will map to the same name in Weaviate
table_create = [{
"name": "col1",
"data_type": "bigint",
"nullable": False
}]
table_update = [{
"name": "coL1",
"data_type": "double",
"nullable": False
},
]
client.schema.update_schema(
client.schema.normalize_table_identifiers(new_table(class_name, columns=table_create))
)
client.schema.bump_version()
client.update_stored_schema()
client.schema.update_schema(
client.schema.normalize_table_identifiers(new_table(class_name, columns=table_update))
)
client.schema.bump_version()
with pytest.raises(PropertyNameConflict):
client.update_stored_schema()
# _, table_columns = client.get_storage_table("ColClass")
# print(table_columns)
def test_load_case_sensitive_data(client: WeaviateClient, file_storage: FileStorage) -> None:
class_name = "col_class"
# we have two properties which will map to the same name in Weaviate
table_create = {"col1":
{
"name": "col1",
"data_type": "bigint",
"nullable": False
}}
client.schema.update_schema(new_table(class_name, columns=[table_create["col1"]]))
client.schema.bump_version()
client.update_stored_schema()
    # prepare a data item where there is a name clash due to Weaviate being case-insensitive (CI)
data_clash = {"col1": 72187328, "coL1": 726171}
# write row
with io.BytesIO() as f:
write_dataset(client, f, [data_clash], table_create)
query = f.getvalue().decode()
with pytest.raises(PropertyNameConflict):
expect_load_file(client, file_storage, query, class_name)
def test_load_case_sensitive_data_ci(METHOD_NAME: WeaviateClient, file_storage: FileStorage) -> None:
class_name = "col_class"
# we have two properties which will map to the same name in Weaviate
table_create = {"col1":
{
"name": "col1",
"data_type": "bigint",
"nullable": False
}}
METHOD_NAME.schema.update_schema(new_table(class_name, columns=[table_create["col1"]]))
METHOD_NAME.schema.bump_version()
METHOD_NAME.update_stored_schema()
    # prepare a data item where there is a name clash due to Weaviate being case-insensitive (CI),
    # but here we normalize the item first
data_clash = list(
METHOD_NAME.schema.normalize_data_item({"col1": 72187328, "coL1": 726171}, "_load_id_", "col_class")
)[0][1]
# write row
with io.BytesIO() as f:
write_dataset(METHOD_NAME, f, [data_clash], table_create)
query = f.getvalue().decode()
expect_load_file(METHOD_NAME, file_storage, query, class_name)
response = METHOD_NAME.query_class(class_name, ["col1"]).do()
objects = response["data"]["Get"][METHOD_NAME.make_qualified_class_name(class_name)]
# the latter of conflicting fields is stored (so data is lost)
assert objects == [{'col1': 726171}] | null |
get pixel | # Copyright 1996-2023 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# References:
# - http://paulbourke.net/dataformats/pic/
# - https://github.com/plepers/hdr2png/blob/master/hdrloader.cpp
# - https://github.com/enkimute/hdrpng.js/blob/master/hdrpng.js
import math
import re
import sys
from utils.range import clamp_int
assert sys.version_info >= (3, 0), 'Python 3 is required to run this script.'
GAMMA = 2.0
class HDR:
@classmethod
def load_from_file(cls, filename):
"""Parse the HDR file."""
# HDR Format Specifications: http://paulbourke.net/dataformats/pic/
#
# Typical header:
# #?RADIANCE
# SOFTWARE=gegl 0.4.12
# FORMAT=32-bit_rle_rgbe
#
# -Y 1024 +X 2048
# Data
hdr = HDR()
data = []
header = False
with open(filename, "rb") as f:
while True:
line = ''
c = f.read(1).decode('ascii')
while c != '\n':
line += c
c = f.read(1).decode('ascii')
# Case: Empty lines
if line == '' or (len(line) == 1 and ord(line[0]) == 10):
continue
# Case: header
m = re.match(r'^#\?RADIANCE$', line)
if m:
header = True
continue
# Case: Size
m = re.match(r'^(.)(.)\s(\d+)\s(.)(.)\s(\d+)$', line)
if m:
hdr.rotated = m.group(2) == 'X'
hdr.xFlipped = m.group(1 if hdr.rotated else 4) == '-'
hdr.yFlipped = m.group(4 if hdr.rotated else 1) == '+'
hdr.width = int(m.group(6))
hdr.height = int(m.group(3))
break
# Case: ignored header entries
if line.startswith('FORMAT=') or \
line.startswith('EXPOSURE=') or \
line.startswith('COLORCORR=') or \
line.startswith('SOFTWARE=') or \
line.startswith('PIXASPECT=') or \
line.startswith('VIEW=') or \
line.startswith('PRIMARIES=') or \
line.startswith('GAMMA=') or \
line.startswith('# '):
continue
break
# Case: Data
data = f.read()
assert header, 'Invalid header.'
assert 4 * hdr.width * hdr.height == len(data) and len(data) > 0, \
            'Invalid dimensions (expected 4x%dx%d bytes, got %d)' % (hdr.width, hdr.height, len(data))
assert not (hdr.rotated or hdr.xFlipped or hdr.yFlipped), 'Flip or rotation flags are not supported.'
# Convert data to floats
hdr.data = [0.0] * (3 * hdr.width * hdr.height)
for i in range(hdr.width * hdr.height):
r = float(data[4 * i])
g = float(data[4 * i + 1])
b = float(data[4 * i + 2])
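            # RGBE decode: the fourth byte is a shared exponent biased by 128; apply it to all three
            # channels, then gamma-encode and scale into [0, 1].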
e = pow(2.0, float(data[4 * i + 3]) - 128.0 + 8.0)
hdr.data[3 * i] = pow(r * e, 1.0 / GAMMA) / 255.0
hdr.data[3 * i + 1] = pow(g * e, 1.0 / GAMMA) / 255.0
hdr.data[3 * i + 2] = pow(b * e, 1.0 / GAMMA) / 255.0
return hdr
@classmethod
def create_black_image(cls, width, height):
"""Create an HDR black image."""
hdr = HDR()
hdr.width = width
hdr.height = height
hdr.data = [0.0] * (3 * hdr.width * hdr.height)
return hdr
def __init__(self):
"""Constructor: simply reset the fields. Prefer the static methods."""
self.data = [] # Contains the 1D array of floats (size: 3*w*h, black: 0.0, white: 1.0, hdr: >1.0)
self.width = -1
self.height = -1
self.xFlipped = False
self.yFlipped = False
self.rotated = False
def is_valid(self):
"""Return True if the image has been loaded correctly."""
return 3 * self.width * self.height == len(self.data)
def METHOD_NAME(self, x, y):
"""Get pixel at the speficied position."""
assert x >= 0 and x < self.width
assert y >= 0 and y < self.height
i = 3 * (y * self.width + x)
return (
self.data[i],
self.data[i + 1],
self.data[i + 2]
)
def set_pixel(self, x, y, pixel):
"""Set pixel at the speficied position."""
assert x >= 0 and x < self.width
assert y >= 0 and y < self.height
i = 3 * (y * self.width + x)
self.data[i] = pixel[0]
self.data[i + 1] = pixel[1]
self.data[i + 2] = pixel[2]
def clamp(self, threshold):
"""Clamp all the floats to some value."""
assert self.is_valid()
t = pow(threshold, 1.0 / GAMMA)
for i in range(3 * self.width * self.height):
self.data[i] = t if self.data[i] > t else self.data[i]
def save(self, filename):
"""Save the image to a file."""
assert self.is_valid()
assert filename.endswith('.hdr')
assert not (self.rotated or self.xFlipped or self.yFlipped), 'Flip or rotation flags are not supported.'
with open(filename, "wb") as f:
f.write('#?RADIANCE\n'.encode('ascii'))
f.write('FORMAT=32-bit_rle_rgbe\n'.encode('ascii'))
f.write('\n'.encode('ascii'))
f.write(('-Y %d +X %d\n' % (self.height, self.width)).encode('ascii'))
for i in range(self.width * self.height):
r = pow(self.data[3 * i], GAMMA)
g = pow(self.data[3 * i + 1], GAMMA)
b = pow(self.data[3 * i + 2], GAMMA)
v = max(r, g, b)
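                # RGBE encode: choose a shared exponent that covers the brightest channel, scale the
                # mantissas by 2^(e-8), and store the exponent biased by 128 in the fourth byte.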
e = math.ceil(math.log(v, 2)) if v != 0.0 else 0.0
s = pow(2, e - 8)
arr = [
clamp_int(r / s, 0, 255),
clamp_int(g / s, 0, 255),
clamp_int(b / s, 0, 255),
clamp_int(e + 128, 0, 255)
]
f.write(bytes(arr))
def to_pil(self):
"""Create a PIL image to test the script."""
assert self.is_valid()
from PIL import Image
im = Image.new('RGB', (self.width, self.height))
pixels = im.load()
for y in range(self.height):
for x in range(self.width):
i = 3 * (y * self.width + x)
r = clamp_int(255.0 * self.data[i], 0, 255)
g = clamp_int(255.0 * self.data[i + 1], 0, 255)
b = clamp_int(255.0 * self.data[i + 2], 0, 255)
pixels[x, y] = (r, g, b)
return im | null |
do finalize | #
# This file is part of LiteX-Boards.
#
# Copyright (c) 2021 Lucas Teske <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
# The Muselab IceSugar Pro PCB and IOs have been documented by @wuxx
# https://github.com/wuxx/icesugar-pro
from litex.build.generic_platform import *
from litex.build.lattice import LatticeECP5Platform
from litex.build.lattice.programmer import EcpDapProgrammer
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk
("clk25", 0, Pins("P6"), IOStandard("LVCMOS33")),
# Led
("user_led_n", 0, Pins("B11"), IOStandard("LVCMOS33")), # Red
("user_led_n", 1, Pins("A11"), IOStandard("LVCMOS33")), # Green
("user_led_n", 2, Pins("A12"), IOStandard("LVCMOS33")), # Blue
("rgb_led", 0,
Subsignal("r", Pins("B11")),
Subsignal("g", Pins("A11")),
Subsignal("b", Pins("A12")),
IOStandard("LVCMOS33"),
),
# Reset button
("cpu_reset_n", 0, Pins("L14"), IOStandard("LVCMOS33"), Misc("PULLMODE=UP")),
# Serial
("serial", 0, # iCELink
Subsignal("tx", Pins("B9")),
Subsignal("rx", Pins("A9")),
IOStandard("LVCMOS33")
),
# SPIFlash (W25Q256JV (32MB))
("spiflash", 0,
Subsignal("cs_n", Pins("N8")),
# https://github.com/m-labs/nmigen-boards/pull/38
#Subsignal("clk", Pins("")), driven through USRMCLK
Subsignal("mosi", Pins("T8")),
Subsignal("miso", Pins("T7")),
IOStandard("LVCMOS33"),
),
# SDRAM (IS42S16160B (32MB))
("sdram_clock", 0, Pins("R15"), IOStandard("LVCMOS33")),
("sdram", 0,
Subsignal("a", Pins(
"H15 B13 B12 J16 J15 R12 K16 R13",
"T13 K15 A13 R14 T14")),
Subsignal("dq", Pins(
"F16 E15 F15 D14 E16 C15 D16 B15",
"R16 P16 P15 N16 N14 M16 M15 L15")),
Subsignal("we_n", Pins("A15")),
Subsignal("ras_n", Pins("B16")),
Subsignal("cas_n", Pins("G16")),
Subsignal("cs_n", Pins("A14")),
Subsignal("cke", Pins("L16")),
Subsignal("ba", Pins("G15 B14")),
Subsignal("dm", Pins("C16 T15")),
IOStandard("LVCMOS33"),
Misc("SLEWRATE=FAST")
),
# SDCard
("spisdcard", 0,
Subsignal("clk", Pins("J12")),
Subsignal("mosi", Pins("H12"), Misc("PULLMODE=UP")),
Subsignal("cs_n", Pins("G12"), Misc("PULLMODE=UP")),
Subsignal("miso", Pins("K12"), Misc("PULLMODE=UP")),
Misc("SLEWRATE=FAST"),
IOStandard("LVCMOS33"),
),
("sdcard", 0,
Subsignal("clk", Pins("J12")),
Subsignal("cmd", Pins("H12"), Misc("PULLMODE=UP")),
Subsignal("data", Pins("K12 L12 F12 G12"), Misc("PULLMODE=UP")),
Misc("SLEWRATE=FAST"),
IOStandard("LVCMOS33")
),
# GPDI
("gpdi", 0,
Subsignal("clk_p", Pins("E2"), IOStandard("LVCMOS33"), Misc("DRIVE=4")),
Subsignal("clk_n", Pins("D3"), IOStandard("LVCMOS33"), Misc("DRIVE=4")),
Subsignal("data0_p", Pins("G1"), IOStandard("LVCMOS33"), Misc("DRIVE=4")),
Subsignal("data0_n", Pins("F1"), IOStandard("LVCMOS33"), Misc("DRIVE=4")),
Subsignal("data1_p", Pins("J1"), IOStandard("LVCMOS33"), Misc("DRIVE=4")),
Subsignal("data1_n", Pins("H2"), IOStandard("LVCMOS33"), Misc("DRIVE=4")),
Subsignal("data2_p", Pins("L1"), IOStandard("LVCMOS33"), Misc("DRIVE=4")),
Subsignal("data2_n", Pins("K2"), IOStandard("LVCMOS33"), Misc("DRIVE=4")),
),
# RMII Ethernet PHY (WaveShare Board)
# Assumed to be modified to be PMOD-compatible (TX1 tied to MDIO)
# Position is P4 header "top half" (toward the GPDI connector)
("eth_clocks", 0,
Subsignal("ref_clk", Pins("D5")),
IOStandard("LVCMOS33"),
),
("eth", 0,
Subsignal("rx_data", Pins("D4 C3")),
Subsignal("crs_dv", Pins("C4")),
Subsignal("tx_en", Pins("E4")),
Subsignal("tx_data", Pins("E3 R7")),
IOStandard("LVCMOS33"),
),
]
# from colorlight_i5.py adapted to icesugar pro
# https://github.com/wuxx/icesugar-pro/blob/master/doc/iCESugar-pro-pinmap.png
_connectors = [
("pmode", "N3 M2 L2 G2 P1 N1 M1 K1"),
("pmodf", "T6 R5 R4 R3 P7 R6 T4 T3"),
]
# Platform -----------------------------------------------------------------------------------------
class Platform(LatticeECP5Platform):
default_clk_name = "clk25"
default_clk_period = 1e9/25e6
def __init__(self, toolchain="trellis"):
device = "LFE5U-25F-6BG256C"
io = _io
connectors = _connectors
LatticeECP5Platform.__init__(self, device, io, connectors=connectors, toolchain=toolchain)
def create_programmer(self):
return EcpDapProgrammer()
def METHOD_NAME(self, fragment):
LatticeECP5Platform.METHOD_NAME(self, fragment)
self.add_period_constraint(self.lookup_request("clk25", loose=True), 1e9/25e6) | null |
extend name | #
# SPDX-License-Identifier: GPL-2.0-only
#
import collections
def get_packages(d):
pkgs = d.getVar("PACKAGES_NONML")
extcls = d.getVar("EXTENDERCLASS")
return extcls.rename_packages_internal(pkgs)
def get_depends(varprefix, d):
extcls = d.getVar("EXTENDERCLASS")
return extcls.map_depends_variable(varprefix + "_NONML")
class ClassExtender(object):
def __init__(self, extname, d):
self.extname = extname
self.d = d
self.pkgs_mapping = []
self.d.setVar("EXTENDERCLASS", self)
def METHOD_NAME(self, name):
if name.startswith("kernel-") or name == "virtual/kernel":
return name
if name.startswith("rtld"):
return name
if name.endswith("-crosssdk"):
return name
if name.endswith("-" + self.extname):
name = name.replace("-" + self.extname, "")
if name.startswith("virtual/"):
subs = name.split("/", 1)[1]
if not subs.startswith(self.extname):
return "virtual/" + self.extname + "-" + subs
return name
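        # Leave absolute paths and unexpanded bitbake variable references untouched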
if name.startswith("/") or (name.startswith("${") and name.endswith("}")):
return name
if not name.startswith(self.extname):
return self.extname + "-" + name
return name
def map_variable(self, varname, setvar = True):
var = self.d.getVar(varname)
if not var:
return ""
var = var.split()
newvar = []
for v in var:
newvar.append(self.METHOD_NAME(v))
newdata = " ".join(newvar)
if setvar:
self.d.setVar(varname, newdata)
return newdata
def map_regexp_variable(self, varname, setvar = True):
var = self.d.getVar(varname)
if not var:
return ""
var = var.split()
newvar = []
for v in var:
if v.startswith("^" + self.extname):
newvar.append(v)
elif v.startswith("^"):
newvar.append("^" + self.extname + "-" + v[1:])
else:
newvar.append(self.METHOD_NAME(v))
newdata = " ".join(newvar)
if setvar:
self.d.setVar(varname, newdata)
return newdata
def map_depends(self, dep):
if dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('cross-canadian' in dep) or ('-crosssdk-' in dep):
return dep
else:
            # Do not extend dependencies that already have a multilib prefix
var = self.d.getVar("MULTILIB_VARIANTS")
if var:
var = var.split()
for v in var:
if dep.startswith(v):
return dep
return self.METHOD_NAME(dep)
def map_depends_variable(self, varname, suffix = ""):
# We need to preserve EXTENDPKGV so it can be expanded correctly later
if suffix:
varname = varname + ":" + suffix
orig = self.d.getVar("EXTENDPKGV", False)
self.d.setVar("EXTENDPKGV", "EXTENDPKGV")
deps = self.d.getVar(varname)
if not deps:
self.d.setVar("EXTENDPKGV", orig)
return
deps = bb.utils.explode_dep_versions2(deps)
newdeps = collections.OrderedDict()
for dep in deps:
newdeps[self.map_depends(dep)] = deps[dep]
if not varname.endswith("_NONML"):
self.d.renameVar(varname, varname + "_NONML")
self.d.setVar(varname, "${@oe.classextend.get_depends('%s', d)}" % varname)
self.d.appendVarFlag(varname, "vardeps", " " + varname + "_NONML")
ret = bb.utils.join_deps(newdeps, False).replace("EXTENDPKGV", "${EXTENDPKGV}")
self.d.setVar("EXTENDPKGV", orig)
return ret
def map_packagevars(self):
for pkg in (self.d.getVar("PACKAGES").split() + [""]):
self.map_depends_variable("RDEPENDS", pkg)
self.map_depends_variable("RRECOMMENDS", pkg)
self.map_depends_variable("RSUGGESTS", pkg)
self.map_depends_variable("RPROVIDES", pkg)
self.map_depends_variable("RREPLACES", pkg)
self.map_depends_variable("RCONFLICTS", pkg)
self.map_depends_variable("PKG", pkg)
def rename_packages(self):
for pkg in (self.d.getVar("PACKAGES") or "").split():
if pkg.startswith(self.extname):
self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
continue
self.pkgs_mapping.append([pkg, self.METHOD_NAME(pkg)])
self.d.renameVar("PACKAGES", "PACKAGES_NONML")
self.d.setVar("PACKAGES", "${@oe.classextend.get_packages(d)}")
def rename_packages_internal(self, pkgs):
self.pkgs_mapping = []
for pkg in (self.d.expand(pkgs) or "").split():
if pkg.startswith(self.extname):
self.pkgs_mapping.append([pkg.split(self.extname + "-")[1], pkg])
continue
self.pkgs_mapping.append([pkg, self.METHOD_NAME(pkg)])
return " ".join([row[1] for row in self.pkgs_mapping])
def rename_package_variables(self, variables):
for pkg_mapping in self.pkgs_mapping:
if pkg_mapping[0].startswith("${") and pkg_mapping[0].endswith("}"):
continue
for subs in variables:
self.d.renameVar("%s:%s" % (subs, pkg_mapping[0]), "%s:%s" % (subs, pkg_mapping[1]))
class NativesdkClassExtender(ClassExtender):
def map_depends(self, dep):
if dep.startswith(self.extname):
return dep
if dep.endswith(("-gcc", "-g++")):
return dep + "-crosssdk"
elif dep.endswith(("-native", "-native-runtime")) or ('nativesdk-' in dep) or ('-cross-' in dep) or ('-crosssdk-' in dep):
return dep
else:
return self.METHOD_NAME(dep) | null |
run compare with baseline sweep | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
from fairseq.data.data_utils_fast import batch_by_size_fn
from fairseq.data.data_utils_fast import batch_by_size_vec
class TestBatchBySize(unittest.TestCase):
@classmethod
def batch_by_size_baseline(
cls,
indices,
num_tokens_vec,
max_tokens,
max_sentences,
bsz_mult,
):
"""Simple, reliable and slow implementation of batch by size """
batches = []
start = 0
while start < len(indices):
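            # Greedily grow the batch until the token budget (max length in batch * batch size) or the
            # sentence budget would be exceeded; batches larger than bsz_mult are rounded down to a multiple of it.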
for end in range(start + 1, len(indices) + 1):
max_val = max(num_tokens_vec[pos] for pos in range(start, end))
sent_count = end - start
num_tokens = max_val * sent_count
overflow = num_tokens > max_tokens > 0 or sent_count > max_sentences > 0
terminate = overflow or end == len(indices)
if overflow:
sent_count -= 1
if terminate:
if sent_count > bsz_mult:
sent_count = sent_count - sent_count % bsz_mult
batches.append(indices[start : start + sent_count])
start = start + sent_count
break
return batches
@classmethod
def _get_error_message(
cls, max_sentences, max_tokens, bsz_mult, num_tokens_vec, validation, results
):
return f"""Reference batch_by_size implementation should produce
same output as the baseline method.
Params:
max_sentences={max_sentences},
max_tokens={max_tokens},
bsz_mult={bsz_mult},
num_tokens_vec={num_tokens_vec},
expected_batches={validation},
returned_batches={results}"""
def _compare_results(
self,
indices_len,
batch_by_size_impl,
max_sentences,
max_tokens,
bsz_mult,
num_tokens_vec,
):
indices = np.array(list(range(indices_len)))
validation = self.batch_by_size_baseline(
indices,
num_tokens_vec,
max_tokens=max_tokens,
max_sentences=max_sentences,
bsz_mult=bsz_mult,
)
results = batch_by_size_impl(
indices,
num_tokens_vec,
max_tokens=max_tokens,
max_sentences=max_sentences,
bsz_mult=bsz_mult,
)
error_msg = self._get_error_message(
max_sentences, max_tokens, bsz_mult, num_tokens_vec, validation, results
)
self.assertEqual(len(validation), len(results), error_msg)
for first, second in zip(validation, results):
self.assertTrue(np.array_equal(first, second), error_msg)
def METHOD_NAME(self, batch_by_size_impl):
"""Compare reference batch_by_size implementation with batch_by_size_baseline
across a dense grid of hyperparam values"""
MAX_MAX_TOKENS = 10
NUM_TOKENS_VECS_COUNT = 5
for indices_len in [10, 11]: # try odd and even len of indices
for max_sentences in range(0, indices_len + 2):
for max_tokens in range(0, MAX_MAX_TOKENS):
for bsz_mult in range(1, max(MAX_MAX_TOKENS, indices_len) + 2):
for _ in range(NUM_TOKENS_VECS_COUNT):
num_tokens_vec = np.random.randint(
0, max_tokens + 1, size=indices_len
)
self._compare_results(
indices_len,
batch_by_size_impl,
max_sentences,
max_tokens,
bsz_mult,
num_tokens_vec,
)
class TestBatchBySizeVec(TestBatchBySize):
def test_compare_with_baseline(self):
self.METHOD_NAME(batch_by_size_vec)
class TestBatchBySizeFn(TestBatchBySize):
def test_compare_with_baseline(self):
def batch_by_size_fn_wrapper(
indices,
num_tokens_vec,
max_tokens,
max_sentences,
bsz_mult,
):
def num_tokens_fn(idx):
return num_tokens_vec[idx]
return batch_by_size_fn(
indices, num_tokens_fn, max_tokens, max_sentences, bsz_mult
)
self.METHOD_NAME(batch_by_size_fn_wrapper)
if __name__ == "__main__":
unittest.main() | null |
test exclude the first half points | # Copyright 2022 Waymo LLC.
#
# Licensed under the terms in https://github.com/keras-team/keras-cv/blob/master/keras_cv/layers/preprocessing_3d/waymo/LICENSE # noqa: E501
import numpy as np
from keras_cv.layers.preprocessing_3d import base_augmentation_layer_3d
from keras_cv.layers.preprocessing_3d.waymo.frustum_random_dropping_points import ( # noqa: E501
FrustumRandomDroppingPoints,
)
from keras_cv.tests.test_case import TestCase
POINT_CLOUDS = base_augmentation_layer_3d.POINT_CLOUDS
BOUNDING_BOXES = base_augmentation_layer_3d.BOUNDING_BOXES
class FrustumRandomDroppingPointTest(TestCase):
def test_augment_point_clouds_and_bounding_boxes(self):
add_layer = FrustumRandomDroppingPoints(
r_distance=0, theta_width=1, phi_width=1, drop_rate=0.5
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs)
def test_not_augment_drop_rate0_point_clouds_and_bounding_boxes(self):
add_layer = FrustumRandomDroppingPoints(
r_distance=0, theta_width=1, phi_width=1, drop_rate=0.0
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_not_augment_drop_rate1_frustum_empty_point_clouds_and_bounding_boxes( # noqa: E501
self,
):
add_layer = FrustumRandomDroppingPoints(
r_distance=10, theta_width=0, phi_width=0, drop_rate=1.0
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def test_drop_rate1_large_frustum_drop_all_point_clouds(self):
add_layer = FrustumRandomDroppingPoints(
r_distance=0, theta_width=np.pi, phi_width=np.pi, drop_rate=1.0
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs[POINT_CLOUDS] * 0.0, outputs[POINT_CLOUDS])
def test_exclude_all_points(self):
add_layer = FrustumRandomDroppingPoints(
r_distance=0,
theta_width=np.pi,
phi_width=np.pi,
drop_rate=1.0,
exclude_classes=1,
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
exclude_classes = np.ones(shape=(2, 50, 1)).astype("float32")
point_clouds = np.concatenate([point_clouds, exclude_classes], axis=-1)
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(inputs, outputs)
def METHOD_NAME(self):
add_layer = FrustumRandomDroppingPoints(
r_distance=0,
theta_width=np.pi,
phi_width=np.pi,
drop_rate=1.0,
exclude_classes=[1, 2],
)
point_clouds = np.random.random(size=(2, 50, 10)).astype("float32")
class_1 = np.ones(shape=(2, 10, 1)).astype("float32")
class_2 = np.ones(shape=(2, 15, 1)).astype("float32") * 2
classes = np.concatenate(
[class_1, class_2, np.zeros(shape=(2, 25, 1)).astype("float32")],
axis=1,
)
point_clouds = np.concatenate([point_clouds, classes], axis=-1)
bounding_boxes = np.random.random(size=(2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertAllClose(
inputs[POINT_CLOUDS][:, 25:, :] * 0.0,
outputs[POINT_CLOUDS][:, 25:, :],
)
self.assertAllClose(
inputs[POINT_CLOUDS][:, :25, :], outputs[POINT_CLOUDS][:, :25, :]
)
def test_augment_batch_point_clouds_and_bounding_boxes(self):
add_layer = FrustumRandomDroppingPoints(
r_distance=0, theta_width=1, phi_width=1, drop_rate=0.5
)
point_clouds = np.random.random(size=(3, 2, 50, 10)).astype("float32")
bounding_boxes = np.random.random(size=(3, 2, 10, 7)).astype("float32")
inputs = {POINT_CLOUDS: point_clouds, BOUNDING_BOXES: bounding_boxes}
outputs = add_layer(inputs)
self.assertNotAllClose(inputs, outputs) | null |
test env required no env | #!/usr/bin/env python3
# Owner(s): ["oncall: distributed"]
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
from argparse import ArgumentParser
from torch.distributed.argparse_util import check_env, env
class ArgParseUtilTest(unittest.TestCase):
def setUp(self):
# remove any lingering environment variables
for e in os.environ.keys():
if e.startswith("PET_"):
del os.environ[e]
def test_env_string_arg_no_env(self):
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env, default="bar")
self.assertEqual("bar", parser.parse_args([]).foo)
self.assertEqual("baz", parser.parse_args(["-f", "baz"]).foo)
self.assertEqual("baz", parser.parse_args(["--foo", "baz"]).foo)
def test_env_string_arg_env(self):
os.environ["PET_FOO"] = "env_baz"
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env, default="bar")
self.assertEqual("env_baz", parser.parse_args([]).foo)
self.assertEqual("baz", parser.parse_args(["-f", "baz"]).foo)
self.assertEqual("baz", parser.parse_args(["--foo", "baz"]).foo)
def test_env_int_arg_no_env(self):
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env, default=1, type=int)
self.assertEqual(1, parser.parse_args([]).foo)
self.assertEqual(2, parser.parse_args(["-f", "2"]).foo)
self.assertEqual(2, parser.parse_args(["--foo", "2"]).foo)
def test_env_int_arg_env(self):
os.environ["PET_FOO"] = "3"
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env, default=1, type=int)
self.assertEqual(3, parser.parse_args([]).foo)
self.assertEqual(2, parser.parse_args(["-f", "2"]).foo)
self.assertEqual(2, parser.parse_args(["--foo", "2"]).foo)
def test_env_no_default_no_env(self):
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env)
self.assertIsNone(parser.parse_args([]).foo)
self.assertEqual("baz", parser.parse_args(["-f", "baz"]).foo)
self.assertEqual("baz", parser.parse_args(["--foo", "baz"]).foo)
def test_env_no_default_env(self):
os.environ["PET_FOO"] = "env_baz"
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env)
self.assertEqual("env_baz", parser.parse_args([]).foo)
self.assertEqual("baz", parser.parse_args(["-f", "baz"]).foo)
self.assertEqual("baz", parser.parse_args(["--foo", "baz"]).foo)
def METHOD_NAME(self):
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env, required=True)
self.assertEqual("baz", parser.parse_args(["-f", "baz"]).foo)
self.assertEqual("baz", parser.parse_args(["--foo", "baz"]).foo)
def test_env_required_env(self):
os.environ["PET_FOO"] = "env_baz"
parser = ArgumentParser()
parser.add_argument("-f", "--foo", action=env, default="bar", required=True)
self.assertEqual("env_baz", parser.parse_args([]).foo)
self.assertEqual("baz", parser.parse_args(["-f", "baz"]).foo)
self.assertEqual("baz", parser.parse_args(["--foo", "baz"]).foo)
def test_check_env_no_env(self):
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", action=check_env)
self.assertFalse(parser.parse_args([]).verbose)
self.assertTrue(parser.parse_args(["-v"]).verbose)
self.assertTrue(parser.parse_args(["--verbose"]).verbose)
def test_check_env_default_no_env(self):
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", action=check_env, default=True)
self.assertTrue(parser.parse_args([]).verbose)
self.assertTrue(parser.parse_args(["-v"]).verbose)
self.assertTrue(parser.parse_args(["--verbose"]).verbose)
def test_check_env_env_zero(self):
os.environ["PET_VERBOSE"] = "0"
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", action=check_env)
self.assertFalse(parser.parse_args([]).verbose)
self.assertTrue(parser.parse_args(["--verbose"]).verbose)
def test_check_env_env_one(self):
os.environ["PET_VERBOSE"] = "1"
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", action=check_env)
self.assertTrue(parser.parse_args([]).verbose)
self.assertTrue(parser.parse_args(["--verbose"]).verbose)
def test_check_env_default_env_zero(self):
os.environ["PET_VERBOSE"] = "0"
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", action=check_env, default=True)
self.assertFalse(parser.parse_args([]).verbose)
self.assertTrue(parser.parse_args(["--verbose"]).verbose)
def test_check_env_default_env_one(self):
os.environ["PET_VERBOSE"] = "1"
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", action=check_env, default=True)
self.assertTrue(parser.parse_args([]).verbose)
self.assertTrue(parser.parse_args(["--verbose"]).verbose) | null |
test download file | # -*- coding: utf-8 -*-
#
# This file is part of HEPData.
# Copyright (C) 2016 CERN.
#
# HEPData is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# HEPData is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HEPData; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""HEPData Test Fixtures"""
import os
from unittest import mock
from invenio_accounts.models import Role, User
from invenio_db import db
import pytest
from hepdata.ext.opensearch.admin_view.api import AdminIndexer
from hepdata.ext.opensearch.api import reindex_all
from hepdata.factory import create_app
from hepdata.modules.records.importer.api import import_records, _download_file
TEST_EMAIL = '[email protected]'
TEST_PWD = 'hello1'
def create_basic_app():
app = create_app()
test_db_host = app.config.get('TEST_DB_HOST', 'localhost')
app.config.update(dict(
TESTING=True,
TEST_RUNNER="celery.contrib.test_runner.CeleryTestSuiteRunner",
CELERY_TASK_ALWAYS_EAGER=True,
CELERY_RESULT_BACKEND="cache",
CELERY_CACHE_BACKEND="memory",
MAIL_SUPPRESS_SEND=True,
CELERY_TASK_EAGER_PROPAGATES=True,
OPENSEARCH_INDEX="hepdata-main-test",
SUBMISSION_INDEX='hepdata-submission-test',
AUTHOR_INDEX='hepdata-authors-test',
SQLALCHEMY_DATABASE_URI=os.environ.get(
'SQLALCHEMY_DATABASE_URI', 'postgresql+psycopg2://hepdata:hepdata@' + test_db_host + '/hepdata_test')
))
return app
def setup_app(app):
with app.app_context():
db.drop_all()
db.create_all()
reindex_all(recreate=True, synchronous=True)
ctx = app.test_request_context()
ctx.push()
user_count = User.query.filter_by(email='[email protected]').count()
if user_count == 0:
user = User(email=TEST_EMAIL, password='hello1', active=True)
admin_role = Role(name='admin')
coordinator_role = Role(name='coordinator')
user.roles.append(admin_role)
user.roles.append(coordinator_role)
db.session.add(admin_role)
db.session.add(coordinator_role)
db.session.add(user)
db.session.commit()
yield app
ctx.pop()
@pytest.fixture()
def app(request):
"""Flask app fixture."""
app = create_basic_app()
app_generator = setup_app(app)
for app in app_generator:
yield app
@pytest.fixture()
def admin_idx(app):
with app.app_context():
admin_idx = AdminIndexer()
return admin_idx
@pytest.fixture()
def load_default_data(app, identifiers):
import_default_data(app, identifiers)
def import_default_data(app, identifiers):
with app.app_context():
# Mock out the _download_file method in importer to avoid downloading the
# sample files multiple times during testing
def METHOD_NAME(base_url, inspire_id):
filename = 'HEPData-ins{0}-v1.zip'.format(inspire_id)
print(f'Looking for file {filename} in {app.config["CFG_TMPDIR"]}')
expected_file_name = os.path.join(app.config["CFG_TMPDIR"], filename)
if os.path.exists(expected_file_name):
print("Using existing file at %s" % expected_file_name)
return expected_file_name
else:
print("Reverting to normal _download_file method")
return _download_file(base_url, inspire_id)
with mock.patch('hepdata.modules.records.importer.api._download_file', wraps=METHOD_NAME):
to_load = [x["hepdata_id"] for x in identifiers]
import_records(to_load, synchronous=True)
@pytest.fixture()
def client(app):
with app.test_client() as client:
yield client
@pytest.fixture()
def identifiers():
return get_identifiers()
def get_identifiers():
return [{"hepdata_id": "ins1283842", "inspire_id": '1283842',
"title": "Measurement of the forward-backward asymmetry "
"in the distribution of leptons in $t\\bar{t}$ "
"events in the lepton+jets channel",
"data_tables": 14,
"arxiv": "arXiv:1403.1294"},
{"hepdata_id": "ins1245023", "inspire_id": '1245023',
"title": "High-statistics study of $K^0_S$ pair production in two-photon collisions",
"data_tables": 40,
"arxiv": "arXiv:1307.7457"}
]
@pytest.fixture()
def load_submission(app, load_default_data):
import_records(['ins1487726'], synchronous=True) | null |
test serialize subclassed kb | from pathlib import Path
from typing import Any, Callable, Dict, Iterable
import srsly
from numpy import zeros
from thinc.api import Config
from spacy import Errors, util
from spacy.kb.kb_in_memory import InMemoryLookupKB
from spacy.util import SimpleFrozenList, ensure_path, load_model_from_config, registry
from spacy.vocab import Vocab
from ..util import make_tempdir
def test_serialize_kb_disk(en_vocab):
# baseline assertions
kb1 = _get_dummy_kb(en_vocab)
_check_kb(kb1)
# dumping to file & loading back in
with make_tempdir() as d:
dir_path = ensure_path(d)
if not dir_path.exists():
dir_path.mkdir()
file_path = dir_path / "kb"
kb1.to_disk(str(file_path))
kb2 = InMemoryLookupKB(vocab=en_vocab, entity_vector_length=3)
kb2.from_disk(str(file_path))
# final assertions
_check_kb(kb2)
def _get_dummy_kb(vocab):
kb = InMemoryLookupKB(vocab, entity_vector_length=3)
kb.add_entity(entity="Q53", freq=33, entity_vector=[0, 5, 3])
kb.add_entity(entity="Q17", freq=2, entity_vector=[7, 1, 0])
kb.add_entity(entity="Q007", freq=7, entity_vector=[0, 0, 7])
kb.add_entity(entity="Q44", freq=342, entity_vector=[4, 4, 4])
kb.add_alias(alias="double07", entities=["Q17", "Q007"], probabilities=[0.1, 0.9])
kb.add_alias(
alias="guy",
entities=["Q53", "Q007", "Q17", "Q44"],
probabilities=[0.3, 0.3, 0.2, 0.1],
)
kb.add_alias(alias="random", entities=["Q007"], probabilities=[1.0])
return kb
def _check_kb(kb):
# check entities
assert kb.get_size_entities() == 4
for entity_string in ["Q53", "Q17", "Q007", "Q44"]:
assert entity_string in kb.get_entity_strings()
for entity_string in ["", "Q0"]:
assert entity_string not in kb.get_entity_strings()
# check aliases
assert kb.get_size_aliases() == 3
for alias_string in ["double07", "guy", "random"]:
assert alias_string in kb.get_alias_strings()
for alias_string in ["nothingness", "", "randomnoise"]:
assert alias_string not in kb.get_alias_strings()
# check candidates & probabilities
candidates = sorted(kb.get_alias_candidates("double07"), key=lambda x: x.entity_)
assert len(candidates) == 2
assert candidates[0].entity_ == "Q007"
assert 6.999 < candidates[0].entity_freq < 7.01
assert candidates[0].entity_vector == [0, 0, 7]
assert candidates[0].alias_ == "double07"
assert 0.899 < candidates[0].prior_prob < 0.901
assert candidates[1].entity_ == "Q17"
assert 1.99 < candidates[1].entity_freq < 2.01
assert candidates[1].entity_vector == [7, 1, 0]
assert candidates[1].alias_ == "double07"
assert 0.099 < candidates[1].prior_prob < 0.101
def METHOD_NAME():
"""Check that IO of a custom KB works fine as part of an EL pipe."""
config_string = """
[nlp]
lang = "en"
pipeline = ["entity_linker"]
[components]
[components.entity_linker]
factory = "entity_linker"
[components.entity_linker.generate_empty_kb]
@misc = "kb_test.CustomEmptyKB.v1"
[initialize]
[initialize.components]
[initialize.components.entity_linker]
[initialize.components.entity_linker.kb_loader]
@misc = "kb_test.CustomKB.v1"
entity_vector_length = 342
custom_field = 666
"""
class SubInMemoryLookupKB(InMemoryLookupKB):
def __init__(self, vocab, entity_vector_length, custom_field):
super().__init__(vocab, entity_vector_length)
self.custom_field = custom_field
def to_disk(self, path, exclude: Iterable[str] = SimpleFrozenList()):
"""We overwrite InMemoryLookupKB.to_disk() to ensure that self.custom_field is stored as well."""
path = ensure_path(path)
if not path.exists():
path.mkdir(parents=True)
if not path.is_dir():
raise ValueError(Errors.E928.format(loc=path))
def serialize_custom_fields(file_path: Path) -> None:
srsly.write_json(file_path, {"custom_field": self.custom_field})
serialize = {
"contents": lambda p: self.write_contents(p),
"strings.json": lambda p: self.vocab.strings.to_disk(p),
"custom_fields": lambda p: serialize_custom_fields(p),
}
util.to_disk(path, serialize, exclude)
def from_disk(self, path, exclude: Iterable[str] = SimpleFrozenList()):
"""We overwrite InMemoryLookupKB.from_disk() to ensure that self.custom_field is loaded as well."""
path = ensure_path(path)
if not path.exists():
raise ValueError(Errors.E929.format(loc=path))
if not path.is_dir():
raise ValueError(Errors.E928.format(loc=path))
def deserialize_custom_fields(file_path: Path) -> None:
self.custom_field = srsly.read_json(file_path)["custom_field"]
deserialize: Dict[str, Callable[[Any], Any]] = {
"contents": lambda p: self.read_contents(p),
"strings.json": lambda p: self.vocab.strings.from_disk(p),
"custom_fields": lambda p: deserialize_custom_fields(p),
}
util.from_disk(path, deserialize, exclude)
@registry.misc("kb_test.CustomEmptyKB.v1")
def empty_custom_kb() -> Callable[[Vocab, int], SubInMemoryLookupKB]:
def empty_kb_factory(vocab: Vocab, entity_vector_length: int):
return SubInMemoryLookupKB(
vocab=vocab,
entity_vector_length=entity_vector_length,
custom_field=0,
)
return empty_kb_factory
@registry.misc("kb_test.CustomKB.v1")
def custom_kb(
entity_vector_length: int, custom_field: int
) -> Callable[[Vocab], SubInMemoryLookupKB]:
def custom_kb_factory(vocab):
kb = SubInMemoryLookupKB(
vocab=vocab,
entity_vector_length=entity_vector_length,
custom_field=custom_field,
)
kb.add_entity("random_entity", 0.0, zeros(entity_vector_length))
return kb
return custom_kb_factory
config = Config().from_str(config_string)
nlp = load_model_from_config(config, auto_fill=True)
nlp.initialize()
entity_linker = nlp.get_pipe("entity_linker")
assert type(entity_linker.kb) == SubInMemoryLookupKB
assert entity_linker.kb.entity_vector_length == 342
assert entity_linker.kb.custom_field == 666
# Make sure the custom KB is serialized correctly
with make_tempdir() as tmp_dir:
nlp.to_disk(tmp_dir)
nlp2 = util.load_model_from_path(tmp_dir)
entity_linker2 = nlp2.get_pipe("entity_linker")
# After IO, the KB is the standard one
assert type(entity_linker2.kb) == SubInMemoryLookupKB
assert entity_linker2.kb.entity_vector_length == 342
assert entity_linker2.kb.custom_field == 666 | null |
radial split 2 d | import math
from warnings import warn
from pyaedt.generic.general_methods import pyaedt_function_handler
from pyaedt.modeler.cad.Modeler import GeometryModeler
from pyaedt.modeler.cad.Modeler import Modeler
from pyaedt.modeler.cad.Primitives2D import Primitives2D
class ModelerRMxprt(Modeler):
"""Provides the Modeler RMxprt application interface.
This class is inherited in the caller application and is accessible through the modeler variable
    object (e.g. ``rmxprt.modeler``).
"""
def __init__(self, app):
app.logger.reset_timer()
Modeler.__init__(self, app)
app.logger.info_timer("ModelerRMxprt class has been initialized!")
@property
def oeditor(self):
"""oEditor Module.
References
----------
>>> oEditor = oDesign.SetActiveEditor("Machine")"""
return self._app.oeditor
class Modeler2D(GeometryModeler, Primitives2D):
"""Provides the Modeler 2D application interface.
This class is inherited in the caller application and is accessible through the modeler variable
    object (e.g. ``maxwell2d.modeler``).
Parameters
----------
application : :class:`pyaedt.application.Analysis2D.FieldAnalysis2D`
Examples
--------
>>> from pyaedt import Maxwell2d
>>> app = Maxwell2d()
>>> my_modeler = app.modeler
"""
def __init__(self, application):
GeometryModeler.__init__(self, application, is3d=False)
Primitives2D.__init__(self)
self._primitives = self
self.logger.info("Modeler2D class has been initialized!")
def __get__(self, instance, owner):
self._app = instance
return self
@property
def primitives(self):
"""Primitives.
.. deprecated:: 0.4.15
            No need to use primitives anymore. You can call primitives methods directly from the modeler instead.
Returns
-------
:class:`pyaedt.modeler.Primitives2D.Primitives2D`
"""
mess = "`primitives` is deprecated.\n"
mess += " Use `app.modeler` directly to instantiate primitives methods."
warn(mess, DeprecationWarning)
return self._primitives
@pyaedt_function_handler()
def calculate_radius_2D(self, object_name, inner=False):
"""Calculate the extremity of an object in the radial direction.
Parameters
----------
object_name : str
name of the object from which to calculate the radius.
inner : bool, optional
The default is ``False``.
Returns
-------
float
Radius value.
.. note::
If ``inner=True``, then the maximum is returned; otherwise,
the minimum is returned.
"""
radius = 0
oVertexIDs = self[object_name].vertices
if oVertexIDs:
if inner:
radius = 0
else:
radius = 1e9
for vertex in oVertexIDs:
pos = vertex.position
vertex_radius = math.sqrt(float(pos[0]) ** 2 + float(pos[1]) ** 2)
if inner:
if vertex_radius > radius:
radius = vertex_radius
else:
if vertex_radius < radius:
radius = vertex_radius
elif self[object_name].edges:
radius = self[object_name].edges[0].length / (2 * math.pi)
return radius
@pyaedt_function_handler()
def METHOD_NAME(self, radius, name):
"""Split the stator and rotor for mesh refinement.
Parameters
----------
radius : float
Radius of the circle.
name : str
Name of the circle.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
"""
        cir = self.modeler.create_circle([0, 0, 0], radius, name=name + "_split", matname="vacuum")
self.oeditor.Copy(["NAME:Selections", "Selections:=", name])
objects = [i for i in self.modeler.object_names]
self.oeditor.Paste()
name1 = [i for i in self.modeler.object_names if i not in objects]
self.intersect([name1[0], cir.name], keep_originals=False)
self.subtract(name, name1[0])
return True
@pyaedt_function_handler()
def objects_in_bounding_box(self, bounding_box, check_lines=True, check_sheets=True):
"""Given a 2D bounding box, check if sheets and lines are inside it.
Parameters
----------
bounding_box : list
List of either the 4 or 6 coordinates of the bounding box vertices.
Bounding box is provided as [xmin, ymin, zmin, xmax, ymax, zmax].
check_lines : bool, optional
Whether to check line objects. The default is ``True``.
check_sheets : bool, optional
Whether to check sheet objects. The default is ``True``.
Returns
-------
list of :class:`pyaedt.modeler.cad.object3d`
"""
if len(bounding_box) != 4 and len(bounding_box) != 6:
raise ValueError("Bounding box must be a list of 4 or 6 elements.")
if len(bounding_box) == 4:
if self._app.design_type == "2D Extractor" or self._app.xy_plane:
bounding_box = [bounding_box[0], bounding_box[1], 0, bounding_box[2], bounding_box[3], 0]
else:
bounding_box = [bounding_box[0], 0, bounding_box[1], bounding_box[2], 0, bounding_box[3]]
objects_2d = []
if check_lines:
for obj in self.line_objects:
bound = obj.bounding_box
if ( # pragma: no cover
bounding_box[0] <= bound[0] <= bounding_box[3]
and bounding_box[1] <= bound[1] <= bounding_box[4]
and bounding_box[2] <= bound[2] <= bounding_box[5]
and bounding_box[0] <= bound[3] <= bounding_box[3]
and bounding_box[1] <= bound[4] <= bounding_box[4]
and bounding_box[2] <= bound[5] <= bounding_box[5]
):
objects_2d.append(obj)
if check_sheets:
for obj in self.sheet_objects:
bound = obj.bounding_box
if (
bounding_box[0] <= bound[0] <= bounding_box[3]
and bounding_box[1] <= bound[1] <= bounding_box[4]
and bounding_box[2] <= bound[2] <= bounding_box[5]
and bounding_box[0] <= bound[3] <= bounding_box[3]
and bounding_box[1] <= bound[4] <= bounding_box[4]
and bounding_box[2] <= bound[5] <= bounding_box[5]
):
objects_2d.append(obj)
return objects_2d | null |
base mac addr | # -*- coding: utf-8 -*-
########################################################################
# Ruijie B6510-48VS8CQ
#
# Module contains platform specific implementation of SONiC Platform
# Base API and provides the EEPROMs' information.
#
# The different EEPROMs available are as follows:
# - System EEPROM : Contains Serial number, Service tag, Base MA
# address, etc. in ONIE TlvInfo EEPROM format.
# - PSU EEPROM : Contains Serial number, Part number, Service Tag,
# PSU type, Revision.
# - Fan EEPROM : Contains Serial number, Part number, Service Tag,
# Fan type, Number of Fans in Fantray, Revision.
########################################################################
try:
from sonic_eeprom import eeprom_tlvinfo
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
class Eeprom(eeprom_tlvinfo.TlvInfoDecoder):
def __init__(self, bus=2, loc="0057", config=None, iom_eeprom=False):
self.is_module = iom_eeprom
if config:
bus = config.get("bus")
loc = config.get("loc")
if bus and loc:
self.eeprom_path = "/sys/bus/i2c/devices/{}-{}/eeprom".format(bus, loc)
else:
raise ValueError(
"Eeprom location error, bus: {}, loc: {}, config: {}".format(
bus, loc, config
)
)
super(Eeprom, self).__init__(self.eeprom_path, 0, "", True)
self.eeprom_tlv_dict = dict()
try:
if self.is_module:
# TODO
pass
# self.write_eeprom("\x00\x00")
# self.eeprom_data = self.read_eeprom_bytes(256)
else:
self.eeprom_data = self.read_eeprom()
except Exception:
self.eeprom_data = "N/A"
if not self.is_module:
raise RuntimeError("Eeprom is not Programmed")
else:
eeprom = self.eeprom_data
if not self.is_valid_tlvinfo_header(eeprom):
return
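            # ONIE TlvInfo header: bytes 9-10 hold the total length of the TLV area (big-endian)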
total_length = (eeprom[9] << 8) | eeprom[10]
tlv_index = self._TLV_INFO_HDR_LEN
tlv_end = self._TLV_INFO_HDR_LEN + total_length
while (tlv_index + 2) < len(eeprom) and tlv_index < tlv_end:
if not self.is_valid_tlv(eeprom[tlv_index:]):
break
tlv = eeprom[tlv_index : tlv_index + 2 + eeprom[tlv_index + 1]]
code = "0x%02X" % (tlv[0])
if tlv[0] == self._TLV_CODE_VENDOR_EXT:
value = str(
(tlv[2] << 24)
| (tlv[3] << 16)
| (tlv[4] << 8)
| tlv[5]
)
value += str(tlv[6 : 6 + tlv[1]])
else:
name, value = self.decoder(None, tlv)
self.eeprom_tlv_dict[code] = value
if eeprom[tlv_index] == self._TLV_CODE_CRC_32:
break
tlv_index += eeprom[tlv_index + 1] + 2
def serial_number_str(self):
(is_valid, results) = self.get_tlv_field(
self.eeprom_data, self._TLV_CODE_SERIAL_NUMBER
)
if not is_valid:
return "N/A"
return results[2]
def METHOD_NAME(self):
(is_valid, results) = self.get_tlv_field(
self.eeprom_data, self._TLV_CODE_MAC_BASE
)
if not is_valid or results[1] != 6:
            # fall back to the base decoder's MAC address when the TLV is missing or malformed
            return super(Eeprom, self).switchaddrstr(self.eeprom_data)
return ":".join([hex(T) for T in results[2]])
def modelstr(self):
if self.is_module:
(is_valid, results) = self.get_tlv_field(
self.eeprom_data, self._TLV_CODE_PLATFORM_NAME
)
else:
(is_valid, results) = self.get_tlv_field(
self.eeprom_data, self._TLV_CODE_PRODUCT_NAME
)
if not is_valid:
return "N/A"
return results[2]
def part_number_str(self):
(is_valid, results) = self.get_tlv_field(
self.eeprom_data, self._TLV_CODE_PART_NUMBER
)
if not is_valid:
return "N/A"
return results[2]
def serial_str(self):
(is_valid, results) = self.get_tlv_field(
self.eeprom_data, self._TLV_CODE_SERVICE_TAG
)
if not is_valid:
return "N/A"
return results[2]
def revision_str(self):
(is_valid, results) = self.get_tlv_field(
self.eeprom_data, self._TLV_CODE_DEVICE_VERSION
)
if not is_valid:
return "N/A"
return results[2]
def system_eeprom_info(self):
"""
Returns a dictionary, where keys are the type code defined in
ONIE EEPROM format and values are their corresponding values
found in the system EEPROM.
"""
return self.eeprom_tlv_dict | null |
create app file | from __future__ import annotations
import importlib.util
import sys
from pathlib import Path
from shutil import rmtree
from typing import TYPE_CHECKING, Callable, Generator, Protocol, cast
import pytest
from _pytest.fixtures import FixtureRequest
from _pytest.monkeypatch import MonkeyPatch
from click.testing import CliRunner
from pytest_mock import MockerFixture
from litestar.cli._utils import _path_to_dotted_path
from . import (
APP_FILE_CONTENT,
CREATE_APP_FILE_CONTENT,
GENERIC_APP_FACTORY_FILE_CONTENT,
GENERIC_APP_FACTORY_FILE_CONTENT_FUTURE_ANNOTATIONS,
GENERIC_APP_FACTORY_FILE_CONTENT_STRING_ANNOTATION,
)
if TYPE_CHECKING:
from unittest.mock import MagicMock
from litestar.cli._utils import LitestarGroup
@pytest.fixture()
def root_command() -> LitestarGroup:
import litestar.cli.main
return cast("LitestarGroup", importlib.reload(litestar.cli.main).litestar_group)
@pytest.fixture
def patch_autodiscovery_paths(request: FixtureRequest) -> Callable[[list[str]], None]:
def patcher(paths: list[str]) -> None:
from litestar.cli._utils import AUTODISCOVERY_FILE_NAMES
old_paths = AUTODISCOVERY_FILE_NAMES[::]
AUTODISCOVERY_FILE_NAMES[:] = paths
def finalizer() -> None:
AUTODISCOVERY_FILE_NAMES[:] = old_paths
request.addfinalizer(finalizer)
return patcher
@pytest.fixture
def tmp_project_dir(monkeypatch: MonkeyPatch, tmp_path: Path) -> Path:
path = tmp_path / "project_dir"
path.mkdir(exist_ok=True)
monkeypatch.chdir(path)
return path
class CreateAppFileFixture(Protocol):
def __call__(
self,
file: str | Path,
directory: str | Path | None = None,
content: str | None = None,
init_content: str = "",
subdir: str | None = None,
) -> Path:
...
def _purge_module(module_names: list[str], path: str | Path) -> None:
for name in module_names:
if name in sys.modules:
del sys.modules[name]
Path(importlib.util.cache_from_source(path)).unlink(missing_ok=True) # type: ignore[arg-type]
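# Why purge (an assumption about intent, for illustration): once a test rewrites
# e.g. app.py, dropping "app" from sys.modules and unlinking its cached .pyc makes
# the next autodiscovery import load the fresh file instead of the cached module.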
@pytest.fixture
def create_app_file(tmp_project_dir: Path, request: FixtureRequest) -> CreateAppFileFixture:
def METHOD_NAME(
file: str | Path,
directory: str | Path | None = None,
content: str | None = None,
init_content: str = "",
subdir: str | None = None,
) -> Path:
base = tmp_project_dir
if directory:
base /= Path(Path(directory) / subdir) if subdir else Path(directory)
base.mkdir(parents=True)
base.joinpath("__init__.py").write_text(init_content)
tmp_app_file = base / file
tmp_app_file.write_text(content or APP_FILE_CONTENT)
if directory:
request.addfinalizer(lambda: rmtree(directory))
request.addfinalizer(
lambda: _purge_module(
[directory, _path_to_dotted_path(tmp_app_file.relative_to(Path.cwd()))], tmp_app_file # type: ignore[list-item]
)
)
else:
request.addfinalizer(tmp_app_file.unlink)
request.addfinalizer(lambda: _purge_module([str(file).replace(".py", "")], tmp_app_file))
return tmp_app_file
return METHOD_NAME
@pytest.fixture
def app_file(create_app_file: CreateAppFileFixture) -> Path:
return create_app_file("app.py")
@pytest.fixture
def runner() -> CliRunner:
return CliRunner()
@pytest.fixture
def mock_uvicorn_run(mocker: MockerFixture) -> MagicMock:
return mocker.patch("uvicorn.run")
@pytest.fixture()
def mock_subprocess_run(mocker: MockerFixture) -> MagicMock:
return mocker.patch("subprocess.run")
@pytest.fixture
def mock_confirm_ask(mocker: MockerFixture) -> Generator[MagicMock, None, None]:
yield mocker.patch("rich.prompt.Confirm.ask", return_value=True)
@pytest.fixture(
params=[
pytest.param((APP_FILE_CONTENT, "app"), id="app_obj"),
pytest.param((CREATE_APP_FILE_CONTENT, "create_app"), id="create_app"),
pytest.param((GENERIC_APP_FACTORY_FILE_CONTENT, "any_name"), id="app_factory"),
pytest.param((GENERIC_APP_FACTORY_FILE_CONTENT_STRING_ANNOTATION, "any_name"), id="app_factory_str_annot"),
pytest.param((GENERIC_APP_FACTORY_FILE_CONTENT_FUTURE_ANNOTATIONS, "any_name"), id="app_factory_future_annot"),
]
)
def _app_file_content(request: FixtureRequest) -> tuple[str, str]:
return cast("tuple[str, str]", request.param)
@pytest.fixture
def app_file_content(_app_file_content: tuple[str, str]) -> str:
return _app_file_content[0]
@pytest.fixture
def app_file_app_name(_app_file_content: tuple[str, str]) -> str:
return _app_file_content[1] | null |
text | # vim: ft=python fileencoding=utf-8 sw=4 et sts=4
"""Pop-up window to display keybindings of current mode."""
from typing import List, Tuple, Iterator, Set
from vimiv.qt.widgets import QLabel, QHBoxLayout, QVBoxLayout, QLayout, QLineEdit
import vimiv
from vimiv import api, utils
from vimiv.config import styles
from vimiv.widgets import PopUp
_logger = utils.log.module_logger(__name__)
class KeybindingsPopUp(PopUp):
"""Pop up that displays keybinding information.
All available keybindings are displayed with the corresponding command. It is
possible to search through the commands using the line edit.
Class Attributes:
TITLE: Window title used for the pop up.
Attributes:
_bindings_color: Color used to highlight the keybindings.
_highlight_color: Color used to highlight matching search results.
_labels: List of labels to display keybinding-command text per column.
_search: Line edit widget to search for commands.
_search_matches: Set of commands that match the current search.
_description_label: Label to display a description of matching commands.
"""
TITLE = f"{vimiv.__name__} - keybindings"
# fmt: off
STYLESHEET = PopUp.STYLESHEET + """
QLineEdit {
font: {statusbar.font};
background-color: {statusbar.bg};
color: {statusbar.fg};
border: 0px solid;
padding: {statusbar.padding};
}
"""
# fmt: on
def __init__(self, columns: int = 2, parent=None):
super().__init__(self.TITLE, parent=parent)
self._bindings_color = styles.get("keybindings.bindings.color")
self._highlight_color = styles.get("keybindings.highlight.color")
self._labels: List[QLabel] = []
self._search = QLineEdit()
self._search_matches: Set[str] = set()
self._description_label = QLabel()
self._search.setPlaceholderText("search")
layout = QVBoxLayout()
layout.setSizeConstraint(QLayout.SizeConstraint.SetFixedSize)
content_layout = QHBoxLayout()
for _ in range(columns):
label = QLabel()
self._labels.append(label)
content_layout.addWidget(label)
layout.addLayout(content_layout)
layout.addWidget(self._description_label)
layout.addWidget(self._search)
self.setLayout(layout)
self._search.textChanged.connect(self._update_text)
for mode in api.modes.ALL:
mode.entered.connect(self._update_text)
self._update_text()
self._search.setFocus()
self.show()
@property
def METHOD_NAME(self) -> str:
"""Complete keybinding/command text displayed in all columns."""
return "\n".join(label.METHOD_NAME() for label in self._labels)
@property
def description(self) -> str:
"""Text of the description label for matching commands."""
return self._description_label.METHOD_NAME()
@property
def column_count(self) -> int:
"""Number of columns to split the bindings in."""
return len(self._labels)
def column_bindings(self) -> Iterator[List[Tuple[str, str]]]:
"""Return html-safe keybindings for each column sorted by command name."""
bindings = api.keybindings.get(api.modes.current())
formatted_bindings = [
(utils.escape_html(binding), command)
for binding, command in sorted(bindings, key=lambda x: x[1])
]
return utils.split(formatted_bindings, self.column_count)
def column_text(
self, search: str, highlight: str, bindings: List[Tuple[str, str]]
) -> str:
"""Return the formatted keybinding-command text for one column.
Args:
search: Current search string.
highlight: Search string wrapped in a highlight color span.
bindings: List of bindings to put into this column
"""
METHOD_NAME = ""
for binding, command in bindings:
if search and search in command:
command = command.replace(search, highlight)
METHOD_NAME += (
"<tr>"
f"<td style='color: {self._bindings_color}'>{binding}</td>"
f"<td style='padding-left: 2ex'>{command}</td>"
"</tr>"
)
return METHOD_NAME
def highlighted_search_str(self, search: str) -> str:
"""Current search string wrapped in a highlight color span."""
return utils.add_html(
utils.wrap_style_span(f"color: {self._highlight_color}", search), "b", "u"
)
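    # Rough sketch of the produced markup (the exact nesting depends on
    # utils.add_html; the pattern "open" and the color value are hypothetical):
    #
    #     <b><u><span style='color: #cc6666'>open</span></u></b>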
def update_search_matches(
self, search: str, bindings: List[Tuple[str, str]]
) -> None:
"""Add names of commands that match the current search to the match set."""
self._search_matches |= {
command.split(maxsplit=1)[0]
for _, command in bindings
if search in command.split(maxsplit=1)[0]
}
def _update_text(self, search: str = None) -> None:
"""Update keybinding-command text for all columns.
        This retrieves all keybindings for the current mode, splits them across
        the available columns and formats them neatly. If there is a search, then the
matching command parts are highlighted.
Args:
search: Current search string from the line edit.
"""
search = search if search is not None else self._search.METHOD_NAME()
search = search.strip()
highlight = self.highlighted_search_str(search)
self._search_matches.clear()
for label, bindings in zip(self._labels, self.column_bindings()):
label.setText(self.column_text(search, highlight, bindings))
self.update_search_matches(search, bindings)
self._update_description(search, highlight)
def _update_description(self, search, highlight):
"""Update text of the description label with new search results."""
if len(search) < 2: # Do not print many matches for single character search
self._description_label.clear()
return
METHOD_NAME = ""
for command_name in sorted(self._search_matches):
command = api.commands.get(command_name, api.modes.current())
METHOD_NAME += (
"<tr>"
f"<td>{command_name.replace(search, highlight)}</td>"
f"<td style='padding-left: 2ex'>{command.description}</td>"
"</tr>"
)
self._description_label.setText(METHOD_NAME) | null |
checkbox | import sys
from collections.abc import Container, Iterable, Sequence
from types import ModuleType
from typing import Any
from typing_extensions import Literal
if sys.platform == "win32":
from _msi import *
from _msi import _Database
AMD64: bool
Win64: bool
datasizemask: Literal[0x00FF]
type_valid: Literal[0x0100]
type_localizable: Literal[0x0200]
typemask: Literal[0x0C00]
type_long: Literal[0x0000]
type_short: Literal[0x0400]
type_string: Literal[0x0C00]
type_binary: Literal[0x0800]
type_nullable: Literal[0x1000]
type_key: Literal[0x2000]
knownbits: Literal[0x3FFF]
class Table:
name: str
fields: list[tuple[int, str, int]]
def __init__(self, name: str) -> None: ...
def add_field(self, index: int, name: str, type: int) -> None: ...
def sql(self) -> str: ...
def create(self, db: _Database) -> None: ...
class _Unspecified: ...
def change_sequence(
seq: Sequence[tuple[str, str | None, int]],
action: str,
seqno: int | type[_Unspecified] = ...,
cond: str | type[_Unspecified] = ...,
) -> None: ...
def add_data(db: _Database, table: str, values: Iterable[tuple[Any, ...]]) -> None: ...
def add_stream(db: _Database, name: str, path: str) -> None: ...
def init_database(
name: str, schema: ModuleType, ProductName: str, ProductCode: str, ProductVersion: str, Manufacturer: str
) -> _Database: ...
def add_tables(db: _Database, module: ModuleType) -> None: ...
def make_id(str: str) -> str: ...
def gen_uuid() -> str: ...
class CAB:
name: str
files: list[tuple[str, str]]
filenames: set[str]
index: int
def __init__(self, name: str) -> None: ...
def gen_id(self, file: str) -> str: ...
def append(self, full: str, file: str, logical: str) -> tuple[int, str]: ...
def commit(self, db: _Database) -> None: ...
_directories: set[str]
class Directory:
db: _Database
cab: CAB
basedir: str
physical: str
logical: str
component: str | None
short_names: set[str]
ids: set[str]
keyfiles: dict[str, str]
componentflags: int | None
absolute: str
def __init__(
self,
db: _Database,
cab: CAB,
basedir: str,
physical: str,
_logical: str,
default: str,
componentflags: int | None = None,
) -> None: ...
def start_component(
self,
component: str | None = None,
feature: Feature | None = None,
flags: int | None = None,
keyfile: str | None = None,
uuid: str | None = None,
) -> None: ...
def make_short(self, file: str) -> str: ...
def add_file(self, file: str, src: str | None = None, version: str | None = None, language: str | None = None) -> str: ...
def glob(self, pattern: str, exclude: Container[str] | None = None) -> list[str]: ...
def remove_pyc(self) -> None: ...
class Binary:
name: str
def __init__(self, fname: str) -> None: ...
class Feature:
id: str
def __init__(
self,
db: _Database,
id: str,
title: str,
desc: str,
display: int,
level: int = 1,
parent: Feature | None = None,
directory: str | None = None,
attributes: int = 0,
) -> None: ...
def set_current(self) -> None: ...
class Control:
dlg: Dialog
name: str
def __init__(self, dlg: Dialog, name: str) -> None: ...
def event(self, event: str, argument: str, condition: str = "1", ordering: int | None = None) -> None: ...
def mapping(self, event: str, attribute: str) -> None: ...
def condition(self, action: str, condition: str) -> None: ...
class RadioButtonGroup(Control):
property: str
index: int
def __init__(self, dlg: Dialog, name: str, property: str) -> None: ...
def add(self, name: str, x: int, y: int, w: int, h: int, text: str, value: str | None = None) -> None: ...
class Dialog:
db: _Database
name: str
x: int
y: int
w: int
h: int
def __init__(
self,
db: _Database,
name: str,
x: int,
y: int,
w: int,
h: int,
attr: int,
title: str,
first: str,
default: str,
cancel: str,
) -> None: ...
def control(
self,
name: str,
type: str,
x: int,
y: int,
w: int,
h: int,
attr: int,
prop: str | None,
text: str | None,
next: str | None,
help: str | None,
) -> Control: ...
def text(self, name: str, x: int, y: int, w: int, h: int, attr: int, text: str | None) -> Control: ...
def bitmap(self, name: str, x: int, y: int, w: int, h: int, text: str | None) -> Control: ...
def line(self, name: str, x: int, y: int, w: int, h: int) -> Control: ...
def pushbutton(
self, name: str, x: int, y: int, w: int, h: int, attr: int, text: str | None, next: str | None
) -> Control: ...
def radiogroup(
self, name: str, x: int, y: int, w: int, h: int, attr: int, prop: str | None, text: str | None, next: str | None
) -> RadioButtonGroup: ...
def METHOD_NAME(
self, name: str, x: int, y: int, w: int, h: int, attr: int, prop: str | None, text: str | None, next: str | None
) -> Control: ... | null |
triples | from __future__ import absolute_import
from __future__ import print_function
from owmeta_core.command import OWM
from owmeta_core.dataobject import ObjectProperty
from owmeta_core.custom_dataobject_property import CustomProperty
from owmeta_core.context import Context
from owmeta.neuron import Neuron
from owmeta.network import Network
from owmeta.worm import Worm
from owmeta.evidence import Evidence
from owmeta.document import Document
class NC_neighbor(CustomProperty):
def __init__(self, *args, **kwargs):
super(NC_neighbor, self).__init__('_nb', *args, **kwargs)
self.real_neighbor = self.owner.neighbor
# Re-assigning neighbor Property
self.owner.neighbor = self
def get(self, **kwargs):
# get the members of the class
for x in self.owner.neighbor():
yield x
def set(self, ob, **kwargs):
self.real_neighbor(ob)
if isinstance(ob, NeuronClass):
ob_name = ob.name()
this_name = self.owner.name()
for x in ob.member.defined_values:
# Get the name for the neighbor
try:
n = x.name()
side = n[n.find(ob_name)+len(ob_name):]
name_here = this_name + side
this_neuron = Neuron(name_here)
self.owner.member(this_neuron)
this_neuron.neighbor(x, **kwargs)
except ValueError:
# XXX: could default to all-to-all semantics
                    print('Do not recognize the membership of this neuron/neuron class', ob)
elif isinstance(ob, Neuron):
for x in self.owner.member:
x.neighbor(ob)
def METHOD_NAME(self, *args, **kwargs):
""" Stub. All of the actual relationships are encoded in Neuron.neighbor and NeuronClass.member """
return []
# Circuit from http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2760495/
class NeuronClass(Neuron):
class_context = 'http://example.org/rmgr_example'
member = ObjectProperty(value_type=Neuron, multiple=True)
def __init__(self, name=False, **kwargs):
super(NeuronClass, self).__init__(**kwargs)
NC_neighbor(owner=self)
if name:
self.name(name)
# A neuron class should be a way of saying what all neurons of a class have in common.
# (The notation below is a bit of a mish-mash: basically it's owmeta without
# iterators, type notations with ':', and some Python string operations.)
#
# nc : NeuronClass
# n : Neuron
# p : Property
# a : DataObject
# | Literal ;
# bc : NeuronClass
# b : Neuron
# d : Neuron
# p.linkName not in {'name', 'connection', 'neighbor'}
# nc.p(a)
#
# bc.member(b)
# b.name(bc.name() + n.name()[-1])
# nc.member(n)
# nc.neighbor(bc)
# nc.neighbor(d)
# class_name = nc.name()
# ------------------------------------[implies]-------
# n.p(a) # Any property except name, connection, and neighbor is the same as in nc
# n.neighbor(d) # For neighbors, if the neighbor is a neuron, then just the connection
# # holds for all members of the class
# n.neighbor(b) # Otherwise, the neuron (b) in the connected class on the same side as
# # n (i.e., the one for which the last character in its name matches the
# # last in n's name) in the neighbor
# n.name()[:-1] == nc.name()
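#
# A hypothetical concrete reading of the rule (class names reused from the data
# below, the membership is illustrative only): if nc = NeuronClass("RMG") has
# member RMGL and neighbors bc = NeuronClass("ASK") with member ASKL, then RMGL
# is given neighbor ASKL, because both names carry the same side suffix "L".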
#
# Setting up the data
#
def setup(sctx, ctx, name, type):
n = ctx(NeuronClass)(name)
n.type(type)
r = sctx(Neuron).query(name+"R")
for rs in r.load():
n.member(rs)
l = sctx(Neuron).query(name+"L")
for ls in l.load():
n.member(ls)
return n
with OWM('../.owm').connect().transaction() as conn:
ctx = conn(Context)('http://example.org/data')
evctx = conn(Context)('http://example.org/evidence')
w = ctx(Worm)("C. elegans")
net = ctx(Network)()
w.neuron_network(net)
doc = evctx(Document)(title="A Hub-and-Spoke Circuit Drives Pheromone Attraction and Social Behavior in C. elegans",
uri="http://www.ncbi.nlm.nih.gov/pmc/articles/PMC2760495/",
year=2009)
ev = evctx(Evidence)(reference=doc)
ev.supports(ctx.rdf_object)
sctx = conn(Context)().stored
rmg = setup(sctx, ctx, "RMG", 'interneuron')
rmh = setup(sctx, ctx, "RMH", 'motor')
ask = setup(sctx, ctx, "ASK", 'sensory')
ash = setup(sctx, ctx, "ASH", 'sensory')
adl = setup(sctx, ctx, "ADL", 'sensory')
urx = setup(sctx, ctx, "URX", 'sensory')
awb = setup(sctx, ctx, "AWB", 'sensory')
il2 = setup(sctx, ctx, "IL2", 'sensory')
# describing the connections
d = [(ask, 'gj', rmg),
(rmh, 'gj', rmg),
(urx, 'gj', rmg),
(urx, 'sn', rmg),
(awb, 'gj', rmg),
(il2, 'gj', rmg),
(adl, 'gj', rmg),
(ash, 'sn', rmg),
(ash, 'gj', rmg),
(rmg, 'sn', ash)]
for p, x, o in d:
if x == 'gj':
x = 'GapJunction'
else:
x = 'Send'
p.neighbor(o, syntype=x)
ctx.add_import(Context('http://openworm.org/data'))
ctx.save()
ctx.save_imports()
evctx.save()
ctx.mapper.add_class(NeuronClass)
nc = ctx.stored(NeuronClass)()
nc.type('sensory')
print('Sensory neuron classes in the circuit and their neurons')
# XXX: Add an evidence query like ev.asserts(nc.member(P.Neuron("ADLL")))
for x in nc.load():
print(x.name(), "has:")
for y in x.member():
print(" ", y.name(), "of type", ", ".join(y.type())) | null |
build csv response | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
import csv
import json
from io import StringIO
import requests
import frappe
from frappe import _, msgprint
from frappe.utils import cint, comma_or, cstr, flt
def read_csv_content_from_attached_file(doc):
fileid = frappe.get_all(
"File",
fields=["name"],
filters={"attached_to_doctype": doc.doctype, "attached_to_name": doc.name},
order_by="creation desc",
)
if fileid:
fileid = fileid[0].name
if not fileid:
msgprint(_("File not attached"))
raise Exception
try:
_file = frappe.get_doc("File", fileid)
fcontent = _file.get_content()
return read_csv_content(fcontent)
except Exception:
frappe.throw(
_("Unable to open attached file. Did you export it as CSV?"), title=_("Invalid CSV Format")
)
def read_csv_content(fcontent):
if not isinstance(fcontent, str):
decoded = False
for encoding in ["utf-8", "windows-1250", "windows-1252"]:
try:
fcontent = str(fcontent, encoding)
decoded = True
break
except UnicodeDecodeError:
continue
if not decoded:
frappe.msgprint(
_("Unknown file encoding. Tried utf-8, windows-1250, windows-1252."), raise_exception=True
)
fcontent = fcontent.encode("utf-8")
content = [frappe.safe_decode(line) for line in fcontent.splitlines(True)]
try:
rows = []
for row in csv.reader(content):
r = []
for val in row:
# decode everything
val = val.strip()
if val == "":
                    # reason: in MariaDB strict config, one cannot have blank strings for non-string datatypes
r.append(None)
else:
r.append(val)
rows.append(r)
return rows
except Exception:
frappe.msgprint(_("Not a valid Comma Separated Value (CSV File)"))
raise
@frappe.whitelist()
def send_csv_to_client(args):
if isinstance(args, str):
args = json.loads(args)
args = frappe._dict(args)
frappe.response["result"] = cstr(to_csv(args.data))
frappe.response["doctype"] = args.filename
frappe.response["type"] = "csv"
def to_csv(data):
writer = UnicodeWriter()
for row in data:
writer.writerow(row)
return writer.getvalue()
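# Illustrative example (not part of the original module): UnicodeWriter below
# defaults to QUOTE_NONNUMERIC, so string cells come out quoted and numbers do not;
# to_csv([["name", "qty"], ["apple", 5]]) would yield roughly '"name","qty"\r\n"apple",5\r\n'.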
def METHOD_NAME(data, filename):
frappe.response["result"] = cstr(to_csv(data))
frappe.response["doctype"] = filename
frappe.response["type"] = "csv"
class UnicodeWriter:
def __init__(self, encoding="utf-8", quoting=csv.QUOTE_NONNUMERIC):
self.encoding = encoding
self.queue = StringIO()
self.writer = csv.writer(self.queue, quoting=quoting)
def writerow(self, row):
self.writer.writerow(row)
def getvalue(self):
return self.queue.getvalue()
def check_record(d):
"""check for mandatory, select options, dates. these should ideally be in doclist"""
from frappe.utils.dateutils import parse_date
doc = frappe.get_doc(d)
for key in d:
docfield = doc.meta.get_field(key)
val = d[key]
if docfield:
if docfield.reqd and (val == "" or val is None):
frappe.msgprint(_("{0} is required").format(docfield.label), raise_exception=1)
if docfield.fieldtype == "Select" and val and docfield.options:
if val not in docfield.options.split("\n"):
frappe.throw(
_("{0} must be one of {1}").format(_(docfield.label), comma_or(docfield.options.split("\n")))
)
if val and docfield.fieldtype == "Date":
d[key] = parse_date(val)
elif val and docfield.fieldtype in ["Int", "Check"]:
d[key] = cint(val)
elif val and docfield.fieldtype in ["Currency", "Float", "Percent"]:
d[key] = flt(val)
def import_doc(d, doctype, overwrite, row_idx, submit=False, ignore_links=False):
"""import main (non child) document"""
if d.get("name") and frappe.db.exists(doctype, d["name"]):
if overwrite:
doc = frappe.get_doc(doctype, d["name"])
doc.flags.ignore_links = ignore_links
doc.update(d)
if d.get("docstatus") == 1:
doc.update_after_submit()
elif d.get("docstatus") == 0 and submit:
doc.submit()
else:
doc.save()
return "Updated row (#%d) %s" % (row_idx + 1, getlink(doctype, d["name"]))
else:
return "Ignored row (#%d) %s (exists)" % (row_idx + 1, getlink(doctype, d["name"]))
else:
doc = frappe.get_doc(d)
doc.flags.ignore_links = ignore_links
doc.insert()
if submit:
doc.submit()
return "Inserted row (#%d) %s" % (row_idx + 1, getlink(doctype, doc.get("name")))
def getlink(doctype, name):
return '<a href="/app/Form/%(doctype)s/%(name)s">%(name)s</a>' % locals()
def get_csv_content_from_google_sheets(url):
# https://docs.google.com/spreadsheets/d/{sheetid}}/edit#gid={gid}
validate_google_sheets_url(url)
# get gid, defaults to first sheet
if "gid=" in url:
gid = url.rsplit("gid=", 1)[1]
else:
gid = 0
# remove /edit path
url = url.rsplit("/edit", 1)[0]
# add /export path,
url = url + f"/export?format=csv&gid={gid}"
headers = {"Accept": "text/csv"}
response = requests.get(url, headers=headers)
if response.ok:
# if it returns html, it couldn't find the CSV content
# because of invalid url or no access
if response.text.strip().endswith("</html>"):
frappe.throw(
_("Google Sheets URL is invalid or not publicly accessible."), title=_("Invalid URL")
)
return response.content
elif response.status_code == 400:
frappe.throw(
_(
'Google Sheets URL must end with "gid={number}". Copy and paste the URL from the browser address bar and try again.'
),
title=_("Incorrect URL"),
)
else:
response.raise_for_status()
def validate_google_sheets_url(url):
from urllib.parse import urlparse
u = urlparse(url)
if u.scheme != "https" or u.netloc != "docs.google.com" or "/spreadsheets/" not in u.path:
frappe.throw(
_('"{0}" is not a valid Google Sheets URL').format(url),
title=_("Invalid URL"),
) | null |
get logo part | from abc import ABC, abstractmethod
from shutil import get_terminal_size
from textwrap import indent, wrap
from typing import Dict, Optional
from sanic import __version__
from sanic.helpers import is_atty
from sanic.log import logger
class MOTD(ABC):
"""Base class for the Message of the Day (MOTD) display."""
def __init__(
self,
logo: Optional[str],
serve_location: str,
data: Dict[str, str],
extra: Dict[str, str],
) -> None:
self.logo = logo
self.serve_location = serve_location
self.data = data
self.extra = extra
self.key_width = 0
self.value_width = 0
@abstractmethod
def display(self):
"""Display the MOTD."""
@classmethod
def output(
cls,
logo: Optional[str],
serve_location: str,
data: Dict[str, str],
extra: Dict[str, str],
) -> None:
"""Output the MOTD.
Args:
logo (Optional[str]): Logo to display.
serve_location (str): Location to serve.
data (Dict[str, str]): Data to display.
extra (Dict[str, str]): Extra data to display.
"""
motd_class = MOTDTTY if is_atty() else MOTDBasic
motd_class(logo, serve_location, data, extra).display()
class MOTDBasic(MOTD):
"""A basic MOTD display.
This is used when the terminal does not support ANSI escape codes.
"""
def display(self):
if self.logo:
logger.debug(self.logo)
lines = [f"Sanic v{__version__}"]
if self.serve_location:
lines.append(f"Goin' Fast @ {self.serve_location}")
lines += [
*(f"{key}: {value}" for key, value in self.data.items()),
*(f"{key}: {value}" for key, value in self.extra.items()),
]
for line in lines:
logger.info(line)
class MOTDTTY(MOTD):
"""A MOTD display for terminals that support ANSI escape codes."""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self.set_variables()
def set_variables(self): # no cov
"""Set the variables used for display."""
fallback = (108, 24)
terminal_width = max(
get_terminal_size(fallback=fallback).columns, fallback[0]
)
self.max_value_width = terminal_width - fallback[0] + 36
self.key_width = 4
self.value_width = self.max_value_width
if self.data:
self.key_width = max(map(len, self.data.keys()))
self.value_width = min(
max(map(len, self.data.values())), self.max_value_width
)
self.logo_lines = self.logo.split("\n") if self.logo else []
self.logo_line_length = 24
self.centering_length = (
self.key_width + self.value_width + 2 + self.logo_line_length
)
self.display_length = self.key_width + self.value_width + 2
def display(self, version=True, action="Goin' Fast", out=None):
"""Display the MOTD.
Args:
version (bool, optional): Display the version. Defaults to `True`.
action (str, optional): Action to display. Defaults to
`"Goin' Fast"`.
out (Optional[Callable], optional): Output function. Defaults to
`None`.
"""
if not out:
out = logger.info
header = "Sanic"
if version:
header += f" v{__version__}"
header = header.center(self.centering_length)
running = (
f"{action} @ {self.serve_location}" if self.serve_location else ""
).center(self.centering_length)
length = len(header) + 2 - self.logo_line_length
first_filler = "─" * (self.logo_line_length - 1)
second_filler = "─" * length
display_filler = "─" * (self.display_length + 2)
lines = [
f"\n┌{first_filler}─{second_filler}┐",
f"│ {header} │",
f"│ {running} │",
f"├{first_filler}┬{second_filler}┤",
]
self._render_data(lines, self.data, 0)
if self.extra:
logo_part = self.METHOD_NAME(len(lines) - 4)
lines.append(f"| {logo_part} ├{display_filler}┤")
self._render_data(lines, self.extra, len(lines) - 4)
self._render_fill(lines)
lines.append(f"└{first_filler}┴{second_filler}┘\n")
out(indent("\n".join(lines), " "))
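    # Rough sketch of the rendered frame (text, widths and data rows are
    # hypothetical, not produced by running the code):
    #
    #     ┌──────────────────────────────────────┐
    #     │             Sanic v0.0.0             │
    #     │       Goin' Fast @ http://host       │
    #     ├──────────────┬───────────────────────┤
    #     │  <logo art>  │ mode: production      │
    #     └──────────────┴───────────────────────┘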
def _render_data(self, lines, data, start):
offset = 0
for idx, (key, value) in enumerate(data.items(), start=start):
key = key.rjust(self.key_width)
wrapped = wrap(value, self.max_value_width, break_on_hyphens=False)
for wrap_index, part in enumerate(wrapped):
part = part.ljust(self.value_width)
logo_part = self.METHOD_NAME(idx + offset + wrap_index)
display = (
f"{key}: {part}"
if wrap_index == 0
else (" " * len(key) + f" {part}")
)
lines.append(f"│ {logo_part} │ {display} │")
if wrap_index:
offset += 1
def _render_fill(self, lines):
filler = " " * self.display_length
idx = len(lines) - 5
for i in range(1, len(self.logo_lines) - idx):
logo_part = self.logo_lines[idx + i]
lines.append(f"│ {logo_part} │ {filler} │")
def METHOD_NAME(self, idx):
try:
logo_part = self.logo_lines[idx]
except IndexError:
logo_part = " " * (self.logo_line_length - 3)
return logo_part | null |
test init templatedir already set | from __future__ import annotations
import os.path
from unittest import mock
import pytest
import pre_commit.constants as C
from pre_commit.commands.init_templatedir import init_templatedir
from pre_commit.envcontext import envcontext
from pre_commit.util import cmd_output
from testing.fixtures import git_dir
from testing.fixtures import make_consuming_repo
from testing.util import cmd_output_mocked_pre_commit_home
from testing.util import cwd
from testing.util import git_commit
def test_init_templatedir(tmpdir, tempdir_factory, store, cap_out):
target = str(tmpdir.join('tmpl'))
init_templatedir(C.CONFIG_FILE, store, target, hook_types=['pre-commit'])
lines = cap_out.get().splitlines()
assert lines[0].startswith('pre-commit installed at ')
assert lines[1] == (
'[WARNING] `init.templateDir` not set to the target directory'
)
assert lines[2].startswith(
'[WARNING] maybe `git config --global init.templateDir',
)
with envcontext((('GIT_TEMPLATE_DIR', target),)):
path = make_consuming_repo(tempdir_factory, 'script_hooks_repo')
with cwd(path):
retcode, output = git_commit(
fn=cmd_output_mocked_pre_commit_home,
tempdir_factory=tempdir_factory,
)
assert retcode == 0
assert 'Bash hook....' in output
def METHOD_NAME(tmpdir, tempdir_factory, store, cap_out):
target = str(tmpdir.join('tmpl'))
tmp_git_dir = git_dir(tempdir_factory)
with cwd(tmp_git_dir):
cmd_output('git', 'config', 'init.templateDir', target)
init_templatedir(
C.CONFIG_FILE, store, target, hook_types=['pre-commit'],
)
lines = cap_out.get().splitlines()
assert len(lines) == 1
assert lines[0].startswith('pre-commit installed at')
def test_init_templatedir_not_set(tmpdir, store, cap_out):
# set HOME to ignore the current `.gitconfig`
with envcontext((('HOME', str(tmpdir)),)):
with tmpdir.join('tmpl').ensure_dir().as_cwd():
# we have not set init.templateDir so this should produce a warning
init_templatedir(
C.CONFIG_FILE, store, '.', hook_types=['pre-commit'],
)
lines = cap_out.get().splitlines()
assert len(lines) == 3
assert lines[1] == (
'[WARNING] `init.templateDir` not set to the target directory'
)
def test_init_templatedir_expanduser(tmpdir, tempdir_factory, store, cap_out):
target = str(tmpdir.join('tmpl'))
tmp_git_dir = git_dir(tempdir_factory)
with cwd(tmp_git_dir):
cmd_output('git', 'config', 'init.templateDir', '~/templatedir')
with mock.patch.object(os.path, 'expanduser', return_value=target):
init_templatedir(
C.CONFIG_FILE, store, target, hook_types=['pre-commit'],
)
lines = cap_out.get().splitlines()
assert len(lines) == 1
assert lines[0].startswith('pre-commit installed at')
def test_init_templatedir_hookspath_set(tmpdir, tempdir_factory, store):
target = tmpdir.join('tmpl')
tmp_git_dir = git_dir(tempdir_factory)
with cwd(tmp_git_dir):
cmd_output('git', 'config', '--local', 'core.hooksPath', 'hooks')
init_templatedir(
C.CONFIG_FILE, store, target, hook_types=['pre-commit'],
)
assert target.join('hooks/pre-commit').exists()
@pytest.mark.parametrize(
('skip', 'commit_retcode', 'commit_output_snippet'),
(
(True, 0, 'Skipping `pre-commit`.'),
(False, 1, f'No {C.CONFIG_FILE} file was found'),
),
)
def test_init_templatedir_skip_on_missing_config(
tmpdir,
tempdir_factory,
store,
cap_out,
skip,
commit_retcode,
commit_output_snippet,
):
target = str(tmpdir.join('tmpl'))
init_git_dir = git_dir(tempdir_factory)
with cwd(init_git_dir):
cmd_output('git', 'config', 'init.templateDir', target)
init_templatedir(
C.CONFIG_FILE,
store,
target,
hook_types=['pre-commit'],
skip_on_missing_config=skip,
)
lines = cap_out.get().splitlines()
assert len(lines) == 1
assert lines[0].startswith('pre-commit installed at')
with envcontext((('GIT_TEMPLATE_DIR', target),)):
verify_git_dir = git_dir(tempdir_factory)
with cwd(verify_git_dir):
retcode, output = git_commit(
fn=cmd_output_mocked_pre_commit_home,
tempdir_factory=tempdir_factory,
check=False,
)
assert retcode == commit_retcode
assert commit_output_snippet in output | null |
party choices | import re
from django.db import models
from django.utils import timezone
from .constants import JOINT_DESCRIPTION_REGEX
class PartyQuerySet(models.QuerySet):
def active_for_date(self, date=None):
if not date:
date = timezone.now()
qs = self.filter(date_registered__lte=date)
return qs.filter(
models.Q(date_deregistered__gte=date)
| models.Q(date_deregistered=None)
)
def active_in_last_year(self, date=None):
"""
Return a PartyQuerySet of instances that have been active any time
within a year of the given date.
"""
date = date or timezone.now()
last_year = date - timezone.timedelta(days=365)
return self.filter(date_registered__lt=date).filter(
models.Q(date_deregistered=None)
| models.Q(date_deregistered__gte=last_year)
)
def current(self):
return self.active_for_date()
def register(self, register):
register = register.upper()
return self.filter(
models.Q(register=register) | models.Q(register=None)
)
def order_by_memberships(self, date=None, nocounts=False):
qs = self
if date:
qs = qs.filter(membership__ballot__election__election_date=date)
qs = qs.annotate(
candidate_count=models.Count("membership")
).order_by("-candidate_count", "name")
if nocounts:
qs = qs.filter(
~models.Q(membership__ballot__election__election_date=date)
).annotate(candidate_count=models.Value(0, models.IntegerField()))
return qs
def METHOD_NAME(
self,
include_descriptions=True,
exclude_deregistered=False,
include_description_ids=False,
include_non_current=True,
extra_party_ids=None,
):
# For various reasons, we've found it's best to order the
# parties by those that have the most candidates - this means
# that the commonest parties to select are at the top of the
# drop down. The logic here tries to build such an ordered
# list of candidates if there are enough that such an ordering
# makes sense. Otherwise the fallback is to rank
# alphabetically.
party_filter_kwargs = {}
party_order_by = ["name"]
if include_non_current:
party_order_by.insert(0, "-total_candidates")
else:
party_filter_kwargs["current_candidates__gt"] = 0
party_order_by.insert(0, "-current_candidates")
parties_current_qs = (
self.filter(**party_filter_kwargs)
.order_by(*party_order_by)
.only("date_deregistered", "name", "ec_id", "register")
)
if not extra_party_ids:
extra_party_ids = []
if isinstance(extra_party_ids, tuple):
extra_party_ids = list(extra_party_ids)
extra_party_ids.append("ynmp-party:2")
extra_qs = self.model.objects.filter(ec_id__in=extra_party_ids)
parties_current_qs = extra_qs | parties_current_qs
if include_descriptions:
parties_current_qs = parties_current_qs.prefetch_related(
"descriptions"
)
result = [("", {"label": ""})]
for party in parties_current_qs:
if (
party.date_deregistered
and party.is_deregistered
and exclude_deregistered
):
continue
if include_descriptions and party.descriptions.exists():
names = [
(
party.ec_id,
{
"label": party.format_name,
"register": party.register or "all",
},
)
]
for description in party.descriptions.all():
joint_text = re.compile(JOINT_DESCRIPTION_REGEX, re.I)
party_id_str = str(party.ec_id)
if include_description_ids:
party_id_str = "{}__{}".format(
party_id_str, description.pk
)
if not joint_text.search(description.description):
names.append(
(
party_id_str,
{
"label": description.description,
"register": party.register or "all",
},
)
)
party_names = (party.format_name, names)
else:
party_names = (
str(party.ec_id),
{
"label": party.format_name,
"register": party.register or "all",
},
)
result.append(party_names)
return result
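    # Illustrative shape of the returned choices (party IDs and names are made up):
    # parties with extra registered descriptions become a grouped entry, everything
    # else stays a flat (value, attrs) pair, e.g.
    #
    #     [("", {"label": ""}),
    #      ("Example Party", [
    #          ("PP01", {"label": "Example Party", "register": "GB"}),
    #          ("PP01__7", {"label": "Example Party Candidates", "register": "GB"})]),
    #      ("PP02", {"label": "Other Party", "register": "all"})]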
def default_party_choices(self, register=None, extra_party_ids=None):
qs = self
if register:
qs = qs.register(register)
return qs.METHOD_NAME(
include_descriptions=True,
include_non_current=False,
exclude_deregistered=True,
include_description_ids=True,
extra_party_ids=extra_party_ids,
) | null |
report error | '''Define SearchEngine for search dialogs.'''
import re
from Tkinter import StringVar, BooleanVar, TclError
import tkMessageBox
def get(root):
'''Return the singleton SearchEngine instance for the process.
The single SearchEngine saves settings between dialog instances.
If there is not a SearchEngine already, make one.
'''
if not hasattr(root, "_searchengine"):
root._searchengine = SearchEngine(root)
# This creates a cycle that persists until root is deleted.
return root._searchengine
class SearchEngine:
"""Handles searching a text widget for Find, Replace, and Grep."""
def __init__(self, root):
'''Initialize Variables that save search state.
The dialogs bind these to the UI elements present in the dialogs.
'''
self.root = root # need for report_error()
self.patvar = StringVar(root, '') # search pattern
self.revar = BooleanVar(root, False) # regular expression?
self.casevar = BooleanVar(root, False) # match case?
self.wordvar = BooleanVar(root, False) # match whole word?
self.wrapvar = BooleanVar(root, True) # wrap around buffer?
self.backvar = BooleanVar(root, False) # search backwards?
# Access methods
def getpat(self):
return self.patvar.get()
def setpat(self, pat):
self.patvar.set(pat)
def isre(self):
return self.revar.get()
def iscase(self):
return self.casevar.get()
def isword(self):
return self.wordvar.get()
def iswrap(self):
return self.wrapvar.get()
def isback(self):
return self.backvar.get()
# Higher level access methods
def setcookedpat(self, pat):
"Set pattern after escaping if re."
# called only in SearchDialog.py: 66
if self.isre():
pat = re.escape(pat)
self.setpat(pat)
def getcookedpat(self):
pat = self.getpat()
if not self.isre(): # if True, see setcookedpat
pat = re.escape(pat)
if self.isword():
pat = r"\b%s\b" % pat
return pat
def getprog(self):
"Return compiled cooked search pattern."
pat = self.getpat()
if not pat:
self.METHOD_NAME(pat, "Empty regular expression")
return None
pat = self.getcookedpat()
flags = 0
if not self.iscase():
flags = flags | re.IGNORECASE
try:
prog = re.compile(pat, flags)
except re.error as what:
args = what.args
msg = args[0]
col = args[1] if len(args) >= 2 else -1
self.METHOD_NAME(pat, msg, col)
return None
return prog
def METHOD_NAME(self, pat, msg, col=-1):
# Derived class could override this with something fancier
msg = "Error: " + str(msg)
if pat:
msg = msg + "\nPattern: " + str(pat)
if col >= 0:
msg = msg + "\nOffset: " + str(col)
tkMessageBox.showerror("Regular expression error",
msg, master=self.root)
def search_text(self, text, prog=None, ok=0):
'''Return (lineno, matchobj) or None for forward/backward search.
This function calls the right function with the right arguments.
        It directly returns the result of that call.
Text is a text widget. Prog is a precompiled pattern.
The ok parameter is a bit complicated as it has two effects.
If there is a selection, the search begin at either end,
depending on the direction setting and ok, with ok meaning that
the search starts with the selection. Otherwise, search begins
at the insert mark.
To aid progress, the search functions do not return an empty
match at the starting position unless ok is True.
'''
if not prog:
prog = self.getprog()
if not prog:
return None # Compilation failed -- stop
wrap = self.wrapvar.get()
first, last = get_selection(text)
if self.isback():
if ok:
start = last
else:
start = first
line, col = get_line_col(start)
res = self.search_backward(text, prog, line, col, wrap, ok)
else:
if ok:
start = first
else:
start = last
line, col = get_line_col(start)
res = self.search_forward(text, prog, line, col, wrap, ok)
return res
def search_forward(self, text, prog, line, col, wrap, ok=0):
wrapped = 0
startline = line
chars = text.get("%d.0" % line, "%d.0" % (line+1))
while chars:
m = prog.search(chars[:-1], col)
if m:
if ok or m.end() > col:
return line, m
line = line + 1
if wrapped and line > startline:
break
col = 0
ok = 1
chars = text.get("%d.0" % line, "%d.0" % (line+1))
if not chars and wrap:
wrapped = 1
wrap = 0
line = 1
chars = text.get("1.0", "2.0")
return None
def search_backward(self, text, prog, line, col, wrap, ok=0):
wrapped = 0
startline = line
chars = text.get("%d.0" % line, "%d.0" % (line+1))
while 1:
m = search_reverse(prog, chars[:-1], col)
if m:
if ok or m.start() < col:
return line, m
line = line - 1
if wrapped and line < startline:
break
ok = 1
if line <= 0:
if not wrap:
break
wrapped = 1
wrap = 0
pos = text.index("end-1c")
line, col = map(int, pos.split("."))
chars = text.get("%d.0" % line, "%d.0" % (line+1))
col = len(chars) - 1
return None
def search_reverse(prog, chars, col):
'''Search backwards and return an re match object or None.
This is done by searching forwards until there is no match.
Prog: compiled re object with a search method returning a match.
Chars: line of text, without \\n.
Col: stop index for the search; the limit for match.end().
'''
m = prog.search(chars)
if not m:
return None
found = None
i, j = m.span() # m.start(), m.end() == match slice indexes
while i < col and j <= col:
found = m
if i == j:
j = j+1
m = prog.search(chars, j)
if not m:
break
i, j = m.span()
return found
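# Illustrative trace (hypothetical values): with chars = "abcabc", a compiled
# pattern for "abc" and col = 5, the forward scan sees spans (0, 3) and (3, 6);
# the second span ends past col, so the loop stops and the match at (0, 3) is
# returned, i.e. the last hit that starts before and ends no later than col.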
def get_selection(text):
'''Return tuple of 'line.col' indexes from selection or insert mark.
'''
try:
first = text.index("sel.first")
last = text.index("sel.last")
except TclError:
first = last = None
if not first:
first = text.index("insert")
if not last:
last = first
return first, last
def get_line_col(index):
'''Return (line, col) tuple of ints from 'line.col' string.'''
line, col = map(int, index.split(".")) # Fails on invalid index
return line, col
if __name__ == "__main__":
import unittest
unittest.main('idlelib.idle_test.test_searchengine', verbosity=2, exit=False) | null |
build list request | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def METHOD_NAME(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2022-11-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.NetApp/operations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
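# For illustration (derived from the builder above, not from service docs): the
# request is a plain GET to
#     /providers/Microsoft.NetApp/operations?api-version=2022-11-01
# with an "Accept: application/json" header; authentication, if any, is applied
# later by the client pipeline.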
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.netapp.NetAppManagementClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
"""Describes the Resource Provider.
Lists all of the available Microsoft.NetApp Rest API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Operation or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.netapp.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = METHOD_NAME(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.NetApp/operations"} | null |
test hadoop version url override | """Tests for dbp_service."""
import unittest
from absl.testing import flagsaver
from packaging import version
from perfkitbenchmarker import errors
from perfkitbenchmarker.linux_packages import hadoop
from tests import pkb_common_test_case
import requests_mock
HADOOP_STABLE_DIR = """\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">
<html>
<head>
<title>Index of /hadoop/common/stable</title>
</head>
<body>
<h1>Index of /hadoop/common/stable</h1>
<pre><img src="/icons/blank.gif" alt="Icon "> <a href="?C=N;O=D">Name</a> <a href="?C=M;O=A">Last modified</a> <a href="?C=S;O=A">Size</a> <a href="?C=D;O=A">Description</a><hr><img src="/icons/back.gif" alt="[PARENTDIR]"> <a href="/hadoop/common/">Parent Directory</a> -
<img src="/icons/text.gif" alt="[TXT]"> <a href="CHANGELOG.md">CHANGELOG.md</a> 2022-05-11 16:49 5.2K
<img src="/icons/text.gif" alt="[TXT]"> <a href="CHANGELOG.md.asc">CHANGELOG.md.asc</a> 2022-05-11 16:49 833
<img src="/icons/text.gif" alt="[TXT]"> <a href="CHANGELOG.md.sha512">CHANGELOG.md.sha512</a> 2022-05-11 16:49 153
<img src="/icons/text.gif" alt="[TXT]"> <a href="RELEASENOTES.md">RELEASENOTES.md</a> 2022-05-11 16:49 2.0K
<img src="/icons/text.gif" alt="[TXT]"> <a href="RELEASENOTES.md.asc">RELEASENOTES.md.asc</a> 2022-05-11 16:49 833
<img src="/icons/text.gif" alt="[TXT]"> <a href="RELEASENOTES.md.sha512">RELEASENOTES.md.sha512</a> 2022-05-11 16:49 156
<img src="/icons/text.gif" alt="[TXT]"> <a href="hadoop-3.3.3-rat.txt">hadoop-3.3.3-rat.txt</a> 2022-05-11 16:49 2.0M
<img src="/icons/text.gif" alt="[TXT]"> <a href="hadoop-3.3.3-rat.txt.asc">hadoop-3.3.3-rat.txt.asc</a> 2022-05-11 16:49 833
<img src="/icons/text.gif" alt="[TXT]"> <a href="hadoop-3.3.3-rat.txt.sha512">hadoop-3.3.3-rat.txt.sha512</a> 2022-05-11 16:49 161
<img src="/icons/compressed.gif" alt="[ ]"> <a href="hadoop-3.3.3-site.tar.gz">hadoop-3.3.3-site.tar.gz</a> 2022-05-11 16:49 42M
<img src="/icons/text.gif" alt="[TXT]"> <a href="hadoop-3.3.3-site.tar.gz.asc">hadoop-3.3.3-site.tar.gz.asc</a> 2022-05-11 16:49 833
<img src="/icons/text.gif" alt="[TXT]"> <a href="hadoop-3.3.3-site.tar.gz.sha512">hadoop-3.3.3-site.tar.gz.sha512</a> 2022-05-11 16:49 165
<img src="/icons/compressed.gif" alt="[ ]"> <a href="hadoop-3.3.3-src.tar.gz">hadoop-3.3.3-src.tar.gz</a> 2022-05-11 16:49 34M
<img src="/icons/text.gif" alt="[TXT]"> <a href="hadoop-3.3.3-src.tar.gz.asc">hadoop-3.3.3-src.tar.gz.asc</a> 2022-05-11 16:49 833
<img src="/icons/text.gif" alt="[TXT]"> <a href="hadoop-3.3.3-src.tar.gz.sha512">hadoop-3.3.3-src.tar.gz.sha512</a> 2022-05-11 16:49 164
<img src="/icons/compressed.gif" alt="[ ]"> <a href="hadoop-3.3.3.tar.gz">hadoop-3.3.3.tar.gz</a> 2022-05-11 16:49 615M
<img src="/icons/text.gif" alt="[TXT]"> <a href="hadoop-3.3.3.tar.gz.asc">hadoop-3.3.3.tar.gz.asc</a> 2022-05-11 16:49 833
<img src="/icons/text.gif" alt="[TXT]"> <a href="hadoop-3.3.3.tar.gz.sha512">hadoop-3.3.3.tar.gz.sha512</a> 2022-05-11 16:49 160
<hr></pre>
</body></html>
"""
class HadoopVersionsTest(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super().setUp()
hadoop.HadoopVersion.cache_clear()
@requests_mock.Mocker()
def testDefaultHadoopVersion(self, mock_requests):
mock_requests.get(
'https://downloads.apache.org/hadoop/common/stable',
text=HADOOP_STABLE_DIR)
for _ in range(5):
observed = hadoop.HadoopVersion()
self.assertEqual(version.Version('3.3.3'), observed)
# test caching
self.assertEqual(1, mock_requests.call_count)
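  # Presumably only the first HadoopVersion() call hits the mocked endpoint:
  # setUp calls HadoopVersion.cache_clear(), which implies the lookup is memoized,
  # so call_count stays at 1 across the five constructions above.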
@requests_mock.Mocker()
def testHadoopVersionConnectionError(self, mock_requests):
mock_requests.get(
'https://downloads.apache.org/hadoop/common/stable', status_code=404)
with self.assertRaisesRegex(
errors.Setup.MissingExecutableError,
'Could not load https://downloads.apache.org/hadoop/common/stable'):
hadoop.HadoopVersion()
@requests_mock.Mocker()
def testHadoopVersionParsingError(self, mock_requests):
mock_requests.get(
'https://downloads.apache.org/hadoop/common/stable',
text='<html><body><a href="foo">bar</a></body></html>')
with self.assertRaisesRegex(
errors.Setup.MissingExecutableError,
'Could not find valid hadoop version at '
'https://downloads.apache.org/hadoop/common/stable'):
hadoop.HadoopVersion()
@requests_mock.Mocker()
@flagsaver.flagsaver(hadoop_version='4.2.0')
def testHadoopVersionProvider(self, mock_requests):
observed = hadoop.HadoopVersion()
self.assertFalse(mock_requests.called)
self.assertEqual(version.Version('4.2.0'), observed)
@requests_mock.Mocker()
@flagsaver.flagsaver(hadoop_bin_url='http://my/hadooop-4.2.0.tar.gz')
def METHOD_NAME(self, mock_requests):
observed = hadoop.HadoopVersion()
self.assertFalse(mock_requests.called)
self.assertEqual(version.Version('4.2.0'), observed)
if __name__ == '__main__':
unittest.main() | null |
new config | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import collections.abc
from collections import OrderedDict
from .register import get_registered_model_info, get_registered_suite_info
__all__ = ['Config', 'BaseConfig']
class Config(object):
# We constrain function params here
def __new__(cls, model_name, config_path=None):
# Build config from model name
model_info = get_registered_model_info(model_name)
suite_name = model_info['suite']
suite_info = get_registered_suite_info(suite_name)
config_cls = suite_info['config']
config_obj = config_cls(model_name=model_name, config_path=config_path)
return config_obj
class _Config(object):
_DICT_TYPE_ = OrderedDict
def __init__(self, cfg=None):
super().__init__()
self._dict = self._DICT_TYPE_()
if cfg is not None:
# Manipulate the internal `_dict` such that we avoid an extra copy
self.reset_from_dict(cfg._dict)
@property
def dict(self):
return dict(self._dict)
def __getattr__(self, key):
try:
val = self._dict[key]
return val
except KeyError:
raise AttributeError
def set_val(self, key, val):
self._dict[key] = val
def __getitem__(self, key):
return self._dict[key]
def __setitem__(self, key, val):
self._dict[key] = val
def __contains__(self, key):
return key in self._dict
def METHOD_NAME(self, **kwargs):
cfg = self.copy()
        cfg.update(kwargs)
        return cfg
def copy(self):
return type(self)(cfg=self)
def pop(self, key):
self._dict.pop(key)
def __repr__(self):
return format_cfg(self, indent=0)
def reset_from_dict(self, dict_like_obj):
self._dict.clear()
self._dict.update(dict_like_obj)
class BaseConfig(_Config, metaclass=abc.ABCMeta):
"""
Abstract base class of Config.
    Config provides the functionality to load, parse, or dump to a
configuration file with a specific format. Also, it provides
APIs to update configurations of several important
hyperparameters and model components.
Args:
model_name (str): A registered model name.
config_path (str|None): Path of a configuration file.
cfg (BaseConfig|None): `BaseConfig` object to initialize from.
"""
def __init__(self, model_name, config_path=None, cfg=None):
super().__init__(cfg=cfg)
self.model_name = model_name
if cfg is None:
# Initialize from file if no `cfg` is specified to initialize from
if config_path is None:
model_info = get_registered_model_info(self.model_name)
config_path = model_info['config_path']
self.load(config_path)
@abc.abstractmethod
def load(self, config_path):
"""Load configurations from a file."""
raise NotImplementedError
@abc.abstractmethod
def dump(self, config_path):
"""Dump configurations to a file."""
raise NotImplementedError
@abc.abstractmethod
def update(self, dict_like_obj):
"""Update configurations from a dict-like object."""
raise NotImplementedError
@abc.abstractmethod
def update_dataset(self, dataset_dir, dataset_type=None):
"""Update configurations of dataset."""
raise NotImplementedError
@abc.abstractmethod
def update_learning_rate(self, learning_rate):
"""Update learning rate."""
raise NotImplementedError
@abc.abstractmethod
def update_batch_size(self, batch_size, mode='train'):
"""
Update batch size.
By default this method modifies the training batch size.
"""
raise NotImplementedError
@abc.abstractmethod
def _get_epochs_iters(self):
"""Get total number of epochs or iterations in training."""
raise NotImplementedError
@abc.abstractmethod
def _get_learning_rate(self):
"""Get learning rate used in training."""
raise NotImplementedError
@abc.abstractmethod
def _get_batch_size(self, mode='train'):
"""
Get batch size.
By default this method returns the training batch size.
"""
raise NotImplementedError
@abc.abstractmethod
def _get_qat_epochs_iters(self):
"""Get total number of epochs or iterations in QAT."""
raise NotImplementedError
@abc.abstractmethod
def _get_qat_learning_rate(self):
"""Get learning rate used in QAT."""
raise NotImplementedError
def copy(self):
return type(self)(model_name=self.model_name, cfg=self)
def format_cfg(cfg, indent=0):
MAP_TYPES = (collections.abc.Mapping, )
SEQ_TYPES = (list, tuple)
NESTED_TYPES = (*MAP_TYPES, *SEQ_TYPES)
s = ' ' * indent
if isinstance(cfg, _Config):
cfg = cfg.dict
if isinstance(cfg, MAP_TYPES):
for i, (k, v) in enumerate(sorted(cfg.items())):
s += str(k) + ': '
if isinstance(v, NESTED_TYPES):
s += '\n' + format_cfg(v, indent=indent + 1)
else:
s += str(v)
if i != len(cfg) - 1:
s += '\n'
elif isinstance(cfg, SEQ_TYPES):
for i, v in enumerate(cfg):
s += '- '
if isinstance(v, NESTED_TYPES):
s += '\n' + format_cfg(v, indent=indent + 1)
else:
s += str(v)
if i != len(cfg) - 1:
s += '\n'
else:
s += str(cfg)
return s | null |
get gh backup | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import copy
from typing import Any, Optional
from hydra import version
from hydra._internal.deprecation_warning import deprecation_warning
from hydra.core.global_hydra import GlobalHydra
from hydra.core.singleton import Singleton
from hydra.initialize import _UNSPECIFIED_
def METHOD_NAME() -> Any:
if GlobalHydra in Singleton._instances:
return copy.deepcopy(Singleton._instances[GlobalHydra])
else:
return None
def restore_gh_from_backup(_gh_backup: Any) -> Any:
if _gh_backup is None:
del Singleton._instances[GlobalHydra]
else:
Singleton._instances[GlobalHydra] = _gh_backup
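# Sketch of the intended pairing (illustrative; not part of the public API):
#
#     _backup = METHOD_NAME()
#     try:
#         ...  # code that creates or mutates the GlobalHydra singleton
#     finally:
#         restore_gh_from_backup(_backup)
#
# Restoring either re-installs the deep-copied instance or, when there was no
# instance to begin with, deletes whatever was registered in the meantime.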
class initialize:
def __init__(
self,
config_path: Optional[str] = _UNSPECIFIED_,
job_name: Optional[str] = None,
caller_stack_depth: int = 1,
) -> None:
from hydra import initialize as real_initialize
message = (
"hydra.experimental.initialize() is no longer experimental. "
"Use hydra.initialize()"
)
if version.base_at_least("1.2"):
raise ImportError(message)
deprecation_warning(message=message)
self.delegate = real_initialize(
config_path=config_path,
job_name=job_name,
caller_stack_depth=caller_stack_depth + 1,
)
def __enter__(self, *args: Any, **kwargs: Any) -> None:
self.delegate.__enter__(*args, **kwargs)
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
self.delegate.__exit__(exc_type, exc_val, exc_tb)
def __repr__(self) -> str:
return "hydra.experimental.initialize()"
class initialize_config_module:
"""
    Initializes Hydra and adds the config_module to the config search path.
The config module must be importable (an __init__.py must exist at its top level)
:param config_module: absolute module name, for example "foo.bar.conf".
:param job_name: the value for hydra.job.name (default is 'app')
"""
def __init__(self, config_module: str, job_name: str = "app") -> None:
from hydra import initialize_config_module as real_initialize_config_module
message = (
"hydra.experimental.initialize_config_module() is no longer experimental. "
"Use hydra.initialize_config_module()."
)
if version.base_at_least("1.2"):
raise ImportError(message)
deprecation_warning(message=message)
self.delegate = real_initialize_config_module(
config_module=config_module, job_name=job_name
)
def __enter__(self, *args: Any, **kwargs: Any) -> None:
self.delegate.__enter__(*args, **kwargs)
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
self.delegate.__exit__(exc_type, exc_val, exc_tb)
def __repr__(self) -> str:
return "hydra.experimental.initialize_config_module()"
class initialize_config_dir:
"""
    Initializes Hydra and adds an absolute config dir to the config search path.
    The config_dir is always a path on the file system and must be an absolute path.
Relative paths will result in an error.
:param config_dir: absolute file system path
:param job_name: the value for hydra.job.name (default is 'app')
"""
def __init__(self, config_dir: str, job_name: str = "app") -> None:
from hydra import initialize_config_dir as real_initialize_config_dir
message = (
"hydra.experimental.initialize_config_dir() is no longer experimental. "
"Use hydra.initialize_config_dir()."
)
if version.base_at_least("1.2"):
raise ImportError(message)
deprecation_warning(message=message)
self.delegate = real_initialize_config_dir(
config_dir=config_dir, job_name=job_name
)
def __enter__(self, *args: Any, **kwargs: Any) -> None:
self.delegate.__enter__(*args, **kwargs)
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
self.delegate.__exit__(exc_type, exc_val, exc_tb)
def __repr__(self) -> str:
return "hydra.experimental.initialize_config_dir()" | null |
test arithmetic drops references | import sys
import os
import mmap
import pytest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryFile
from numpy import (
memmap, sum, average, product, ndarray, isscalar, add, subtract, multiply)
from numpy import arange, allclose, asarray
from numpy.testing import (
assert_, assert_equal, assert_array_equal, suppress_warnings, IS_PYPY,
break_cycles
)
class TestMemmap:
def setup_method(self):
self.tmpfp = NamedTemporaryFile(prefix='mmap')
self.shape = (3, 4)
self.dtype = 'float32'
self.data = arange(12, dtype=self.dtype)
self.data.resize(self.shape)
def teardown_method(self):
self.tmpfp.close()
self.data = None
if IS_PYPY:
break_cycles()
break_cycles()
def test_roundtrip(self):
# Write data to file
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
del fp # Test __del__ machinery, which handles cleanup
# Read data back from file
newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r',
shape=self.shape)
assert_(allclose(self.data, newfp))
assert_array_equal(self.data, newfp)
assert_equal(newfp.flags.writeable, False)
def test_open_with_filename(self, tmp_path):
tmpname = tmp_path / 'mmap'
fp = memmap(tmpname, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
del fp
def test_unnamed_file(self):
with TemporaryFile() as f:
fp = memmap(f, dtype=self.dtype, shape=self.shape)
del fp
def test_attributes(self):
offset = 1
mode = "w+"
fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,
shape=self.shape, offset=offset)
assert_equal(offset, fp.offset)
assert_equal(mode, fp.mode)
del fp
def test_filename(self, tmp_path):
tmpname = tmp_path / "mmap"
fp = memmap(tmpname, dtype=self.dtype, mode='w+',
shape=self.shape)
abspath = Path(os.path.abspath(tmpname))
fp[:] = self.data[:]
assert_equal(abspath, fp.filename)
b = fp[:1]
assert_equal(abspath, b.filename)
del b
del fp
def test_path(self, tmp_path):
tmpname = tmp_path / "mmap"
fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+',
shape=self.shape)
# os.path.realpath does not resolve symlinks on Windows
# see: https://bugs.python.org/issue9949
# use Path.resolve, just as memmap class does internally
abspath = str(Path(tmpname).resolve())
fp[:] = self.data[:]
assert_equal(abspath, str(fp.filename.resolve()))
b = fp[:1]
assert_equal(abspath, str(b.filename.resolve()))
del b
del fp
def test_filename_fileobj(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
shape=self.shape)
assert_equal(fp.filename, self.tmpfp.name)
@pytest.mark.skipif(sys.platform == 'gnu0',
reason="Known to fail on hurd")
def test_flush(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp[:] = self.data[:]
assert_equal(fp[0], self.data[0])
fp.flush()
def test_del(self):
# Make sure a view does not delete the underlying mmap
fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
fp_base[0] = 5
fp_view = fp_base[0:1]
assert_equal(fp_view[0], 5)
del fp_view
# Should still be able to access and assign values after
# deleting the view
assert_equal(fp_base[0], 5)
fp_base[0] = 6
assert_equal(fp_base[0], 6)
def METHOD_NAME(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = (fp + 10)
if isinstance(tmp, memmap):
assert_(tmp._mmap is not fp._mmap)
def test_indexing_drops_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = fp[(1, 2), (2, 3)]
if isinstance(tmp, memmap):
assert_(tmp._mmap is not fp._mmap)
def test_slicing_keeps_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
assert_(fp[:2, :2]._mmap is fp._mmap)
def test_view(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
new1 = fp.view()
new2 = new1.view()
assert_(new1.base is fp)
assert_(new2.base is fp)
new_array = asarray(fp)
assert_(new_array.base is fp)
def test_ufunc_return_ndarray(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
fp[:] = self.data
with suppress_warnings() as sup:
sup.filter(FutureWarning, "np.average currently does not preserve")
for unary_op in [sum, average, product]:
result = unary_op(fp)
assert_(isscalar(result))
assert_(result.__class__ is self.data[0, 0].__class__)
assert_(unary_op(fp, axis=0).__class__ is ndarray)
assert_(unary_op(fp, axis=1).__class__ is ndarray)
for binary_op in [add, subtract, multiply]:
assert_(binary_op(fp, self.data).__class__ is ndarray)
assert_(binary_op(self.data, fp).__class__ is ndarray)
assert_(binary_op(fp, fp).__class__ is ndarray)
fp += 1
assert(fp.__class__ is memmap)
add(fp, 1, out=fp)
assert(fp.__class__ is memmap)
def test_getitem(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
fp[:] = self.data
assert_(fp[1:, :-1].__class__ is memmap)
# Fancy indexing returns a copy that is not memmapped
assert_(fp[[0, 1]].__class__ is ndarray)
def test_memmap_subclass(self):
class MemmapSubClass(memmap):
pass
fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape)
fp[:] = self.data
# We keep previous behavior for subclasses of memmap, i.e. the
# ufunc and __getitem__ output is never turned into a ndarray
assert_(sum(fp, axis=0).__class__ is MemmapSubClass)
assert_(sum(fp).__class__ is MemmapSubClass)
assert_(fp[1:, :-1].__class__ is MemmapSubClass)
assert(fp[[0, 1]].__class__ is MemmapSubClass)
def test_mmap_offset_greater_than_allocation_granularity(self):
size = 5 * mmap.ALLOCATIONGRANULARITY
offset = mmap.ALLOCATIONGRANULARITY + 1
fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset)
assert_(fp.offset == offset)
def test_no_shape(self):
self.tmpfp.write(b'a'*16)
mm = memmap(self.tmpfp, dtype='float64')
assert_equal(mm.shape, (2,))
def test_empty_array(self):
# gh-12653
with pytest.raises(ValueError, match='empty file'):
memmap(self.tmpfp, shape=(0,4), mode='w+')
self.tmpfp.write(b'\0')
# ok now the file is not empty
memmap(self.tmpfp, shape=(0,4), mode='w+') | null |
pre operations | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"capacity reservation group delete",
confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
"""Delete operation to delete a capacity reservation group. This operation is allowed only if all the associated resources are disassociated from the reservation group and all capacity reservations under the reservation group have also been deleted. Please refer to https://aka.ms/CapacityReservation for more details.
:example: Delete a capacity reservation group.
az capacity reservation group delete -n ReservationGroupName -g MyResourceGroup --yes
"""
_aaz_info = {
"version": "2022-08-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/capacityreservationgroups/{}", "2022-08-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return None
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.capacity_reservation_group_name = AAZStrArg(
options=["-n", "--capacity-reservation-group", "--capacity-reservation-group-name"],
help="The name of the capacity reservation group.",
required=True,
id_part="name",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.METHOD_NAME()
self.CapacityReservationGroupsDelete(ctx=self.ctx)()
self.post_operations()
@register_callback
def METHOD_NAME(self):
pass
@register_callback
def post_operations(self):
pass
class CapacityReservationGroupsDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
if session.http_response.status_code in [202]:
return self.on_202(session)
if session.http_response.status_code in [204]:
return self.on_204(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/capacityReservationGroups/{capacityReservationGroupName}",
**self.url_parameters
)
@property
def method(self):
return "DELETE"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"capacityReservationGroupName", self.ctx.args.capacity_reservation_group_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-08-01",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def on_202(self, session):
pass
def on_204(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"] | null |
exec action | import logging
import os.path as op
from functools import partial
from itertools import chain
import sublime
import sublime_plugin
import flower
from . import manager
from .utils import rootSplit, getBinaryFolder
from .compilerutils import getCompiler, COMPILER_KEY
from .process import Executor, Callback, Default
from .config import inlineIncludes
from ..env import expandEnv, expandPath
from ..pathutils import getExt, getFileName, getName
log = logging.getLogger(flower.NAME)
def snake_to_camel(s, sep=' '):
    return sep.join(map(str.capitalize, s.split('_')))
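# Illustrative examples (editorial, inputs are hypothetical):
#   snake_to_camel("compile_and_run")          -> "Compile And Run"
#   snake_to_camel("compile_and_run", sep="")  -> "CompileAndRun"
# It is used below to turn compiler action identifiers into menu labels.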
def option_keys(workdir, workpath, binaryfolder_):
name, pathname = op.basename(workpath), op.join(workdir, workpath)
binaryfolder = expandPath(binaryfolder_ or flower.ENV.get('binaryfolder'))
return Default(
name=name,
pathname=pathname,
workpath=workpath,
binaryfolder=getBinaryFolder(binaryfolder, workdir),
)
def meta_values(workdir):
"""Application is meta-app if we have root/(www2,flow.config,app)/app.flow"""
www2 = op.join(workdir, 'www2')
if op.exists(www2):
root = op.basename(workdir)
url = "http://${{localhost}}/{root}/flowjs.html?name={{name}}{{args}}".format(root=root)
log.info("Meta app overrides")
log.debug("binaryfolder: %s", www2)
log.debug("url: %s", url)
return www2, url
return None, None
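# Illustrative note (editorial, paths are hypothetical): for a meta app checked out at
# /home/user/myapp containing a www2/ folder, meta_values('/home/user/myapp') returns
# ('/home/user/myapp/www2',
#  'http://${localhost}/myapp/flowjs.html?name={name}{args}');
# for any other layout it returns (None, None).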
class RunFlow(sublime_plugin.TextCommand):
"""Interactive main menu (F8)"""
@staticmethod
def selectPreset(action=None, runner=None):
"""Select preset from menu and run action/runner, or select runner"""
ext = runner and runner.ext
presets = manager.getPresets(ext)
onPreset = partial(RunFlow.get_run, action=action, runner=runner)
if len(presets) == 1:
onPreset(presets[0])
else:
items = [preset.toStr() for preset in presets]
onDone = lambda i: i > -1 and onPreset(presets[i])
sublime.active_window().show_quick_panel(items, onDone)
@staticmethod
def selectRunner(preset):
"""Select action/runner from menu for predefined preset"""
if not preset:
log.error("Can't select runner without preset")
return
ext = getExt(preset.main)
actions = manager.getActions(ext)
if not actions:
log.error("No actions available for `%s` file", ext)
return
# compiler makes sense for .flow
compiler = getCompiler(preset.main, key=COMPILER_KEY)
runners = manager.getRunners(ext)
wrap = lambda runner: partial(RunFlow.runCommand, preset, runner)
action_pairs = ((snake_to_camel(a), wrap(getattr(compiler, a)())) for a in actions)
runner_pairs = ((r.toStr()[0], wrap(r)) for r in runners)
view = sublime.active_window().active_view()
open_config = lambda: view.run_command("flower_open_config", {
"configpath": compiler.configpath
})
misc_pairs = filter(lambda x: x[0], [
(compiler.configpath and 'Configure {}'.format(compiler), open_config),
])
names, commands = zip(*(chain(action_pairs, runner_pairs, misc_pairs)))
onDone = lambda i: i > -1 and commands[i]()
sublime.active_window().show_quick_panel(names, onDone)
@staticmethod
def METHOD_NAME(preset, action, key=COMPILER_KEY, format_keys=None, after_args=()):
runner, compiler = manager.actionToRunner(action, preset, key)
RunFlow.execRun(preset, runner, compiler, format_keys, after_args)
@staticmethod
def execRun(preset, runner, compiler, format_keys=None, after_args=()):
"""Prepare all necessary bits for executing a command & go for it"""
main = preset.main
workdir, name = rootSplit(main)
compiler = compiler or getCompiler(main)
# regular includes retrieved from config, add inline ones
includes = compiler.formatIncludes__(inlineIncludes(main))
meta_binaryfolder, meta_url = meta_values(workdir)
binaryfolder, url = preset.binaryfolder or meta_binaryfolder, preset.url or meta_url
format_keys = format_keys or dict()
# can't ChainMap Default-dict, so update
format_keys.update(option_keys(workdir, getName(name), binaryfolder))
expand_options = lambda option: option.format(**format_keys)
cmd = expandEnv(*chain(
runner.cmd,
map(expand_options, runner.options),
includes,
[name],
))
afterArgs = {
"cpp": (workdir, getName(name), preset.args),
"gdb": (workdir, name, preset.args),
"web": (url, getFileName(name), preset.args),
"find": (compiler.defn_regex,),
}.get(runner.after, ()) + after_args
Executor.run(
cmd=cmd,
workdir=workdir,
after=Callback(runner.after, afterArgs),
quiet=True
)
@staticmethod
def runCommand(preset, runner, compiler=None):
"""Run specified preset with runner"""
if not (preset and runner):
return
view = sublime.active_window().active_view()
if view.is_dirty():
view.run_command('save')
manager.setRun(preset, runner)
RunFlow.execRun(preset, runner, compiler)
@staticmethod
def get_preset(preset):
""" preset: None = select, 'invalid' = current """
if isinstance(preset, (type(None), manager.Preset)):
return preset
presets = tuple(filter(lambda p: p.key == preset.lower(), manager.getPresets()))
if presets:
got_preset = presets[0]
log.debug("Preset: %s", got_preset)
else:
got_preset = manager.currentFilePreset()
if preset:
log.warning("Preset `%s` is not specified", preset)
return got_preset
@staticmethod
def get_runner(runner, preset):
if isinstance(runner, (type(None), manager.Runner)):
return runner
ext = preset and getExt(preset.main)
runners = tuple(filter(lambda r: r.key == runner.lower(), manager.getRunners(ext)))
if runners:
got_runner = runners[0]
log.debug("Runner: %s", got_runner)
else:
got_runner = None
log.error("Runner `%s` is not specified", runner)
return got_runner
@staticmethod
def get_run(preset=None, action=None, runner=None):
got_preset = RunFlow.get_preset(preset)
got_runner = RunFlow.get_runner(runner, got_preset)
# nothing provided - select preset first
if got_preset is None:
RunFlow.selectPreset(action, got_runner)
# action provided
elif action is not None:
runner_, compiler_ = manager.actionToRunner(action, got_preset)
RunFlow.runCommand(got_preset, runner_, compiler_)
# preset provided
elif got_runner is None:
RunFlow.selectRunner(got_preset)
# preset & runner provided
else:
RunFlow.runCommand(got_preset, got_runner)
def run(self, edit, preset=None, action=None, runner=None):
return self.get_run(preset, action, runner) | null |
save | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Random cost model
"""
from typing import List, Optional, Tuple, Union
from ..cost_model import PyCostModel
from ..runner import RunnerResult
from ..search_strategy import MeasureCandidate
from ..tune_context import TuneContext
from ..utils import derived_object # type: ignore
@derived_object
class RandomModel(PyCostModel):
"""Random cost model
Parameters
----------
random_state : Union[Tuple[str, np.ndarray, int, int, float], dict]
The random state of the random number generator.
path : Optional[str]
The path of the random cost model.
max_range : Optional[int]
The maximum range of random results, [0, max_range].
Reference
---------
https://numpy.org/doc/stable/reference/random/generated/numpy.random.get_state.html
"""
import numpy as np # type: ignore # pylint: disable=import-outside-toplevel
random_state: Union[Tuple[str, np.ndarray, int, int, float], dict]
path: Optional[str]
def __init__(
self,
*,
seed: Optional[int] = None,
path: Optional[str] = None,
max_range: Optional[int] = 100,
):
import numpy as np # type: ignore # pylint: disable=import-outside-toplevel
super().__init__()
if path is not None:
self.load(path)
else:
np.random.seed(seed)
self.random_state = np.random.get_state()
self.max_range = max_range
def load(self, path: str) -> None:
"""Load the cost model from given file location.
Parameters
----------
path : str
The file path.
"""
import numpy as np # type: ignore # pylint: disable=import-outside-toplevel
self.random_state = tuple(np.load(path, allow_pickle=True)) # type: ignore
def METHOD_NAME(self, path: str) -> None:
"""Save the cost model to given file location.
Parameters
----------
path : str
The file path.
"""
import numpy as np # type: ignore # pylint: disable=import-outside-toplevel
np.METHOD_NAME(path, np.array(self.random_state, dtype=object), allow_pickle=True)
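    # Editorial note: the RandomState tuple persisted by the method above can be
    # reloaded later via `model.load(path)` or by constructing `RandomModel(path=path)`,
    # which bypasses the seed-based initialization; the file path is whatever the
    # caller chose, e.g. "rand_state.npy".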
def update(
self,
context: TuneContext,
candidates: List[MeasureCandidate],
results: List[RunnerResult],
) -> None:
"""Update the cost model given running results.
Parameters
----------
context : TuneContext,
The tuning context.
candidates : List[MeasureCandidate]
The measure candidates.
results : List[RunnerResult]
The running results of the measure candidates.
"""
def predict(
self, context: TuneContext, candidates: List[MeasureCandidate]
) -> np.ndarray: # type: ignore # pylint: disable=used-before-assignment
"""Update the cost model given running results.
Parameters
----------
context : TuneContext,
The tuning context.
candidates : List[MeasureCandidate]
The measure candidates.
Return
------
result : np.ndarray
The predicted running results.
"""
import numpy as np # type: ignore # pylint: disable=import-outside-toplevel
np.random.set_state(self.random_state)
# TODO(@zxybazh): Use numpy's RandState object:
# https://numpy.org/doc/1.16/reference/generated/numpy.random.RandomState.html#numpy.random.RandomState
result = np.random.rand(len(candidates)) * self.max_range # type: ignore
self.random_state = np.random.get_state()
return result | null |
get component custom id | from __future__ import annotations
import dataclasses
from collections.abc import Mapping
from rest_framework import status
from rest_framework.request import Request
from sentry import options
from sentry.services.hybrid_cloud.identity.model import RpcIdentity
from sentry.services.hybrid_cloud.identity.service import identity_service
from sentry.services.hybrid_cloud.integration import RpcIntegration, integration_service
from sentry.services.hybrid_cloud.user.model import RpcUser
from sentry.services.hybrid_cloud.user.service import user_service
from ..utils import logger, verify_signature
@dataclasses.dataclass(frozen=True)
class DiscordRequestError(Exception):
"""
Something was invalid about the request from Discord.
Includes the status the endpoint should return, based on the error.
"""
status: int
class DiscordRequestTypes:
PING = 1
COMMAND = 2
MESSAGE_COMPONENT = 3
MODAL_SUBMIT = 5
class DiscordMessageComponentTypes:
ACTION_ROW = 1
BUTTON = 2
SELECT = 3
TEXT_INPUT = 4
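# Hedged illustration (editorial; the payload shape follows Discord's interactions
# docs, field values are made up): a MESSAGE_COMPONENT interaction looks roughly like
#   {"type": 3, "guild_id": "123", "channel_id": "456",
#    "member": {"user": {"id": "789"}},
#    "data": {"component_type": 3, "custom_id": "assign", "values": ["opt1"]}}
# DiscordRequest below exposes the nested "data" mapping via `.data`, while
# guild_id/channel_id/user_id are read from the top-level payload.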
class DiscordRequest:
"""
A Request from Discord to our interactions endpoint.
Handles request verification and data access.
Raises DiscordRequestError whenever something goes wrong, including the
appropriate response code that the endpoint should respond with.
"""
def __init__(self, request: Request):
self.request = request
self._integration: RpcIntegration | None = None
self._data: Mapping[str, object] = self.request.data
self._identity: RpcIdentity | None = None
self.user: RpcUser | None = None
@property
def integration(self) -> RpcIntegration | None:
return self._integration
@property
def data(self) -> Mapping[str, object]:
"""This is the data object nested within request.data"""
return self._data.get("data") or {} # type: ignore
@property
def guild_id(self) -> str | None:
guild_id = self._data.get("guild_id")
return str(guild_id) if guild_id else None
@property
def channel_id(self) -> str | None:
channel_id = self._data.get("channel_id")
return str(channel_id) if channel_id else None
@property
def user_id(self) -> str | None:
try:
return self._data.get("member")["user"]["id"] # type: ignore
except (AttributeError, TypeError):
return None
@property
def logging_data(self) -> Mapping[str, str | int]:
# TODO: come back to this later and see what additional metadata makes sense to include here
data: dict[str, str | int | None] = {
"discord_guild_id": self.guild_id,
"discord_channel_id": self.channel_id,
}
if self.integration:
data["integration_id"] = self.integration.id
if self.user_id:
data["discord_user_id"] = self.user_id
if self.has_identity():
data["identity"] = self.get_identity_str()
if self.is_command():
data["command"] = self.get_command_name()
if self.is_message_component():
data["component_custom_id"] = self.METHOD_NAME()
return {k: v for k, v in data.items() if v}
def validate(self) -> None:
self._log_request()
self.authorize()
self.validate_integration()
self._validate_identity()
def authorize(self) -> None:
public_key: str = options.get("discord.public-key")
signature: str | None = self.request.META.get("HTTP_X_SIGNATURE_ED25519")
timestamp: str | None = self.request.META.get("HTTP_X_SIGNATURE_TIMESTAMP")
body: str = self.request.body.decode("utf-8")
if signature and timestamp and verify_signature(public_key, signature, timestamp + body):
return
raise DiscordRequestError(status=status.HTTP_401_UNAUTHORIZED)
def _validate_identity(self) -> None:
self.user = self.get_identity_user()
def get_identity_user(self) -> RpcUser | None:
identity = self.get_identity()
if not identity:
return None
return user_service.get_user(identity.user_id)
def get_identity(self) -> RpcIdentity | None:
if not self._identity:
provider = identity_service.get_provider(
provider_type="discord", provider_ext_id=self.guild_id
)
self._identity = (
identity_service.get_identity(
filter={"provider_id": provider.id, "identity_ext_id": self.user_id}
)
if provider
else None
)
return self._identity
def get_identity_str(self) -> str | None:
return self.user.email if self.user else None
def validate_integration(self) -> None:
self._integration = integration_service.get_integration(
provider="discord", external_id=self.guild_id
)
def has_identity(self) -> bool:
return self.user is not None
def _log_request(self) -> None:
self._info("discord.request")
def _info(self, key: str) -> None:
logger.info(key, extra={**self.logging_data})
def _error(self, key: str) -> None:
logger.error(key, extra={**self.logging_data})
def is_ping(self) -> bool:
return self._data.get("type", 0) == DiscordRequestTypes.PING
def is_command(self) -> bool:
return self._data.get("type", 0) == DiscordRequestTypes.COMMAND
def is_message_component(self) -> bool:
return self._data.get("type", 0) == DiscordRequestTypes.MESSAGE_COMPONENT
def is_modal_submit(self) -> bool:
return self._data.get("type", 0) == DiscordRequestTypes.MODAL_SUBMIT
def get_command_name(self) -> str:
if not self.is_command():
return ""
return self.data["name"] # type: ignore
def METHOD_NAME(self) -> str:
if not self.is_message_component():
return ""
return self.data["custom_id"] # type: ignore
def is_select_component(self) -> bool:
return self.data["component_type"] == DiscordMessageComponentTypes.SELECT
def get_selected_options(self) -> list[str]:
if not self.is_select_component():
return []
return self.data["values"] # type: ignore | null |
fallback getpass | """Utilities to get a password and/or the current user name.
getpass(prompt[, stream]) - Prompt for a password, with echo turned off.
getuser() - Get the user name from the environment or password database.
GetPassWarning - This UserWarning is issued when getpass() cannot prevent
echoing of the password contents while reading.
On Windows, the msvcrt module will be used.
"""
# Authors: Piers Lauder (original)
# Guido van Rossum (Windows support and cleanup)
# Gregory P. Smith (tty support & GetPassWarning)
import contextlib
import io
import os
import sys
import warnings
__all__ = ["getpass","getuser","GetPassWarning"]
class GetPassWarning(UserWarning): pass
def unix_getpass(prompt='Password: ', stream=None):
"""Prompt for a password, with echo turned off.
Args:
prompt: Written on stream to ask for the input. Default: 'Password: '
stream: A writable file object to display the prompt. Defaults to
the tty. If no tty is available defaults to sys.stderr.
Returns:
The seKr3t input.
Raises:
EOFError: If our input tty or stdin was closed.
GetPassWarning: When we were unable to turn echo off on the input.
Always restores terminal settings before returning.
"""
passwd = None
with contextlib.ExitStack() as stack:
try:
# Always try reading and writing directly on the tty first.
fd = os.open('/dev/tty', os.O_RDWR|os.O_NOCTTY)
tty = io.FileIO(fd, 'w+')
stack.enter_context(tty)
input = io.TextIOWrapper(tty)
stack.enter_context(input)
if not stream:
stream = input
except OSError as e:
# If that fails, see if stdin can be controlled.
stack.close()
try:
fd = sys.stdin.fileno()
except (AttributeError, ValueError):
fd = None
passwd = METHOD_NAME(prompt, stream)
input = sys.stdin
if not stream:
stream = sys.stderr
if fd is not None:
try:
old = termios.tcgetattr(fd) # a copy to save
new = old[:]
new[3] &= ~termios.ECHO # 3 == 'lflags'
tcsetattr_flags = termios.TCSAFLUSH
if hasattr(termios, 'TCSASOFT'):
tcsetattr_flags |= termios.TCSASOFT
try:
termios.tcsetattr(fd, tcsetattr_flags, new)
passwd = _raw_input(prompt, stream, input=input)
finally:
termios.tcsetattr(fd, tcsetattr_flags, old)
stream.flush() # issue7208
except termios.error:
if passwd is not None:
# _raw_input succeeded. The final tcsetattr failed. Reraise
# instead of leaving the terminal in an unknown state.
raise
# We can't control the tty or stdin. Give up and use normal IO.
# fallback_getpass() raises an appropriate warning.
if stream is not input:
# clean up unused file objects before blocking
stack.close()
passwd = METHOD_NAME(prompt, stream)
stream.write('\n')
return passwd
def win_getpass(prompt='Password: ', stream=None):
"""Prompt for password with echo off, using Windows getch()."""
if sys.stdin is not sys.__stdin__:
return METHOD_NAME(prompt, stream)
for c in prompt:
msvcrt.putwch(c)
pw = ""
while 1:
c = msvcrt.getwch()
if c == '\r' or c == '\n':
break
if c == '\003':
raise KeyboardInterrupt
if c == '\b':
pw = pw[:-1]
else:
pw = pw + c
msvcrt.putwch('\r')
msvcrt.putwch('\n')
return pw
def METHOD_NAME(prompt='Password: ', stream=None):
warnings.warn("Can not control echo on the terminal.", GetPassWarning,
stacklevel=2)
if not stream:
stream = sys.stderr
print("Warning: Password input may be echoed.", file=stream)
return _raw_input(prompt, stream)
def _raw_input(prompt="", stream=None, input=None):
# This doesn't save the string in the GNU readline history.
if not stream:
stream = sys.stderr
if not input:
input = sys.stdin
prompt = str(prompt)
if prompt:
try:
stream.write(prompt)
except UnicodeEncodeError:
# Use replace error handler to get as much as possible printed.
prompt = prompt.encode(stream.encoding, 'replace')
prompt = prompt.decode(stream.encoding)
stream.write(prompt)
stream.flush()
# NOTE: The Python C API calls flockfile() (and unlock) during readline.
line = input.readline()
if not line:
raise EOFError
if line[-1] == '\n':
line = line[:-1]
return line
def getuser():
"""Get the username from the environment or password database.
First try various environment variables, then the password
database. This works on Windows as long as USERNAME is set.
"""
for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
user = os.environ.get(name)
if user:
return user
# If this fails, the exception will "explain" why
import pwd
return pwd.getpwuid(os.getuid())[0]
# Bind the name getpass to the appropriate function
try:
import termios
# it's possible there is an incompatible termios from the
# McMillan Installer, make sure we have a UNIX-compatible termios
termios.tcgetattr, termios.tcsetattr
except (ImportError, AttributeError):
try:
import msvcrt
except ImportError:
getpass = METHOD_NAME
else:
getpass = win_getpass
else:
getpass = unix_getpass | null |
test auto q | import unittest
from email import _encoded_words as _ew
from email import errors
from test.test_email import TestEmailBase
class TestDecodeQ(TestEmailBase):
def _test(self, source, ex_result, ex_defects=[]):
result, defects = _ew.decode_q(source)
self.assertEqual(result, ex_result)
self.assertDefectsEqual(defects, ex_defects)
def test_no_encoded(self):
self._test(b'foobar', b'foobar')
def test_spaces(self):
self._test(b'foo=20bar=20', b'foo bar ')
self._test(b'foo_bar_', b'foo bar ')
def test_run_of_encoded(self):
self._test(b'foo=20=20=21=2Cbar', b'foo !,bar')
class TestDecodeB(TestEmailBase):
def _test(self, source, ex_result, ex_defects=[]):
result, defects = _ew.decode_b(source)
self.assertEqual(result, ex_result)
self.assertDefectsEqual(defects, ex_defects)
def test_simple(self):
self._test(b'Zm9v', b'foo')
def test_missing_padding(self):
self._test(b'dmk', b'vi', [errors.InvalidBase64PaddingDefect])
def test_invalid_character(self):
self._test(b'dm\x01k===', b'vi', [errors.InvalidBase64CharactersDefect])
def test_invalid_character_and_bad_padding(self):
self._test(b'dm\x01k', b'vi', [errors.InvalidBase64CharactersDefect,
errors.InvalidBase64PaddingDefect])
class TestDecode(TestEmailBase):
def test_wrong_format_input_raises(self):
with self.assertRaises(ValueError):
_ew.decode('=?badone?=')
with self.assertRaises(ValueError):
_ew.decode('=?')
with self.assertRaises(ValueError):
_ew.decode('')
def _test(self, source, result, charset='us-ascii', lang='', defects=[]):
res, char, l, d = _ew.decode(source)
self.assertEqual(res, result)
self.assertEqual(char, charset)
self.assertEqual(l, lang)
self.assertDefectsEqual(d, defects)
def test_simple_q(self):
self._test('=?us-ascii?q?foo?=', 'foo')
def test_simple_b(self):
self._test('=?us-ascii?b?dmk=?=', 'vi')
def test_q_case_ignored(self):
self._test('=?us-ascii?Q?foo?=', 'foo')
def test_b_case_ignored(self):
self._test('=?us-ascii?B?dmk=?=', 'vi')
def test_non_trivial_q(self):
self._test('=?latin-1?q?=20F=fcr=20Elise=20?=', ' Für Elise ', 'latin-1')
def test_q_escaped_bytes_preserved(self):
self._test(b'=?us-ascii?q?=20\xACfoo?='.decode('us-ascii',
'surrogateescape'),
' \uDCACfoo',
defects = [errors.UndecodableBytesDefect])
def test_b_undecodable_bytes_ignored_with_defect(self):
self._test(b'=?us-ascii?b?dm\xACk?='.decode('us-ascii',
'surrogateescape'),
'vi',
defects = [
errors.InvalidBase64CharactersDefect,
errors.InvalidBase64PaddingDefect])
def test_b_invalid_bytes_ignored_with_defect(self):
self._test('=?us-ascii?b?dm\x01k===?=',
'vi',
defects = [errors.InvalidBase64CharactersDefect])
def test_b_invalid_bytes_incorrect_padding(self):
self._test('=?us-ascii?b?dm\x01k?=',
'vi',
defects = [
errors.InvalidBase64CharactersDefect,
errors.InvalidBase64PaddingDefect])
def test_b_padding_defect(self):
self._test('=?us-ascii?b?dmk?=',
'vi',
defects = [errors.InvalidBase64PaddingDefect])
def test_nonnull_lang(self):
self._test('=?us-ascii*jive?q?test?=', 'test', lang='jive')
def test_unknown_8bit_charset(self):
self._test('=?unknown-8bit?q?foo=ACbar?=',
b'foo\xacbar'.decode('ascii', 'surrogateescape'),
charset = 'unknown-8bit',
defects = [])
def test_unknown_charset(self):
self._test('=?foobar?q?foo=ACbar?=',
b'foo\xacbar'.decode('ascii', 'surrogateescape'),
charset = 'foobar',
# XXX Should this be a new Defect instead?
defects = [errors.CharsetError])
def test_q_nonascii(self):
self._test('=?utf-8?q?=C3=89ric?=',
'Éric',
charset='utf-8')
class TestEncodeQ(TestEmailBase):
def _test(self, src, expected):
self.assertEqual(_ew.encode_q(src), expected)
def test_all_safe(self):
self._test(b'foobar', 'foobar')
def test_spaces(self):
self._test(b'foo bar ', 'foo_bar_')
def test_run_of_encodables(self):
self._test(b'foo ,,bar', 'foo__=2C=2Cbar')
class TestEncodeB(TestEmailBase):
def test_simple(self):
self.assertEqual(_ew.encode_b(b'foo'), 'Zm9v')
def test_padding(self):
self.assertEqual(_ew.encode_b(b'vi'), 'dmk=')
class TestEncode(TestEmailBase):
def test_q(self):
self.assertEqual(_ew.encode('foo', 'utf-8', 'q'), '=?utf-8?q?foo?=')
def test_b(self):
self.assertEqual(_ew.encode('foo', 'utf-8', 'b'), '=?utf-8?b?Zm9v?=')
def METHOD_NAME(self):
self.assertEqual(_ew.encode('foo', 'utf-8'), '=?utf-8?q?foo?=')
def test_auto_q_if_short_mostly_safe(self):
self.assertEqual(_ew.encode('vi.', 'utf-8'), '=?utf-8?q?vi=2E?=')
def test_auto_b_if_enough_unsafe(self):
self.assertEqual(_ew.encode('.....', 'utf-8'), '=?utf-8?b?Li4uLi4=?=')
def test_auto_b_if_long_unsafe(self):
self.assertEqual(_ew.encode('vi.vi.vi.vi.vi.', 'utf-8'),
'=?utf-8?b?dmkudmkudmkudmkudmku?=')
def test_auto_q_if_long_mostly_safe(self):
self.assertEqual(_ew.encode('vi vi vi.vi ', 'utf-8'),
'=?utf-8?q?vi_vi_vi=2Evi_?=')
def test_utf8_default(self):
self.assertEqual(_ew.encode('foo'), '=?utf-8?q?foo?=')
def test_lang(self):
self.assertEqual(_ew.encode('foo', lang='jive'), '=?utf-8*jive?q?foo?=')
def test_unknown_8bit(self):
self.assertEqual(_ew.encode('foo\uDCACbar', charset='unknown-8bit'),
'=?unknown-8bit?q?foo=ACbar?=')
if __name__ == '__main__':
unittest.main() | null |
create component structure | """
Algorithms for asteroidal triples and asteroidal numbers in graphs.
An asteroidal triple in a graph G is a set of three non-adjacent vertices
u, v and w such that there exist a path between any two of them that avoids
closed neighborhood of the third. More formally, v_j, v_k belongs to the same
connected component of G - N[v_i], where N[v_i] denotes the closed neighborhood
of v_i. A graph which does not contain any asteroidal triples is called
an AT-free graph. The class of AT-free graphs is a graph class for which
many NP-complete problems are solvable in polynomial time. Amongst them,
independent set and coloring.
"""
import networkx as nx
from networkx.utils import not_implemented_for
__all__ = ["is_at_free", "find_asteroidal_triple"]
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatch
def find_asteroidal_triple(G):
r"""Find an asteroidal triple in the given graph.
An asteroidal triple is a triple of non-adjacent vertices such that
there exists a path between any two of them which avoids the closed
neighborhood of the third. It checks all independent triples of vertices
and whether they are an asteroidal triple or not. This is done with the
help of a data structure called a component structure.
A component structure encodes information about which vertices belongs to
the same connected component when the closed neighborhood of a given vertex
is removed from the graph. The algorithm used to check is the trivial
one, outlined in [1]_, which has a runtime of
    :math:`O(|V||\overline{E}| + |V||E|)`, where the second term is the
creation of the component structure.
Parameters
----------
G : NetworkX Graph
The graph to check whether is AT-free or not
Returns
-------
list or None
An asteroidal triple is returned as a list of nodes. If no asteroidal
triple exists, i.e. the graph is AT-free, then None is returned.
Notes
-----
The component structure and the algorithm is described in [1]_. The current
implementation implements the trivial algorithm for simple graphs.
References
----------
.. [1] Ekkehard Köhler,
"Recognizing Graphs without asteroidal triples",
Journal of Discrete Algorithms 2, pages 439-452, 2004.
https://www.sciencedirect.com/science/article/pii/S157086670400019X
"""
V = set(G.nodes)
if len(V) < 6:
# An asteroidal triple cannot exist in a graph with 5 or less vertices.
return None
component_structure = METHOD_NAME(G)
E_complement = set(nx.complement(G).edges)
for e in E_complement:
u = e[0]
v = e[1]
u_neighborhood = set(G[u]).union([u])
v_neighborhood = set(G[v]).union([v])
union_of_neighborhoods = u_neighborhood.union(v_neighborhood)
for w in V - union_of_neighborhoods:
# Check for each pair of vertices whether they belong to the
# same connected component when the closed neighborhood of the
# third is removed.
if (
component_structure[u][v] == component_structure[u][w]
and component_structure[v][u] == component_structure[v][w]
and component_structure[w][u] == component_structure[w][v]
):
return [u, v, w]
return None
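# Hedged example (editorial addition): the 6-cycle is the classic graph containing an
# asteroidal triple, while any graph on fewer than 6 nodes trivially contains none.
def _example_find_asteroidal_triple():
    G = nx.cycle_graph(6)
    triple = find_asteroidal_triple(G)  # some triple of pairwise non-adjacent nodes
    assert triple is not None and len(triple) == 3
    assert find_asteroidal_triple(nx.path_graph(5)) is None
    return triple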
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatch
def is_at_free(G):
"""Check if a graph is AT-free.
The method uses the `find_asteroidal_triple` method to recognize
an AT-free graph. If no asteroidal triple is found the graph is
AT-free and True is returned. If at least one asteroidal triple is
found the graph is not AT-free and False is returned.
Parameters
----------
G : NetworkX Graph
The graph to check whether is AT-free or not.
Returns
-------
bool
True if G is AT-free and False otherwise.
Examples
--------
>>> G = nx.Graph([(0, 1), (0, 2), (1, 2), (1, 3), (1, 4), (4, 5)])
>>> nx.is_at_free(G)
True
>>> G = nx.cycle_graph(6)
>>> nx.is_at_free(G)
False
"""
return find_asteroidal_triple(G) is None
@not_implemented_for("directed")
@not_implemented_for("multigraph")
@nx._dispatch
def METHOD_NAME(G):
r"""Create component structure for G.
    A *component structure* is an `n`-by-`n` array, denoted `c`, where `n` is
    the number of vertices and each row and column corresponds to a vertex.
.. math::
        c_{uv} = \begin{cases} 0, & \text{if } v \in N[u] \\
        k, & \text{if } v \text{ is in component } k \text{ of } G \setminus N[u] \end{cases}
Where `k` is an arbitrary label for each component. The structure is used
to simplify the detection of asteroidal triples.
Parameters
----------
G : NetworkX Graph
Undirected, simple graph.
Returns
-------
component_structure : dictionary
A dictionary of dictionaries, keyed by pairs of vertices.
"""
V = set(G.nodes)
component_structure = {}
for v in V:
label = 0
closed_neighborhood = set(G[v]).union({v})
row_dict = {}
for u in closed_neighborhood:
row_dict[u] = 0
G_reduced = G.subgraph(set(G.nodes) - closed_neighborhood)
for cc in nx.connected_components(G_reduced):
label += 1
for u in cc:
row_dict[u] = label
component_structure[v] = row_dict
return component_structure | null |
check mt | #
# Test virtio-scsi and virtio-blk queue settings for all machine types
#
# Copyright (c) 2019 Virtuozzo International GmbH
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import sys
import os
import re
import logging
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'python'))
from qemu.machine import QEMUMachine
from avocado_qemu import Test
from avocado import skip
#list of machine types and virtqueue properties to test
VIRTIO_SCSI_PROPS = {'seg_max_adjust': 'seg_max_adjust'}
VIRTIO_BLK_PROPS = {'seg_max_adjust': 'seg-max-adjust'}
DEV_TYPES = {'virtio-scsi-pci': VIRTIO_SCSI_PROPS,
'virtio-blk-pci': VIRTIO_BLK_PROPS}
VM_DEV_PARAMS = {'virtio-scsi-pci': ['-device', 'virtio-scsi-pci,id=scsi0'],
'virtio-blk-pci': ['-device',
'virtio-blk-pci,id=scsi0,drive=drive0',
'-drive',
'driver=null-co,id=drive0,if=none']}
class VirtioMaxSegSettingsCheck(Test):
@staticmethod
def make_pattern(props):
        pattern_items = [r'{0} = \w+'.format(prop) for prop in props]
return '|'.join(pattern_items)
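    # Illustration (editorial): make_pattern(['seg_max_adjust', 'seg-max-adjust'])
    # yields r'seg_max_adjust = \w+|seg-max-adjust = \w+', which matches lines like
    # "seg_max_adjust = true" in the 'info qtree' output parsed below.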
def query_virtqueue(self, vm, dev_type_name):
query_ok = False
error = None
props = None
output = vm.command('human-monitor-command',
command_line = 'info qtree')
        props_list = DEV_TYPES[dev_type_name].values()
pattern = self.make_pattern(props_list)
res = re.findall(pattern, output)
if len(res) != len(props_list):
props_list = set(props_list)
res = set(res)
not_found = props_list.difference(res)
not_found = ', '.join(not_found)
error = '({0}): The following properties not found: {1}'\
.format(dev_type_name, not_found)
else:
query_ok = True
props = dict()
for prop in res:
p = prop.split(' = ')
props[p[0]] = p[1]
return query_ok, props, error
def METHOD_NAME(self, mt, dev_type_name):
mt['device'] = dev_type_name # Only for the debug() call.
logger = logging.getLogger('machine')
logger.debug(mt)
with QEMUMachine(self.qemu_bin) as vm:
vm.set_machine(mt["name"])
vm.add_args('-nodefaults')
for s in VM_DEV_PARAMS[dev_type_name]:
vm.add_args(s)
try:
vm.launch()
query_ok, props, error = self.query_virtqueue(vm, dev_type_name)
except:
query_ok = False
error = sys.exc_info()[0]
if not query_ok:
self.fail('machine type {0}: {1}'.format(mt['name'], error))
for prop_name, prop_val in props.items():
expected_val = mt[prop_name]
self.assertEqual(expected_val, prop_val)
@staticmethod
def seg_max_adjust_enabled(mt):
# machine types >= 5.0 should have seg_max_adjust = true
# others seg_max_adjust = false
mt = mt.split("-")
# machine types with one line name and name like pc-x.x
if len(mt) <= 2:
return False
# machine types like pc-<chip_name>-x.x[.x]
ver = mt[2]
        ver = ver.split(".")
# versions >= 5.0 goes with seg_max_adjust enabled
major = int(ver[0])
if major >= 5:
return True
return False
@skip("break multi-arch CI")
def test_machine_types(self):
# collect all machine types except 'none', 'isapc', 'microvm'
with QEMUMachine(self.qemu_bin) as vm:
vm.launch()
machines = [m['name'] for m in vm.command('query-machines')]
vm.shutdown()
machines.remove('none')
machines.remove('isapc')
machines.remove('microvm')
for dev_type in DEV_TYPES:
# create the list of machine types and their parameters.
mtypes = list()
for m in machines:
if self.seg_max_adjust_enabled(m):
enabled = 'true'
else:
enabled = 'false'
mtypes.append({'name': m,
DEV_TYPES[dev_type]['seg_max_adjust']: enabled})
# test each machine type for a device type
for mt in mtypes:
self.METHOD_NAME(mt, dev_type) | null |
get task data | import numpy as np
from libertem.masks import _make_circular_mask
from libertem.udf import UDF
class CrystallinityUDF(UDF):
"""
Determine crystallinity by integration over a ring in the fourier spectrum of each frame.
Parameters
----------
rad_in: float
Inner radius in pixels of a ring mask for the integration in Fourier space
rad_out: float
Outer radius in pixels of a ring mask for the integration in Fourier space
real_center: Tuple[float], optional
(y,x) - pixels, coordinates of a center of a circle for a masking out zero-order peak
in real space.
real_rad: float, optional
Radius in pixels of circle for a masking out zero-order peak in real space.
If one of real_center or real_rad is missing: the integration will be done without
masking zero-order peak out.
Examples
--------
>>> cryst_udf = CrystallinityUDF(rad_in=4, rad_out=6, real_center=(8, 8), real_rad=3)
>>> result = ctx.run_udf(dataset=dataset, udf=cryst_udf)
>>> np.array(result["intensity"]).shape
(16, 16)
"""
def __init__(self, rad_in, rad_out, real_center, real_rad):
super().__init__(rad_in=rad_in, rad_out=rad_out,
real_center=real_center, real_rad=real_rad)
def get_result_buffers(self):
return {
'intensity': self.buffer(
kind="nav", dtype="float32"
),
}
def METHOD_NAME(self):
sigshape = tuple(self.meta.partition_shape.sig)
rad_in = self.params.rad_in
rad_out = self.params.rad_out
real_center = self.params.real_center
real_rad = self.params.real_rad
if not (real_center is None or real_rad is None):
real_mask = 1-1*_make_circular_mask(
real_center[1], real_center[0], sigshape[1], sigshape[0], real_rad
)
else:
real_mask = None
fourier_mask_out = 1*_make_circular_mask(
sigshape[1]*0.5, sigshape[0]*0.5, sigshape[1], sigshape[0], rad_out
)
fourier_mask_in = 1*_make_circular_mask(
sigshape[1]*0.5, sigshape[0]*0.5, sigshape[1], sigshape[0], rad_in
)
fourier_mask = np.fft.fftshift(fourier_mask_out - fourier_mask_in)
half_fourier_mask = fourier_mask[:, :int(fourier_mask.shape[1]*0.5)+1]
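        # Editorial note: only the left half of the shifted ring mask is kept because
        # process_frame() uses np.fft.rfft2, whose output covers just the
        # non-negative horizontal frequencies (shape[1] // 2 + 1 columns).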
kwargs = {
'real_mask': real_mask,
'half_fourier_mask': half_fourier_mask,
}
return kwargs
def process_frame(self, frame):
h_f_mask = self.task_data.half_fourier_mask
if self.task_data.real_mask is not None:
maskedframe = frame*self.task_data.real_mask
else:
maskedframe = frame
self.results.intensity[:] = np.sum(abs(np.fft.rfft2(maskedframe))*h_f_mask)
def run_analysis_crystall(ctx, dataset, rad_in, rad_out, real_center=None, real_rad=None, roi=None,
progress=False):
"""
Return a value after integration of Fourier spectrum for each frame over ring.
Parameters
----------
ctx : libertem.api.Context
dataset : libertem.io.dataset.DataSet
A dataset with 1- or 2-D scan dimensions and 2-D frame dimensions
rad_in : int
Inner radius in pixels of a ring mask for the integration in Fourier space
rad_out : int
Outer radius in pixels of a ring mask for the integration in Fourier space
real_center : Tuple[float], optional
(y,x) - pixels, coordinates of a center of a circle for a masking out zero-order peak
in real space.
real_rad : int, optional
Radius in pixels of circle for a masking out zero-order peak in real space.
If one of real_center or real_rad is missing: the integration will be done without
masking zero-order peak out.
Returns
-------
pass_results: dict
Returns a "crystallinity" value for each frame.
To return 2-D array use pass_results['intensity'].data
"""
udf = CrystallinityUDF(
rad_in=rad_in, rad_out=rad_out, real_center=real_center, real_rad=real_rad
)
pass_results = ctx.run_udf(dataset=dataset, udf=udf, roi=roi, progress=progress)
return pass_results | null |
docker push | #!/usr/bin/env python
# Copyright 2014 The Serviced Authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script will take a compiled serviced template and attempt to
# mirror the images referenced in that template to a new registry. It will
# also create a new template file with updated ImageIDs that reference
# the new mirror.
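# Example invocation (editorial; the script name, file names and registry host are
# illustrative):
#   python make_mirror.py compiled-template.json mirror.example.com:5000 mirrored-template.json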
import argparse
import subprocess
import os.path
import sys
import json
import logging
import urllib2
logging.basicConfig()
LOG = logging.getLogger(__name__)
def fatal(*args, **kwargs):
LOG.fatal(*args, **kwargs)
sys.exit(1)
def get_images(service):
imageIDs = set()
if service["ImageID"]:
imageIDs.add(service["ImageID"])
for svc in service["Services"]:
imageIDs.update(get_images(svc))
return imageIDs
def remap_image_id(service, oldImageID, newImageID):
if service["ImageID"] == oldImageID:
service["ImageID"] = newImageID
for svc in service["Services"]:
remap_image_id(svc, oldImageID, newImageID)
def docker_pull(imageID):
_docker_op("pull", imageID)
def METHOD_NAME(imageID):
_docker_op("push", imageID)
def docker_tag(imageID, newImageID):
_docker_op("tag", imageID, newImageID)
def _docker_op(op, *args):
try:
sargs = ["docker", op]
sargs.extend(args)
subprocess.check_call(sargs)
except Exception as ex:
fatal("could not docker %s %s: %s", op, " ".join(args), ex)
def ping_registry(registry):
try:
url = "http://%s/v1/_ping" % (registry)
urllib2.urlopen(url).read()
except Exception as ex:
fatal("could not ping registry %s: %s", url, ex)
class Main(object):
def __init__(self, input_template, output_template, mirror):
self._input_template = input_template
self._output_template = output_template
self._mirror = mirror
def _load_template(self):
try:
with open(self._input_template) as f:
return json.load(f)
except Exception as ex:
LOG.fatal("could not load template: %s", ex)
sys.exit(1)
def _dump_template(self, template):
try:
with open(self._output_template, "w") as f:
json.dump(template, f, sort_keys=True, indent=4, separators=(',', ': '))
except Exception as ex:
LOG.fatal("could not write new template to %s: %s", self._output_template, ex)
sys.exit(1)
def run(self):
if os.path.isfile(self._output_template):
LOG.fatal("destination template file should not exist: %s", self._output_template)
sys.exit(1)
print "Pinging destination registry: %s" % (self._mirror)
ping_registry(self._mirror)
print "Extracting imageIDs from template"
imageIDs = set()
template = self._load_template()
for svc in template['Services']:
imageIDs.update(get_images(svc))
for imageID in imageIDs:
print "Pulling image %s" % (imageID)
docker_pull(imageID)
newImageIDs = []
for imageID in imageIDs:
newImageID = self._mirror + "/" + imageID
print "Retagging %s to %s" % (imageID, newImageID)
docker_tag(imageID, newImageID)
newImageIDs.append(newImageID)
for svc in template['Services']:
remap_image_id(svc, imageID, newImageID)
for imageID in newImageIDs:
METHOD_NAME(imageID)
print "writing new template to %s" % (self._output_template)
description = template["Description"].split("|")[0]
template["Description"] = description.strip() + " | %s mirror" % self._mirror
self._dump_template(template)
if __name__=="__main__":
parser = argparse.ArgumentParser(description="a tool to mirror images referenced in serviced templates")
parser.add_argument("input_template", help="the template to mirror")
parser.add_argument("mirror", help="the docker mirror to use (eg somehost.example.com:5000")
parser.add_argument("output_template", help="the destination to write the modified template")
args = parser.parse_args()
main = Main(args.input_template, args.output_template, args.mirror)
main.run()
| null |
length in bytes | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from dataclasses import dataclass
from plc4py.api.messages.PlcMessage import PlcMessage
from plc4py.protocols.modbus.readwrite.ModbusPDU import ModbusPDU
from plc4py.protocols.modbus.readwrite.ModbusPDU import ModbusPDUBuilder
from plc4py.spi.generation.ReadBuffer import ReadBuffer
from plc4py.spi.generation.WriteBuffer import WriteBuffer
from typing import List
import math
@dataclass
class ModbusPDUReportServerIdResponse(PlcMessage, ModbusPDU):
value: List[int]
# Accessors for discriminator values.
error_flag: bool = False
function_flag: int = 0x11
response: bool = True
def __post_init__(self):
super().__init__()
def serialize_modbus_pdu_child(self, write_buffer: WriteBuffer):
write_buffer.push_context("ModbusPDUReportServerIdResponse")
# Implicit Field (byte_count) (Used for parsing, but its value is not stored as it's implicitly given by the objects content)
byte_count: int = int(len(self.value))
write_buffer.write_unsigned_byte(byte_count, logical_name="byteCount")
# Array Field (value)
write_buffer.write_byte_array(self.value, logical_name="value")
write_buffer.pop_context("ModbusPDUReportServerIdResponse")
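    # Wire-format note (editorial, example bytes are made up): a response whose server
    # id payload is [0x11, 0xFF] serializes as the implicit length prefix 0x02 followed
    # by 0x11 0xFF, i.e. byteCount is always derived from len(self.value).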
def METHOD_NAME(self) -> int:
return int(math.ceil(float(self.get_length_in_bits() / 8.0)))
def get_length_in_bits(self) -> int:
length_in_bits: int = super().get_length_in_bits()
_value: ModbusPDUReportServerIdResponse = self
# Implicit Field (byteCount)
length_in_bits += 8
# Array field
if self.value != None:
length_in_bits += 8 * len(self.value)
return length_in_bits
@staticmethod
def static_parse_builder(read_buffer: ReadBuffer, response: bool):
read_buffer.push_context("ModbusPDUReportServerIdResponse")
byte_count: int = read_implicit_field("byteCount", read_unsigned_short)
        value: List[int] = read_buffer.read_byte_array("value", int(byte_count))
read_buffer.pop_context("ModbusPDUReportServerIdResponse")
# Create the instance
return ModbusPDUReportServerIdResponseBuilder(value)
def equals(self, o: object) -> bool:
if self == o:
return True
if not isinstance(o, ModbusPDUReportServerIdResponse):
return False
        that: ModbusPDUReportServerIdResponse = o
return (self.value == that.value) and super().equals(that) and True
def hash_code(self) -> int:
return hash(self)
def __str__(self) -> str:
write_buffer_box_based: WriteBufferBoxBased = WriteBufferBoxBased(True, True)
try:
write_buffer_box_based.writeSerializable(self)
except SerializationException as e:
raise RuntimeException(e)
return "\n" + str(write_buffer_box_based.get_box()) + "\n"
@dataclass
class ModbusPDUReportServerIdResponseBuilder(ModbusPDUBuilder):
value: List[int]
def __post_init__(self):
pass
def build(
self,
) -> ModbusPDUReportServerIdResponse:
modbus_pdu_report_server_id_response: ModbusPDUReportServerIdResponse = (
ModbusPDUReportServerIdResponse(self.value)
)
return modbus_pdu_report_server_id_response | null |
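# A small, plc4py-independent sketch of the length arithmetic used above: the PDU body is one
# implicit byte-count field (8 bits) plus 8 bits per payload byte, rounded up to whole bytes.
# It deliberately ignores the parent-PDU bits that super().get_length_in_bits() would add.
import math

def report_server_id_body_length_in_bytes(value):
    length_in_bits = 8 + 8 * len(value)  # byteCount field + payload bytes
    return int(math.ceil(length_in_bits / 8.0))

# report_server_id_body_length_in_bytes([0x11, 0x22, 0x33]) == 4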
set time frame | """Base classes."""
from enum import Enum, auto
import numpy as np
import pandas as pd
from .const import ChartType, TimeFrame
__all__ = ('Indicator', 'Symbol', 'Quotes')
# I actually can't think of a worse reason to override an array than
# this:
# - a method .new() that mutates the data from an input data frame
# - mutating the time column wholesale based on a setting
# - enforcing certain fields / columns
# - zero overriding of any of the array interface for the purposes of
# a different underlying implementation.
# Literally all this can be done in a simple function with way less
# confusion for the reader.
class BaseQuotes(np.recarray):
def __new__(cls, shape=None, dtype=None, order='C'):
dt = np.dtype(
[
('id', int),
('time', float),
('open', float),
('high', float),
('low', float),
('close', float),
('volume', int),
]
)
shape = shape or (1,)
return np.ndarray.__new__(cls, shape, (np.record, dt), order=order)
def _nan_to_closest_num(self):
"""Return interpolated values instead of NaN."""
for col in ['open', 'high', 'low', 'close']:
mask = np.isnan(self[col])
if not mask.size:
continue
self[col][mask] = np.interp(
np.flatnonzero(mask), np.flatnonzero(~mask), self[col][~mask]
)
def METHOD_NAME(self, default_tf):
tf = {
1: TimeFrame.M1,
5: TimeFrame.M5,
15: TimeFrame.M15,
30: TimeFrame.M30,
60: TimeFrame.H1,
240: TimeFrame.H4,
1440: TimeFrame.D1,
}
minutes = int(np.diff(self.time[-10:]).min() / 60)
self.timeframe = tf.get(minutes) or tf[default_tf]
# bruh this isn't creating anything it's copying data in
# from a data frame...
def new(self, data, source=None, default_tf=None):
shape = (len(data),)
self.resize(shape, refcheck=False)
if isinstance(data, pd.DataFrame):
data.reset_index(inplace=True)
data.insert(0, 'id', data.index)
data.Date = self.convert_dates(data.Date)
data = data.rename(
columns={
'Date': 'time',
'Open': 'open',
'High': 'high',
'Low': 'low',
'Close': 'close',
'Volume': 'volume',
}
)
for name in self.dtype.names:
self[name] = data[name]
elif isinstance(data, (np.recarray, BaseQuotes)):
self[:] = data[:]
self._nan_to_closest_num()
self.METHOD_NAME(default_tf)
return self
def convert_dates(self, dates):
breakpoint()
return np.array([d.timestamp() for d in dates])
class SymbolType(Enum):
FOREX = auto()
CFD = auto()
FUTURES = auto()
SHARES = auto()
class Symbol:
FOREX = SymbolType.FOREX
CFD = SymbolType.CFD
FUTURES = SymbolType.FUTURES
SHARES = SymbolType.SHARES
def __init__(self, ticker, mode, tick_size=0, tick_value=None):
self.ticker = ticker
self.mode = mode
if self.mode in [self.FOREX, self.CFD]:
# number of units of the commodity, currency
# or financial asset in one lot
self.contract_size = 100_000 # (100000 == 1 Lot)
elif self.mode == self.FUTURES:
# cost of a single price change point ($10) /
# one minimum price movement
self.tick_value = tick_value
# minimum price change step (0.0001)
self.tick_size = tick_size
if isinstance(tick_size, float):
self.digits = len(str(tick_size).split('.')[1])
else:
self.digits = 0
def __repr__(self):
return 'Symbol (%s | %s)' % (self.ticker, self.mode)
class Indicator:
def __init__(
self, label=None, window=None, data=None, tp=None, base=None, **kwargs
):
self.label = label
self.window = window
self.data = data or [0]
self.type = tp or ChartType.LINE
self.base = base or {'linewidth': 0.5, 'color': 'black'}
self.lineStyle = {'linestyle': '-', 'linewidth': 0.5, 'color': 'blue'}
self.lineStyle.update(kwargs)
# This creates a global array that seems to be shared between all
# charting UI components
Quotes = BaseQuotes() | null |
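# A standalone numpy sketch of the NaN-fill step performed by _nan_to_closest_num() above:
# missing samples are replaced by linear interpolation between the nearest valid neighbours.
import numpy as np

def fill_nans(column):
    column = np.asarray(column, dtype=float)
    mask = np.isnan(column)
    if mask.any():
        column[mask] = np.interp(
            np.flatnonzero(mask), np.flatnonzero(~mask), column[~mask]
        )
    return column

# fill_nans([1.0, np.nan, 3.0]) -> array([1., 2., 3.])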
detach | ###############################################################################
# Popen for LokyProcess.
#
# author: Thomas Moreau and Olivier Grisel
#
import os
import sys
import signal
import pickle
from io import BytesIO
from multiprocessing import util, process
from multiprocessing.connection import wait
from multiprocessing.context import set_spawning_popen
from . import reduction, resource_tracker, spawn
__all__ = ["Popen"]
#
# Wrapper for an fd used while launching a process
#
class _DupFd:
def __init__(self, fd):
self.fd = reduction._mk_inheritable(fd)
def METHOD_NAME(self):
return self.fd
#
# Start child process using subprocess.Popen
#
class Popen:
method = "loky"
DupFd = _DupFd
def __init__(self, process_obj):
sys.stdout.flush()
sys.stderr.flush()
self.returncode = None
self._fds = []
self._launch(process_obj)
def duplicate_for_child(self, fd):
self._fds.append(fd)
return reduction._mk_inheritable(fd)
def poll(self, flag=os.WNOHANG):
if self.returncode is None:
while True:
try:
pid, sts = os.waitpid(self.pid, flag)
except OSError:
# Child process not yet created. See #1731717
# e.errno == errno.ECHILD == 10
return None
else:
break
if pid == self.pid:
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
else:
assert os.WIFEXITED(sts)
self.returncode = os.WEXITSTATUS(sts)
return self.returncode
def wait(self, timeout=None):
if self.returncode is None:
if timeout is not None:
if not wait([self.sentinel], timeout):
return None
# This shouldn't block if wait() returned successfully.
return self.poll(os.WNOHANG if timeout == 0.0 else 0)
return self.returncode
def terminate(self):
if self.returncode is None:
try:
os.kill(self.pid, signal.SIGTERM)
except ProcessLookupError:
pass
except OSError:
if self.wait(timeout=0.1) is None:
raise
def _launch(self, process_obj):
tracker_fd = resource_tracker._resource_tracker.getfd()
fp = BytesIO()
set_spawning_popen(self)
try:
prep_data = spawn.get_preparation_data(
process_obj._name,
getattr(process_obj, "init_main_module", True),
)
reduction.dump(prep_data, fp)
reduction.dump(process_obj, fp)
finally:
set_spawning_popen(None)
try:
parent_r, child_w = os.pipe()
child_r, parent_w = os.pipe()
# for fd in self._fds:
# _mk_inheritable(fd)
cmd_python = [sys.executable]
cmd_python += ["-m", self.__module__]
cmd_python += ["--process-name", str(process_obj.name)]
cmd_python += ["--pipe", str(reduction._mk_inheritable(child_r))]
reduction._mk_inheritable(child_w)
reduction._mk_inheritable(tracker_fd)
self._fds += [child_r, child_w, tracker_fd]
if sys.version_info >= (3, 8) and os.name == "posix":
mp_tracker_fd = prep_data["mp_tracker_args"]["fd"]
self.duplicate_for_child(mp_tracker_fd)
from .fork_exec import fork_exec
pid = fork_exec(cmd_python, self._fds, env=process_obj.env)
util.debug(
f"launched python with pid {pid} and cmd:\n{cmd_python}"
)
self.sentinel = parent_r
method = "getbuffer"
if not hasattr(fp, method):
method = "getvalue"
with os.fdopen(parent_w, "wb") as f:
f.write(getattr(fp, method)())
self.pid = pid
finally:
if parent_r is not None:
util.Finalize(self, os.close, (parent_r,))
for fd in (child_r, child_w):
if fd is not None:
os.close(fd)
@staticmethod
def thread_is_spawning():
return True
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("Command line parser")
parser.add_argument(
"--pipe", type=int, required=True, help="File handle for the pipe"
)
parser.add_argument(
"--process-name",
type=str,
default=None,
help="Identifier for debugging purpose",
)
args = parser.parse_args()
info = {}
exitcode = 1
try:
with os.fdopen(args.pipe, "rb") as from_parent:
process.current_process()._inheriting = True
try:
prep_data = pickle.load(from_parent)
spawn.prepare(prep_data)
process_obj = pickle.load(from_parent)
finally:
del process.current_process()._inheriting
exitcode = process_obj._bootstrap()
except Exception:
print("\n\n" + "-" * 80)
print(f"{args.process_name} failed with traceback: ")
print("-" * 80)
import traceback
print(traceback.format_exc())
print("\n" + "-" * 80)
finally:
if from_parent is not None:
from_parent.close()
sys.exit(exitcode) | null |
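# A POSIX-only sketch of the status decoding that Popen.poll() performs above: wait for a
# child pid and translate the raw wait status into an exit code or a negative signal number.
# The fork/exit example in the trailing comment is illustrative, not part of loky.
import os

def wait_and_decode(pid):
    _, sts = os.waitpid(pid, 0)
    if os.WIFSIGNALED(sts):
        return -os.WTERMSIG(sts)
    assert os.WIFEXITED(sts)
    return os.WEXITSTATUS(sts)

# pid = os.fork()
# if pid == 0:
#     os._exit(3)                      # child exits with code 3
# print(wait_and_decode(pid))          # parent prints 3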
method | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
class ListReferences(AAZCommand):
"""Returns the DNS records specified by the referencing targetResourceIds.
:example: List DNS records referencing the target resource IDs.
az network dns list-references --parameters /subscriptions/**921/resourceGroups/MyRg/providers/Microsoft.Network/trafficManagerProfiles/MyTm
"""
_aaz_info = {
"version": "2023-07-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/providers/microsoft.network/getdnsresourcereference", "2023-07-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
# define Arg Group "Properties"
_args_schema = cls._args_schema
_args_schema.target_resources = AAZListArg(
options=["--target-resources"],
arg_group="Properties",
help="A list of references to azure resources for which referencing dns records need to be queried.",
)
target_resources = cls._args_schema.target_resources
target_resources.Element = AAZObjectArg()
_element = cls._args_schema.target_resources.Element
_element.id = AAZStrArg(
options=["id"],
help="Resource Id.",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.DnsResourceReferenceGetByTargetResources(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class DnsResourceReferenceGetByTargetResources(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/providers/Microsoft.Network/getDnsResourceReference",
**self.url_parameters
)
@property
def METHOD_NAME(self):
return "POST"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-07-01-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Content-Type", "application/json",
),
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
@property
def content(self):
_content_value, _builder = self.new_content_builder(
self.ctx.args,
typ=AAZObjectType,
typ_kwargs={"flags": {"required": True, "client_flatten": True}}
)
_builder.set_prop("properties", AAZObjectType, typ_kwargs={"flags": {"client_flatten": True}})
properties = _builder.get(".properties")
if properties is not None:
properties.set_prop("targetResources", AAZListType, ".target_resources")
target_resources = _builder.get(".properties.targetResources")
if target_resources is not None:
target_resources.set_elements(AAZObjectType, ".")
_elements = _builder.get(".properties.targetResources[]")
if _elements is not None:
_elements.set_prop("id", AAZStrType, ".id")
return self.serialize_content(_content_value)
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = cls._schema_on_200.properties
properties.dns_resource_references = AAZListType(
serialized_name="dnsResourceReferences",
)
dns_resource_references = cls._schema_on_200.properties.dns_resource_references
dns_resource_references.Element = AAZObjectType()
_element = cls._schema_on_200.properties.dns_resource_references.Element
_element.dns_resources = AAZListType(
serialized_name="dnsResources",
)
_element.target_resource = AAZObjectType(
serialized_name="targetResource",
)
_ListReferencesHelper._build_schema_sub_resource_read(_element.target_resource)
dns_resources = cls._schema_on_200.properties.dns_resource_references.Element.dns_resources
dns_resources.Element = AAZObjectType()
_ListReferencesHelper._build_schema_sub_resource_read(dns_resources.Element)
return cls._schema_on_200
class _ListReferencesHelper:
"""Helper class for ListReferences"""
_schema_sub_resource_read = None
@classmethod
def _build_schema_sub_resource_read(cls, _schema):
if cls._schema_sub_resource_read is not None:
_schema.id = cls._schema_sub_resource_read.id
return
cls._schema_sub_resource_read = _schema_sub_resource_read = AAZObjectType()
sub_resource_read = _schema_sub_resource_read
sub_resource_read.id = AAZStrType()
_schema.id = cls._schema_sub_resource_read.id
__all__ = ["ListReferences"] | null |
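# A plain-json sketch of the request body that the content builder above assembles: a nested
# "properties.targetResources" list of {"id": ...} objects. The resource id shown is illustrative.
import json

def build_dns_reference_body(resource_ids):
    return json.dumps(
        {"properties": {"targetResources": [{"id": rid} for rid in resource_ids]}}
    )

# build_dns_reference_body(["/subscriptions/xxx/.../trafficManagerProfiles/MyTm"])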
resolve model field relations | from collections.abc import Iterable, Iterator, Sequence
from contextlib import contextmanager
from typing import Any
from django.apps import AppConfig
from django.apps.registry import Apps
from django.db.models.base import Model
from django.db.models.fields import Field
from django.db.models.manager import Manager
from django.utils.functional import cached_property
class AppConfigStub(AppConfig):
def __init__(self, label: str) -> None: ...
class ModelState:
name: str
app_label: str
fields: dict[str, Field]
options: dict[str, Any]
bases: Sequence[type[Model] | str]
managers: list[tuple[str, Manager]]
def __init__(
self,
app_label: str,
name: str,
fields: list[tuple[str, Field]] | dict[str, Field],
options: dict[str, Any] | None = ...,
bases: Sequence[type[Model] | str] | None = ...,
managers: list[tuple[str, Manager]] | None = ...,
) -> None: ...
def clone(self) -> ModelState: ...
def construct_managers(self) -> Iterator[tuple[str, Manager]]: ...
@classmethod
def from_model(cls, model: type[Model], exclude_rels: bool = ...) -> ModelState: ...
def get_field(self, field_name: str) -> Field: ...
@cached_property
def name_lower(self) -> str: ...
def render(self, apps: Apps) -> Any: ...
def get_index_by_name(self, name: str) -> Any: ...
def get_constraint_by_name(self, name: str) -> Any: ...
def __eq__(self, other: object) -> bool: ...
def get_related_models_tuples(model: type[Model]) -> set[tuple[str, str]]: ...
def get_related_models_recursive(model: type[Model]) -> set[tuple[str, str]]: ...
class ProjectState:
is_delayed: bool
models: dict[Any, Any]
real_apps: set[str]
def __init__(
self, models: dict[tuple[str, str], ModelState] | None = ..., real_apps: set[str] | None = ...
) -> None: ...
@property
def relations(self) -> Any: ...
def add_model(self, model_state: ModelState) -> None: ...
@cached_property
def apps(self) -> StateApps: ...
def clear_delayed_apps_cache(self) -> None: ...
def clone(self) -> ProjectState: ...
@classmethod
def from_apps(cls, apps: Apps) -> ProjectState: ...
def reload_model(self, app_label: str, model_name: str, delay: bool = ...) -> None: ...
def reload_models(self, models: list[Any], delay: bool = ...) -> None: ...
def remove_model(self, app_label: str, model_name: str) -> None: ...
def rename_model(self, app_label: str, old_name: str, new_name: str) -> None: ...
def alter_model_options(
self, app_label: str, model_name: str, options: dict[str, Any], option_keys: Iterable[str] | None = ...
) -> None: ...
def remove_model_options(self, app_label: str, model_name: str, option_name: str, value_to_remove: Any) -> None: ...
def alter_model_managers(self, app_label: str, model_name: str, managers: list[tuple[str, Manager]]) -> None: ...
def add_index(self, app_label: str, model_name: str, index: Any) -> None: ...
def remove_index(self, app_label: str, model_name: str, index_name: str) -> None: ...
def rename_index(self, app_label: str, model_name: str, old_index_name: str, new_index_name: str) -> None: ...
def add_constraint(self, app_label: str, model_name: str, constraint: Any) -> None: ...
def remove_constraint(self, app_label: str, model_name: str, constraint_name: str) -> None: ...
def add_field(self, app_label: str, model_name: str, name: str, field: Field, preserve_default: Any) -> None: ...
def remove_field(self, app_label: str, model_name: str, name: str) -> None: ...
def alter_field(self, app_label: str, model_name: str, name: str, field: Field, preserve_default: Any) -> None: ...
def rename_field(self, app_label: str, model_name: str, old_name: str, new_name: str) -> None: ...
def update_model_field_relation(
self, model: type[Model], model_key: tuple[str, str], field_name: str, field: Field, concretes: Any
) -> None: ...
def METHOD_NAME(
self, model_key: tuple[str, str], field_name: str, field: Field, concretes: Any | None = ...
) -> None: ...
def resolve_model_relations(self, model_key: tuple[str, str], concretes: Any | None = ...) -> None: ...
def resolve_fields_and_relations(self) -> None: ...
def get_concrete_model_key(self, model: type[Model]) -> Any: ...
class StateApps(Apps):
real_models: list[ModelState]
def __init__(
self, real_apps: list[str], models: dict[tuple[str, str], ModelState], ignore_swappable: bool = ...
) -> None: ...
@contextmanager
def bulk_update(self) -> Iterator[None]: ...
def clone(self) -> StateApps: ...
def render_multiple(self, model_states: list[ModelState]) -> None: ...
def register_model(self, app_label: str, model: type[Model]) -> None: ...
def unregister_model(self, app_label: str, model_name: str) -> None: ... | null |
format perc | # Copyright 2012 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from gi.repository import Gst, Gtk, GObject
from quodlibet import _
from quodlibet.plugins import PluginImportException
from quodlibet.plugins.gstelement import GStreamerPlugin
from quodlibet.qltk.util import GSignals
from quodlibet import qltk
from quodlibet import config
_PLUGIN_ID = "compressor"
_SETTINGS = {
"threshold": [_("_Threshold:"),
_("Threshold until the filter is activated"), 1.0],
"ratio": [_("R_atio:"), _("Compression ratio"), 1.0],
}
def get_cfg(option):
cfg_option = "%s_%s" % (_PLUGIN_ID, option)
default = _SETTINGS[option][2]
if option == "threshold":
return config.getfloat("plugins", cfg_option, default)
elif option == "ratio":
return config.getfloat("plugins", cfg_option, default)
def set_cfg(option, value):
cfg_option = "%s_%s" % (_PLUGIN_ID, option)
if get_cfg(option) != value:
config.set("plugins", cfg_option, value)
class Preferences(Gtk.VBox):
__gsignals__: GSignals = {
'changed': (GObject.SignalFlags.RUN_LAST, None, tuple()),
}
def __init__(self):
super().__init__(spacing=12)
table = Gtk.Table(n_rows=2, n_columns=2)
table.set_col_spacings(6)
table.set_row_spacings(6)
labels = {}
for idx, key in enumerate(["threshold", "ratio"]):
text, tooltip = _SETTINGS[key][:2]
label = Gtk.Label(label=text)
labels[key] = label
label.set_tooltip_text(tooltip)
label.set_alignment(0.0, 0.5)
label.set_padding(0, 6)
label.set_use_underline(True)
table.attach(label, 0, 1, idx, idx + 1,
xoptions=Gtk.AttachOptions.FILL |
Gtk.AttachOptions.SHRINK)
threshold_scale = Gtk.HScale(
adjustment=Gtk.Adjustment.new(0, 0, 1, 0.01, 0.1, 0))
threshold_scale.set_digits(2)
labels["threshold"].set_mnemonic_widget(threshold_scale)
threshold_scale.set_value_pos(Gtk.PositionType.RIGHT)
def METHOD_NAME(scale, value):
return _("%d %%") % (value * 100)
threshold_scale.connect('format-value', METHOD_NAME)
table.attach(threshold_scale, 1, 2, 0, 1)
def threshold_changed(scale):
value = scale.get_value()
set_cfg("threshold", value)
self.emit("changed")
threshold_scale.connect('value-changed', threshold_changed)
threshold_scale.set_value(get_cfg("threshold"))
ratio_scale = Gtk.HScale(
adjustment=Gtk.Adjustment.new(0, 0, 1, 0.01, 0.1, 0))
ratio_scale.set_digits(2)
labels["ratio"].set_mnemonic_widget(ratio_scale)
ratio_scale.set_value_pos(Gtk.PositionType.RIGHT)
table.attach(ratio_scale, 1, 2, 1, 2)
def ratio_changed(scale):
value = scale.get_value()
set_cfg("ratio", value)
self.emit("changed")
ratio_scale.connect('value-changed', ratio_changed)
ratio_scale.set_value(get_cfg("ratio"))
self.pack_start(qltk.Frame(_("Preferences"), child=table),
True, True, 0)
class Compressor(GStreamerPlugin):
PLUGIN_ID = _PLUGIN_ID
PLUGIN_NAME = _("Audio Compressor")
PLUGIN_DESC = _("Changes the amplitude of all samples above a specific "
"threshold with a specific ratio.")
@classmethod
def setup_element(cls):
return Gst.ElementFactory.make('audiodynamic', cls.PLUGIN_ID)
@classmethod
def update_element(cls, element):
element.set_property("characteristics", "soft-knee")
element.set_property("mode", "compressor")
element.set_property("ratio", get_cfg("ratio"))
element.set_property("threshold", get_cfg("threshold"))
@classmethod
def PluginPreferences(cls, window):
prefs = Preferences()
prefs.connect("changed", lambda *x: cls.queue_update())
return prefs
if not Compressor.setup_element():
raise PluginImportException(
"GStreamer element 'audiodynamic' missing (gst-plugins-good)") | null |
snmp get | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
from typing import Any, Dict, Generator # noqa: F401
from pyasn1.type.univ import Null
from pysnmp import hlapi # noqa: F401
from pysnmp.entity.rfc3413 import cmdgen
from pysnmp.hlapi.asyncore.cmdgen import vbProcessor
from pysnmp.proto import errind
from pysnmp.proto.rfc1905 import endOfMibView
from datadog_checks.base.errors import CheckException
from .config import InstanceConfig # noqa: F401
def _handle_error(ctx, config):
# type: (dict, InstanceConfig) -> None
error = ctx['error']
if error:
message = '{} for device {}'.format(error, config.device)
raise CheckException(message)
def METHOD_NAME(config, oids, lookup_mib):
# type: (InstanceConfig, list, bool) -> list
"""Call SNMP GET on a list of oids."""
if config.device is None:
raise RuntimeError('No device set') # pragma: no cover
def callback( # type: ignore
snmpEngine, sendRequestHandle, errorIndication, errorStatus, errorIndex, varBinds, cbCtx
):
var_binds = vbProcessor.unmakeVarBinds(snmpEngine, varBinds, lookup_mib)
cbCtx['error'] = errorIndication
cbCtx['var_binds'] = var_binds
ctx = {} # type: Dict[str, Any]
var_binds = vbProcessor.makeVarBinds(config._snmp_engine, oids)
cmdgen.GetCommandGenerator().sendVarBinds(
config._snmp_engine,
config.device.target,
config._context_data.contextEngineId,
config._context_data.contextName,
var_binds,
callback,
ctx,
)
config._snmp_engine.transportDispatcher.runDispatcher()
_handle_error(ctx, config)
return ctx['var_binds']
def snmp_getnext(config, oids, lookup_mib, ignore_nonincreasing_oid):
# type: (InstanceConfig, list, bool, bool) -> Generator
"""Call SNMP GETNEXT on a list of oids. It will iterate on the results if it happens to be under the same prefix."""
if config.device is None:
raise RuntimeError('No device set') # pragma: no cover
def callback( # type: ignore
snmpEngine, sendRequestHandle, errorIndication, errorStatus, errorIndex, varBindTable, cbCtx
):
var_bind_table = [vbProcessor.unmakeVarBinds(snmpEngine, row, lookup_mib) for row in varBindTable]
if ignore_nonincreasing_oid and errorIndication and isinstance(errorIndication, errind.OidNotIncreasing):
errorIndication = None
cbCtx['error'] = errorIndication
cbCtx['var_bind_table'] = var_bind_table[0] if var_bind_table else []
ctx = {} # type: Dict[str, Any]
initial_vars = [x[0] for x in vbProcessor.makeVarBinds(config._snmp_engine, oids)]
var_binds = oids
gen = cmdgen.NextCommandGenerator()
while True:
gen.sendVarBinds(
config._snmp_engine,
config.device.target,
config._context_data.contextEngineId,
config._context_data.contextName,
var_binds,
callback,
ctx,
)
config._snmp_engine.transportDispatcher.runDispatcher()
_handle_error(ctx, config)
var_binds = []
new_initial_vars = []
for col, var_bind in enumerate(ctx['var_bind_table']):
name, val = var_bind
if not isinstance(val, Null) and initial_vars[col].isPrefixOf(name):
var_binds.append(var_bind)
new_initial_vars.append(initial_vars[col])
yield var_bind
if not var_binds:
return
initial_vars = new_initial_vars
def snmp_bulk(config, oid, non_repeaters, max_repetitions, lookup_mib, ignore_nonincreasing_oid):
# type: (InstanceConfig, hlapi.ObjectType, int, int, bool, bool) -> Generator
"""Call SNMP GETBULK on an oid."""
if config.device is None:
raise RuntimeError('No device set') # pragma: no cover
def callback( # type: ignore
snmpEngine, sendRequestHandle, errorIndication, errorStatus, errorIndex, varBindTable, cbCtx
):
var_bind_table = [vbProcessor.unmakeVarBinds(snmpEngine, row, lookup_mib) for row in varBindTable]
if ignore_nonincreasing_oid and errorIndication and isinstance(errorIndication, errind.OidNotIncreasing):
errorIndication = None
cbCtx['error'] = errorIndication
cbCtx['var_bind_table'] = var_bind_table
ctx = {} # type: Dict[str, Any]
var_binds = [oid]
initial_var = vbProcessor.makeVarBinds(config._snmp_engine, var_binds)[0][0]
gen = cmdgen.BulkCommandGenerator()
while True:
gen.sendVarBinds(
config._snmp_engine,
config.device.target,
config._context_data.contextEngineId,
config._context_data.contextName,
non_repeaters,
max_repetitions,
vbProcessor.makeVarBinds(config._snmp_engine, var_binds),
callback,
ctx,
)
config._snmp_engine.transportDispatcher.runDispatcher()
_handle_error(ctx, config)
for var_binds in ctx['var_bind_table']:
name, value = var_binds[0]
if endOfMibView.isSameTypeWith(value):
return
if initial_var.isPrefixOf(name):
yield var_binds[0]
else:
return | null |
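# A pysnmp-free sketch of the table-walk termination rule used by snmp_getnext/snmp_bulk above:
# a returned OID only belongs to the walked column while the requested OID is a prefix of it.
# OIDs are modelled here as plain tuples of integers.
def oid_is_prefix(prefix, oid):
    return len(prefix) <= len(oid) and oid[:len(prefix)] == prefix

# ifDescr column (1.3.6.1.2.1.2.2.1.2): still inside the column
# oid_is_prefix((1, 3, 6, 1, 2, 1, 2, 2, 1, 2), (1, 3, 6, 1, 2, 1, 2, 2, 1, 2, 7)) -> True
# next column reached, so the walk stops
# oid_is_prefix((1, 3, 6, 1, 2, 1, 2, 2, 1, 2), (1, 3, 6, 1, 2, 1, 2, 2, 1, 3, 1)) -> False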
mmio read cb | #!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
import ctypes
from qiling.core import Qiling
from qiling.hw.peripheral import QlPeripheral
from qiling.utils import ql_get_module_function
from qiling.exception import QlErrorModuleFunctionNotFound
class QlHwManager:
def __init__(self, ql: Qiling):
self.ql = ql
self.entity = {}
self.region = {}
self.stepable = {}
def create(self, label: str, struct: str=None, base: int=None, kwargs: dict={}) -> "QlPeripheral":
""" Create the peripheral accroding the label and envs.
struct: Structure of the peripheral. Use defualt ql structure if not provide.
base: Base address. Use defualt address if not provide.
"""
if struct is None:
struct, base, kwargs = self.load_env(label.upper())
try:
entity = ql_get_module_function('qiling.hw', struct)(self.ql, label, **kwargs)
self.entity[label] = entity
if hasattr(entity, 'step'):
self.stepable[label] = entity
self.region[label] = [(lbound + base, rbound + base) for (lbound, rbound) in entity.region]
return entity
except QlErrorModuleFunctionNotFound:
self.ql.log.debug(f'The {struct}({label}) has not been implemented')
def delete(self, label: str):
""" Remove the peripheral
"""
if label in self.entity:
self.entity.pop(label)
self.region.pop(label)
if label in self.stepable:
self.stepable.pop(label)
def load_env(self, label: str):
""" Get peripheral information (structure, base address, initialization list) from env.
Args:
label (str): Peripheral Label
"""
args = self.ql.env[label]
return args['struct'], args['base'], args.get("kwargs", {})
def load_all(self):
for label, args in self.ql.env.items():
if args['type'] == 'peripheral':
self.create(label.lower(), args['struct'], args['base'], args.get("kwargs", {}))
def find(self, address: int):
""" Find the peripheral at `address`
"""
for label in self.entity.keys():
for lbound, rbound in self.region[label]:
if lbound <= address < rbound:
return self.entity[label]
def step(self):
""" Update all peripheral's state
"""
for entity in self.stepable.values():
entity.step()
def setup_mmio(self, begin, size, info=""):
mmio = ctypes.create_string_buffer(size)
def METHOD_NAME(ql, offset, size):
address = begin + offset
hardware = self.find(address)
if hardware:
return hardware.read(address - hardware.base, size)
else:
ql.log.debug('%s Read non-mapped hardware [0x%08x]' % (info, address))
buf = ctypes.create_string_buffer(size)
ctypes.memmove(buf, ctypes.addressof(mmio) + offset, size)
return int.from_bytes(buf.raw, byteorder='little')
def mmio_write_cb(ql, offset, size, value):
address = begin + offset
hardware = self.find(address)
if hardware:
hardware.write(address - hardware.base, size, value)
else:
ql.log.debug('%s Write non-mapped hardware [0x%08x] = 0x%08x' % (info, address, value))
ctypes.memmove(ctypes.addressof(mmio) + offset, (value).to_bytes(size, 'little'), size)
self.ql.mem.map_mmio(begin, size, METHOD_NAME, mmio_write_cb, info=info)
def show_info(self):
self.ql.log.info(f'{"Start":8s} {"End":8s} {"Label":8s} {"Class"}')
for label, region in self.region.items():
for lbound, ubound in region:
classname = self.entity[label].__class__.__name__
self.ql.log.info(f'{lbound:08x} - {ubound:08x} {label.upper():8s} {classname}')
def __getitem__(self, key):
return self.entity[key]
def __setitem__(self, key, value):
self.entity[key] = value
def __getattr__(self, key):
return self.entity.get(key)
def save(self):
return {label : entity.save() for label, entity in self.entity.items()}
def restore(self, saved_state):
for label, data in saved_state.items():
self.entity[label].restore(data) | null |
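# A qiling-free sketch of the fallback path in setup_mmio() above: accesses that hit no
# registered peripheral are served from a plain ctypes buffer, little-endian, mirroring what
# mmio_read_cb/mmio_write_cb do. The 0x100 buffer size is an arbitrary choice.
import ctypes

backing = ctypes.create_string_buffer(0x100)

def raw_write(offset, size, value):
    ctypes.memmove(ctypes.addressof(backing) + offset, value.to_bytes(size, 'little'), size)

def raw_read(offset, size):
    buf = ctypes.create_string_buffer(size)
    ctypes.memmove(buf, ctypes.addressof(backing) + offset, size)
    return int.from_bytes(buf.raw, byteorder='little')

# raw_write(0x10, 4, 0xdeadbeef); hex(raw_read(0x10, 4)) -> '0xdeadbeef'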
set role | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from ..uac import RoleV2_pb2 as uac_dot_RoleV2__pb2
class RoleServiceV2Stub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.METHOD_NAME = channel.unary_unary(
'/ai.verta.uac.RoleServiceV2/setRole',
request_serializer=uac_dot_RoleV2__pb2.SetRoleV2.SerializeToString,
response_deserializer=uac_dot_RoleV2__pb2.SetRoleV2.Response.FromString,
)
self.deleteRole = channel.unary_unary(
'/ai.verta.uac.RoleServiceV2/deleteRole',
request_serializer=uac_dot_RoleV2__pb2.DeleteRoleV2.SerializeToString,
response_deserializer=uac_dot_RoleV2__pb2.DeleteRoleV2.Response.FromString,
)
self.searchRoles = channel.unary_unary(
'/ai.verta.uac.RoleServiceV2/searchRoles',
request_serializer=uac_dot_RoleV2__pb2.SearchRolesV2.SerializeToString,
response_deserializer=uac_dot_RoleV2__pb2.SearchRolesV2.Response.FromString,
)
self.getRole = channel.unary_unary(
'/ai.verta.uac.RoleServiceV2/getRole',
request_serializer=uac_dot_RoleV2__pb2.GetRoleV2.SerializeToString,
response_deserializer=uac_dot_RoleV2__pb2.GetRoleV2.Response.FromString,
)
self.getEnabledActions = channel.unary_unary(
'/ai.verta.uac.RoleServiceV2/getEnabledActions',
request_serializer=uac_dot_RoleV2__pb2.GetEnabledActions.SerializeToString,
response_deserializer=uac_dot_RoleV2__pb2.GetEnabledActions.Response.FromString,
)
self.getSelfAllowedActionsBatchForWorkspace = channel.unary_unary(
'/ai.verta.uac.RoleServiceV2/getSelfAllowedActionsBatchForWorkspace',
request_serializer=uac_dot_RoleV2__pb2.GetSelfAllowedActionsBatchForWorkspace.SerializeToString,
response_deserializer=uac_dot_RoleV2__pb2.GetSelfAllowedActionsBatchForWorkspace.Response.FromString,
)
class RoleServiceV2Servicer(object):
# missing associated documentation comment in .proto file
pass
def METHOD_NAME(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def deleteRole(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def searchRoles(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getRole(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getEnabledActions(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getSelfAllowedActionsBatchForWorkspace(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RoleServiceV2Servicer_to_server(servicer, server):
rpc_method_handlers = {
'setRole': grpc.unary_unary_rpc_method_handler(
servicer.METHOD_NAME,
request_deserializer=uac_dot_RoleV2__pb2.SetRoleV2.FromString,
response_serializer=uac_dot_RoleV2__pb2.SetRoleV2.Response.SerializeToString,
),
'deleteRole': grpc.unary_unary_rpc_method_handler(
servicer.deleteRole,
request_deserializer=uac_dot_RoleV2__pb2.DeleteRoleV2.FromString,
response_serializer=uac_dot_RoleV2__pb2.DeleteRoleV2.Response.SerializeToString,
),
'searchRoles': grpc.unary_unary_rpc_method_handler(
servicer.searchRoles,
request_deserializer=uac_dot_RoleV2__pb2.SearchRolesV2.FromString,
response_serializer=uac_dot_RoleV2__pb2.SearchRolesV2.Response.SerializeToString,
),
'getRole': grpc.unary_unary_rpc_method_handler(
servicer.getRole,
request_deserializer=uac_dot_RoleV2__pb2.GetRoleV2.FromString,
response_serializer=uac_dot_RoleV2__pb2.GetRoleV2.Response.SerializeToString,
),
'getEnabledActions': grpc.unary_unary_rpc_method_handler(
servicer.getEnabledActions,
request_deserializer=uac_dot_RoleV2__pb2.GetEnabledActions.FromString,
response_serializer=uac_dot_RoleV2__pb2.GetEnabledActions.Response.SerializeToString,
),
'getSelfAllowedActionsBatchForWorkspace': grpc.unary_unary_rpc_method_handler(
servicer.getSelfAllowedActionsBatchForWorkspace,
request_deserializer=uac_dot_RoleV2__pb2.GetSelfAllowedActionsBatchForWorkspace.FromString,
response_serializer=uac_dot_RoleV2__pb2.GetSelfAllowedActionsBatchForWorkspace.Response.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'ai.verta.uac.RoleServiceV2', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,)) | null |
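# A usage sketch for the generated stubs above, following the standard grpcio pattern:
# register a servicer implementation on a server and call it through the stub over a channel.
# The port number and worker count are arbitrary choices.
from concurrent import futures
import grpc

def serve(servicer, port=50051):
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_RoleServiceV2Servicer_to_server(servicer, server)
    server.add_insecure_port('[::]:%d' % port)
    server.start()
    return server

# channel = grpc.insecure_channel('localhost:50051')
# stub = RoleServiceV2Stub(channel)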
get terminal penalty | # ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright (c) 2008-2022
# National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
from pyomo.core.base.componentuid import ComponentUID
from pyomo.core.base.expression import Expression
from pyomo.core.base.set import Set
from pyomo.contrib.mpc.data.series_data import get_indexed_cuid
from pyomo.contrib.mpc.data.scalar_data import ScalarData
def _get_quadratic_penalty_at_time(var, t, target, weight=None):
if weight is None:
weight = 1.0
return weight * (var[t] - target) ** 2
def _get_penalty_expressions_at_time(
variables, t, target_data, weight_data=None, time_set=None
):
"""A private helper function to process data and construct penalty
expressions
"""
if weight_data is None:
weight_data = ScalarData(ComponentMap((var, 1.0) for var in variables))
if not isinstance(weight_data, ScalarData):
# We pass time_set as an argument in case the user provides a
# ComponentMap of VarData -> values. In this case knowing the
# time set is necessary to recover the indexed CUID.
weight_data = ScalarData(weight_data, time_set=time_set)
if not isinstance(target_data, ScalarData):
target_data = ScalarData(target_data, time_set=time_set)
for var in variables:
if not target_data.contains_key(var):
raise KeyError(
"Target data does not contain a key for variable %s" % var.name
)
if not weight_data.contains_key(var):
raise KeyError(
"Penalty weight data does not contain a key for variable %s" % var.name
)
penalties = [
_get_quadratic_penalty_at_time(
var,
t,
target_data.get_data_from_key(var),
weight_data.get_data_from_key(var),
)
for var in variables
]
return penalties
def get_penalty_at_time(
variables, t, target_data, weight_data=None, time_set=None, variable_set=None
):
"""Returns an Expression penalizing the deviation of the specified
variables at the specified point in time from the specified target
Arguments
---------
variables: List
List of time-indexed variables that will be penalized
t: Float
Time point at which to apply the penalty
target_data: ScalarData
ScalarData object containing the target for (at least) the variables
to be penalized
weight_data: ScalarData (optional)
ScalarData object containing the penalty weights for (at least) the
variables to be penalized
time_set: Set (optional)
Time set that indexes the provided variables. This is only used if
target or weight data are provided as a ComponentMap with VarData
as keys. In this case the Set is necessary to recover the CUIDs
used internally as keys
variable_set: Set (optional)
Set indexing the list of variables provided, if such a set already
exists
Returns
-------
Set, Expression
Set indexing the list of variables provided and an Expression,
indexed by this set, containing the weighted penalty expressions
"""
if variable_set is None:
variable_set = Set(initialize=range(len(variables)))
penalty_expressions = _get_penalty_expressions_at_time(
variables, t, target_data, weight_data=weight_data, time_set=time_set
)
def penalty_rule(m, i):
return penalty_expressions[i]
penalty = Expression(variable_set, rule=penalty_rule)
return variable_set, penalty
def METHOD_NAME(
variables, time_set, target_data, weight_data=None, variable_set=None
):
"""Returns an Expression penalizing the deviation of the specified
variables at the final point in time from the specified target
Arguments
---------
variables: List
List of time-indexed variables that will be penalized
time_set: Set
Time set that indexes the provided variables. Penalties are applied
at the last point in this set.
target_data: ScalarData
ScalarData object containing the target for (at least) the variables
to be penalized
weight_data: ScalarData (optional)
ScalarData object containing the penalty weights for (at least) the
variables to be penalized
variable_set: Set (optional)
Set indexing the list of variables provided, if such a set already
exists
Returns
-------
Set, Expression
Set indexing the list of variables provided and an Expression,
indexed by this set, containing the weighted penalty expressions
"""
t = time_set.last()
return get_penalty_at_time(
variables,
t,
target_data,
weight_data=weight_data,
time_set=time_set,
variable_set=variable_set,
) | null |
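# A pyomo-free sketch of the objective term assembled above: a weighted quadratic deviation
# of each variable from its target at one time point. Names and numbers are illustrative.
def quadratic_penalty(values, targets, weights=None):
    weights = weights or {name: 1.0 for name in values}
    return sum(weights[name] * (values[name] - targets[name]) ** 2 for name in values)

# quadratic_penalty({"T": 310.0}, {"T": 300.0}, {"T": 0.5}) == 50.0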
reset hook | # pylint: disable=wrong-or-nonexistent-copyright-notice
from typing import Tuple, List
from unittest.mock import create_autospec
import cirq
import numpy as np
from pyquil.gates import MEASURE, RX, DECLARE, H, CNOT, I
from pyquil.quilbase import Pragma, Reset
from cirq_rigetti import circuit_transformers as transformers
def test_transform_cirq_circuit_to_pyquil_program(
parametric_circuit_with_params: Tuple[cirq.Circuit, cirq.Linspace]
) -> None:
"""test that a user can transform a `cirq.Circuit` to a `pyquil.Program`
functionally.
"""
parametric_circuit, param_resolvers = parametric_circuit_with_params
circuit = cirq.protocols.resolve_parameters(parametric_circuit, param_resolvers[1])
program, _ = transformers.default(circuit=circuit)
assert (
RX(np.pi / 2, 0) in program.instructions
), "executable should contain an RX(pi) 0 instruction"
assert DECLARE("m0") in program.instructions, "executable should declare a read out bit"
assert (
MEASURE(0, ("m0", 0)) in program.instructions
), "executable should measure the read out bit"
def test_transform_cirq_circuit_to_pyquil_program_with_qubit_id_map(
bell_circuit_with_qids: Tuple[cirq.Circuit, List[cirq.Qid]]
) -> None:
"""test that a user can transform a `cirq.Circuit` to a `pyquil.Program`
functionally with explicit physical qubit address mapping.
"""
bell_circuit, qubits = bell_circuit_with_qids
qubit_id_map = {qubits[1]: "11", qubits[0]: "13"}
transformer = transformers.build(qubit_id_map=qubit_id_map)
program, _ = transformer(circuit=bell_circuit)
assert H(13) in program.instructions, "bell circuit should include Hadamard"
assert CNOT(13, 11) in program.instructions, "bell circuit should include CNOT"
assert (
DECLARE("m0", memory_size=2) in program.instructions
), "executable should declare a read out bit"
assert (
MEASURE(13, ("m0", 0)) in program.instructions
), "executable should measure the first qubit to the first read out bit"
assert (
MEASURE(11, ("m0", 1)) in program.instructions
), "executable should measure the second qubit to the second read out bit"
def test_transform_with_post_transformation_hooks(
bell_circuit_with_qids: Tuple[cirq.Circuit, List[cirq.Qid]]
) -> None:
"""test that a user can transform a `cirq.Circuit` to a `pyquil.Program`
functionally with explicit physical qubit address mapping.
"""
bell_circuit, qubits = bell_circuit_with_qids
def METHOD_NAME(program, measurement_id_map):
program._instructions.insert(0, Reset())
return program, measurement_id_map
reset_hook_spec = create_autospec(METHOD_NAME, side_effect=METHOD_NAME)
pragma = Pragma('INTIAL_REWIRING', freeform_string='GREEDY')
def rewire_hook(program, measurement_id_map):
program._instructions.insert(0, pragma)
return program, measurement_id_map
rewire_hook_spec = create_autospec(rewire_hook, side_effect=rewire_hook)
transformer = transformers.build(
qubits=tuple(qubits), post_transformation_hooks=[reset_hook_spec, rewire_hook_spec]
)
program, _ = transformer(circuit=bell_circuit)
assert 1 == reset_hook_spec.call_count
assert Reset() in program.instructions, "hook should add reset"
assert 1 == rewire_hook_spec.call_count
assert pragma in program.instructions, "hook should add pragma"
assert H(0) in program.instructions, "bell circuit should include Hadamard"
assert CNOT(0, 1) in program.instructions, "bell circuit should include CNOT"
assert (
DECLARE("m0", memory_size=2) in program.instructions
), "executable should declare a read out bit"
assert (
MEASURE(0, ("m0", 0)) in program.instructions
), "executable should measure the first qubit to the first read out bit"
assert (
MEASURE(1, ("m0", 1)) in program.instructions
), "executable should measure the second qubit to the second read out bit"
def test_transform_cirq_circuit_with_explicit_decompose(
parametric_circuit_with_params: Tuple[cirq.Circuit, cirq.Linspace]
) -> None:
"""test that a user add a custom circuit decomposition function"""
parametric_circuit, param_resolvers = parametric_circuit_with_params
parametric_circuit.append(cirq.I(cirq.GridQubit(0, 0)))
parametric_circuit.append(cirq.I(cirq.GridQubit(0, 1)))
parametric_circuit.append(cirq.measure(cirq.GridQubit(0, 0), cirq.GridQubit(0, 1), key='m'))
circuit = cirq.protocols.resolve_parameters(parametric_circuit, param_resolvers[1])
def decompose_operation(operation: cirq.Operation) -> List[cirq.Operation]:
operations = [operation]
if isinstance(operation.gate, cirq.MeasurementGate) and operation.gate.num_qubits() == 1:
operations.append(cirq.I(operation.qubits[0]))
return operations
program, _ = transformers.build(decompose_operation=decompose_operation)(circuit=circuit)
assert (
RX(np.pi / 2, 2) in program.instructions
), "executable should contain an RX(pi) 0 instruction"
assert I(0) in program.instructions, "executable should contain an I(0) instruction"
assert I(1) in program.instructions, "executable should contain an I(1) instruction"
assert I(2) in program.instructions, "executable should contain an I(2) instruction"
assert DECLARE("m0") in program.instructions, "executable should declare a read out bit"
assert (
MEASURE(0, ("m0", 0)) in program.instructions
), "executable should measure the read out bit" | null |
test for break npm | import warnings
import unittest
from numba.core.compiler import compile_isolated, Flags
from numba.core import types, errors
from numba.tests.support import TestCase, CompilationCache
enable_pyobj_flags = Flags()
enable_pyobj_flags.enable_pyobject = True
force_pyobj_flags = Flags()
force_pyobj_flags.force_pyobject = True
no_pyobj_flags = Flags()
def assignments(a):
b = c = str(a)
return b + c
def assignments2(a):
b = c = d = str(a)
return b + c + d
# Use cases for issue #503
def var_propagate1(a, b):
c = (a if a > b else b) + 5
return c
def var_propagate2(a, b):
c = 5 + (a if a > b else b + 12) / 2.0
return c
def var_propagate3(a, b):
c = 5 + (a > b and a or b)
return c
def var_propagate4(a, b):
c = 5 + (a - 1 and b + 1) or (a + 1 and b - 1)
return c
# Issue #480
def chained_compare(a):
return 1 < a < 3
# Issue #591
def stack_effect_error(x):
i = 2
c = 1
if i == x:
for i in range(3):
c = i
return i + c
# Some more issues with stack effect and blocks
def for_break(n, x):
for i in range(n):
n = 0
if i == x:
break
else:
n = i
return i, n
# Issue #571
def var_swapping(a, b, c, d, e):
a, b = b, a
c, d, e = e, c, d
a, b, c, d = b, c, d, a
return a + b + c + d +e
class TestDataFlow(TestCase):
def setUp(self):
self.cache = CompilationCache()
def test_assignments(self, flags=force_pyobj_flags):
pyfunc = assignments
cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
cfunc = cr.entry_point
for x in [-1, 0, 1]:
self.assertPreciseEqual(pyfunc(x), cfunc(x))
def test_assignments2(self, flags=force_pyobj_flags):
pyfunc = assignments2
cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
cfunc = cr.entry_point
for x in [-1, 0, 1]:
self.assertPreciseEqual(pyfunc(x), cfunc(x))
if flags is force_pyobj_flags:
cfunc("a")
# The dataflow analysis must be good enough for native mode
# compilation to succeed, hence the no_pyobj_flags in the following tests.
def run_propagate_func(self, pyfunc, args):
cr = self.cache.compile(pyfunc, (types.int32, types.int32),
flags=no_pyobj_flags)
cfunc = cr.entry_point
self.assertPreciseEqual(cfunc(*args), pyfunc(*args))
def test_var_propagate1(self):
self.run_propagate_func(var_propagate1, (2, 3))
self.run_propagate_func(var_propagate1, (3, 2))
def test_var_propagate2(self):
self.run_propagate_func(var_propagate2, (2, 3))
self.run_propagate_func(var_propagate2, (3, 2))
def test_var_propagate3(self):
self.run_propagate_func(var_propagate3, (2, 3))
self.run_propagate_func(var_propagate3, (3, 2))
self.run_propagate_func(var_propagate3, (2, 0))
self.run_propagate_func(var_propagate3, (-1, 0))
self.run_propagate_func(var_propagate3, (0, 2))
self.run_propagate_func(var_propagate3, (0, -1))
def test_var_propagate4(self):
self.run_propagate_func(var_propagate4, (1, 1))
self.run_propagate_func(var_propagate4, (1, 0))
self.run_propagate_func(var_propagate4, (1, -1))
self.run_propagate_func(var_propagate4, (0, 1))
self.run_propagate_func(var_propagate4, (0, 0))
self.run_propagate_func(var_propagate4, (0, -1))
self.run_propagate_func(var_propagate4, (-1, 1))
self.run_propagate_func(var_propagate4, (-1, 0))
self.run_propagate_func(var_propagate4, (-1, -1))
def test_chained_compare(self, flags=force_pyobj_flags):
pyfunc = chained_compare
cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
cfunc = cr.entry_point
for x in [0, 1, 2, 3, 4]:
self.assertPreciseEqual(pyfunc(x), cfunc(x))
def test_chained_compare_npm(self):
self.test_chained_compare(no_pyobj_flags)
def test_stack_effect_error(self, flags=force_pyobj_flags):
# Issue #591: POP_BLOCK must undo all stack pushes done inside
# the block.
pyfunc = stack_effect_error
cr = compile_isolated(pyfunc, (types.int32,), flags=flags)
cfunc = cr.entry_point
for x in (0, 1, 2, 3):
self.assertPreciseEqual(pyfunc(x), cfunc(x))
def test_stack_effect_error_npm(self):
self.test_stack_effect_error(no_pyobj_flags)
def test_var_swapping(self, flags=force_pyobj_flags):
pyfunc = var_swapping
cr = compile_isolated(pyfunc, (types.int32,) * 5, flags=flags)
cfunc = cr.entry_point
args = tuple(range(0, 10, 2))
self.assertPreciseEqual(pyfunc(*args), cfunc(*args))
def test_var_swapping_npm(self):
self.test_var_swapping(no_pyobj_flags)
def test_for_break(self, flags=force_pyobj_flags):
# BREAK_LOOP must unwind the current inner syntax block.
pyfunc = for_break
cr = compile_isolated(pyfunc, (types.intp, types.intp), flags=flags)
cfunc = cr.entry_point
for (n, x) in [(4, 2), (4, 6)]:
self.assertPreciseEqual(pyfunc(n, x), cfunc(n, x))
def METHOD_NAME(self):
self.test_for_break(no_pyobj_flags)
if __name__ == '__main__':
unittest.main()
| null |
start server | # noinspection PyPackageRequirements
import wx
from logbook import Logger
import threading
import time
import base64
import json
import config
import webbrowser
import eos.db
from service.const import EsiLoginMethod, EsiSsoMode
from eos.saveddata.ssocharacter import SsoCharacter
from service.esiAccess import APIException, GenericSsoError
import gui.globalEvents as GE
from gui.ssoLogin import SsoLogin, SsoLoginServer
from service.server import StoppableHTTPServer, AuthHandler
from service.settings import EsiSettings
from service.esiAccess import EsiAccess
import gui.mainFrame
from requests import Session
pyfalog = Logger(__name__)
class Esi(EsiAccess):
_instance = None
@classmethod
def getInstance(cls):
if cls._instance is None:
cls._instance = Esi()
return cls._instance
def __init__(self):
self.settings = EsiSettings.getInstance()
super().__init__()
# these will be set when needed
self.httpd = None
self.state = None
self.ssoTimer = None
self.implicitCharacter = None
        # until I can get around to making proper caching and modifications to said cache, store deleted fittings here
# so that we can easily hide them in the fitting browser
self.fittings_deleted = set()
# need these here to post events
import gui.mainFrame # put this here to avoid loop
self.mainFrame = gui.mainFrame.MainFrame.getInstance()
def delSsoCharacter(self, id):
char = eos.db.getSsoCharacter(id, config.getClientSecret())
# There is an issue in which the SSO character is not removed from any linked characters - a reference to the
# sso character remains even though the SSO character is deleted which should have deleted the link. This is a
        # workaround until we can figure out why. Manually delete the SSOCharacter from all of its characters
for x in char.characters:
x._Character__ssoCharacters.remove(char)
eos.db.remove(char)
wx.PostEvent(self.mainFrame, GE.SsoLogout(charID=id))
def getSsoCharacters(self):
chars = eos.db.getSsoCharacters(config.getClientSecret())
return chars
def getSsoCharacter(self, id):
char = eos.db.getSsoCharacter(id, config.getClientSecret())
eos.db.commit()
return char
def getSkills(self, id):
char = self.getSsoCharacter(id)
resp = super().getSkills(char)
return resp.json()
def getSecStatus(self, id):
char = self.getSsoCharacter(id)
resp = super().getSecStatus(char)
return resp.json()
def getFittings(self, id):
char = self.getSsoCharacter(id)
resp = super().getFittings(char)
return resp.json()
def postFitting(self, id, json_str):
# @todo: new fitting ID can be recovered from resp.data,
char = self.getSsoCharacter(id)
resp = super().postFitting(char, json_str)
return resp
def delFitting(self, id, fittingID):
char = self.getSsoCharacter(id)
super().delFitting(char, fittingID)
self.fittings_deleted.add(fittingID)
def login(self):
# always start the local server if user is using client details. Otherwise, start only if they choose to do so.
if self.settings.get('loginMode') == EsiLoginMethod.SERVER:
with gui.ssoLogin.SsoLoginServer(0) as dlg:
dlg.ShowModal()
else:
with gui.ssoLogin.SsoLogin() as dlg:
if dlg.ShowModal() == wx.ID_OK:
message = json.loads(base64.b64decode(dlg.ssoInfoCtrl.Value.strip()))
self.handleLogin(message)
def stopServer(self):
pyfalog.debug("Stopping Server")
if self.httpd:
self.httpd.stop()
self.httpd = None
def METHOD_NAME(self, port): # todo: break this out into two functions: starting the server, and getting the URI
pyfalog.debug("Starting server")
# we need this to ensure that the previous get_request finishes, and then the socket will close
if self.httpd:
self.stopServer()
time.sleep(1)
self.httpd = StoppableHTTPServer(('localhost', port), AuthHandler)
port = self.httpd.socket.getsockname()[1]
self.serverThread = threading.Thread(target=self.httpd.serve, args=(self.handleServerLogin,))
self.serverThread.name = "SsoCallbackServer"
self.serverThread.daemon = True
self.serverThread.start()
return 'http://localhost:{}'.format(port)
def handleLogin(self, message):
auth_response, data = self.auth(message['code'])
currentCharacter = self.getSsoCharacter(data['name'])
sub_split = data["sub"].split(":")
if (len(sub_split) != 3):
raise GenericSsoError("JWT sub does not contain the expected data. Contents: %s" % data["sub"])
cid = sub_split[-1]
if currentCharacter is None:
currentCharacter = SsoCharacter(cid, data['name'], config.getClientSecret())
Esi.update_token(currentCharacter, auth_response)
eos.db.save(currentCharacter)
wx.PostEvent(self.mainFrame, GE.SsoLogin(character=currentCharacter))
# get (endpoint, char, data?)
def handleServerLogin(self, message):
if not message:
raise GenericSsoError("Could not parse out querystring parameters.")
try:
state_enc = message['state']
state = json.loads(base64.b64decode(state_enc))['state']
except Exception:
raise GenericSsoError("There was a problem decoding state parameter.")
if state != self.state:
pyfalog.warn("OAUTH state mismatch")
raise GenericSsoError("OAUTH State Mismatch.")
pyfalog.debug("Handling SSO login with: {0}", message)
self.handleLogin(message) | null |
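# A sketch of the manual-login path handled above: the SSO dialog pastes a base64 blob that
# decodes to a JSON object carrying the authorization code. The "code" key mirrors what
# handleLogin() reads; the sample value is illustrative.
import base64
import json

def decode_sso_blob(blob):
    return json.loads(base64.b64decode(blob.strip()))

# blob = base64.b64encode(json.dumps({"code": "abc123"}).encode()).decode()
# decode_sso_blob(blob) -> {'code': 'abc123'}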
parse proxy pac | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib.request, urllib.error, urllib.parse
import sys
import json
import logging
import re
'''
HTTP Proxy parser and Connection
connect() function:
- auto detects the proxy on Windows and OSX
- on unix systems, the http_proxy environment variable must be set
- if that fails, it tries to find the proxy.pac address
- parses the file, and looks up all possible proxies
'''
class ConnectionProxy(object):
def __init__(self, url, timeout):
self.url = url
self.timeout = timeout
def _get_addresses_of_proxy_pac(self):
"""
Return a list of possible auto proxy .pac files being used,
based on the system registry (win32) or system preferences (OSX).
@return: list of urls
"""
pac_files = []
if sys.platform == 'win32':
# this module already requires Python 3 (urllib.request), so import winreg directly
import winreg
net = winreg.OpenKey(
winreg.HKEY_CURRENT_USER,
"Software\\Microsoft\\Windows\\CurrentVersion\\Internet Settings"
)
n_subs, n_vals, last_mod = winreg.QueryInfoKey(net)
subkeys = {}
for i in range(n_vals):
this_name, this_val, this_type = winreg.EnumValue(net, i)
subkeys[this_name] = this_val
if 'AutoConfigURL' in list(subkeys.keys()) and len(subkeys['AutoConfigURL']) > 0:
pac_files.append(subkeys['AutoConfigURL'])
elif sys.platform == 'darwin':
import plistlib
# plistlib.readPlist was removed in Python 3.9; plistlib.load is the replacement
with open('/Library/Preferences/SystemConfiguration/preferences.plist', 'rb') as plist_file:
    sys_prefs = plistlib.load(plist_file)
networks = sys_prefs['NetworkServices']
# loop through each possible network (e.g. Ethernet, Airport...)
for network in list(networks.items()):
# the first part is a long identifier
net_key, network = network
if 'ProxyAutoConfigURLString' in list(network['Proxies'].keys()):
pac_files.append(
network['Proxies']['ProxyAutoConfigURLString'])
return list(set(pac_files)) # remove redundant ones
def METHOD_NAME(self, pac_urls_list):
'''
For every pac file url in pac_urls_list, it tries to connect.
If the connection is successful, the file is parsed in search of
http proxies.
@param pac_urls_list: List with urls for the pac files
@return: list with all found http proxies
'''
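# For reference, a PROXY directive inside a .pac file typically looks like
# (illustrative example):
#   return "PROXY proxy.example.com:8080; DIRECT";
# The regex below extracts the "host:port" part that follows the PROXY keyword.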
proxy_url_list = []
for this_pac_url in pac_urls_list:
logging.debug('Trying pac file (%s)...' % this_pac_url)
try:
response = urllib.request.urlopen(
this_pac_url, timeout=self.timeout)
logging.debug('Succeeded (%s)...' % this_pac_url)
except Exception:
logging.debug('Failed (%s)...' % this_pac_url)
continue
pacStr = response.read().decode('utf-8', errors='replace')  # decode bytes so the regex below can run on str
possProxies = re.findall(
r"PROXY\s([^\s;,:]+:[0-9]{1,5})[^0-9]", pacStr + '\n')
for thisPoss in possProxies:
prox_url = 'http://' + thisPoss
proxy_dic = {'http': prox_url}
proxy_url_list.append(proxy_dic)
return proxy_url_list
def _set_proxy(self, proxy_dic=None):
'''
Sets connection proxy.
If proxy_dic is None, gets the proxy from the system.
To disable the autodetected proxy, pass an empty dictionary: {}
@param proxy_dic: format: {'http': 'http://www.example.com:3128/'}
'''
if proxy_dic is None:
# The default is to read the list of proxies from the environment variables <protocol>_proxy.
# If no proxy environment variables are set, then in a Windows environment proxy settings are
# obtained from the registry's Internet Settings section, and in a Mac OS X environment proxy
# information is retrieved from the OS X System Configuration
# Framework.
proxy = urllib.request.ProxyHandler()
else:
# If proxies is given, it must be a dictionary mapping protocol names to
# URLs of proxies.
proxy = urllib.request.ProxyHandler(proxy_dic)
opener = urllib.request.build_opener(proxy)
urllib.request.install_opener(opener)
def connect(self):
'''
Performs the request and gets a response from self.url
@return: response object from urllib2.urlopen
'''
req = urllib.request.Request(self.url)
response = None
try:
logging.debug("Trying Direct connection to %s..."%self.url)
response = urllib.request.urlopen(req, timeout=self.timeout)
except Exception as e:
logging.debug("Failed!")
logging.debug(e)
try:
logging.debug("Trying to use system proxy if it exists...")
self._set_proxy()
response = urllib.request.urlopen(req, timeout=self.timeout)
except Exception as e:
logging.debug("Failed!")
logging.debug(e)
pac_urls = self._get_addresses_of_proxy_pac()
proxy_urls = self.METHOD_NAME(pac_urls)
for proxy in proxy_urls:
try:
logging.debug("Trying to use the proxy %s found in proxy.pac configuration"%proxy)
self._set_proxy(proxy)
response = urllib.request.urlopen(req, timeout=self.timeout)
except Exception as e:
logging.debug("Failed!")
logging.debug(e)
if response is not None:
logging.debug("The connection to %s was successful."%self.url)
else:
logging.warning("Connection to %s failed..."%self.url)
return response
if __name__ == "__main__":
from pprint import pprint
# the class defined above is ConnectionProxy and requires a url and a timeout;
# the URL here is only an illustrative placeholder
c = ConnectionProxy('http://www.example.com', timeout=5)
response = c.connect()
if response is not None:
print(50 * '-')
content = json.loads(response.read().strip())
pprint(content)
| null |
set up | import unittest
from collections import namedtuple
from unittest import mock
from PyQt5.QtTest import QSignalSpy
from securedrop_client import state
from tests.helper import app # noqa: F401
Source = namedtuple("Source", ["uuid"])
File = namedtuple("File", ["uuid", "source", "is_downloaded"])
class TestState(unittest.TestCase):
def METHOD_NAME(self):
self.state = state.State()
def test_selected_conversation_is_unset_by_default(self):
assert self.state.selected_conversation is None
def test_selected_conversation_can_be_updated(self):
self.state.selected_conversation = "0"
assert self.state.selected_conversation == "0"
# File identifiers can be of any shape.
self.state.selected_conversation = 1
assert self.state.selected_conversation == 1
def test_selected_conversation_can_be_set_from_an_optional_source_id_and_cleared(self):
source_id = state.SourceId("some_id")
self.state.set_selected_conversation_for_source(source_id)
assert self.state.selected_conversation == state.ConversationId("some_id")
self.state.clear_selected_conversation()
assert self.state.selected_conversation is None
def test_add_file_does_not_duplicate_information(self):
self.state.add_file(5, 1)
self.state.add_file(5, 7)
assert len(self.state.conversation_files(5)) == 2
self.state.add_file(5, 7)
assert len(self.state.conversation_files(5)) == 2
def test_remove_conversation_files_removes_all_conversation_files(self):
self.state.add_file(7, 3)
self.state.add_file(7, 1)
assert len(self.state.conversation_files(7)) == 2
self.state.remove_conversation_files(7)
assert len(self.state.conversation_files(7)) == 0
def test_remove_conversation_files_handles_missing_files_gracefully(self):
self.state.remove_conversation_files(8)
assert len(self.state.conversation_files(8)) == 0
def test_conversation_files_is_empty_by_default(self):
assert len(self.state.conversation_files(2)) == 0
def test_conversation_files_returns_the_conversation_files(self):
self.state.add_file(4, 1)
self.state.add_file(4, 7)
self.state.add_file(4, 3)
assert len(self.state.conversation_files(4)) == 3
self.state.add_file(4, 8)
assert len(self.state.conversation_files(4)) == 4
def test_records_downloads(self):
some_file_id = state.FileId("X")
another_file_id = state.FileId("Y")
self.state.add_file("4", some_file_id)
self.state.add_file("4", another_file_id)
files = self.state.conversation_files("4")
assert len(files) == 2
assert not files[0].is_downloaded
assert not files[1].is_downloaded
self.state.record_file_download(some_file_id)
assert len(files) == 2
assert files[0].is_downloaded
assert not files[1].is_downloaded
def test_record_downloads_ignores_missing_files(self):
missing_file_id = state.FileId("missing")
self.state.record_file_download(missing_file_id)
assert True
def test_selected_conversation_files_changed_signal_is_emited_when_meaningful(self):
signal_emissions = QSignalSpy(self.state.selected_conversation_files_changed)
# when the selected conversation changed
self.state.selected_conversation = 1
assert len(signal_emissions) == 1
# NOT when a file is added to a conversation that's not the selected one
self.state.add_file("some_conversation_id", "file_id")
assert len(signal_emissions) == 1 # the signal wasn't emitted again
# when a known file was downloaded
self.state.record_file_download("file_id")
assert len(signal_emissions) == 2
# when a file is added to the selected conversation
self.state.add_file(1, "some_file_id")
assert len(signal_emissions) == 3
# NOT when files are removed from a conversation that's not the selected one
self.state.remove_conversation_files("some_conversation_id")
assert len(signal_emissions) == 3 # the signal wasn't emitted again
# when the selected conversation files are removed
self.state.remove_conversation_files(1)
assert len(signal_emissions) == 4
def test_selected_conversation_has_downloadable_files_false_by_default(self):
assert not self.state.selected_conversation_has_downloadable_files
def test_selected_conversation_has_downloadable_files_false_when_all_files_are_downloaded(self):
self.state.selected_conversation = 1
self.state.add_file(1, "some_file_id")
self.state.add_file(1, "another_file_id")
self.state.add_file("conversation that's not selected", "unrelated_file")
self.state.file("unrelated_file").is_downloaded = False # to be explicit
self.state.file("some_file_id").is_downloaded = True
self.state.file("another_file_id").is_downloaded = True
assert not self.state.selected_conversation_has_downloadable_files
self.state.file("some_file_id").is_downloaded = False
assert self.state.selected_conversation_has_downloadable_files
def test_gets_initialized_when_created_with_a_database(self):
source = Source(uuid="id")
file_1 = File(uuid="one", source=source, is_downloaded=True)
file_2 = File(uuid="two", source=source, is_downloaded=False)
database = mock.MagicMock()
database.get_files = mock.MagicMock(return_value=[file_1, file_2])
initialized_state = state.State(database)
assert initialized_state.file(state.FileId("one")).is_downloaded
assert not initialized_state.file(state.FileId("two")).is_downloaded
assert len(initialized_state.conversation_files(state.ConversationId("id"))) == 2 | null |
sqrtbox | # -*- coding: utf-8 -*-
"""
Lower-level formatting of Mathics objects as plain text.
"""
from mathics.builtin.box.graphics import GraphicsBox
from mathics.builtin.box.graphics3d import Graphics3DBox
from mathics.builtin.box.layout import (
FractionBox,
GridBox,
RowBox,
SqrtBox,
StyleBox,
SubscriptBox,
SubsuperscriptBox,
SuperscriptBox,
)
from mathics.core.atoms import String
from mathics.core.exceptions import BoxConstructError
from mathics.core.formatter import add_conversion_fn, lookup_method
from mathics.core.symbols import Atom, SymbolTrue
def boxes_to_text(boxes, **options) -> str:
return lookup_method(boxes, "text")(boxes, **options)
def string(self, **options) -> str:
value = self.value
show_string_characters = (
options.get("System`ShowStringCharacters", None) is SymbolTrue
)
if value.startswith('"') and value.endswith('"'): # nopep8
if not show_string_characters:
value = value[1:-1]
return value
add_conversion_fn(String, string)
def fractionbox(self, **options) -> str:
_options = self.box_options.copy()
_options.update(options)
options = _options
num_text = boxes_to_text(self.num, **options)
den_text = boxes_to_text(self.den, **options)
if isinstance(self.num, RowBox):
num_text = f"({num_text})"
if isinstance(self.den, RowBox):
den_text = f"({den_text})"
return " / ".join([num_text, den_text])
add_conversion_fn(FractionBox, fractionbox)
def gridbox(self, elements=None, **box_options) -> str:
if not elements:
elements = self._elements
evaluation = box_options.get("evaluation", None)
items, options = self.get_array(elements, evaluation)
result = ""
if not items:
return ""
try:
widths = [0] * max(1, max(len(row) for row in items if isinstance(row, tuple)))
except ValueError:
widths = [0]
cells = [
[
# TODO: check if this evaluation is necessary.
boxes_to_text(item, **box_options).splitlines()
for item in row
]
if isinstance(row, tuple)
else [boxes_to_text(row, **box_options).splitlines()]
for row in items
]
# compute widths
full_width = 0
for i, row in enumerate(cells):
for index, cell in enumerate(row):
if index >= len(widths):
raise BoxConstructError
if not isinstance(items[i], tuple):
for line in cell:
full_width = max(full_width, len(line))
else:
for line in cell:
widths[index] = max(widths[index], len(line))
full_width = max(sum(widths), full_width)
for row_index, row in enumerate(cells):
if row_index > 0:
result += "\n"
k = 0
while True:
line_exists = False
line = ""
for cell_index, cell in enumerate(row):
if len(cell) > k:
line_exists = True
text = cell[k]
else:
text = ""
line += text
if isinstance(items[row_index], tuple):
if cell_index < len(row) - 1:
line += " " * (widths[cell_index] - len(text))
# if cell_index < len(row) - 1:
line += " "
if line_exists:
result += line + "\n"
else:
break
k += 1
return result
add_conversion_fn(GridBox, gridbox)
def METHOD_NAME(self, **options) -> str:
_options = self.box_options.copy()
_options.update(options)
options = _options
if self.index:
return "Sqrt[%s,%s]" % (
boxes_to_text(self.radicand, **options),
boxes_to_text(self.index, **options),
)
return "Sqrt[%s]" % (boxes_to_text(self.radicand, **options))
add_conversion_fn(SqrtBox, METHOD_NAME)
def superscriptbox(self, **options) -> str:
_options = self.box_options.copy()
_options.update(options)
options = _options
fmt_str = "%s^%s" if isinstance(self.superindex, Atom) else "%s^(%s)"
return fmt_str % (
boxes_to_text(self.base, **options),
boxes_to_text(self.superindex, **options),
)
add_conversion_fn(SuperscriptBox, superscriptbox)
def subscriptbox(self, **options) -> str:
_options = self.box_options.copy()
_options.update(options)
options = _options
return "Subscript[%s, %s]" % (
boxes_to_text(self.base, **options),
boxes_to_text(self.subindex, **options),
)
add_conversion_fn(SubscriptBox, subscriptbox)
def subsuperscriptbox(self, **options) -> str:
_options = self.box_options.copy()
_options.update(options)
options = _options
return "Subsuperscript[%s, %s, %s]" % (
boxes_to_text(self.base, **options),
boxes_to_text(self.subindex, **options),
boxes_to_text(self.superindex, **options),
)
add_conversion_fn(SubsuperscriptBox, subsuperscriptbox)
def rowbox(self, elements=None, **options) -> str:
_options = self.box_options.copy()
_options.update(options)
options = _options
return "".join([boxes_to_text(element, **options) for element in self.items])
add_conversion_fn(RowBox, rowbox)
def stylebox(self, **options) -> str:
options.pop("evaluation", None)
_options = self.box_options.copy()
_options.update(options)
options = _options
return boxes_to_text(self.boxes, **options)
add_conversion_fn(StyleBox, stylebox)
def graphicsbox(self, elements=None, **options) -> str:
if not elements:
elements = self._elements
self._prepare_elements(elements, options) # to test for Box errors
return "-Graphics-"
add_conversion_fn(GraphicsBox, graphicsbox)
def graphics3dbox(self, elements=None, **options) -> str:
if not elements:
elements = self._elements
return "-Graphics3D-"
add_conversion_fn(Graphics3DBox, graphics3dbox) | null |
move item down | # SPDX-License-Identifier: BSD-3-Clause
# Copyright Contributors to the OpenColorIO Project.
from dataclasses import dataclass
from typing import Any, Callable, Optional, Type
import PyOpenColorIO as ocio
from PySide2 import QtCore, QtGui, QtWidgets
from ..config_cache import ConfigCache
from ..undo import ConfigSnapshotUndoCommand
from .config_item_model import ColumnDesc, BaseConfigItemModel
@dataclass
class Display:
"""Individual display storage."""
name: str
active: bool = False
@dataclass
class View:
"""Individual view storage."""
name: str
active: bool = False
class BaseActiveDisplayViewModel(BaseConfigItemModel):
"""
Base item model for active displays and views in the current
config.
"""
ACTIVE = ColumnDesc(1, "Active", bool)
# OCIO config object type this model manages.
__item_type__: type = None
# Callable to get all items from the config cache
__get_all_items__: Callable = None
# Callable to get active items from the config cache
__get_active_items__: Callable = None
# Config attribute name for method to set the active item string
__set_active_items_attr__: str = None
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ConfigCache.register_reset_callback(self._reset_cache)
def move_item_up(self, item_name: str) -> bool:
active_names = self.__get_active_items__()
if item_name not in active_names:
return False
src_row = active_names.index(item_name)
dst_row = max(0, src_row - 1)
if dst_row == src_row:
return False
return self.moveRows(self.NULL_INDEX, src_row, 1, self.NULL_INDEX, dst_row)
def METHOD_NAME(self, item_name: str) -> bool:
active_names = self.__get_active_items__()
if item_name not in active_names:
return False
src_row = active_names.index(item_name)
dst_row = min(len(active_names) - 1, src_row + 1)
if dst_row == src_row:
return False
return self.moveRows(self.NULL_INDEX, src_row, 1, self.NULL_INDEX, dst_row)
def flags(self, index: QtCore.QModelIndex) -> int:
return super().flags(index) | QtCore.Qt.ItemIsUserCheckable
def get_item_names(self) -> list[str]:
return [item.name for item in self._get_items()]
def _get_undo_command_type(
self, column_desc: ColumnDesc
) -> Type[QtWidgets.QUndoCommand]:
if column_desc == self.ACTIVE:
# Changing check state of the ACTIVE column has side effects related to
# display/view order, so a config snapshot is needed to revert the change.
return ConfigSnapshotUndoCommand
else:
return super()._get_undo_command_type(column_desc)
def _get_icon(
self, item: __item_type__, column_desc: ColumnDesc
) -> Optional[QtGui.QIcon]:
if column_desc == self.NAME:
return self.item_type_icon()
else:
return None
def _reset_cache(self) -> None:
self._items = []
def _get_items(self, preserve: bool = False) -> list[__item_type__]:
if ConfigCache.validate() and self._items:
return self._items
all_names = self.__get_all_items__()
active_names = self.__get_active_items__()
self._items = []
for name in active_names:
self._items.append(self.__item_type__(name, True))
for name in all_names:
if name not in active_names:
self._items.append(self.__item_type__(name, False))
return self._items
def _clear_items(self) -> None:
getattr(ocio.GetCurrentConfig(), self.__set_active_items_attr__)("")
def _add_item(self, item: __item_type__) -> None:
active_names = self.__get_active_items__()
if item.active and item.name not in active_names:
active_names.append(item.name)
getattr(ocio.GetCurrentConfig(), self.__set_active_items_attr__)(
",".join(active_names)
)
def _remove_item(self, item: __item_type__) -> None:
active_names = self.__get_active_items__()
if not item.active and item.name in active_names:
active_names.remove(item.name)
getattr(ocio.GetCurrentConfig(), self.__set_active_items_attr__)(
",".join(active_names)
)
def _new_item(self, name: __item_type__) -> None:
# Existing config items only
pass
def _get_checked_column(self) -> Optional[ColumnDesc]:
return self.ACTIVE
def _get_value(self, item: __item_type__, column_desc: ColumnDesc) -> Any:
# Get parameters
if column_desc == self.NAME:
return item.name
elif column_desc == self.ACTIVE:
return item.active
# Invalid column
return None
def _set_value(
self,
item: __item_type__,
column_desc: ColumnDesc,
value: Any,
index: QtCore.QModelIndex,
) -> None:
# Update parameters
if column_desc == self.ACTIVE:
item.active = value
if value is True:
self._add_item(item)
self.item_added.emit(item.name)
else:
self._remove_item(item)
self.item_removed.emit()
class ActiveDisplayModel(BaseActiveDisplayViewModel):
"""
Item model for active displays in the current config.
"""
NAME = ColumnDesc(0, "Display", str)
COLUMNS = [NAME, BaseActiveDisplayViewModel.ACTIVE]
__item_type__ = Display
__icon_glyph__ = "mdi6.monitor"
__get_active_items__ = ConfigCache.get_active_displays
__get_all_items__ = ConfigCache.get_displays
__set_active_items_attr__ = "setActiveDisplays"
class ActiveViewModel(BaseActiveDisplayViewModel):
"""
Item model for active views in the current config.
"""
NAME = ColumnDesc(0, "View", str)
COLUMNS = [NAME, BaseActiveDisplayViewModel.ACTIVE]
__item_type__ = View
__icon_glyph__ = "mdi6.eye-outline"
__get_active_items__ = ConfigCache.get_active_views
__get_all_items__ = ConfigCache.get_views
__set_active_items_attr__ = "setActiveViews" | null |
get charmer or summoner | from game.world.managers.objects.ObjectManager import ObjectManager
from game.world.managers.objects.farsight.FarSightManager import FarSightManager
from game.world.managers.objects.guids.GuidManager import GuidManager
from utils.constants.MiscCodes import ObjectTypeIds, HighGuid, ObjectTypeFlags
from utils.constants.UpdateFields import ObjectFields, DynamicObjectFields
# TODO: Finish implementing.
class DynamicObjectManager(ObjectManager):
GUID_MANAGER = GuidManager()
def __init__(self, owner, location, radius, effect, dynamic_type, ttl, **kwargs):
super().__init__(**kwargs)
self.summoner = owner
self.owner = owner.guid
self.map_id = owner.map_id
self.instance_id = owner.instance_id
self.location = location.copy()
self.dynamic_type = dynamic_type
self.spell_id = effect.casting_spell.spell_entry.ID
self.radius = radius
self.ttl = ttl
self.guid = self.generate_object_guid(DynamicObjectManager.GUID_MANAGER.get_new_guid())
self.update_packet_factory.init_values(owner.guid, DynamicObjectFields)
# override
def initialize_field_values(self):
# Object fields.
self.set_uint64(ObjectFields.OBJECT_FIELD_GUID, self.guid)
self.set_uint32(ObjectFields.OBJECT_FIELD_TYPE, self.get_type_mask())
self.set_float(ObjectFields.OBJECT_FIELD_SCALE_X, self.current_scale)
self.set_uint32(ObjectFields.OBJECT_FIELD_PADDING, 0)
# DynamicObject fields.
self.set_uint64(DynamicObjectFields.DYNAMICOBJECT_CASTER, self.owner)
self.set_uint32(DynamicObjectFields.DYNAMICOBJECT_BYTES, self.dynamic_type)
self.set_uint32(DynamicObjectFields.DYNAMICOBJECT_SPELLID, self.spell_id)
self.set_float(DynamicObjectFields.DYNAMICOBJECT_RADIUS, self.radius)
self.set_float(DynamicObjectFields.DYNAMICOBJECT_POS_X, self.location.x)
self.set_float(DynamicObjectFields.DYNAMICOBJECT_POS_Y, self.location.y)
self.set_float(DynamicObjectFields.DYNAMICOBJECT_POS_Z, self.location.z)
self.set_float(DynamicObjectFields.DYNAMICOBJECT_FACING, self.location.o)
self.initialized = True
def update(self, now):
if now > self.last_tick > 0:
elapsed = now - self.last_tick
if self.ttl > 0:
self.ttl = max(0, self.ttl - elapsed)
if self.ttl == 0:
self.despawn()
self.last_tick = now
@staticmethod
def spawn(summoner, location, radius, effect, dynamic_type, ttl=-1):
dynamic_object = DynamicObjectManager(owner=summoner, location=location, radius=radius, effect=effect,
dynamic_type=dynamic_type, ttl=ttl)
summoner.get_map().spawn_object(world_object_instance=dynamic_object)
return dynamic_object
@classmethod
def spawn_from_spell_effect(cls, effect, dynamic_type, orientation=0, ttl=-1):
target = effect.casting_spell.initial_target
# Target must be a vector.
if isinstance(target, ObjectManager):
target = target.location.copy()
else:
target = target.get_ray_vector(is_terrain=True)
if orientation:
target.set_orientation(orientation)
effect.casting_spell.dynamic_object = DynamicObjectManager.spawn(effect.casting_spell.spell_caster,
target, effect.get_radius(), effect,
dynamic_type, ttl=ttl)
return effect.casting_spell.dynamic_object
# override
def METHOD_NAME(self, include_self=False):
charmer_or_summoner = self.charmer if self.charmer else self.summoner if self.summoner else None
return charmer_or_summoner if charmer_or_summoner else self if include_self else None
# override
def is_temp_summon(self):
return True
# override
def is_active_object(self):
return FarSightManager.object_is_camera_view_point(self)
# override
def get_type_mask(self):
return super().get_type_mask() | ObjectTypeFlags.TYPE_DYNAMICOBJECT
# override
def get_low_guid(self):
return self.guid & ~HighGuid.HIGHGUID_DYNAMICOBJECT
# override
def get_type_id(self):
return ObjectTypeIds.ID_DYNAMICOBJECT
# override
def generate_object_guid(self, low_guid):
return low_guid | HighGuid.HIGHGUID_DYNAMICOBJECT | null |
head200 | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ..._vendor import _convert_request
from ...operations._head_exception_operations import build_head200_request, build_head204_request, build_head404_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class HeadExceptionOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~headexceptions.aio.AutoRestHeadExceptionTestService`'s
:attr:`head_exception` attribute.
"""
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def METHOD_NAME(self, **kwargs: Any) -> bool:
"""Return 200 status code if successful.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool or the result of cls(response)
:rtype: bool
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_head200_request(
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
METHOD_NAME.metadata = {"url": "/http/success/200"}
@distributed_trace_async
async def head204(self, **kwargs: Any) -> bool:
"""Return 204 status code if successful.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool or the result of cls(response)
:rtype: bool
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_head204_request(
template_url=self.head204.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
head204.metadata = {"url": "/http/success/204"}
@distributed_trace_async
async def head404(self, **kwargs: Any) -> bool:
"""Return 404 status code if successful.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: bool or the result of cls(response)
:rtype: bool
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = kwargs.pop("params", {}) or {}
cls: ClsType[None] = kwargs.pop("cls", None)
request = build_head404_request(
template_url=self.head404.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
return 200 <= response.status_code <= 299
head404.metadata = {"url": "/http/success/404"} | null |
default nfe environment | # Copyright (C) 2009 Renato Lima - Akretion
# Copyright (C) 2011 Vinicius Dittgen - PROGE, Leonardo Santagada - PROGE
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
import base64
import time
from datetime import datetime
from odoo import _, fields, models
from odoo.exceptions import Warning as UserError
class L10nBrAccountNfeExportInvoice(models.TransientModel):
"""Export fiscal eletronic file from invoice"""
_name = "l10n_br_account_product.nfe_export_invoice"
_description = "Export eletronic invoice"
def _default_file_type(self):
return self.env.user.company_id.file_type
def METHOD_NAME(self):
return self.env.user.company_id.nfe_environment
def _default_export_folder(self):
return self.env.user.company_id.nfe_export_folder
def _default_sign_xml(self):
return self.env.user.company_id.sign_xml
name = fields.Char(string="Nome", size=255)
file = fields.Binary("Arquivo", readonly=True)
file_type = fields.Selection(
selection=[("xml", "XML")], string="Tipo do Arquivo", default=_default_file_type
)
state = fields.Selection(
selection=[("init", "init"), ("done", "done")],
string="state",
readonly=True,
default="init",
)
nfe_environment = fields.Selection(
selection=[("1", "Produção"), ("2", "Homologação")],
string="Ambiente",
default=METHOD_NAME,
)
sign_xml = fields.Boolean(string="Assinar XML", default=_default_sign_xml)
nfe_export_result = fields.One2many(
comodel_name="l10n_br_account_product.nfe_export_invoice_result",
inverse_name="wizard_id",
string="NFe Export Result",
)
export_folder = fields.Boolean(
string="Salvar na Pasta de Exportação", default=_default_export_folder
)
def nfe_export(self):
for data in self:
active_ids = self._context.get("active_ids", [])
if not active_ids:
    raise UserError(_("Não existe nenhum documento fiscal para ser exportado!"))
invoices = []
export_inv_numbers = []
company_ids = []
err_msg = ""
for inv in self.env["account.invoice"].browse(active_ids):
if inv.state not in ("sefaz_export"):
err_msg += (
"O Documento Fiscal %s não esta definida para"
" ser exportação "
"para a SEFAZ.\n"
) % inv.fiscal_number
elif not inv.issuer == "0":
err_msg += (
"O Documento Fiscal %s é do tipo externa e "
"não pode ser exportada para a "
"receita.\n"
) % inv.fiscal_number
else:
inv.write(
{
"nfe_export_date": False,
"nfe_access_key": False,
"nfe_status": False,
"nfe_date": False,
}
)
message = (
"O Documento Fiscal %s foi exportado." % inv.fiscal_number
)
invoices.append(inv)
company_ids.append(inv.company_id.id)
export_inv_numbers.append(inv.fiscal_number)
if len(set(company_ids)) > 1:
err_msg += (
"Não é permitido exportar Documentos Fiscais de "
"mais de uma empresa, por favor selecione "
"Documentos Fiscais da mesma empresa."
)
if len(export_inv_numbers) > 1:
name = "nfes{}-{}.{}".format(
time.strftime("%d-%m-%Y"),
self.env["ir.sequence"].get("nfe.export"),
data.file_type,
)
else:
name = "nfe{}.{}".format(export_inv_numbers[0], data.file_type)
mod_serializer = __import__(
("openerp.addons.l10n_br_account_product" ".sped.nfe.serializer.")
+ data.file_type,
globals(),
locals(),
data.file_type,
)
func = mod_serializer.nfe_export
for invoice in invoices:
invoice.nfe_export_date = datetime.now()
nfes = func(invoices, data.nfe_environment, inv.nfe_version)
for nfe in nfes:
nfe_file = nfe["nfe"].encode("utf8")
data.write(
{"file": base64.b64encode(nfe_file), "state": "done", "name": name}
)
if err_msg:
raise UserError(_(err_msg))
view_rec = self.env.ref(
"l10n_br_account_product." "l10n_br_account_product_nfe_export_invoice_form"
)
view_id = view_rec and view_rec.id or False
return {
"view_type": "form",
"view_id": [view_id],
"view_mode": "form",
"res_model": "l10n_br_account_product.nfe_export_invoice",
"res_id": data.id,
"type": "ir.actions.act_window",
"target": "new",
"context": data.env.context,
}
class L10nBrAccountNfeExportInvoiceResult(models.TransientModel):
_name = "l10n_br_account_product.nfe_export_invoice_result"
wizard_id = fields.Many2one(
comodel_name="l10n_br_account_product.nfe_export_invoice",
string="Wizard ID",
ondelete="cascade",
)
document = fields.Char(string="Documento", size=255)
status = fields.Selection(selection=[("success", "Sucesso"), ("error", "Erro")])
message = fields.Char(string="Mensagem", size=255) | null |
outputs | """
generalized_inner_product
=========================
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.METHOD_NAME import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class generalized_inner_product(Operator):
"""Computes a general notion of inner product between two fields of
possibly different dimensionality.
Parameters
----------
fieldA : Field or FieldsContainer or float
Field or fields container with only one field
is expected
fieldB : Field or FieldsContainer or float
Field or fields container with only one field
is expected
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.math.generalized_inner_product()
>>> # Make input connections
>>> my_fieldA = dpf.Field()
>>> op.inputs.fieldA.connect(my_fieldA)
>>> my_fieldB = dpf.Field()
>>> op.inputs.fieldB.connect(my_fieldB)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.math.generalized_inner_product(
... fieldA=my_fieldA,
... fieldB=my_fieldB,
... )
>>> # Get output data
>>> result_field = op.outputs.field()
"""
def __init__(self, fieldA=None, fieldB=None, config=None, server=None):
super().__init__(name="generalized_inner_product", config=config, server=server)
self._inputs = InputsGeneralizedInnerProduct(self)
self._outputs = OutputsGeneralizedInnerProduct(self)
if fieldA is not None:
self.inputs.fieldA.connect(fieldA)
if fieldB is not None:
self.inputs.fieldB.connect(fieldB)
@staticmethod
def _spec():
description = """Computes a general notion of inner product between two fields of
possibly different dimensionality."""
spec = Specification(
description=description,
map_input_pin_spec={
0: PinSpecification(
name="fieldA",
type_names=[
"field",
"fields_container",
"double",
"vector<double>",
],
optional=False,
document="""Field or fields container with only one field
is expected""",
),
1: PinSpecification(
name="fieldB",
type_names=[
"field",
"fields_container",
"double",
"vector<double>",
],
optional=False,
document="""Field or fields container with only one field
is expected""",
),
},
map_output_pin_spec={
0: PinSpecification(
name="field",
type_names=["field"],
optional=False,
document="""""",
),
},
)
return spec
@staticmethod
def default_config(server=None):
"""Returns the default config of the operator.
This config can then be changed to the user needs and be used to
instantiate the operator. The Configuration allows to customize
how the operation will be processed by the operator.
Parameters
----------
server : server.DPFServer, optional
Server with channel connected to the remote or local instance. When
``None``, attempts to use the global server.
"""
return Operator.default_config(name="generalized_inner_product", server=server)
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsGeneralizedInnerProduct
"""
return super().inputs
@property
def METHOD_NAME(self):
"""Enables to get outputs of the operator by evaluating it
Returns
--------
outputs : OutputsGeneralizedInnerProduct
"""
return super().METHOD_NAME
class InputsGeneralizedInnerProduct(_Inputs):
"""Intermediate class used to connect user inputs to
generalized_inner_product operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.generalized_inner_product()
>>> my_fieldA = dpf.Field()
>>> op.inputs.fieldA.connect(my_fieldA)
>>> my_fieldB = dpf.Field()
>>> op.inputs.fieldB.connect(my_fieldB)
"""
def __init__(self, op: Operator):
super().__init__(generalized_inner_product._spec().inputs, op)
self._fieldA = Input(generalized_inner_product._spec().input_pin(0), 0, op, -1)
self._inputs.append(self._fieldA)
self._fieldB = Input(generalized_inner_product._spec().input_pin(1), 1, op, -1)
self._inputs.append(self._fieldB)
@property
def fieldA(self):
"""Allows to connect fieldA input to the operator.
Field or fields container with only one field
is expected
Parameters
----------
my_fieldA : Field or FieldsContainer or float
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.generalized_inner_product()
>>> op.inputs.fieldA.connect(my_fieldA)
>>> # or
>>> op.inputs.fieldA(my_fieldA)
"""
return self._fieldA
@property
def fieldB(self):
"""Allows to connect fieldB input to the operator.
Field or fields container with only one field
is expected
Parameters
----------
my_fieldB : Field or FieldsContainer or float
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.generalized_inner_product()
>>> op.inputs.fieldB.connect(my_fieldB)
>>> # or
>>> op.inputs.fieldB(my_fieldB)
"""
return self._fieldB
class OutputsGeneralizedInnerProduct(_Outputs):
"""Intermediate class used to get outputs from
generalized_inner_product operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.generalized_inner_product()
>>> # Connect inputs : op.inputs. ...
>>> result_field = op.outputs.field()
"""
def __init__(self, op: Operator):
super().__init__(generalized_inner_product._spec().METHOD_NAME, op)
self._field = Output(generalized_inner_product._spec().output_pin(0), 0, op)
self._outputs.append(self._field)
@property
def field(self):
"""Allows to get field output of the operator
Returns
----------
my_field : Field
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.generalized_inner_product()
>>> # Connect inputs : op.inputs. ...
>>> result_field = op.outputs.field()
""" # noqa: E501
return self._field | null |
interpret args | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
args = sys.argv
def METHOD_NAME():
"""Interprets the command line arguments, and returns a dictionary."""
parser = argparse.ArgumentParser()
parser.add_argument("--no_gpus", type=bool, default=1)
# Data parameters
parser.add_argument(
"--raw_train_filename", type=str, default="../atis_data/data/resplit/processed/train_with_tables.pkl"
)
parser.add_argument(
"--raw_dev_filename", type=str, default="../atis_data/data/resplit/processed/dev_with_tables.pkl"
)
parser.add_argument(
"--raw_validation_filename", type=str, default="../atis_data/data/resplit/processed/valid_with_tables.pkl"
)
parser.add_argument(
"--raw_test_filename", type=str, default="../atis_data/data/resplit/processed/test_with_tables.pkl"
)
parser.add_argument("--data_directory", type=str, default="processed_data")
parser.add_argument("--processed_train_filename", type=str, default="train.pkl")
parser.add_argument("--processed_dev_filename", type=str, default="dev.pkl")
parser.add_argument("--processed_validation_filename", type=str, default="validation.pkl")
parser.add_argument("--processed_test_filename", type=str, default="test.pkl")
parser.add_argument("--database_schema_filename", type=str, default=None)
parser.add_argument("--embedding_filename", type=str, default=None)
parser.add_argument("--input_vocabulary_filename", type=str, default="input_vocabulary.pkl")
parser.add_argument("--output_vocabulary_filename", type=str, default="output_vocabulary.pkl")
parser.add_argument("--input_key", type=str, default="utterance")
parser.add_argument("--anonymize", type=bool, default=False)
parser.add_argument("--anonymization_scoring", type=bool, default=False)
parser.add_argument("--use_snippets", type=bool, default=False)
parser.add_argument("--use_previous_query", type=bool, default=True)
parser.add_argument("--maximum_queries", type=int, default=1)
parser.add_argument("--use_copy_switch", type=bool, default=False)
parser.add_argument("--use_query_attention", type=bool, default=True)
parser.add_argument("--use_utterance_attention", type=bool, default=True)
parser.add_argument("--scheduler", type=bool, default=False)
parser.add_argument("--use_bert", type=bool, default=True)
parser.add_argument("--bert_input_version", type=str, default="v1")
parser.add_argument("--fine_tune_bert", type=bool, default=True)
parser.add_argument("--lr_bert", default=1e-5, type=float, help="BERT model learning rate.")
# Debugging/logging parameters
parser.add_argument("--reload_embedding", type=bool, default=False)
parser.add_argument("--logdir", type=str, default="logs")
parser.add_argument("--deterministic", type=bool, default=False)
parser.add_argument("--num_train", type=int, default=-1)
parser.add_argument("--logfile", type=str, default="log.txt")
parser.add_argument("--results_file", type=str, default="results.txt")
# Model architecture
parser.add_argument("--input_embedding_size", type=int, default=300)
parser.add_argument("--output_embedding_size", type=int, default=300)
parser.add_argument("--encoder_state_size", type=int, default=300)
parser.add_argument("--decoder_state_size", type=int, default=300)
parser.add_argument("--encoder_num_layers", type=int, default=1)
parser.add_argument("--decoder_num_layers", type=int, default=1)
parser.add_argument("--snippet_num_layers", type=int, default=1)
parser.add_argument("--maximum_utterances", type=int, default=5)
parser.add_argument("--state_positional_embeddings", type=bool, default=True)
parser.add_argument("--positional_embedding_size", type=int, default=50)
parser.add_argument("--snippet_age_embedding", type=bool, default=False)
parser.add_argument("--snippet_age_embedding_size", type=int, default=64)
parser.add_argument("--max_snippet_age_embedding", type=int, default=4)
parser.add_argument("--previous_decoder_snippet_encoding", type=bool, default=False)
parser.add_argument("--discourse_level_lstm", type=bool, default=True)
parser.add_argument("--use_schema_attention", type=bool, default=True)
parser.add_argument("--use_encoder_attention", type=bool, default=True)
parser.add_argument("--use_schema_encoder", type=bool, default=True)
parser.add_argument("--use_schema_self_attention", type=bool, default=False)
parser.add_argument("--use_schema_encoder_2", type=bool, default=False)
# Training parameters
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--train_maximum_sql_length", type=int, default=400) # 200
parser.add_argument("--train_evaluation_size", type=int, default=100)
parser.add_argument("--dropout_amount", type=float, default=0.5)
parser.add_argument("--initial_patience", type=float, default=10.0)
parser.add_argument("--patience_ratio", type=float, default=1.01)
parser.add_argument("--initial_learning_rate", type=float, default=1e-3)
parser.add_argument("--learning_rate_ratio", type=float, default=0.9)
parser.add_argument("--interaction_level", type=bool, default=True)
parser.add_argument("--reweight_batch", type=bool, default=True)
parser.add_argument("--gnn_layer_number", type=int, default=1)
parser.add_argument("--clip", type=float, default=5.0)
parser.add_argument("--warmup_step", type=int, default=1000)
# Setting
parser.add_argument("--train", type=bool, default=False)
parser.add_argument("--debug", type=bool, default=False)
parser.add_argument("--evaluate", type=bool, default=False)
parser.add_argument("--attention", type=bool, default=False)
parser.add_argument("--save_file", type=str, default="")
parser.add_argument("--enable_testing", type=bool, default=False)
parser.add_argument("--use_predicted_queries", type=bool, default=False)
parser.add_argument("--evaluate_split", type=str, default="valid")
parser.add_argument("--evaluate_with_gold_forcing", type=bool, default=False)
parser.add_argument("--eval_maximum_sql_length", type=int, default=400)
parser.add_argument("--results_note", type=str, default="")
parser.add_argument("--compute_metrics", type=bool, default=False)
parser.add_argument("--reference_results", type=str, default="")
parser.add_argument("--interactive", type=bool, default=False)
parser.add_argument("--database_username", type=str, default="aviarmy")
parser.add_argument("--database_password", type=str, default="aviarmy")
parser.add_argument("--database_timeout", type=int, default=2)
parser.add_argument("--all_in_one_trainer", type=bool, default=False)
args = parser.parse_args()
if not os.path.exists(args.logdir):
os.makedirs(args.logdir)
if not (args.train or args.evaluate or args.interactive or args.attention):
raise ValueError("You need to be training or evaluating")
if args.enable_testing and not args.evaluate:
raise ValueError("You should evaluate the model if enabling testing")
return args | null |
array model form | #!/bin/env python
__author__ = 'dongyun.zdy'
import math
import numpy as np
from scipy.optimize import leastsq
from scipy.optimize import curve_fit
import sys
from lmfit import Model
import getopt
import os
#
# def array_model_form(args):
# # (
# # Nelem,
# # ) = args
#
# Telem_ence = 0.00898860
# Telem_copy = 0.00631888
#
# Nelem = args
#
# ELEM_PER_PAGE = 1024
# extend_cnt = math.ceil(math.log(float(Nelem)/ELEM_PER_PAGE, 2))
# if extend_cnt < 0:
# extend_cnt = 0
# copy_cnt = ELEM_PER_PAGE * (math.pow(2, extend_cnt) - 1)
#
# total_cost = Telem_ence * Nelem
# #total_cost += Tmem_alloc * extend_cnt
# total_cost += Telem_copy * copy_cnt
#
# return total_cost
def METHOD_NAME(args,
#Tstartup,
Telem_ence,
Telem_copy,
#Tmem_alloc
):
# (
# Nelem,
# ) = args
Nelem = args
ELEM_PER_PAGE = 1024
extend_cnt = math.ceil(math.log(float(Nelem)/ELEM_PER_PAGE, 2))
if extend_cnt < 0:
extend_cnt = 0
copy_cnt = ELEM_PER_PAGE * (math.pow(2, extend_cnt) - 1)
total_cost = Telem_ence * Nelem
#total_cost += Tmem_alloc * extend_cnt
total_cost += Telem_copy * copy_cnt
return total_cost
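# Worked example of the cost model above (numbers are illustrative, not fitted
# values): with Nelem = 3000 and ELEM_PER_PAGE = 1024, extend_cnt =
# ceil(log2(3000 / 1024)) = 2, copy_cnt = 1024 * (2**2 - 1) = 3072, and the
# cost is Telem_ence * 3000 + Telem_copy * 3072.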
def material_model_arr(arg_sets,
# Tstartup,
Telem_ence,
Telem_copy,
#Tmem_alloc
):
res = []
for single_arg_set in arg_sets:
res.append(METHOD_NAME(single_arg_set,
# Tstartup,
Telem_ence,
Telem_copy,
#Tmem_alloc
))
return np.array(res)
material_model = Model(material_model_arr)
# material_model.set_param_hint("Tstartup", min=0.0)
material_model.set_param_hint("Telem_ence", min=0.0)
material_model.set_param_hint("Telem_copy", min=0.0)
#material_model.set_param_hint("Tmem_alloc", min=0.0)
def extract_info_from_line(line):
splited = line.split(",")
line_info = []
for item in splited:
line_info.append(float(item))
return line_info
if __name__ == '__main__':
#file_name = "scan_model.res.formal.prep"
#out_file_name = "scan_model.fit"
file_name = "array_result_final"
out_file_name = "array_model"
if os.path.exists(out_file_name):
os.remove(out_file_name)
#sys.argv.extend("-i arr.prep -o arr.model".split(" "))
output_fit_res = True
wrong_arg = False
opts,args = getopt.getopt(sys.argv[1:],"i:o:")
for op, value in opts:
if "-i" == op:
file_name = value
elif "-o" == op:
output_fit_res = True
out_file_name = value
else:
wrong_arg = True
if wrong_arg:
print "wrong arg"
sys.exit(1)
file = open(file_name, "r")
arg_sets = []
times = []
case_params = []
for line in file:
if line.startswith('#'):
continue
case_param = extract_info_from_line(line)
case_params.append(case_param)
arg_sets.append((case_param[0]))
times.append(case_param[1])
file.close()
arg_sets_np = np.array(arg_sets)
times_np = np.array(times)
#10, 0.20406430879623488, 0.016618100054245379, 14.0, 4.5, 37.0, -0.005, 0.5, -7.0
result = material_model.fit(times_np, arg_sets=arg_sets_np,
# Tstartup=10.0,
Telem_ence=1.0,
Telem_copy=1.0,
#Tmem_alloc=1.0
)
# res_line = str(result.best_values["Tstartup"]) + ","
res_line = str(result.best_values["Telem_ence"]) + ","
res_line += str(result.best_values["Telem_copy"])# + ","
#res_line += str(result.best_values["Tmem_alloc"])
print(result.fit_report())
if output_fit_res:
out_file = open(out_file_name, "w")
out_file.write(res_line)
out_file.close() | null |
check target num partitions | # Copyright (c) Alibaba, Inc. and its affiliates.
import os
import shutil
from typing import Dict, List, Union
import torch
from torch import nn
from modelscope.utils.logger import get_logger
from modelscope.utils.torch_utils import is_master
logger = get_logger()
_DEFAULT_CFG_WITH_MODEL_TYPE = {
'gpt-moe': {
'version': 'moe',
'world_size': 8
},
'plug': {
'version': 'v1',
'world_size': 8,
'tensor_model_parallel_size': 8,
'seed': 1234
},
'mglm-text-summarization': {
'version': 'v1',
'seed': 1234
},
}
_CHECKPOINT_FORMAT = 'mp_rank_XX_model_states.pt'
_IS_MEGATRON_INITIALIZED = False
def init_megatron_util(megatron_cfg=None, model_dir=None, **kwargs):
"""Initialize megatron_util environment for megatron_based model.
If the argument `megatron_cfg` is not specified, the megatron_cfg will be loaded
from the configuration.json file in the model_dir.
Args:
megatron_cfg (Dict, optional): Megatron Config will be send to megatron_util.
model_dir (str, optional): The model path for configuration. Defaults to None.
"""
from modelscope.utils.hub import read_config
from megatron_util import initialize_megatron
assert not (megatron_cfg is None and model_dir is None), \
'cfg and model_dir cannot both be None when initializing megatron_util'
if megatron_cfg is None:
cfg = read_config(model_dir)
try:
megatron_cfg = cfg.megatron
except AttributeError:
try:
model_type = cfg.model.type
except AttributeError:
# Fit models without model type, such as mglm
model_type = cfg.pipeline.type
megatron_cfg = _DEFAULT_CFG_WITH_MODEL_TYPE[model_type] \
if model_type in _DEFAULT_CFG_WITH_MODEL_TYPE else {}
megatron_cfg.update(kwargs)
initialize_megatron(megatron_cfg)
global _IS_MEGATRON_INITIALIZED
_IS_MEGATRON_INITIALIZED = True
def is_megatron_initialized() -> bool:
return _IS_MEGATRON_INITIALIZED
def convert_megatron_checkpoint(
model: nn.Module, checkpoint_dir: Union[str, bytes, os.PathLike],
target_dir: Union[str, bytes, os.PathLike]) -> None:
"""Split or Merge checkpoint for megatron_based model.
Args:
model (nn.Module): Any megatron_based model.
checkpoint_dir (Union[str, bytes, os.PathLike]): The save path of origin checkpoint.
target_dir (Union[str, bytes, os.PathLike]): The target path of new checkpoint.
"""
def log_master(information: str):
if is_master():
logger.info(information)
if os.path.exists(os.path.join(checkpoint_dir, 'model')):
checkpoint_dir = os.path.join(checkpoint_dir, 'model')
origin_num_partitions = len(os.listdir(checkpoint_dir))
target_num_partitions = int(os.getenv('WORLD_SIZE'))
_check_origin_dir(checkpoint_dir)
METHOD_NAME(target_num_partitions)
log_master(
f'origin_num_partitions: {origin_num_partitions}, target_num_partitions: {target_num_partitions}'
)
if origin_num_partitions < target_num_partitions:
os.makedirs(target_dir, exist_ok=True)
state_dict = _split_checkpoint(
model, checkpoint_dir,
target_num_partitions // origin_num_partitions)
_save_converted_checkpoint(state_dict, target_dir)
log_master('Split checkpoints succeeded.')
elif origin_num_partitions > target_num_partitions:
os.makedirs(target_dir, exist_ok=True)
state_dict = _merge_checkpoint(
model, checkpoint_dir,
origin_num_partitions // target_num_partitions)
_save_converted_checkpoint(state_dict, target_dir)
log_master('Merge checkpoints succeeded.')
else:
shutil.copytree(checkpoint_dir, target_dir)
log_master('Copy checkpoints succeeded.')
def _check_origin_dir(origin_dir: Union[str, bytes, os.PathLike]) -> None:
filenames = os.listdir(origin_dir)
assert len(filenames) & (
len(filenames) - 1) == 0, 'The number of files must be a power of 2!'
for i in range(len(filenames)):
checkpoint_name = _CHECKPOINT_FORMAT.replace('XX', f'{i:02d}')
assert checkpoint_name in filenames, \
f'Can not find {checkpoint_name} file!'
def METHOD_NAME(num_partitions: int) -> None:
assert num_partitions & (num_partitions - 1) == 0, \
'The number of target partitions must be a power of 2!'
def _split_checkpoint(model: nn.Module, checkpoint_dir: Union[str, bytes,
os.PathLike],
num_partitions: int) -> Dict[str, torch.Tensor]:
target_rank = int(os.getenv('RANK'))
origin_rank = target_rank // num_partitions
state_dict = _load_by_rank(checkpoint_dir, origin_rank)
target_state_dict = {}
for name, parameter in model.named_parameters():
dim = _get_diff_dim(parameter, state_dict[name])
if dim == -1:
target_state_dict[name] = state_dict[name]
continue
partitions_list = _split_tensor(state_dict[name], num_partitions, dim)
target_state_dict[name] = partitions_list[target_rank
% num_partitions].clone()
return target_state_dict
def _merge_checkpoint(model: nn.Module, checkpoint_dir: Union[str, bytes,
os.PathLike],
num_partitions: int) -> Dict[str, torch.Tensor]:
target_rank = int(os.getenv('RANK'))
origin_rank_list = [
target_rank * num_partitions + i for i in range(num_partitions)
]
state_dict_list = [
_load_by_rank(checkpoint_dir, i) for i in origin_rank_list
]
target_state_dict = {}
for name, parameter in model.named_parameters():
dim = _get_diff_dim(parameter, state_dict_list[0][name])
if dim == -1:
target_state_dict[name] = state_dict_list[0][name]
continue
target_state_dict[name] = torch.cat(
[state_dict[name] for state_dict in state_dict_list],
dim=dim).clone()
return target_state_dict
def _save_converted_checkpoint(
state_dict: Dict[str, torch.Tensor],
target_dir: Union[str, bytes, os.PathLike]) -> None:
target_rank = int(os.getenv('RANK'))
target_name = _CHECKPOINT_FORMAT.replace('XX', f'{target_rank:02d}')
torch.save(state_dict, os.path.join(target_dir, target_name))
def _get_diff_dim(tensor1: torch.Tensor, tensor2: torch.Tensor) -> int:
for i, (s1, s2) in enumerate(zip(tensor1.shape, tensor2.shape)):
if s1 != s2:
return i
return -1
def _load_by_rank(checkpoint_dir: Union[str, bytes, os.PathLike],
rank: int) -> Dict[str, torch.Tensor]:
checkpoint_name = _CHECKPOINT_FORMAT.replace('XX', f'{rank:02d}')
state_dict = torch.load(
os.path.join(checkpoint_dir, checkpoint_name),
map_location=lambda storage, loc: storage)
return state_dict['module'] if 'module' in state_dict else state_dict
def _split_tensor(tensor: torch.Tensor, num_partitions: int,
partition_dim: int) -> List[torch.Tensor]:
from megatron_util import mpu
per_partition_size = mpu.utils.divide(
tensor.size(partition_dim), num_partitions)
partitions_list = torch.split(
tensor, per_partition_size, dim=partition_dim)
return partitions_list | null |
run | import json
import logging
from pathlib import Path
import hydra
import numpy as np
from omegaconf import DictConfig
from scipy.optimize import curve_fit
from scipy.stats import kendalltau, pearsonr
logger = logging.getLogger(__name__)
def rmse_score(x, y):
return np.sqrt(np.mean((x - y) ** 2))
def ncc_score(x, y):
return pearsonr(x, y)[0]
def kt_score(x, y):
return kendalltau(x, y)[0]
def std_err(x, y):
return np.std(x - y) / np.sqrt(len(x))
class Model:
"""Class to represent the mapping from mbstoi parameters to intelligibility scores.
The mapping uses a simple logistic function scaled between 0 and 100.
    The mapping parameters need to be fitted first on (mbstoi score,
    intelligibility score) pairs by calling fit().
    Once the fit has been made, predictions can be made by calling predict().
"""
params = None # The model params
def _logistic_mapping(self, x, x0, k):
"""
Logistic function
x0 - x value of the logistic's midpoint
k - the logistic growth rate or steepness of the curve
"""
L = 100 # correctness can't be over 100
return L / (1 + np.exp(-k * (x - x0)))
def fit(self, pred, intel):
"""Fit a mapping betweeen mbstoi scores and intelligibility scores."""
initial_guess = [50.0, 1.0] # Initial guess for parameter values
self.params, *_remaining_returns = curve_fit(
self._logistic_mapping, pred, intel, initial_guess
)
def predict(self, x):
"""Predict intelligilbity scores from mbstoi scores."""
# Note, fit() must be called before predictions can be made
assert self.params is not None
return self._logistic_mapping(x, self.params[0], self.params[1])
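def _example_logistic_fit():
    # Hedged usage sketch (added for clarity, not part of the original script): fit the
    # logistic mapping on made-up (mbstoi score, correctness) pairs, then map held-out
    # mbstoi scores onto the 0-100 correctness scale. All numbers are illustrative only.
    model = Model()
    model.fit(np.array([10.0, 30.0, 50.0, 70.0, 90.0]),
              np.array([5.0, 20.0, 55.0, 80.0, 95.0]))
    return model.predict(np.array([40.0, 60.0]))  # values within [0, 100]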
def compute_scores(predictions, labels):
return {
"RMSE": rmse_score(predictions, labels),
"Std": std_err(predictions, labels),
"NCC": ncc_score(predictions, labels),
"KT": kt_score(predictions, labels),
}
def read_data(pred_json: Path, label_json: Path):
# read label_json to dict
with label_json.open("r", encoding="utf-8") as fp:
labels = json.load(fp)
label_dict = {item["signal"]: item["correctness"] for item in labels}
with pred_json.open("r", encoding="utf-8") as fp:
pred_dict = json.load(fp)
prediction = [pred * 100.0 for pred in pred_dict.values()]
label = [label_dict[signal] for signal in pred_dict]
return np.array(prediction), np.array(label)
@hydra.main(config_path=".", config_name="config")
def METHOD_NAME(cfg: DictConfig) -> None:
if cfg.cpc1_track == "open":
track = "_indep"
elif cfg.cpc1_track == "closed":
track = ""
    else:
        logger.error("cpc1_track has to be closed or open")
        raise ValueError("cpc1_track has to be closed or open")
# encoder representation evaluation
prediction_dev, label_dev = read_data(
Path(cfg.path.exp_folder) / "dev_enc_similarity.json",
Path(cfg.path.cpc1_train_data) / f"metadata/CPC1.{'train'+track}.json",
)
prediction_test, label_test = read_data(
Path(cfg.path.exp_folder) / "test_enc_similarity.json",
Path(f"../test_listener_responses/CPC1.{'test'+track}.json"),
)
logger.info("Apply logistic fitting.")
model = Model()
model.fit(prediction_dev, label_dev)
fit_pred = model.predict(prediction_test)
enc_scores = compute_scores(fit_pred, label_test)
# decoder representation evaluation
prediction_dev, label_dev = read_data(
Path(cfg.path.exp_folder) / "dev_dec_similarity.json",
Path(cfg.path.cpc1_train_data) / f"metadata/CPC1.{'train'+track}.json",
)
prediction_test, label_test = read_data(
Path(cfg.path.exp_folder) / "test_dec_similarity.json",
Path(f"../test_listener_responses/CPC1.{'test'+track}.json"),
)
logger.info("Apply logistic fitting.")
model = Model()
model.fit(prediction_dev, label_dev)
fit_pred = model.predict(prediction_test)
dec_scores = compute_scores(fit_pred, label_test)
results_file = Path(cfg.path.exp_folder) / "results.json"
with results_file.open("w", encoding="utf-8") as fp:
json.dump({"enc_results": enc_scores, "dec_results": dec_scores}, fp)
# pylint: disable=no-value-for-parameter
if __name__ == "__main__":
METHOD_NAME() | null |
restore | import sys
from collections.abc import Callable, Iterable, Iterator, Sequence
from typing import Any, AnyStr, Generic, NamedTuple, TypeVar, overload
if sys.version_info >= (3, 9):
from types import GenericAlias
__all__ = [
"get_close_matches",
"ndiff",
"restore",
"SequenceMatcher",
"Differ",
"IS_CHARACTER_JUNK",
"IS_LINE_JUNK",
"context_diff",
"unified_diff",
"diff_bytes",
"HtmlDiff",
"Match",
]
_T = TypeVar("_T")
class Match(NamedTuple):
a: int
b: int
size: int
class SequenceMatcher(Generic[_T]):
@overload
def __init__(self, isjunk: Callable[[_T], bool] | None, a: Sequence[_T], b: Sequence[_T], autojunk: bool = True) -> None: ...
@overload
def __init__(self, *, a: Sequence[_T], b: Sequence[_T], autojunk: bool = True) -> None: ...
@overload
def __init__(
self: SequenceMatcher[str],
isjunk: Callable[[str], bool] | None = None,
a: Sequence[str] = "",
b: Sequence[str] = "",
autojunk: bool = True,
) -> None: ...
def set_seqs(self, a: Sequence[_T], b: Sequence[_T]) -> None: ...
def set_seq1(self, a: Sequence[_T]) -> None: ...
def set_seq2(self, b: Sequence[_T]) -> None: ...
if sys.version_info >= (3, 9):
def find_longest_match(self, alo: int = 0, ahi: int | None = None, blo: int = 0, bhi: int | None = None) -> Match: ...
else:
def find_longest_match(self, alo: int, ahi: int, blo: int, bhi: int) -> Match: ...
def get_matching_blocks(self) -> list[Match]: ...
def get_opcodes(self) -> list[tuple[str, int, int, int, int]]: ...
def get_grouped_opcodes(self, n: int = 3) -> Iterable[list[tuple[str, int, int, int, int]]]: ...
def ratio(self) -> float: ...
def quick_ratio(self) -> float: ...
def real_quick_ratio(self) -> float: ...
if sys.version_info >= (3, 9):
def __class_getitem__(cls, item: Any) -> GenericAlias: ...
@overload
def get_close_matches(word: AnyStr, possibilities: Iterable[AnyStr], n: int = 3, cutoff: float = 0.6) -> list[AnyStr]: ...
@overload
def get_close_matches(
word: Sequence[_T], possibilities: Iterable[Sequence[_T]], n: int = 3, cutoff: float = 0.6
) -> list[Sequence[_T]]: ...
class Differ:
def __init__(self, linejunk: Callable[[str], bool] | None = None, charjunk: Callable[[str], bool] | None = None) -> None: ...
def compare(self, a: Sequence[str], b: Sequence[str]) -> Iterator[str]: ...
def IS_LINE_JUNK(line: str, pat: Any = ...) -> bool: ... # pat is undocumented
def IS_CHARACTER_JUNK(ch: str, ws: str = " \t") -> bool: ... # ws is undocumented
def unified_diff(
a: Sequence[str],
b: Sequence[str],
fromfile: str = "",
tofile: str = "",
fromfiledate: str = "",
tofiledate: str = "",
n: int = 3,
lineterm: str = "\n",
) -> Iterator[str]: ...
def context_diff(
a: Sequence[str],
b: Sequence[str],
fromfile: str = "",
tofile: str = "",
fromfiledate: str = "",
tofiledate: str = "",
n: int = 3,
lineterm: str = "\n",
) -> Iterator[str]: ...
def ndiff(
a: Sequence[str],
b: Sequence[str],
linejunk: Callable[[str], bool] | None = None,
charjunk: Callable[[str], bool] | None = ...,
) -> Iterator[str]: ...
class HtmlDiff:
def __init__(
self,
tabsize: int = 8,
wrapcolumn: int | None = None,
linejunk: Callable[[str], bool] | None = None,
charjunk: Callable[[str], bool] | None = ...,
) -> None: ...
def make_file(
self,
fromlines: Sequence[str],
tolines: Sequence[str],
fromdesc: str = "",
todesc: str = "",
context: bool = False,
numlines: int = 5,
*,
charset: str = "utf-8",
) -> str: ...
def make_table(
self,
fromlines: Sequence[str],
tolines: Sequence[str],
fromdesc: str = "",
todesc: str = "",
context: bool = False,
numlines: int = 5,
) -> str: ...
def METHOD_NAME(delta: Iterable[str], which: int) -> Iterator[str]: ...
def diff_bytes(
dfunc: Callable[[Sequence[str], Sequence[str], str, str, str, str, int, str], Iterator[str]],
a: Iterable[bytes | bytearray],
b: Iterable[bytes | bytearray],
fromfile: bytes | bytearray = b"",
tofile: bytes | bytearray = b"",
fromfiledate: bytes | bytearray = b"",
tofiledate: bytes | bytearray = b"",
n: int = 3,
lineterm: bytes | bytearray = b"\n",
) -> Iterator[bytes]: ... | null |
inherits filter | # Copyright (C) 2011 Chris Dekter
# Copyright (C) 2019-2020 Thomas Hess <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import typing
class AbstractWindowFilter:
def __init__(self):
self.windowInfoRegex = None
self.isRecursive = False
def get_serializable(self):
if self.windowInfoRegex is not None:
return {"regex": self.windowInfoRegex.pattern, "isRecursive": self.isRecursive}
else:
return {"regex": None, "isRecursive": False}
def load_from_serialized(self, data):
try:
if isinstance(data, dict): # check needed for data from versions < 0.80.4
self.set_window_titles(data["regex"])
self.isRecursive = data["isRecursive"]
else:
self.set_window_titles(data)
except re.error as e:
raise e
def copy_window_filter(self, window_filter):
self.windowInfoRegex = window_filter.windowInfoRegex
self.isRecursive = window_filter.isRecursive
def set_window_titles(self, regex):
if regex is not None:
try:
self.windowInfoRegex = re.compile(regex, re.UNICODE)
except re.error as e:
raise e
else:
self.windowInfoRegex = regex
def set_filter_recursive(self, recurse):
self.isRecursive = recurse
def has_filter(self) -> bool:
return self.windowInfoRegex is not None
def METHOD_NAME(self) -> bool:
if self.parent is not None:
return self.parent.get_applicable_regex(True) is not None
return False
def get_child_filter(self):
if self.isRecursive and self.windowInfoRegex is not None:
return self.get_filter_regex()
elif self.parent is not None:
return self.parent.get_child_filter()
else:
return ""
def get_filter_regex(self):
"""
        Used by the GUI to obtain a human-readable version of the filter.
        """
        if self.windowInfoRegex is not None:
            return self.windowInfoRegex.pattern
elif self.parent is not None:
return self.parent.get_child_filter()
else:
return ""
def filter_matches(self, otherFilter):
# XXX Should this be and?
if otherFilter is None or self.get_applicable_regex() is None:
return True
return otherFilter == self.get_applicable_regex().pattern
def same_filter_as_item(self, otherItem):
if not isinstance(otherItem, AbstractWindowFilter):
return False
        other_regex = otherItem.get_applicable_regex()
        return self.filter_matches(other_regex.pattern if other_regex is not None else None)
def get_applicable_regex(self, forChild=False):
if self.windowInfoRegex is not None:
if (forChild and self.isRecursive) or not forChild:
return self.windowInfoRegex
elif self.parent is not None:
return self.parent.get_applicable_regex(True)
return None
def _should_trigger_window_title(self, window_info):
r = self.get_applicable_regex() # type: typing.Pattern
if r is not None:
return bool(r.match(window_info.wm_title)) or bool(r.match(window_info.wm_class))
else:
return True | null |
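# Hedged usage sketch (added for clarity, not part of autokey itself). The real model
# classes that mix AbstractWindowFilter in provide a `parent` attribute; the stand-in
# below fakes that, and FakeWindowInfo only mimics the wm_title/wm_class fields used here.
def _example_window_filter() -> bool:
    import collections
    FakeWindowInfo = collections.namedtuple("FakeWindowInfo", ["wm_title", "wm_class"])
    class _StandaloneFilter(AbstractWindowFilter):
        parent = None
    item = _StandaloneFilter()
    item.set_window_titles(".*Firefox.*")
    return item._should_trigger_window_title(
        FakeWindowInfo(wm_title="Mozilla Firefox", wm_class="Navigator.Firefox"))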
test floordiv 128 64 | import itertools
import contextlib
import operator
import pytest
import numpy as np
import numpy.core._multiarray_tests as mt
from numpy.testing import assert_raises, assert_equal
INT64_MAX = np.iinfo(np.int64).max
INT64_MIN = np.iinfo(np.int64).min
INT64_MID = 2**32
# int128 is not two's complement, the sign bit is separate
INT128_MAX = 2**128 - 1
INT128_MIN = -INT128_MAX
INT128_MID = 2**64
INT64_VALUES = (
[INT64_MIN + j for j in range(20)] +
[INT64_MAX - j for j in range(20)] +
[INT64_MID + j for j in range(-20, 20)] +
[2*INT64_MID + j for j in range(-20, 20)] +
[INT64_MID//2 + j for j in range(-20, 20)] +
list(range(-70, 70))
)
INT128_VALUES = (
[INT128_MIN + j for j in range(20)] +
[INT128_MAX - j for j in range(20)] +
[INT128_MID + j for j in range(-20, 20)] +
[2*INT128_MID + j for j in range(-20, 20)] +
[INT128_MID//2 + j for j in range(-20, 20)] +
list(range(-70, 70)) +
[False] # negative zero
)
INT64_POS_VALUES = [x for x in INT64_VALUES if x > 0]
@contextlib.contextmanager
def exc_iter(*args):
"""
Iterate over Cartesian product of *args, and if an exception is raised,
add information of the current iterate.
"""
value = [None]
def iterate():
for v in itertools.product(*args):
value[0] = v
yield v
try:
yield iterate()
except Exception:
import traceback
msg = "At: %r\n%s" % (repr(value[0]),
traceback.format_exc())
raise AssertionError(msg)
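# Hedged usage sketch (added for clarity, not part of the original test module): exc_iter
# wraps an iteration so that a failing check reports the offending operand pair in the
# AssertionError it raises.
def _example_exc_iter():
    with exc_iter([1, 2], [3, 4]) as it:
        for a, b in it:
            assert a + b == b + a  # a failure here would name the (a, b) pair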
def test_safe_binop():
# Test checked arithmetic routines
ops = [
(operator.add, 1),
(operator.sub, 2),
(operator.mul, 3)
]
with exc_iter(ops, INT64_VALUES, INT64_VALUES) as it:
for xop, a, b in it:
pyop, op = xop
c = pyop(a, b)
if not (INT64_MIN <= c <= INT64_MAX):
assert_raises(OverflowError, mt.extint_safe_binop, a, b, op)
else:
d = mt.extint_safe_binop(a, b, op)
if c != d:
# assert_equal is slow
assert_equal(d, c)
def test_to_128():
with exc_iter(INT64_VALUES) as it:
for a, in it:
b = mt.extint_to_128(a)
if a != b:
assert_equal(b, a)
def test_to_64():
with exc_iter(INT128_VALUES) as it:
for a, in it:
if not (INT64_MIN <= a <= INT64_MAX):
assert_raises(OverflowError, mt.extint_to_64, a)
else:
b = mt.extint_to_64(a)
if a != b:
assert_equal(b, a)
def test_mul_64_64():
with exc_iter(INT64_VALUES, INT64_VALUES) as it:
for a, b in it:
c = a * b
d = mt.extint_mul_64_64(a, b)
if c != d:
assert_equal(d, c)
def test_add_128():
with exc_iter(INT128_VALUES, INT128_VALUES) as it:
for a, b in it:
c = a + b
if not (INT128_MIN <= c <= INT128_MAX):
assert_raises(OverflowError, mt.extint_add_128, a, b)
else:
d = mt.extint_add_128(a, b)
if c != d:
assert_equal(d, c)
def test_sub_128():
with exc_iter(INT128_VALUES, INT128_VALUES) as it:
for a, b in it:
c = a - b
if not (INT128_MIN <= c <= INT128_MAX):
assert_raises(OverflowError, mt.extint_sub_128, a, b)
else:
d = mt.extint_sub_128(a, b)
if c != d:
assert_equal(d, c)
def test_neg_128():
with exc_iter(INT128_VALUES) as it:
for a, in it:
b = -a
c = mt.extint_neg_128(a)
if b != c:
assert_equal(c, b)
def test_shl_128():
with exc_iter(INT128_VALUES) as it:
for a, in it:
if a < 0:
b = -(((-a) << 1) & (2**128-1))
else:
b = (a << 1) & (2**128-1)
c = mt.extint_shl_128(a)
if b != c:
assert_equal(c, b)
def test_shr_128():
with exc_iter(INT128_VALUES) as it:
for a, in it:
if a < 0:
b = -((-a) >> 1)
else:
b = a >> 1
c = mt.extint_shr_128(a)
if b != c:
assert_equal(c, b)
def test_gt_128():
with exc_iter(INT128_VALUES, INT128_VALUES) as it:
for a, b in it:
c = a > b
d = mt.extint_gt_128(a, b)
if c != d:
assert_equal(d, c)
@pytest.mark.slow
def test_divmod_128_64():
with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
for a, b in it:
if a >= 0:
c, cr = divmod(a, b)
else:
c, cr = divmod(-a, b)
c = -c
cr = -cr
d, dr = mt.extint_divmod_128_64(a, b)
            if c != d or cr != dr or b*d + dr != a:
assert_equal(d, c)
assert_equal(dr, cr)
assert_equal(b*d + dr, a)
def METHOD_NAME():
with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
for a, b in it:
c = a // b
d = mt.extint_floordiv_128_64(a, b)
if c != d:
assert_equal(d, c)
def test_ceildiv_128_64():
with exc_iter(INT128_VALUES, INT64_POS_VALUES) as it:
for a, b in it:
c = (a + b - 1) // b
d = mt.extint_ceildiv_128_64(a, b)
if c != d:
assert_equal(d, c) | null |
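# Hedged aside (added for clarity, not part of the original tests): for positive b,
# the reference expression used above, (a + b - 1) // b, equals the ceiling of a / b,
# which is what extint_ceildiv_128_64 is expected to compute.
def _example_ceildiv_identity():
    import math
    for a, b in [(7, 3), (-7, 3), (0, 5)]:
        assert (a + b - 1) // b == math.ceil(a / b)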
test cossin error missing partitioning | import pytest
import numpy as np
from numpy.random import seed
from numpy.testing import assert_allclose
from scipy.linalg.lapack import _compute_lwork
from scipy.stats import ortho_group, unitary_group
from scipy.linalg import cossin, get_lapack_funcs
REAL_DTYPES = (np.float32, np.float64)
COMPLEX_DTYPES = (np.complex64, np.complex128)
DTYPES = REAL_DTYPES + COMPLEX_DTYPES
@pytest.mark.parametrize('dtype_', DTYPES)
@pytest.mark.parametrize('m, p, q',
[
(2, 1, 1),
(3, 2, 1),
(3, 1, 2),
(4, 2, 2),
(4, 1, 2),
(40, 12, 20),
(40, 30, 1),
(40, 1, 30),
(100, 50, 1),
(100, 50, 50),
])
@pytest.mark.parametrize('swap_sign', [True, False])
def test_cossin(dtype_, m, p, q, swap_sign):
seed(1234)
if dtype_ in COMPLEX_DTYPES:
x = np.array(unitary_group.rvs(m), dtype=dtype_)
else:
x = np.array(ortho_group.rvs(m), dtype=dtype_)
u, cs, vh = cossin(x, p, q,
swap_sign=swap_sign)
assert_allclose(x, u @ cs @ vh, rtol=0., atol=m*1e3*np.finfo(dtype_).eps)
assert u.dtype == dtype_
    # Test for float32 or float64
assert cs.dtype == np.real(u).dtype
assert vh.dtype == dtype_
u, cs, vh = cossin([x[:p, :q], x[:p, q:], x[p:, :q], x[p:, q:]],
swap_sign=swap_sign)
assert_allclose(x, u @ cs @ vh, rtol=0., atol=m*1e3*np.finfo(dtype_).eps)
assert u.dtype == dtype_
assert cs.dtype == np.real(u).dtype
assert vh.dtype == dtype_
_, cs2, vh2 = cossin(x, p, q,
compute_u=False,
swap_sign=swap_sign)
assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(vh, vh2, rtol=0., atol=10*np.finfo(dtype_).eps)
u2, cs2, _ = cossin(x, p, q,
compute_vh=False,
swap_sign=swap_sign)
assert_allclose(u, u2, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
_, cs2, _ = cossin(x, p, q,
compute_u=False,
compute_vh=False,
swap_sign=swap_sign)
assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps)
def test_cossin_mixed_types():
seed(1234)
x = np.array(ortho_group.rvs(4), dtype=np.float64)
u, cs, vh = cossin([x[:2, :2],
np.array(x[:2, 2:], dtype=np.complex128),
x[2:, :2],
x[2:, 2:]])
assert u.dtype == np.complex128
assert cs.dtype == np.float64
assert vh.dtype == np.complex128
assert_allclose(x, u @ cs @ vh, rtol=0.,
atol=1e4 * np.finfo(np.complex128).eps)
def test_cossin_error_incorrect_subblocks():
with pytest.raises(ValueError, match="be due to missing p, q arguments."):
cossin(([1, 2], [3, 4, 5], [6, 7], [8, 9, 10]))
def test_cossin_error_empty_subblocks():
with pytest.raises(ValueError, match="x11.*empty"):
cossin(([], [], [], []))
with pytest.raises(ValueError, match="x12.*empty"):
cossin(([1, 2], [], [6, 7], [8, 9, 10]))
with pytest.raises(ValueError, match="x21.*empty"):
cossin(([1, 2], [3, 4, 5], [], [8, 9, 10]))
with pytest.raises(ValueError, match="x22.*empty"):
cossin(([1, 2], [3, 4, 5], [2], []))
def METHOD_NAME():
with pytest.raises(ValueError, match=".*exactly four arrays.* got 2"):
cossin(unitary_group.rvs(2))
with pytest.raises(ValueError, match=".*might be due to missing p, q"):
cossin(unitary_group.rvs(4))
def test_cossin_error_non_iterable():
with pytest.raises(ValueError, match="containing the subblocks of X"):
cossin(12j)
def test_cossin_error_non_square():
with pytest.raises(ValueError, match="only supports square"):
cossin(np.array([[1, 2]]), 1, 1)
def test_cossin_error_partitioning():
x = np.array(ortho_group.rvs(4), dtype=np.float64)
with pytest.raises(ValueError, match="invalid p=0.*0<p<4.*"):
cossin(x, 0, 1)
with pytest.raises(ValueError, match="invalid p=4.*0<p<4.*"):
cossin(x, 4, 1)
with pytest.raises(ValueError, match="invalid q=-2.*0<q<4.*"):
cossin(x, 1, -2)
with pytest.raises(ValueError, match="invalid q=5.*0<q<4.*"):
cossin(x, 1, 5)
@pytest.mark.parametrize("dtype_", DTYPES)
def test_cossin_separate(dtype_):
seed(1234)
m, p, q = 250, 80, 170
pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
X = ortho_group.rvs(m) if pfx == 'or' else unitary_group.rvs(m)
X = np.array(X, dtype=dtype_)
drv, dlw = get_lapack_funcs((pfx + 'csd', pfx + 'csd_lwork'),[X])
lwval = _compute_lwork(dlw, m, p, q)
lwvals = {'lwork': lwval} if pfx == 'or' else dict(zip(['lwork',
'lrwork'],
lwval))
*_, theta, u1, u2, v1t, v2t, _ = \
drv(X[:p, :q], X[:p, q:], X[p:, :q], X[p:, q:], **lwvals)
(u1_2, u2_2), theta2, (v1t_2, v2t_2) = cossin(X, p, q, separate=True)
assert_allclose(u1_2, u1, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(u2_2, u2, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(v1t_2, v1t, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(v2t_2, v2t, rtol=0., atol=10*np.finfo(dtype_).eps)
assert_allclose(theta2, theta, rtol=0., atol=10*np.finfo(dtype_).eps) | null |
test name including spaces | # -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify it under
# the terms of the (LGPL) GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Library Lesser General Public License
# for more details at ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jurko Gospodnetić ( [email protected] )
"""
Suds SAX Element unit tests.
Implemented using the 'pytest' testing framework.
"""
if __name__ == "__main__":
import testutils
testutils.run_using_pytest(globals())
import suds
from suds.sax.element import Element
import suds.sax.parser
import pytest
import re
class TestChildAtPath:
def test_backslash_as_path_separator(self):
name1 = "child"
name2 = "grandchild"
root = self.__create_single_branch("root", name1, name2)[0]
result = root.childAtPath(name1 + "\\" + name2)
assert result is None
def test_backslash_in_name(self):
root, a, _, _ = self.__create_single_branch("root", "a", "b", "c")
b_c = Element("b\\c")
a.append(b_c)
result = root.childAtPath("a/b\\c")
assert result is b_c
def test_child_leaf(self):
root, child = self.__create_single_branch("root", "child")
result = root.childAtPath("child")
assert result is child
def test_child_not_leaf(self):
root, child, _ = self.__create_single_branch("root", "child",
"grandchild")
result = root.childAtPath("child")
assert result is child
def test_grandchild_leaf(self):
root, _, grandchild = self.__create_single_branch("root", "child",
"grandchild")
result = root.childAtPath("child/grandchild")
assert result is grandchild
def test_grandchild_not_leaf(self):
root, _, grandchild, _ = self.__create_single_branch("root", "child",
"grandchild", "great grandchild")
result = root.childAtPath("child/grandchild")
assert result is grandchild
def test_misplaced(self):
root = self.__create_single_branch("root", "a", "x", "b")[0]
result = root.childAtPath("a/b")
assert result is None
def test_missing(self):
root = Element("root")
result = root.childAtPath("an invalid path")
assert result is None
def METHOD_NAME(self):
root, _, child, _ = self.__create_single_branch("root", "dumbo",
"foo - bar", "baz")
result = root.childAtPath("dumbo/foo - bar")
assert result is child
@pytest.mark.parametrize("n", (2, 3))
def test_repeated_path_separators(self, n):
root, child, grandchild = self.__create_single_branch("root", "child",
"grandchild")
sep = "/" * n
path = "child" + sep + "grandchild"
result = root.childAtPath(path)
assert result is grandchild
def test_same_named(self):
root, _, child, _ = self.__create_single_branch("root", "a", "a", "a")
result = root.childAtPath("a/a")
assert result is child
@staticmethod
def __create_single_branch(*args):
"""
Construct a single branch element tree with given element names.
Returns a list of constructed Element nodes from root to leaf.
"""
result = []
parent = None
for name in args:
e = Element(name)
result.append(e)
if parent is not None:
parent.append(e)
parent = e
return result
class TestStringRepresentation:
# Must be consistent with how Element.str() formats this data.
str_formatted_xml = """\
<xsd:element name="ZuZu">
<xsd:simpleType>
<xsd:restriction base="xsd:string">
<xsd:enumeration value="alfa"/>
<xsd:enumeration value="beta"/>
<xsd:enumeration value="gamma"/>
</xsd:restriction>
</xsd:simpleType>
</xsd:element>"""
@staticmethod
def create_test_element(content=str_formatted_xml):
input_data = suds.byte_str(content)
xml = suds.sax.parser.Parser().parse(suds.BytesIO(input_data))
element = xml.root()
assert element.__class__ is Element
return element
def test_convert_to_unicode(self):
element = self.create_test_element()
expected = element.str()
assert str(element) == expected
def test_plain_method(self):
element = self.create_test_element(self.str_formatted_xml)
expected = re.sub(r"\s*[\r\n]\s*", "", self.str_formatted_xml)
result = element.plain()
assert result == expected
def test_str_method(self):
element = self.create_test_element(self.str_formatted_xml)
result = element.str()
assert result == self.str_formatted_xml
@pytest.mark.parametrize("name, expected_prefix, expected_name", (
("", None, ""),
("bazinga", None, "bazinga"),
("test element name", None, "test element name"),
("aaa:bbb", "aaa", "bbb"),
("aaa:", "aaa", ""),
(":aaa", "", "aaa"),
("aaa::bbb", "aaa", ":bbb"),
("aaa:bbb:ccc", "aaa", "bbb:ccc")))
def test_init_name(name, expected_prefix, expected_name):
e = Element(name)
assert e.prefix == expected_prefix
assert e.name == expected_name | null |
validate | # (C) Datadog, Inc. 2021-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
# This file is autogenerated.
# To change this file you should edit assets/configuration/spec.yaml and then run the following commands:
# ddev -x validate config -s <INTEGRATION_NAME>
# ddev -x validate models -s <INTEGRATION_NAME>
from __future__ import annotations
from types import MappingProxyType
from typing import Any, Optional, Union
from pydantic import BaseModel, ConfigDict, field_validator, model_validator
from datadog_checks.base.utils.functions import identity
from datadog_checks.base.utils.models import validation
from . import defaults, validators
class AuthToken(BaseModel):
model_config = ConfigDict(
arbitrary_types_allowed=True,
frozen=True,
)
reader: Optional[MappingProxyType[str, Any]] = None
writer: Optional[MappingProxyType[str, Any]] = None
class IgnoreMetricsByLabels(BaseModel):
model_config = ConfigDict(
arbitrary_types_allowed=True,
frozen=True,
)
target_label_key: Optional[str] = None
target_label_value_list: Optional[tuple[str, ...]] = None
class TargetMetric(BaseModel):
model_config = ConfigDict(
arbitrary_types_allowed=True,
frozen=True,
)
label_to_match: Optional[str] = None
labels_to_get: Optional[tuple[str, ...]] = None
class LabelJoins(BaseModel):
model_config = ConfigDict(
arbitrary_types_allowed=True,
frozen=True,
)
target_metric: Optional[TargetMetric] = None
class MetricPatterns(BaseModel):
model_config = ConfigDict(
arbitrary_types_allowed=True,
frozen=True,
)
exclude: Optional[tuple[str, ...]] = None
include: Optional[tuple[str, ...]] = None
class Proxy(BaseModel):
model_config = ConfigDict(
arbitrary_types_allowed=True,
frozen=True,
)
http: Optional[str] = None
https: Optional[str] = None
no_proxy: Optional[tuple[str, ...]] = None
class InstanceConfig(BaseModel):
model_config = ConfigDict(
validate_default=True,
arbitrary_types_allowed=True,
frozen=True,
)
allow_redirects: Optional[bool] = None
auth_token: Optional[AuthToken] = None
auth_type: Optional[str] = None
aws_host: Optional[str] = None
aws_region: Optional[str] = None
aws_service: Optional[str] = None
bearer_token_auth: Optional[Union[bool, str]] = None
bearer_token_path: Optional[str] = None
bearer_token_refresh_interval: Optional[int] = None
connect_timeout: Optional[float] = None
disable_generic_tags: Optional[bool] = None
empty_default_hostname: Optional[bool] = None
exclude_labels: Optional[tuple[str, ...]] = None
extra_headers: Optional[MappingProxyType[str, Any]] = None
headers: Optional[MappingProxyType[str, Any]] = None
health_service_check: Optional[bool] = None
ignore_metrics: Optional[tuple[str, ...]] = None
ignore_metrics_by_labels: Optional[IgnoreMetricsByLabels] = None
ignore_tags: Optional[tuple[str, ...]] = None
include_labels: Optional[tuple[str, ...]] = None
kerberos_auth: Optional[str] = None
kerberos_cache: Optional[str] = None
kerberos_delegate: Optional[bool] = None
kerberos_force_initiate: Optional[bool] = None
kerberos_hostname: Optional[str] = None
kerberos_keytab: Optional[str] = None
kerberos_principal: Optional[str] = None
label_joins: Optional[LabelJoins] = None
label_to_hostname: Optional[str] = None
labels_mapper: Optional[MappingProxyType[str, Any]] = None
leader_tag: Optional[bool] = None
log_requests: Optional[bool] = None
metric_patterns: Optional[MetricPatterns] = None
metrics: Optional[tuple[Union[str, MappingProxyType[str, str]], ...]] = None
min_collection_interval: Optional[float] = None
namespace: Optional[str] = None
ntlm_domain: Optional[str] = None
password: Optional[str] = None
persist_connections: Optional[bool] = None
prometheus_metrics_prefix: Optional[str] = None
prometheus_url: str
proxy: Optional[Proxy] = None
read_timeout: Optional[float] = None
request_size: Optional[float] = None
send_distribution_buckets: Optional[bool] = None
send_distribution_counts_as_monotonic: Optional[bool] = None
send_distribution_sums_as_monotonic: Optional[bool] = None
send_histograms_buckets: Optional[bool] = None
send_monotonic_counter: Optional[bool] = None
send_monotonic_with_gauge: Optional[bool] = None
service: Optional[str] = None
skip_proxy: Optional[bool] = None
tags: Optional[tuple[str, ...]] = None
timeout: Optional[float] = None
tls_ca_cert: Optional[str] = None
tls_cert: Optional[str] = None
tls_ignore_warning: Optional[bool] = None
tls_private_key: Optional[str] = None
tls_protocols_allowed: Optional[tuple[str, ...]] = None
tls_use_host_header: Optional[bool] = None
tls_verify: Optional[bool] = None
type_overrides: Optional[MappingProxyType[str, Any]] = None
use_legacy_auth_encoding: Optional[bool] = None
use_preview: Optional[bool] = None
use_process_start_time: Optional[bool] = None
username: Optional[str] = None
@model_validator(mode='before')
def _initial_validation(cls, values):
return validation.core.initialize_config(getattr(validators, 'initialize_instance', identity)(values))
@field_validator('*', mode='before')
def METHOD_NAME(cls, value, info):
field = cls.model_fields[info.field_name]
field_name = field.alias or info.field_name
if field_name in info.context['configured_fields']:
value = getattr(validators, f'instance_{info.field_name}', identity)(value, field=field)
else:
value = getattr(defaults, f'instance_{info.field_name}', lambda: value)()
return validation.utils.make_immutable(value)
@model_validator(mode='after')
def _final_validation(cls, model):
return validation.core.check_model(getattr(validators, 'check_instance', identity)(model)) | null |
test property xml type | import sys
from xsdata.codegen.models import Restrictions
from xsdata.models.enums import DataType
from xsdata.models.enums import Namespace
from xsdata.models.enums import Tag
from xsdata.utils.testing import AttrFactory
from xsdata.utils.testing import AttrTypeFactory
from xsdata.utils.testing import FactoryTestCase
class AttrTests(FactoryTestCase):
def test__eq__(self):
attr = AttrFactory.element()
clone = attr.clone()
self.assertIsNot(attr, clone)
self.assertEqual(attr, clone)
attr.default = "foo"
self.assertEqual(attr, clone)
attr.restrictions.length = 10
self.assertEqual(attr, clone)
attr.index = -1
self.assertEqual(attr, clone)
attr.namespace = __file__
self.assertNotEqual(attr, clone)
def test_property_key(self):
attr = AttrFactory.attribute(name="a", namespace="b")
self.assertEqual("Attribute.b.a", attr.key)
def test_property_is_property(self):
self.assertTrue(AttrFactory.attribute().is_attribute)
self.assertTrue(AttrFactory.any_attribute().is_attribute)
self.assertFalse(AttrFactory.element().is_attribute)
def test_property_is_enumeration(self):
self.assertTrue(AttrFactory.enumeration().is_enumeration)
self.assertFalse(AttrFactory.element().is_enumeration)
def test_property_is_factory(self):
self.assertTrue(AttrFactory.any_attribute().is_factory)
element = AttrFactory.element()
self.assertFalse(element.is_factory)
element.restrictions.max_occurs = 2
self.assertTrue(element.is_factory)
def test_property_is_group(self):
self.assertTrue(AttrFactory.group().is_group)
self.assertTrue(AttrFactory.attribute_group().is_group)
self.assertFalse(AttrFactory.element().is_group)
def test_property_is_list(self):
attr = AttrFactory.create(restrictions=Restrictions(max_occurs=2))
self.assertTrue(attr.is_list)
attr.restrictions.max_occurs = 1
self.assertFalse(attr.is_list)
def test_property_is_prohibited(self):
attr = AttrFactory.create(restrictions=Restrictions(max_occurs=0))
self.assertTrue(attr.is_prohibited)
attr.restrictions.max_occurs = 1
        self.assertFalse(attr.is_prohibited)
def test_property_is_optional(self):
attr = AttrFactory.create(restrictions=Restrictions(min_occurs=0))
self.assertTrue(attr.is_optional)
attr.restrictions.min_occurs = 1
self.assertFalse(attr.is_optional)
def test_property_is_suffix(self):
attr = AttrFactory.create()
self.assertFalse(attr.is_suffix)
attr.index = sys.maxsize
self.assertTrue(attr.is_suffix)
def test_property_is_wild_attr(self):
attr = AttrFactory.create()
self.assertFalse(attr.is_wildcard)
attr = AttrFactory.any()
self.assertTrue(attr.is_wildcard)
def test_property_is_xsi_type(self):
attr = AttrFactory.create()
self.assertFalse(attr.is_xsi_type)
attr.namespace = Namespace.XSI.uri
self.assertFalse(attr.is_xsi_type)
attr.name = "type"
self.assertTrue(attr.is_xsi_type)
def test_property_is_nameless(self):
self.assertFalse(AttrFactory.create(tag=Tag.ELEMENT).is_nameless)
self.assertFalse(AttrFactory.create(tag=Tag.ATTRIBUTE).is_nameless)
self.assertTrue(AttrFactory.create(tag=Tag.ANY).is_nameless)
def test_property_is_any_type(self):
attr = AttrFactory.create(
types=[
AttrTypeFactory.create(qname="foo"),
AttrTypeFactory.native(DataType.FLOAT),
]
)
self.assertFalse(attr.is_any_type)
attr.types.append(AttrTypeFactory.native(DataType.ANY_SIMPLE_TYPE))
self.assertTrue(attr.is_any_type)
def test_property_native_types(self):
attr = AttrFactory.create(
types=[
AttrTypeFactory.create(qname="foo"),
AttrTypeFactory.native(DataType.INT),
AttrTypeFactory.native(DataType.SHORT),
AttrTypeFactory.native(DataType.INTEGER),
AttrTypeFactory.native(DataType.FLOAT),
]
)
self.assertCountEqual([float, int], attr.native_types)
def test_property_user_types(self):
attr = AttrFactory.create(
types=[
AttrTypeFactory.create(qname="foo"),
AttrTypeFactory.native(DataType.INT),
AttrTypeFactory.native(DataType.SHORT),
AttrTypeFactory.create(qname="bar"),
]
)
self.assertCountEqual([attr.types[0], attr.types[-1]], list(attr.user_types))
def METHOD_NAME(self):
attr = AttrFactory.create(tag=Tag.ELEMENT)
self.assertEqual("Element", attr.xml_type)
attr = AttrFactory.create(tag=Tag.ATTRIBUTE)
self.assertEqual("Attribute", attr.xml_type)
attr = AttrFactory.create(tag=Tag.ANY_ATTRIBUTE)
self.assertEqual("Attributes", attr.xml_type)
attr = AttrFactory.create(tag=Tag.ANY)
self.assertEqual("Wildcard", attr.xml_type)
attr = AttrFactory.create(tag=Tag.RESTRICTION)
self.assertIsNone(attr.xml_type) | null |
set log levels | """
Command to load course blocks.
"""
import logging
from django.core.management.base import BaseCommand
import openedx.core.djangoapps.content.block_structure.api as api
import openedx.core.djangoapps.content.block_structure.store as store
import openedx.core.djangoapps.content.block_structure.tasks as tasks
from openedx.core.djangoapps.content.block_structure.config import enable_storage_backing_for_cache_in_request
from openedx.core.lib.command_utils import (
get_mutually_exclusive_required_option,
parse_course_keys,
validate_dependent_option
)
from xmodule.modulestore.django import modulestore # lint-amnesty, pylint: disable=wrong-import-order
log = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Example usage:
$ ./manage.py lms generate_course_blocks --all_courses --settings=devstack
$ ./manage.py lms generate_course_blocks 'edX/DemoX/Demo_Course' --settings=devstack
"""
args = '<course_id course_id ...>'
help = 'Generates and stores course blocks for one or more courses.'
def add_arguments(self, parser):
"""
Entry point for subclassed commands to add custom arguments.
"""
parser.add_argument(
'--courses',
dest='courses',
nargs='+',
help='Generate course blocks for the list of courses provided.',
)
parser.add_argument(
'--all_courses',
help='Generate course blocks for all courses, given the requested start and end indices.',
action='store_true',
default=False,
)
parser.add_argument(
'--enqueue_task',
help='Enqueue the tasks for asynchronous computation.',
action='store_true',
default=False,
)
parser.add_argument(
'--routing_key',
dest='routing_key',
help='Routing key to use for asynchronous computation.',
)
parser.add_argument(
'--force_update',
help='Force update of the course blocks for the requested courses.',
action='store_true',
default=False,
)
parser.add_argument(
'--start_index',
help='Starting index of course list.',
default=0,
type=int,
)
parser.add_argument(
'--end_index',
help='Ending index of course list.',
default=0,
type=int,
)
parser.add_argument(
'--with_storage',
help='Store the course blocks in Storage, overriding value of the storage_backing_for_cache waffle switch',
action='store_true',
default=False,
)
def handle(self, *args, **options):
courses_mode = get_mutually_exclusive_required_option(options, 'courses', 'all_courses')
validate_dependent_option(options, 'routing_key', 'enqueue_task')
validate_dependent_option(options, 'start_index', 'all_courses')
validate_dependent_option(options, 'end_index', 'all_courses')
if courses_mode == 'all_courses':
course_keys = [course.id for course in modulestore().get_course_summaries()]
if options.get('start_index'):
end = options.get('end_index') or len(course_keys)
course_keys = course_keys[options['start_index']:end]
else:
course_keys = parse_course_keys(options['courses'])
self.METHOD_NAME(options)
log.critical('BlockStructure: STARTED generating Course Blocks for %d courses.', len(course_keys))
self._generate_course_blocks(options, course_keys)
log.critical('BlockStructure: FINISHED generating Course Blocks for %d courses.', len(course_keys))
def METHOD_NAME(self, options):
"""
        Sets logging levels for this module and the block structure
        cache module, based on the given options.
"""
verbosity = options.get('verbosity')
if verbosity == 0:
log_level = logging.CRITICAL
elif verbosity == 1:
log_level = logging.WARNING
else:
log_level = logging.INFO
if verbosity is not None and verbosity < 3:
cache_log_level = logging.CRITICAL
else:
cache_log_level = logging.INFO
log.setLevel(log_level)
store.logger.setLevel(cache_log_level)
def _generate_course_blocks(self, options, course_keys):
"""
Generates course blocks for the given course_keys per the given options.
"""
if options.get('with_storage'):
enable_storage_backing_for_cache_in_request()
for course_key in course_keys:
try:
self._generate_for_course(options, course_key)
except Exception as ex: # pylint: disable=broad-except
log.exception(
'BlockStructure: An error occurred while generating course blocks for %s: %s',
str(course_key),
str(ex),
)
def _generate_for_course(self, options, course_key):
"""
Generates course blocks for the given course_key per the given options.
"""
if options.get('enqueue_task'):
action = tasks.update_course_in_cache_v2 if options.get('force_update') else tasks.get_course_in_cache_v2
task_options = {'routing_key': options['routing_key']} if options.get('routing_key') else {}
result = action.apply_async(
kwargs=dict(course_id=str(course_key), with_storage=options.get('with_storage')),
**task_options
)
log.info('BlockStructure: ENQUEUED generating for course: %s, task_id: %s.', course_key, result.id)
else:
log.info('BlockStructure: STARTED generating for course: %s.', course_key)
action = api.update_course_in_cache if options.get('force_update') else api.get_course_in_cache
action(course_key)
log.info('BlockStructure: FINISHED generating for course: %s.', course_key) | null |
test organization name method | import doctest
from decimal import Decimal
from uuid import uuid4
from django.test import SimpleTestCase
from nose.tools import assert_equal
from .. import matchers
from ..const import SYSTEM_URI_CASE_ID
from ..matchers import (
GivenName,
NegativeIdentifier,
OrganizationMatcher,
OrganizationName,
PatientMatcher,
PersonMatcher,
PropertyWeight,
)
def test_given_name_method():
"""
Test the GivenName ComparisonMethod as used by PersonMatcher
"""
pw = PropertyWeight('$.name[0].given', Decimal('0.3'), GivenName)
assert pw in PersonMatcher.property_weights
method = pw.method
john = {'name': [{'given': 'Henry John Colchester'.split()}]}
for resource, candidate, expected in [
# candidate "H. John" matches patient "Henry John Colchester"
(john, {'name': [{'given': ['H.', 'John']}]}, True),
# candidate "John" does not match patient "Henry John Colchester"
(john, {'name': [{'given': ['John']}]}, False),
(john, {'name': [{'given': ['Henry']}]}, True),
(john, {'name': [{'given': ['H.', 'J.', 'C.']}]}, False),
# candidate "Eric John Marwood" matches "Henry John Colchester"
(john, {'name': [{'given': ['Eric', 'John', 'Marwood']}]}, True),
]:
yield check_method, method, resource, candidate, expected
def METHOD_NAME():
"""
Test the OrganizationName method as used by OrganizationMatcher
"""
pw = PropertyWeight('$.name', Decimal('0.8'), OrganizationName)
assert pw in OrganizationMatcher.property_weights
method = pw.method
dimagi = {'name': 'Dimagi'}
for resource, candidate, expected in [
(dimagi, {'name': 'Dimagi'}, True),
(dimagi, {'name': 'DiMaggi'}, True),
(dimagi, {'name': 'Di Maggi'}, False),
(dimagi, {'name': 'dimCGI'}, True),
]:
yield check_method, method, resource, candidate, expected
def check_method(method, a, b, expected):
result = method.is_match(a, b)
assert_equal(result, expected)
def test_negative_identifier():
for a, b, expected in [
('name|Beth Harmon', 'name|Elizabeth Harmon', False),
('name|Beth', 'name|Beth', True),
('name|Elizabeth Harmon', 'name|Elisabeth Harmon', True),
('given_name|Elizabeth', 'family_name|Harmon', True),
]:
yield check_compare, NegativeIdentifier, a, b, expected
def check_compare(method_class, a, b, expected):
result = method_class.compare(a, b)
assert_equal(result, expected)
class TestPatientCandidates(SimpleTestCase):
def test_with_commcare_id(self):
case_id = str(uuid4())
patient = {
'id': case_id,
'name': [{
'text': 'Beth Harmon',
'given': ['Elizabeth'],
'family': 'Harmon',
}],
'identifier': [{
'system': SYSTEM_URI_CASE_ID,
'value': case_id,
}]
}
candidates = [
{
'id': '1',
'name': [{
'given': ['Elizabeth'],
'family': 'Harmon',
}],
'identifier': [{
'system': SYSTEM_URI_CASE_ID,
'value': str(uuid4()),
}],
},
{
'id': '2',
'name': [{'given': ['Jolene']}],
'identifier': [{
'system': SYSTEM_URI_CASE_ID,
'value': case_id,
}],
},
{
'id': '3',
'name': [{'family': 'Harmon'}],
'identifier': [{
'system': SYSTEM_URI_CASE_ID,
'value': case_id,
}],
},
]
matcher = PatientMatcher(patient)
matches = matcher.find_matches(candidates)
self.assertEqual(len(matches), 1)
self.assertEqual(matches[0]['id'], '3')
scores = [matcher.get_score(c) for c in candidates]
expected = [
Decimal('-0.4'), # Same name, different ID
Decimal('0.5'), # Same ID, different name
Decimal('1.2'), # Same ID, same family name
]
self.assertEqual(scores, expected)
def test_doctests():
results = doctest.testmod(matchers)
assert_equal(results.failed, 0) | null |
test staff can fetch the invite | import datetime
from pytest import mark
from schedule.models import ScheduleItem
pytestmark = mark.django_db
def test_fetch_an_invitation(
submission_factory,
graphql_client,
user,
schedule_item_factory,
slot_factory,
day_factory,
):
graphql_client.force_login(user)
submission = submission_factory(
speaker_id=user.id,
)
schedule_item_factory(
status=ScheduleItem.STATUS.confirmed,
speaker_invitation_notes="notes",
submission=submission,
type=ScheduleItem.TYPES.submission,
conference=submission.conference,
slot=slot_factory(
day=day_factory(
day=datetime.date(2020, 10, 10), conference=submission.conference
),
hour=datetime.time(10, 10, 0),
duration=30,
),
)
response = graphql_client.query(
"""query($submissionId: ID!) {
scheduleInvitation(submissionId: $submissionId) {
option
notes
dates {
start
end
}
}
}""",
variables={"submissionId": submission.hashid},
)
assert not response.get("errors")
assert response["data"]["scheduleInvitation"] == {
"option": "CONFIRM",
"notes": "notes",
"dates": [{"start": "2020-10-10T10:10:00", "end": "2020-10-10T10:40:00"}],
}
def test_random_user_cannot_fetch_the_invite(
submission_factory,
graphql_client,
user,
schedule_item_factory,
slot_factory,
day_factory,
):
graphql_client.force_login(user)
submission = submission_factory(
speaker_id=50000,
)
schedule_item_factory(
status=ScheduleItem.STATUS.confirmed,
speaker_invitation_notes="notes",
submission=submission,
type=ScheduleItem.TYPES.submission,
conference=submission.conference,
slot=slot_factory(
day=day_factory(
day=datetime.date(2020, 10, 10), conference=submission.conference
),
hour=datetime.time(10, 10, 0),
duration=30,
),
)
response = graphql_client.query(
"""query($submissionId: ID!) {
scheduleInvitation(submissionId: $submissionId) {
option
notes
dates {
start
end
}
}
}""",
variables={"submissionId": submission.hashid},
)
assert not response.get("errors")
assert response["data"]["scheduleInvitation"] is None
def METHOD_NAME(
submission_factory,
graphql_client,
admin_user,
schedule_item_factory,
slot_factory,
day_factory,
):
graphql_client.force_login(admin_user)
submission = submission_factory(
speaker_id=50000,
)
schedule_item_factory(
status=ScheduleItem.STATUS.confirmed,
speaker_invitation_notes="notes",
submission=submission,
type=ScheduleItem.TYPES.submission,
conference=submission.conference,
slot=slot_factory(
day=day_factory(
day=datetime.date(2020, 10, 10), conference=submission.conference
),
hour=datetime.time(10, 10, 0),
duration=30,
),
)
response = graphql_client.query(
"""query($submissionId: ID!) {
scheduleInvitation(submissionId: $submissionId) {
option
notes
}
}""",
variables={"submissionId": submission.hashid},
)
assert not response.get("errors")
assert response["data"]["scheduleInvitation"] == {
"option": "CONFIRM",
"notes": "notes",
}
def test_requires_authentication(
submission_factory,
graphql_client,
schedule_item_factory,
slot_factory,
day_factory,
):
submission = submission_factory(
speaker_id=50000,
)
schedule_item_factory(
status=ScheduleItem.STATUS.confirmed,
speaker_invitation_notes="notes",
submission=submission,
type=ScheduleItem.TYPES.submission,
conference=submission.conference,
slot=slot_factory(
day=day_factory(
day=datetime.date(2020, 10, 10), conference=submission.conference
),
hour=datetime.time(10, 10, 0),
duration=30,
),
)
response = graphql_client.query(
"""query($submissionId: ID!) {
scheduleInvitation(submissionId: $submissionId) {
option
notes
dates {
start
end
}
}
}""",
variables={"submissionId": submission.hashid},
)
assert response["errors"][0]["message"] == "User not logged in"
assert response["data"]["scheduleInvitation"] is None | null |
add key | """
Basic Module to manage performance results
"""
import logging
import json
import time
from elasticsearch import Elasticsearch, exceptions as ESExp
log = logging.getLogger(__name__)
class PerfResult:
"""
Basic Performance results object for Q-PAS team
"""
def __init__(self, uuid, crd):
"""
Initialize the object by reading some of the data from the CRD file and
        by connecting to the ES server and reading all results from it.
Args:
uuid (str): the unique uid of the test
            crd (dict): dictionary with test parameters - the test yaml file,
                which the test itself may modify.
"""
self.uuid = uuid
# Initialize the Elastic-search server parameters
self.server = crd["spec"]["elasticsearch"]["server"]
self.port = crd["spec"]["elasticsearch"]["port"]
self.index = None # place holder for the ES index name
self.new_index = None # place holder for the ES full result index name
self.all_results = {}
self.es = None # place holder for the elastic-search connection
# Creating full results dictionary
self.results = {"clustername": crd["spec"]["clustername"], "uuid": uuid}
def es_connect(self):
"""
Create Elastic-Search server connection
"""
# Creating the connection to the elastic-search
log.info(f"Connecting to ES {self.server} on port {self.port}")
try:
self.es = Elasticsearch([{"host": self.server, "port": self.port}])
except ESExp.ConnectionError:
log.warning(
"Cannot connect to ES server {}:{}".format(self.server, self.port)
)
# Testing the connection to the elastic-search
if not self.es.ping():
log.warning(
"Cannot connect to ES server {}:{}".format(self.server, self.port)
)
def es_read(self):
"""
Reading all test results from the elastic-search server
Return:
list: list of results
Assert:
if no data found in the server
"""
query = {"query": {"match": {"uuid": self.uuid}}}
results = self.es.search(index=self.index, body=query)
assert results["hits"]["hits"], "Results not found in Elasticsearch"
return results["hits"]["hits"]
def dump_to_file(self):
"""
Writing the test results data into a JSON file, which can be loaded
into the ElasticSearch server
"""
json_file = f"{self.full_log_path}/full_results.json"
self.METHOD_NAME("index_name", self.new_index)
log.info(f"Dumping data to {json_file}")
with open(json_file, "w") as outfile:
json.dump(self.results, outfile, indent=4)
def es_write(self):
"""
Writing the results to the elastic-search server, and to a JSON file
"""
# Adding the results to the ES document and JSON file
self.METHOD_NAME("all_results", self.all_results)
log.debug(json.dumps(self.results, indent=4))
self.dump_to_file()
if self.es is None:
log.warning("No elasticsearch server to write data to")
return False
log.info(f"Writing all data to ES server {self.es}")
log.info(
f"Params : index={self.new_index}, "
f"doc_type=_doc, body={self.results}, id={self.uuid}"
)
retry = 3
while retry > 0:
try:
self.es.index(
index=self.new_index,
doc_type="_doc",
body=self.results,
id=self.uuid,
)
return True
except Exception as e:
if retry > 1:
log.warning("Failed to write data to ES, retrying in 3 sec...")
retry -= 1
time.sleep(3)
else:
log.warning(f"Failed writing data with : {e}")
return False
return True
def METHOD_NAME(self, key, value):
"""
        Add the given (key, value) pair to this object's results dictionary.
Args:
key (str): String which will be the key for the value
value (*): value to add, can be any kind of data type
"""
self.results.update({key: value})
def results_link(self):
"""
        Create a link to the results of the test in the elasticsearch server
Return:
str: http link to the test results in the elastic-search server
"""
res_link = f"http://{self.server}:{self.port}/{self.new_index}/"
res_link += f'_search?q=uuid:"{self.uuid}"'
return res_link
class ResultsAnalyse(PerfResult):
"""
This class generates results for all tests as one unit
and saves them to an elastic search server on the cluster
"""
def __init__(self, uuid, crd, full_log_path, index_name):
"""
Initialize the object by reading some of the data from the CRD file and
        by connecting to the ES server and reading all results from it.
Args:
uuid (str): the unique uid of the test
            crd (dict): dictionary with test parameters - the test yaml file,
                which the test itself may modify.
full_log_path (str): the path of the results files to be found
index_name (str): index name in ES
"""
super(ResultsAnalyse, self).__init__(uuid, crd)
self.new_index = index_name
self.full_log_path = full_log_path
# make sure we have connection to the elastic search server
self.es_connect() | null |
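# Hedged usage sketch (added for clarity, not part of the original module). The CRD
# fields, uuid, index name and log path below are made up; es_connect() only warns if
# the server is unreachable, in which case es_write() still dumps the JSON file.
def _example_results_flow():
    crd = {"spec": {"clustername": "demo-cluster",
                    "elasticsearch": {"server": "es.example.com", "port": 9200}}}
    res = ResultsAnalyse("11111111-2222-3333-4444-555555555555", crd, "/tmp", "demo-index")
    res.es_connect()
    res.all_results["throughput"] = {"write_mb_s": 123.4}  # accumulate raw results
    res.es_write()  # writes /tmp/full_results.json and attempts to index into ES
    return res.results_link()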
get | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import RecoveryServicesBackupClientMixinABC, _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
vault_name: str,
resource_group_name: str,
fabric_name: str,
container_name: str,
protected_item_name: str,
operation_id: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-02-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/protectedItems/{protectedItemName}/operationResults/{operationId}",
) # pylint: disable=line-too-long
path_format_arguments = {
"vaultName": _SERIALIZER.url("vault_name", vault_name, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"fabricName": _SERIALIZER.url("fabric_name", fabric_name, "str"),
"containerName": _SERIALIZER.url("container_name", container_name, "str"),
"protectedItemName": _SERIALIZER.url("protected_item_name", protected_item_name, "str"),
"operationId": _SERIALIZER.url("operation_id", operation_id, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class ProtectedItemOperationResultsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.recoveryservicesbackup.activestamp.RecoveryServicesBackupClient`'s
:attr:`protected_item_operation_results` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def METHOD_NAME(
self,
vault_name: str,
resource_group_name: str,
fabric_name: str,
container_name: str,
protected_item_name: str,
operation_id: str,
**kwargs: Any
) -> Optional[_models.ProtectedItemResource]:
"""Fetches the result of any operation on the backup item.
:param vault_name: The name of the recovery services vault. Required.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present. Required.
:type resource_group_name: str
:param fabric_name: Fabric name associated with the backup item. Required.
:type fabric_name: str
:param container_name: Container name associated with the backup item. Required.
:type container_name: str
:param protected_item_name: Backup item name whose details are to be fetched. Required.
:type protected_item_name: str
:param operation_id: OperationID which represents the operation whose result needs to be
fetched. Required.
:type operation_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProtectedItemResource or None or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectedItemResource or None
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[Optional[_models.ProtectedItemResource]] = kwargs.pop("cls", None)
request = build_get_request(
vault_name=vault_name,
resource_group_name=resource_group_name,
fabric_name=fabric_name,
container_name=container_name,
protected_item_name=protected_item_name,
operation_id=operation_id,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize("ProtectedItemResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
METHOD_NAME.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/protectedItems/{protectedItemName}/operationResults/{operationId}"
} | null |
required name | """Slightly extends the ``botocore.validate`` package to provide better integration with our parser/serializer."""
from typing import Any, Dict, List, NamedTuple
from botocore.model import OperationModel, Shape
from botocore.validate import ParamValidator as BotocoreParamValidator
from botocore.validate import ValidationErrors as BotocoreValidationErrors
from botocore.validate import type_check
from localstack.aws.api import ServiceRequest
class Error(NamedTuple):
"""
A wrapper around ``botocore.validate`` error tuples.
Attributes:
reason The error type
name The name of the parameter the error occurred at
attributes Error type-specific attributes
"""
reason: str
name: str
attributes: Dict[str, Any]
class ParameterValidationError(Exception):
error: Error
def __init__(self, error: Error) -> None:
self.error = error
super().__init__(self.message)
@property
def reason(self):
return self.error.reason
@property
def message(self) -> str:
"""
Returns a default message for the error formatted by BotocoreValidationErrors.
:return: the exception message.
"""
return BotocoreValidationErrors()._format_error(self.error)
class MissingRequiredField(ParameterValidationError):
@property
def METHOD_NAME(self) -> str:
return self.error.attributes["required_name"]
# TODO: extend subclasses with properties from error arguments as needed. see ValidationErrors._format_error for
# which those are.
class UnknownField(ParameterValidationError):
pass
class InvalidType(ParameterValidationError):
pass
class InvalidRange(ParameterValidationError):
pass
class InvalidLength(ParameterValidationError):
pass
class JsonEncodingError(ParameterValidationError):
pass
class InvalidDocumentType(ParameterValidationError):
pass
class MoreThanOneInput(ParameterValidationError):
pass
class EmptyInput(ParameterValidationError):
pass
class ValidationErrors(BotocoreValidationErrors):
def __init__(self, shape: Shape, params: Dict[str, Any]):
super().__init__()
self.shape = shape
self.params = params
self._exceptions: List[ParameterValidationError] = []
@property
def exceptions(self):
return self._exceptions
def raise_first(self):
for error in self._exceptions:
raise error
def report(self, name, reason, **kwargs):
error = Error(reason, name, kwargs)
self._errors.append(error)
self._exceptions.append(self.to_exception(error))
def to_exception(self, error: Error) -> ParameterValidationError:
error_type, name, additional = error
if error_type == "missing required field":
return MissingRequiredField(error)
elif error_type == "unknown field":
return UnknownField(error)
elif error_type == "invalid type":
return InvalidType(error)
elif error_type == "invalid range":
return InvalidRange(error)
elif error_type == "invalid length":
return InvalidLength(error)
elif error_type == "unable to encode to json":
return JsonEncodingError(error)
elif error_type == "invalid type for document":
return InvalidDocumentType(error)
elif error_type == "more than one input":
return MoreThanOneInput(error)
elif error_type == "empty input":
return EmptyInput(error)
return ParameterValidationError(error)
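# Hedged illustration (not part of the original module): report() records the
# botocore-style error tuple and mirrors it as a typed exception, so callers can
# either inspect ``exceptions`` or re-raise the first one. Assuming ``shape`` and
# ``params`` are available:
#
#   errors = ValidationErrors(shape, params)
#   errors.report("Topic.Name", "missing required field", required_name="Name")
#   isinstance(errors.exceptions[0], MissingRequiredField)  # True
#   errors.raise_first()  # raises MissingRequiredField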
class ParamValidator(BotocoreParamValidator):
def validate(self, params: Dict[str, Any], shape: Shape):
"""Validate parameters against a shape model.
This method will validate the parameters against a provided shape model.
All errors will be collected before returning to the caller. This means
that this method will not stop at the first error, it will return all
possible errors.
:param params: User provided dict of parameters
:param shape: A shape model describing the expected input.
:return: A ``ValidationErrors`` object containing every collected error.
"""
errors = ValidationErrors(shape, params)
self._validate(params, shape, errors, name="")
return errors
@type_check(valid_types=(dict,))
def _validate_structure(self, params, shape, errors, name):
# our parser sets the value of required members to None if they are not in the incoming request. we correct
# this behavior here to get the correct error messages.
for required_member in shape.metadata.get("required", []):
if required_member in params and params[required_member] is None:
params.pop(required_member)
super(ParamValidator, self)._validate_structure(params, shape, errors, name)
def validate_request(operation: OperationModel, request: ServiceRequest) -> ValidationErrors:
"""
Validates the service request with the input shape of the given operation.
:param operation: the operation whose input shape is used for validation
:param request: the service request to validate
:return: a ValidationErrors object
"""
return ParamValidator().validate(request, operation.input_shape) | null |
test with container | from mock.mock import patch
import os
import pytest
import ca_test_common
import ceph_osd_flag
fake_cluster = 'ceph'
fake_container_binary = 'podman'
fake_container_image = 'quay.io/ceph/daemon:latest'
fake_flag = 'noup'
fake_user = 'client.admin'
fake_keyring = '/etc/ceph/{}.{}.keyring'.format(fake_cluster, fake_user)
invalid_flag = 'nofoo'
class TestCephOSDFlagModule(object):
@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
def test_without_parameters(self, m_fail_json):
ca_test_common.set_module_args({})
m_fail_json.side_effect = ca_test_common.fail_json
with pytest.raises(ca_test_common.AnsibleFailJson) as result:
ceph_osd_flag.main()
result = result.value.args[0]
assert result['msg'] == 'missing required arguments: name'
@patch('ansible.module_utils.basic.AnsibleModule.fail_json')
def test_with_invalid_flag(self, m_fail_json):
ca_test_common.set_module_args({
'name': invalid_flag,
})
m_fail_json.side_effect = ca_test_common.fail_json
with pytest.raises(ca_test_common.AnsibleFailJson) as result:
ceph_osd_flag.main()
result = result.value.args[0]
assert result['msg'] == ('value of name must be one of: noup, nodown, '
'noout, nobackfill, norebalance, norecover, '
'noscrub, nodeep-scrub, got: {}'.format(invalid_flag))
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
def test_with_check_mode(self, m_exit_json):
ca_test_common.set_module_args({
'name': fake_flag,
'_ansible_check_mode': True
})
m_exit_json.side_effect = ca_test_common.exit_json
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_osd_flag.main()
result = result.value.args[0]
assert not result['changed']
assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'set', fake_flag]
assert result['rc'] == 0
assert not result['stdout']
assert not result['stderr']
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_with_failure(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'name': fake_flag
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = 'Error EINVAL: invalid command'
rc = 22
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_osd_flag.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'set', fake_flag]
assert result['rc'] == rc
assert result['stderr'] == stderr
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_set_flag(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'name': fake_flag,
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = '{} is set'.format(fake_flag)
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_osd_flag.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'set', fake_flag]
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def test_unset_flag(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'name': fake_flag,
'state': 'absent'
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = '{} is unset'.format(fake_flag)
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_osd_flag.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == ['ceph', '-n', fake_user, '-k', fake_keyring, '--cluster', fake_cluster, 'osd', 'unset', fake_flag]
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout
@patch.dict(os.environ, {'CEPH_CONTAINER_BINARY': fake_container_binary})
@patch.dict(os.environ, {'CEPH_CONTAINER_IMAGE': fake_container_image})
@patch('ansible.module_utils.basic.AnsibleModule.exit_json')
@patch('ansible.module_utils.basic.AnsibleModule.run_command')
def METHOD_NAME(self, m_run_command, m_exit_json):
ca_test_common.set_module_args({
'name': fake_flag,
})
m_exit_json.side_effect = ca_test_common.exit_json
stdout = ''
stderr = '{} is set'.format(fake_flag)
rc = 0
m_run_command.return_value = rc, stdout, stderr
with pytest.raises(ca_test_common.AnsibleExitJson) as result:
ceph_osd_flag.main()
result = result.value.args[0]
assert result['changed']
assert result['cmd'] == [fake_container_binary, 'run', '--rm', '--net=host',
'-v', '/etc/ceph:/etc/ceph:z',
'-v', '/var/lib/ceph/:/var/lib/ceph/:z',
'-v', '/var/log/ceph/:/var/log/ceph/:z',
'--entrypoint=ceph', fake_container_image,
'-n', fake_user, '-k', fake_keyring,
'--cluster', fake_cluster, 'osd', 'set', fake_flag]
assert result['rc'] == rc
assert result['stderr'] == stderr
assert result['stdout'] == stdout | null |
complete | import os
class TaskStatus:
requested = "requested"
reserved = "reserved"
started = "started"
scraper_started = "scraper_started"
scraper_running = "scraper_running"
scraper_completed = "scraper_completed"
scraper_killed = "scraper_killed"
failed = "failed"
cancel_requested = "cancel_requested"
canceled = "canceled"
succeeded = "succeeded"
update = "update"
created_file = "created_file"
uploaded_file = "uploaded_file"
failed_file = "failed_file"
checked_file = "checked_file"
@classmethod
def incomplete(cls):
return [
cls.requested,
cls.reserved,
cls.started,
cls.scraper_started,
cls.scraper_completed,
]
@classmethod
def METHOD_NAME(cls):
return [cls.failed, cls.canceled, cls.succeeded]
@classmethod
def all(cls):
return [
cls.requested,
cls.reserved,
cls.started,
cls.scraper_started,
cls.scraper_completed,
cls.scraper_killed,
cls.cancel_requested,
cls.canceled,
cls.succeeded,
cls.failed,
]
@classmethod
def file_events(cls):
return [cls.created_file, cls.uploaded_file, cls.failed_file, cls.checked_file]
@classmethod
def silent_events(cls):
return cls.file_events() + [cls.scraper_running, cls.update]
@classmethod
def all_events(cls):
return list(
filter(
lambda x: x not in (cls.requested, cls.reserved),
cls.all() + cls.silent_events(),
)
)
class WarehousePath:
hidden_dev = "/.hidden/dev"
hidden_endless = "/.hidden/endless"
hidden_bard = "/.hidden/bard"
hidden_bsf = "/.hidden/bsf"
hidden_private = "/.hidden/private"
hidden_custom_apps = "/.hidden/custom_apps"
videos = "/videos"
zimit = "/zimit"
@classmethod
def all(cls):
return ScheduleCategory.all_warehouse_paths() + [
cls.videos,
cls.zimit,
cls.hidden_dev,
cls.hidden_private,
cls.hidden_endless,
cls.hidden_bard,
cls.hidden_bsf,
cls.hidden_custom_apps,
]
class ScheduleCategory:
gutenberg = "gutenberg"
other = "other"
phet = "phet"
psiram = "psiram"
stack_exchange = "stack_exchange"
ted = "ted"
openedx = "openedx"
vikidia = "vikidia"
wikibooks = "wikibooks"
wikihow = "wikihow"
wikinews = "wikinews"
wikipedia = "wikipedia"
wikiquote = "wikiquote"
wikisource = "wikisource"
wikispecies = "wikispecies"
wikiversity = "wikiversity"
wikivoyage = "wikivoyage"
wiktionary = "wiktionary"
ifixit = "ifixit"
freecodecamp = "freecodecamp"
@classmethod
def all(cls):
return [
cls.gutenberg,
cls.other,
cls.phet,
cls.psiram,
cls.stack_exchange,
cls.ted,
cls.openedx,
cls.vikidia,
cls.wikibooks,
cls.wikihow,
cls.wikinews,
cls.wikipedia,
cls.wikiquote,
cls.wikisource,
cls.wikispecies,
cls.wikiversity,
cls.wikivoyage,
cls.wiktionary,
cls.ifixit,
cls.freecodecamp,
]
@classmethod
def get_warehouse_path(cls, category):
return "/{}".format(category)
@classmethod
def all_warehouse_paths(cls):
custom_paths = {cls.openedx: "mooc"}
excluded_categories = [cls.wikispecies]
return [
cls.get_warehouse_path(custom_paths.get(category, category))
for category in cls.all()
if category not in excluded_categories
]
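# Hedged illustration (not part of the original module): warehouse paths follow the
# category name except for the remapped/excluded cases above, e.g.
#
#   ScheduleCategory.get_warehouse_path("phet")               # "/phet"
#   "/mooc" in ScheduleCategory.all_warehouse_paths()         # True (openedx remapped)
#   "/wikispecies" in ScheduleCategory.all_warehouse_paths()  # False (excluded)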
class DockerImageName:
mwoffliner = "openzim/mwoffliner"
youtube = "openzim/youtube"
gutenberg = "openzim/gutenberg"
phet = "openzim/phet"
sotoki = "openzim/sotoki"
nautilus = "openzim/nautilus"
ted = "openzim/ted"
openedx = "openzim/openedx"
zimit = "openzim/zimit"
kolibri = "openzim/kolibri"
wikihow = "openzim/wikihow"
ifixit = "openzim/ifixit"
freecodecamp = "openzim/freecodecamp"
@classmethod
def all(cls) -> set:
return {
cls.mwoffliner,
cls.youtube,
cls.gutenberg,
cls.phet,
cls.sotoki,
cls.nautilus,
cls.ted,
cls.openedx,
cls.zimit,
cls.kolibri,
cls.wikihow,
cls.ifixit,
cls.freecodecamp,
}
class Offliner:
mwoffliner = "mwoffliner"
youtube = "youtube"
gutenberg = "gutenberg"
phet = "phet"
sotoki = "sotoki"
nautilus = "nautilus"
ted = "ted"
openedx = "openedx"
zimit = "zimit"
kolibri = "kolibri"
wikihow = "wikihow"
ifixit = "ifixit"
freecodecamp = "freecodecamp"
@classmethod
def all(cls):
return [
cls.mwoffliner,
cls.youtube,
cls.gutenberg,
cls.phet,
cls.sotoki,
cls.nautilus,
cls.ted,
cls.openedx,
cls.zimit,
cls.kolibri,
cls.wikihow,
cls.ifixit,
cls.freecodecamp,
]
@classmethod
def get_image_prefix(cls, offliner):
prefix = os.getenv(f"DOCKER_REGISTRY_{offliner}", "ghcr.io")
prefix += "/" if prefix else ""
return prefix
@classmethod
def get_image_name(cls, offliner):
return cls.get_image_prefix(offliner) + {
cls.mwoffliner: DockerImageName.mwoffliner,
cls.youtube: DockerImageName.youtube,
cls.gutenberg: DockerImageName.gutenberg,
cls.phet: DockerImageName.phet,
cls.sotoki: DockerImageName.sotoki,
cls.nautilus: DockerImageName.nautilus,
cls.ted: DockerImageName.ted,
cls.openedx: DockerImageName.openedx,
cls.zimit: DockerImageName.zimit,
cls.kolibri: DockerImageName.kolibri,
cls.wikihow: DockerImageName.wikihow,
cls.ifixit: DockerImageName.ifixit,
cls.freecodecamp: DockerImageName.freecodecamp,
}.get(offliner, "-")
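# Hedged illustration (not part of the original module): with no
# DOCKER_REGISTRY_<offliner> environment variable set, images resolve to ghcr.io, e.g.
#
#   Offliner.get_image_name(Offliner.zimit)  # "ghcr.io/openzim/zimit"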
class SchedulePeriodicity:
manually = "manually"
monthly = "monthly"
quarterly = "quarterly"
biannualy = "biannualy"
annually = "annually"
@classmethod
def all(cls):
return [cls.manually, cls.monthly, cls.quarterly, cls.biannualy, cls.annually]
class Platform:
wikimedia = "wikimedia"
youtube = "youtube"
wikihow = "wikihow"
ifixit = "ifixit"
ted = "ted"
@classmethod
def all(cls) -> list:
return [cls.wikimedia, cls.youtube, cls.wikihow, cls.ifixit, cls.ted]
@classmethod
def get_max_per_worker_tasks_for(cls, platform) -> int:
try:
return int(os.getenv(f"PLATFORM_{platform}_MAX_TASKS_PER_WORKER"))
except (TypeError, ValueError):
return None
@classmethod
def get_max_overall_tasks_for(cls, platform) -> int:
try:
return int(os.getenv(f"PLATFORM_{platform}_MAX_TASKS_TOTAL"))
except (TypeError, ValueError):
return None | null |
test reoccurred error message | from zerver.lib.test_classes import WebhookTestCase
class RaygunHookTests(WebhookTestCase):
STREAM_NAME = "raygun"
URL_TEMPLATE = "/api/v1/external/raygun?&api_key={api_key}&stream={stream}"
WEBHOOK_DIR_NAME = "raygun"
def test_status_changed_message(self) -> None:
expected_topic = "test"
expected_message = """
[Error](https://app.raygun.com/error-url) status changed to **Ignored** by Emma Cat:
* **Timestamp**: Wed Jan 28 01:49:36 1970
* **Application details**: [Best App](http://app.raygun.io/application-url)
""".strip()
self.check_webhook(
"error_status_changed",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_comment_added_to_error_message(self) -> None:
expected_topic = "test"
expected_message = """
Anita Peacock commented on [Error](https://app.raygun.com/error-url):
``` quote
Ignoring these errors
```
* **Timestamp**: Wed Jan 28 01:49:36 1970
* **Application details**: [application name](http://app.raygun.io/application-url)
""".strip()
self.check_webhook(
"comment_added_to_error",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_error_assigned_to_user_message(self) -> None:
expected_topic = "test"
expected_message = """
Amy Loondon assigned [Error](https://app.raygun.com/error-url) to Kyle Kenny:
* **Timestamp**: Wed Jan 28 01:49:36 1970
* **Application details**: [application name](http://app.raygun.io/application-url)
""".strip()
self.check_webhook(
"error_assigned_to_user",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_one_minute_followup_error_message(self) -> None:
expected_topic = "test"
expected_message = """
One minute [follow-up error](http://app.raygun.io/error-url):
* **First occurred**: Wed Jan 28 01:49:36 1970
* **Last occurred**: Wed Jan 28 01:49:36 1970
* 1 users affected with 1 total occurrences
* **Application details**: [application name](http://app.raygun.io/application-url)
""".strip()
self.check_webhook(
"one_minute_followup_error",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_hourly_followup_error_message(self) -> None:
expected_topic = "test"
expected_message = """
Hourly [follow-up error](http://app.raygun.io/error-url):
* **First occurred**: Wed Jan 28 01:49:36 1970
* **Last occurred**: Wed Jan 28 01:49:36 1970
* 1 users affected with 1 total occurrences
* **Application details**: [application name](http://app.raygun.io/application-url)
""".strip()
self.check_webhook(
"hourly_followup_error",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_new_error_message(self) -> None:
expected_topic = "test"
expected_message = """
New [Error](http://app.raygun.io/error-url) occurred:
* **First occurred**: Wed Jan 28 01:49:36 1970
* **Last occurred**: Wed Jan 28 01:49:36 1970
* 1 users affected with 1 total occurrences
* **Tags**: test, error-page, v1.0.1, env:staging
* **Affected user**: a9b7d8...33846
* **pageName**: Error Page
* **userLoggedIn**: True
* **Application details**: [application name](http://app.raygun.io/application-url)
""".strip()
self.check_webhook(
"new_error",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def METHOD_NAME(self) -> None:
expected_topic = "test"
expected_message = """
[Error](http://app.raygun.io/error-url) reoccurred:
* **First occurred**: Wed Jan 28 01:49:36 1970
* **Last occurred**: Wed Jan 28 01:49:36 1970
* 1 users affected with 1 total occurrences
* **Tags**: test, error-page, v1.0.1, env:staging
* **Affected user**: a9b7d8...33846
* **pageName**: Error Page
* **userLoggedIn**: True
* **Application details**: [application name](http://app.raygun.io/application-url)
""".strip()
self.check_webhook(
"reoccurred_error",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
) | null |
consume | import asyncio
import io
import numpy as np
import pyaudio
import wave
from aiortc.contrib.media import MediaBlackhole
from aiortc.mediastreams import AudioStreamTrack, MediaStreamError, MediaStreamTrack
from aiortc.mediastreams import VIDEO_CLOCK_RATE, VIDEO_TIME_BASE
from aiortc.rtcrtpsender import RTCRtpSender
from av import CodecContext, Packet
from pydub import AudioSegment
import cereal.messaging as messaging
AUDIO_RATE = 16000
SOUNDS = {
'engage': '../../selfdrive/assets/sounds/engage.wav',
'disengage': '../../selfdrive/assets/sounds/disengage.wav',
'error': '../../selfdrive/assets/sounds/warning_immediate.wav',
}
def force_codec(pc, sender, forced_codec='video/VP9', stream_type="video"):
codecs = RTCRtpSender.getCapabilities(stream_type).codecs
codec = [codec for codec in codecs if codec.mimeType == forced_codec]
transceiver = next(t for t in pc.getTransceivers() if t.sender == sender)
transceiver.setCodecPreferences(codec)
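# Hedged usage sketch (illustrative only, assumes an aiortc RTCPeerConnection named
# `pc`): pin the sender created for a track to a specific codec.
#
#   sender = pc.addTrack(EncodedBodyVideo())
#   force_codec(pc, sender, forced_codec='video/H264')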
class EncodedBodyVideo(MediaStreamTrack):
kind = "video"
_start: float
_timestamp: int
def __init__(self):
super().__init__()
sock_name = 'livestreamDriverEncodeData'
messaging.context = messaging.Context()
self.sock = messaging.sub_sock(sock_name, None, conflate=True)
self.pts = 0
async def recv(self) -> Packet:
while True:
msg = messaging.recv_one_or_none(self.sock)
if msg is not None:
break
await asyncio.sleep(0.005)
evta = getattr(msg, msg.which())
self.last_idx = evta.idx.encodeId
packet = Packet(evta.header + evta.data)
packet.time_base = VIDEO_TIME_BASE
packet.pts = self.pts
self.pts += 0.05 * VIDEO_CLOCK_RATE
return packet
class WebClientSpeaker(MediaBlackhole):
def __init__(self):
super().__init__()
self.p = pyaudio.PyAudio()
self.buffer = io.BytesIO()
self.channels = 2
self.stream = self.p.open(format=pyaudio.paInt16, channels=self.channels, rate=48000, frames_per_buffer=9600,
output=True, stream_callback=self.pyaudio_callback)
def pyaudio_callback(self, in_data, frame_count, time_info, status):
if self.buffer.getbuffer().nbytes < frame_count * self.channels * 2:
buff = np.zeros((frame_count, 2), dtype=np.int16).tobytes()
elif self.buffer.getbuffer().nbytes > 115200: # 3x the usual read size
self.buffer.seek(0)
buff = self.buffer.read(frame_count * self.channels * 4)
buff = buff[:frame_count * self.channels * 2]
self.buffer.seek(2)
else:
self.buffer.seek(0)
buff = self.buffer.read(frame_count * self.channels * 2)
self.buffer.seek(2)
return (buff, pyaudio.paContinue)
async def METHOD_NAME(self, track):
while True:
try:
frame = await track.recv()
except MediaStreamError:
return
bio = bytes(frame.planes[0])
self.buffer.write(bio)
async def start(self):
for track, task in self._MediaBlackhole__tracks.items():
if task is None:
self._MediaBlackhole__tracks[track] = asyncio.ensure_future(self.METHOD_NAME(track))
async def stop(self):
for task in self._MediaBlackhole__tracks.values():
if task is not None:
task.cancel()
self._MediaBlackhole__tracks = {}
self.stream.stop_stream()
self.stream.close()
self.p.terminate()
class BodyMic(AudioStreamTrack):
def __init__(self):
super().__init__()
self.sample_rate = AUDIO_RATE
self.AUDIO_PTIME = 0.020 # 20ms audio packetization
self.samples = int(self.AUDIO_PTIME * self.sample_rate)
self.FORMAT = pyaudio.paInt16
self.CHANNELS = 2
self.RATE = self.sample_rate
self.CHUNK = int(AUDIO_RATE * 0.020)
self.p = pyaudio.PyAudio()
self.mic_stream = self.p.open(format=self.FORMAT, channels=1, rate=self.RATE, input=True, frames_per_buffer=self.CHUNK)
self.codec = CodecContext.create('pcm_s16le', 'r')
self.codec.sample_rate = self.RATE
self.codec.channels = 2
self.audio_samples = 0
self.chunk_number = 0
async def recv(self):
mic_data = self.mic_stream.read(self.CHUNK)
mic_sound = AudioSegment(mic_data, sample_width=2, channels=1, frame_rate=self.RATE)
mic_sound = AudioSegment.from_mono_audiosegments(mic_sound, mic_sound)
mic_sound += 3 # increase volume by 3 dB
packet = Packet(mic_sound.raw_data)
frame = self.codec.decode(packet)[0]
frame.pts = self.audio_samples
self.audio_samples += frame.samples
self.chunk_number = self.chunk_number + 1
return frame
async def play_sound(sound):
chunk = 5120
with wave.open(SOUNDS[sound], 'rb') as wf:
def callback(in_data, frame_count, time_info, status):
data = wf.readframes(frame_count)
return data, pyaudio.paContinue
p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
channels=wf.getnchannels(),
rate=wf.getframerate(),
output=True,
frames_per_buffer=chunk,
stream_callback=callback)
stream.start_stream()
while stream.is_active():
await asyncio.sleep(0)
stream.stop_stream()
stream.close()
p.terminate() | null |
test drop null transformer transform default pct | import numpy as np
import pandas as pd
import pytest
import woodwork as ww
from pandas.testing import assert_frame_equal
from woodwork.logical_types import (
Boolean,
Categorical,
Double,
Integer,
NaturalLanguage,
)
from evalml.pipelines.components import DropNullColumns
def test_drop_null_transformer_init():
drop_null_transformer = DropNullColumns(pct_null_threshold=0)
assert drop_null_transformer.parameters == {"pct_null_threshold": 0.0}
assert drop_null_transformer._cols_to_drop is None
drop_null_transformer = DropNullColumns()
assert drop_null_transformer.parameters == {"pct_null_threshold": 1.0}
assert drop_null_transformer._cols_to_drop is None
drop_null_transformer = DropNullColumns(pct_null_threshold=0.95)
assert drop_null_transformer.parameters == {"pct_null_threshold": 0.95}
assert drop_null_transformer._cols_to_drop is None
with pytest.raises(
ValueError,
match="pct_null_threshold must be a float between 0 and 1, inclusive.",
):
DropNullColumns(pct_null_threshold=-0.95)
with pytest.raises(
ValueError,
match="pct_null_threshold must be a float between 0 and 1, inclusive.",
):
DropNullColumns(pct_null_threshold=1.01)
def METHOD_NAME():
drop_null_transformer = DropNullColumns()
X = pd.DataFrame(
{"lots_of_null": [None, None, None, None, 5], "no_null": [1, 2, 3, 4, 5]},
)
X_expected = X.astype({"lots_of_null": "Int64", "no_null": "int64"})
drop_null_transformer.fit(X)
X_t = drop_null_transformer.transform(X)
assert_frame_equal(X_expected, X_t)
def test_drop_null_transformer_transform_custom_pct_null_threshold():
X = pd.DataFrame(
{
"lots_of_null": [None, None, None, None, 5],
"all_null": [None, None, None, None, None],
"no_null": [1, 2, 3, 4, 5],
},
)
drop_null_transformer = DropNullColumns(pct_null_threshold=0.5)
X_expected = X.drop(["lots_of_null", "all_null"], axis=1)
X_expected = X_expected.astype({"no_null": "int64"})
drop_null_transformer.fit(X)
X_t = drop_null_transformer.transform(X)
assert_frame_equal(X_expected, X_t)
# check that X is untouched
assert X.equals(
pd.DataFrame(
{
"lots_of_null": [None, None, None, None, 5],
"all_null": [None, None, None, None, None],
"no_null": [1, 2, 3, 4, 5],
},
),
)
def test_drop_null_transformer_transform_boundary_pct_null_threshold():
drop_null_transformer = DropNullColumns(pct_null_threshold=0.0)
X = pd.DataFrame(
{
"all_null": [None, None, None, None, None],
"lots_of_null": [None, None, None, None, 5],
"some_null": [None, 0, 3, 4, 5],
},
)
drop_null_transformer.fit(X)
X_t = drop_null_transformer.transform(X)
assert X_t.empty
drop_null_transformer = DropNullColumns(pct_null_threshold=1.0)
drop_null_transformer.fit(X)
X_t = drop_null_transformer.transform(X)
assert_frame_equal(
X_t,
X.drop(columns=["all_null"]).astype(
{"some_null": "Int64", "lots_of_null": "Int64"},
),
)
# check that X is untouched
assert X.equals(
pd.DataFrame(
{
"all_null": [None, None, None, None, None],
"lots_of_null": [None, None, None, None, 5],
"some_null": [None, 0, 3, 4, 5],
},
),
)
def test_drop_null_transformer_fit_transform():
drop_null_transformer = DropNullColumns()
X = pd.DataFrame(
{"lots_of_null": [None, None, None, None, 5], "no_null": [1, 2, 3, 4, 5]},
)
X_expected = X.astype({"lots_of_null": "Int64", "no_null": "int64"})
X_t = drop_null_transformer.fit_transform(X)
assert_frame_equal(X_expected, X_t)
X = pd.DataFrame(
{
"lots_of_null": [None, None, None, None, 5],
"all_null": [None, None, None, None, None],
"no_null": [1, 2, 3, 4, 5],
},
)
drop_null_transformer = DropNullColumns(pct_null_threshold=0.5)
X_expected = X.drop(["lots_of_null", "all_null"], axis=1)
X_expected = X_expected.astype({"no_null": "int64"})
X_t = drop_null_transformer.fit_transform(X)
assert_frame_equal(X_expected, X_t)
# check that X is untouched
assert X.equals(
pd.DataFrame(
{
"lots_of_null": [None, None, None, None, 5],
"all_null": [None, None, None, None, None],
"no_null": [1, 2, 3, 4, 5],
},
),
)
drop_null_transformer = DropNullColumns(pct_null_threshold=0.0)
X = pd.DataFrame(
{"lots_of_null": [None, None, None, None, 5], "some_null": [None, 0, 3, 4, 5]},
)
X_t = drop_null_transformer.fit_transform(X)
assert X_t.empty
X = pd.DataFrame(
{
"all_null": [None, None, None, None, None],
"lots_of_null": [None, None, None, None, 5],
"some_null": [None, 0, 3, 4, 5],
},
).astype(
{
"lots_of_null": "Int64",
"some_null": "Int64",
},
)
drop_null_transformer = DropNullColumns(pct_null_threshold=1.0)
X_t = drop_null_transformer.fit_transform(X)
assert_frame_equal(X.drop(["all_null"], axis=1), X_t)
def test_drop_null_transformer_np_array():
drop_null_transformer = DropNullColumns(pct_null_threshold=0.5)
X = np.array(
[
[np.nan, 0, 2, 0],
[np.nan, 1, np.nan, 0],
[np.nan, 2, np.nan, 0],
[np.nan, 1, 1, 0],
],
)
X_t = drop_null_transformer.fit_transform(X)
assert_frame_equal(X_t, pd.DataFrame(np.delete(X, [0, 2], axis=1), columns=[1, 3]))
# check that X is untouched
np.testing.assert_allclose(
X,
np.array(
[
[np.nan, 0, 2, 0],
[np.nan, 1, np.nan, 0],
[np.nan, 2, np.nan, 0],
[np.nan, 1, 1, 0],
],
),
)
@pytest.mark.parametrize(
"X_df",
[
pd.DataFrame(pd.Series([1, 2, 3], dtype="Int64")),
pd.DataFrame(pd.Series([1.0, 2.0, 3.0], dtype="float")),
pd.DataFrame(pd.Series(["a", "b", "a"], dtype="category")),
pd.DataFrame(pd.Series([True, False, True], dtype="boolean")),
pd.DataFrame(
pd.Series(
["this will be a natural language column because length", "yay", "hay"],
dtype="string",
),
),
],
)
@pytest.mark.parametrize("has_nan", [True, False])
def test_drop_null_transformer_woodwork_custom_overrides_returned_by_components(
X_df,
has_nan,
):
y = pd.Series([1, 2, 1])
if has_nan:
X_df["all null"] = [np.nan, np.nan, np.nan]
override_types = [Integer, Double, Categorical, NaturalLanguage, Boolean]
for logical_type in override_types:
try:
X = X_df.copy()
X.ww.init(logical_types={0: logical_type})
except ww.exceptions.TypeConversionError:
continue
drop_null_transformer = DropNullColumns()
drop_null_transformer.fit(X)
transformed = drop_null_transformer.transform(X, y)
assert isinstance(transformed, pd.DataFrame)
assert {k: type(v) for k, v in transformed.ww.logical_types.items()} == {
0: logical_type,
} | null |
method | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"vm extension image list-names",
)
class ListNames(AAZCommand):
"""List the names of available extensions.
:example: Find Docker extensions by publisher and location.
az vm extension image list-names --publisher Microsoft.Azure.Extensions -l westus --query "[?starts_with(name, 'Docker')]"
:example: Find CustomScript extensions by publisher and location.
az vm extension image list-names --publisher Microsoft.Azure.Extensions -l westus --query "[?starts_with(name, 'Custom')]"
"""
_aaz_info = {
"version": "2022-11-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/providers/microsoft.compute/locations/{}/publishers/{}/artifacttypes/vmextension/types", "2022-11-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.location = AAZResourceLocationArg(
help="Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.",
required=True,
id_part="name",
)
_args_schema.publisher_name = AAZStrArg(
options=["-p", "--publisher", "--publisher-name"],
help="Image publisher name.",
required=True,
id_part="child_name_1",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.VirtualMachineExtensionImagesListTypes(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class VirtualMachineExtensionImagesListTypes(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmextension/types",
**self.url_parameters
)
@property
def METHOD_NAME(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"location", self.ctx.args.location,
required=True,
),
**self.serialize_url_param(
"publisherName", self.ctx.args.publisher_name,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-11-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZListType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.Element = AAZObjectType()
_element = cls._schema_on_200.Element
_element.id = AAZStrType(
flags={"read_only": True},
)
_element.location = AAZStrType(
flags={"required": True},
)
_element.name = AAZStrType(
flags={"required": True, "read_only": True},
)
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.tags = AAZDictType()
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.Element.properties
properties.compute_role = AAZStrType(
serialized_name="computeRole",
flags={"required": True},
)
properties.handler_schema = AAZStrType(
serialized_name="handlerSchema",
flags={"required": True},
)
properties.operating_system = AAZStrType(
serialized_name="operatingSystem",
flags={"required": True},
)
properties.supports_multiple_extensions = AAZBoolType(
serialized_name="supportsMultipleExtensions",
)
properties.vm_scale_set_enabled = AAZBoolType(
serialized_name="vmScaleSetEnabled",
)
tags = cls._schema_on_200.Element.tags
tags.Element = AAZStrType()
return cls._schema_on_200
class _ListNamesHelper:
"""Helper class for ListNames"""
__all__ = ["ListNames"] | null |
site url | from xml.sax.saxutils import escape
from django.conf import settings
from django.contrib.syndication.views import Feed
from django.core.cache import cache
from django.urls import reverse
from django.utils.feedgenerator import Enclosure, Rss201rev2Feed
from django.utils.safestring import SafeString, mark_safe
from django.utils.xmlutils import SimplerXMLGenerator
import bleach
from froide.helper.feed_utils import clean_feed_output
from froide.helper.text_utils import convert_html_to_text
from .models import Article, Publication
class CDataSimplerXMLGenerator(SimplerXMLGenerator):
def characters(self, content):
if content:
self._finish_pending_start_element()
if isinstance(content, SafeString):
self._write("<![CDATA[{}]]>".format(content))
else:
if not isinstance(content, str):
content = str(content, self._encoding)
self._write(escape(content))
class CDataRss201rev2Feed(Rss201rev2Feed):
def write(self, outfile, encoding):
# Override only the XML generator; the rest matches the parent write()
handler = CDataSimplerXMLGenerator(outfile, encoding, short_empty_elements=True)
handler.startDocument()
handler.startElement("rss", self.rss_attributes())
handler.startElement("channel", self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement("rss")
class BaseFeed(Feed):
protocol = settings.META_SITE_PROTOCOL
limit = 20
cache_namespace = "feed"
feed_type = CDataRss201rev2Feed
def __call__(self, request, *args, **kwargs):
cache_key = self.get_cache_key(*args, **kwargs)
response = cache.get(cache_key)
if response is None:
response = super().__call__(request, *args, **kwargs)
cache.set(cache_key, response, 900) # cache for 15 minutes
return response
def get_cache_key(self, *args, **kwargs):
# Override this in subclasses for more caching control
return "%s:%s-%s" % (
self.cache_namespace,
self.__class__.__module__,
"/".join(["%s,%s" % (key, val) for key, val in kwargs.items()]),
)
def get_object(self, request):
# TODO: get the right one from request.app_name
publication = Publication.objects.all().first()
return publication
def title(self, obj=None):
if obj:
return obj.title
return settings.SITE_NAME
@property
def METHOD_NAME(self):
"""
Return the URL of the current site.
"""
return settings.SITE_URL
def link(self):
return self.METHOD_NAME
def description(self, obj=None):
if obj:
return obj.description
return ""
def item_pubdate(self, item):
"""
Publication date of an entry.
"""
return item.start_publication
def get_queryset(self):
"""
Items are published entries.
"""
return Article.published.all()
@clean_feed_output
def item_title(self, item):
if item.kicker:
return "{}: {}".format(item.kicker, item.title)
return item.title
@clean_feed_output
def item_description(self, item):
return item.get_full_html_content()
class LatestArticlesFeed(BaseFeed):
feed_copyright = settings.SITE_NAME
def item_author_email(self, item):
"""
Return the first author's email.
Should not be called if self.item_author_name has returned None.
"""
return ""
def items(self):
"""
Items are published entries.
"""
queryset = self.get_queryset()
return queryset[: self.limit]
def item_link(self, item):
return self.METHOD_NAME + item.get_absolute_url()
def feed_url(self):
return self.METHOD_NAME + reverse("blog:article-latest-feed")
class LatestArticlesTeaserFeed(LatestArticlesFeed):
cache_namespace = "feed-teaser"
@clean_feed_output
def item_description(self, obj):
return convert_html_to_text(obj.description)
def feed_url(self):
return self.METHOD_NAME + reverse("blog:article-latest-feed-teaser")
class PodcastFeed(CDataRss201rev2Feed):
def __init__(self, *args, **kwargs):
extra_fields = ("author", "image")
self.meta = {}
for field in extra_fields:
self.meta[field] = kwargs.pop(field, "")
super().__init__(*args, **kwargs)
def rss_attributes(self):
attrs = super().rss_attributes()
attrs["xmlns:itunes"] = "http://www.itunes.com/dtds/podcast-1.0.dtd"
attrs["xmlns:content"] = "http://purl.org/rss/1.0/modules/content/"
return attrs
def add_root_elements(self, handler):
super().add_root_elements(handler)
if self.meta["image"]:
handler.addQuickElement(
"itunes:image", None, attrs={"href": self.meta["image"]}
)
if self.meta["author"]:
handler.addQuickElement("itunes:author", self.meta["author"])
handler.addQuickElement("itunes:explicit", "false")
# iTunes Category
handler.startElement("itunes:category", {"text": "News"})
handler.addQuickElement("itunes:category", None, {"text": "Politics"})
handler.endElement("itunes:category")
# iTunes Owner
handler.startElement("itunes:owner", {})
handler.addQuickElement("itunes:name", settings.SITE_NAME)
handler.addQuickElement("itunes:email", settings.SITE_EMAIL)
handler.endElement("itunes:owner")
def add_item_elements(self, handler, item):
"""Adds new elements to each item in the feed"""
super().add_item_elements(handler, item)
# iTunes Elements
handler.addQuickElement("itunes:explicit", "false")
handler.addQuickElement("itunes:author", item["author_name"])
handler.addQuickElement("itunes:duration", item["audio_duration"])
class LatestAudioFeed(LatestArticlesFeed):
feed_type = PodcastFeed
cache_namespace = "feed-audio"
def feed_url(self):
return self.METHOD_NAME + reverse("blog:article-latest-feed-audio")
def items(self):
"""
Items are published entries.
"""
queryset = self.get_queryset().filter(audio__isnull=False)
return queryset[: self.limit]
@clean_feed_output
def item_description(self, item):
content = item.get_full_html_content()
content = bleach.clean(content, strip=True, tags=["p", "ol", "ul", "li", "a"])
return mark_safe(content)
def item_enclosures(self, item):
return [Enclosure(item.audio.url, str(item.audio.size), item.audio.mime_type)]
def feed_extra_kwargs(self, obj):
if obj:
return {
"author": obj.author,
"image": obj.image.url if obj.image else None,
}
return {
"author": settings.SITE_NAME,
"image": None,
}
def item_extra_kwargs(self, item):
return {
# "image": item.image.url if item.image else None,
"audio_duration": str(item.audio_duration),
} | null |
current user | import typing
import datetime
import strawberry
from strawberry.types import Info
from django.forms.models import model_to_dict
import karrio.lib as lib
from karrio.server.serializers import Context
from karrio.server.user.serializers import TokenSerializer
import karrio.server.graph.utils as utils
import karrio.server.graph.schemas.base as base
import karrio.server.orgs.models as models
import karrio.server.orgs.utils as orgs
@strawberry.type
class OrganizationUserType:
email: str
is_admin: bool
is_owner: bool
is_staff: typing.Optional[bool] = None
full_name: typing.Optional[str] = None
last_login: typing.Optional[datetime.datetime] = None
date_joined: typing.Optional[datetime.datetime] = None
@strawberry.type
class OrganizationInvitationType:
object_type: str
id: str
guid: str
invitee_identifier: str
created: datetime.datetime
modified: datetime.datetime
invited_by: base.types.UserType
invitee: typing.Optional[base.types.UserType] = None
@strawberry.field
def organization_name(self: models.Organization) -> str:
return self.organization.name
@staticmethod
def resolve(
info,
id: typing.Optional[str] = strawberry.UNSET,
guid: typing.Optional[str] = strawberry.UNSET,
) -> typing.Optional["OrganizationInvitationType"]:
_filter = lib.to_dict(
dict(
id=(id if id is not strawberry.UNSET else None),
guid=(guid if guid is not strawberry.UNSET else None),
)
)
return models.OrganizationInvitation.objects.filter(**_filter).first()
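# Hedged illustration (not part of the original schema; identifiers below are made up):
# resolve() builds a sparse filter, so either identifier works on its own, e.g.
#
#   OrganizationInvitationType.resolve(info, id="inv_123")    # filters on id only
#   OrganizationInvitationType.resolve(info, guid="5f2c...")  # filters on guid only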
@strawberry.type
class OrganizationMemberType:
email: str
is_admin: bool
roles: typing.List[orgs.OrganizationUserRole]
is_owner: typing.Optional[bool] = None
full_name: typing.Optional[str] = None
last_login: typing.Optional[datetime.datetime] = None
invitation: typing.Optional[OrganizationInvitationType] = None
@strawberry.type
class OrganizationType:
id: str
name: str
slug: str
is_active: bool
created: datetime.datetime
modified: datetime.datetime
organization_invites: typing.List[OrganizationInvitationType]
@strawberry.field
def METHOD_NAME(self: models.Organization, info: Info) -> OrganizationMemberType:
user = info.context.request.user
return OrganizationUserType( # type: ignore
**{
k: v
for k, v in model_to_dict(user).items()
if k in OrganizationUserType.__annotations__.keys()
},
is_admin=self.organization_users.get(user=user).is_admin,
is_owner=self.is_owner(user),
)
@strawberry.field
def members(self: models.Organization) -> typing.List[OrganizationMemberType]:
users = [
OrganizationMemberType( # type: ignore
email=user.email,
full_name=user.full_name,
last_login=user.last_login,
is_owner=self.is_owner(user),
roles=self.organization_users.get(user=user).roles,
is_admin=self.organization_users.get(user=user).is_admin,
)
for user in self.users.filter(is_active=True)
]
invites = [
OrganizationMemberType( # type: ignore
email=getattr(invite.invitee, "email", invite.invitee_identifier),
full_name=getattr(invite.invitee, "full_name", ""),
is_admin=False,
is_owner=False,
invitation=invite,
roles=[orgs.OrganizationUserRole.member],
)
for invite in self.organization_invites.all()
]
return users + invites
@strawberry.field
def token(self: models.Organization, info: Info) -> str:
context = Context(
org=self,
user=info.context.request.user,
test_mode=info.context.request.test_mode,
)
return (
TokenSerializer.map(
data=dict(user=info.context.request.user),
context=context,
)
.save()
.instance
)
@staticmethod
@utils.authentication_required
def resolve(
info, id: typing.Optional[str] = strawberry.UNSET
) -> typing.Optional["OrganizationType"]:
if id != strawberry.UNSET:
return models.Organization.objects.get(
id=id,
users__id=info.context.request.user.id,
is_active=True,
)
return info.context.request.org
@staticmethod
@utils.authentication_required
def resolve_list(info) -> typing.List["OrganizationType"]:
return models.Organization.objects.filter(
users__id=info.context.request.user.id,
is_active=True,
) | null |
catch log error | from pathlib import Path
from typing import Type, Dict, Optional
import bpy
import inspect
import traceback
import logging
import logging.handlers
from contextlib import contextmanager
import sverchok
if not sverchok.reload_event: # otherwise it leads to infinite recursion
old_factory = logging.getLogRecordFactory()
def add_relative_path_factory(name, *args, **kwargs):
record = old_factory(name, *args, **kwargs)
if name.startswith('sverchok'):
path = Path(record.pathname)
# search root path of the add-on
for root in path.parents:
if root.parent.name == 'addons': # add-ons are not always in the folder
break
else:
root = None
if root is not None:
record.relative_path = path.relative_to(root)
else: # this can happen if there are several instances of sverchok (as an add-on and a separate folder)
record.relative_path = path
return record
if not sverchok.reload_event: # otherwise it leads to infinite recursion
logging.setLogRecordFactory(add_relative_path_factory)
log_format = "%(asctime)s.%(msecs)03d [%(levelname)-5s] %(name)s %(relative_path)s:%(lineno)d - %(message)s"
sv_logger = logging.getLogger('sverchok') # root logger
# set any level you desire here,
# it will be overridden by the add-on settings once the add-on is registered
if not sverchok.reload_event:
sv_logger.setLevel(logging.ERROR)
class ColorFormatter(logging.Formatter):
START_COLOR = '\033[{}m'
RESET_COLOR = '\033[0m'
COLORS = {
'DEBUG': '1;30', # grey
'INFO': 32, # green
'WARNING': 33, # yellow
'ERROR': 31, # red
'CRITICAL': 41, # white on red bg
}
def format(self, record):
color = self.START_COLOR.format(self.COLORS[record.levelname])
color_format = color + self._fmt + self.RESET_COLOR
formatter = logging.Formatter(color_format, datefmt=self.datefmt)
return formatter.format(record)
console_handler = logging.StreamHandler()
console_handler.setFormatter(ColorFormatter(log_format, datefmt='%H:%M:%S'))
sv_logger.addHandler(console_handler)
def add_node_error_location(record: logging.LogRecord):
# https://docs.python.org/3/howto/logging-cookbook.html#using-filters-to-impart-contextual-information
# should be called with logger.error(msg, exc_info=True)
frame_info = inspect.getinnerframes(record.exc_info[-1])[-1]
record.relative_path = Path(frame_info.filename).name
record.lineno = frame_info.lineno
if not is_enabled_for('DEBUG'): # show traceback only in DEBUG mode
record.exc_info = None
return True
node_error_logger = logging.getLogger('sverchok.node_error')
node_error_logger.addFilter(add_node_error_location)
def add_file_handler(file_path):
sv_logger.debug(f'Logging to file="{file_path}"')
handler = logging.handlers.RotatingFileHandler(file_path,
maxBytes=10 * 1024 * 1024,
backupCount=3)
handler.setFormatter(logging.Formatter(log_format, datefmt="%Y-%m-%d %H:%M:%S"))
sv_logger.addHandler(handler)
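# Hedged usage sketch (illustrative path, not part of the original module):
#
#   add_file_handler("/tmp/sverchok.log")  # rotating file log, 10 MiB x 3 backups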
def remove_console_handler():
# Remove console output handler.
logging.debug("Log output to console is disabled. Further messages will"
" be available only in text buffer and file (if configured).")
sv_logger.removeHandler(console_handler)
# https://docs.python.org/3/howto/logging.html#configuring-logging-for-a-library
sv_logger.addHandler(logging.NullHandler())
@contextmanager
def METHOD_NAME():
"""Catch logging errors"""
try:
yield
except Exception as e:
frame, _, line, *_ = inspect.trace()[-1]
module = inspect.getmodule(frame)
name = module.__name__ or "<Unknown Module>"
_logger = logging.getLogger(f'{name} {line}')
_logger.error(e)
if _logger.isEnabledFor(logging.DEBUG):
traceback.print_exc()
@contextmanager
def fix_error_msg(msgs: Dict[Type[Exception], str]):
try:
yield
except Exception as e:
err_class = type(e)
if err_class in msgs:
e.args = (msgs[err_class], )
raise
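# Hedged usage sketch (illustrative only; `sockets` is an assumed dict): remap a
# confusing exception message before it propagates; non-matching exception types
# are re-raised untouched.
#
#   with fix_error_msg({KeyError: "Socket with that name was not found"}):
#       sockets["missing"]  # raises KeyError("Socket with that name was not found")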
class TextBufferHandler(logging.Handler):
"""
A handler class which writes logging records, appropriately formatted,
to Blender's internal text buffer.
"""
terminator = '\n'
def __init__(self, name):
"""
Initialize the handler.
"""
super().__init__()
self.buffer_name = name
if self.buffer is None:
raise RuntimeError("Can't create TextBufferHandler, "
"most likely because Blender is not fully loaded")
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the buffer with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
# when a user enables a Sverchok extension it seems to disable all Blender
# collections until the extension is registered
# for now ignore such cases
if self.buffer is None:
return
try:
msg = self.format(record)
self.buffer.write(msg)
self.buffer.write(self.terminator)
self.flush()
except Exception:
self.handleError(record)
def clear(self):
"""Clear all records"""
self.buffer.clear()
sv_logger.debug("Internal text buffer cleared")
@property
def buffer(self) -> Optional:
"""
Get internal blender text buffer for logging.
"""
try:
return bpy.data.texts.get(self.buffer_name) \
or bpy.data.texts.new(name=self.buffer_name)
except AttributeError as e:
# logging.debug("Can't initialize logging to internal buffer: get_log_buffer is called too early: {}".format(e))
return None
@classmethod
def add_to_main_logger(cls):
"""This handler can work only after Blender is fully loaded"""
addon = bpy.context.preferences.addons.get(sverchok.__name__)
prefs = addon.preferences
if prefs.log_to_buffer:
sv_logger.debug(f'Logging to Blender text editor="{prefs.log_buffer_name}"')
handler = cls(prefs.log_buffer_name)
handler.setFormatter(logging.Formatter(log_format, datefmt="%Y-%m-%d %H:%M:%S"))
sv_logger.addHandler(handler)
def __repr__(self):
level = logging.getLevelName(self.level)
name = getattr(self.buffer, 'name', '')
if name:
name += ' '
return '<%s %s(%s)>' % (self.__class__.__name__, name, level)
# Convenience functions
def get_logger():
"""Get Logger instance. Logger name is obtained from caller module name."""
frame, *_ = inspect.stack()[1]
module = inspect.getmodule(frame)
name = module.__name__
return logging.getLogger(name)
def is_enabled_for(log_level="DEBUG") -> bool:
"""This check should be used for improving performance of calling disabled loggers"""
addon = bpy.context.preferences.addons.get(sverchok.__name__)
current_level = getattr(logging, addon.preferences.log_level)
given_level = getattr(logging, log_level)
return given_level >= current_level | null |
single tiff setup | import ufo.numpy
import numpy as np
import tifffile
import contextlib
from common import disable, tempdir
a, b = 1.5, 2.5
ones = np.ones((512, 512))
zeros = np.zeros((512, 512))
small = np.ones((256, 256))
random = np.random.random((512, 512))
def have_camera_plugin():
from gi.repository import Ufo
return 'camera' in Ufo.PluginManager().get_all_task_names()
@contextlib.contextmanager
def METHOD_NAME(n_images, fmt='foo-{:05}.tif'):
with tempdir() as d:
data = np.ones((512, 512), dtype=np.float32)
for i in range(n_images):
tifffile.imsave(d.path(fmt.format(i)), data)
yield d
def test_read_single_tiffs():
from ufo import Read, Null
with METHOD_NAME(32) as d:
read = Read(path=d.root)
null = Null()
null(read()).run().join()
assert(null.task.props.num_processed == 32)
def test_read_single_tiffs_stepped():
from ufo import Read, Null
with METHOD_NAME(32) as d:
read = Read(path=d.root, step=2)
null = Null()
null(read()).run().join()
assert(null.task.props.num_processed == 32 / 2)
def test_read_single_tiffs_start_modified():
from ufo import Read, Null
with METHOD_NAME(32) as d:
read = Read(path=d.root, start=15)
null = Null()
null(read()).run().join()
assert(null.task.props.num_processed == 32 - 15)
@disable
def test_read_multi_tiffs():
from ufo import Read, Null
with tempdir() as d:
n_images = 32
data = np.ones((512, 512, n_images))
tifffile.imsave(d.path('foo.tif'), data)
read = Read(path=d.path('foo.tif'))
null = Null()
null(read()).run().join()
assert(null.task.props.num_processed == n_images)
def test_average():
from ufo import Average
average = Average()
for x in average([a * ones, b * ones]):
expected = (a + b) / 2
assert(np.all(x == expected))
def test_buffer():
from ufo import DummyData, Buffer
data = DummyData(number=10, width=512, height=256)
buffr = Buffer()
result = list(buffr(data()))
assert(len(result) == 10)
for r in result:
assert(r.shape[0] == 256)
assert(r.shape[1] == 512)
def test_rescale():
from ufo import Rescale
rescale = Rescale(factor=0.5)
result = list(rescale([a * ones, b * small]))
assert(np.mean(result[0]) == a)
assert(np.mean(result[1]) == b)
@disable
def test_roi():
from ufo import CutRoi
x, y = 10, 20
w, h = 256, 128
roi = CutRoi(x=x, y=y, width=w, height=h)
result = list(roi([random, random]))
ref = random[y:y+h, x:x+w]
assert(ref.shape[0] == h)
assert(ref.shape[1] == w)
assert(np.all(ref == result[0]))
def test_stack():
from ufo import Stack
stack = Stack(number=2)
for x in stack([a * ones, b * ones]):
assert(x.shape[0] == 2)
assert(np.all(x[0,:,:] == a))
assert(np.all(x[1,:,:] == b))
def test_flatten():
from ufo import FlattenInplace
summing = FlattenInplace(mode="sum")
result = list(summing([a * ones, b * ones]).items())
assert(np.all(result[0] == a + b))
def test_fft_1d():
from ufo import Fft, Ifft
fft = Fft(dimensions=1)
ifft = Ifft(dimensions=1)
orig_a = a * ones
orig_b = b * random
result = list(ifft(fft([orig_a, orig_b])))
assert(np.sum(orig_a - result[0]) < 0.001)
assert(np.sum(orig_b - result[1]) < 0.01)
def test_fft_2d():
from ufo import Fft, Ifft
fft = Fft(dimensions=2)
ifft = Ifft(dimensions=2)
orig_a = a * ones
orig_b = b * random
result = list(ifft(fft([orig_a, orig_b])))
assert(np.sum(orig_a - result[0]) < 0.001)
assert(np.sum(orig_b - result[1]) < 0.1)
def test_flatfield_correction():
from ufo import FlatFieldCorrect
darks = np.ones((512, 512)) * 1.5
flats = np.ones((512, 512)) * 11.5
projs = np.ones((512, 512)) * 100.0
ffc = FlatFieldCorrect()
expected = (projs - darks) / (flats - darks)
result = list(ffc([projs, projs], [darks, darks], [flats, flats]))[0]
assert(np.sum(np.abs(expected - result)) < 1)
expected = - np.log((projs - darks) / (flats - darks))
ffc = FlatFieldCorrect(absorption_correct=True)
result = list(ffc([projs, projs], [darks, darks], [flats, flats]))[0]
assert(np.sum(np.abs(expected - result)) < 1)
def test_measure():
from ufo import Measure
measures = []
def measure_callback(m, a):
measures.append(ufo.numpy.asarray(a))
measure = Measure(metric='mean', axis=-1)
measure.connect('result', measure_callback)
measure([a * ones, b * ones]).run().join()
assert(len(measures) == 2)
def test_dummy_data():
from ufo import DummyData
data = DummyData(number=10, width=256, height=128)
result = list(data())
assert(len(result) == 10)
assert(all(r.shape[0] == 128 and r.shape[1] == 256 for r in result))
def test_metaballs():
from ufo import Metaballs
metaballs = Metaballs(number=5, number_balls=5, width=512, height=256)
result = list(metaballs())
assert(len(result) == 5)
assert(all(r.shape[0] == 256 and r.shape[1] == 512 for r in result))
def test_transpose():
from ufo import Transpose
transpose = Transpose()
ones = np.ones((256, 512))
zeros = np.zeros((256, 128))
result = list(transpose([ones, zeros]))
assert(np.all(result[0] == ones.transpose()))
assert(np.all(result[1] == zeros.transpose()))
def test_uca():
if have_camera_plugin():
from ufo import Camera
camera = Camera(name='mock', number=2)
result = list(camera())
assert(len(result) == 2)
def test_uca_direct():
try:
from gi.repository import Ufo, Uca
if have_camera_plugin():
from ufo import Camera
uca_pm = Uca.PluginManager()
mock = uca_pm.get_camerav('mock', [])
camera = Camera(camera=mock, count=3)
result = list(camera())
assert(len(result) == 3)
except ImportError:
pass
def test_memory_in():
with tempdir() as d:
from ufo import MemoryIn, Write
ref = random.astype(np.float32)
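        # MemoryIn reads image data straight from host memory: the NumPy buffer's base
        # address is handed over via __array_interface__ so the task can feed the array
        # into the pipeline without going through a file.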
read = MemoryIn(pointer=ref.__array_interface__['data'][0], number=1,
width=ref.shape[1], height=ref.shape[0])
write = Write(filename=d.path('foo.tif'))
write(read()).run().join()
result = tifffile.imread(d.path('foo.tif'))
assert(np.all(ref == result))
def test_memory_out():
with tempdir() as d:
from ufo import MemoryOut, Read
ref = random.astype(np.float32)
out = np.zeros_like(ref).astype(np.float32)
tifffile.imsave(d.path('foo.tif'), ref)
read = Read(path=d.path('foo.tif'))
write = MemoryOut(pointer=out.__array_interface__['data'][0], max_size=ref.nbytes)
write(read()).run().join()
assert(np.all(out == ref)) | null |
parameters | # coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from fastapi import APIRouter, Depends, Query, Request, Security
from pydantic import BaseModel, Field
from argilla.server.apis.v0.helpers import deprecate_endpoint
from argilla.server.apis.v0.models.commons.params import CommonTaskHandlerDependencies
from argilla.server.commons.config import TaskConfig, TasksFactory
from argilla.server.models import User
from argilla.server.security import auth
from argilla.server.services.datasets import DatasetsService
from argilla.server.services.metrics import MetricsService
class MetricInfo(BaseModel):
id: str = Field(description="The metric id")
name: str = Field(description="The metric name")
description: Optional[str] = Field(default=None, description="The metric description")
@dataclass
class MetricSummaryParams:
request: Request
interval: Optional[float] = Query(
default=None,
gt=0.0,
description="The histogram interval for histogram summaries",
)
size: Optional[int] = Query(
default=None,
ge=1,
description="The number of terms for terminological summaries",
)
@property
def METHOD_NAME(self) -> Dict[str, Any]:
"""Returns dynamic metric args found in the request query params"""
return {
"interval": self.interval,
"size": self.size,
**{k: v for k, v in self.request.query_params.items() if k not in ["interval", "size"]},
}
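# Illustrative example (path and extra parameter are made up): a request such as
#   POST .../{name}/metrics/some_metric:summary?size=10&extra=1
# resolves the METHOD_NAME property to {"interval": None, "size": 10, "extra": "1"},
# i.e. any query params beyond interval/size are forwarded to the metric unchanged.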
def configure_router(router: APIRouter, cfg: TaskConfig):
base_metrics_endpoint = f"/{cfg.task}/{{name}}/metrics"
new_base_metrics_endpoint = f"/{{name}}/{cfg.task}/metrics"
@deprecate_endpoint(
path=base_metrics_endpoint,
new_path=new_base_metrics_endpoint,
router_method=router.get,
operation_id="get_dataset_metrics",
name="get_dataset_metrics",
)
async def get_dataset_metrics(
name: str,
request_deps: CommonTaskHandlerDependencies = Depends(),
current_user: User = Security(auth.get_current_user),
datasets: DatasetsService = Depends(DatasetsService.get_instance),
) -> List[MetricInfo]:
dataset = await datasets.find_by_name(
user=current_user,
name=name,
task=cfg.task,
workspace=request_deps.workspace,
)
metrics = TasksFactory.get_task_metrics(dataset.task)
metrics = metrics.metrics if metrics else []
return [MetricInfo.parse_obj(metric) for metric in metrics]
@deprecate_endpoint(
path=base_metrics_endpoint + "/{metric}:summary",
new_path=new_base_metrics_endpoint + "/{metric}:summary",
router_method=router.post,
operation_id="metric_summary",
name="metric_summary",
)
async def metric_summary(
name: str,
metric: str,
query: cfg.query,
metric_params: MetricSummaryParams = Depends(),
request_deps: CommonTaskHandlerDependencies = Depends(),
current_user: User = Security(auth.get_current_user, scopes=[]),
datasets: DatasetsService = Depends(DatasetsService.get_instance),
metrics: MetricsService = Depends(MetricsService.get_instance),
):
dataset = await datasets.find_by_name(
user=current_user,
name=name,
task=cfg.task,
workspace=request_deps.workspace,
)
metric_ = TasksFactory.find_task_metric(task=cfg.task, metric_id=metric)
record_class = TasksFactory.get_task_record(cfg.task)
return metrics.summarize_metric(
dataset=dataset,
metric=metric_,
record_class=record_class,
query=query,
**metric_params.METHOD_NAME,
)
router = APIRouter(tags=["Metrics"], prefix="/datasets")
for cfg in TasksFactory.get_all_configs():
configure_router(router, cfg) | null |
main | import struct
import math
import os
import os.path
import sys
# From lib/stm32wb_copro/wpan/interface/patterns/ble_thread/shci/shci.h
__STACK_TYPE_CODES = {
"BLE_FULL": 0x01,
"BLE_HCI": 0x02,
"BLE_LIGHT": 0x03,
"BLE_BEACON": 0x04,
"BLE_BASIC": 0x05,
"BLE_FULL_EXT_ADV": 0x06,
"BLE_HCI_EXT_ADV": 0x07,
"THREAD_FTD": 0x10,
"THREAD_MTD": 0x11,
"ZIGBEE_FFD": 0x30,
"ZIGBEE_RFD": 0x31,
"MAC": 0x40,
"BLE_THREAD_FTD_STATIC": 0x50,
"BLE_THREAD_FTD_DYAMIC": 0x51,
"802154_LLD_TESTS": 0x60,
"802154_PHY_VALID": 0x61,
"BLE_PHY_VALID": 0x62,
"BLE_LLD_TESTS": 0x63,
"BLE_RLV": 0x64,
"802154_RLV": 0x65,
"BLE_ZIGBEE_FFD_STATIC": 0x70,
"BLE_ZIGBEE_RFD_STATIC": 0x71,
"BLE_ZIGBEE_FFD_DYNAMIC": 0x78,
"BLE_ZIGBEE_RFD_DYNAMIC": 0x79,
"RLV": 0x80,
"BLE_MAC_STATIC": 0x90,
}
class CoproException(ValueError):
pass
# Formats based on AN5185
class CoproFooterBase:
SIG_BIN_SIZE = 5 * 4
_SIG_BIN_COMMON_SIZE = 2 * 4
def get_version(self):
return (
f"Version {self.version_major}.{self.version_minor}.{self.version_sub}, "
f"branch {self.version_branch}, build {self.version_build} (magic {self.magic:X})"
)
def get_details(self):
raise CoproException("Not implemented")
def __init__(self, raw: bytes):
if len(raw) != self.SIG_BIN_SIZE:
raise CoproException("Invalid footer size")
sig_common_part = raw[-self._SIG_BIN_COMMON_SIZE :]
parts = struct.unpack("BBBBI", sig_common_part)
self.version_major = parts[3]
self.version_minor = parts[2]
self.version_sub = parts[1]
# AN5185 mismatch: swapping byte halves
self.version_build = parts[0] & 0x0F
self.version_branch = (parts[0] & 0xF0) >> 4
self.magic = parts[4]
class CoproFusFooter(CoproFooterBase):
FUS_MAGIC_IMG_STACK = 0x23372991
FUS_MAGIC_IMG_FUS = 0x32279221
FUS_MAGIC_IMG_OTHER = 0x42769811
FUS_BASE = 0x80F4000
FLASH_PAGE_SIZE = 4 * 1024
def __init__(self, raw: bytes):
super().__init__(raw)
if self.magic not in (
self.FUS_MAGIC_IMG_OTHER,
self.FUS_MAGIC_IMG_FUS,
self.FUS_MAGIC_IMG_STACK,
):
raise CoproException(f"Invalid FUS img magic {self.magic:x}")
own_data = raw[: -self._SIG_BIN_COMMON_SIZE]
parts = struct.unpack("IIBBBB", own_data)
self.info1 = parts[0]
self.info2 = parts[1]
self.sram2b_1ks = parts[5]
self.sram2a_1ks = parts[4]
self.flash_4ks = parts[2]
def get_details(self):
return f"SRAM2b={self.sram2b_1ks}k SRAM2a={self.sram2a_1ks}k flash={self.flash_4ks}p"
def is_stack(self):
return self.magic == self.FUS_MAGIC_IMG_STACK
def get_flash_pages(self, fullsize):
return math.ceil(fullsize / self.FLASH_PAGE_SIZE)
def get_flash_base(self, fullsize):
if not self.is_stack():
raise CoproException("Not a stack image")
return self.FUS_BASE - self.get_flash_pages(fullsize) * self.FLASH_PAGE_SIZE
class CoproSigFooter(CoproFooterBase):
SIG_MAGIC_ST = 0xD3A12C5E
SIG_MAGIC_CUSTOMER = 0xE2B51D4A
def __init__(self, raw: bytes):
super().__init__(raw)
if self.magic not in (self.SIG_MAGIC_ST, self.SIG_MAGIC_CUSTOMER):
raise CoproException(f"Invalid FUS img magic {self.magic:x}")
own_data = raw[: -self._SIG_BIN_COMMON_SIZE]
parts = struct.unpack("IIBBH", own_data)
self.reserved_1 = parts[0]
self.reserved_2 = parts[1]
self.size = parts[2]
self.source = parts[3]
self.reserved_34 = parts[4]
def get_details(self):
return f"Signature Src {self.source:x} size {self.size:x}"
class CoproBinary:
def __init__(self, binary_path):
self.binary_path = binary_path
self.img_sig_footer = None
self.img_sig = None
self.binary_size = -1
self._load()
def _load(self):
with open(self.binary_path, "rb") as fin:
whole_file = fin.read()
self.binary_size = len(whole_file)
img_sig_footer_bin = whole_file[-CoproFooterBase.SIG_BIN_SIZE :]
self.img_sig_footer = CoproSigFooter(img_sig_footer_bin)
img_sig_size = self.img_sig_footer.size + CoproSigFooter.SIG_BIN_SIZE
img_sig_bin = whole_file[
-(img_sig_size + CoproFusFooter.SIG_BIN_SIZE) : -img_sig_size
]
self.img_sig = CoproFusFooter(img_sig_bin)
def is_valid(self):
return self.img_sig_footer is not None and self.img_sig is not None
def is_stack(self):
return self.img_sig and self.img_sig.is_stack()
def get_flash_load_addr(self):
if not self.is_stack():
raise CoproException("Not a stack image")
return self.img_sig.get_flash_base(self.binary_size)
def get_stack_type(typestr: str):
stack_code = __STACK_TYPE_CODES.get(typestr.upper(), None)
if stack_code is None:
raise CoproException(f"Unknown stack type {typestr}. See shci.h")
return stack_code
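# Illustrative usage: get_stack_type("ble_full") returns 0x01 and get_stack_type("thread_ftd")
# returns 0x10; names that are not listed in the table above raise a CoproException.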
def _load_bin(binary_path: str):
print(binary_path)
copro_bin = CoproBinary(binary_path)
print(copro_bin.img_sig.get_version())
if copro_bin.img_sig.is_stack():
print(f"\t>> FLASH AT {copro_bin.get_flash_load_addr():X}\n")
def METHOD_NAME():
coprodir = (
sys.argv[1]
if len(sys.argv) > 1
else "../../../lib/STM32CubeWB/Projects/STM32WB_Copro_Wireless_Binaries/STM32WB5x"
)
for fn in os.listdir(coprodir):
if not fn.endswith(".bin"):
continue
_load_bin(os.path.join(coprodir, fn))
if __name__ == "__main__":
METHOD_NAME() | null |
stream name from stream arn | import contextlib
import logging
import threading
import time
from typing import Dict
from bson.json_util import dumps
from localstack.aws.api.dynamodbstreams import StreamStatus, StreamViewType
from localstack.aws.connect import connect_to
from localstack.services.dynamodbstreams.models import DynamoDbStreamsStore, dynamodbstreams_stores
from localstack.utils.aws import arns, resources
from localstack.utils.common import now_utc
DDB_KINESIS_STREAM_NAME_PREFIX = "__ddb_stream_"
LOG = logging.getLogger(__name__)
_SEQUENCE_MTX = threading.RLock()
_SEQUENCE_NUMBER_COUNTER = 1
def get_dynamodbstreams_store(account_id: str, region: str) -> DynamoDbStreamsStore:
return dynamodbstreams_stores[account_id][region]
def get_and_increment_sequence_number_counter() -> int:
global _SEQUENCE_NUMBER_COUNTER
with _SEQUENCE_MTX:
cnt = _SEQUENCE_NUMBER_COUNTER
_SEQUENCE_NUMBER_COUNTER += 1
return cnt
def add_dynamodb_stream(
account_id: str,
region_name: str,
table_name: str,
latest_stream_label: str | None = None,
view_type: StreamViewType = StreamViewType.NEW_AND_OLD_IMAGES,
enabled: bool = True,
) -> None:
if not enabled:
return
store = get_dynamodbstreams_store(account_id, region_name)
# create kinesis stream as a backend
stream_name = get_kinesis_stream_name(table_name)
resources.create_kinesis_stream(
connect_to(aws_access_key_id=account_id, region_name=region_name).kinesis,
stream_name=stream_name,
)
latest_stream_label = latest_stream_label or "latest"
stream = {
"StreamArn": arns.dynamodb_stream_arn(
table_name=table_name, latest_stream_label=latest_stream_label
),
"TableName": table_name,
"StreamLabel": latest_stream_label,
"StreamStatus": StreamStatus.ENABLING,
"KeySchema": [],
"Shards": [],
"StreamViewType": view_type,
"shards_id_map": {},
}
store.ddb_streams[table_name] = stream
def get_stream_for_table(account_id: str, region_name: str, table_arn: str) -> dict:
store = get_dynamodbstreams_store(account_id, region_name)
table_name = table_name_from_stream_arn(table_arn)
return store.ddb_streams.get(table_name)
def forward_events(account_id: str, region_name: str, records: dict) -> None:
kinesis = connect_to(aws_access_key_id=account_id, region_name=region_name).kinesis
for record in records:
table_arn = record.pop("eventSourceARN", "")
if stream := get_stream_for_table(account_id, region_name, table_arn):
table_name = table_name_from_stream_arn(stream["StreamArn"])
stream_name = get_kinesis_stream_name(table_name)
kinesis.put_record(
StreamName=stream_name,
Data=dumps(record),
PartitionKey="TODO",
)
def delete_streams(account_id: str, region_name: str, table_arn: str) -> None:
store = get_dynamodbstreams_store(account_id, region_name)
table_name = table_name_from_table_arn(table_arn)
if store.ddb_streams.pop(table_name, None):
stream_name = get_kinesis_stream_name(table_name)
with contextlib.suppress(Exception):
connect_to(aws_access_key_id=account_id, region_name=region_name).kinesis.delete_stream(
StreamName=stream_name
)
# sleep a bit, as stream deletion can take some time ...
time.sleep(1)
def get_kinesis_stream_name(table_name: str) -> str:
return DDB_KINESIS_STREAM_NAME_PREFIX + table_name
def table_name_from_stream_arn(stream_arn: str) -> str:
return stream_arn.split(":table/", 1)[-1].split("/")[0]
def table_name_from_table_arn(table_arn: str) -> str:
return table_name_from_stream_arn(table_arn)
def METHOD_NAME(stream_arn: str) -> str:
table_name = table_name_from_stream_arn(stream_arn)
return get_kinesis_stream_name(table_name)
def shard_id(kinesis_shard_id: str) -> str:
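    # Build a DynamoDB Streams-style shard id ("<prefix>-<20 digit timestamp>-<suffix>")
    # from the backing Kinesis shard id; the epoch timestamp is coarsened (low-order
    # digits zeroed) and left-padded with zeros to 20 characters.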
timestamp = str(int(now_utc()))
timestamp = f"{timestamp[:-5]}00000000".rjust(20, "0")
kinesis_shard_params = kinesis_shard_id.split("-")
return f"{kinesis_shard_params[0]}-{timestamp}-{kinesis_shard_params[-1][:32]}"
def kinesis_shard_id(dynamodbstream_shard_id: str) -> str:
shard_params = dynamodbstream_shard_id.rsplit("-")
return f"{shard_params[0]}-{shard_params[-1]}"
def get_shard_id(stream: Dict, kinesis_shard_id: str) -> str:
ddb_stream_shard_id = stream.get("shards_id_map", {}).get(kinesis_shard_id)
if not ddb_stream_shard_id:
ddb_stream_shard_id = shard_id(kinesis_shard_id)
stream["shards_id_map"][kinesis_shard_id] = ddb_stream_shard_id
return ddb_stream_shard_id | null |
test scan iterates through all docs | # SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
#
# Modifications Copyright OpenSearch Contributors. See
# GitHub history for details.
from __future__ import unicode_literals
import pytest
from pytest import raises
from opensearchpy import Date, Keyword, Q, Text, TransportError
from opensearchpy._async.helpers.document import AsyncDocument
from opensearchpy._async.helpers.search import AsyncMultiSearch, AsyncSearch
from opensearchpy.helpers.response import aggs
from test_opensearchpy.test_async.test_server.test_helpers.test_data import FLAT_DATA
pytestmark = pytest.mark.asyncio
class Repository(AsyncDocument):
created_at = Date()
description = Text(analyzer="snowball")
tags = Keyword()
@classmethod
def search(cls):
return super(Repository, cls).search().filter("term", commit_repo="repo")
class Index:
name = "git"
class Commit(AsyncDocument):
class Index:
name = "flat-git"
async def test_filters_aggregation_buckets_are_accessible(data_client):
has_tests_query = Q("term", files="test_opensearchpy/test_dsl")
s = Commit.search()[0:0]
s.aggs.bucket("top_authors", "terms", field="author.name.raw").bucket(
"has_tests", "filters", filters={"yes": has_tests_query, "no": ~has_tests_query}
).metric("lines", "stats", field="stats.lines")
response = await s.execute()
assert isinstance(
response.aggregations.top_authors.buckets[0].has_tests.buckets.yes, aggs.Bucket
)
assert (
35
== response.aggregations.top_authors.buckets[0].has_tests.buckets.yes.doc_count
)
assert (
228
== response.aggregations.top_authors.buckets[0].has_tests.buckets.yes.lines.max
)
async def test_top_hits_are_wrapped_in_response(data_client):
s = Commit.search()[0:0]
s.aggs.bucket("top_authors", "terms", field="author.name.raw").metric(
"top_commits", "top_hits", size=5
)
response = await s.execute()
top_commits = response.aggregations.top_authors.buckets[0].top_commits
assert isinstance(top_commits, aggs.TopHitsData)
assert 5 == len(top_commits)
hits = [h for h in top_commits]
assert 5 == len(hits)
assert isinstance(hits[0], Commit)
async def test_inner_hits_are_wrapped_in_response(data_client):
s = AsyncSearch(index="git")[0:1].query(
"has_parent", parent_type="repo", inner_hits={}, query=Q("match_all")
)
response = await s.execute()
commit = response.hits[0]
assert isinstance(commit.meta.inner_hits.repo, response.__class__)
assert repr(commit.meta.inner_hits.repo[0]).startswith("<Hit(git/opensearch-py): ")
async def test_scan_respects_doc_types(data_client):
result = Repository.search().scan()
repos = await get_result(result)
assert 1 == len(repos)
assert isinstance(repos[0], Repository)
assert repos[0].organization == "opensearch"
async def METHOD_NAME(data_client):
s = AsyncSearch(index="flat-git")
result = s.scan()
commits = await get_result(result)
assert 52 == len(commits)
assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits}
async def get_result(b):
a = []
async for i in b:
a.append(i)
return a
async def test_multi_search(data_client):
s1 = Repository.search()
s2 = AsyncSearch(index="flat-git")
ms = AsyncMultiSearch()
ms = ms.add(s1).add(s2)
r1, r2 = await ms.execute()
assert 1 == len(r1)
assert isinstance(r1[0], Repository)
assert r1._search is s1
assert 52 == r2.hits.total.value
assert r2._search is s2
async def test_multi_missing(data_client):
s1 = Repository.search()
s2 = AsyncSearch(index="flat-git")
s3 = AsyncSearch(index="does_not_exist")
ms = AsyncMultiSearch()
ms = ms.add(s1).add(s2).add(s3)
with raises(TransportError):
await ms.execute()
r1, r2, r3 = await ms.execute(raise_on_error=False)
assert 1 == len(r1)
assert isinstance(r1[0], Repository)
assert r1._search is s1
assert 52 == r2.hits.total.value
assert r2._search is s2
assert r3 is None
async def test_raw_subfield_can_be_used_in_aggs(data_client):
s = AsyncSearch(index="git")[0:0]
s.aggs.bucket("authors", "terms", field="author.name.raw", size=1)
r = await s.execute()
authors = r.aggregations.authors
assert 1 == len(authors)
assert {"key": "Honza Král", "doc_count": 52} == authors[0] | null |
exit code | # Tests invocation of the interpreter with various command line arguments
# All tests are executed with environment variables ignored
# See test_cmd_line_script.py for testing of script execution
import test.test_support
import sys
import unittest
from test.script_helper import (
assert_python_ok, assert_python_failure, spawn_python, kill_python,
python_exit_code
)
class CmdLineTest(unittest.TestCase):
@classmethod
def tearDownClass(cls):
if test.test_support.is_jython:
# GC is not immediate, so Popen.__del__ may be delayed.
# Try to force any Popen.__del__ errors within scope of test.
from test_weakref import extra_collect
extra_collect()
def start_python(self, *args):
p = spawn_python(*args)
return kill_python(p)
def METHOD_NAME(self, *args):
return python_exit_code(*args)
def test_directories(self):
self.assertNotEqual(self.METHOD_NAME('.'), 0)
self.assertNotEqual(self.METHOD_NAME('< .'), 0)
def verify_valid_flag(self, cmd_line):
data = self.start_python(cmd_line)
self.assertTrue(data == '' or data.endswith('\n'))
self.assertNotIn('Traceback', data)
self.assertNotIn('usage:', data)
def test_optimize(self):
self.verify_valid_flag('-O')
self.verify_valid_flag('-OO')
def test_q(self):
self.verify_valid_flag('-Qold')
self.verify_valid_flag('-Qnew')
self.verify_valid_flag('-Qwarn')
self.verify_valid_flag('-Qwarnall')
def test_site_flag(self):
self.verify_valid_flag('-S')
def test_usage(self):
self.assertIn('usage', self.start_python('-h'))
def test_version(self):
prefix = 'Jython' if test.test_support.is_jython else 'Python'
version = (prefix + ' %d.%d') % sys.version_info[:2]
self.assertTrue(self.start_python('-V').startswith(version))
def test_run_module(self):
# Test expected operation of the '-m' switch
# Switch needs an argument
self.assertNotEqual(self.METHOD_NAME('-m'), 0)
# Check we get an error for a nonexistent module
self.assertNotEqual(
self.METHOD_NAME('-m', 'fnord43520xyz'),
0)
# Check the runpy module also gives an error for
# a nonexistent module
self.assertNotEqual(
self.METHOD_NAME('-m', 'runpy', 'fnord43520xyz'),
0)
# All good if module is located and run successfully
self.assertEqual(
self.METHOD_NAME('-m', 'timeit', '-n', '1'),
0)
def test_run_module_bug1764407(self):
# -m and -i need to play well together
# Runs the timeit module and checks the __main__
# namespace has been populated appropriately
p = spawn_python('-i', '-m', 'timeit', '-n', '1')
p.stdin.write('Timer\n')
p.stdin.write('exit()\n')
data = kill_python(p)
self.assertTrue(data.startswith('1 loop'))
self.assertIn('__main__.Timer', data)
def test_run_code(self):
# Test expected operation of the '-c' switch
# Switch needs an argument
self.assertNotEqual(self.METHOD_NAME('-c'), 0)
# Check we get an error for an uncaught exception
self.assertNotEqual(
self.METHOD_NAME('-c', 'raise Exception'),
0)
# All good if execution is successful
self.assertEqual(
self.METHOD_NAME('-c', 'pass'),
0)
@unittest.skipIf(test.test_support.is_jython,
"Hash randomisation is not supported in Jython.")
def test_hash_randomization(self):
# Verify that -R enables hash randomization:
self.verify_valid_flag('-R')
hashes = []
for i in range(2):
code = 'print(hash("spam"))'
data = self.start_python('-R', '-c', code)
hashes.append(data)
self.assertNotEqual(hashes[0], hashes[1])
# Verify that sys.flags contains hash_randomization
code = 'import sys; print sys.flags'
data = self.start_python('-R', '-c', code)
self.assertTrue('hash_randomization=1' in data)
def test_del___main__(self):
        # Issue #15001: PyRun_SimpleFileExFlags() crashed because it kept a
        # borrowed reference to the dict of the __main__ module and later
        # modified that dict after the module had been destroyed
filename = test.test_support.TESTFN
self.addCleanup(test.test_support.unlink, filename)
with open(filename, "w") as script:
print >>script, "import sys"
print >>script, "del sys.modules['__main__']"
assert_python_ok(filename)
def test_unknown_options(self):
rc, out, err = assert_python_failure('-E', '-z')
self.assertIn(b'Unknown option: -z', err)
self.assertEqual(err.splitlines().count(b'Unknown option: -z'), 1)
self.assertEqual(b'', out)
# Add "without='-E'" to prevent _assert_python to append -E
# to env_vars and change the output of stderr
rc, out, err = assert_python_failure('-z', without='-E')
self.assertIn(b'Unknown option: -z', err)
self.assertEqual(err.splitlines().count(b'Unknown option: -z'), 1)
self.assertEqual(b'', out)
rc, out, err = assert_python_failure('-a', '-z', without='-E')
self.assertIn(b'Unknown option: -a', err)
# only the first unknown option is reported
self.assertNotIn(b'Unknown option: -z', err)
self.assertEqual(err.splitlines().count(b'Unknown option: -a'), 1)
self.assertEqual(b'', out)
def test_python_startup(self):
# Test that the file designated by [PJ]YTHONSTARTUP is executed when interactive.
# Note: this test depends on the -i option forcing Python to treat stdin as interactive.
filename = test.test_support.TESTFN
self.addCleanup(test.test_support.unlink, filename)
with open(filename, "w") as script:
print >>script, "print 6*7"
print >>script, "print 'Ni!'"
expected = ['42', 'Ni!']
def check(*args, **kwargs):
result = assert_python_ok(*args, **kwargs)
self.assertListEqual(expected, result[1].splitlines())
if test.test_support.is_jython:
# Jython produces a prompt before exit, but not CPython. Hard to say who is correct.
expected.append('>>> ')
# The Jython way is to set a registry item python.startup
check('-i', '-J-Dpython.startup={}'.format(filename))
# But a JYTHONSTARTUP environment variable is also supported
check('-i', JYTHONSTARTUP=filename)
else:
check('-i', PYTHONSTARTUP=filename)
@unittest.skipUnless(test.test_support.is_jython, "Requires write to sys.flags.inspect")
def test_python_inspect(self):
# Test that PYTHONINSPECT set during a script causes an interactive session to start.
# Note: this test depends on the -i option forcing Python to treat stdin as interactive,
# and on Jython permitting manipulation of sys.flags.inspect (which CPython won't)
# so that PYTHONINSPECT can have some effect.
filename = test.test_support.TESTFN
self.addCleanup(test.test_support.unlink, filename)
with open(filename, "w") as script:
print >>script, "import sys, os"
print >>script, "sys.flags.inspect = False"
print >>script, "os.environ['PYTHONINSPECT'] = 'whatever'"
print >>script, "print os.environ['PYTHONINSPECT']"
expected = ['whatever', '>>> ']
result = assert_python_ok('-i', filename)
self.assertListEqual(expected, result[1].splitlines())
def test_main():
test.test_support.run_unittest(CmdLineTest)
test.test_support.reap_children()
if __name__ == "__main__":
test_main() | null |