| text (string, lengths 67 to 7.88k) |
|---|
<|fim_prefix|>def <|fim_suffix|>(x):
print(x, "hop!", 3.5)<|fim_middle|>print_string<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self) -> int:<|fim_middle|>max_output_len<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return FrozenDict(
(k, self.open_store_variable(k, v)) for k, v in self.ds.variables.items()
)<|fim_middle|>get_variables<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(typ: typing.Type, fields: typing.Set[str]):
# In Rust, a type will not inherit fields from parent types.
# This means that we need to omit some ancestor of this type, which
# would include all fields of the type.
# We lose a bit of type checking strictness here.
for parent in typ.__mro__:
if not hasattr(parent, '_direct_fields'):
continue
fields = fields.difference((f.name for f in parent._direct_fields))
if len(fields) == 0:
return parent
raise AssertionError('unreachable')<|fim_middle|>find_covering_ancestor<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, function, wrapper):
# type: (FunctionType, Wrapper) -> None
"""Wrap a function with a hook."""
self._store(function)
self._wrapper_map[function] = wrapper
METHOD_NAME(function, wrapper)<|fim_middle|>wrap<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(x, x_adv):
"""
Return the absolute SNR of x to (x - x_adv), with range [0, inf]
If there is no adversarial perturbation, always return inf
"""
signal_power = (np.abs(x) ** 2).mean()
noise_power = (np.abs(x - x_adv) ** 2).mean()
if noise_power == 0:
return np.inf
else:
return signal_power / noise_power<|fim_middle|>snr<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
return "tt_hubspot_pagination"<|fim_middle|>name<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self, jobid):<|fim_middle|>finish_job<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
# If a unicode arg is passed, sep and end should be unicode, too.
class Recorder(object):
def __init__(self, must_be_unicode):
self.buf = []
self.force_unicode = must_be_unicode
def write(self, what):
if self.force_unicode and not isinstance(what, unicode):
raise AssertionError("{0!r} is not unicode".format(what))
self.buf.append(what)
buf = Recorder(True)
print(u'hi', file=buf)
self.assertEqual(u''.join(buf.buf), 'hi\n')
del buf.buf[:]
print(u'hi', u'nothing', file=buf)
self.assertEqual(u''.join(buf.buf), 'hi nothing\n')
buf = Recorder(False)
print('hi', 'bye', end=u'\n', file=buf)
self.assertIsInstance(buf.buf[1], unicode)
self.assertIsInstance(buf.buf[3], unicode)
del buf.buf[:]
print(sep=u'x', file=buf)
self.assertIsInstance(buf.buf[-1], unicode)<|fim_middle|>test_mixed_args<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Whether alt key pressed
Returns
-------
bool
"""
return self._alt<|fim_middle|>alt<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(hist, **kwargs):
"""
This extra line allows to set a fixed colormap range when drawing the
plot 2D for the first time, when inserted in the right place in the original
code.
"""
hist.setLevels(*(kwargs["zrange"] if "zrange" in kwargs else hist.getLevels()))
return None<|fim_middle|>dummy_func<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>() -> None:
"""Test if message sending is a success."""
sid = send_sms(TWILIO_CLEAN_TARGET, MESSAGE, callback=False)
assert sid<|fim_middle|>test_twilio_success<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
with self.assertRaises(XmlWriterError) as cm:
self.writer.add_attribute("foo", "bar")
self.assertEqual("Empty pending tag.", str(cm.exception))
self.writer.start_tag("a")
self.writer.add_attribute("a", "bar")
self.writer.add_attribute("b", True)
self.writer.add_attribute("c", "{")
self.writer.add_attribute("d", "{a}b")
self.writer.add_attribute(QNames.XSI_TYPE, str(DataType.STRING))
expected = {
(None, "a"): "bar",
(None, "b"): "true",
(None, "c"): "{",
(None, "d"): "{a}b",
("http://www.w3.org/2001/XMLSchema-instance", "type"): "xs:string",
}
self.assertEqual(expected, self.writer.attrs)<|fim_middle|>test_add_attribute<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)<|fim_middle|>on_200<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return self.thread.get_absolute_url()<|fim_middle|>get_success_url<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(context, page):
"""
The current requirement for this is really tricky.
See https://github.com/unicef/iogt/pull/955#issuecomment-1008277982 for more
context on why this logic is complicated.
"""
language_options = list()
locales = Locale.objects.select_related('locale_detail').all()
current_language = translation.get_language_info(translation.get_language())
try:
if resolve(context.request.path_info).url_name == 'translation-not-found':
page = get_object_or_404(Page, pk=context.request.GET.get('page'), live=True)
except Resolver404:
pass
for locale in locales:
option = {}
try:
language = translation.get_language_info(locale.language_code)
option['language'] = language
option['selected'] = locale.language_code == current_language.get('code')
except:
continue
try:
should_append = locale.locale_detail.is_active
except LocaleDetail.DoesNotExist:
should_append = True
if should_append:
if page: # If the current URL belongs to a wagtail page
translated_page = page and page.get_translation_or_none(locale)
if translated_page and translated_page.live:
url = translated_page.url
else:
translated_url = translate_url(reverse('translation-not-found'), locale.language_code)
url = f'{translated_url}?page={page.id}'
else: # If the current URL belongs to a django view
url = translate_url(context.request.path_info, locale.language_code)
option['url'] = url
language_options.append(option)
context.update({
'language_options': language_options,
'current_language': current_language,
})
return context<|fim_middle|>language_switcher<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Union[AtlasRelationship, None]:
try:
return next(self._atlas_relation_iterator)
except StopIteration:
return None<|fim_middle|>create_next_atlas_relation<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(**kwargs):
kwargs.setdefault("mac_src", '00.00.00.00.00.01')
kwargs.setdefault("mac_dst", '00.00.00.00.00.02')
kwargs.setdefault("mac_src_step", '00.00.00.00.00.01')
kwargs.setdefault("mac_dst_step", '00.00.00.00.00.01')
kwargs.setdefault("arp_src_hw_addr", '01.00.00.00.00.01')
kwargs.setdefault("arp_dst_hw_addr", '01.00.00.00.00.02')
kwargs.setdefault("ip_src_addr", '11.1.1.1')
kwargs.setdefault("ip_dst_addr", '225.1.1.1')
kwargs.setdefault("ip_src_step", '0.0.0.1')
kwargs.setdefault("ip_dst_step", '0.0.0.1')
kwargs.setdefault("mac_src_count", 20)
kwargs.setdefault("mac_dst_count", 20)
kwargs.setdefault("arp_src_hw_count", 20)
kwargs.setdefault("arp_dst_hw_count", 10)
kwargs.setdefault("ip_src_count", 20)
kwargs.setdefault("ip_dst_count", 20)
kwargs.setdefault("transmit_mode", 'continuous')
kwargs.setdefault("length_mode", 'fixed')
kwargs.setdefault("vlan_id", 10)
kwargs.setdefault("vlan_id_count", 10)
kwargs.setdefault("vlan_id_step", 3)
kwargs.setdefault("l2_encap", 'ethernet_ii')
kwargs.setdefault("frame_size", 64)
kwargs.setdefault("pkts_per_burst", 10)
kwargs.setdefault("mode", "create")
return kwargs<|fim_middle|>build<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
with torch.no_grad():
return "arch-parameters :\n{:}".format(
nn.functional.softmax(self.arch_parameters, dim=-1).cpu()
)<|fim_middle|>show_alphas<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(aws_service):
assert aws_service.island_aws_instance.region == REGION<|fim_middle|>test_region<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
uri = "http://example.com/foo"
n3 = (URIRef(uri) * ZeroOrMore).n3()
assert n3 == "<" + uri + ">*"<|fim_middle|>test_mulpath_n3<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, sid, namespace):
return self.is_connected(sid, namespace)<|fim_middle|>can_disconnect<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls, parameter_set="default"):
"""Return testing parameter settings for the estimator.
Parameters
----------
parameter_set : str, default="default"
Name of the set of test parameters to return, for use in tests. If no
special parameters are defined for a value, will return `"default"` set.
Returns
-------
params : dict or list of dict
"""
params1 = {
"use_box_cox": False,
"use_trend": False,
"use_damped_trend": False,
"sp": [],
"use_arma_errors": False,
"n_jobs": 1,
}
return [params1]<|fim_middle|>get_test_params<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self) -> TrainingState:<|fim_middle|>save<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
res = requests.get(PING_URL)
assert res.status_code == 200<|fim_middle|>test_ping<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.setup_nodes()<|fim_middle|>setup_network<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
# only client's ip is present
request = RequestFactory().get("/")
request.META["HTTP_X_FORWARDED_FOR"] = "203.0.113.195"
client_ip = get_client_ip(request)
self.assertEqual(client_ip, "203.0.113.195")
# only client's ip is present
request = RequestFactory().get("/")
ip = "2001:abc:def:012:345:6789:abcd:ef12"
request.META["HTTP_X_FORWARDED_FOR"] = ip
client_ip = get_client_ip(request)
self.assertEqual(client_ip, ip)
# proxy1 and proxy2 are present along with client's ip
request = RequestFactory().get("/")
request.META[
"HTTP_X_FORWARDED_FOR"
] = "203.0.113.195, 70.41.3.18, 150.172.238.178"
client_ip = get_client_ip(request)
self.assertEqual(client_ip, "203.0.113.195")
# client ip with port
request = RequestFactory().get("/")
request.META[
"HTTP_X_FORWARDED_FOR"
] = "203.0.113.195:8080, 70.41.3.18, 150.172.238.178"
client_ip = get_client_ip(request)
self.assertEqual(client_ip, "203.0.113.195")
# client ip (ipv6), other clients with port
request = RequestFactory().get("/")
ip = "2001:abc:def:012:345:6789:abcd:ef12"
x_forwarded_for = f"{ip}, 203.0.113.195:8080, 70.41.3.18"
request.META["HTTP_X_FORWARDED_FOR"] = x_forwarded_for
client_ip = get_client_ip(request)
self.assertEqual(client_ip, ip)
# client ip with port but not proxy1 and proxy2
request = RequestFactory().get("/")
request.META["HTTP_X_FORWARDED_FOR"] = "203.0.113.195:8080"
client_ip = get_client_ip(request)
self.assertEqual(client_ip, "203.0.113.195")
# no header is present
request = RequestFactory().get("/")
if request.META["REMOTE_ADDR"]:
del request.META["REMOTE_ADDR"]
client_ip = get_client_ip(request)
self.assertEqual(client_ip, None)<|fim_middle|>test_get_client_ip_with_x_forwarded<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, token, type_token=False):
"""convert sentencpiece token to Id"""
if isinstance(token, (CommandToken)):
return token.Id
try:
return self._vocab[token]
except KeyError:
return self._vocab[token.strip()]<|fim_middle|>token_to_id<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
argspec = getargspec(api.list_skeletons)
assert argspec.args == []
assert argspec.defaults is None<|fim_middle|>test_api_list_skeletons<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(model, test_loader, args, optimizer, writer):
model.eval()
test_loss = 0
correct = 0
for niter, (data, target) in enumerate(test_loader):
if args.cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').data.item() # sum up batch loss
pred = output.data.max(1)[1] # get the index of the max log-probability
pred = pred.eq(target.data).cpu().sum()
writer.add_scalar('Test/Loss', pred, niter)
correct += pred
if niter % 100 == 0:
writer.add_image('test', data[0, :, :, :], niter)
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))<|fim_middle|>test<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(_instance):
matrix = {
"default": {
"idempotence": ["idempotence"],
"syntax": ["syntax"],
"converge": ["dependency", "create", "prepare", "converge"],
"cleanup": ["cleanup"],
"check": [
"dependency",
"cleanup",
"destroy",
"create",
"prepare",
"converge",
"check",
"cleanup",
"destroy",
],
"verify": ["verify"],
"create": ["dependency", "create", "prepare"],
"prepare": ["prepare"],
"side_effect": ["side_effect"],
"dependency": ["dependency"],
"test": [
"dependency",
"cleanup",
"destroy",
"syntax",
"create",
"prepare",
"converge",
"idempotence",
"side_effect",
"verify",
"cleanup",
"destroy",
],
"destroy": ["dependency", "cleanup", "destroy"],
},
"foo": {
"idempotence": ["idempotence"],
"syntax": ["syntax"],
"converge": ["dependency", "create", "prepare", "converge"],
"check": [
"dependency",
"cleanup",
"destroy",
"create",
"prepare",
"converge",
"check",
"cleanup",
"destroy",
],
"cleanup": ["cleanup"],
"create": ["dependency", "create", "prepare"],
"verify": ["verify"],
"prepare": ["prepare"],
"side_effect": ["side_effect"],
"dependency": ["dependency"],
"test": [
"dependency",
"cleanup",
"destroy",
"syntax",
"create",
"prepare",
"converge",
"idempotence",
"side_effect",
"verify",
"cleanup",
"destroy",
],
"destroy": ["dependency", "cleanup", "destroy"],
},
}
assert matrix == _instance._get_matrix()<|fim_middle|>test_get_matrix<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(dataset: str, group: str) -> None:
train, horizon, freq, seasonality = get_data('data', dataset, group)
partial_fit_and_predict = partial(fit_and_predict,
horizon=horizon, freq=freq, seasonality=seasonality)
start = time.time()
print(f'Parallelism on {cpu_count()} CPU')
with Pool(cpu_count()) as pool:
results = pool.starmap(partial_fit_and_predict, train.groupby('unique_id'))
end = time.time()
print(end - start)
forecasts = pd.concat(results)
forecasts.to_csv(f'data/statsmodels-forecasts-{dataset}-{group}-pred-int.csv', index=False)
time_df = pd.DataFrame({'time': [end - start], 'model': ['ets_statsmodels']})
time_df.to_csv(f'data/statsmodels-time-{dataset}-{group}-pred-int.csv', index=False)<|fim_middle|>main<|file_separator|> |
<|fim_prefix|> <|fim_suffix|>(self, workingdir):<|fim_middle|>setup_and_chdir_to_working_directory<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(sovryn, loanToken, web3, set_demand_curve, lend_to_pool, open_margin_trade_position, priceFeeds, chain, return_token_is_collateral):
close_complete_margin_trade(sovryn, loanToken, web3, set_demand_curve, lend_to_pool, open_margin_trade_position, priceFeeds, chain, return_token_is_collateral)<|fim_middle|>test_close_complete_margin_trade<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, name: Incomplete | None = None) -> None: ...<|fim_middle|>cache_clear<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
weight = torch.tensor([0.25, 0.66])
predictions = self._get_default_predictions_tensor(0.0)
# only label 0 is predicted as positive.
predictions[:, 0] = 1.0
target = self._get_default_target_zeroes_tensor()
# half target with label 0, the other half with 1.
target[:, : self.img_size // 2] = 1
intersection = torch.tensor([0.5, 0.0])
denominator = torch.tensor([1.5, 0.5])
expected_dice_loss = 1.0 - ((2.0 * intersection) / (denominator + self.eps))
expected_dice_loss *= weight
expected_dice_loss = expected_dice_loss.mean()
criterion = DiceLoss(smooth=0, eps=self.eps, apply_softmax=False, weight=weight)
dice_loss = criterion(predictions, target)
self._assertion_dice_torch_values(expected_dice_loss, dice_loss)<|fim_middle|>test_dice_weight_classes<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, id, value, type='string', meta=None):
raise ValueError('%s cannot be set.' % escape(id))<|fim_middle|>set_property<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(a, b):
return _auto_broadcast(a, b, tir.GE)<|fim_middle|>ge<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Abort current command
:returns: None
:rtype: None
"""
return self._sc.METHOD_NAME()<|fim_middle|>do_abort<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(monkeypatch):
monkeypatch.setattr("mlflow.gateway.utils._gateway_uri", None)
valid_uri = "http://localhost"
set_gateway_uri(valid_uri)
assert get_gateway_uri() == valid_uri
invalid_uri = "localhost"
with pytest.raises(MlflowException, match="The gateway uri provided is missing required"):
set_gateway_uri(invalid_uri)<|fim_middle|>test_set_gateway_uri<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> _Reply: ...<|fim_middle|>quit<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(parser: ArgumentParser) -> None:
parser.add_argument("--auth-token", help="specify api key for authentication")<|fim_middle|>configure_parser<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
code = '''<|fim_middle|>test_capture_bound_method<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
mc = mcscf.CASCI(m, 4, 4)
emc = mc.casci()[0]
self.assertAlmostEqual(emc, -108.8896744464714, 7)
self.assertAlmostEqual(numpy.linalg.norm(mc.analyze()),
2.6910275883606078, 4)<|fim_middle|>test_casci_4o4e<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, exc, task_id, args, kwargs, einfo):
super().METHOD_NAME(exc, task_id, args, kwargs, einfo=einfo)
if not self._result:
self._result = TaskResult(status=TaskResultStatus.ERROR, messages=[str(exc)])
if not self._result.uid:
self._result.uid = self._uid
TaskInfo(
task_name=self.__name__,
task_description=self.__doc__,
start_timestamp=self.start or default_timer(),
finish_timestamp=default_timer(),
finish_time=datetime.now(),
result=self._result,
task_call_module=self.__module__,
task_call_func=self.__name__,
task_call_args=args,
task_call_kwargs=kwargs,
).save(self.result_timeout_hours)
Event.new(
EventAction.SYSTEM_TASK_EXCEPTION,
message=f"Task {self.__name__} encountered an error: {exception_to_string(exc)}",
).save()<|fim_middle|>on_failure<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls, parameter_set="default"):
"""Return testing parameter settings for the estimator.
Parameters
----------
parameter_set : str, default="default"
Name of the set of test parameters to return, for use in tests. If no
special parameters are defined for a value, will return `"default"` set.
Returns
-------
params : dict or list of dict, default = {}
Parameters to create testing instances of the class
Each dict are parameters to construct an "interesting" test instance, i.e.,
`MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
`create_test_instance` uses the first (or only) dictionary in `params`
"""
return {"window_length": 3}<|fim_middle|>get_test_params<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
m: types.ModuleType | None = None,
name: str | None = None,
globs: dict[str, Any] | None = None,
verbose: bool | None = None,
report: bool = True,
optionflags: int = 0,
extraglobs: dict[str, Any] | None = None,
raise_on_error: bool = False,
exclude_empty: bool = False,
) -> TestResults: ...<|fim_middle|>testmod<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
cmake_layout(self, src_folder="src")<|fim_middle|>layout<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, commit=True):
instance = super().METHOD_NAME(commit)
if commit:
usage = self.cleaned_data["usage"]
offers = self.cleaned_data["offers"]
if instance is not None:
# Update vouchers in this set
for i, voucher in enumerate(instance.vouchers.order_by("date_created")):
voucher.name = "%s - %d" % (instance.name, i + 1)
voucher.usage = usage
voucher.start_datetime = instance.start_datetime
voucher.end_datetime = instance.end_datetime
voucher.METHOD_NAME()
voucher.offers.set(offers)
# Add vouchers to this set
vouchers_added = False
for i in range(instance.vouchers.count(), instance.count):
voucher = Voucher.objects.create(
name="%s - %d" % (instance.name, i + 1),
code=get_unused_code(length=instance.code_length),
voucher_set=instance,
usage=usage,
start_datetime=instance.start_datetime,
end_datetime=instance.end_datetime,
)
voucher.offers.add(*offers)
vouchers_added = True
if vouchers_added:
instance.update_count()
return instance<|fim_middle|>save<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
from prowler.providers.aws.services.iam.iam_service import IAM, PasswordPolicy
audit_info = self.set_mocked_audit_info()
with mock.patch(
"prowler.providers.aws.lib.audit_info.audit_info.current_audit_info",
new=audit_info,
):
with mock.patch(
"prowler.providers.aws.services.iam.iam_password_policy_expires_passwords_within_90_days_or_less.iam_password_policy_expires_passwords_within_90_days_or_less.iam_client",
new=IAM(audit_info),
) as service_client:
from prowler.providers.aws.services.iam.iam_password_policy_expires_passwords_within_90_days_or_less.iam_password_policy_expires_passwords_within_90_days_or_less import (
iam_password_policy_expires_passwords_within_90_days_or_less,
)
service_client.password_policy = PasswordPolicy(
length=10,
symbols=True,
numbers=True,
uppercase=True,
lowercase=True,
allow_change=True,
expiration=True,
max_age=40,
reuse_prevention=2,
hard_expiry=True,
)
check = iam_password_policy_expires_passwords_within_90_days_or_less()
result = check.execute()
assert len(result) == 1
assert result[0].status == "PASS"
assert result[0].resource_id == AWS_ACCOUNT_NUMBER
assert result[0].resource_arn == AWS_ACCOUNT_ARN
assert result[0].region == AWS_REGION
assert search(
"Password expiration is set lower than 90 days",
result[0].status_extended,
)<|fim_middle|>test_password_expiration_lower_90<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(i, expected):
"""."""
r = non_negative_int(i)
assert r == expected<|fim_middle|>test_non_negative_int<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
self, block, predecessor=None, successor=None, loop=None, loop_successor=None, **kwargs
): # pylint:disable=no-self-use
if isinstance(successor, ContinueNode) or successor is loop_successor:
self.continue_preludes[loop].append(block)<|fim_middle|>handle_block<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.flownet.to(METHOD_NAME)<|fim_middle|>device<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
# This is a serial task.
return 1<|fim_middle|>run_max_procs<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> '_Hash':
return hashlib.METHOD_NAME(self.to_bytes())<|fim_middle|>sha512<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Sequence['outputs.ScheduledSourceSynchronizationSettingResponse']:
"""
Collection of items of type DataTransferObjects.
"""
return pulumi.get(self, "value")<|fim_middle|>value<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(vfp_data: dict[str, dict[str, list[str]]]) -> dict[str, set[str]]:
"""
Look for files with the same "virtual file path".
input: {uid {parent_uid: [vfp]}} -> output: {vfp: [uid]}
"""
result = {}
for uid in vfp_data:
vfp_dict = vfp_data.get(uid)
for vfp_list in vfp_dict.values():
for vfp in vfp_list:
result.setdefault(vfp, set()).add(uid)
return result<|fim_middle|>transpose_vfp_dict<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
error_context.context("Start Iometer", test.log.info)
args = (
' && '.join((("cd %s" % ins_path), run_cmd % (icf_name, res_file))),
run_timeout)
if params.get('bg_mode', 'no') == 'yes':
_run_backgroud(args)
_is_iometer_alive()
time.sleep(int(params.get('sleep_time', '180')))
_is_iometer_alive()
else:
session.cmd(*args)
error_context.context(
"Copy result '%s' to host" % res_file, test.log.info)
vm.copy_files_from(res_file, test.resultsdir)<|fim_middle|>run_iometer<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return True<|fim_middle|>isatty<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
serializer = AllOrNoneTruthyFieldsValidatorSerializer(
data={"foo": "foo", "bar": "bar"}
)
is_valid = serializer.is_valid()
self.assertTrue(is_valid)<|fim_middle|>test_it_validates_when_values_for_all<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(report_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetReportResult:
"""
Gets the report for a subscription by report name.
Azure REST API version: 2018-08-01-preview.
:param str report_name: Report Name.
"""
__args__ = dict()
__args__['reportName'] = report_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:costmanagement:getReport', __args__, opts=opts, typ=GetReportResult).value
return AwaitableGetReportResult(
definition=pulumi.get(__ret__, 'definition'),
delivery_info=pulumi.get(__ret__, 'delivery_info'),
format=pulumi.get(__ret__, 'format'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
schedule=pulumi.get(__ret__, 'schedule'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))<|fim_middle|>get_report<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
f = open(os.path.join(pmdastatsd_dir, dbpmdarc_filename), "w+")
f.write("debug libpmda\n")
f.write("open pipe pmdastatsd\n")
f.write("namespace root_statsd\n")
f.write("status\n")
f.write("\n")
f.close()<|fim_middle|>setup_dbpmdarc<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
class A:
func = removed_function("foo")
a = A()
with pytest.warns(DeprecationWarning):
a.func()<|fim_middle|>test_removed_function_from_class<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(args: argparse.Namespace) -> None:
import questionary
print_success("Welcome to Rasa! π€\n")
if args.no_prompt:
print(
f"To get started quickly, an "
f"initial project will be created.\n"
f"If you need some help, check out "
f"the documentation at {DOCS_BASE_URL}.\n"
)
else:
print(
f"To get started quickly, an "
f"initial project will be created.\n"
f"If you need some help, check out "
f"the documentation at {DOCS_BASE_URL}.\n"
f"Now let's start! ππ½\n"
)
if args.init_dir is not None:
path = args.init_dir
else:
path = (
questionary.text(
"Please enter a path where the project will be "
"created [default: current directory]"
)
.skip_if(args.no_prompt, default="")
.ask()
)
# set the default directory. we can't use the `default` property
# in questionary as we want to avoid showing the "." in the prompt as the
# initial value. users tend to overlook it and it leads to invalid
# paths like: ".C:\mydir".
# Can't use `if not path` either, as `None` will be handled differently (abort)
if path == "":
path = "."
if args.no_prompt and not os.path.isdir(path):
print_error_and_exit(f"Project init path '{path}' not found.")
if path and not os.path.isdir(path):
_ask_create_path(path)
if path is None or not os.path.isdir(path):
print_cancel()
if not args.no_prompt and len(os.listdir(path)) > 0:
_ask_overwrite(path)
telemetry.track_project_init(path)
init_project(args, path)<|fim_middle|>run<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Any:
"""Get AwaDB client."""
return self.awadb_client<|fim_middle|>client<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Method override."""
BaseStructHandleMappersHeaderGenerator.METHOD_NAME(self)
# Finish processing in superclass
Dx12BaseGenerator.METHOD_NAME(self)<|fim_middle|>end_file<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
if self.options.shared:
try:
del self.options.fPIC
except Exception:
pass<|fim_middle|>configure<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return dict(self.site_urls)[self.service] + self.value<|fim_middle|>url<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Test :func:`colour.models.icacb.XYZ_to_ICaCb` definition domain and
range scale support.
"""
XYZ = np.array([0.07818780, 0.06157201, 0.28099326])
ICaCb = XYZ_to_ICaCb(XYZ)
d_r = (("reference", 1), ("1", 1), ("100", 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_array_almost_equal(
XYZ_to_ICaCb(XYZ * factor), ICaCb * factor, decimal=7
)<|fim_middle|>test_domain_range_scale_xy_z_to<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
version_string = self.current_changes.version
if version_string is None:
version_string = "@AWS_JAVA_SDK_VERSION@"
self.write("# __%s__ __%s__\n" % (version_string, self.current_changes.date))<|fim_middle|>write_header<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls, filename: str, remove_dep_on_pkg: str) -> str:
"""
Returns the content of the requirements.txt file with the dependency on `remove_dep_on_pkg` removed.
This method preserves all the comments.
"""
if not os.path.exists(filename):
raise Exception(f"File {filename} doesn't exist")
result = ""
line_continuation_buffer = ""
with open(filename, "r", encoding="utf-8") as fd:
for line in fd.readlines():
if line_continuation_buffer:
line_continuation_buffer += line
if not line.endswith("\\"):
if Requirement.parse(line_continuation_buffer).key != remove_dep_on_pkg:
result += line_continuation_buffer
line_continuation_buffer = ""
elif not line.strip() or line.strip().startswith("#"):
result += line
elif line.endswith("\\"):
line_continuation_buffer = line
elif Requirement.parse(line).key != remove_dep_on_pkg.lower():
result += line
else:
# Dependency matches `remove_dep_on_pkg` => Remove line from result
pass
return result<|fim_middle|>get_content_with_dep_removed<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())<|fim_middle|>to_str<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(d, fn, mode):
fp = os.path.join(d, fn)
return open(fp, mode)<|fim_middle|>open_metadata_file<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(stream):
headers = stream.authenticator.get_auth_header()
response = create_note(stream, headers)
assert response.status_code == 201, "Note was note created"
created_note_id = response.json()["id"]
# A record may not be accessible right after creation. This workaround makes few attempts to receive latest record
notes = []
attempts = 10
while created_note_id not in notes:
now = pendulum.now(tz="UTC")
stream_slice = {
"start_date": now.add(days=-1).isoformat(timespec="milliseconds"),
"end_date": now.isoformat(timespec="milliseconds")
}
notes = set(record["Id"] for record in stream.read_records(sync_mode=SyncMode.full_refresh, stream_slice=stream_slice))
try:
assert created_note_id in notes, "No created note during the sync"
break
except Exception as e:
if attempts:
time.sleep(2)
else:
raise e
attempts = attempts - 1
response = update_note(stream, created_note_id, headers)
assert response.status_code == 204, "Note was not updated"
stream_state = get_stream_state()
response = delete_note(stream, created_note_id, headers)
assert response.status_code == 204, "Note was not deleted"
# A record updates take some time to become accessible
attempts = 10
while created_note_id not in notes:
now = pendulum.now(tz="UTC")
stream_slice = {
"start_date": now.add(days=-1).isoformat(timespec="milliseconds"),
"end_date": now.isoformat(timespec="milliseconds")
}
record = None
for record in stream.read_records(sync_mode=SyncMode.incremental, stream_state=stream_state, stream_slice=stream_slice):
if created_note_id == record["Id"]:
break
try:
assert record, "No updated note during the sync"
assert record["IsDeleted"], "Wrong field value for deleted note during the sync"
assert record["TextPreview"] == UPDATED_NOTE_CONTENT and record["TextPreview"] != NOTE_CONTENT, "Note Content was not updated"
break
except Exception as e:
if attempts:
time.sleep(2)
else:
raise e
attempts = attempts - 1<|fim_middle|>test_deleted_record<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
# Test that our custom field requirement is honored.
# The `title` field is not mandatory in the XML schema, however packages without
# a title are rendered confusingly in chocolatey output, so enforce it.
with self.assertRaises(NuspecValidationError):
NuspecGenerator(
id="test", title=None, version="4.4", authors="people", description=""
)
# Smoke test that the validation code provided by `generateDS` is working.
# This test is just to help catch any upstream (NuGet or generateDS) bugs,
# should they ever occur.
with self.assertRaises(NuspecValidationError):
NuspecGenerator(
id=None, title="", version="4.4", authors="people", description=""
)<|fim_middle|>test_nuspec_generator_basic_validation<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
return _reader.history.maxSize<|fim_middle|>get_history_length<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, url_data):
"""Check for clamav and extern."""
return self.clamav_conf and not url_data.extern[0]<|fim_middle|>applies_to<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs
) -> List[List[Dict[str, Tensor]]]:
"""API to be invoked from :func:`~fairseq.tasks.fairseq_task.FairseqTask.inference_step()`"""
bos_token = kwargs.get("bos_token", None)
tokens, scores, alignments = self._generate(sample, bos_token=bos_token)
bsz = tokens.size(0)
# list of completed sentences
# see :class:`~fairseq.sequence_generator.SequenceGenerator` for specifications
finalized = torch.jit.annotate(
List[List[Dict[str, Tensor]]],
[torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
)
for i in range(bsz):
finalized[i].append(
{
"tokens": tokens[i, :],
"score": scores[i],
"attention": None,
"alignment": alignments[i, :, :]
if self.print_alignment and alignments is not None
else None,
}
)
return finalized<|fim_middle|>generate<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters<|fim_middle|>header_parameters<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(elements: Iterable[ScoreElementMetadata]) -> List:
"""Returns score api used in icx_getScoreApi JSON-RPC method
:param elements:
:return:
"""
api = []
for element in elements:
if isinstance(element, FunctionMetadata):
func: FunctionMetadata = element
if func.flag == ScoreFlag.PAYABLE:
continue
item = _get_function(func.name, func.flag, func.signature)
elif isinstance(element, EventLogMetadata):
eventlog: EventLogMetadata = element
item = _get_eventlog(eventlog.name, eventlog.signature, eventlog.indexed_args_count)
else:
raise IllegalFormatException(f"Invalid score element: {element} {type(element)}")
api.append(item)
return api<|fim_middle|>get_score_api<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, node):
"""
Visits a single unit type element, checks for correct usage of units and builds the corresponding combined
unit.
:param node: a single unit type meta_model.
:type node: ASTUnitType
:return: a new type symbol representing this unit type.
:rtype: type_symbol
"""
if node.is_simple_unit():
type_s = PredefinedTypes.get_type(node.unit)
if type_s is None:
code, message = Messages.unknown_type(str(node.unit))
Logger.log_message(None, code, message, node.get_source_position(), LoggingLevel.ERROR)
return
node.set_type_symbol(type_s)
self.symbol = type_s<|fim_middle|>visit_unit_type<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
plot_limits = PlotLimits()
limit_names = ["value", "index", "count", "density", "depth", "date"]
non_numbers = [
"string",
datetime.date(2001, 1, 1),
"3.0",
"1e-5",
"-5.5",
"-.5",
]
positive_floats = [1.0, 1.5, 3.1415, 1e10, 5.2e-7]
negative_floats = [-1.0, -1.5, -3.1415, -1e10, -5.2e-7]
positive_ints = [1, 5, 1000]
negative_ints = [-1, -5, -1000]
non_dates = ["string", "3.4", "2001-01-01", datetime.time()]
dates = [datetime.date(2001, 1, 1), datetime.datetime(2010, 3, 3)]
setter_should_fail_values = {
"value": non_numbers + dates,
"index": non_numbers + positive_floats + negative_floats + dates,
"depth": non_numbers + negative_floats + negative_ints + negative_ints,
"count": non_numbers + negative_ints + negative_floats + positive_floats,
"density": non_numbers + negative_floats + negative_ints,
"date": non_dates,
}
setter_should_succeed_values = {
"value": positive_floats + negative_floats + positive_ints + negative_ints,
"index": positive_ints,
"depth": positive_floats + positive_ints,
"count": positive_ints,
"density": positive_floats + positive_ints,
"date": dates,
}
for attribute_name in limit_names:
assert getattr(plot_limits, f"{attribute_name}_minimum") is None
assert getattr(plot_limits, f"{attribute_name}_maximum") is None
assert getattr(plot_limits, f"{attribute_name}_limits") == (None, None)
setattr(plot_limits, f"{attribute_name}_minimum", None)
setattr(plot_limits, f"{attribute_name}_maximum", None)
setattr(plot_limits, f"{attribute_name}_limits", (None, None))
with pytest.raises(TypeError):
setattr(plot_limits, f"{attribute_name}_limits", None)
for value in setter_should_fail_values[attribute_name]:
with pytest.raises((TypeError, ValueError)):
setattr(plot_limits, f"{attribute_name}_minimum", value)
with pytest.raises((TypeError, ValueError)):
setattr(plot_limits, f"{attribute_name}_maximum", value)
assert getattr(plot_limits, f"{attribute_name}_limits") == (None, None)
for value in setter_should_succeed_values[attribute_name]:
setattr(plot_limits, f"{attribute_name}_minimum", value)
setattr(plot_limits, f"{attribute_name}_maximum", value)
minimum = getattr(plot_limits, f"{attribute_name}_minimum")
maximum = getattr(plot_limits, f"{attribute_name}_maximum")
assert minimum == value
assert maximum == value
assert getattr(plot_limits, f"{attribute_name}_limits") == (
minimum,
maximum,
)<|fim_middle|>test_plot_limits<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
m0 = dict(
NWBFile=dict(
experimenter="Mr Tester",
identifier="abc123",
institution="My University",
lab="My lab",
session_description="testing conversion tools software",
session_start_time="2020-04-15T10:00:00+00:00",
),
Subject=dict(
description="ADDME",
sex="M",
species="ADDME",
subject_id="sid000",
weight="10g",
date_of_birth="2020-04-07T00:15:00+00:00",
),
Ecephys=dict(
Device=[dict(name="device_ecephys")],
ElectricalSeries=[
dict(description="ADDME", name="ElectricalSeries", rate=10.0, starting_time=0.0, conversion=1.0)
],
ElectrodeGroup=[
dict(description="ADDME", device="device_ecephys", location="ADDME", name="ElectrodeGroup")
],
),
)
yaml_file_path = os.path.join(os.path.dirname(__file__), "metadata_tests.yaml")
json_file_path = os.path.join(os.path.dirname(__file__), "metadata_tests.json")
m1 = load_dict_from_file(file_path=yaml_file_path)
compare_dicts_2(m0, m1)
m2 = load_dict_from_file(file_path=json_file_path)
compare_dicts_2(m0, m2)<|fim_middle|>test_load_metadata_from_file<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, ZigateIEEE):
self.ControllerIEEE = ZigateIEEE<|fim_middle|>update_zigate_ieee<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, file, path):
try:
self.filename = file.readObject(path + "/filename")
self.cycleHistory = file.readObject(path + "/cycleHistory")
self.timeHistory = file.readObject(path + "/timeHistory")
self.sampleHistory = file.readObject(path + "/sampleHistory")
file.read(self.nodeFlags, path + "/nodeFlags")
self.flushHistory()
except RuntimeError:
print("WARNING: unable to restore NodeHistory restart state")
return<|fim_middle|>restore_state<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(dtype):
server = rpc.Server(key="roofline_flops_cpu")
remote = rpc.connect("127.0.0.1", server.port, key="roofline_flops_cpu")
target = tvm.target.Target("llvm -mattr=+fma,+avx2")
dev = remote.device(str(target))
# This test uses vectorized instructions so we need a target that supports them
flops = tvm.utils.roofline.x86.estimate_peak_fma_vector_flops(target, dev, remote, "float32")
# Assume we can achieve 1 GFLOP/s per thread, which is 1 FLOP per cycle on a 1GHz cpu.
assert (
flops > 10**9 and flops < 10**14
), f"FLOP/s should be between 10^9 and 10^14, but it is {flops}"<|fim_middle|>test_estimate_peak_flops_cpu<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> 'Transport':
"""Return a fully configured transport that can be used to connect to the computer set for this instance."""
computer = self.computer
transport_type = computer.transport_type
try:
transport_class = TransportFactory(transport_type)
except exceptions.EntryPointError as exception:
raise exceptions.ConfigurationError(f'transport type `{transport_type}` could not be loaded: {exception}')
return transport_class(machine=computer.hostname, **self.get_auth_params())<|fim_middle|>get_transport<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Test lines shorter than chunksize, data longer than chunk_size."""
chunk_size = 500
# Make sure the test data fits the testcase:
#
# 1. The lines must be shorter than the chunk_size, so that all
# chunks can be shorter than chunk_size
self.assertTrue(max(map(len, csv_test_data.splitlines())) < chunk_size)
# 2. The data must be longer than chunk_size so that chunking is
# useful
self.assertTrue(len(csv_test_data) > chunk_size)
# The actual test
chunked = list(read_delimited_chunks(io.BytesIO(csv_test_data),
chunk_size))
# Appending all chunks yields the original unchunked data
self.assertEqual(csv_test_data, b"".join(chunked))
# All chunks are shorter than chunk_size
self.assertTrue(max(map(len, chunked)) <= chunk_size)
# All chunks were split at line separators
self.assertEqual(csv_test_data.splitlines(),
list(itertools.chain.from_iterable(
chunk.splitlines() for chunk in chunked)))<|fim_middle|>test_read_delimited_chunks_short_lines<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(obj, header, itime: int, ntimes: int, dt):
if obj.nonlinear_factor not in (None, np.nan):
name = obj.data_code['name']
if isinstance(dt, (int, np.int32, np.int64)):
dt_line = ' %14s = %i\n' % (name.upper(), dt)
elif isinstance(dt, (float, np.float32, np.float64)):
dt_line = ' %14s = %12.5E\n' % (name, dt)
#elif isinstance(dt, np.complex):
#dt_line = ' %14s = %12.5E %12.5Ej\n' % (name, dt.real, dt.imag)
else:
raise NotImplementedError(type(dt))
#dt_line = ' %14s = %12.5E %12.5Ej\n' % (name, dt.real, dt.imag)
header[1] = dt_line
codes = getattr(obj, name + 's')
if not len(codes) == ntimes:
msg = (f'{name}s in {obj.__class__.__name__} the wrong size; '
f'ntimes={ntimes}; {name}s={codes}\n')
atts = object_attributes(obj)
msg += f'names={atts}\n'
msg += f'data_names={obj.data_names}\n'
raise IndexError(msg)
if hasattr(obj, 'eigr'):
try:
eigenvalue_real = obj.eigrs[itime]
except IndexError:
msg = 'eigrs[%s] not found; ntimes=%s; eigrs=%s' % (itime, ntimes, obj.eigrs)
msg += 'names=%s' % object_attributes(obj)
raise IndexError(msg)
eigr_line = ' %14s = %12.6E\n' % ('EIGENVALUE', eigenvalue_real)
header[2] = eigr_line
return header<|fim_middle|>eigenvalue_header<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(request):
for feat in models.Feature.all(request.db):
for attr in ["everyone", "first_party", "admins", "staff"]:
val = request.POST.get(f"{feat.name}[{attr}]")
if val == "on":
setattr(feat, attr, True)
else:
setattr(feat, attr, False)
for cohort in request.db.query(models.FeatureCohort).all():
val = request.POST.get(f"{feat.name}[cohorts][{cohort.name}]")
if val == "on":
if cohort not in feat.cohorts:
feat.cohorts.append(cohort)
elif cohort in feat.cohorts:
feat.cohorts.remove(cohort)
request.session.flash(_("Changes saved."), "success")
return httpexceptions.HTTPSeeOther(location=request.route_url("admin.features"))<|fim_middle|>features_save<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=["O", "B-HEADER", "I-HEADER", "B-QUESTION", "I-QUESTION", "B-ANSWER", "I-ANSWER"]
)
),
"image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"),
}
),
supervised_keys=None,
homepage="https://guillaumejaume.github.io/FUNSD/",
citation=_CITATION,
)<|fim_middle|>info<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>test_base_loss_info<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(date_time_str: str) -> datetime:
proper_datetime = datetime.strptime(date_time_str, "%Y-%m-%d %H:%M")
if proper_datetime.minute != 0:
proper_datetime = proper_datetime + timedelta(hours=1)
proper_datetime = proper_datetime.replace(minute=0)
return proper_datetime<|fim_middle|>format_dt<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.table = []
ctbl_file = open(self.directory + self.table_name, "r")
while 1:
line = ctbl_file.readline()
if not line: break
line = line[:-2]
[r, g, b] = line.split(" ")
self.table.append([int(r), int(g), int(b)])<|fim_middle|>load_color_table<|file_separator|> |
<|fim_prefix|> <|fim_suffix|>(self, *args, **kwargs):<|fim_middle|>re_boot<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.assertEqual(tostr(DerivedList([1, 2])), '[1, 2]')
self.assertIs(tostr.handlers[DerivedList], tostr.handlers[list])<|fim_middle|>test_new_type_list<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, wallet_name):
self.log.info("Make sure we can import wallet when pruned and required blocks are still available")
wallet_file = wallet_name + ".dat"
wallet_birthheight = self.get_birthheight(wallet_file)
# Verify that the block at wallet's birthheight is available at the pruned node
self.nodes[1].getblock(self.nodes[1].getblockhash(wallet_birthheight))
# Import wallet into pruned node
self.nodes[1].createwallet(wallet_name="wallet_pruned", descriptors=False, load_on_startup=True)
self.nodes[1].importwallet(os.path.join(self.nodes[0].datadir, wallet_file))
# Make sure that prune node's wallet correctly accounts for balances
assert_equal(self.nodes[1].getbalance(), self.nodes[0].getbalance())
self.log.info("- Done")<|fim_middle|>test_wallet_import_pruned<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(app, data_dir: Path) -> Security:
_setup_flask_mongo(app)
flask_security_config = _generate_flask_security_configuration(data_dir)
app.config["SECRET_KEY"] = flask_security_config["secret_key"]
app.config["SECURITY_PASSWORD_SALT"] = flask_security_config["password_salt"]
app.config["SECURITY_USERNAME_ENABLE"] = True
app.config["SECURITY_USERNAME_REQUIRED"] = True
app.config["SECURITY_REGISTERABLE"] = True
app.config["SECURITY_SEND_REGISTER_EMAIL"] = False
app.config["SECURITY_TOKEN_MAX_AGE"] = ACCESS_TOKEN_TTL
app.config["SECURITY_RETURN_GENERIC_RESPONSES"] = True
# Ignore CSRF, because we don't store tokens in cookies
app.config["WTF_CSRF_CHECK_DEFAULT"] = False
app.config["SECURITY_CSRF_IGNORE_UNAUTH_ENDPOINTS"] = True
# Forbid sending authentication token in URL parameters
app.config["SECURITY_TOKEN_AUTHENTICATION_KEY"] = None
# The database object needs to be created after we configure the flask application
db = MongoEngine(app)
user_datastore = MongoEngineUserDatastore(db, User, Role)
_create_roles(user_datastore)
class CustomConfirmRegisterForm(ConfirmRegisterForm):
# We don't use the email, but the field is required by ConfirmRegisterForm.
# Email validators need to be overriden, otherwise an error about invalid email is raised.
email = StringField("Email", default="[email protected]", validators=[])
def to_dict(self, only_user):
registration_dict = super().to_dict(only_user)
registration_dict.update({"roles": [AccountRole.ISLAND_INTERFACE.name]})
return registration_dict
app.security = Security(
app,
user_datastore,
confirm_register_form=CustomConfirmRegisterForm,
register_blueprint=False,
)
# Force Security to always respond as an API rather than HTTP server
# This will cause 401 response instead of 301 for unauthorized requests for example
app.security._want_json = lambda _request: True
app.session_interface = _disable_session_cookies()
return app.security<|fim_middle|>configure_flask_security<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
"""
"original": "Enrolment in 3894 Nutrition/Dietetics and Food Innovation or 3895 Pharmaceutical Medicine/Pharmacy<br/><br/>",
"processed": "Enrolment in 3894 Nutrition/Dietetics && Food Innovation || 3895 Pharmaceutical Medicine/Pharmacy"
"""
return "3894 || 3895"<|fim_middle|>che_m_1832<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls, subclass):
"""Register a virtual subclass of an ABC."""
if not isinstance(subclass, (type, types.ClassType)):
raise TypeError("Can only register classes")
if issubclass(subclass, cls):
return # Already a subclass
# Subtle: test for cycles *after* testing for "already a subclass";
# this means we allow X.register(X) and interpret it as a no-op.
if issubclass(cls, subclass):
# This would create a cycle, which is bad for the algorithm below
raise RuntimeError("Refusing to create an inheritance cycle")
cls._abc_registry.add(subclass)
ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache<|fim_middle|>register<|file_separator|> |