text (stringlengths 67–7.88k) |
---|
<|fim_prefix|>def <|fim_suffix|>(self):
return self._stack.pop(-1)<|fim_middle|>pop_scope<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(workspace, tf_script, tf_args, tf_envs, set_jemalloc_version_cmd):
cmd_str = "cd " + workspace + ";"
if set_jemalloc_version_cmd:
cmd_str += set_jemalloc_version_cmd
cmd_str += "LD_PRELOAD=${JEMALLOC_VERSION} "
cmd_str += " ".join(tf_envs) + " $(which python) -u "
cmd_str += tf_script + " " + " ".join(tf_args)
print("run tensorflow command:", cmd_str)
return sp.call(cmd_str, shell=True)<|fim_middle|>run_tensorflow_job<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>test_get_gpu_type_p100<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self,regexp_pattern=None):
assert True, \
"method available is not implemented at this time" <|fim_middle|>available<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""The repository variable secured"""
return self.get_data("secured")<|fim_middle|>secured<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")<|fim_middle|>name<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(capfd, package):
"""Test using Amazon Braket to draw using ascii output is of the expected length."""
# pylint: disable=eval-used
circuit_wrapper(eval(f"{package}_bell")).draw(package="braket", output="ascii")
out, err = capfd.readouterr()
assert len(err) == 0
assert len(out) == 67<|fim_middle|>test_braket_bell_draw<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(port: int, share: bool = False, tunnel_id=str):
address = '0.0.0.0' if shared.args.listen else '127.0.0.1'
server = ThreadingHTTPServer((address, port), Handler)
def on_start(public_url: str):
print(f'Starting non-streaming server at public url {public_url}/api')
if share:
try:
try_start_cloudflared(port, tunnel_id, max_attempts=3, on_start=on_start)
except Exception:
pass
else:
print(
f'Starting API at http://{address}:{port}/api')
server.serve_forever()<|fim_middle|>run_server<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.run_loop.cancel()<|fim_middle|>cog_unload<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(coordinates, expected):
out = irsa.core._parse_coordinates(coordinates)
for a, b in zip(out.split(), expected.split()):
try:
a = float(a)
b = float(b)
np.testing.assert_almost_equal(a, b)
except ValueError:
assert a == b<|fim_middle|>test_parse_coordinates<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, scope, name, key, value, recursive=False, *, session: "Optional[Session]" = None):
"""
Add metadata to data identifier.
:param scope: The scope name.
:param name: The data identifier name.
:param key: the key.
:param value: the value.
:param did: The data identifier info.
:param recursive: Option to propagate the metadata change to content.
:param session: The database session in use.
"""
pass<|fim_middle|>set_metadata<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
queryset = super().METHOD_NAME()
if self.action == "list":
perm = WebhookPermission.create_scope_list(self.request)
queryset = perm.filter(queryset)
return queryset<|fim_middle|>get_queryset<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
runner = CliRunner()
db = Db()
expected_output = """\<|fim_middle|>test_bfd_show_no_session<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(tensor1, tensor2, padding_index=-100):
"""Concatenates `tensor1` and `tensor2` on first axis, applying padding on the second if necessary."""
if len(tensor1.shape) == 1 or tensor1.shape[1] == tensor2.shape[1]:
return paddle.concat((tensor1, tensor2), axis=0)
# raise ValueError("Error")
# Let's figure out the new shape
new_shape = (tensor1.shape[0] + tensor2.shape[0], max(tensor1.shape[1], tensor2.shape[1])) + tuple(
tensor1.shape[2:]
)
# Now let's fill the result tensor
# result = tensor1.new_full(new_shape, padding_index)
result = paddle.full(new_shape, padding_index, dtype=tensor1.dtype)
result[: tensor1.shape[0], : tensor1.shape[1]] = tensor1
result[tensor1.shape[0] :, : tensor2.shape[1]] = tensor2
return result<|fim_middle|>paddle_pad_and_concatenate<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
# The vote() call is synchronous, which makes it difficult to
# coordinate the action of multiple threads that all call
# vote(). This method sends the vote call, then sets the
# event saying vote was called, then waits for the vote
# response.
future = self.storage._server.call('vote', id(self.trans), wait=False)
self.ready.set()
future.result(9)<|fim_middle|>myvote<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(version_set):
highest = get_highest(version_set)
assert highest == "1.1.1"
assert get_highest(set(["1.1.1"])) == "1.1.1"<|fim_middle|>test_get_highest<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(c2d_x, c3d_x):
utility3d.ExpandFunctionTo3d(c2d_x, c3d_x).solve()
assert numpy.allclose(c3d_x.dat.data_ro.min(), 0.0)
assert numpy.allclose(c3d_x.dat.data_ro.max(), 2.0)<|fim_middle|>test_copy_2d_field_to_3d_x<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Returns a list of categories that do not contain any recipes"""
return self.repos.categories.get_empty()<|fim_middle|>get_all_empty<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, stage):
return self.pipeline.METHOD_NAME(stage)<|fim_middle|>index<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.assertEqual(self.bezier.npoints, 26)
for x in [0., .25, .5, .75, 1.]:
for y in [0., .25, .5, .75, 1.]:
if x or y:
self.assertIn([x, y], self.bezier.coords.tolist())
self.assertIn([0., .125], self.bezier.coords.tolist())
self.assertIn([.125, 0.], self.bezier.coords.tolist())<|fim_middle|>test_points<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
Arguments:
source -- the source string; may contain \n characters
filename -- optional filename from which source was read; default
"<input>"
symbol -- optional grammar start symbol; "single" (default), "exec"
or "eval"
Return value / exceptions raised:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
"""
return _maybe_compile(_compile, source, filename, symbol)<|fim_middle|>compile_command<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(pathname, **kwargs):
mime_types = kwargs["mime_types"]
uid = kwargs["uid"]
gid = kwargs["gid"]
destination = kwargs["destination"]
logger = kwargs["logger"] if "logger" in kwargs and kwargs["logger"] else logging
logger.info(f"{scriptName}:\t👓\t{pathname}")
if os.path.isfile(pathname) and os.path.isdir(destination):
time.sleep(0.1)
try:
os.chown(pathname, uid, gid)
# get the file magic mime type
fileMime = magic.from_file(pathname, mime=True)
if fileMime in mime_types:
# looks like this is a compressed file, we're assuming it's a zeek log archive to be processed by filebeat
logger.info(f"{scriptName}:\t🖅\t{pathname} [{fileMime}] to {destination}")
shutil.move(pathname, os.path.join(destination, os.path.basename(pathname)))
else:
# unhandled file type uploaded, delete it
logger.warning(f"{scriptName}:\t🗑\t{pathname} [{fileMime}]")
os.unlink(pathname)
except Exception as genericError:
logger.error(f"{scriptName}:\texception: {genericError}")<|fim_middle|>file_processor<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Returns the topmost commit id for the current branch.
:return: Commit id.
"""
return self.git_cmd("log --pretty=format:%H -1").stdout.strip()<|fim_middle|>get_top_commit<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, workspace: Workspace, context: TracimContext) -> None:
...<|fim_middle|>on_workspace_deleted<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
try:
from sklearn.metrics import cohen_kappa_score
return False
except ImportError:
gscript.warning(_(""))
return True<|fim_middle|>load_skll<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(inputs, axis=-1, **kwargs):
"""Functional interface to the `Concatenate` layer.
Args:
inputs: A list of input tensors.
axis: Concatenation axis.
**kwargs: Standard layer keyword arguments.
Returns:
A tensor, the concatenation of the inputs alongside axis `axis`.
"""
return Concatenate(axis=axis, **kwargs)(inputs)<|fim_middle|>concatenate<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(name, sig=None):
"""
Return the status for a service.
If the name contains globbing, a dict mapping service name to True/False
values is returned.
.. versionchanged:: 2018.3.0
The service name can now be a glob (e.g. ``salt*``)
Args:
name (str): The name of the service to check
sig (str): Signature to use to find the service via ps
Returns:
bool: True if running, False otherwise
dict: Maps service name to True if running, False otherwise
CLI Example:
.. code-block:: bash
salt '*' service.status <service name> [service signature]
"""
if sig:
return bool(__salt__["status.pid"](sig))
contains_globbing = bool(re.search(r"\*|\?|\[.+\]", name))
if contains_globbing:
services = fnmatch.filter(get_all(), name)
else:
services = [name]
results = {}
for service in services:
cmd = "/etc/rc.d/{} onestatus".format(service)
results[service] = not __salt__["cmd.retcode"](cmd, ignore_retcode=True)
if contains_globbing:
return results
return results[name]<|fim_middle|>status<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
self,
table_name: str,
rule_name: str
) -> None:
"""Remove PBH rule from Config DB."""
self.config_db.delete_entry(self.CDB_PBH_RULE, "{}|{}".format(table_name, rule_name))<|fim_middle|>remove_pbh_rule<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, trid):
return trid in self.__transports<|fim_middle|>exists<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, axis):
# for (1, 1), In cuNumeric, raises error in normalize_axis_tuple
expected_exc = ValueError
ndim = 2
with pytest.raises(expected_exc):
np.linalg.norm(np_arrays[ndim], axis=axis)
with pytest.raises(expected_exc):
num.linalg.norm(num_arrays[ndim], axis=axis)<|fim_middle|>test_axis_invalid_value<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
another_app = self.make_app()
another_app.date_created = None
another_app.save()
self.addCleanup(another_app.delete)
response = self._assert_auth_get_resource(self.list_endpoint, allow_session_auth=True)
content = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertEqual(content["meta"], {
'limit': None, 'next': None, 'offset': 0, 'previous': None,
'total_count': 3
})<|fim_middle|>test_get_list_null_sorting<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(target_value: str, ranges: list) -> str:
value = parse_target_value(target_value)
value_origin_len = parse_target_value_length(target_value)
if not value:
return target_value.replace("<", "&lt;")
sorted_ranges = sorted(ranges, key=lambda x: x["start"])
for range_ in sorted_ranges:
if range_["start"] > value_origin_len or range_["stop"] > value_origin_len:
return f'<em style="color:red;">{value}</em>'
if sorted_ranges and value and len(value) == value_origin_len:
final_str = []
str_dict = {ind: xss_prevent(str_) for ind, str_ in enumerate(value)}
for range_ in sorted_ranges:
str_dict[range_["start"]] = '<em style="color:red;">' + str_dict[range_["start"]]
str_dict[range_["stop"] - 1] = str_dict[range_["stop"] - 1] + "</em>"
final_str = [x[1] for x in sorted(str_dict.items(), key=lambda kv: kv[0])]
return "".join(final_str)
if len(value) != AGENT_DEFAULT_LENGTH:
return f'<em style="color:red;">{value}</em>'
try:
if sorted_ranges and value and len(value) < value_origin_len:
begin_part_length = ceil((AGENT_DEFAULT_LENGTH - 3) / 2)
end_part_length = int((AGENT_DEFAULT_LENGTH - 3) / 2)
str_dict_begin = {ind: xss_prevent(str_) for ind, str_ in enumerate(value[:begin_part_length])}
str_dict_end = {
ind + (value_origin_len - end_part_length) + 3: xss_prevent(str_)
for ind, str_ in enumerate(value[-end_part_length:])
}
str_dict = {}
str_dict.update(str_dict_begin)
str_dict.update(str_dict_end)
str_dict[begin_part_length + 2] = "..."
for range_ in sorted_ranges:
if range_["start"] in str_dict and (range_["stop"] - 1) in str_dict:
str_dict[range_["start"]] = '<em style="color:red;">' + str_dict[range_["start"]]
str_dict[range_["stop"] - 1] = str_dict[range_["stop"] - 1] + "</em>"
if range_["start"] in str_dict and (range_["stop"] - 1) not in str_dict:
str_dict[range_["start"]] = '<em style="color:red;">' + str_dict[range_["start"]]
str_dict[begin_part_length] = "</em>" + str_dict[begin_part_length]
str_dict[begin_part_length] = "</em>" + str_dict[begin_part_length]
if range_["start"] not in str_dict and (range_["stop"] - 1) in str_dict:
str_dict[value_origin_len - end_part_length] = (
'<em style="color:red;">' + str_dict[value_origin_len - end_part_length]
)
str_dict[range_["stop"] - 1] = str_dict[range_["stop"] - 1] + "</em>"
if range_["start"] not in str_dict or (range_["stop"]) not in str_dict:
str_dict[begin_part_length + 2] = '<em style="color:red;">...</em>'
final_str = [x[1] for x in sorted(str_dict.items(), key=lambda kv: kv[0])]
return "".join(final_str)
except KeyError as e:
logger.warning(e, exc_info=e)
return f'<em style="color:red;">{value}</em>'<|fim_middle|>highlight_target_value<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, jobID):
if not jobID:
raise CaptchaBadJobID("CapSolver: Error bad job id to request task result.")
def _checkRequest(response):
self.checkErrorStatus(response, 'requestJob')
try:
if response.ok and response.json()['status'] == 'ready':
return True
except Exception:
pass
return None
response = polling2.poll(
lambda: self.session.post(
f'{self.host}/getTaskResult',
json={
'clientKey': self.api_key,
'taskId': jobID
},
timeout=30
),
check_success=_checkRequest,
step=5,
timeout=180
)
if response:
try:
rPayload = response.json()['solution']
if 'token' in rPayload:
return rPayload['token']
else:
return rPayload['gRecaptchaResponse']
except Exception:
pass
raise CaptchaTimeout(
"CapSolver: Error failed to solve Captcha."
)<|fim_middle|>request_job<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(spikeThreshold):
TaskTracker.SpikeThreshold = spikeThreshold<|fim_middle|>set_spike_threshold<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Any:
self.wait_window(self)
return self._result<|fim_middle|>show<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(func_id, args):
"""Set the maximum number of threads."""
_internal_assert(func_id == "max_num_threads", "This function cannot be directly invoked!")
_internal_assert(args.__len__() <= 1, "At most one argument accepted!")
if args.__len__() == 0:
res = Target.current().METHOD_NAME
else:
_internal_assert(isinstance(args[0], _expr.IntImm), "In tvm bool should be uint")
res = Target.current(args[0].value).METHOD_NAME
return convert(res)<|fim_middle|>max_num_threads<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")<|fim_middle|>id<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return self.if_axis_available<|fim_middle|>get_axis_available<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
if support.verbose:
sys.stdout.write('stopping HTTPS server\n')
server.stop()
if support.verbose:
sys.stdout.write('joining HTTPS thread\n')
server.join()<|fim_middle|>cleanup<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""Returns a string, the captured stderr."""
return self.capsys.readouterr().err<|fim_middle|>stderr<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, action, *args):
def register_formatter_class(formatter_cls):
formatter = formatter_cls()
self.formatters[action] = formatter
self.choices.append((action, formatter.label))
if args:
# register_action has been invoked as register_action(action, label, message); create a LogFormatter
# subclass and register that
label, message = args
formatter_cls = type(
"_LogFormatter", (LogFormatter,), {"label": label, "message": message}
)
register_formatter_class(formatter_cls)
else:
# register_action has been invoked as a @register_action(action) decorator; return the function that
# will register the class
return register_formatter_class<|fim_middle|>register_action<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(tmp_path):
file_path = tmp_path / "my-attachment.txt"
new_file_path = counter_duplicate_path(file_path)
assert new_file_path == file_path
file_path.write_text("some data")
new_file_path = counter_duplicate_path(file_path)
assert new_file_path != file_path
assert new_file_path.name == "my-attachment-2.txt"
new_file_path.write_text("some data 2")
newest_file_path = counter_duplicate_path(file_path)
assert newest_file_path.name == "my-attachment-3.txt"<|fim_middle|>test_counter_duplicate_path<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, text):
"""Tokenize a string."""
char_tokens = []
for s in text:
char_tokens.extend(s)
return char_tokens<|fim_middle|>tokenize<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, **kwargs):
# figure out the MIME type
for ext in self.component.mime_types:
if self.office.name.lower().endswith(ext):
content_type = self.component.mime_types[ext]
break
response = HttpResponse(content_type=content_type)
self.sendfile(self.office, response)
return response<|fim_middle|>download_response<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(line, alist, cur=None, sql=None):
"""Update lines using only the boundary"""
to_up = []
bbox = Bbox()
aline = Line()
for area in alist:
bbox = area.bbox(bbox)
if (intersects(area.get_points(aline), line)) or (
area.contain_pnt(line[0], bbox)
):
to_up.append((line.cat, area.cat))
if (cur is not None) and (sql is not None):
cur.executemany(sql, to_up)
return to_up<|fim_middle|>update_lines<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-11-01",
required=True,
),
}
return parameters<|fim_middle|>query_parameters<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
'''
Returns last Pose3d.
@return last JdeRobotTypes Pose3d saved
'''
self.lock.acquire()
pose = self.data
self.lock.release()
return pose<|fim_middle|>get_pose3d<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self, mock_class):<|fim_middle|>test_s3_get_bucket_info<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
super(TestTOC, self).METHOD_NAME()<|fim_middle|>tear_down<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
print("> ====== Loading frozen graph into memory")
detection_graph = tf.Graph()
with detection_graph.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, "rb") as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name="")
sess = tf.Session(graph=detection_graph)
print("> ====== Inference graph loaded.")
return detection_graph, sess<|fim_middle|>load_inference_graph<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(length: float):
print(type(length))
return gf.components.straight(length=length)<|fim_middle|>straigth_waveguide<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(clause):
def term2string(t):
if len(t)==0:
return "1"
return "*".join(["x("+str(v) +")" for v in t])
vars=tuple([v for v in clause if v>0])
negated_vars=tuple([-v for v in clause if v<0])
if len(negated_vars)>0:
terms=[tuple([negated_vars[i] for (i,j) in enumerate(combination) if j==1])\
+ vars for combination\
in Cartesian(list(repeat([0,1],len(negated_vars))))]
else:
terms=[vars]
res="+".join([term2string(t) for t in terms])
return res
#add_vars=[negated_var[i] for (i,j) in enumerate(combination) if j==1]<|fim_middle|>gen_poly_pb<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(Name, EnableCustomLogConfiguration = False, CustomLogObjects = None):
init_vars(CustomLogObjects)
return Test(EnableCustomLogConfiguration, CustomLogObjects)<|fim_middle|>test_marshall<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, opts):
config = {}
if not opts:
return config
for s in opts:
s = s.strip()
k, v = s.split('=', 1)
if '.' not in k:
config[k] = yaml.load(v, Loader=yaml.Loader)
else:
keys = k.split('.')
if keys[0] not in config:
config[keys[0]] = {}
cur = config[keys[0]]
for idx, key in enumerate(keys[1:]):
if idx == len(keys) - 2:
cur[key] = yaml.load(v, Loader=yaml.Loader)
else:
cur[key] = {}
cur = cur[key]
return config<|fim_middle|>parse_opt<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
bootstrap = Bootstrap()
bootstrap.ztps.set_config_response()
bootstrap.ztps.set_node_check_response()
bootstrap.ztps.set_definition_response(actions=[{"action": "test_action"}])
flash_filename = random_string()
bootstrap.ztps.set_action_response(
"test_action", fail_flash_file_action(bootstrap.flash, flash_filename)
)
with io.open(bootstrap.rc_eos, "w", encoding="utf8") as fd:
fd.write(ensure_text(random_string()))
with io.open(bootstrap.startup_config, "w", encoding="utf8") as fd:
fd.write(ensure_text(random_string()))
with io.open(bootstrap.boot_extensions, "w", encoding="utf8") as fd:
fd.write(ensure_text(random_string()))
os.mkdir(bootstrap.boot_extensions_folder)
with io.open(
os.path.join(bootstrap.boot_extensions_folder, "my_extension"), "w", encoding="utf8"
) as fd:
fd.write(ensure_text(random_string()))
bootstrap.start_test()
try:
self.assertTrue(bootstrap.eapi_node_information_collected())
self.assertTrue(bootstrap.action_failure())
self.assertFalse(bootstrap.error)
self.assertFalse(os.path.isfile(os.path.join(bootstrap.flash, flash_filename)))
self.assertFalse(os.path.isfile(bootstrap.rc_eos))
self.assertFalse(os.path.isfile(bootstrap.startup_config))
self.assertFalse(os.path.isfile(bootstrap.boot_extensions))
self.assertFalse(os.path.isdir(bootstrap.boot_extensions_folder))
except AssertionError as assertion:
print("Output: {}".format(bootstrap.output))
print("Error: {}".format(bootstrap.error))
raise_exception(assertion)
finally:
bootstrap.end_test()<|fim_middle|>test_action_failure<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(n):
"""
Generates the alternating group on ``n`` elements as a permutation group.
For ``n > 2``, the generators taken are ``(0 1 2), (0 1 2 ... n-1)`` for
``n`` odd
and ``(0 1 2), (1 2 ... n-1)`` for ``n`` even (See [1], p.31, ex.6.9.).
After the group is generated, some of its basic properties are set.
The cases ``n = 1, 2`` are handled separately.
Examples
========
>>> G = AlternatingGroup(4)
>>> G.is_group
True
>>> a = list(G.generate_dimino())
>>> len(a)
12
>>> all(perm.is_even for perm in a)
True
See Also
========
SymmetricGroup, CyclicGroup, DihedralGroup
References
==========
[1] Armstrong, M. "Groups and Symmetry"
"""
# small cases are special
if n in (1, 2):
return PermutationGroup([Permutation([0])])
a = list(range(n))
a[0], a[1], a[2] = a[1], a[2], a[0]
gen1 = a
if n % 2:
a = list(range(1, n))
a.append(0)
gen2 = a
else:
a = list(range(2, n))
a.append(1)
a.insert(0, 0)
gen2 = a
gens = [gen1, gen2]
if gen1 == gen2:
gens = gens[:1]
G = PermutationGroup([_af_new(a) for a in gens], dups=False)
if n < 4:
G._is_abelian = True
G._is_nilpotent = True
else:
G._is_abelian = False
G._is_nilpotent = False
if n < 5:
G._is_solvable = True
else:
G._is_solvable = False
G._degree = n
G._is_transitive = True
G._is_alt = True
return G<|fim_middle|>alternating_group<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, request, event, data): # pragma: no cover
"""
Hook for after an event is updated through the API.
The given event has been saved already, naturally.
:param request: The request that caused this event to be updated.
:type request: rest_framework.request.Request
:param event: The event that was updated.
:type event: events.models.Event
:param data: The data dict that was used to update the Event
:type data: dict
"""
pass<|fim_middle|>post_update_event<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
events = []
class Test(unittest.IsolatedAsyncioTestCase):
def setUp(self):
self.assertEqual(events, [])
events.append('setUp')
async def asyncSetUp(self):
self.assertEqual(events, ['setUp'])
events.append('asyncSetUp')
async def test_func(self):
self.assertEqual(events, ['setUp',
'asyncSetUp'])
events.append('test')
self.addAsyncCleanup(self.on_cleanup)
async def asyncTearDown(self):
self.assertEqual(events, ['setUp',
'asyncSetUp',
'test'])
events.append('asyncTearDown')
def tearDown(self):
self.assertEqual(events, ['setUp',
'asyncSetUp',
'test',
'asyncTearDown'])
events.append('tearDown')
async def on_cleanup(self):
self.assertEqual(events, ['setUp',
'asyncSetUp',
'test',
'asyncTearDown',
'tearDown'])
events.append('cleanup')
test = Test("test_func")
test.run()
self.assertEqual(events, ['setUp',
'asyncSetUp',
'test',
'asyncTearDown',
'tearDown',
'cleanup'])<|fim_middle|>test_full_cycle<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(monkeypatch):
monkeypatch.setattr(reporting, 'create_report', testutils.create_report_mocked())
def run_mocked(args):
if args == ['grub2-install', '/dev/vdb']:
raise_call_error(args)
else:
assert args == ['grub2-install', '/dev/vda']
monkeypatch.setattr(updategrubcore, 'run', run_mocked)
devices = ['/dev/vda', '/dev/vdb']
updategrubcore.update_grub_core(devices)
assert reporting.create_report.called
assert UPDATE_FAILED_TITLE == reporting.create_report.reports[0]['title']
summary = reporting.create_report.reports[0]['summary']
assert 'GRUB was successfully updated on the following devices: /dev/vda' in summary
assert 'however GRUB update failed on the following devices: /dev/vdb' in summary<|fim_middle|>test_update_grub_partial_success<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, component_config: Union[Dict,
TeslaBatSetup,
TeslaCounterSetup,
TeslaInverterSetup]) -> None:
if isinstance(component_config, Dict):
component_type = component_config["type"]
else:
component_type = component_config.type
component_config = dataclass_from_dict(COMPONENT_TYPE_TO_MODULE[
component_type].component_descriptor.configuration_factory, component_config)
if component_type in self.COMPONENT_TYPE_TO_CLASS:
self.components["component"+str(component_config.id)] = (self.COMPONENT_TYPE_TO_CLASS[component_type](
component_config))
else:
raise Exception(
"illegal component type " + component_type + ". Allowed values: " +
','.join(self.COMPONENT_TYPE_TO_CLASS.keys())
)<|fim_middle|>add_component<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
span, # type: Span
endpoint_name, # type: str
args, # type: Tuple[Any]
args_names, # type: Tuple[str]
args_traced, # type: Set[str]
):
# type: (...) -> None
if endpoint_name not in EXCLUDED_ENDPOINT:
exclude_set = EXCLUDED_ENDPOINT_TAGS.get(endpoint_name, frozenset()) # type: FrozenSet[str]
set_flattened_tags(
span,
items=((name, value) for (name, value) in zip(args_names, args) if name in args_traced),
exclude_policy=lambda tag: tag in exclude_set or tag.endswith("Body"),
processor=truncate_arg_value,
)<|fim_middle|>add_span_arg_tags<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
_content_value, _builder = self.new_content_builder(
self.ctx.args,
typ=AAZObjectType,
typ_kwargs={"flags": {"required": True, "client_flatten": True}}
)
_builder.set_prop("location", AAZStrType, ".location")
_builder.set_prop("name", AAZStrType, ".perimeter_name")
_builder.set_prop("tags", AAZDictType, ".tags")
tags = _builder.get(".tags")
if tags is not None:
tags.set_elements(AAZStrType, ".")
return self.serialize_content(_content_value)<|fim_middle|>content<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
with tmp_to_root_org():
queryset = self.model.get_user_related_tickets(self.request.user)
return queryset<|fim_middle|>get_queryset<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return self.tgt_dict<|fim_middle|>target_dictionary<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, key, modifiers):
"""Called whenever a key is pressed."""
if key == arcade.key.UP or key == arcade.key.W:
if self.physics_engine.can_jump():
self.player_sprite.change_y = PLAYER_JUMP_SPEED
elif key == arcade.key.LEFT or key == arcade.key.A:
self.player_sprite.change_x = -PLAYER_MOVEMENT_SPEED
elif key == arcade.key.RIGHT or key == arcade.key.D:
self.player_sprite.change_x = PLAYER_MOVEMENT_SPEED<|fim_middle|>on_key_press<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
"""
Test present
"""
table_name = "awl"
name = "baruwa"
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
mock_true = MagicMock(return_value=True)
mock_false = MagicMock(return_value=False)
with patch.dict(
postgres_privileges.__salt__, {"postgres.has_privileges": mock_true}
):
comt = "The requested privilege(s) are already set"
ret.update({"comment": comt, "result": True})
assert postgres_privileges.present(name, table_name, "table") == ret
with patch.dict(
postgres_privileges.__salt__,
{
"postgres.has_privileges": mock_false,
"postgres.privileges_grant": mock_true,
},
):
with patch.dict(postgres_privileges.__opts__, {"test": True}):
comt = "The privilege(s): {} are set to be granted to {}".format(
"ALL", name
)
ret.update({"comment": comt, "result": None})
assert (
postgres_privileges.present(
name, table_name, "table", privileges=["ALL"]
)
== ret
)
with patch.dict(postgres_privileges.__opts__, {"test": False}):
comt = "The privilege(s): {} have been granted to {}".format("ALL", name)
ret.update(
{"comment": comt, "result": True, "changes": {"baruwa": "Present"}}
)
assert (
postgres_privileges.present(
name, table_name, "table", privileges=["ALL"]
)
== ret
)<|fim_middle|>test_present_table<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
if self.settings.os == "Windows":
del self.options.fPIC<|fim_middle|>config_options<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Check if EFI mode is requested
:return: The requested EFI mode or None if no EFI mode requested
:rtype: str
"""
if self.firmware in Defaults.get_efi_capable_firmware_names():
return self.firmware<|fim_middle|>efi_mode<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(vm_session, vm_iface):
"""
Check rx and tx package
:param vm_session: An session to VM
:param vm_iface: VM's interface
"""
cmd = "ip -s -json link show %s" % vm_iface
status, stdout = vm_session.cmd_status_output(cmd)
if status or not stdout:
raise exceptions.TestFail("Failed to run cmd - {}, status - {}, "
"output - {}.".format(cmd, status, stdout))
ip_info = eval(stdout.strip())
LOG.debug("VM iface's info: %s.", ip_info)
tx_info = ip_info[0]['stats64']['tx']['packets']
rx_info = ip_info[0]['stats64']['rx']['packets']
if rx_info != tx_info:
raise exceptions.TestFail("The value of rx and tx should be same.")<|fim_middle|>check_rx_tx_packages<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")<|fim_middle|>type<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.failUnless(u'fmt' in self.riff_1)
self.failUnless(u'data' in self.riff_1)
self.failUnless(u'id3' in self.riff_1)
self.failUnless(u'fmt' in self.riff_2)
self.failUnless(u'data' in self.riff_2)<|fim_middle|>test_has_chunks<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())<|fim_middle|>to_str<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(url, expected):
"""Test get_name with different URLs."""
name = spack.cmd.create.get_name(None, url)
assert name == expected<|fim_middle|>test_get_name_urls<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
assert_range(1, 75, [1, 2, 3, 4, 5, 6, 7, 8])
assert_range(2, 75, [1, 2, 3, 4, 5, 6, 7, 8])
assert_range(3, 75, [1, 2, 3, 4, 5, 6, 7, 8])
assert_range(4, 75, [1, 2, 3, 4, 5, 6, 7, 8])
assert_range(5, 75, [2, 3, 4, 5, 6, 7, 8])
assert_range(6, 75, [3, 4, 5, 6, 7, 8, 9])
assert_range(8, 75, [5, 6, 7, 8, 9, 10, 11])
assert_range(37, 75, [34, 35, 36, 37, 38, 39, 40])
assert_range(70, 75, [67, 68, 69, 70, 71, 72, 73])
assert_range(71, 75, [68, 69, 70, 71, 72, 73, 74])
assert_range(72, 75, [68, 69, 70, 71, 72, 73, 74, 75])
assert_range(73, 75, [68, 69, 70, 71, 72, 73, 74, 75])
assert_range(74, 75, [68, 69, 70, 71, 72, 73, 74, 75])
assert_range(75, 75, [68, 69, 70, 71, 72, 73, 74, 75])
assert_range(1, 8, [1, 2, 3, 4, 5, 6, 7, 8])<|fim_middle|>test_page_range<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, X, y):
"""Fit time series regressor to training data.
private _fit containing the core logic, called from fit
Writes to self:
Sets fitted model attributes ending in "_".
Parameters
----------
X : guaranteed to be of a type in self.get_tag("X_inner_mtype")
if self.get_tag("X_inner_mtype") = "numpy3D":
3D np.ndarray of shape = [n_instances, n_dimensions, series_length]
if self.get_tag("X_inner_mtype") = "nested_univ":
pd.DataFrame with each column a dimension, each cell a pd.Series
for list of other mtypes, see datatypes.SCITYPE_REGISTER
for specifications, see examples/AA_datatypes_and_datasets.ipynb
y : 1D np.array of float, of shape [n_instances] - regression labels for fitting
indices correspond to instance indices in X
Returns
-------
self : Reference to self.
"""
estimator = self._get_delegate()
estimator.fit(X=X, y=y)
return self<|fim_middle|>fit<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(dev):
file_name = "sg_write_same.sh"
guest_dir = "/tmp/"
deps_dir = virttest_data_dir.get_deps_dir() + "/thin-provision/"
host_file = os.path.join(deps_dir, file_name)
guest_file = guest_dir + file_name
vm.copy_files_to(host_file, guest_dir)
status, output = session.cmd_status_output(
"$SHELL " + guest_file + " " + dev)
if status != 0:
test.fail("run sg_write_same failed:" + output)
test.log.debug(output)<|fim_middle|>run_sg_write_same<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> None:
pass<|fim_middle|>process_init<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, exp_op: OperatorBase) -> Union[list, float]:
r"""
Compute the variance of the expectation estimator. Because Aer takes this expectation
with matrix multiplication, the estimation is exact and the variance is always 0,
but we need to return those values in a way which matches the Operator's structure.
Args:
exp_op: The full expectation value Operator after sampling.
Returns:
The variances or lists thereof (if exp_op contains ListOps) of the expectation value
estimation, equal to 0.
"""
# Need to do this to mimic Op structure
def sum_variance(operator):
if isinstance(operator, ComposedOp):
return 0.0
elif isinstance(operator, ListOp):
return operator.combo_fn([sum_variance(op) for op in operator.oplist])
raise TypeError(f"Variance cannot be computed for {operator.__class__.__name__}.")
return sum_variance(exp_op)<|fim_middle|>compute_variance<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response<|fim_middle|>get_next<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(plugin, item_id, sub_category_url, page, **kwargs):
resp = urlquick.get(sub_category_url + 'page/%s/' % page)
root = resp.parse("section", attrs={"class": "grid-container section-video"})
for video_datas in root.iterfind(".//div"):
if 'single-video' in video_datas.get('class'):
video_title = video_datas.find('.//img').get('title')
video_image = URL_ROOT % item_id + video_datas.find('.//img').get('src')
video_url = video_datas.find('.//a').get('href')
item = Listitem()
item.label = video_title
item.art['thumb'] = item.art['landscape'] = video_image
item.set_callback(get_video_url,
item_id=item_id,
video_url=video_url)
item_post_treatment(item, is_playable=True, is_downloadable=True)
yield item
root_change_pages = resp.parse()
if root_change_pages.find(".//a[@class='next page-numbers']") is not None:
yield Listitem.next_page(
item_id=item_id, sub_category_url=sub_category_url, page=str(int(page) + 1))<|fim_middle|>list_videos<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, token):
"""
Apply the Legality Principle in combination with
Onset Maximization to return a list of syllables.
:param token: Single word or token
:type token: str
:return syllable_list: Single word or token broken up into syllables.
:rtype: list(str)
"""
syllables = []
syllable, current_onset = "", ""
vowel, onset = False, False
for char in token[::-1]:
char_lower = char.lower()
if not vowel:
syllable += char
vowel = bool(char_lower in self.vowels)
else:
if char_lower + current_onset[::-1] in self.legal_onsets:
syllable += char
current_onset += char_lower
onset = True
elif char_lower in self.vowels and not onset:
syllable += char
current_onset += char_lower
else:
syllables.append(syllable)
syllable = char
current_onset = ""
vowel = bool(char_lower in self.vowels)
syllables.append(syllable)
syllables_ordered = [syllable[::-1] for syllable in syllables][::-1]
return syllables_ordered<|fim_middle|>tokenize<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(request):
"""Basic table with or without index on integer column a"""
T = _get_test_table()
if request.param:
T.add_index("a")
return T<|fim_middle|>t1<|file_separator|> |
<|fim_prefix|> <|fim_suffix|>(self):<|fim_middle|>test_word_offsets_at_top<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, blob):
try:
json.loads(blob)
except ValueError:
return False
return True<|fim_middle|>is_json<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(experiment_id: int) -> None:
# We run this in a subprocess to avoid module name collisions
# when performing checkpoint export of different models.
ctx = multiprocessing.get_context("spawn")
p = ctx.Process(
target=_export_and_load_model,
args=(
experiment_id,
conf.make_master_url(),
),
)
p.start()
p.join()
assert p.exitcode == 0, p.exitcode<|fim_middle|>export_and_load_model<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(p):
x = p[..., 0]
y = p[..., 1]
val = 0.2-(np.abs(x-0.5)-0.5)*(np.abs(y-0.5)-0.5)
return val<|fim_middle|>phi0<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord('!'),
ord('~') + 1)) + list(range(
ord('¡'),
ord('¬') + 1)) + list(range(ord('®'),
ord('ÿ') + 1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))<|fim_middle|>bytes_to_unicode<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls, expr):
"""Normalizes all tags in an OR expression (and return it as list).
:param expr: OR expression to normalize and split (as string).
:return: Generator of normalized tags (as string)
"""
for tag in expr.strip().split(','):
yield cls.normalize_tag(tag)<|fim_middle|>normalized_tags_from_or<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
*, db_session, case_severity: CaseSeverity, case_severity_in: CaseSeverityUpdate
) -> CaseSeverity:
"""Updates a case severity."""
case_severity_data = case_severity.dict()
update_data = case_severity_in.dict(skip_defaults=True, exclude={"project", "color"})
for field in case_severity_data:
if field in update_data:
setattr(case_severity, field, update_data[field])
if case_severity_in.color:
case_severity.color = case_severity_in.color.as_hex()
db_session.commit()
return case_severity<|fim_middle|>update<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
db: Session,
datadog_connection_config: ConnectionConfig,
datadog_dataset,
datadog_config,
) -> Generator:
fides_key = datadog_config["fides_key"]
datadog_connection_config.name = fides_key
datadog_connection_config.key = fides_key
datadog_connection_config.save(db=db)
ctl_dataset = CtlDataset.create_from_dataset_dict(db, datadog_dataset)
dataset = DatasetConfig.create(
db=db,
data={
"connection_config_id": datadog_connection_config.id,
"fides_key": fides_key,
"ctl_dataset_id": ctl_dataset.id,
},
)
yield dataset
dataset.delete(db=db)<|fim_middle|>datadog_dataset_config<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Overrides can return actual input device."""
return None<|fim_middle|>get_input_device<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(frame, word):
if frame < 28:
return False
return True<|fim_middle|>bitfilter<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>test_wer_ignore_comment_punc<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self, z):<|fim_middle|>inv_z<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
Returns the open lineage dataset name as per
https://github.com/OpenLineage/OpenLineage/blob/main/spec/Naming.md
"""
return urllib.parse.urlsplit(self.path).path<|fim_middle|>openlineage_dataset_name<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(lumen, color):
return lumen * ((1 / MAX_LIGHT_EFFICIENCY_EFFICACY) / srgb_to_luminance(color))<|fim_middle|>watt_power_point<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(env):
for conn in shardsConnections(env):
allConnected = False
while not allConnected:
res = conn.execute_command('timeseries.INFOCLUSTER')
nodes = res[4]
allConnected = True
for n in nodes:
status = n[17]
if status != b'connected' and status != b'uninitialized':
allConnected = False
if not allConnected:
time.sleep(0.1)<|fim_middle|>verify_cluster_initialized<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> int: ...<|fim_middle|>fileno<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, request: Request) -> Response:
"""List system tasks"""
tasks = sorted(TaskInfo.all().values(), key=lambda task: task.task_name)
return Response(TaskSerializer(tasks, many=True).data)<|fim_middle|>list<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, context, opt):
"usage: quorum <bool>"
if not utils.verify_boolean(opt):
context.fatal_error("%s: bad boolean option" % opt)
return cib_status.set_quorum(utils.is_boolean_true(opt))<|fim_middle|>do_quorum<|file_separator|> |