text (stringlengths 67–7.88k) |
---|
<|fim_prefix|>def <|fim_suffix|>(self, s=None, b=None, connection=None,
returnCursor=False):
"""
_executemanybinds_
b is a list of dictionaries for the binds, e.g.:
b = [ {'bind1':'value1a', 'bind2': 'value2a'},
{'bind1':'value1b', 'bind2': 'value2b'} ]
see: http://www.gingerandjohn.com/archives/2004/02/26/cx_oracle-executemany-example/
Can't executemany() selects - so do each combination of binds here instead.
This will return a list of sqlalchemy.engine.base.ResultProxy objects,
one for each set of binds.
"""
s = s.strip()
if s.lower().endswith('select', 0, 6):
"""
Trying to select many
"""
if returnCursor:
result = []
for bind in b:
result.append(connection.execute(s, bind))
else:
result = ResultSet()
for bind in b:
resultproxy = connection.execute(s, bind)
result.add(resultproxy)
resultproxy.close()
return self.makelist(result)
"""
Now inserting or updating many
"""
result = connection.execute(s, b)
return self.makelist(result)<|fim_middle|>executemanybinds<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response<|fim_middle|>get_next<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
pass<|fim_middle|>sync_with_peers<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(tag: Dict[str, Any]) -> bool:
return re.match(r"^[0-9]+-[0-9]+-[a-z0-9]+$", tag["name"]) is not None<|fim_middle|>check_tag<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(l):
for x in range(l.numberOfMothers()):
mom = l.mother(x)
if mom.status() > 2: return True
id = abs(mom.pdgId())
if id > 1000000: return True
if id > 100: return False
if id < 6: return False
if id == 21: return False
if id in [11,12,13,14,15,16]:
if l.status() > 2: return True
return METHOD_NAME(mom)
if id >= 22 and id <= 39: return True
return True<|fim_middle|>is_not_from_hadronic_shower<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
""" rx_sync_start_available: Returns a list of possible keys used for rx_sync_start """
try:
return self._get_iio_dev_attr_str(
"sync_start_enable_available", _ctrl=self._rxadc
)
except: # noqa: E722
return "arm"<|fim_middle|>rx_sync_start_available<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Sequence[Sequence[str]]:
"""
The rule baseline result
"""
return pulumi.get(self, "results")<|fim_middle|>results<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
# Connecting ClearML with the current process,
# from here on everything is logged automatically
task = Task.init(project_name='examples', task_name='Remote_execution PyTorch MNIST train')
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--save-model', action='store_true', default=True,
help='For Saving the current Model')
args = parser.parse_args()
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join('..', 'data'), train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(os.path.join('..', 'data'), train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.test_batch_size, shuffle=True, **kwargs)
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
for epoch in range(1, args.epochs + 1):
if epoch > 1:
# We run training for 1 epoch to make sure nothing crashes; then local execution will be terminated.
# Execution will switch to remote execution by the agent listening to the specified queue.
task.execute_remotely(queue_name="default")
train(args, model, device, train_loader, optimizer, epoch)
test(args, model, device, test_loader, epoch)
if args.save_model:
torch.save(model.state_dict(), os.path.join(gettempdir(), "mnist_cnn_remote.pt"))<|fim_middle|>main<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(next_link=None):
if not next_link:
request = build_list_metrics_request(
resource_group_name=resource_group_name,
account_name=account_name,
subscription_id=self._config.subscription_id,
filter=filter,
api_version=api_version,
template_url=self.list_metrics.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request<|fim_middle|>prepare_request<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
#
# Technically we don't need to call alias_flavor() here (since it's
# already been invoked for this REPO_CFG variable), but we do it
# anyway to support `fail-on-flavor-aliasing` testing. Basically,
# alias_flavor() will fail() if flavor aliasing is disabled and we
# try to return an aliased flavor.
#
return alias_flavor(REPO_CFG.flavor_default)<|fim_middle|>get_flavor_default<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, a):
net = a.network()
agent1 = a.replicator(network=net)
agent2 = a.replicator(network=net)
agent3 = a.replicator(network=net)
agent1.connect(direction="to", whom=agent2)
agent1.connect(direction="to", whom=agent3)
info = a.info(origin=agent1)
agent1.transmit(what=models.Info, to_whom=nodes.Agent)
agent2.receive()
agent3.receive()
assert agent1.infos()[0].contents == agent2.infos()[0].contents
assert agent1.infos()[0].contents == agent3.infos()[0].contents
assert agent1.infos()[0].id != agent2.infos()[0].id != agent3.infos()[0].id
transmissions = info.transmissions()
assert len(transmissions) == 2<|fim_middle|>test_agent_transmit_everything_to_everyone<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> bytes:
"""Reads 4 bytes and increases cursor"""
if self.cursor + 4 > self.length_input:
raise ValueError(
"BMA Layer NRL Compressor: Reached EOF while reading data."
)
oc = self.cursor
self.cursor += 4
return read_bytes(self.uncompressed_data, oc, 4)<|fim_middle|>read<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(s: str):
if s.endswith("."):
return FAIL
return OK<|fim_middle|>trailing_period<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
db_session: Session,
function_calls: List[ContractFunctionCall],
blockchain_type: AvailableBlockchainType,
label_name=CRAWLER_LABEL,
) -> None:
label_model = get_label_model(blockchain_type)
transactions_hashes_to_save = [
function_call.transaction_hash for function_call in function_calls
]
existing_labels = (
db_session.query(label_model.transaction_hash)
.filter(
label_model.label == label_name,
label_model.log_index == None,
label_model.transaction_hash.in_(transactions_hashes_to_save),
)
.all()
)
existing_labels_transactions = [label[0] for label in existing_labels]
labels_to_save = [
_function_call_to_label(blockchain_type, function_call)
for function_call in function_calls
if function_call.transaction_hash not in existing_labels_transactions
]
logger.info(f"Saving {len(labels_to_save)} labels to session")
db_session.add_all(labels_to_save)<|fim_middle|>add_function_calls_to_session<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, data, padding=None, hashAlg=None, saltLen=None):
"""
:type data: bytearray
:param data: The value which will be signed (generally a binary
encoding of hash output.
:type padding: str
:param padding: Ignored, present for API compatibility with RSA
:type hashAlg: str
:param hashAlg: name of hash that was used for calculating the bytes
:type saltLen: int
:param saltLen: Ignored, present for API compatibility with RSA
"""
N = numBits(self.q)
digest_len = len(data) * 8
digest = bytesToNumber(data)
if N < digest_len:
digest >>= digest_len - N
k = getRandomNumber(1, (self.q-1))
if gmpyLoaded or GMPY2_LOADED:
k = mpz(k)
digest = mpz(digest)
r = powMod(self.g, k, self.p) % self.q
s = invMod(k, self.q) * (digest + self.private_key * r) % self.q
return encode_sequence(encode_integer(r), encode_integer(s))<|fim_middle|>sign<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self._events.append('startTestRun')
super(_BaseLoggingResult, self).METHOD_NAME()<|fim_middle|>start_test_run<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, input):
print("SIGNER state #3")
u = self.group.random()
s = self.group.random()
d = self.group.random()
g = input.get('g')
y = input.get('y')
str = "info"
msg = integer(SHA2(str))
z = (msg ** ((p - 1)/q)) % p
a = g ** u
b = (g ** s) * (z ** d)
Protocol.store(self, ('u', u), ('s', s), ('d', d))
Protocol.setState(self, 5)
return { 'a':a, 'b':b, 's':s }<|fim_middle|>signer_state3<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls, spec, value):
return cls.class_map[spec['type']](spec, value)<|fim_middle|>from_spec<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(authorization_provider_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAuthorizationProviderResult]:
"""
Gets the details of the authorization provider specified by its identifier.
Azure REST API version: 2022-08-01.
:param str authorization_provider_id: Identifier of the authorization provider.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str service_name: The name of the API Management service.
"""
...<|fim_middle|>get_authorization_provider_output<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
"""
test serialize/deserialize round-trip for ShipDrift mover
create a new ship_drift object and make sure it has same properties
"""
new_wind = ShipDriftMover(wind_file, topology_file, grid_type=2)
serial = new_wind.serialize()
nw2 = ShipDriftMover.deserialize(serial)
assert new_wind == nw2<|fim_middle|>test_serialize_deserialize<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
from litex.build.parser import LiteXArgumentParser
parser = LiteXArgumentParser(platform=xilinx_zcu104.Platform, description="LiteX SoC on ZCU104.")
parser.add_target_argument("--sys-clk-freq", default=125e6, type=float, help="System clock frequency.")
args = parser.parse_args()
soc = BaseSoC(
sys_clk_freq = args.sys_clk_freq,
**parser.soc_argdict
)
builder = Builder(soc, **parser.builder_argdict)
if args.build:
builder.build(**parser.toolchain_argdict)
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(builder.get_bitstream_filename(mode="sram"))<|fim_middle|>main<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(s: str):
level_mapping = StorageLevel.__members__
level_strings = [ss.strip() for ss in s.upper().split("|")]
levels = []
for ls in level_strings:
if ls not in level_mapping: # pragma: no cover
raise ValueError(f"Unknown level {ls}")
levels.append(level_mapping[ls])
return functools.reduce(operator.or_, levels)<|fim_middle|>from_str<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(expr, vx, vy, data, fref):
n = len(data)
A = te.placeholder((n,), name="A", dtype=expr.dtype)
B = te.placeholder((n,), name="B", dtype=expr.dtype)
def make_binds(i):
x = expr
x = tvm.tir.Let(vx, A[i], x)
x = tvm.tir.Let(vy, B[i], x)
return x
C = te.compute((n,), make_binds)
s = te.create_schedule([C.op])
f = tvm.build(s, [A, B, C], "llvm")
a = tvm.nd.array(np.array([x for x, y in data], dtype=expr.dtype))
b = tvm.nd.array(np.array([y for x, y in data], dtype=expr.dtype))
c = tvm.nd.array(np.zeros(len(data), dtype=expr.dtype))
f(a, b, c)
cref = np.array([fref(x, y) for x, y in data])
np.testing.assert_equal(c.numpy(), cref)<|fim_middle|>check_value<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(i, cai):
for sec in s:
sec.cai = cai
h.finitialize(-65)
while h.t < 15.0:
h.fadvance()
plt(i)<|fim_middle|>runvc<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(builder, authid):
return HelloNewAddAuthid(builder, authid)<|fim_middle|>add_authid<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls, url, **kwargs):
"""
Create an :class:`Media` from a URL.
:code:`Media.from_url(url)` is equivalent to:
.. code-block: python
med = Media(value=url, format='url')
But both unicode and bytes arguments are allowed for ``url``.
Parameters
----------
url: [str, bytes]
The location of a URL to load.
"""
if isinstance(url, str):
# If str, it needs to be encoded to bytes
url = url.encode('utf-8')
return cls(value=url, format='url', **kwargs)<|fim_middle|>from_url<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(jars_1: List, jars_2: List) -> bool:
"""
Checks if two lists of jar files contain the same jars.
The order of the jars in the list does not matter.
Args:
jars_1 (List): A list of jar files.
jars_2 (List): A list of jar files.
Returns:
bool: True if the lists contain the same jars, False otherwise.
"""
if jars_1 is None and jars_2 is None:
return True
if jars_1 is None or jars_2 is None:
return False
if len(jars_1) != len(jars_2):
return False
file_names_1 = get_file_names(jars_1)
file_names_2 = get_file_names(jars_2)
return set(file_names_1) == set(file_names_2)<|fim_middle|>contains_same_jars<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Optional["QObject"]:
return self._createViewFromQML()<|fim_middle|>get_display_item<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(string):
"""Escape all regular expressions special characters from STRING."""
return re.escape(string)<|fim_middle|>regex_escape<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(start, duration):
start_time = datetime.datetime.now().replace(hour=start, minute=0)
return start_time + datetime.timedelta(hours=duration // 60, minutes=duration % 60)<|fim_middle|>calclulate_end_time<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(fname, dtypes=None):
"""Read a tsv file into an OrderedDict.
Parameters
----------
fname : str
Path to the file being loaded.
dtypes : list, optional
List of types to cast the values loaded as. This is specified column by
column.
Defaults to None. In this case all the data is loaded as strings.
Returns
-------
data_dict : collections.OrderedDict
Keys are the column names, and values are the column data.
"""
from .utils import warn # avoid circular import
data = np.loadtxt(
fname, dtype=str, delimiter="\t", ndmin=2, comments=None, encoding="utf-8-sig"
)
column_names = data[0, :]
info = data[1:, :]
data_dict = OrderedDict()
if dtypes is None:
dtypes = [str] * info.shape[1]
if not isinstance(dtypes, (list, tuple)):
dtypes = [dtypes] * info.shape[1]
if not len(dtypes) == info.shape[1]:
raise ValueError(
"dtypes length mismatch. Provided: {0}, "
"Expected: {1}".format(len(dtypes), info.shape[1])
)
empty_cols = 0
for i, name in enumerate(column_names):
values = info[:, i].astype(dtypes[i]).tolist()
data_dict[name] = values
if len(values) == 0:
empty_cols += 1
if empty_cols == len(column_names):
warn(f"TSV file is empty: '{fname}'")
return data_dict<|fim_middle|>from_tsv<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(scope, transaction_style, request):
# type: (Scope, str, Any) -> None
name = ""
if transaction_style == "endpoint":
endpoint = request.scope.get("endpoint")
if endpoint:
name = transaction_from_function(endpoint) or ""
elif transaction_style == "url":
route = request.scope.get("route")
if route:
path = getattr(route, "path", None)
if path is not None:
name = path
if not name:
name = _DEFAULT_TRANSACTION_NAME
source = TRANSACTION_SOURCE_ROUTE
else:
source = SOURCE_FOR_STYLE[transaction_style]
scope.set_transaction_name(name, source=source)
logger.debug(
"[FastAPI] Set transaction name and source on scope: %s / %s", name, source
)<|fim_middle|>set_transaction_name_and_source<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
response = self.client.get(reverse("home"))
self.assertRedirects(response, self.project.get_absolute_url())<|fim_middle|>test_single_project_slug<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
for record in self:
country_code = record.country_id.code or ""
if record.cpf and country_code.upper() == "BR":
cpf = misc.punctuation_rm(record.cpf)
if not cnpj_cpf.validar(cpf):
raise ValidationError(_("Invalid CPF!"))
return True<|fim_middle|>check_cpf<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return base64.b64encode(self.serialize()).decode("utf8")<|fim_middle|>to_base64<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
for record in self:
self._event("on_record_unlink").notify(record)
result = super(Base, self).METHOD_NAME()
return result<|fim_middle|>unlink<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>test_basic<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(StrParI1, StrParI2):
IntLoc = 1
while IntLoc <= 1:
if Func1(StrParI1[IntLoc], StrParI2[IntLoc+1]) == Ident1:
CharLoc = 'A'
IntLoc = IntLoc + 1
if CharLoc >= 'W' and CharLoc <= 'Z':
IntLoc = 7
if CharLoc == 'X':
return TRUE
else:
if StrParI1 > StrParI2:
IntLoc = IntLoc + 7
return TRUE
else:
return FALSE<|fim_middle|>func2<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(chan0, chan1, phase_correction):
assert len(chan0) == len(chan1)
(p, s) = measure_phase_and_delay(chan0, chan1)
# print("Across Chips Sample delay: ",s)
# print("Phase delay: ",p,"(Degrees)")
# print(phase_correction)
return (sub_phases(phase_correction, [int(p * 1000)] * 4), s)<|fim_middle|>measure_and_adjust_phase_offset<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(settings, max_blocks_per_call=10000):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
height = settings['min_height']
while height < settings['max_height']+1:
num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
batch = []
for x in range(num_blocks):
batch.append(rpc.build_request(x, 'getblockhash', [height + x]))
reply = rpc.execute(batch)
if reply is None:
print('Cannot continue. Program will halt.')
return None
for x,resp_obj in enumerate(reply):
if rpc.response_is_error(resp_obj):
print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
sys.exit(1)
assert(resp_obj['id'] == x) # assume replies are in-sequence
if settings['rev_hash_bytes'] == 'true':
resp_obj['result'] = hex_switchEndian(resp_obj['result'])
print(resp_obj['result'])
height += num_blocks<|fim_middle|>get_block_hashes<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>() -> dict:
parser = ArgumentParser()
add_required_arguments(parser)
add_optional_arguments(parser)
return vars(parser.METHOD_NAME())<|fim_middle|>parse_args<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
to True will improves distributed training speed.
"""
return False<|fim_middle|>logging_outputs_can_be_summed<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, manifest):
return manifest.get('config').get("digest").split(":")[1]<|fim_middle|>get_image_config_file<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
logger.hr(f'{self.FUNCTION_NAME_BASE}{self.battle_count}', level=2)
prev = self.battle_count
result = False
for _ in range(10):
try:
result = self.battle_function()
break
except MapEnemyMoved:
if self.battle_count > prev:
result = True
break
else:
continue
if not result:
logger.warning('ScriptError, No combat executed.')
if self.config.Error_HandleError:
logger.warning('ScriptError, No combat executed, Withdrawing')
self.withdraw()
else:
raise ScriptError('No combat executed.')
return result<|fim_middle|>execute_a_battle<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self._testInsertGlyph(setGlyphName=False)<|fim_middle|>test_set_glyph_with_name_none<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(gherkin_languages_path, output_file=None,
encoding=None, verbose=False):
"""Workhorse.
Performs the conversion from "gherkin-languages.json" to "i18n.py".
Writes output to file or console (stdout).
:param gherkin_languages_path: File path for JSON file.
:param output_file: Output filename (or STDOUT for: None, "stdout", "-")
:param encoding: Optional output encoding to use (default: UTF-8).
:param verbose: Enable verbose mode (as bool; optional).
"""
if encoding is None:
encoding = "UTF-8"
# -- STEP 1: Load JSON data.
json_encoding = "UTF-8"
languages = json.load(open(gherkin_languages_path, encoding=json_encoding))
languages = data_normalize(languages, verbose=verbose)
# languages = yaml_normalize(languages)
# -- STEP 2: Generate python module with i18n data.
header = u'''# -*- coding: {encoding} -*-<|fim_middle|>gherkin_languages_to_python_module<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> None:
"""Close connection"""
self.conn.close()<|fim_middle|>post_allocate<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(private_cloud_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
segment_id: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWorkloadNetworkSegmentResult]:
"""
NSX Segment
:param str private_cloud_name: Name of the private cloud
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str segment_id: NSX Segment identifier. Generally the same as the Segment's display name
"""
...<|fim_middle|>get_workload_network_segment_output<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(data):
input_quantizer = model._bn.input_quantizers[0]
if isinstance(input_quantizer, StaticGridTensorQuantizer):
return input_quantizer.quantize_dequantize(data, input_quantizer.round_mode)
assert isinstance(input_quantizer, LearnedGridTensorQuantizer)
encoding = input_quantizer.encoding
encoding_min = torch.tensor([encoding.min])
encoding_max = torch.tensor([encoding.max])
return input_quantizer.quantize_dequantize(data, encoding_min, encoding_max)<|fim_middle|>quantize_input<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(A: dace.float64[10, 5, 3]):
return np.mean(A, axis=-2)<|fim_middle|>test_mean_negative<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, func, use_previous_behavior=True, preserves_partitioning=False, **kwargs):
if use_previous_behavior is True:
LOGGER.warning(f"please use `applyPartitions` instead of `mapPartitions` "
f"if the previous behavior was expected. "
f"The previous behavior will not work in future")
return self.applyPartitions(func)
return Table(self._rp.map_partitions(func, options={"shuffle": not preserves_partitioning}))<|fim_middle|>map_partitions<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
info = self.find_indextype(self.SPEC_HEADER)
_, _, offset, length = self.sections()[info]
with open(self.filename, 'rb') as f:
f.seek(offset)
dataType, numPoints, xUnits, yUnits, firstX, lastX, noise = \
struct.unpack('<iiiifff', f.read(28))
return numPoints, firstX, lastX,<|fim_middle|>read_spec_header<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self: SharedUtils) -> list:
return range_expand(default(get(self.switch_data_combined, "mlag_interfaces"), get(self.default_interfaces, "mlag_interfaces"), []))<|fim_middle|>mlag_interfaces<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
file = open("example.glsl")
shader_sourcecode = file.read()
size = self.width, self.height
self.shadertoy = Shadertoy(size, shader_sourcecode)
self.channel0 = self.shadertoy.ctx.framebuffer(
color_attachments=[self.shadertoy.ctx.texture(size, components=4)]
)
self.shadertoy.channel_0 = self.channel0.color_attachments[0]
self.channel1 = self.shadertoy.ctx.framebuffer(
color_attachments=[self.shadertoy.ctx.texture(size, components=4)]
)
self.shadertoy.channel_1 = self.channel1.color_attachments[0]<|fim_middle|>load_shader<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls, token):
"""Get the AccessRequestToken referenced by the specified token."""
return cls.query.filter_by(token=token).one_or_none()<|fim_middle|>get_by_token<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(*args, **kwargs):
func_to_call = get_func_to_call()
logger.info("running %s()...", func.__name__)
try:
test_map[func.__name__] = dict()
test_map[func.__name__]["result"] = SUCCESSED
test_map[func.__name__]["error_message"] = ""
test_map[func.__name__]["error_stack"] = ""
test_map[func.__name__]["error_normalized"] = ""
test_map[func.__name__]["start_dt"] = dt.datetime.utcnow()
ret = func_to_call(*args, **kwargs)
except (AssertionError, AzureError, CliTestError, CliExecutionError, SystemExit,
JMESPathCheckAssertionError) as e:
use_exception_cache = os.getenv("TEST_EXCEPTION_CACHE")
if use_exception_cache is None or use_exception_cache.lower() != "true":
raise
test_map[func.__name__]["end_dt"] = dt.datetime.utcnow()
test_map[func.__name__]["result"] = FAILED
test_map[func.__name__]["error_message"] = str(e).replace("\r\n", " ").replace("\n", " ")[:500]
test_map[func.__name__]["error_stack"] = traceback.format_exc().replace(
"\r\n", " ").replace("\n", " ")[:500]
logger.info("--------------------------------------")
logger.info("step exception: %s", e)
logger.error("--------------------------------------")
logger.error("step exception in %s: %s", func.__name__, e)
logger.info(traceback.format_exc())
exceptions.append((func.__name__, sys.exc_info()))
else:
test_map[func.__name__]["end_dt"] = dt.datetime.utcnow()
return ret<|fim_middle|>wrapper<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(parameters_string):
parameters = []
for parameter_string in split_parameters_string(parameters_string):
match = re.search(r'\s*(?:\[(?P<attributes>.*?)\]\s+)?(?P<type_and_name>.*)', parameter_string)
attributes_string, type_and_name_string = match.group('attributes', 'type_and_name')
split = type_and_name_string.rsplit(' ', 1)
parameter_kind = 'class'
if split[0].startswith('struct '):
parameter_kind = 'struct'
split[0] = split[0][7:]
elif split[0].startswith('enum:'):
parameter_kind = split[0][:split[0].find(' ')]
split[0] = split[0][split[0].find(' ') + 1:]
parameter_type = split[0]
parameter_name = split[1]
parameters.append(model.Parameter(kind=parameter_kind, type=parameter_type, name=parameter_name, attributes=parse_attributes_string(attributes_string)))
return parameters<|fim_middle|>parse_parameters_string<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
self,
connection,
engine,
Base,
User
):
create_view(
name='trivial_view',
selectable=sa.select(*_select_args(User.id)),
metadata=Base.metadata,
)
Base.metadata.create_all(engine)
view = CreateView(
name='trivial_view',
selectable=sa.select(*_select_args(User.id)),
replace=True,
)
with connection.begin():
connection.execute(view)
Base.metadata.drop_all(engine)<|fim_middle|>test_life_cycle_replace_existing<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> int:
return self.y<|fim_middle|>top<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return self.get("/installation/repositories")<|fim_middle|>get_repositories<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, node):
"""
Swap out Python's AnnAssign with an Assign node where the annotation function is called.
Example:
Original:
y: Tensor_Type(1,2,3, Dyn) = f2(x)
Output:
y = annotate(f2(x),Tensor_Type((1,2,3,Dyn)))
"""
return ast.Assign(targets=[node.target], value=ast.Call(
func=ast.Name(id='annotate', ctx=ast.Load()),
args=[node.value, node.annotation], keywords=[]))<|fim_middle|>visit_ann_assign<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Optional[str]:
"""
The user that created the API key.
"""
return pulumi.get(self, "created_by")<|fim_middle|>created_by<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Get the description (shown below the title).
Defaults to ``None``, which means that no description
is rendered.
"""
return None<|fim_middle|>get_description<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
metric = WordMetric(mode='ignore_case_symbol')
metric.process(None, self.pred)
eval_res = metric.evaluate(size=3)
self.assertEqual(eval_res['recog/word_acc_ignore_case_symbol'], 1.0)<|fim_middle|>test_word_acc_ignore_case_symbol_metric<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
summary_modified_field = '_current_version__autoapprovalsummary__modified'
# We don't take deleted reports into account
valid_abuse_report_states = (
AbuseReport.STATES.UNTRIAGED,
AbuseReport.STATES.VALID,
AbuseReport.STATES.SUSPICIOUS,
)
recent_abuse_reports_subquery = AbuseReport.objects.filter(
state__in=valid_abuse_report_states,
created__gte=OuterRef(summary_modified_field),
guid=OuterRef('guid'),
)
return [
# Only recalculate add-ons that received recent abuse reports
# possibly through their authors.
Q(
Exists(recent_abuse_reports_subquery),
)
| Q(
authors__abuse_reports__state__in=valid_abuse_report_states,
authors__abuse_reports__created__gte=F(summary_modified_field),
)
# And check ratings that have a rating of 3 or less
| Q(
_current_version__ratings__deleted=False,
_current_version__ratings__created__gte=F(summary_modified_field),
_current_version__ratings__rating__lte=3,
)
]<|fim_middle|>get_recalc_needed_filters<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, mock_echo):
result = self.run_command(
["remote-build", "--launchpad-accept-public-upload", "--recover"]
)
self.assertThat(result.exit_code, Equals(0))
self.mock_lc_init.assert_called_once_with(
project=mock.ANY,
architectures=mock.ANY,
deadline=mock.ANY,
build_id="snapcraft-test-snap-fakehash123",
)<|fim_middle|>test_remote_build_recover_uses_calculated_hash<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
git_version = subprocess.Popen(
["git", "--version"],
shell=False,
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
).communicate()[0]
if not git_version:
log.error("Git not installed")
return False
log.debug("Detected git version %s", git_version)
return Version(git_version.split()[-1])<|fim_middle|>git_version<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, new_values, profile_name):
# The access_key/secret_key are now *always* written to the shared
# credentials file (~/.aws/credentials), see aws/aws-cli#847.
# post-conditions: ~/.aws/credentials will have the updated credential
# file values and new_values will have the cred vars removed.
credential_file_values = {}
if 'aws_access_key_id' in new_values:
credential_file_values['aws_access_key_id'] = new_values.pop(
'aws_access_key_id')
if 'aws_secret_access_key' in new_values:
credential_file_values['aws_secret_access_key'] = new_values.pop(
'aws_secret_access_key')
if credential_file_values:
if profile_name is not None:
credential_file_values['__section__'] = profile_name
shared_credentials_filename = os.path.expanduser(
self._session.get_config_variable('credentials_file'))
self._config_writer.update_config(
credential_file_values,
shared_credentials_filename)<|fim_middle|>write_out_creds_file_values<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(domain: str, message: str) -> str: ...<|fim_middle|>dgettext<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, id_):
# See the comment for RadioSelect.id_for_label()
if id_:
id_ += "_0"
return id_<|fim_middle|>id_for_label<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(ws_app: WebSocketApp, msg: str):
# We strongly trust that the contract on API will hold atm :D
event_dict = json.loads(msg)
labels = _LogEventLabels(**event_dict.get("labels", {}))
if "message" in event_dict:
message = event_dict["message"]
timestamp = dateutil.parser.isoparse(event_dict["timestamp"])
event = _LogEvent(
message=message,
timestamp=timestamp,
component_name=component_name,
labels=labels,
)
read_queue.put(event)<|fim_middle|>callback<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, schema):
query = "SELECT database, table, name FROM system.columns WHERE database NOT IN ('system')"
results, error = self.run_query(query, None)
if error is not None:
self._handle_run_query_error(error)
results = json_loads(results)
for row in results["rows"]:
table_name = "{}.{}".format(row["database"], row["table"])
if table_name not in schema:
schema[table_name] = {"name": table_name, "columns": []}
schema[table_name]["columns"].append(row["name"])
return list(schema.values())<|fim_middle|>get_tables<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.run_test(ntime=1024, nchan=32, max_delay=1, batch_shape=(7,9,5))<|fim_middle|>test_ntime1024_nchan32_ndelay1_batch7_9_5<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, temperature):
"""
Sets the low threshold temperature of thermal
Args :
temperature: A float number up to nearest thousandth of one degree Celsius,
e.g. 30.125
Returns:
A boolean, True if threshold is set successfully, False if not
"""
raise NotImplementedError<|fim_middle|>set_low_threshold<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")<|fim_middle|>name<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
"""Returns a platform-specific root directory for user config settings."""
# On Windows, prefer %LOCALAPPDATA%, then %APPDATA%, since we can expect the
# AppData directories to be ACLed to be visible only to the user and admin
# users (https://stackoverflow.com/a/7617601/1179226). If neither is set,
# return None instead of falling back to something that may be world-readable.
if os.name == "nt":
appdata = os.getenv("LOCALAPPDATA")
if appdata:
return appdata
appdata = os.getenv("APPDATA")
if appdata:
return appdata
return None
# On non-windows, use XDG_CONFIG_HOME if set, else default to ~/.config.
xdg_config_home = os.getenv("XDG_CONFIG_HOME")
if xdg_config_home:
return xdg_config_home
return os.path.join(os.path.expanduser("~"), ".config")<|fim_middle|>get_user_config_directory<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
models = []
models.extend(
[
VertaModelNoImpl,
VertaModelOnlyInit,
VertaModelOnlyPredict,
]
)
return models<|fim_middle|>incomplete_verta_models<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return QueryVersion(display=self.display,
opcode=self.display.get_extension_major(extname),
major_version=1,
minor_version=1)<|fim_middle|>query_version<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
'''Test that an instance of MetaRefElementArgMetadata can be created
successfully. Also check the input value with mixed case.
'''
ref_element_arg = MetaRefElementArgMetadata("Normals_To_Faces")
assert isinstance(ref_element_arg, MetaRefElementArgMetadata)
assert ref_element_arg.reference_element == "normals_to_faces"<|fim_middle|>test_create<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.assertEqual(utils.format_datetime(self.naive_dt),
self.datestring + ' -0000')<|fim_middle|>test_naive_datetime<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AzureStackHCI/clusters/{clusterName}/arcSettings/{arcSettingName}/createArcIdentity",
**self.url_parameters
)<|fim_middle|>url<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(source_dir: Path) -> Path:
setup_py = source_dir / "setup.py"
setup_py.write_text(
"from setuptools import setup; "
'setup(name="demo", '
'version="0.1.0", '
'install_requires=["package"])'
)
return source_dir<|fim_middle|>demo_setup<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> None:
asset_name = "wiki.en.vec"
asset_path = get_asset_path(asset_name)
with tempfile.TemporaryDirectory() as dir_name:
data_path = os.path.join(dir_name, asset_name)
shutil.copy(asset_path, data_path)
vector_transform = VectorTransform(FastText(root=dir_name, validate_file=False))
jit_vector_transform = torch.jit.script(vector_transform)
# The first 3 entries in each vector.
expected_fasttext_simple_en = torch.tensor(
[[-0.065334, -0.093031, -0.017571], [-0.32423, -0.098845, -0.0073467]]
)
self.assertEqual(vector_transform(["the", "world"])[:, 0:3], expected_fasttext_simple_en)
self.assertEqual(jit_vector_transform(["the", "world"])[:, 0:3], expected_fasttext_simple_en)<|fim_middle|>test_vector_transform<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>() -> None: ...<|fim_middle|>pm<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>get_simple_bias_model<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>test_buffered_reply_queue<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(agent_plugin_service, flask_client, error):
agent_plugin_service.install_plugin_archive = MagicMock(side_effect=error)
resp = flask_client.put(
get_url_for_resource(InstallAgentPlugin),
data=AGENT_PLUGIN,
follow_redirects=True,
)
assert resp.status_code == HTTPStatus.INTERNAL_SERVER_ERROR<|fim_middle|>test_install_plugin_binary_internal_server_error<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(db, info):
FakeModel = get_fake_model(model_base=models.UUIDModel)
class Serializer(serializers.ModelSerializer):
class Meta:
model = FakeModel
fields = "__all__"
class CustomMutation1(Mutation):
class Meta:
serializer_class = Serializer
class CustomMutation2(Mutation):
class Meta:
serializer_class = Serializer
class CustomValidation(BaseValidation):
@validation_for(CustomMutation1)
@validation_for(CustomMutation2)
def validate_custom_mutation(self, mutation, data, info):
data["test"] = "test"
return data
data = CustomValidation().validate(CustomMutation1, {}, info)
assert data["test"] == "test"
data = CustomValidation().validate(CustomMutation2, {}, info)
assert data["test"] == "test"<|fim_middle|>test_custom_validation_chained_decorators<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Sequence: ensure ability to assign a Dataset to a Sequence item"""
ds = Dataset()
ds.add_new((1, 1), "IS", 1)
# Create a single element Sequence first
seq = Sequence(
[
Dataset(),
]
)
seq[0] = ds
assert ds == seq[0]<|fim_middle|>test_valid_assignment<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
df: Union[str, pd.Series, dd.Series, pd.DataFrame, dd.DataFrame],
column: str = "",
) -> Union[bool, pd.Series, pd.DataFrame]:
"""
Validate if a data cell is CPJ in a DataFrame column. For each cell, return True or False.
Parameters
----------
df
A pandas or Dask DataFrame containing the data to be validated.
column
The name of the column to be validated.
"""
if isinstance(df, (pd.Series, dd.Series)):
return df.apply(cpj.is_valid)
elif isinstance(df, (pd.DataFrame, dd.DataFrame)):
if column != "":
return df[column].apply(cpj.is_valid)
else:
return df.applymap(cpj.is_valid)
return cpj.is_valid(df)<|fim_middle|>validate_cr_cpj<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(x, is_training):
# Reduce hw by avg and max
# Return cat([avg_pool_0, avg_pool_1, ..., max_pool_0, max_pool_1, ...])
if not isinstance(x, (list, tuple)):
return avg_max_reduce_hw_helper(x, is_training)
elif len(x) == 1:
return avg_max_reduce_hw_helper(x[0], is_training)
else:
res_avg = []
res_max = []
for xi in x:
avg, max = avg_max_reduce_hw_helper(xi, is_training, False)
res_avg.append(avg)
res_max.append(max)
res = res_avg + res_max
return paddle.concat(res, axis=1)<|fim_middle|>avg_max_reduce_hw<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(sents, args):
g2p = G2p()
out_sents = []
res_wrds = load_reserve_word(args.reserve_word)
for sent in sents:
col1 = ""
if args.reserve_first_column:
col1, sent = sent.split(None, 1)
sent = process_sent(sent, g2p, res_wrds, args)
if args.reserve_first_column and col1 != "":
sent = f"{col1} {sent}"
out_sents.append(sent)
return out_sents<|fim_middle|>process_sents<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
if isinstance(self._monitor_address, tuple):
moncdev = "socket,id=mon,host=%s,port=%s" % (
self._monitor_address[0],
self._monitor_address[1])
else:
moncdev = 'socket,id=mon,path=%s' % self._monitor_address
return ['-chardev', moncdev,
'-mon', 'chardev=mon,mode=control',
'-display', 'none', '-vga', 'none']<|fim_middle|>base_args<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, agent: AgentID) -> ObsType | None:
if not self._has_reset:
EnvLogger.error_observe_before_reset()
return super().METHOD_NAME(agent)<|fim_middle|>observe<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Optional[List[ParameterBuilder]]:
return self._validation_parameter_builders<|fim_middle|>validation_parameter_builders<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
monitor: zmq.asyncio.Socket, loop: asyncio.BaseEventLoop
) -> None:
"""A thread that prints events
This is a convenience method that can serve as an example for your own monitor code;
for example, if you don't need the prints, copy this part of the code into your project and modify it to your needs.
Parameters:
monitor: a zmq monitor socket, from calling my_zmq_socket.get_monitor_socket()
loop: an asyncio event loop, from calling zmq.asyncio.asyncio.get_event_loop(); when starting a thread it does not contain an event loop
"""
print("libzmq-%s" % zmq.zmq_version())
if zmq.zmq_version_info() < (4, 0):
raise RuntimeError("monitoring in libzmq version < 4.0 is not supported")
EVENT_MAP = {}
print("Event names:")
for name in dir(zmq):
if name.startswith('EVENT_'):
value = getattr(zmq, name)
print("%21s : %4i" % (name, value))
EVENT_MAP[value] = name
print("\n")
asyncio.set_event_loop(loop)
async def run_loop() -> None:
while True:
try:
while monitor.poll():
evt: Dict[str, Any] = {}
mon_evt = await recv_monitor_message(monitor)
evt.update(mon_evt)
evt['description'] = EVENT_MAP[evt['event']]
print(f"Event: {evt}")
if evt['event'] == zmq.EVENT_MONITOR_STOPPED:
break
except RuntimeError as e:
print(e)
time.sleep(1)
monitor.close()
print()
print("event monitor thread done!")
asyncio.ensure_future(run_loop())<|fim_middle|>event_monitor_thread_async<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(fn, rate: float, deterministic: bool = False):
def attn_fn(scope: Scope, weights: Array):
attn_weights = fn(scope, weights)
return nn.dropout(
scope, attn_weights, deterministic=deterministic, rate=rate
)
return attn_fn<|fim_middle|>with_dropout<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.assertMarkdownRenders(
"""![Text](http://link.com/".png'title') more text""",
"""<p><img alt="Text" src="http://link.com/".png" title="title" /> more text</p>"""
)<|fim_middle|>test_mixed_title_quotes2<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
pass<|fim_middle|>start<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> None:
"""Close and reopen our log file, if supported.
This should be overridden where needed."""
return # pragma: no cover<|fim_middle|>hup<|file_separator|> |