text (stringlengths 67 to 7.88k) |
---|
<|fim_prefix|>def <|fim_suffix|>(self) -> str:
"""
Type of resource. Type = Microsoft.OffAzure/VMWareSites.
"""
return pulumi.get(self, "type")<|fim_middle|>type<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(intfspec):
"""Given an interface specification return a cmdline command name"""
if len(intfspec) > 2:
name = intfspec[2]
else:
name = intfspec[0].split('.')[-1].replace('_', '-')
return name<|fim_middle|>get_cmdline_command_name<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(builder, subgraphs):
builder.PrependUOffsetTRelativeSlot(
2, flatbuffers.number_types.UOffsetTFlags.py_type(subgraphs), 0)<|fim_middle|>model_add_subgraphs<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(source: str) -> str:
# Filter out any magic lines (starting with %) if in a Jupyter notebook
import re
srclines = map(lambda x: re.sub(r"^\%.*", "", x), source.split("\n"))
source = "\n".join(srclines)
return source<|fim_middle|>strip_magic_line<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, visibility):
"""Set whether to display collision objects or not."""
pass<|fim_middle|>display_collisions<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(obj, user=None):
# Check if this is a recipe or project
project = Project.objects.filter(uid=obj.uid).first()
if not project:
project = auth.create_project(user=user, uid=obj.uid,
name=obj.name,
text=obj.text)
project.uid = obj.uid
project.name = obj.name
project.text = obj.text
project.date = obj.date
project.privacy = obj.privacy
update_image(project, obj.image)
project.save()
for recipe, vals in obj.recipes.items():
data = parse_json(vals)
email = data.owner_email or settings.DEFAULT_FROM_EMAIL
owner = get_or_create(email, data=data)
upload_recipe(data, project=project, user=owner)
return<|fim_middle|>upload_project<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
# Test {} with many items
@njit
def foo():
return {1: 2.2, 3: 4.4, 5: 6.6}
d = foo()
self.assertEqual(d, {1: 2.2, 3: 4.4, 5: 6.6})<|fim_middle|>test_use_curlybraces_with_initmany<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(r, partition_key_range):
"""Evaluates and returns r - partition_key_range
:param dict partition_key_range: Partition key range.
:param routing_range.Range r: query range.
:return: The subtract r - partition_key_range.
:rtype: routing_range.Range
"""
left = max(partition_key_range[routing_range.PartitionKeyRange.MaxExclusive], r.min)
if left == r.min:
leftInclusive = r.isMinInclusive
else:
leftInclusive = False
queryRange = routing_range.Range(left, r.max, leftInclusive, r.isMaxInclusive)
return queryRange<|fim_middle|>subtract_range<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
tz = pytz.timezone('Australia/Sydney')
dt = datetime.datetime(2002, 2, 20, 13, 37, 42, 7, tzinfo=tz)
w = DatetimePicker(value=dt)
assert w.value == dt
# tzinfo only changes upon input from user
assert w.value.tzinfo == tz<|fim_middle|>test_datetime_tzinfo<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(next_link=None):
if not next_link:
request = build_list_by_location_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_location.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request<|fim_middle|>prepare_request<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
:avocado: tags=machine:pseries
:avocado: tags=accel:tcg
"""
self.require_accelerator("tcg")
self.vm.add_args("-accel", "tcg")
self.launch_and_wait(set_up_ssh_connection=False)<|fim_middle|>test_pseries_tcg<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(bot: str, raise_exception=True):
"""
Checks if daily event triggering limit exceeded.
@param bot: bot id.
@param raise_exception: Raise exception if event is in progress.
@return: boolean flag
"""
today = datetime.today()
today_start = today.replace(hour=0, minute=0, second=0)
doc_count = ValidationLogs.objects(
bot=bot, start_timestamp__gte=today_start
).count()
if doc_count >= BotSettings.objects(bot=bot).get().data_importer_limit_per_day:
if raise_exception:
raise AppException("Daily limit exceeded.")
else:
return True
else:
return False<|fim_middle|>is_limit_exceeded<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return self.args.command == "about"<|fim_middle|>is_about_command<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return reverse(self.urlname, args=[self.domain, self.program_id])<|fim_middle|>page_url<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, transpiler_level):
transpile(
self.qv_14_x_14, self.melbourne, seed_transpiler=0, optimization_level=transpiler_level
)<|fim_middle|>time_transpile_qv_14_x_14<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
# https://mxnet.apache.org/get_started/build_from_source
args = [
self.define_from_variant("USE_CUDA", "cuda"),
self.define_from_variant("USE_CUDNN", "cudnn"),
self.define_from_variant("USE_OPENCV", "opencv"),
self.define_from_variant("USE_OPENMP", "openmp"),
self.define_from_variant("USE_LAPACK", "lapack"),
self.define("BLAS_LIBRARIES", self.spec["blas"].libs[0]),
]
if self.spec.satisfies("@:1"):
args.append(self.define_from_variant("USE_MKLDNN", "mkldnn"))
elif self.spec.satisfies("@2:"):
args.append(self.define_from_variant("USE_ONEDNN", "mkldnn"))
args.append(self.define("USE_CUTENSOR", False))
if "+cuda" in self.spec:
if "cuda_arch=none" not in self.spec:
cuda_arch = ";".join(
"{0:.1f}".format(float(i) / 10.0)
for i in self.spec.variants["cuda_arch"].value
)
args.append(self.define("MXNET_CUDA_ARCH", cuda_arch))
args.extend(
[
self.define_from_variant("USE_NCCL", "nccl"),
# Workaround for bug in GCC 8+ and CUDA 10 on PowerPC
self.define("CMAKE_CUDA_FLAGS", self.compiler.cxx11_flag),
# https://github.com/apache/mxnet/issues/21193
# https://github.com/spack/spack/issues/36922
self.define(
"CMAKE_CXX_FLAGS",
"-L" + join_path(self.spec["cuda"].libs.directories[0], "stubs"),
),
]
)
return args<|fim_middle|>cmake_args<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(addresses: Iterable[_N]) -> Iterator[_N]: ...<|fim_middle|>collapse_addresses<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"fills the widgets"
index_crosssectiontype = self.parameterWidget.cb_crosssectiontype.findText(
self.SectionType
)
self.parameterWidget.cb_crosssectiontype.setCurrentIndex(index_crosssectiontype)
self.parameterWidget.if_rec_height.setText(self.RectHeight.UserString)
self.parameterWidget.if_rec_width.setText(self.RectWidth.UserString)
self.parameterWidget.if_circ_diameter.setText(self.CircDiameter.UserString)
self.parameterWidget.if_pipe_diameter.setText(self.PipeDiameter.UserString)
self.parameterWidget.if_pipe_thickness.setText(self.PipeThickness.UserString)<|fim_middle|>update_parameter_widget<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
self,
) -> DashboardListListResponse:
"""Get all dashboard lists.
Fetch all of your existing dashboard list definitions.
:rtype: DashboardListListResponse
"""
kwargs: Dict[str, Any] = {}
return self._list_dashboard_lists_endpoint.call_with_http_info(**kwargs)<|fim_middle|>list_dashboard_lists<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls, op: "TensorRechunk"):
from ..indexing.slice import TensorSlice
from ..merge.concatenate import TensorConcatenate
if has_unknown_shape(*op.inputs):
yield
out = op.outputs[0]
tensor = astensor(op.inputs[0])
chunk_size = get_nsplits(tensor, op.chunk_size, tensor.dtype.itemsize)
if chunk_size == tensor.nsplits:
return [tensor]
rechunk_infos = gen_rechunk_infos(tensor, chunk_size)
out_chunks = []
for rechunk_info in rechunk_infos:
chunk_index = rechunk_info.out_index
shape = rechunk_info.shape
inp_chunks = rechunk_info.input_chunks
inp_chunk_slices = rechunk_info.input_slices
inp_slice_chunks = []
for inp_chunk, inp_chunk_slice in zip(inp_chunks, inp_chunk_slices):
if all(slc == slice(None) for slc in inp_chunk_slice):
inp_slice_chunks.append(inp_chunk)
else:
slc_chunk = TensorSlice(slices=list(inp_chunk_slice)).new_chunk(
[inp_chunk],
dtype=inp_chunk.dtype,
shape=tuple(
calc_sliced_size(s, slc)
for s, slc in zip(inp_chunk.shape, inp_chunk_slice)
),
index=inp_chunk.index,
)
inp_slice_chunks.append(slc_chunk)
if len(inp_slice_chunks) > 1 or inp_slice_chunks[0].index != chunk_index:
chunk_op = TensorConcatenate()
out_chunk = chunk_op.new_chunk(
inp_slice_chunks,
shape=shape,
index=chunk_index,
dtype=out.dtype,
order=out.order,
)
out_chunks.append(out_chunk)
else:
out_chunks.append(inp_slice_chunks[0])
new_op = op.copy()
params = out.params
params["nsplits"] = chunk_size
params["chunks"] = out_chunks
tensor = new_op.new_tileable(op.inputs, kws=[params])
if op.reassign_worker:
for c in tensor.chunks:
c.op.reassign_worker = True
return [tensor]<|fim_middle|>tile<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
self.assertIsNone(StatValue.from_list([]))<|fim_middle|>test_empty<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, loss_function, dataset):
model_file = get_test_output_path('model{}.bin')
yc.execute([
CATBOOST_PATH,
'fit',
'--loss-function', loss_function,
'-f', dataset.train_file,
'--cd', dataset.cd_file,
'-i', '10',
'-T', '4',
'-m', model_file,
])
return model_file<|fim_middle|>fit<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(*, df_list, year, config, **_):
"""
Combine, parse, and format the provided dataframes
:param df_list: list of dataframes to concat and format
:param args: dictionary, used to run flowbyactivity.py
('year' and 'source')
:return: df, parsed and partially formatted to flowbyactivity
specifications
"""
df = pd.concat(df_list, sort=False, ignore_index=True)
fips = get_all_state_FIPS_2().reset_index(drop=True)
# ensure capitalization of state names
fips['State'] = fips['State'].apply(lambda x: x.title())
fips['StateAbbrev'] = fips['State'].map(us_state_abbrev)
# pad zeroes
fips['FIPS_2'] = fips['FIPS_2'].apply(lambda x: x.ljust(3 + len(x), '0'))
df = pd.merge(
df, fips, how='left', left_on='State', right_on='StateAbbrev')
# set us location code
df.loc[df['State_x'] == 'US', 'FIPS_2'] = US_FIPS
df = df.rename(columns={'FIPS_2': "Location"})
assign_fips_location_system(df, year)
df = df.drop(columns=['StateAbbrev', 'State_x', 'State_y'])
## Extract information for SEDS codes
units = pd.read_excel(config['url']['activities_url'],
sheet_name='Codes_and_Descriptions',
header=10, usecols='B:D')
units['FuelCode'] = units['MSN'].str[0:2]
units['SectorCode'] = units['MSN'].str[2:4]
units['UnitCode'] = units['MSN'].str[4:5]
units = units.query("UnitCode not in ['D', 'K']")
# get fuel names from Total Consumption and Industrial Consumption
fuels = (units.query("SectorCode.isin(['TC', 'IC'])")
.reset_index(drop=True))
fuels['Fuel'] = (fuels.query(
"Description.str.contains('total consumption')")
.Description.str.split(' total consumption', expand=True)[0])
fuels['FuelName2'] = (fuels.query(
"Description.str.contains('consumed by')")
.Description.str.split(' consumed by', expand=True)[0])
fuels['Fuel'] = fuels['Fuel'].fillna(fuels['FuelName2'])
fuels['Fuel'] = fuels['Fuel'].str.rstrip(',')
fuels = (fuels[['Fuel','FuelCode']].dropna().sort_values(by='Fuel')
.drop_duplicates(subset='FuelCode'))
# get sector names
sectors = units.copy()
sectors['ActivityConsumedBy'] = (units.query(
"Description.str.contains('consumed by')")
.Description.str.split('consumed by the ', expand=True)[1]
.str.strip())
sectors = (sectors[['SectorCode', 'ActivityConsumedBy']].dropna()
.sort_values(by='ActivityConsumedBy')
.drop_duplicates(subset='SectorCode'))
units = units.merge(fuels, how='left', on='FuelCode')
units = units.merge(sectors, how='left', on='SectorCode')
units = units.drop(columns=['FuelCode','SectorCode','UnitCode'])
units['Description'] = units['MSN'] + ': ' + units['Description']
df = df.merge(units, how='left', on='MSN')
df = (df.rename(columns={year: "FlowAmount",
"Fuel": "FlowName"})
.drop(columns=['Data_Status'])
.dropna())
# hard code data
df['Class'] = np.where(df['Unit'].str.contains('Btu') |
df['Unit'].str.contains('watt'),
'Energy', 'Other')
df['SourceName'] = 'EIA_SEDS'
df['ActivityProducedBy'] = 'None'
df['Year'] = year
df['FlowType'] = 'TECHNOSPHERE_FLOW'
# Fill in the rest of the Flow by fields so they show
# "None" instead of nan.
df['Compartment'] = 'None'
df['MeasureofSpread'] = 'None'
df['DistributionType'] = 'None'
# Add DQ scores
df['DataReliability'] = 5 # tmp
df['DataCollection'] = 5 # tmp
return df<|fim_middle|>eia_seds_parse<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
return app.send_static_file("registration-form.html")<|fim_middle|>registration_form_html<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
ds = self.mkdataset(True)
ds = self.update.saveAndReturnObject(ds)
assert ds.details.externalInfo
self.assert_type(ds, "test")<|fim_middle|>test_external_info_on_creation<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(filename, silent=False):
schema = Schema()
print( "Try to read EXPRESS schema file" + filename)
with open(filename,'rt') as inp:
contents = inp.METHOD_NAME()
types = re.findall(re_match_type,contents)
for name,aggregate,equals,enums in types:
schema.types[name] = Type(name,aggregate,equals,enums)
entities = re.findall(re_match_entity,contents)
for name,parent,fields_raw in entities:
print('process entity {0}, parent is {1}'.format(name,parent)) if not silent else None
fields = re.findall(re_match_field,fields_raw)
members = [Field(name,type,opt,coll) for name, opt, coll, type in fields]
print(' got {0} fields'.format(len(members))) if not silent else None
schema.entities[name] = Entity(name,parent,members)
return schema<|fim_middle|>read<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(signal_num):
# once Python 3.8 is the minimum supported version,
# signal.strsignal can be used here
signals = type(signal.SIGINT)
try:
signal_str = f'{signals(signal_num).name} ({signal_num})'
except ValueError:
signal_str = f'{signal_num}'
return f'Terminated by signal {signal_str}'<|fim_middle|>signal_message<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(resource_group_name: Optional[str] = None,
trigger_name: Optional[str] = None,
workflow_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWorkflowTriggerCallbackUrlResult:
"""
Get the callback URL for a workflow trigger.
Azure REST API version: 2019-05-01.
:param str resource_group_name: The resource group name.
:param str trigger_name: The workflow trigger name.
:param str workflow_name: The workflow name.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['triggerName'] = trigger_name
__args__['workflowName'] = workflow_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:logic:listWorkflowTriggerCallbackUrl', __args__, opts=opts, typ=ListWorkflowTriggerCallbackUrlResult).value
return AwaitableListWorkflowTriggerCallbackUrlResult(
base_path=pulumi.get(__ret__, 'base_path'),
method=pulumi.get(__ret__, 'method'),
queries=pulumi.get(__ret__, 'queries'),
relative_path=pulumi.get(__ret__, 'relative_path'),
relative_path_parameters=pulumi.get(__ret__, 'relative_path_parameters'),
value=pulumi.get(__ret__, 'value'))<|fim_middle|>list_workflow_trigger_callback_url<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
tf = tempfile.NamedTemporaryFile(mode="w")
tf.write('TELEMETRY tgt1 pkt1 LITTLE_ENDIAN "Packet"\n')
tf.write(" LIMITS_RESPONSE\n")
tf.seek(0)
with self.assertRaisesRegex(
ConfigParser.Error, "No current item for LIMITS_RESPONSE"
):
self.pc.process_file(tf.name, "TGT1")
tf.close()<|fim_middle|>test_complains_if_a_current_item_is<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self, msg_builder):<|fim_middle|>localizer_handle_msg<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(dataset_path: Path, clear: bool = False) -> None:
dataset = _load_dataset(dataset_path)
if clear:
clear_store(dataset)
store = get_store(dataset, external=True)
dedupe_ui(store, url_base="https://opensanctions.org/entities/%s/")<|fim_middle|>dedupe<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, username):
return Inventory().METHOD_NAME(username)<|fim_middle|>get_locations_for_user<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>variable_shape<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())<|fim_middle|>to_str<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
return PESIDRepositoryEntry(
pesid='rhel8-CRB',
major_version='8',
repoid='codeready-builder-for-rhel-8-x86_64-rpms',
rhui='',
arch='x86_64',
channel='ga',
repo_type='rpm')<|fim_middle|>rhel8_crb_pesidrepo<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(ontology):
"""Prints a list of entity types matching a list of input fields.
Args:
ontology: An instance of the OntologyWrapper class.
"""
standard_field_list = _InputFieldsFromUser()
entity_type_match_dict = {}
for i, match in enumerate(
ontology.GetEntityTypesFromFields(standard_field_list)
):
entity_type_match_dict[i] = match
for i in range(DEFAULT_MATCHED_TYPES_LIST_SIZE):
print(colored(f'{i+1}. {entity_type_match_dict[i]}', 'green'))
_PrintFieldMatchComparison(ontology, entity_type_match_dict)
match_selection = input('Would you like to see all matches? (y/n): ')
if match_selection == 'y':
for i, match in [
(index, match)
for index, match in entity_type_match_dict.items()
if match.GetMatchScore() > 0
]:
print(colored(f'{i+1}. {match}', 'green'))
_PrintFieldMatchComparison(ontology, entity_type_match_dict)<|fim_middle|>get_types_for_field_list<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>test_initialization_and_priming<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
@es_test(requires=[cats_adapter])
class TestCatsRequired(SimpleTestCase):
def test_index_exists(self):
assert_index_exists(cats_adapter)
dirty_test = TestCatsRequired()
dirty_test.setUp()
dirty_test.test_index_exists()
# dirty test never cleans up
tolerant_test = TestCatsRequired()
tolerant_test.setUp() # does not raise "index_already_exists_exception"
tolerant_test.test_index_exists()
tolerant_test.tearDown()
tolerant_test.doCleanups()
# tolerant test still cleans up
assert_not_index_exists(cats_adapter)<|fim_middle|>test_setup_tolerates_existing_index<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(request):
"""OSPF verify E-bit and N-bit mismatch."""
tc_name = request.node.name
write_test_header(tc_name)
tgen = get_topogen()
# Don't run this test if we have any failure.
if tgen.routers_have_failure():
pytest.skip(tgen.errors)
global topo
step("Bring up the base config as per the topology")
reset_config_on_routers(tgen)
input_dict = {"r3": {"ospf6": {"neighbors": []}}}
step("Configure r3 as stub router")
stub = {"r3": {"ospf6": {"area": [{"id": "1.1.1.1", "type": "stub"}]}}}
result = create_router_ospf(tgen, topo, stub)
assert result is True, "Testcase {}: Failed \n Error: {}".format(tc_name, result)
# Verify r3 lost its adjacency with r2 due to E-bit mismatch
result = verify_ospf6_neighbor(tgen, topo, dut="r3", input_dict=input_dict)
assert result is True, "Testcase {}: Failed \n Error: {}".format(tc_name, result)
step("Configure r2 as stub router")
stub = {"r2": {"ospf6": {"area": [{"id": "1.1.1.1", "type": "stub"}]}}}
result = create_router_ospf(tgen, topo, stub)
assert result is True, "Testcase {}: Failed \n Error: {}".format(tc_name, result)
# Verify r3 has an adjacency up with r2 again
result = verify_ospf6_neighbor(tgen, topo, dut="r3")
assert result is True, "Testcase {}: Failed \n Error: {}".format(tc_name, result)
step("Configure r3 as NSSA router")
nssa = {"r3": {"ospf6": {"area": [{"id": "1.1.1.1", "type": "nssa"}]}}}
result = create_router_ospf(tgen, topo, nssa)
# Verify r3 lost its adjacency with r2 due to N-bit mismatch
result = verify_ospf6_neighbor(tgen, topo, dut="r3", input_dict=input_dict)
assert result is True, "Testcase {}: Failed \n Error: {}".format(tc_name, result)
step("Configure r2 as NSSA router")
nssa = {"r2": {"ospf6": {"area": [{"id": "1.1.1.1", "type": "nssa"}]}}}
result = create_router_ospf(tgen, topo, nssa)
# Verify r3 has an adjacency up with r2 again
result = verify_ospf6_neighbor(tgen, topo, dut="r3")
assert result is True, "Testcase {}: Failed \n Error: {}".format(tc_name, result)
write_test_footer(tc_name)<|fim_middle|>test_ospfv3_bit_mismatch<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, item: Node, start: int = 0, stop: int = sys.maxsize) -> int: ...<|fim_middle|>index<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, path, include_body=True):
"""
This is an edited method of original class so that we can show
directory listing and set correct Content-Type
"""
path = self.parse_url_path(path)
abspath = os.path.abspath(os.path.join(self.root, path))
self.absolute_path = abspath
if not os.path.exists(abspath):
raise tornado.web.HTTPError(404)
# Check if a directory if so provide listing
if os.path.isdir(abspath):
# need to look at the request.path here for when path is empty
# but there is some prefix to the path that was already
# trimmed by the routing
# Just loop once to get dirnames and filenames :P
for abspath, dirnames, filenames in os.walk(abspath):
break
directory_listing_template = tornado.template.Template(
"""
<html>
<head>
<title>Directory Listing</title>
</head>
<body>
<h1>Index of</h1>
<hr>
<ul>
<li><a href="../">../</a></li>
{% if len(dirnames) > 0 %}
<h2>Directories</h2>
{% for item in dirnames %}
<li><a href="{{ url_escape(item, plus=False) }}/">{{ item }}/</a></li>
{% end %}
{% end %}
{% if len(filenames) > 0 %}
<h2>Files</h2>
{% for item in filenames %}
<li><a href="{{ url_escape(item, plus=False) }}">{{ item }}</a></li>
{% end %}
{% end %}
</ul>
</body>
</html>
"""
)
self.write(directory_listing_template.generate(dirnames=dirnames, filenames=filenames))
return
if os.path.isfile(abspath): # So file
stat_result = os.stat(abspath)
modified = datetime.datetime.fromtimestamp(stat_result[stat.ST_MTIME])
self.set_header("Last-Modified", modified)
mime_type, encoding = mimetypes.guess_type(abspath)
if mime_type:
self.set_header("Content-Type", mime_type)
cache_time = self.get_cache_time(path, modified, mime_type)
if cache_time > 0:
self.set_header("Expires", datetime.datetime.utcnow() + datetime.timedelta(seconds=cache_time))
self.set_header("Cache-Control", "max-age={!s}".format(cache_time))
else:
self.set_header("Cache-Control", "public")
self.set_extra_headers(path)
# Check the If-Modified-Since, and don't send the result if the
# content has not been modified
ims_value = self.request.headers.METHOD_NAME("If-Modified-Since")
if ims_value is not None:
date_tuple = email.utils.parsedate(ims_value)
if_since = datetime.datetime.fromtimestamp(time.mktime(date_tuple))
if if_since >= modified:
self.set_status(304)
return
no_of_lines = self.get_argument("lines", default="-1")
if no_of_lines != "-1":
data = subprocess.check_output(["tail", "-" + no_of_lines, abspath])
else:
with open(abspath, "rb") as file:
data = file.read()
hasher = hashlib.sha1()
hasher.update(data)
self.set_header("Etag", '"{!s}"'.format(hasher.hexdigest()))
if include_body:
self.write(data)
else:
assert self.request.method == "HEAD"
self.set_header("Content-Length", len(data))<|fim_middle|>get<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> None:
self._archive_download_url: Attribute[str] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._expired: Attribute[bool] = NotSet
self._expires_at: Attribute[datetime] = NotSet
self._head_sha: Attribute[str] = NotSet
self._id: Attribute[int] = NotSet
self._name: Attribute[str] = NotSet
self._node_id: Attribute[str] = NotSet
self._size_in_bytes: Attribute[int] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._url: Attribute[str] = NotSet
self._workflow_run: Attribute[WorkflowRun] = NotSet<|fim_middle|>init_attributes<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, *args, **kwargs):
"""Overrides default init by loading value from checkpoint."""
# pylint: disable=protected-access
self._old_init(*args, **kwargs)
ckpt_name = self._map_func(self._shared_name)
if ckpt_name not in self._ckpt_var_cache:
raise errors.NotFoundError(None, None,
"%s not found in checkpoint" % ckpt_name)
val = self._ckpt_var_cache.get(ckpt_name, None)
if val is not None:
self.assign(val)
# Avoid assigning for the second time.
self._ckpt_var_cache[ckpt_name] = None
# pylint: enable=protected-access<|fim_middle|>init_from_checkpoint<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
multiprocessing_tcp_port_selector: TCPPortSelector, context: BaseContext
):
queue = context.Queue()
p1 = context.Process( # type: ignore[attr-defined]
target=get_multiprocessing_tcp_port,
args=(multiprocessing_tcp_port_selector, MULTIPROCESSING_PORT, queue),
)
p2 = context.Process( # type: ignore[attr-defined]
target=get_multiprocessing_tcp_port,
args=(multiprocessing_tcp_port_selector, MULTIPROCESSING_PORT, queue),
)
p1.start()
p2.start()
free_tcp_port_1 = queue.get()
free_tcp_port_2 = queue.get()
p1.join()
p2.join()
actual_results = [free_tcp_port_1, free_tcp_port_2]
assert MULTIPROCESSING_PORT in actual_results
assert None in actual_results<|fim_middle|>test_tcp_port_selector_uses_multiprocess_leases<|file_separator|> |
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>test_system_artifacts_are_not_marked_in<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, stat_buf, stat):
def store(offset, val):
return self.state.memory.store(stat_buf + offset, val, endness=self.state.arch.memory_endness)
store(0x00, stat.st_dev)
store(0x08, stat.st_ino)
store(0x10, stat.st_mode)
store(0x14, stat.st_nlink)
store(0x18, stat.st_uid)
store(0x1C, stat.st_gid)
store(0x20, stat.st_rdev)
store(0x28, self.state.solver.BVV(0, 64))
store(0x30, stat.st_size)
store(0x38, stat.st_blksize)
store(0x3C, self.state.solver.BVV(0, 32))
store(0x40, stat.st_blocks)
store(0x48, stat.st_atime)
store(0x4C, stat.st_atimensec)
store(0x50, stat.st_mtime)
store(0x54, stat.st_mtimensec)
store(0x58, stat.st_ctime)
store(0x5C, stat.st_ctimensec)
store(0x60, self.state.solver.BVV(0, 32))
store(0x64, self.state.solver.BVV(0, 32))<|fim_middle|>store_ppc32<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
The learning rate lambda functions will only be saved if they are callable objects
and not if they are functions or lambdas.
"""
METHOD_NAME = {
key: value
for key, value in self.__dict__.items()
if key not in ("optimizer", "lr_lambdas")
}
METHOD_NAME["lr_lambdas"] = [None] * len(self.lr_lambdas)
for idx, fn in enumerate(self.lr_lambdas):
if not isinstance(fn, types.FunctionType):
METHOD_NAME["lr_lambdas"][idx] = fn.__dict__.copy()
return METHOD_NAME<|fim_middle|>state_dict<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
share_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetShareResult:
"""
Get a share
:param str account_name: The name of the share account.
:param str resource_group_name: The resource group name.
:param str share_name: The name of the share to retrieve.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
__args__['shareName'] = share_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:datashare/v20210801:getShare', __args__, opts=opts, typ=GetShareResult).value
return AwaitableGetShareResult(
created_at=pulumi.get(__ret__, 'created_at'),
description=pulumi.get(__ret__, 'description'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
share_kind=pulumi.get(__ret__, 'share_kind'),
system_data=pulumi.get(__ret__, 'system_data'),
terms=pulumi.get(__ret__, 'terms'),
type=pulumi.get(__ret__, 'type'),
user_email=pulumi.get(__ret__, 'user_email'),
user_name=pulumi.get(__ret__, 'user_name'))<|fim_middle|>get_share<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(sSearch=''):
oGui = cGui()
if sSearch:
sUrl = sSearch
else:
oInputParameterHandler = cInputParameterHandler()
sUrl = oInputParameterHandler.getValue('siteUrl')
oRequestHandler = cRequestHandler(sUrl)
sHtmlContent = oRequestHandler.request()
sPattern = 'class="featured-image"><a href="([^"]+)" title="([^"]+)"><img width=".+?" height=".+?" src="([^"]+)'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if not aResult[0]:
oGui.addText(SITE_IDENTIFIER)
if aResult[0]:
total = len(aResult[1])
progress_ = progress().VScreate(SITE_NAME)
oOutputParameterHandler = cOutputParameterHandler()
for aEntry in aResult[1]:
progress_.VSupdate(progress_, total)
if progress_.iscanceled():
break
# first post filter
if (str(aEntry[2]) != "https://www.mamcin.com/wp-content/uploads/2017/10/plus-belle-la-vie-episode-suivant-en-avance.jpg"):
sUrl = aEntry[0]
sTitle = aEntry[1]
sThumb = aEntry[2]
oOutputParameterHandler.addParameter('siteUrl', sUrl)
oOutputParameterHandler.addParameter('sMovieTitle', sTitle)
oOutputParameterHandler.addParameter('sThumb', sThumb)
oGui.addMovie(SITE_IDENTIFIER, 'showHosters', sTitle, '', sThumb, '', oOutputParameterHandler)
progress_.VSclose(progress_)
sNextPage = __checkForNextPage(sHtmlContent)
if sNextPage:
oOutputParameterHandler = cOutputParameterHandler()
oOutputParameterHandler.addParameter('siteUrl', sNextPage)
sPaging = re.search('page/([0-9]+)', sNextPage).group(1)
oGui.addNext(SITE_IDENTIFIER, 'showMovies', 'Page ' + sPaging, oOutputParameterHandler)
if not sSearch:
oGui.setEndOfDirectory()<|fim_middle|>show_movies<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
has_arg, option = getopt.long_has_args('abc', ['abc='])
self.assertTrue(has_arg)
self.assertEqual(option, 'abc')
has_arg, option = getopt.long_has_args('abc', ['abc'])
self.assertFalse(has_arg)
self.assertEqual(option, 'abc')
has_arg, option = getopt.long_has_args('abc', ['abcd'])
self.assertFalse(has_arg)
self.assertEqual(option, 'abcd')
self.assertError(getopt.long_has_args, 'abc', ['def'])
self.assertError(getopt.long_has_args, 'abc', [])
self.assertError(getopt.long_has_args, 'abc', ['abcd','abcde'])<|fim_middle|>test_long_has_args<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
self,
configuration: ExpectationConfiguration,
metrics,
runtime_configuration: dict = None,
execution_engine=None,
):
success = metrics.get("column_values.geometry_overlap").get("success")
indices = metrics.get("column_values.geometry_overlap").get("indices")
return {"success": success, "result": {"overlapping_indices": indices}}<|fim_middle|>validate<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>(self) -> None:
"""
Represents one loop of the service.
Users should override this method.
To actually run the service once, call `LoopService().start(loops=1)`
instead of `LoopService().run_once()`, because this method will not invoke setup
and teardown methods properly.
"""
raise NotImplementedError("LoopService subclasses must implement this method.")<|fim_middle|>run_once<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
sm2mm_1 = SomeM2MModel.objects.create(name='abc')
sm2mm_1.polymorphics = [self.pol_1, self.pol_2]
sm2mm_1 = SomeM2MModel.objects.get(name='abc')
sm2mm_2 = SomeM2MModel.objects.create(name='def')
sm2mm_2.polymorphics = [self.pol_2, self.pol_3]
with self.assertNumQueries(5):
# 5 queries:
# 1) SomeM2MModel
# 2) Content Types (usually cached, but turned off in tests)
# 3) PolymorphicModelBaseTest ids
# 4) PolymorphicModelTest based on 3)
# 5) PolymorphicModelTest2 based on 3)
result = {
sm.name: sm for sm in
SomeM2MModel.objects.prefetch_related(Prefetch(
lookup='polymorphics',
queryset=PolymorphicModelBaseTest.polymorphic_objects.polymorphic_filter( # noqa
some_m2m__in=SomeM2MModel.objects.all()
).all(),
)).order_by('name')
}
self.assertCountEqual(
result['abc'].polymorphics.all(),
[self.pol_1, self.pol_2]
)
self.assertCountEqual(
[inst._meta.model for inst in result['abc'].polymorphics.all()],
[PolymorphicModelTest, PolymorphicModelTest]
)
self.assertCountEqual(
result['def'].polymorphics.all(),
[self.pol_2, self.pol_3]
)
self.assertCountEqual(
[inst._meta.model for inst in result['def'].polymorphics.all()],
[PolymorphicModelTest, PolymorphicModelTest2]
)<|fim_middle|>test_m2m_with_prefetch_related_on_polymorphic<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Notes
-----
计算所有的时间层。
"""
rdir = self.options['rdir']
step = self.options['step']
timeline = self.timeline
dt = timeline.current_time_step_length()
timeline.reset() # 时间置零
fname = rdir + '/test_'+ str(timeline.current).zfill(10) + '.vtu'
self.write_to_vtk(fname)
print(fname)
while not timeline.stop():
self.one_step_solve()
timeline.current += 1
if timeline.current%step == 0:
fname = rdir + '/test_'+ str(timeline.current).zfill(10) + '.vtu'
print(fname)
self.write_to_vtk(fname)
timeline.reset()<|fim_middle|>solve<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(path):
isolation = {'cgroup': {'path': path}}
client.load('empty', processes=1, isolation=isolation)<|fim_middle|>set_cgroup_path<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Node | None: ...<|fim_middle|>previous_sibling<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>() -> None:
migrate_nulls()
with op.batch_alter_table("journalist_login_attempt", schema=None) as batch_op:
batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
with op.batch_alter_table("replies", schema=None) as batch_op:
batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
with op.batch_alter_table("revoked_tokens", schema=None) as batch_op:
batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
with op.batch_alter_table("seen_files", schema=None) as batch_op:
batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
with op.batch_alter_table("seen_messages", schema=None) as batch_op:
batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)
with op.batch_alter_table("seen_replies", schema=None) as batch_op:
batch_op.alter_column("journalist_id", existing_type=sa.INTEGER(), nullable=False)<|fim_middle|>upgrade<|file_separator|> |
<|fim_prefix|>async def <|fim_suffix|>(self, unit_tag: int):
"""
Override this in your bot class.
This will event will be called when a unit (or structure, friendly or enemy) dies.
For enemy units, this only works if the enemy unit was in vision on death.
:param unit_tag:
"""<|fim_middle|>on_unit_destroyed<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
# Wrong type of b1 and bk2 power spectra
with pytest.raises(ValueError):
ccl.nl_pt.LagrangianPTCalculator(b1_pk_kind='non-linear')
# Wrong type of b1 and bk2 power spectra
with pytest.raises(ValueError):
ccl.nl_pt.LagrangianPTCalculator(bk2_pk_kind='non-linear')
# Uninitialized templates
with pytest.raises(ccl.CCLError):
ptc = ccl.nl_pt.LagrangianPTCalculator()
ptc.get_biased_pk2d(TRS['TG'])
# TODO: Discuss this test
# Wrong pair combination
with pytest.raises(ValueError):
ptc = ccl.nl_pt.LagrangianPTCalculator(cosmo=COSMO)
ptc.get_pk2d_template('b1:b3')<|fim_middle|>test_lpt_calculator_raises<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, op_fn, type_fn):
x = type_fn()
f = function([x], op_fn(x))
xval = _asarray(np.random.random(10) * 10, dtype=type_fn.dtype)
yval = f(xval)
assert str(yval.dtype) == op_fn.scalar_op.output_types_preference.spec[0].dtype<|fim_middle|>test_0<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(replay_start, timestamp):
if len(timestamp.split(".")[-1]) < 6:
for i in range(6 - len(timestamp.split(".")[-1])):
timestamp = timestamp + "0"
start = datetime.fromisoformat(replay_start)
stamp = datetime.fromisoformat(timestamp)
if start.tzinfo is None:
start = start.replace(tzinfo=timezone.utc)
if stamp.tzinfo is None:
stamp = stamp.replace(tzinfo=timezone.utc)
return ((stamp - start).total_seconds()) * 1000<|fim_middle|>calc_diff<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, other):
"""
The equals method.
:param other: a different object.
:type other: object
:return: True if equal, otherwise False.
:rtype: bool
"""
if not isinstance(other, ASTForStmt):
return False
if self.get_variable() != other.get_variable():
return False
if not self.get_start_from().METHOD_NAME(other.get_start_from()):
return False
if not self.get_end_at().METHOD_NAME(other.get_end_at()):
return False
if self.get_step() != other.get_step():
return False
return self.get_block().METHOD_NAME(other.get_block())<|fim_middle|>equals<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(left, right):
if diff_options:
opts = ' ' + ' '.join(diff_options)
else:
opts = ''
print('diff%s %s %s' % (opts, left, right))<|fim_middle|>diff_line<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, pathname, setting):
"""
_addEnvironmentPath_
add a key = value1:value2:value3 environment setting to this step
"""
if getattr(self.data.environment.paths, pathname, None) == None:
setattr(self.data.environment.paths, pathname, [])
pathentry = getattr(self.data.environment.paths, pathname)
pathentry.append(setting)
return<|fim_middle|>add_environment_path<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
"""
Testing the exact solution which is a property of every backend.
"""
n_qubits = 8
register = range(n_qubits)
p = 1
correct_energy = -8
correct_config = [0, 1, 0, 1, 0, 1, 0, 1]
# The tests pass regardless of the value of betas and gammas is this correct?
betas = [np.pi / 8]
gammas = [np.pi / 4]
cost_hamiltonian = ring_of_disagrees(register)
mixer_hamiltonian = X_mixer_hamiltonian(n_qubits)
qaoa_descriptor = QAOADescriptor(cost_hamiltonian, mixer_hamiltonian, p)
variational_params_std = QAOAVariationalStandardParams(
qaoa_descriptor, betas, gammas
)
backend_analytical = QAOABackendAnalyticalSimulator(qaoa_descriptor)
# exact solution is defined as the property of the cost function
energy_vec, config_vec = backend_analytical.exact_solution
assert np.isclose(energy_vec, correct_energy)
config_vec = [config.tolist() for config in config_vec]
assert correct_config in config_vec<|fim_middle|>test_exact_solution<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
cl = ConfigList(["un", "quatre", "trois"])
cl[1] = "deux"
self.assertEqual(cl.serialize(), ["un", "deux", "trois"])<|fim_middle|>test_config_list_setitem<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--config_path',
type=str,
default=None,
help="path of compression strategy config.",
required=True)
parser.add_argument(
'--analysis_file',
type=str,
default='sensitivity_0.data',
help="directory to save compressed model.")
parser.add_argument(
'--pruned_ratios',
nargs='+',
type=float,
default=[0.1, 0.2, 0.3, 0.4],
help="The ratios to be pruned when compute sensitivity.")
parser.add_argument(
'--target_loss',
type=float,
default=0.2,
help="use the target loss to get prune ratio of each parameter")
return parser<|fim_middle|>argsparser<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return self.try_parse_helper("#")<|fim_middle|>try_parse_hash<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
if get_env("USE_FAST_LN"):
paddle.nn.LayerNorm = FastLayerNorm
elif get_env("USE_FUSED_LN"):
paddle.nn.LayerNorm = FusedLayerNorm
elif get_env("USE_FUSED_RMS_NORM"):
paddle.nn.LayerNorm = FusedRMSNorm
if get_env("USE_LINEAR_WITH_GRAD_ADD"):
paddle.nn.functional.linear = FusedLinearWithGradAdd.apply
paddle.incubate.nn.functional.fused_linear = FusedLinearWithGradAdd.apply<|fim_middle|>mock_layers<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, item, spider): # noqa: C901
check_field(item, spider, "brand_wikidata", allowed_types=(str,), match_regex=self.wikidata_regex)
check_field(item, spider, "website", (str,), self.url_regex)
check_field(item, spider, "image", (str,), self.url_regex)
check_field(item, spider, "email", (str,), self.email_regex)
check_field(item, spider, "phone", (str,))
check_field(item, spider, "street_address", (str,))
check_field(item, spider, "city", (str,))
check_field(item, spider, "state", (str,))
check_field(item, spider, "postcode", (str,))
check_field(item, spider, "country", (str,), self.country_regex)
check_field(item, spider, "name", (str,))
check_field(item, spider, "brand", (str,))
if coords := get_lat_lon(item):
lat, lon = coords
if not (self.min_lat < lat < self.max_lat):
spider.crawler.stats.inc_value("atp/field/lat/invalid")
lat = None
if not (self.min_lon < lon < self.max_lon):
spider.crawler.stats.inc_value("atp/field/lon/invalid")
lon = None
if isinstance(lat, float) and isinstance(lon, float):
if math.fabs(lat) < 3 and math.fabs(lon) < 3:
spider.crawler.stats.inc_value("atp/geometry/null_island")
lat = None
lon = None
set_lat_lon(item, lat, lon)
if not (item.get("geometry") or get_lat_lon(item)):
spider.crawler.stats.inc_value("atp/field/lat/missing")
spider.crawler.stats.inc_value("atp/field/lon/missing")
if twitter := item.get("twitter"):
if not isinstance(twitter, str):
spider.crawler.stats.inc_value("atp/field/twitter/wrong_type")
elif not (self.url_regex.match(twitter) and "twitter.com" in twitter) and not self.twitter_regex.match(
twitter
):
spider.crawler.stats.inc_value("atp/field/twitter/invalid")
else:
spider.crawler.stats.inc_value("atp/field/twitter/missing")
if opening_hours := item.get("opening_hours"):
if isinstance(opening_hours, OpeningHours):
if opening_hours.day_hours:
item["opening_hours"] = opening_hours.as_opening_hours()
else:
item["opening_hours"] = None
spider.crawler.stats.inc_value("atp/field/opening_hours/missing")
elif not isinstance(opening_hours, str):
spider.crawler.stats.inc_value("atp/field/opening_hours/wrong_type")
elif not self.opening_hours_regex.match(opening_hours) and opening_hours != "24/7":
spider.crawler.stats.inc_value("atp/field/opening_hours/invalid")
else:
spider.crawler.stats.inc_value("atp/field/opening_hours/missing")
return item<|fim_middle|>process_item<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(stress):
mean = sigma_mean(stress)
return np.sqrt(0.5 * (np.power(stress[0] - mean, 2) + 2*stress[1]*stress[1] + 2*stress[2]*stress[2] + np.power(stress[3] - mean, 2) + 2*stress[4]*stress[4] + np.power(stress[5] - mean, 2)))<|fim_middle|>sigma_bar<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(space, expected_batch_space_4):
batch_space_4 = batch_space(space, n=4)
assert batch_space_4 == expected_batch_space_4<|fim_middle|>test_batch_space<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self):
return {
# sell
"energy_sell_rate": self._sell_energy_profile.input_energy_rate,
"energy_rate_profile": self._sell_energy_profile.input_profile,
"energy_rate_profile_uuid": self._sell_energy_profile.input_profile_uuid,
# buy
"energy_buy_rate": self._buy_energy_profile.input_energy_rate,
"buying_rate_profile": self._buy_energy_profile.input_profile,
"buying_rate_profile_uuid": self._buy_energy_profile.input_profile_uuid,
}<|fim_middle|>serialize<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
self,
start_well: AbstractWellCore,
num_channels: int = 1,
fail_if_full: bool = False,
):
"""
Removes tips from the tip tracker.
This method should be called when a tip is picked up. Generally, it
will be called with `num_channels=1` or `num_channels=8` for single-
and multi-channel respectively. If picking up with more than one
channel, this method will automatically determine which tips are used
based on the start well, the number of channels, and the geometry of
the tiprack.
:param start_well: The :py:class:`.Well` from which to pick up a tip.
For a single-channel pipette, this is the well to
send the pipette to. For a multi-channel pipette,
this is the well to send the back-most nozzle of the
pipette to.
:type start_well: :py:class:`.Well`
:param num_channels: The number of channels for the current pipette
:type num_channels: int
:param fail_if_full: for backwards compatibility
"""
# Select the column of the labware that contains the target well
target_column = [col for col in self._columns if start_well in col][0]
well_idx = target_column.index(start_well)
# Number of tips to pick up is the lesser of (1) the number of tips
# from the starting well to the end of the column, and (2) the number
# of channels of the pipette (so a 4-channel pipette would pick up a
# max of 4 tips, and picking up from the 2nd-to-bottom well in a
# column would get a maximum of 2 tips)
num_tips = min(len(target_column) - well_idx, num_channels)
target_wells = target_column[well_idx : well_idx + num_tips]
# In API version 2.2, we no longer reset the tip tracker when a tip
# is dropped back into a tiprack well. This fixes a behavior where
# subsequent transfers would reuse the dirty tip. However, sometimes
# the user explicitly wants to use a dirty tip, and this check would
# raise an exception if they tried to do so.
# An extension of work here is to have separate tip trackers for
# dirty tips and non-present tips; but until then, we can avoid the
# exception.
if fail_if_full:
assert all(
well.has_tip() for well in target_wells
), "{} is out of tips".format(str(self))
for well in target_wells:
well.set_has_tip(False)<|fim_middle|>use_tips<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(administrator_name: Optional[str] = None,
managed_instance_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetManagedInstanceAdministratorResult:
"""
Gets a managed instance administrator.
Azure REST API version: 2021-11-01.
:param str managed_instance_name: The name of the managed instance.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
"""
__args__ = dict()
__args__['administratorName'] = administrator_name
__args__['managedInstanceName'] = managed_instance_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:sql:getManagedInstanceAdministrator', __args__, opts=opts, typ=GetManagedInstanceAdministratorResult).value
return AwaitableGetManagedInstanceAdministratorResult(
administrator_type=pulumi.get(__ret__, 'administrator_type'),
id=pulumi.get(__ret__, 'id'),
login=pulumi.get(__ret__, 'login'),
name=pulumi.get(__ret__, 'name'),
sid=pulumi.get(__ret__, 'sid'),
tenant_id=pulumi.get(__ret__, 'tenant_id'),
type=pulumi.get(__ret__, 'type'))<|fim_middle|>get_managed_instance_administrator<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
blurb = lldb.debugger.GetVersionString()
top = blurb.split('\n')[0]
full = top.split(' ')[2]
major, minor = full.split('.')[:2]
return LldbVersion(full, int(major), int(minor))<|fim_middle|>compute_lldb_ver<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(module, client, backup_plan_name: str):
backup_plan_id = _list_backup_plans(client, backup_plan_name)
if not backup_plan_id:
return []
try:
result = client.get_backup_plan(BackupPlanId=backup_plan_id)
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg=f"Failed to describe plan {backup_plan_id}")
# Turn the boto3 result in to ansible_friendly_snaked_names
snaked_backup_plan = []
try:
resource = result.get("BackupPlanArn", None)
tag_dict = get_backup_resource_tags(module, client, resource)
result.update({"tags": tag_dict})
except (botocore.exceptions.BotoCoreError, botocore.exceptions.ClientError) as e:
module.fail_json_aws(e, msg="Failed to get the backup plan tags")
snaked_backup_plan.append(camel_dict_to_snake_dict(result, ignore_list="tags"))
# Remove AWS API response and add top-level plan name
for v in snaked_backup_plan:
if "response_metadata" in v:
del v["response_metadata"]
v["backup_plan_name"] = v["backup_plan"]["backup_plan_name"]
return snaked_backup_plan<|fim_middle|>get_plan_details<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, variable):
if isinstance(variable, VariableMatch):
return variable
match = search_variable(variable)
if not match.is_variable() or match.items:
raise DataError("Invalid variable name '%s'." % variable)
return match<|fim_middle|>get_match<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(end_point_name, model_name, token):
if token is None:
return False
FedMLModelCache.get_instance().set_redis_params(settings.redis_addr, settings.redis_port, settings.redis_password)
cached_token = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port). \
get_end_point_token(end_point_name, model_name)
if cached_token is not None and cached_token == token:
return True
return False<|fim_middle|>auth_request_token<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(config, env):
import rtyaml
fn = os.path.join(env['STORAGE_ROOT'], 'settings.yaml')
with open(fn, "w") as f:
f.write(rtyaml.dump(config))<|fim_middle|>write_settings<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, ep_name, lang, title=notifyStrings[NOTIFY_SUBTITLE_DOWNLOAD]):
"""
Sends a Join notification when subtitles for an episode are downloaded
:param ep_name: The name of the episode subtitles were downloaded for
:param lang: The language of the downloaded subtitles
:param title: The title of the notification to send
"""
if settings.JOIN_NOTIFY_ONSUBTITLEDOWNLOAD:
self._notify_join(title, "{0}: {1}".format(ep_name, lang))<|fim_middle|>notify_subtitle_download<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> 'outputs.ResourceReferenceResponse':
"""
Reference to a resource.
"""
return pulumi.get(self, "custom_certificate")<|fim_middle|>custom_certificate<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(s: bytes) -> bytes:
"""
Serialize a byte string with Bitcoin's variable length string serialization.
:param s: The byte string to be serialized
:returns: The serialized byte string
"""
return ser_compact_size(len(s)) + s<|fim_middle|>ser_string<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, method):
"""
Teardown method
"""
try:
self.client.admin_drop_user("example-test")
time.sleep(1)
except e.InvalidUser:
pass
self.client.close()<|fim_middle|>teardown_method<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>():
import subprocess
cmd = "xdputil query | grep 'DPU Batch' | awk -F':' '{ print $2}' | awk -F',' '{ print $1}' "
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
ret = p.communicate()
if ret[0] == b'':
return 1
return int(ret[0])<|fim_middle|>get_batch<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|> (programs):
print_test_name ("Rewrite test")
output = programs.meta_set (False, "--bext-originator \"Really, really long string\" output.wav")
output = programs.meta_set (False, "--bext-originator \"Short\" output.wav")
output = programs.meta_get (False, "--bext-originator output.wav")
if output.find ("really long") > 0:
print("\n\nError : output '%s' should not contain 'really long'." % output)
sys.exit (1)
print("ok")<|fim_middle|>test_rewrite<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(vertices, faces, root=0):
"""Unify the cycle directions of the given faces such that adjacent faces share opposite halfedges.
Parameters
----------
vertices : sequence[[float, float, float] | :class:`~compas.geometry.Point`]
A list of vertex coordinates.
faces : sequence[sequence[int]]
A list of faces with each face defined by a list of indices into the list of vertices.
root : int, optional
The starting face.
Returns
-------
list[list[int]]
A list of faces with the same orientation as the root face.
Raises
------
AssertionError
If not all faces were visited.
Notes
-----
The algorithm works by first building an adjacency dict of the faces, which can be traversed efficiently to unify all face cycles.
Although this process technically only requires the connectivity information contained in the faces,
the locations of the vertices can be used to speed up execution for very large collections of faces.
Examples
--------
>>> vertices = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 1.0]]
>>> faces = [[0, 1, 2], [0, 3, 2]]
>>> unify_cycles(vertices, faces)
[[0, 1, 2], [2, 3, 0]]
"""
def unify(node, nbr):
# find the common edge
for u, v in pairwise(faces[nbr] + faces[nbr][0:1]):
if u in faces[node] and v in faces[node]:
# node and nbr have edge u-v in common
i = faces[node].index(u)
j = faces[node].index(v)
if i == j - 1 or (j == 0 and u == faces[node][-1]):
# if the traversal of a neighboring halfedge
# is in the same direction
# flip the neighbor
faces[nbr][:] = faces[nbr][::-1]
return
adj = face_adjacency(vertices, faces)
visited = breadth_first_traverse(adj, root, unify)
assert len(list(visited)) == len(faces), "Not all faces were visited"
return faces<|fim_middle|>unify_cycles<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, key: str) -> Any:
"""@brief Return the highest priority value for the option, or its default."""
for layer in self._layers:
if key in layer:
return layer[key]
return self.get_default(key)<|fim_middle|>get<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(cls) -> dict[Any, Callable[[Any], Any]]:
if pydantic.VERSION.startswith("1"): # pragma: no cover
return {**_base_encoders, **cls._create_pydantic_v1_encoders()}
return {**_base_encoders, **cls._create_pydantic_v2_encoders()}<|fim_middle|>encoders<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
cls,
method_,
url_,
api_key=None,
idempotency_key=None,
stripe_version=None,
stripe_account=None,
params=None,
):
params = None if params is None else params.copy()
api_key = util.read_special_variable(params, "api_key", api_key)
idempotency_key = util.read_special_variable(
params, "idempotency_key", idempotency_key
)
stripe_version = util.read_special_variable(
params, "stripe_version", stripe_version
)
stripe_account = util.read_special_variable(
params, "stripe_account", stripe_account
)
headers = util.read_special_variable(params, "headers", None)
requestor = api_requestor.APIRequestor(
api_key, api_version=stripe_version, account=stripe_account
)
if idempotency_key is not None:
headers = {} if headers is None else headers.copy()
headers.update(util.populate_headers(idempotency_key)) # type: ignore
response, _ = requestor.request_stream(method_, url_, params, headers)
return response<|fim_middle|>static_request_stream<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(dataloader):
latency_list = []
for idx, (inputs, labels) in enumerate(dataloader):
# dataloader should keep the order and len of inputs same with input_tensor
inputs = np.array([inputs])
feed_dict = dict(zip(input_tensor, inputs))
start = time.time()
predictions = model.sess.run(output_tensor, feed_dict)
end = time.time()
metric.update(predictions, labels)
latency_list.append(end-start)
if idx + 1 == iteration:
break
latency = np.array(latency_list).mean() / args.batch_size
return latency<|fim_middle|>eval_func<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>( f, len ):
global Setsdrmasks, SetsdrmasksOnesCount
byteCount = (len+7)//8
Setsdrmasks = f.read( byteCount )
ls = []
SetsdrmasksOnesCount = 0
for b in Setsdrmasks:
ls.append( "%x" % ((b & 0xf0) >> 4) )
ls.append( "%x" % ( b & 0x0f ) )
for i in range(8):
if b & (1<<i):
SetsdrmasksOnesCount = SetsdrmasksOnesCount +1
return ''.join(ls)<|fim_middle|>read_sdrmasks<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(args: tuple[str, ...], expanded: tuple[str, ...]) -> None:
cli_alias = CliAlias.from_dict(
{
"--alias": "--flag goal",
}
)
assert cli_alias.expand_args(args) == expanded<|fim_middle|>test_expand_args_flag<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(filters):
amount = 0.0
# get amounts from all the apps
for method_name in frappe.get_hooks(
"get_amounts_not_reflected_in_system_for_bank_reconciliation_statement"
):
amount += frappe.get_attr(method_name)(filters) or 0.0
return amount<|fim_middle|>get_amounts_not_reflected_in_system<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(
self,
pipeline_types: Tuple[MypyType, ...],
pipeline_kinds: List[ArgKind],
ctx: CallableContext,
) -> MypyType:
"""Pass pipeline functions to infer them one by one."""
parameter = FuncArg(None, self._instance, ARG_POS)
ret_type = ctx.default_return_type
for pipeline, kind in zip(pipeline_types, pipeline_kinds):
ret_type = self._proper_type(
analyze_call(
cast(FunctionLike, pipeline),
[parameter],
ctx,
show_errors=True,
),
)
parameter = FuncArg(None, ret_type, kind)
return ret_type<|fim_middle|>from_callable_sequence<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self) -> Optional['outputs.PeeringPropertiesDirectResponse']:
"""
The properties that define a direct peering.
"""
return pulumi.get(self, "direct")<|fim_middle|>direct<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(model_name: str) -> None:
if SWIGNORE_PATH.exists():
lines = SWIGNORE_PATH.read_text().splitlines()
else:
lines = []
write_lines = []
for line in lines:
if "*/base-" in line or "*/adapter-" in line:
continue
write_lines.append(line)
for m in SUPPORTED_MODELS:
if m == model_name:
continue
write_lines.append(f"*/base-{m}/*")
write_lines.append(f"*/adapter-{m}/*")
SWIGNORE_PATH.write_text("\n".join(write_lines))<|fim_middle|>update_swignore<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, api_url):
response = requests.get(api_url, timeout=10)
response.raise_for_status()
return response.json()<|fim_middle|>call_api<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(band):
amp_mask = get_all_nonamp_fiberbitmask_val()
if band.lower().find('b')>=0:
amp_mask |= fmsk.BADAMPB
if band.lower().find('r')>=0:
amp_mask |= fmsk.BADAMPR
if band.lower().find('z')>=0:
amp_mask |= fmsk.BADAMPZ
return amp_mask<|fim_middle|>get_all_fiberbitmask_with_amp<|file_separator|> |
<|fim_prefix|>def <|fim_suffix|>(self, fields) -> Optional[List[FieldModel]]:
"""
Recursively convert the parsed schema into required models
"""
field_models = []
for field in fields:
try:
field_models.append(
FieldModel(
name=field.name,
dataType=ProtobufDataTypes(field.type).name,
children=self.METHOD_NAME(field.message_type.fields)
if field.type == 11
else None,
)
)
except Exception as exc: # pylint: disable=broad-except
logger.debug(traceback.format_exc())
logger.warning(
f"Unable to parse the protobuf schema into models: {exc}"
)
return field_models<|fim_middle|>get_protobuf_fields<|file_separator|> |