text (string, lengths 67 to 7.88k)
<|fim_prefix|>def <|fim_suffix|>(self): """Transcribe spack variants into names of Catalyst Editions""" selected = ["Base"] # Always required if "+python" in self.spec: selected.append("Enable-Python") if "+essentials" in self.spec: selected.append("Essentials") if "+extras" in self.spec: selected.append("Essentials") selected.append("Extras") if "+rendering" in self.spec: selected.append("Essentials") selected.append("Extras") selected.append("Rendering-Base") return selected<|fim_middle|>editions<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(api_link_id: Optional[pulumi.Input[str]] = None, product_id: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, service_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetProductApiLinkResult]: """ Gets the API link for the product. :param str api_link_id: Product-API link identifier. Must be unique in the current API Management service instance. :param str product_id: Product identifier. Must be unique in the current API Management service instance. :param str resource_group_name: The name of the resource group. The name is case insensitive. :param str service_name: The name of the API Management service. """ ...<|fim_middle|>get_product_api_link_output<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, tmp_path): query_file_path = tmp_path / "sql" / "test" / "query_v1" os.makedirs(query_file_path) query_file = query_file_path / "query.sql" query_file.write_text("-- comment \n SELECT 1") metadata_conf = { "friendly_name": "test", "description": "test", "owners": ["[email protected]"], } metadata_file = query_file_path / "metadata.yaml" metadata_file.write_text(yaml.dump(metadata_conf)) runner = CliRunner() with patch("subprocess.check_call") as mock_call: mock_call.return_value = True result = runner.invoke( run, [str(query_file), "--dataset_id=test", "--destination_table=query_v1"], ) assert result.exit_code == 0 assert mock_call.call_args.args == ( [ "bq", "query", "--dataset_id=test", "--destination_table=query_v1", ], ) assert "stdin" in mock_call.call_args.kwargs<|fim_middle|>test_run_query<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): vec_up = [0, 0, 1] if self.alternate: vec_back = [0, 1, 0] else: vec_back = [0, -1, 0] vec_right = [1, 0, 0] self.set_view(vec_up, vec_back, vec_right)<|fim_middle|>set_view_y<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): out = StoreOptions.merge_options(['plot', 'style'], plot={'Image':dict(fig_size=150)}, style={'Image':dict(cmap='Blues')}) self.assertEqual(out, self.expected)<|fim_middle|>test_partitioned_format<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): user = self.request.user # user.has_perms hasn't been called yet so django-guardian # hasn't replaced the AnonymousUser with an actual user object if user.is_anonymous: return redirect_to_login( self.request.get_full_path(), settings.LOGIN_URL, "next" ) if hasattr(self, "get_object"): obj = self.get_object() else: obj = None perms = self.get_required_permissions() if user.has_perms(perms, obj): return None else: raise PermissionDenied<|fim_middle|>check_permissions<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(test_case): device = "cuda" channel = random(1, 32).to(int) height = random(1, 2).to(int) width = random(1024, 8192).to(int) def get_random_norm_shape(): begin_axis = random(1, 3).to(int).value() return tuple((channel.value(), height.value(), width.value())[begin_axis:]) m = torch.nn.LayerNorm( normalized_shape=get_random_norm_shape(), elementwise_affine=random().to(bool), ).to(device) x = random_tensor(ndim=4, dim1=channel, dim2=height, dim3=width).to(device) y = m(x) return y<|fim_middle|>test_layernorm_with_random_data_shared_mem<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> Optional[str]: """ The entity tag used for optimistic concurrency when modifying the resource. """ return pulumi.get(self, "etag")<|fim_middle|>etag<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>() -> bool: try: from bpython import embed # NOQA F841 except ImportError: return False return True<|fim_middle|>has_bpython<|file_separator|>
<|fim_prefix|>async def <|fim_suffix|>(pipeline_response): deserialized = self._deserialize("OperationListResult", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) # type: ignore return None, AsyncList(list_of_elem)<|fim_middle|>extract_data<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(opt, filename): if 'help' == opt.get_long_name(): if 'upgrade_pre' == filename: global pre_help_str print pre_help_str elif 'upgrade_post' == filename: global post_help_str print post_help_str else: raise MyError('not supported filename:{0} for help option'.format(filename)) elif 'version' == opt.get_long_name(): global version_str print version_str<|fim_middle|>deal_with_local_opt<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(args): golden_raw, pred_raw, golden_label, pred_label = load_from_file(args) golden_dict = pick_max_golden_evid(golden_raw, pred_raw) macro_f1, scores = calc_model_f1(golden_dict, pred_raw, len(golden_raw)) return macro_f1, len(golden_raw)<|fim_middle|>main<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): parameters = { **self.serialize_header_param( "Content-Type", "application/json", ), **self.serialize_header_param( "Accept", "application/json", ), } return parameters<|fim_middle|>header_parameters<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> None: """ Check for public and private keys. """ if not hasattr(self, "publicKeys"): self.publicKeys = self.getPublicKeys() if not hasattr(self, "privateKeys"): self.privateKeys = self.getPrivateKeys() if not self.publicKeys or not self.privateKeys: raise error.ConchError("no host keys, failing") if not hasattr(self, "primes"): self.primes = self.getPrimes()<|fim_middle|>start_factory<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(store): po_header = store.parseheader() index = {} for unit in store.units: for location in unit.getlocations(): index[location] = unit index["r_string_languagegroup_name"] = store.UnitClass(po_header["Language-Team"]) return index<|fim_middle|>build_location_index<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(path): def _cmp(x, y): if x[1] and not y[1]: return -1 if not x[1] and y[1]: return 1 if x[0].lower() > y[0].lower(): return 1 if x[0].lower() < y[0].lower(): return -1 return 0 items = [] if path.startswith("~"): path = fs.expanduser(path) if not os.path.isdir(path): return items for item in os.listdir(path): try: item_is_dir = os.path.isdir(os.path.join(path, item)) if item_is_dir: os.listdir(os.path.join(path, item)) items.append((item, item_is_dir)) except OSError: pass return sorted(items, key=cmp_to_key(_cmp))<|fim_middle|>list_dir<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, zip_path): """Checks if the ZIP file contains another file with the same name, so it is going to be overwritten. @param zip_path: zip file path @return: comparison boolean """ with ZipFile(zip_path, "r") as archive: # Test if zip file contains a file named as itself. try: return any(name == os.path.basename(zip_path) for name in archive.namelist()) except BadZipfile as e: raise CuckooPackageError("Invalid Zip file") from e<|fim_middle|>is_overwritten<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( hook_state: Any, bucket: dist.GradBucket, optim_stream_state, ) -> torch.futures.Future[torch.Tensor]: # Run original hook ddp_weakref = hook_state ddp_inst = ddp_weakref() reducer, process_group = ddp_inst.reducer, ddp_inst.process_group fut = reducer._run_allreduce_hook(bucket) optimizer_stream = optim_stream_state.optim_stream with torch.cuda.stream(optimizer_stream): fut.wait() # Apply gradient division since C++ side only allreduces and does # not average. TODO: (rohan-varma) the div factor may be different # when running with join hook bucket.buffer().div_(process_group.size()) model_params = bucket.parameters() grads = bucket.gradients() # TODO (rohan-varma): upcast as needed for DDP mixed precision, # once optimizer in backward + DDP mixed precision is supported. for p, g in zip(model_params, grads): if hasattr(p, '_in_backward_optimizers'): # Note: need to set grad to the bucket's grad, because # running allreduce results in the bucket's grad being # reduced, but not grad field. if not gradient_is_bucket_view: p.grad = g for optim in p._in_backward_optimizers: optim.step() # Need to return a Future[Tensor] to obey comm hook API contract. ret_fut = torch.futures.Future() ret_fut.set_result(bucket.buffer()) # enqueue a callback to wait for this optimizer stream at the end of # backward and set all DDP managed grads to None. def wait_for_optim_stream_callback(): torch.cuda.current_stream().wait_stream( optim_stream_state.optim_stream ) # Set DDP managed grads to None for param in ddp_inst._get_data_parallel_params(ddp_inst.module): if hasattr(param, '_in_backward_optimizers'): param.grad = None # reset for the next backwards pass optim_stream_state.wait_for_optim_stream_enqueued = False if not optim_stream_state.wait_for_optim_stream_enqueued: Variable._execution_engine.queue_callback( wait_for_optim_stream_callback ) # mark that the callback is enqueued optim_stream_state.wait_for_optim_stream_enqueued = True return ret_fut<|fim_middle|>apply_optim_in_backward_hook<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): sut = CustomSet([1, 2, 3]) self.assertNotIn(4, sut)<|fim_middle|>test_when_the_element_is_not_in<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): # create more sweat users to allow for a decent record_batch size while len(self.sweat.registered_users) < 1000: # creating 20 accounts in parallel, about 200 should fit in a chunk # but we don't want to assume we get to use all the chunk space for # ourself. Also, other SweatUsers will be in the same loop and at # some point the local CPU becomes a bottleneck, too. self.sweat.create_passive_users( 20, self.node, self.account, # protocol enforced max length is 64 but we want shorter names to # not hit the log limits too soon max_account_id_len=48) rng = random.Random() # just around 300Tgas batch_size = rng.randint(700, 750) receivers = self.sweat.random_receivers(self.account_id, batch_size) tx = SweatMintBatch( self.sweat.account.key.account_id, self.oracle, [[account_id, rng.randint(1000, 3000)] for account_id in receivers]) self.send_tx(tx, locust_name="Sweat record batch (stress test)")<|fim_middle|>record_batch_of_large_batches<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(bindings, ret_value): """Make a nested let expressions. Parameters ---------- bindings: List[Tuple[tvm.relay.Var,tvm.relay.Expr]] The sequence of let bindings ret_value: tvm.relay.Expr The final value of the expression. Returns ------- lets: tvm.relay.Expr A nested let expression. """ if ret_value is None: raise RuntimeError("ret is not called in this scope") if isinstance(ret_value, _expr.If) and ret_value.false_branch is None: raise RuntimeError("Creating an If expression without else.") let_expr = ret_value for var, value in reversed(bindings): let_expr = _expr.Let(var, value, let_expr) return let_expr<|fim_middle|>make_lets<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, minutes): """Need function for each pause-timer""" sabnzbd.Scheduler.plan_resume(minutes)<|fim_middle|>pausefor<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> Any: """ The Resource Manager template blueprint artifact body. """ return pulumi.get(self, "template")<|fim_middle|>template<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( source_dir, target_dir, output_file_path, build_variables, arch ): command = _get_command(arch) subprocess.check_call(command, cwd=source_dir) deb_arch = _DEB_ARCH[arch] deb_file_name = f"{build_variables['DEB_PKG_NAME']}_{build_variables['DEB_PKG_VERSION']}_{deb_arch}.deb" deb_file_path = mozpath.join(target_dir, deb_file_name) if not os.path.exists(deb_file_path): raise NoDebPackageFound(deb_file_path) subprocess.check_call(["dpkg-deb", "--info", deb_file_path]) shutil.move(deb_file_path, output_file_path)<|fim_middle|>generate_deb_archive<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(spec, suggestions=None): """If the user provided a bad format specification, returns a UsageError that tries to help them fix it.""" if suggestions is None: suggestion = from_spec(spec, allow_unrecognised=True) suggestions = ( [suggestion] if suggestion else all_supported_import_source_types() ) try_the_following = ( "Try one of the following" if len(suggestions) >= 2 else "Try the following" ) return click.UsageError( f"Unrecognised import-source specification: {spec}\n" f"{try_the_following}:\n{suggest_specs(suggestions)}" )<|fim_middle|>bad_spec_error<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(msg, sdk_version=None): """Generic warning raiser referencing V2 Args: phrase: The phrase to include in the warning. sdk_version: the sdk version of removal of support. """ _sdk_version = sdk_version if sdk_version is not None else "2" full_msg = f"{msg} in sagemaker>={_sdk_version}.\nSee: {V2_URL} for details." warnings.warn(full_msg, DeprecationWarning, stacklevel=2) logger.warning(full_msg)<|fim_middle|>warn<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( self, use_case_id: UseCaseID, org_id: int, project_id: int, metric_name: str, value: Sequence[Union[int, float]], tags: Dict[str, str], unit: Optional[str], ) -> None: """ Emit a distribution metric for internal use cases only. Can support a sequence of values. Note that, as of now, this function will return immediately even if the metric message has not been produced to the broker yet. """ dist_metric = { "org_id": org_id, "project_id": project_id, "name": build_mri(metric_name, "d", use_case_id, unit), "value": value, "timestamp": int(datetime.now().timestamp()), "tags": tags, "retention_days": get_retention_from_org_id(org_id), "type": "d", } self.__produce(dist_metric, use_case_id)<|fim_middle|>distribution<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(*, parameter_one: bool, **kwargs: Any) -> HttpRequest: _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {}) _params = case_insensitive_dict(kwargs.pop("params", {}) or {}) api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2.0.0")) accept = _headers.pop("Accept", "application/json") # Construct URL _url = kwargs.pop("template_url", "/multiapi/two/testFourEndpoint") # Construct parameters _params["parameterOne"] = _SERIALIZER.query("parameter_one", parameter_one, "bool") _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str") # Construct headers _headers["Accept"] = _SERIALIZER.header("accept", accept, "str") return HttpRequest(method="POST", url=_url, params=_params, headers=_headers, **kwargs)<|fim_middle|>build_test_four_request<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): img = cv2.imread(self.input) inputs = [{"input.image": img, }] op_name = list(self.model_cfg[0].keys())[0] op = global_config[op_name](self.model_cfg[0][op_name], self.env_cfg) result = op(inputs)<|fim_middle|>test_ppstructure_kie_ser<|file_separator|>
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>get_multiplicity<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, verbose=False, restore_nonants=True): """ Get a truncated z score and look for the closest overall. Returns: obj (float or None): objective value, none if infeasible. snamedict (str): closest scenarios """ def _vb(msg): if verbose and self.cylinder_rank == 0: print ("(rank0) xhat_looper: " + msg) localmindist = np.zeros(1, dtype='d') globalmindist = np.zeros(1, dtype='d') localwinnername = None for k, s in self.opt.local_scenarios.items(): dist = 0 for ndn_i, xvar in s._mpisppy_data.nonant_indices.items(): diff = pyo.value(xvar) - pyo.value(s._mpisppy_model.xbars[ndn_i]) variance = pyo.value(s._mpisppy_model.xsqbars[ndn_i]) \ - pyo.value(s._mpisppy_model.xbars[ndn_i])*pyo.value(s._mpisppy_model.xbars[ndn_i]) if variance > 0: stdev = np.sqrt(variance) dist += min(3, abs(diff)/stdev) if localwinnername is None: localmindist[0] = dist localwinnername = k elif dist < localmindist[0]: localmindist[0] = dist localwinnername = k self.comms["ROOT"].Allreduce([localmindist, mpi.DOUBLE], [globalmindist, mpi.DOUBLE], op=mpi.MIN) # ties are possible, so break the tie localwinrank = np.zeros(1, dtype='d') # could use a python variable. globalwinrank = np.zeros(1, dtype='d') if globalmindist[0] < localmindist[0]: localwinrank[0] = -1 # we lost else: localwinrank[0] = self.cylinder_rank self.comms["ROOT"].Allreduce([localwinrank, mpi.DOUBLE], [globalwinrank, mpi.DOUBLE], op=mpi.MAX) # We only used the rank to break a possible tie. if self.cylinder_rank == int(globalwinrank[0]): globalwinnername = localwinnername else: globalwinnername = None sroot = globalwinrank[0] sname = self.comms["ROOT"].bcast(globalwinnername, root=sroot) _vb("Trying scenario "+sname) _vb(" Solver options="+str(self.solver_options)) # xxx TBD mult-stage snamedict = {"ROOT": sname} obj = self._try_one(snamedict, solver_options=self.solver_options, verbose=False, restore_nonants=restore_nonants) if obj is None: _vb(" Infeasible") else: _vb(" Feasible, returning " + str(obj)) return obj, snamedict<|fim_middle|>xhat_closest_to_xbar<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, result): self._enable_widgets(True) if result['success']: self.set_results(result['result']) """ if self.table.currentRow() == 0: # Row number hasn't changed but the data probably has! self.s_show_selected(self.table.item(0, 0)) self.table.setCurrentItem(self.table.item(0, 0))""" else: self.set_results(None)<|fim_middle|>r_searched<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): process_cpu_usage = ProcessCpuUsage(1,1.34,self.__metric_repository) user_name = process_cpu_usage.user_name() self.assertEqual(user_name,'pcp')<|fim_middle|>test_user_name<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(flag: Flag) -> List[Flag]: members, _ = enum._decompose(flag.__class__, flag._value_) return members<|fim_middle|>decompose_flag<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): print(ds.DefaultSignature().get_digest_alg()) name_id = self.server.ident.transient_nameid("urn:mace:example.com:saml:roland:sp", "id12") ava = {"givenName": ["Derek"], "surName": ["Jeter"], "mail": ["[email protected]"], "title": "The man"} signed_resp = self.server.create_authn_response( ava, "id12", # in_response_to "http://lingon.catalogix.se:8087/", # consumer_url "urn:mace:example.com:saml:roland:sp", # sp_entity_id name_id=name_id, sign_assertion=True, ) print(signed_resp) assert signed_resp sresponse = response_from_string(signed_resp) assert ds.SIG_RSA_SHA1 in str(sresponse), "Not correctly signed!" assert ds.DIGEST_SHA1 in str(sresponse), "Not correctly signed!"<|fim_middle|>test_signed_response<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(uri, database, table, schema, rows_num): engine = create_engine(uri, echo=True) meta = basefunc.redshift_getmeta(database=database, schema=schema, table=table, engine=engine) sql = f'select * from {database}.{table} limit {rows_num}' res_list = basefunc.redshift_getresult(sql=sql, engine=engine) return [meta, res_list[0], res_list[1]]<|fim_middle|>redshift_getdetail<|file_separator|>
<|fim_prefix|> <|fim_suffix|>(self):<|fim_middle|>get_seek_position<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(fname: str, entry: str) -> T.Optional[str]: if is_cygwin() or is_osx(): raise unittest.SkipTest('Test only applicable to ELF platforms') try: raw_out = subprocess.check_output(['readelf', '-d', fname], universal_newlines=True) except FileNotFoundError: # FIXME: Try using depfixer.py:Elf() as a fallback raise unittest.SkipTest('readelf not found') pattern = re.compile(entry + r': \[(.*?)\]') for line in raw_out.split('\n'): m = pattern.search(line) if m is not None: return str(m.group(1)) return None # The file did not contain the specified entry.<|fim_middle|>get_dynamic_section_entry<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): kpts = cell.make_kpts([1,1,2]) kmf = pbcscf.KUHF(cell, kpts, exxdiv=None) Escf = kmf.scf() self._test_ip_diag(kmf)<|fim_middle|>test_he_112_ip_diag<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(file): """ _searchForEvent_ Searches for the last event output into the CMSSW output file """ MatchRunEvent = re.compile("Run: [0-9]+ Event: [0-9]+$") # I'm just grabbing the last twenty lines for the hell of it lines = tailNLinesFromFile(file, 20) lastMatch = None for line in lines: if MatchRunEvent.search(line.strip()): matches = MatchRunEvent.findall(line.strip()) lastMatch = matches[-1] if lastMatch != None: # // # // Extract and update last run/event number # // try: runInfo, lastEvent = lastMatch.split("Event:", 1) lastRun = int(runInfo.split("Run:", 1)[1]) lastEvent = int(lastEvent) return (lastRun, lastEvent) except Exception: return (None, None) return (None, None)<|fim_middle|>search_for_event<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): return {"col1": 1, "col2": 2, "col3": 3}<|fim_middle|>dummy_data<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".yaml") as f: yaml.dump({"text": plain_text_str}, f) return f.name<|fim_middle|>mock_yaml_file<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): channel_to_child = [] # For each index in .power, the corresponding child for spec in self.dev.spectra.value: for c in (self.dependency1, self.dependency2): if any(spec == cs for cs in c.spectra.value): channel_to_child.append(c) break else: raise ValueError("Failed to find spectra %s in children" % (spec,)) # The first and last ones are normally always from different children c1 = channel_to_child[0] c2 = channel_to_child[-1] self.assertNotEqual(c1, c2) # Test if child light is updated multiplex light is updated as well and vice versa c1.power.value[0] = self.dev.power.range[1][0] * 0.5 self.assertEqual(self.dev.power.value[0], c1.power.value[0]) c2.power.value[-1] = self.dev.power.range[1][-1] * 0.9 self.assertEqual(self.dev.power.value[-1], c2.power.value[-1]) self.dev.power.value[0] = self.dev.power.range[1][0] * 0.2 self.assertEqual(self.dev.power.value[0], c1.power.value[0]) self.dev.power.value[-1] = self.dev.power.range[1][-1] * 0.7 self.assertEqual(self.dev.power.value[-1], c2.power.value[-1])<|fim_middle|>test_child_update<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, name): if "." in name: return name.split(".", 1) else: return "general", name<|fim_middle|>parse_name<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): pass<|fim_middle|>bar<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> None: self.get_csig()<|fim_middle|>make_ready<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, config): sparse_feature_number = config.get( "hyper_parameters.sparse_feature_number") sparse_feature_dim = config.get("hyper_parameters.sparse_feature_dim") fc_sizes = config.get("hyper_parameters.fc_sizes") layer_sizes_dnn = config.get("hyper_parameters.layer_sizes_dnn") sparse_num_field = config.get("hyper_parameters.sparse_num_field") sparse_inputs_slots = config.get( 'hyper_parameters.sparse_inputs_slots') flen_model = net.FLENLayer(sparse_feature_number, sparse_feature_dim, sparse_inputs_slots, sparse_num_field, layer_sizes_dnn) return flen_model<|fim_middle|>create_model<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( self, namespace, init_lvm, storageclass, status, volume_mode, pvc_factory, pod_factory, volume_binding_mode, ): """ Test to verify thin pool capacity alert: 1. run io up to 70, check alerts - no alert expected 2. run io up to 76, check alerts 3. run io up to 86, check alerts - critical alert expected """ log.info("Test Started successfully") log.info(f"LVMCluster version is {self.lvm.get_lvm_version()}") log.info( f"Lvm thin-pool overprovisionRation is {self.lvm.get_lvm_thin_pool_config_overprovision_ratio()}" ) log.info( f"Lvm thin-pool sizePrecent is {self.lvm.get_lvm_thin_pool_config_size_percent()}" ) size_to_70 = f"{int(float(self.thin_pool_size)*0.7)}Gi" size_to_76 = f"{int(float(self.thin_pool_size)*0.06)}Gi" size_to_86 = f"{int(float(self.thin_pool_size)*0.1)}Gi" sizes_list = [ { "size_to_fill": size_to_70, "file_name": "run-to-70", "pvc_expected_size": f"{float(self.pvc_size)*0.7}", "alert": None, }, { "size_to_fill": size_to_76, "file_name": "run-to-76", "pvc_expected_size": f"{float(self.pvc_size)*0.06}", "alert": constants.TOPOLVM_ALERTS.get("tp_data_75_precent"), }, { "size_to_fill": size_to_86, "file_name": "run-to-86", "pvc_expected_size": f"{float(self.pvc_size)*0.1}", "alert": constants.TOPOLVM_ALERTS.get("tp_data_85_precent"), }, ] log.info(f"LV Size:{self.thin_pool_size}") self.metric_data = dict() pvc_list = list() pods_list = list() storage_type = "fs" if volume_mode == constants.VOLUME_MODE_BLOCK: storage_type = "block" self.block = True for size in sizes_list: log.info( f"{size.get('size_to_fill')}, {size.get('file_name')}, {size.get('pvc_expected_size')}" ) pvc_list.append( pvc_factory( project=self.proj_obj, interface=None, storageclass=self.sc_obj, size=self.pvc_size, status=status, access_mode=self.access_mode, volume_mode=volume_mode, ) ) pods_list.append(pod_factory(pvc=pvc_list[-1], raw_block_pv=self.block)) pods_list[-1].run_io( storage_type=storage_type, size=size.get("size_to_fill"), rw_ratio=0, jobs=1, runtime=0, depth=4, rate="1250m", rate_process=None, bs="100M", end_fsync=0, invalidate=0, buffer_pattern=None, readwrite="write", direct=1, verify=False, timeout=1800, ) pods_list[-1].get_fio_results(timeout=1800) # Workaround for BZ-2108018 minimal_pvc = pvc_factory( project=self.proj_obj, interface=None, storageclass=self.sc_obj, size="1", status=status, access_mode=self.access_mode, volume_mode=volume_mode, ) mini_pod = pod_factory(pvc=minimal_pvc, raw_block_pv=self.block) log.info(f"{mini_pod} created") mini_pod.delete(wait=True) minimal_pvc.delete(wait=True) for sample in TimeoutSampler( 150, 30, self.lvm.check_for_alert, size.get("alert") ): if size["file_name"] == "run-to-70": time.sleep(60) break else: if sample: break # End of workaround self.lvm.compare_percent_data_from_pvc( pvc_list[-1], float(size["pvc_expected_size"]) ) expected_os_values = [ self.lvm.get_thin_pool1_data_percent(), self.lvm.get_thin_pool_metadata(), self.lvm.get_thin_pool1_size(), self.lvm.get_vg_free(), self.lvm.get_vg_size(), ] for metric, expected in zip(constants.TOPOLVM_METRICS, expected_os_values): self.lvm.validate_metrics_vs_operating_system_stats(metric, expected) log.info(f"getting alerts: {self.lvm.get_thin_provisioning_alerts()}") if size["file_name"] == "run-to-70": assert not self.lvm.check_for_alert( size.get("alert") ), "Alert already exists" else: log.info(f"size: {size['file_name']}") assert self.lvm.check_for_alert(size.get("alert")), "Alert not found" lvmo_health_check()<|fim_middle|>test_thin_pool_capacity_alert<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): value_cls = Value[List[CustomType]] empty = [] nonempty = [CustomType()] # Empty construction. value = value_cls() self.assertEqual(value.get_value(), empty) value = value_cls(empty) self.assert_equal_but_not_aliased(value.get_value(), empty) value = value_cls(nonempty) self.assert_equal_but_not_aliased(value.get_value(), nonempty) # Test mutation. value = value_cls(nonempty) self.assertNotEqual(value.get_value(), empty) value.set_value(empty) self.assert_equal_but_not_aliased(value.get_value(), empty)<|fim_middle|>test_abstract_value_py_list<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> DirsToCopySettings: def extract_dirs_data(root_item: QTreeWidgetItem) -> Dict[str, bool]: data = {} for i in range(root_item.childCount()): item = root_item.child(i) relative_path = item.data(0, Qt.UserRole) is_checked = item.checkState(0) == Qt.Checked data[relative_path] = is_checked if item.childCount() > 0: data.update(extract_dirs_data(item)) return data METHOD_NAME = extract_dirs_data(self.dirsTreeWidget.invisibleRootItem()) return METHOD_NAME<|fim_middle|>dirs_to_copy<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, position=None, on_stderr=False): if position is None: # I'm not currently tracking the position, so there is no default. # position = self.get_position() return handle = win32.STDOUT if on_stderr: handle = win32.STDERR win32.SetConsoleCursorPosition(handle, position)<|fim_middle|>set_cursor_position<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): fn = getattr(Permutated, 'fn(1, 2)') self.assertFalse(hasattr(fn, PERMUTATION_ATTR))<|fim_middle|>test_clear_permuations_attribute<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(template, values): from app.models import EMAIL_TYPE, LETTER_TYPE, SMS_TYPE return {SMS_TYPE: SMSMessageTemplate, EMAIL_TYPE: WithSubjectTemplate, LETTER_TYPE: WithSubjectTemplate}[ template["template_type"] ](template, values)<|fim_middle|>get_template_instance<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): """Verify that writing to a BytesIO object contains the same data as to_image(). The goal of this test is to ensure that Plotly correctly handles a writable buffer which doesn't correspond to a filesystem path. """ bio = BytesIO() pio.write_image(fig, bio, format="jpg", engine="kaleido", validate=False) bio.seek(0) # Rewind to the beginning of the buffer, otherwise read() returns b''. bio_bytes = bio.read() to_image_bytes = pio.to_image(fig, format="jpg", engine="kaleido", validate=False) assert bio_bytes == to_image_bytes<|fim_middle|>test_bytesio<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, signer: NeonLocalAccount, tx: dict) -> TransactionSent: tx = self.sign_transaction(signer, tx) return self.send_wait_transaction(tx)<|fim_middle|>sign_send_wait_transaction<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> str: """ The provider-assigned unique ID for this managed resource. """ return pulumi.get(self, "id")<|fim_middle|>id<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): return self._override<|fim_middle|>override_value<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): if self.auth and self.auth.is_valid(): return self.auth creds = self.config.credentials() res = requests.post( self.routes.auth(), data={ 'username': creds.qualified_username(), 'password': creds.passwd(), 'client_id': creds.client_id(), 'client_secret': creds.client_secret(), 'scope': creds.scope(), 'grant_type': 'password', }, ) res = cxmodel.AuthResponse(**res.json()) res.expires_at = datetime.datetime.fromtimestamp( datetime.datetime.now().timestamp() + res.expires_in - 10 ) self.auth = res return res<|fim_middle|>auth<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): return {"colors": ["wal"]}<|fim_middle|>wal_theme<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(unused_argv): if FLAGS.mode == "mobile_install": with open(FLAGS.input_manifest, "rb") as input_manifest: new_manifest, old_application, app_package = ( StubifyMobileInstall(input_manifest.read())) if FLAGS.override_package: app_package = FLAGS.override_package with open(FLAGS.output_manifest, "wb") as output_xml: output_xml.write(new_manifest) with open(FLAGS.output_datafile, "wb") as output_file: output_file.write("\n".join([old_application, app_package]).encode()) elif FLAGS.mode == "instant_run": with open(FLAGS.input_manifest, "rb") as input_manifest: new_manifest = StubifyInstantRun(input_manifest.read()) with open(FLAGS.output_manifest, "wb") as output_xml: output_xml.write(new_manifest)<|fim_middle|>main<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): self.logger = logging.getLogger("TestColors") self.color_list_length = len(ginga.colors.color_dict)<|fim_middle|>setup_class<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(source, case, store, libdir): with host.ServiceManager() as mgr: expects = case["expects"] if expects == "error": with pytest.raises(host.RemoteError): source.download(mgr, store, libdir) elif expects == "success": source.download(mgr, store, libdir) else: raise ValueError(f"invalid expectation: {expects}")<|fim_middle|>check_case<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(workload_name): # Return the global batch size. if workload_name == 'criteo1tb': return 262_144 elif workload_name == 'fastmri': return 32 elif workload_name == 'imagenet_resnet': return 1024 elif workload_name == 'imagenet_vit': return 1024 elif workload_name == 'librispeech_conformer': return 256 elif workload_name == 'librispeech_deepspeech': return 256 elif workload_name == 'ogbg': return 512 elif workload_name == 'wmt': return 128 elif workload_name == 'mnist': return 16 else: raise ValueError(f'Unsupported workload name: {workload_name}.')<|fim_middle|>get_batch_size<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, session, test_client, object_count): self.create_many_objects(session, object_count) res = test_client.get(self.page_url(1, -1)) assert res.status_code == 404<|fim_middle|>test_does_not_allow_negative_per_page<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, section: str) -> dict[str, str]: ...<|fim_middle|>section_as_dict<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self) -> str: """ Gets or sets the Id of the resource. """ return pulumi.get(self, "id")<|fim_middle|>id<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( self, _mode: str, cached_results: List[dict], _dump_dir: str = None, ): result = self.parse_cached_results(cached_results) target = torch.stack(result["label"], dim=0) prediction_logit = torch.stack(result["logit"], dim=0) prediction = torch.stack(result["prediction"], dim=0) loss = self.logit_loss(prediction_logit, target) logs = dict( loss=loss.detach().cpu().item(), ) if _mode in ["valid", "test"]: logs.update( self.log_scores( score_args=( prediction.detach().cpu().numpy(), target.detach().cpu().numpy(), ), ) ) return logs<|fim_middle|>reduction<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(nonlinear_domains): mappings_are_cellwise_constant(nonlinear_domains, False)<|fim_middle|>test_mappings_are_cellwise_not_constant_on<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(gl, user, GPG_KEY): gkey = user.gpgkeys.create({"key": GPG_KEY}) assert gkey in user.gpgkeys.list() # Seems broken on the gitlab side # gkey = user.gpgkeys.get(gkey.id) gkey.delete() assert gkey not in user.gpgkeys.list()<|fim_middle|>test_user_gpg_keys<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>( date_from=None, date_to=None, user=None, asset=None, account=None, input=None, session=None, risk_level=None, org_id=None): filter_kwargs = {} date_from_default = timezone.now() - datetime.timedelta(days=7) date_to_default = timezone.now() if not date_from and not session: date_from = date_from_default if not date_to and not session: date_to = date_to_default if date_from is not None: if isinstance(date_from, datetime.datetime): date_from = date_from.timestamp() filter_kwargs['timestamp__gte'] = int(date_from) if date_to is not None: if isinstance(date_to, datetime.datetime): date_to = date_to.timestamp() filter_kwargs['timestamp__lte'] = int(date_to) if user: filter_kwargs["user__startswith"] = user if asset: filter_kwargs['asset'] = asset if account: filter_kwargs['account'] = account if input: filter_kwargs['input__icontains'] = input if session: filter_kwargs['session'] = session if org_id is not None: filter_kwargs['org_id'] = org_id if risk_level is not None: filter_kwargs['risk_level'] = risk_level return filter_kwargs<|fim_middle|>make_filter_kwargs<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, chunk_size, event_handler=None): """ :param chunk_size: Maximum number of records to read from the database at one time :param event_handler: instance of ``PaginateViewLogHandler`` to be notified of view events. :return: an instance of ``ResumableFunctionIterator`` """ raise NotImplementedError<|fim_middle|>get_document_iterator<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(proof_req_name: str): proof_request_info = read_json_data("proof_request_" + proof_req_name + ".json") return proof_request_info["presentation_proposal"]<|fim_middle|>read_proof_req_data<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, prime): req = super(FDBRecord, self).make_req(prime) if 'NDA_VLAN' in req and req['NDA_VLAN'] == 0: req.pop('NDA_VLAN') return req<|fim_middle|>make_idx_req<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, board: AbstractBoard) -> None: if any(core.is_kvm_core() for core in self.get_cores()): board.kvm_vm = self.kvm_vm # To get the KVM CPUs to run on different host CPUs # Specify a different event queue for each CPU for i, core in enumerate(self.cores): for obj in core.get_simobject().descendants(): obj.eventq_index = 0 core.get_simobject().eventq_index = i + 1 board.set_mem_mode(MemMode.ATOMIC_NONCACHING) elif isinstance( self.cores[0].get_simobject(), (BaseTimingSimpleCPU, BaseO3CPU, BaseMinorCPU), ): board.set_mem_mode(MemMode.TIMING) elif isinstance( self.cores[0].get_simobject(), BaseNonCachingSimpleCPU ): board.set_mem_mode(MemMode.ATOMIC_NONCACHING) elif isinstance(self.cores[0].get_simobject(), BaseAtomicSimpleCPU): if board.get_cache_hierarchy().is_ruby(): warn( "Using an atomic core with Ruby will result in " "'atomic_noncaching' memory mode. This will skip caching " "completely." ) board.set_mem_mode(MemMode.ATOMIC_NONCACHING) else: board.set_mem_mode(MemMode.ATOMIC) else: raise NotImplementedError<|fim_middle|>incorporate_processor<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): mock_sdb = MagicMock(return_value=None) with patch.dict(vagrant.__utils__, {"sdb.sdb_set": mock_sdb}): mock_sdb = MagicMock(return_value={}) with patch.dict(vagrant.__utils__, {"sdb.sdb_get": mock_sdb}): vagrant.init("test3", cwd="/tmp") with self.assertRaises(salt.exceptions.SaltInvocationError): vagrant.get_ssh_config("test3") # has not been started<|fim_middle|>test_vagrant_get_ssh_config_fails<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(func): """ Runs the function only once (caches the return value for subsequent runs) """ @functools.wraps(func) def decorator(*args, **kwargs): if decorator.has_run: return decorator.result decorator.result = func(*args, **kwargs) decorator.has_run = True return decorator.result decorator.has_run = False decorator.result = None return decorator<|fim_middle|>run_once<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, agency_identifier, row): frec_inst = FrecMap.objects.filter( agency_identifier=agency_identifier, main_account_code=row["Account Code"], sub_function_code=row["Subfunction Code"], ) if frec_inst.exists(): return frec_inst.first().fr_entity_code<|fim_middle|>find_frec<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): with pytest.raises(ParseException) as exc: CupsFilesConf(context_wrap(CUPS_FILES_CONF_EMPTY)) assert str(exc.value) == "Empty Content"<|fim_middle|>test_cups_files_conf_empyt<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(report_filename_and_folder): plugin_folder = report_filename_and_folder[0] report_filename = report_filename_and_folder[1] plugin: PluginBase = get_plugin_from_cache(report_filename) assert plugin, report_filename assert plugin.id == plugin_folder<|fim_middle|>test_autodetected_on_all_report_collection<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): with open("valid_test_file.yaml", "w") as f: f.write("setting: value") result, message = validate_yaml_file("valid_test_file.yaml") os.remove("valid_test_file.yaml") assert result == True assert "Successfully validated" in message<|fim_middle|>test_validate_yaml_file_valid<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): self._lines.add_line('Stats:') for label, value in self._obj._repr_stats(): self._lines.add_line('%s%s: %s' % (self._indent, label, value)) self._lines.add_separator()<|fim_middle|>process_stats<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(filename, fmt): if fmt == 'png': return find_open_image_png_function()(filename) raise NotImplementedError(f'Image format {fmt} not implemented yet')<|fim_middle|>load_image<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(args): print('Converting data...') queries_files = {} qrels_files = {} for set_name in ['train', 'dev', 'test']: queries_filepath = os.path.join( args.output_folder, 'queries.{}.tsv'.format(set_name)) qrels_filepath = os.path.join( args.output_folder, 'qrels.{}'.format(set_name)) queries_files[set_name] = open(queries_filepath, 'w') qrels_files[set_name] = open(qrels_filepath, 'w') file_names = os.listdir(args.collection_path) file_paths = [] for file_name in file_names: file_path = os.path.join(args.collection_path, file_name) if not os.path.isfile(file_path): continue if not file_path.endswith('.gz'): continue file_paths.append(file_path) print('{} files found'.format(len(file_paths))) # We first need to collect papers by year, sort them, and split between # training, dev, and test sets. train_ids, dev_ids, test_ids, id_years = get_id_years( file_paths=file_paths, train_fraction=args.train_fraction) doc_ids = train_ids | dev_ids | test_ids n_docs = 0 file_index = 0 num_train = 0 num_dev = 0 num_test = 0 start_time = time.time() for file_num, file_path in enumerate(file_paths): with gzip.open(file_path) as f: for line in f: obj = json.loads(line.strip()) doc_id = obj['id'] if doc_id not in doc_ids: continue if n_docs % args.max_docs_per_file == 0: if n_docs > 0: output_jsonl_file.close() output_path = os.path.join( args.output_folder, 'corpus/docs{:02d}.json'.format(file_index)) output_jsonl_file = open(output_path, 'w') file_index += 1 doc_text = '[Title]: {} [Abstract]: {}'.format( obj['title'], obj['paperAbstract']) doc_text = clean(doc_text) output_dict = {'id': doc_id, 'contents': doc_text} output_jsonl_file.write(json.dumps(output_dict) + '\n') n_docs += 1 out_citations = obj['outCitations'] # Remove citations not in the corpus. out_citations = [ out_citation for out_citation in out_citations if out_citation in doc_ids ] # Remove self citations. out_citations = [ out_citation for out_citation in out_citations if out_citation != doc_id ] # Use only citations that have an older publication year than the citing # paper's or do not have an year. out_citations2 = [] for out_citation in out_citations: if out_citation in id_years: if id_years[out_citation] <= obj['year']: out_citations2.append(out_citation) out_citations = out_citations2 # Skip papers with no out citations. if len(out_citations) == 0: continue if doc_id in train_ids: set_name = 'train' num_train += 1 elif doc_id in dev_ids: set_name = 'dev' num_dev += 1 elif doc_id in test_ids: set_name = 'test' num_test += 1 queries_file = queries_files[set_name] qrels_file = qrels_files[set_name] doc_title = obj['title'] doc_title = clean(doc_title) if args.use_abstract_in_query: doc_abstract = clean(obj['paperAbstract']) query = '[Title]: ' + doc_title + ' [Abstract]: ' + doc_abstract else: query = doc_title queries_file.write('{}\t{}\n'.format(doc_id, query)) for out_citation in out_citations: qrels_file.write('{} 0 {} 1\n'.format(doc_id, out_citation)) if n_docs % 100000 == 0: print('Read {}/{} files. {} docs written in {} files in {} secs.'.format( file_num, len(file_paths), n_docs, file_index, int(time.time() - start_time))) print('Examples: {} train, {} valid, {} test'.format( num_train, num_dev, num_test)) # Close queries and qrels files. for queries_file in queries_files.values(): queries_file.close() for qrels_file in qrels_files.values(): qrels_file.close()<|fim_middle|>create_dataset<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): self.create_ucr(self.active_domain, 'active-table', is_orphan=False) call_command('manage_orphaned_ucrs', 'delete', engine_id='ucr') self.mock_drop_ucrs.assert_not_called()<|fim_middle|>test_non_orphaned_tables_are_not_dropped<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, data, preprocessors): from auger_ml.preprocessors.text import TextPreprocessor res = data for p in preprocessors: name = list(p.keys())[0] params = list(p.values())[0] if name != 'text': raise Exception("Only text preprocessor supported.") tp = TextPreprocessor(params) res = tp.fit_transform(res) return res<|fim_middle|>preprocess_data_locally<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): """A simple test for running an atomic convolution on featurized data.""" dir_path = os.path.dirname(os.path.realpath(__file__)) ligand_file = os.path.join(dir_path, "../../feat/tests/data/3zso_ligand_hyd.pdb") protein_file = os.path.join(dir_path, "../../feat/tests/data/3zso_protein_noH.pdb") # Pulled from PDB files. For larger datasets with more PDBs, would use # max num atoms instead of exact. frag1_num_atoms = 44 # for ligand atoms frag2_num_atoms = 2334 # for protein atoms complex_num_atoms = 2378 # in total max_num_neighbors = 4 # Cutoff in angstroms neighbor_cutoff = 4 complex_featurizer = AtomicConvFeaturizer(frag1_num_atoms, frag2_num_atoms, complex_num_atoms, max_num_neighbors, neighbor_cutoff) # arbitrary label labels = np.array([0]) features = complex_featurizer.featurize([(ligand_file, protein_file)]) dataset = NumpyDataset(features, labels) batch_size = 1 print("Constructing Atomic Conv model") atomic_convnet = atomic_conv.AtomicConvModel( n_tasks=1, batch_size=batch_size, layer_sizes=[10], frag1_num_atoms=frag1_num_atoms, frag2_num_atoms=frag2_num_atoms, complex_num_atoms=complex_num_atoms) print("About to call fit") # Run a fitting operation atomic_convnet.fit(dataset) preds = atomic_convnet.predict(dataset) assert preds.shape == (1, 1, 1) assert np.count_nonzero(preds) > 0<|fim_middle|>test_atomic_conv_with_feat<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, preferred): supported = SupportedSRS([SRS(4326), SRS(25832)], preferred) assert [SRS(4326), SRS(25832)] == [srs for srs in supported]<|fim_middle|>test_iter<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, item) -> Iterable[Union[None, Tuple[str, Callable]]]: """ Use None to insert a MenuSeparator(). The tuples are: (menu entry text, callback) """ return []<|fim_middle|>build_context_menu_block<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, node: NearNodeProxy, account: Account): """ Passive users are only used as receiver, not as signer. """ node.send_tx_retry(InitFTAccount(self.account, account), locust_name="Init FT Account") self.registered_users.append(account.key.account_id)<|fim_middle|>register_passive_user<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self, item): """Parse or generate event title""" return item.css(".event-title::text").extract_first()<|fim_middle|>parse_title<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): """Converts/Lowers an expression containing QNN ops to an expression containing only core (non-Dialect) Relay ops. Each QNN op is lowered to a sequence of existing Relay ops. This is a target-independent pass. One can register the lowering/transformation function for this op using FTVMQnnCanonicalize attr_name for FTVMLegalize op attribute. An example of this transformation is below Examples ________ .. code-block:: python # Original expression qnn_expr = relay.qnn.op.requantize(y, input_scale=1, input_zero_point=0, output_scale=1, output_zero_point=0, out_dtype='int8') # We want to utilize all the existing Relay infrastructure. So, instead of supporting this # QNN requantize op, we convert it into a sequence of existing Relay operators. mod = tvm.IRModule.from_expr(qnn_expr) mod = relay.qnn.transform.CanonicalizeOps()(mod) relay_expr = mod['main'] print(relay_expr) def @main(%quantized_data: Tensor[(200), int32]) -> Tensor[(200), int8] { %0 = cast(%quantized_data, dtype="int64") /* ty=Tensor[(200), int64] */; %1 = multiply(%0, 2 /* ty=int64 */) /* ty=Tensor[(200), int64] */; %2 = multiply(%1, 1073741824 /* ty=int64 */) /* ty=Tensor[(200), int64] */; %3 = add(%2, 1073741824 /* ty=int64 */) /* ty=Tensor[(200), int64] */; %4 = right_shift(%3, 31 /* ty=int64 */) /* ty=Tensor[(200), int64] */; %5 = add(0 /* ty=int64 */, %4) /* ty=Tensor[(200), int64] */; %6 = clip(%5, a_min=-128f, a_max=127f) /* ty=Tensor[(200), int64] */; cast(%6, dtype="int8") /* ty=Tensor[(200), int8] */ } Returns ------- ret : tvm.transform.Pass The registered pass that canonicalizes QNN ops to Relay ops. """ return relay.transform.Legalize("FTVMQnnCanonicalize")<|fim_middle|>canonicalize_ops<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): """Convert the object to a yaml string. Returns: str: The yaml string. """ json_obj = self.to_json() yaml_obj = yaml.safe_dump(json_obj) return yaml_obj<|fim_middle|>to_yaml<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(self): if not self.precise: self._locate() return self._max_col<|fim_middle|>max_col<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(): """Find the modules in sys.modules which cause "mod" to be imported.""" # If we've run this function before, return the same result. global KNOWN_FTZ if KNOWN_FTZ: return KNOWN_FTZ # Start by determining our baseline: the FTZ and sys.modules state in a fresh # process which has only imported this module and nothing else. always_enables_ftz, always_imports = run_in_process(always_imported_modules) if always_enables_ftz: raise RuntimeError("Python is always in FTZ mode, even without imports!") CHECKED_CACHE.update(always_imports) # Next, we'll search through sys.modules looking for a package (or packages) such # that importing them in a new process sets the FTZ state. As a heuristic, we'll # start with packages known to have ever enabled FTZ, then top-level packages as # a way to eliminate large fractions of the search space relatively quickly. def key(name): """Prefer known-FTZ modules, then top-level packages, then alphabetical.""" return (name not in KNOWN_EVER_CULPRITS, name.count("."), name) # We'll track the set of modules to be checked, and those which do trigger FTZ. candidates = set(sys.modules) - CHECKED_CACHE triggering_modules = {} while candidates: mod = min(candidates, key=key) candidates.discard(mod) enables_ftz, imports = run_in_process(modules_imported_by, mod) imports -= CHECKED_CACHE if enables_ftz: triggering_modules[mod] = imports candidates &= imports else: candidates -= imports CHECKED_CACHE.update(imports) # We only want to report the 'top level' packages which enable FTZ - for example, # if the enabling code is in `a.b`, and `a` in turn imports `a.b`, we prefer to # report `a`. On the other hand, if `a` does _not_ import `a.b`, as is the case # for `hypothesis.extra.*` modules, then `a` will not be in `triggering_modules` # and we'll report `a.b` here instead. prefixes = tuple(n + "." for n in triggering_modules) result = {k for k in triggering_modules if not k.startswith(prefixes)} # Suppose that `bar` enables FTZ, and `foo` imports `bar`. At this point we're # tracking both, but only want to report the latter. for a in sorted(result): for b in sorted(result): if a in triggering_modules[b] and b not in triggering_modules[a]: result.discard(b) # There may be a cyclic dependency which that didn't handle, or simply two # separate modules which both enable FTZ. We already gave up comprehensive # reporting for speed above (`candidates &= imports`), so we'll also buy # simpler reporting by arbitrarily selecting the alphabetically first package. KNOWN_FTZ = min(result) # Cache the result - it's likely this will trigger again! return KNOWN_FTZ<|fim_middle|>identify_ftz_culprits<|file_separator|>
<|fim_prefix|>f <|fim_suffix|>(self):<|fim_middle|>get_results<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(event, mocker, req_with_session): class MockingStep(checkoutflow.BaseCheckoutFlowStep): identifier = 'mocking' priority = 100 def is_applicable(self, request): return False flow = with_mocked_step(mocker, MockingStep, event) req_with_session.event = event assert flow[3].get_next_applicable(req_with_session) is flow[6] # flow[3] is also skipped because no payment is required if there is no cart assert flow[3] is flow[6].get_prev_applicable(req_with_session)<|fim_middle|>test_step_ignored<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(domain_ownership_identifier_name: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWebAppDomainOwnershipIdentifierResult]: """ Description for Get domain ownership identifier for web app. :param str domain_ownership_identifier_name: Name of domain ownership identifier. :param str name: Name of the app. :param str resource_group_name: Name of the resource group to which the resource belongs. """ ...<|fim_middle|>get_web_app_domain_ownership_identifier_output<|file_separator|>
<|fim_prefix|>async def <|fim_suffix|>(self, strategy_tester): await strategy_tester.run_test_slow_uptrend(None, None)<|fim_middle|>test_slow_uptrend<|file_separator|>
<|fim_prefix|>async def <|fim_suffix|>(_session_git_repo, tmp_path, event_loop): # don't use this if you want to edit origin repo repo_path = tmp_path / "redbot-cloned_testrepo" repo = Repo( name="redbot-testrepo", url=str(_session_git_repo.folder_path), branch=_session_git_repo.branch, commit=_session_git_repo.commit, folder_path=repo_path, ) sp.run(("git", "clone", str(_session_git_repo.folder_path), str(repo_path)), check=True) return repo<|fim_middle|>cloned_git_repo<|file_separator|>
<|fim_prefix|>def <|fim_suffix|>(name, path, data, module=True): """ Process data to create a screenshot which will be saved in docs/screenshots/<name>.png If module is True the screenshot will include the name and py3status. """ # create dir if not exists try: path.mkdir(parents=True, exist_ok=True) except OSError: pass global font, glyph_data if font is None: font = ImageFont.truetype(FONT, FONT_SIZE * SCALE) if glyph_data is None: glyph_data = TTFont(font.path) # make sure that the data is in list form if not isinstance(data, list): data = [data] if contains_bad_glyph(glyph_data, data): print("** {} has characters not in {} **".format(name, font.getname()[0])) else: create_screenshot(name, data, path, font=font, is_module=module)<|fim_middle|>process<|file_separator|>