column           dtype          min    max
complexity       int64          1      56
n_identifiers    int64          1      114
code             stringlengths  19     12.7k
path             stringlengths  8      134
n_ast_nodes      int64          12     2.35k
ast_errors       stringlengths  0      4.01k
repo             stringlengths  3      28
documentation    dict           -      -
n_words          int64          2      866
language         stringclasses  1 value
vocab_size       int64          2      323
commit_id        stringlengths  40     40
file_name        stringlengths  5      79
id               int64          243    338k
nloc             int64          1      228
token_counts     int64          5      1.4k
fun_name         stringlengths  1      77
url              stringlengths  31     60
commit_message   stringlengths  3      15.3k
n_whitespaces    int64          1      3.23k
n_ast_errors     int64          0      20
d_id             int64          74     121k
ast_levels       int64          4      29
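The columns above describe per-function records: source code plus complexity/AST metrics and commit metadata. As a minimal sketch of how such a dump could be inspected programmatically — assuming the rows are published as a Hugging Face dataset; the dataset identifier below is a placeholder, not the real name — one could filter records on the numeric columns listed above:

from datasets import load_dataset  # assumes the dump is available via the `datasets` library

# "org/code-metrics" is a hypothetical placeholder identifier.
ds = load_dataset("org/code-metrics", split="train")

# Keep only small, low-complexity functions, using the schema columns above.
simple = ds.filter(lambda row: row["complexity"] <= 5 and row["nloc"] <= 20)
print(simple[0]["fun_name"], simple[0]["repo"], simple[0]["url"])

The rows below follow the schema in the order shown.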
2
4
def _get_class_labels_from_estimator(estimator): return estimator.classes_ if hasattr(estimator, "classes_") else None
mlflow/sklearn/utils.py
33
mlflow
{ "docstring": "\n Extracts class labels from `estimator` if `estimator.classes` is available.\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
9
Python
9
1ddb2c9b5ace0fa605195a4b14c595e274a8c384
utils.py
19,200
2
19
_get_class_labels_from_estimator
https://github.com/mlflow/mlflow.git
Use `len(classes_)` instead of `len(set(y_true))` (#5275) * Use n_classes instead of len(set(y_true)) Signed-off-by: harupy <[email protected]> * fix attribute Signed-off-by: harupy <[email protected]> * use classes_ Signed-off-by: harupy <[email protected]> * handle meta estimator Signed-off-by: harupy <[email protected]> * address comment Signed-off-by: harupy <[email protected]> * fix Signed-off-by: harupy <[email protected]>
15
0
2,911
9
5
20
def get_search_choices(self):
    if not self._search_choice_options:

        # Organize choices by category
        categories = defaultdict(dict)
        for app_label, models in registry['search'].items():
            for name, cls in models.items():
                title = cls.model._meta.verbose_name.title()
                categories[cls.get_category()][name] = title

        # Compile a nested tuple of choices for form rendering
        results = (
            ('', 'All Objects'),
            *[(category, choices.items()) for category, choices in categories.items()]
        )
        self._search_choice_options = results

    return self._search_choice_options
netbox/netbox/search/backends.py
181
netbox
{ "docstring": "Return the set of choices for individual object types, organized by category.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
58
Python
43
ffce5d968d8a77c97852999b6ef916e80c1de55f
backends.py
265,845
13
110
get_search_choices
https://github.com/netbox-community/netbox.git
8927 plugin search (#10489) * #7016 base search classes * 7016 add search indexes * 7016 add search indexes * 7016 add search indexes * 7016 add search indexes * 7016 add search indexes * 7016 add search indexes * 8927 refactor search * 8927 refactor search * 8927 refactor search * 8927 refactor search * 8927 get search choices working * 8927 cleanup - optimize * 8927 use backend search function * 8927 fix for plugin search * 8927 add docs * Move search app to a module under netbox/ * Utilize global registry to register model search classes * Build search form options from registry * Determine search categories from model app by default * Enable dynamic search registration for plugins * Update docs & improve plugin support * Clean up search backend class * Docs for #8927 Co-authored-by: jeremystretch <[email protected]>
239
0
78,214
17
2
3
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999):
modules/image/text_to_image/stable_diffusion/diffusers/schedulers/scheduling_ddim.py
18
PaddleHub
{ "docstring": "\n Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of\n (1-beta) over time from t = [0,1].\n\n :param num_diffusion_timesteps: the number of betas to produce. :param alpha_bar: a lambda that takes an argument t\n from 0 to 1 and\n produces the cumulative product of (1-beta) up to that part of the diffusion process.\n :param max_beta: the maximum beta to use; use values lower than 1 to\n prevent singularities.\n ", "language": "en", "n_whitespaces": 134, "n_words": 74, "vocab_size": 50 }
3
Python
3
a6790a651a12eb391060e533868bf0ba197f6f7e
scheduling_ddim.py
50,754
8
74
betas_for_alpha_bar
https://github.com/PaddlePaddle/PaddleHub.git
Add stable diffusion module
6
0
10,206
6
2
20
def left_integral3D(facets, index, expr, vertices, hp_param, degree):
    value = S.Zero
    facet = facets[index]
    x0 = vertices[facet[0]]
    facet_len = len(facet)
    for i, fac in enumerate(facet):
        side = (vertices[fac], vertices[facet[(i + 1) % facet_len]])
        value += distance_to_side(x0, side, hp_param[0]) * \
            lineseg_integrate(facet, i, side, expr, degree)
    return value
sympy/integrals/intpoly.py
149
sympy
{ "docstring": "Computes the left integral of Eq 10 in Chin et al.\n\n Explanation\n ===========\n\n For the 3D case, this is the sum of the integral values over constituting\n line segments of the face (which is accessed by facets[index]) multiplied\n by the distance between the first point of facet and that line segment.\n\n Parameters\n ==========\n\n facets :\n List of faces of the 3-Polytope.\n index :\n Index of face over which integral is to be calculated.\n expr :\n Input polynomial.\n vertices :\n List of vertices that constitute the 3-Polytope.\n hp_param :\n The hyperplane parameters of the face.\n degree :\n Degree of the ``expr``.\n\n Examples\n ========\n\n >>> from sympy.integrals.intpoly import left_integral3D\n >>> cube = [[(0, 0, 0), (0, 0, 5), (0, 5, 0), (0, 5, 5), (5, 0, 0),\\\n (5, 0, 5), (5, 5, 0), (5, 5, 5)],\\\n [2, 6, 7, 3], [3, 7, 5, 1], [7, 6, 4, 5], [1, 5, 4, 0],\\\n [3, 1, 0, 2], [0, 4, 6, 2]]\n >>> facets = cube[1:]\n >>> vertices = cube[0]\n >>> left_integral3D(facets, 3, 1, vertices, ([0, -1, 0], -5), 0)\n -50\n ", "language": "en", "n_whitespaces": 333, "n_words": 177, "vocab_size": 108 }
46
Python
37
7d773eb18daaef3c54f34d1ac6cbc5b83a5bb16c
intpoly.py
198,386
10
103
left_integral3D
https://github.com/sympy/sympy.git
Cleanup loops and ranges
92
0
48,898
14
3
14
def pad_list(xs, pad_value):
    n_batch = len(xs)
    max_len = max(x.size(0) for x in xs)
    pad = xs[0].new(n_batch, max_len, *xs[0].size()[1:]).fill_(pad_value)
    for i in range(n_batch):
        pad[i, :xs[i].size(0)] = xs[i]
    return pad
ppg2mel/utils/nets_utils.py
140
MockingBird
{ "docstring": "Perform padding for the list of tensors.\n\n Args:\n xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)].\n pad_value (float): Value for padding.\n\n Returns:\n Tensor: Padded tensor (B, Tmax, `*`).\n\n Examples:\n >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)]\n >>> x\n [tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])]\n >>> pad_list(x, 0)\n tensor([[1., 1., 1., 1.],\n [1., 1., 0., 0.],\n [1., 0., 0., 0.]])\n\n ", "language": "en", "n_whitespaces": 161, "n_words": 63, "vocab_size": 49 }
28
Python
22
b617a87ee40ab384767a27335313c2c65ee094ec
nets_utils.py
161,060
7
91
pad_list
https://github.com/babysor/MockingBird.git
Init ppg extractor and ppg2mel (#375) * Init ppg extractor and ppg2mel * add preprocess and training * FIx known issues * Update __init__.py Allow to gen audio * Fix length issue * Fix bug of preparing fid * Fix sample issues * Add UI usage of PPG-vc
53
0
38,876
15
2
7
def _genName(cls, name):
    if not name:
        name = "frame_" + str(uuid.uuid4()).replace("-", "")
    # TODO: reword name in case of caller's mistake
    return name
modin/experimental/core/execution/native/implementations/omnisci_on_native/base_worker.py
62
modin
{ "docstring": "\n Generate or mangle a table name.\n\n Parameters\n ----------\n name : str or None\n Table name to mangle or None to generate a unique\n table name.\n\n Returns\n -------\n str\n Table name.\n ", "language": "en", "n_whitespaces": 120, "n_words": 30, "vocab_size": 18 }
23
Python
21
1c0935c1bc0856d43f69c1e32498636ee24ebc85
base_worker.py
154,395
4
33
_genName
https://github.com/modin-project/modin.git
FEAT-#4913: Enabling pyhdk (#4900) Co-authored-by: ienkovich <[email protected]> Signed-off-by: izamyati <[email protected]>
62
0
35,956
15
4
19
def _canonicalize(self, parents):
    for field in dataclasses.fields(self):
        value = getattr(self, field.name)
        if isinstance(value, (Path, str)) and utils.is_path_like(field.type):
            setattr(self, field.name, utils.resolve_path(value, self._base_path))
        else:
            _recursive_canonicalize_child(value, [self] + parents)
nni/experiment/config/base.py
122
nni
{ "docstring": "\n To be overrided by subclass.\n\n Convert the config object to canonical format.\n\n The default implementation will:\n\n 1. Resolve all ``PathLike`` fields to absolute path\n 2. Call ``_canonicalize([self] + parents)`` on all children config objects, including those inside list and dict\n\n If the subclass has nested config fields, be careful about where to call ``super()._canonicalize()``.\n\n Parameters\n ----------\n parents : list[ConfigBase]\n The upper level config objects.\n For example local training service's ``trialGpuNumber`` will be copied from top level when not set,\n in this case it will be invoked like ``localConfig._canonicalize([experimentConfig])``.\n ", "language": "en", "n_whitespaces": 192, "n_words": 88, "vocab_size": 75 }
26
Python
26
3f6a8274a97bf003b5eadc05faa324162b7f4123
base.py
111,629
7
80
_canonicalize
https://github.com/microsoft/nni.git
Some string changes around experiment module (#4442)
103
0
24,461
14
3
27
def test_iforest_sparse(global_random_seed):
    rng = check_random_state(global_random_seed)
    X_train, X_test = train_test_split(diabetes.data[:50], random_state=rng)
    grid = ParameterGrid({"max_samples": [0.5, 1.0], "bootstrap": [True, False]})

    for sparse_format in [csc_matrix, csr_matrix]:
        X_train_sparse = sparse_format(X_train)
        X_test_sparse = sparse_format(X_test)

        for params in grid:
            # Trained on sparse format
            sparse_classifier = IsolationForest(
                n_estimators=10, random_state=global_random_seed, **params
            ).fit(X_train_sparse)
            sparse_results = sparse_classifier.predict(X_test_sparse)

            # Trained on dense format
            dense_classifier = IsolationForest(
                n_estimators=10, random_state=global_random_seed, **params
            ).fit(X_train)
            dense_results = dense_classifier.predict(X_test)

            assert_array_equal(sparse_results, dense_results)
sklearn/ensemble/tests/test_iforest.py
221
scikit-learn
{ "docstring": "Check IForest for various parameter settings on sparse input.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
65
Python
47
6ca1f5e4d0d16bc9a7f28582079a15e14f012719
test_iforest.py
259,992
17
144
test_iforest_sparse
https://github.com/scikit-learn/scikit-learn.git
TST use global_random_seed in sklearn/ensemble/tests/test_iforest.py (#22901) Co-authored-by: jeremie du boisberranger <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: Olivier Grisel <[email protected]>
230
0
76,025
15
19
40
def get_approvers(doctype, txt, searchfield, start, page_len, filters):
    if not filters.get("employee"):
        frappe.throw(_("Please select Employee first."))

    approvers = []
    department_details = {}
    department_list = []
    employee = frappe.get_value(
        "Employee",
        filters.get("employee"),
        ["employee_name", "department", "leave_approver", "expense_approver", "shift_request_approver"],
        as_dict=True,
    )

    employee_department = filters.get("department") or employee.department
    if employee_department:
        department_details = frappe.db.get_value(
            "Department", {"name": employee_department}, ["lft", "rgt"], as_dict=True
        )
    if department_details:
        department_list = frappe.db.sql(
            ,
            (department_details.lft, department_details.rgt),
            as_list=True,
        )

    if filters.get("doctype") == "Leave Application" and employee.leave_approver:
        approvers.append(
            frappe.db.get_value("User", employee.leave_approver, ["name", "first_name", "last_name"])
        )

    if filters.get("doctype") == "Expense Claim" and employee.expense_approver:
        approvers.append(
            frappe.db.get_value("User", employee.expense_approver, ["name", "first_name", "last_name"])
        )

    if filters.get("doctype") == "Shift Request" and employee.shift_request_approver:
        approvers.append(
            frappe.db.get_value(
                "User", employee.shift_request_approver, ["name", "first_name", "last_name"]
            )
        )

    if filters.get("doctype") == "Leave Application":
        parentfield = "leave_approvers"
        field_name = "Leave Approver"
    elif filters.get("doctype") == "Expense Claim":
        parentfield = "expense_approvers"
        field_name = "Expense Approver"
    elif filters.get("doctype") == "Shift Request":
        parentfield = "shift_request_approver"
        field_name = "Shift Request Approver"

    if department_list:
        for d in department_list:
            approvers += frappe.db.sql(
                ,
                (d, "%" + txt + "%", parentfield),
                as_list=True,
            )

    if len(approvers) == 0:
        error_msg = _("Please set {0} for the Employee: {1}").format(
            field_name, frappe.bold(employee.employee_name)
        )
        if department_list:
            error_msg += _(" or for Department: {0}").format(frappe.bold(employee_department))
        frappe.throw(error_msg, title=_(field_name + " Missing"))

    return set(tuple(approver) for approver in approvers)
erpnext/hr/doctype/department_approver/department_approver.py
728
erpnext
{ "docstring": "select name from `tabDepartment` where lft <= %s\n\t\t\tand rgt >= %s\n\t\t\tand disabled=0\n\t\t\torder by lft descselect user.name, user.first_name, user.last_name from\n\t\t\t\ttabUser user, `tabDepartment Approver` approver where\n\t\t\t\tapprover.parent = %s\n\t\t\t\tand user.name like %s\n\t\t\t\tand approver.parentfield = %s\n\t\t\t\tand approver.approver=user.name", "language": "en", "n_whitespaces": 32, "n_words": 41, "vocab_size": 29 }
198
Python
115
494bd9ef78313436f0424b918f200dab8fc7c20b
department_approver.py
66,060
69
420
get_approvers
https://github.com/frappe/erpnext.git
style: format code with black
137
0
14,097
16
10
22
def _add_name_vhost_if_necessary(self, vhost):
    need_to_save = False

    # See if the exact address appears in any other vhost
    # Remember 1.1.1.1:* == 1.1.1.1 -> hence any()
    for addr in vhost.addrs:
        # In Apache 2.2, when a NameVirtualHost directive is not
        # set, "*" and "_default_" will conflict when sharing a port
        addrs = {addr,}
        if addr.get_addr() in ("*", "_default_"):
            addrs.update(obj.Addr((a, addr.get_port(),)) for a in ("*", "_default_"))

        for test_vh in self.vhosts:
            if (vhost.filep != test_vh.filep and
                    any(test_addr in addrs for test_addr in test_vh.addrs) and
                    not self.is_name_vhost(addr)):
                self.add_name_vhost(addr)
                logger.info("Enabling NameVirtualHosts on %s", addr)
                need_to_save = True
                break

    if need_to_save:
        self.save()
certbot-apache/certbot_apache/_internal/configurator.py
216
certbot
{ "docstring": "Add NameVirtualHost Directives if necessary for new vhost.\n\n NameVirtualHosts was a directive in Apache < 2.4\n https://httpd.apache.org/docs/2.2/mod/core.html#namevirtualhost\n\n :param vhost: New virtual host that was recently created.\n :type vhost: :class:`~certbot_apache._internal.obj.VirtualHost`\n\n ", "language": "en", "n_whitespaces": 64, "n_words": 29, "vocab_size": 27 }
97
Python
71
eeca208c8f57304590ac1af80b496e61021aaa45
configurator.py
186,364
17
130
_add_name_vhost_if_necessary
https://github.com/certbot/certbot.git
Various clean-ups in certbot-apache. Use f-strings. (#9132) * Various clean-ups in certbot-apache. Use f-strings. * Smaller tweaks
381
0
45,461
16
11
52
def line_search(self, X, y, sample_weight):
    # line search parameters
    beta, sigma = 0.5, 0.00048828125  # 1/2, 1/2**11
    eps = 16 * np.finfo(self.loss_value.dtype).eps
    t = 1  # step size

    # gradient_times_newton = self.gradient @ self.coef_newton
    # was computed in inner_solve.
    armijo_term = sigma * self.gradient_times_newton
    _, _, raw_prediction_newton = self.linear_loss.weight_intercept_raw(
        self.coef_newton, X
    )

    self.coef_old = self.coef
    self.loss_value_old = self.loss_value
    self.gradient_old = self.gradient

    # np.sum(np.abs(self.gradient_old))
    sum_abs_grad_old = -1

    is_verbose = self.verbose >= 2
    if is_verbose:
        print(" Backtracking Line Search")
        print(f" eps=10 * finfo.eps={eps}")

    for i in range(21):  # until and including t = beta**20 ~ 1e-6
        self.coef = self.coef_old + t * self.coef_newton
        raw = self.raw_prediction + t * raw_prediction_newton
        self.loss_value, self.gradient = self.linear_loss.loss_gradient(
            coef=self.coef,
            X=X,
            y=y,
            sample_weight=sample_weight,
            l2_reg_strength=self.l2_reg_strength,
            n_threads=self.n_threads,
            raw_prediction=raw,
        )
        # Note: If coef_newton is too large, loss_gradient may produce inf values,
        # potentially accompanied by a RuntimeWarning.
        # This case will be captured by the Armijo condition.

        # 1. Check Armijo / sufficient decrease condition.
        # The smaller (more negative) the better.
        loss_improvement = self.loss_value - self.loss_value_old
        check = loss_improvement <= t * armijo_term
        if is_verbose:
            print(
                f" line search iteration={i+1}, step size={t}\n"
                f" check loss improvement <= armijo term: {loss_improvement} "
                f"<= {t * armijo_term} {check}"
            )
        if check:
            break

        # 2. Deal with relative loss differences around machine precision.
        tiny_loss = np.abs(self.loss_value_old * eps)
        check = np.abs(loss_improvement) <= tiny_loss
        if is_verbose:
            print(
                " check loss |improvement| <= eps * |loss_old|:"
                f" {np.abs(loss_improvement)} <= {tiny_loss} {check}"
            )
        if check:
            if sum_abs_grad_old < 0:
                sum_abs_grad_old = scipy.linalg.norm(self.gradient_old, ord=1)
            # 2.1 Check sum of absolute gradients as alternative condition.
            sum_abs_grad = scipy.linalg.norm(self.gradient, ord=1)
            check = sum_abs_grad < sum_abs_grad_old
            if is_verbose:
                print(
                    " check sum(|gradient|) < sum(|gradient_old|): "
                    f"{sum_abs_grad} < {sum_abs_grad_old} {check}"
                )
            if check:
                break

        t *= beta
    else:
        warnings.warn(
            f"Line search of Newton solver {self.__class__.__name__} at iteration "
            f"#{self.iteration} did no converge after 21 line search refinement "
            "iterations. It will now resort to lbfgs instead.",
            ConvergenceWarning,
        )
        if self.verbose:
            print(" Line search did not converge and resorts to lbfgs instead.")
        self.use_fallback_lbfgs_solve = True
        return

    self.raw_prediction = raw
sklearn/linear_model/_glm/_newton_solver.py
645
scikit-learn
{ "docstring": "Backtracking line search.\n\n Sets:\n - self.coef_old\n - self.coef\n - self.loss_value_old\n - self.loss_value\n - self.gradient_old\n - self.gradient\n - self.raw_prediction\n ", "language": "en", "n_whitespaces": 109, "n_words": 18, "vocab_size": 12 }
339
Python
201
ff9344f3d8d11d38fa3a2497199113e5bac9537c
_newton_solver.py
261,382
70
350
line_search
https://github.com/scikit-learn/scikit-learn.git
FEA add (single) Cholesky Newton solver to GLMs (#24637) * FEA add NewtonSolver, CholeskyNewtonSolver and QRCholeskyNewtonSolver * ENH better singular hessian special solve * CLN fix some typos found by reviewer * TST assert ConvergenceWarning is raised * MNT add BaseCholeskyNewtonSolver * WIP colinear design in GLMs * FIX _solve_singular * FIX false unpacking in * TST add tests for unpenalized GLMs * TST fix solutions of glm_dataset * ENH add SVDFallbackSolver * CLN remove SVDFallbackSolver * ENH use gradient step for singular hessians * ENH print iteration number in warnings * TST improve test_linalg_warning_with_newton_solver * CLN LinAlgWarning fron scipy.linalg * ENH more robust hessian * ENH increase maxls for lbfgs to make it more robust * ENH add hessian_warning for too many negative hessian values * CLN some warning messages * ENH add lbfgs_step * ENH use lbfgs_step for hessian_warning * TST make them pass * TST tweek rtol for lbfgs * TST add rigoros test for GLMs * TST improve test_warm_start * ENH improve lbfgs options for better convergence * CLN fix test_warm_start * TST fix assert singular values in datasets * CLN address most review comments * ENH enable more vebosity levels for lbfgs * DOC add whatsnew * CLN remove xfail and clean a bit * CLN docstring about minimum norm * More informative repr for the glm_dataset fixture cases * Forgot to run black * CLN remove unnecessary filterwarnings * CLN address review comments * Trigger [all random seeds] on the following tests: test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * CLN add comment for lbfgs ftol=64 * machine precision * CLN XXX code comment * Trigger [all random seeds] on the following tests: test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * CLN link issue and remove code snippet in comment * Trigger [all random seeds] on the following tests: test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * CLN add catch_warnings * Trigger [all random seeds] on the following tests: test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * Trigger [all random seeds] on the following tests: test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * [all random seeds] test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * Trigger with -Werror [all random seeds] test_glm_regression test_glm_regression_hstacked_X test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * ENH increase maxls to 50 * [all random seeds] test_glm_regression test_glm_regression_hstacked_X 
test_glm_regression_vstacked_X test_glm_regression_unpenalized test_glm_regression_unpenalized_hstacked_X test_glm_regression_unpenalized_vstacked_X test_warm_start * Revert "Trigger with -Werror [all random seeds]" This reverts commit 99f4cf99ca41b4ad2bdad537ad60f936970e3a88. * TST add catch_warnings to filterwarnings * TST adapt tests for newton solvers * CLN cleaner gradient step with gradient_times_newton * DOC add whatsnew * ENH always use lbfgs as fallback * TST adapt rtol * TST fix test_linalg_warning_with_newton_solver * CLN address some review comments * Improve tests related to convergence warning on collinear data * overfit -> fit * Typo in comment * Apply suggestions from code review * ENH fallback_lbfgs_solve - Do not use lbfgs steps, fall back complete to lbfgs * ENH adapt rtol * Improve test_linalg_warning_with_newton_solver * Better comments * Fixed Hessian casing and improved warning messages * [all random seeds] test_linalg_warning_with_newton_solver * Ignore ConvergenceWarnings for now if convergence is good * CLN remove counting of warnings * ENH fall back to lbfgs if line search did not converge * DOC better comment on performance bottleneck * Update GLM related examples to use the new solver * CLN address reviewer comments * EXA improve some wordings * CLN do not pop "solver in parameter constraints * CLN fix typos * DOC fix docstring * CLN remove solver newton-qr-cholesky * DOC update PR number in whatsnew * CLN address review comments * CLN remove unnecessary catch_warnings * CLN address some review comments * DOC more precise whatsnew * CLN use init_zero_coef * CLN use and test init_zero_coef * CLN address some review comments * CLN mark NewtonSolver as private by leading underscore * CLN exact comments for inner_solve * TST add test_newton_solver_verbosity * TST extend test_newton_solver_verbosity * TST logic in test_glm_regression_unpenalized * TST use count_nonzero * CLN remove super rare line search checks * MNT move Newton solver to new file _newton_solver.py Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]>
1,364
0
76,792
16
15
48
def _yield_distributions(self):
    # We need to check if we've seen some resources already, because on
    # some Linux systems (e.g. some Debian/Ubuntu variants) there are
    # symlinks which alias other files in the environment.
    seen = set()
    for path in self.path:
        finder = resources.finder_for_path(path)
        if finder is None:
            continue
        r = finder.find('')
        if not r or not r.is_container:
            continue
        rset = sorted(r.resources)
        for entry in rset:
            r = finder.find(entry)
            if not r or r.path in seen:
                continue
            try:
                if self._include_dist and entry.endswith(DISTINFO_EXT):
                    possible_filenames = [METADATA_FILENAME,
                                          WHEEL_METADATA_FILENAME,
                                          LEGACY_METADATA_FILENAME]
                    for metadata_filename in possible_filenames:
                        metadata_path = posixpath.join(entry, metadata_filename)
                        pydist = finder.find(metadata_path)
                        if pydist:
                            break
                    else:
                        continue

                    with contextlib.closing(pydist.as_stream()) as stream:
                        metadata = Metadata(fileobj=stream, scheme='legacy')
                    logger.debug('Found %s', r.path)
                    seen.add(r.path)
                    yield new_dist_class(r.path, metadata=metadata, env=self)
                elif self._include_egg and entry.endswith(('.egg-info', '.egg')):
                    logger.debug('Found %s', r.path)
                    seen.add(r.path)
                    yield old_dist_class(r.path, self)
            except Exception as e:
                msg = 'Unable to read distribution at %s, perhaps due to bad metadata: %s'
                logger.warning(msg, r.path, e)
                import warnings
                warnings.warn(msg % (r.path, e), stacklevel=2)
pipenv/patched/pip/_vendor/distlib/database.py
457
pipenv
{ "docstring": "\n Yield .dist-info and/or .egg(-info) distributions.\n ", "language": "en", "n_whitespaces": 20, "n_words": 5, "vocab_size": 5 }
159
Python
115
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
database.py
21,982
42
277
_yield_distributions
https://github.com/pypa/pipenv.git
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
1,081
0
4,098
20
14
26
def test_mutNodeReplacement():
    pipeline_string = (
        'LogisticRegression(PolynomialFeatures'
        '(input_matrix, PolynomialFeatures__degree=2, PolynomialFeatures__include_bias=False, '
        'PolynomialFeatures__interaction_only=False), LogisticRegression__C=10.0, '
        'LogisticRegression__dual=False, LogisticRegression__penalty=l2)'
    )
    pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset)
    pipeline[0].ret = Output_Array
    old_ret_type_list = [node.ret for node in pipeline]
    old_prims_list = [node for node in pipeline if node.arity != 0]

    # test 10 times
    for _ in range(10):
        mut_ind = mutNodeReplacement(tpot_obj._toolbox.clone(pipeline), pset=tpot_obj._pset)
        new_ret_type_list = [node.ret for node in mut_ind[0]]
        new_prims_list = [node for node in mut_ind[0] if node.arity != 0]

        if new_prims_list == old_prims_list:  # Terminal mutated
            assert new_ret_type_list == old_ret_type_list
        else:  # Primitive mutated
            diff_prims = [x for x in new_prims_list if x not in old_prims_list]
            diff_prims += [x for x in old_prims_list if x not in new_prims_list]
            if len(diff_prims) > 1:
                # Sometimes mutation randomly replaces an operator that already in the pipelines
                assert diff_prims[0].ret == diff_prims[1].ret
        assert mut_ind[0][0].ret == Output_Array
tests/tpot_tests.py
302
tpot
{ "docstring": "Assert that mutNodeReplacement() returns the correct type of mutation node in a fixed pipeline.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
134
Python
75
388616b6247ca4ea8de4e2f340d6206aee523541
tpot_tests.py
181,804
23
193
test_mutNodeReplacement
https://github.com/EpistasisLab/tpot.git
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
292
0
43,590
15
1
21
def _async_update(self) -> None:
    super()._async_update()
    node = self.gateway.sensors[self.node_id]
    child = node.children[self.child_id]
    position: str = child.values[self.value_type]
    latitude, longitude, _ = position.split(",")
    self._latitude = float(latitude)
    self._longitude = float(longitude)
homeassistant/components/mysensors/device_tracker.py
126
core
{ "docstring": "Update the controller with the latest value from a device.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
26
Python
21
df2d0cd3e3ade2339a18415f92c85810308a9926
device_tracker.py
298,235
9
77
_async_update
https://github.com/home-assistant/core.git
Refactor mysensors device tracker (#84747)
82
0
97,180
9
1
8
def from_builtin(cls, func):
    warnings.warn("inspect.Signature.from_builtin() is deprecated since "
                  "Python 3.5, use Signature.from_callable()",
                  DeprecationWarning, stacklevel=2)
    return _signature_from_builtin(cls, func)
python3.10.4/Lib/inspect.py
48
XX-Net
{ "docstring": "Constructs Signature for the given builtin function.\n\n Deprecated since Python 3.5, use `Signature.from_callable()`.\n ", "language": "en", "n_whitespaces": 27, "n_words": 13, "vocab_size": 13 }
17
Python
17
8198943edd73a363c266633e1aa5b2a9e9c9f526
inspect.py
218,367
5
28
from_builtin
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
80
0
55,261
9
2
17
def update_step(self, grad, variable):
    lr = tf.cast(self.learning_rate, variable.dtype)
    var_key = self._var_key(variable)
    rho = self.rho
    accumulated_grad = self._accumulated_grads[self._index_dict[var_key]]
    accumulated_delta_var = self._accumulated_delta_vars[
        self._index_dict[var_key]
    ]
keras/optimizers/optimizer_experimental/adadelta.py
97
keras
{ "docstring": "Update step given gradient and the associated model variable.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
22
Python
18
84afc5193d38057e2e2badf9c889ea87d80d8fbf
adadelta.py
275,240
33
211
update_step
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
82
0
81,342
9
2
4
def export(self, view_data):
    if view_data is not None:  # pragma: NO COVER
        self.transport.export(view_data)
python/ray/_private/prometheus_exporter.py
38
ray
{ "docstring": "export send the data to the transport class\n in order to be sent to Prometheus in a sync or async way.\n ", "language": "en", "n_whitespaces": 35, "n_words": 21, "vocab_size": 17 }
13
Python
13
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
prometheus_exporter.py
130,175
3
22
export
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
39
0
29,142
10
1
7
def job_hook(**kwargs):
    cmd = " ".join(kwargs["entrypoint"])
    print(f"hook intercepted: {cmd}")
    sys.exit(0)
python/ray/_private/test_utils.py
59
ray
{ "docstring": "Function called by reflection by test_cli_integration.", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 5 }
10
Python
10
517f78e2b810e506d61884c79d768a37a34f0f9c
test_utils.py
140,506
4
29
job_hook
https://github.com/ray-project/ray.git
[minor] Add a job submission hook by env var (#25343)
22
0
31,979
10
5
14
def get_dataset_info(tasks):
    curr_task_info = []
    for task in tasks:
        # adding the name + attempted link
        tname = taskname(task)
        tsite = task_site + to_sublink(tname)
        curr_task_info.append(f"- [{tname}]({tsite})")
        # adding link
        links = make_task_links(task)
        curr_task_info[-1] += f" ({links})" if links else ''
        # adding description
        if all_tasks.get(task) and all_tasks[task].get('description'):
            curr_task_info[-1] += f": {all_tasks[task]['description']}"
    return curr_task_info


#################################
# Table-Related Functions
#################################
parlai/scripts/generate_model_card.py
170
ParlAI
{ "docstring": "\n dataset info comes from guessing where it would be at the tasks site and the\n task_list.py + anything else from the user.\n ", "language": "en", "n_whitespaces": 32, "n_words": 22, "vocab_size": 19 }
58
Python
42
81f722d29045a7a5841d0931a082ded1d1f13863
generate_model_card.py
194,780
11
82
get_dataset_info
https://github.com/facebookresearch/ParlAI.git
autoformat (#4378)
141
0
47,073
14
5
9
def widget_to_element(self, widget):
    if self.AllKeysDict is None or len(self.AllKeysDict) == 0:
        return None
    for key, element in self.AllKeysDict.items():
        if element.Widget == widget:
            return element
    return None
PySimpleGUI.py
80
PySimpleGUI
{ "docstring": "\n Returns the element that matches a supplied tkinter widget.\n If no matching element is found, then None is returned.\n\n\n :return: Element that uses the specified widget\n :rtype: Element | None\n ", "language": "en", "n_whitespaces": 73, "n_words": 30, "vocab_size": 24 }
26
Python
19
9b814f003b0685757d76ce56ee9c98eae114d346
PySimpleGUI.py
212,818
7
50
widget_to_element
https://github.com/PySimpleGUI/PySimpleGUI.git
Added key and widget Element properties, new focus methods Element.get_next_focus, Element.get_previous_focus. New Window method Window.widget_to_element
91
0
53,428
10
3
17
def _get_fallback_devices(self) -> List[plaidml._DeviceConfig]:
    # Try get a supported device
    experimental_setting = plaidml.settings.experimental
    plaidml.settings.experimental = False
    devices = plaidml.devices(self._ctx, limit=100, return_all=True)[0]

    # Try get any device
    if not devices:
        plaidml.settings.experimental = True
        devices = plaidml.devices(self._ctx, limit=100, return_all=True)[0]

    plaidml.settings.experimental = experimental_setting

    if not devices:
        raise RuntimeError("No valid devices could be found for plaidML.")

    self._log("warning", f"PlaidML could not find a GPU. Falling back to: "
                         f"{[d.id.decode('utf-8') for d in devices]}")
    return devices
lib/gpu_stats/amd.py
201
faceswap
{ "docstring": " Called if a GPU has not been discovered. Return any devices we can run on.\n\n Returns\n -------\n list:\n The :class:`pladml._DeviceConfig` fallaback objects that PlaidML has discovered.\n ", "language": "en", "n_whitespaces": 66, "n_words": 26, "vocab_size": 24 }
70
Python
44
98a65277d8c55cfcbdbfa629f790a8f8731621a8
amd.py
100,857
20
109
_get_fallback_devices
https://github.com/deepfakes/faceswap.git
Fix AMD Tests + docs
197
0
20,308
14
1
5
def nargs_error(name, takes, given):
    return TypeError(f"{name}() takes {takes} positional arguments but "
                     f"{given} were given")
lib/matplotlib/_api/__init__.py
43
matplotlib
{ "docstring": "Generate a TypeError to be raised by function calls with wrong arity.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
15
Python
15
973e475ef85524c5e9cef0638c90ca9a159935e4
__init__.py
110,159
3
18
nargs_error
https://github.com/matplotlib/matplotlib.git
Factor out error generation for function calls with wrong nargs. ... matching the wording for standard functions. Note that nargs_error returns the exception without raising it itself to make the control flow clearer on the caller side.
41
0
23,954
10
1
7
def test_tabular_model_form_meta_readonly_field(self):
    response = self.client.get(reverse("admin:admin_inlines_someparentmodel_add"))
    self.assertContains(
        response,
        '<img src="/static/admin/img/icon-unknown.svg" '
        'class="help help-tooltip" width="10" height="10" '
        'alt="(Help text from ModelForm.Meta)" '
        'title="Help text from ModelForm.Meta">',
    )
    self.assertContains(response, "Label from ModelForm.Meta")
tests/admin_inlines/tests.py
75
django
{ "docstring": "\n Tabular inlines use ModelForm.Meta.help_texts and labels for read-only\n fields.\n ", "language": "en", "n_whitespaces": 31, "n_words": 9, "vocab_size": 9 }
29
Python
24
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,170
10
39
test_tabular_model_form_meta_readonly_field
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
119
0
51,888
11
1
2
def tiling(self): return self["tiling"]
packages/python/plotly/plotly/graph_objs/_icicle.py
22
plotly.py
{ "docstring": "\n The 'tiling' property is an instance of Tiling\n that may be specified as:\n - An instance of :class:`plotly.graph_objs.icicle.Tiling`\n - A dict of string/value properties that will be passed\n to the Tiling constructor\n\n Supported dict properties:\n\n flip\n Determines if the positions obtained from\n solver are flipped on each axis.\n orientation\n When set in conjunction with `tiling.flip`,\n determines on which side the root nodes are\n drawn in the chart. If `tiling.orientation` is\n \"v\" and `tiling.flip` is \"\", the root nodes\n appear at the top. If `tiling.orientation` is\n \"v\" and `tiling.flip` is \"y\", the root nodes\n appear at the bottom. If `tiling.orientation`\n is \"h\" and `tiling.flip` is \"\", the root nodes\n appear at the left. If `tiling.orientation` is\n \"h\" and `tiling.flip` is \"x\", the root nodes\n appear at the right.\n pad\n Sets the inner padding (in px).\n\n Returns\n -------\n plotly.graph_objs.icicle.Tiling\n ", "language": "en", "n_whitespaces": 531, "n_words": 137, "vocab_size": 77 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_icicle.py
227,197
2
11
tiling
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
58,870
7
1
5
def _execute_impl(self, *args, **kwargs): return self._deployment_handle
python/ray/serve/pipeline/deployment_node.py
27
ray
{ "docstring": "Executor of DeploymentNode getting called each time on dag.execute.\n\n The execute implementation is recursive, that is, the method nodes will receive\n whatever this method returns. We return a handle here so method node can\n directly call upon.\n ", "language": "en", "n_whitespaces": 65, "n_words": 37, "vocab_size": 35 }
6
Python
6
b4d9fcdbf8be4c0f4985c29b251d2585cf269f76
deployment_node.py
138,734
2
16
_execute_impl
https://github.com/ray-project/ray.git
[Serve] Fix surprious `__call__` invocation in Deployment DAG's exec_impl (#24199)
20
0
31,508
6
1
9
def get_image_copy(self, color_format):
    logger.trace("Requested color format '%s' for frame '%s'", color_format, self._filename)
    image = getattr(self, f"_image_as_{color_format.lower()}")()
    return image
plugins/extract/pipeline.py
66
faceswap
{ "docstring": " Get a copy of the image in the requested color format.\n\n Parameters\n ----------\n color_format: ['BGR', 'RGB', 'GRAY']\n The requested color format of :attr:`image`\n\n Returns\n -------\n :class:`numpy.ndarray`:\n A copy of :attr:`image` in the requested :attr:`color_format`\n ", "language": "en", "n_whitespaces": 106, "n_words": 34, "vocab_size": 24 }
18
Python
17
d9c84a5f9f6ff22d6f91594f218bea15764de96b
pipeline.py
100,903
4
33
get_image_copy
https://github.com/deepfakes/faceswap.git
Add Laplacian Pyramid Loss
46
0
20,352
13
2
4
def subst_vars (s, local_vars): check_environ()
python3.10.4/Lib/distutils/util.py
21
XX-Net
{ "docstring": "Perform shell/Perl-style variable substitution on 'string'. Every\n occurrence of '$' followed by a name is considered a variable, and\n variable is substituted by the value found in the 'local_vars'\n dictionary, or in 'os.environ' if it's not in 'local_vars'.\n 'os.environ' is first checked/augmented to guarantee that it contains\n certain values: see 'check_environ()'. Raise ValueError for any\n variables not found in either 'local_vars' or 'os.environ'.\n ", "language": "en", "n_whitespaces": 86, "n_words": 63, "vocab_size": 49 }
5
Python
5
8198943edd73a363c266633e1aa5b2a9e9c9f526
util.py
223,399
7
39
subst_vars
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
11
0
56,886
7
1
6
def test_empty_lsb_djob_rankfile():
    with pytest.raises(ValueError, match="The environment variable `LSB_DJOB_RANKFILE` is empty"):
        LSFEnvironment()
tests/plugins/environments/test_lsf_environment.py
40
lightning
{ "docstring": "Test an error when the LSB_DJOB_RANKFILE is not populated.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
11
Python
11
dbf1acd5a553ffc1546734be164cc89cef2b741d
test_lsf_environment.py
241,632
3
20
test_empty_lsb_djob_rankfile
https://github.com/Lightning-AI/lightning.git
Modify LSFEnvironment to use more reliable environment variable (#10825) Co-authored-by: thomas chaton <[email protected]> Co-authored-by: Carlos Mocholí <[email protected]> Co-authored-by: Adrian Wälchli <[email protected]> Co-authored-by: Jirka Borovec <[email protected]>
24
0
69,633
11
2
8
def get_user_timezone() -> str:
    dotenv.load_dotenv(USER_ENV_FILE)
    user_tz = os.getenv("OPENBB_TIMEZONE")
    if user_tz:
        return user_tz
    return ""
openbb_terminal/helper_funcs.py
53
OpenBBTerminal
{ "docstring": "Get user timezone if it is a valid one\n\n Returns\n -------\n str\n user timezone based on .env file\n ", "language": "en", "n_whitespaces": 37, "n_words": 18, "vocab_size": 16 }
14
Python
12
f18c44b0668ef8e40d14d79780558521b2c02304
helper_funcs.py
285,666
13
28
get_user_timezone
https://github.com/OpenBB-finance/OpenBBTerminal.git
New path for styles and add timezone as environment variable (#2509) * add log path * add test to check if log file is in correct dir * env path * black * mypy fix * add styles folder and styles from repo * add timezone as env variable * fix changes with main * fix test * flake8 * fix linting * fix linting * fix issue with light mpl stylesheet * change timezone variable name * change names * change names * names * simplify paths.py * change some names * fix error in logic * remove 3.11 from testing for now
36
0
85,370
9
1
5
def validator_xml_findtext(xpath) -> AllSchema:
    return AllSchema(
        validator_xml_find(xpath),
        validator_getattr("text"),
    )
src/streamlink/plugin/api/validate/_validators.py
39
streamlink
{ "docstring": "\n Find an XML element via xpath and extract its text.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
9
Python
9
120c10302381600abb4044083ce0a106b31df8f0
_validators.py
187,103
8
22
validator_xml_findtext
https://github.com/streamlink/streamlink.git
plugin.api.validate: turn module into package Turn module into package with multiple logical sub-modules: - Define a public interface in the package's `__init__` module - Split validation schemas, validators and validate logic - schemas: classes which register attributes used by their respective `validate` implementations - validators: functions which can internally call `validate` and which return something that can be validated - validate: singledispatch functions which implement the validation logic for schemas and various other types - Rename validation schemas for better internal references - Rename singledispatch methods Other clean-up work: - Update comments and fix grammar - Add type annotations - Use f-strings - Use `str` instead of the `text` alias - Simplify some code blocks - Rearrange classes and functions - Rephrase certain error messages - Add a few more tests for better code coverage
32
0
45,687
10
1
11
def get_instance_data_location() -> DataLocation:
    return DataLocation(
        name=prefect.settings.from_env().orion.data.name,
        base_path=prefect.settings.from_env().orion.data.base_path,
        scheme=prefect.settings.from_env().orion.data.scheme.lower(),
    )
src/prefect/orion/schemas/data.py
101
prefect
{ "docstring": "\n Return the current data location configured for this Orion instance\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
10
Python
10
1d4218a287ef343f32f1e32482592b471be5df1d
data.py
53,408
9
63
get_instance_data_location
https://github.com/PrefectHQ/prefect.git
Move `prefect.settings` to `prefect.settings.from_env()`
40
0
10,792
16
1
24
def test_reupload_different_file_size_and_file_hash(self):
    # Build a fake file, and create it through the admin view
    # since self.document doesn't have a file_size set.
    fake_file = SimpleUploadedFile("some_file.txt", b"this is the content")
    post_data = {
        "title": "My doc",
        "file": fake_file,
    }
    self.client.post(reverse("wagtaildocs:add"), post_data)

    document = models.Document.objects.get(title="My doc")
    old_file_size, old_file_hash = document.file_size, document.file_hash

    new_file = SimpleUploadedFile(document.filename, b"less content")
    self.client.post(
        reverse("wagtaildocs:edit", args=(document.pk,)),
        {
            "title": document.title,
            "file": new_file,
        },
    )

    document.refresh_from_db()
    self.assertNotEqual(document.file_size, old_file_size)
    self.assertNotEqual(document.file_hash, old_file_hash)
wagtail/documents/tests/test_admin_views.py
227
wagtail
{ "docstring": "\n Checks that reuploading the document file with a different file\n changes the file size and file hash (see #5704).\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 15 }
69
Python
58
d10f15e55806c6944827d801cd9c2d53f5da4186
test_admin_views.py
74,800
20
135
test_reupload_different_file_size_and_file_hash
https://github.com/wagtail/wagtail.git
Reformat with black
259
0
16,322
12
2
58
def __call__(self, feat_maps, comp_attribs):
    assert isinstance(feat_maps, paddle.Tensor)
    assert comp_attribs.ndim == 3
    assert comp_attribs.shape[2] == 8

    sorted_dist_inds_batch = []
    local_graph_batch = []
    knn_batch = []
    node_feat_batch = []
    node_label_batch = []

    for batch_ind in range(comp_attribs.shape[0]):
        num_comps = int(comp_attribs[batch_ind, 0, 0])
        comp_geo_attribs = comp_attribs[batch_ind, :num_comps, 1:7]
        node_labels = comp_attribs[batch_ind, :num_comps, 7].astype(
            np.int32)

        comp_centers = comp_geo_attribs[:, 0:2]
        distance_matrix = euclidean_distance_matrix(comp_centers,
                                                    comp_centers)

        batch_id = np.zeros(
            (comp_geo_attribs.shape[0], 1), dtype=np.float32) * batch_ind
        comp_geo_attribs[:, -2] = np.clip(comp_geo_attribs[:, -2], -1, 1)
        angle = np.arccos(comp_geo_attribs[:, -2]) * np.sign(
            comp_geo_attribs[:, -1])
        angle = angle.reshape((-1, 1))
        rotated_rois = np.hstack(
            [batch_id, comp_geo_attribs[:, :-2], angle])
        rois = paddle.to_tensor(rotated_rois)
        content_feats = self.pooling(feat_maps[batch_ind].unsqueeze(0),
                                     rois)

        content_feats = content_feats.reshape([content_feats.shape[0], -1])
        geo_feats = feature_embedding(comp_geo_attribs,
                                      self.node_geo_feat_dim)
        geo_feats = paddle.to_tensor(geo_feats)
        node_feats = paddle.concat([content_feats, geo_feats], axis=-1)

        sorted_dist_inds = np.argsort(distance_matrix, axis=1)
        pivot_local_graphs, pivot_knns = self.generate_local_graphs(
            sorted_dist_inds, node_labels)

        node_feat_batch.append(node_feats)
        node_label_batch.append(node_labels)
        local_graph_batch.append(pivot_local_graphs)
        knn_batch.append(pivot_knns)
        sorted_dist_inds_batch.append(sorted_dist_inds)

    (node_feats, adjacent_matrices, knn_inds, gt_linkage) = \
        self.generate_gcn_input(node_feat_batch,
                                node_label_batch,
                                local_graph_batch,
                                knn_batch,
                                sorted_dist_inds_batch)

    return node_feats, adjacent_matrices, knn_inds, gt_linkage
ppocr/modeling/heads/local_graph.py
607
PaddleOCR
{ "docstring": "Generate local graphs as GCN input.\n\n Args:\n feat_maps (Tensor): The feature maps to extract the content\n features of text components.\n comp_attribs (ndarray): The text component attributes.\n\n Returns:\n local_graphs_node_feat (Tensor): The node features of graph.\n adjacent_matrices (Tensor): The adjacent matrices of local graphs.\n pivots_knn_inds (Tensor): The k-nearest neighbor indices in local\n graph.\n gt_linkage (Tensor): The surpervision signal of GCN for linkage\n prediction.\n ", "language": "en", "n_whitespaces": 193, "n_words": 61, "vocab_size": 43 }
146
Python
103
1f9400dd7374ce9cc47981372e324ff412e53ba3
local_graph.py
25,205
48
406
__call__
https://github.com/PaddlePaddle/PaddleOCR.git
add drrg
845
0
4,867
14
1
9
def emit_message(self):
    message = self.as_airbyte_message().json(exclude_unset=True)
    filtered_message = filter_secrets(message)
    print(filtered_message)
airbyte-cdk/python/airbyte_cdk/utils/traced_exception.py
53
airbyte
{ "docstring": "\n Prints the exception as an AirbyteTraceMessage.\n Note that this will be called automatically on uncaught exceptions when using the airbyte_cdk entrypoint.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 20 }
9
Python
8
73c7fad7fce952a8c3ba827ca858e4280bd846f3
traced_exception.py
5,030
4
30
emit_message
https://github.com/airbytehq/airbyte.git
CDK: emit `AirbyteTraceMessage` with exception trace information (#12593)
37
0
709
10
1
24
def test_purge_room_and_block(self) -> None:
    # Test that room is not purged
    with self.assertRaises(AssertionError):
        self._is_purged(self.room_id)

    # Test that room is not blocked
    self._is_blocked(self.room_id, expect=False)

    # Assert one user in room
    self._is_member(room_id=self.room_id, user_id=self.other_user)

    channel = self.make_request(
        "DELETE",
        self.url.encode("ascii"),
        content={"block": True, "purge": True},
        access_token=self.admin_user_tok,
    )

    self.assertEqual(200, channel.code, msg=channel.json_body)
    self.assertEqual(None, channel.json_body["new_room_id"])
    self.assertEqual(self.other_user, channel.json_body["kicked_users"][0])
    self.assertIn("failed_to_kick_users", channel.json_body)
    self.assertIn("local_aliases", channel.json_body)

    self._is_purged(self.room_id)
    self._is_blocked(self.room_id, expect=True)
    self._has_no_members(self.room_id)
tests/rest/admin/test_room.py
298
synapse
{ "docstring": "Test to purge a room and block it.\n Members will not be moved to a new room and will not receive a message.\n ", "language": "en", "n_whitespaces": 37, "n_words": 23, "vocab_size": 16 }
57
Python
46
c97042f7eef3748e17c90e48a4122389a89c4735
test_room.py
249,156
22
183
test_purge_room_and_block
https://github.com/matrix-org/synapse.git
Use literals in place of `HTTPStatus` constants in tests (#13469)
231
0
72,663
12
1
4
def get_element_html_by_id(id, html): return get_element_html_by_attribute('id', id, html)
yt_dlp/utils.py
29
yt-dlp
{ "docstring": "Return the html of the tag with the specified ID in the passed HTML document", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 12 }
7
Python
7
6f32a0b5b70fe0f8b14c2946b40840b795044662
utils.py
162,151
2
17
get_element_html_by_id
https://github.com/yt-dlp/yt-dlp.git
[utils] Improve parsing for nested HTML elements (#2129) and add functions to return the HTML of elements Authored by: zmousm
13
0
39,167
8
1
3
def prereleases(self, value): # type: (bool) -> None
.venv/lib/python3.8/site-packages/pip/_vendor/packaging/specifiers.py
16
transferlearning
{ "docstring": "\n Sets whether or not pre-releases as a whole are allowed by this\n specifier.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 13 }
8
Python
8
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
specifiers.py
62,884
1
8
prereleases
https://github.com/jindongwang/transferlearning.git
upd; format
22
0
13,060
6
1
4
def receive_parameter(self) -> ParameterRecord | None: raise NotImplementedError()
nni/runtime/trial_command_channel/base.py
26
nni
{ "docstring": "Get the next parameter record from NNI manager.\n\n Returns\n -------\n :class:`~nni.typehint.ParameterRecord`\n The next parameter record.\n Could be ``None`` if no more parameter is available.\n ", "language": "en", "n_whitespaces": 74, "n_words": 24, "vocab_size": 21 }
8
Python
8
7f1495c8b338c547005770cb83f2f7f4b88798f3
base.py
113,798
10
14
receive_parameter
https://github.com/microsoft/nni.git
Trial command channel (#5254)
22
0
25,031
7
3
6
def rebuild_cablepaths(instance, raw=False, **kwargs):
    if not raw:
        peer_termination = instance.get_peer_termination()
        # if peer_termination:
        #     rebuild_paths(peer_termination)
netbox/circuits/signals.py
43
netbox
{ "docstring": "\n Rebuild any CablePaths which traverse the peer CircuitTermination.\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
15
Python
13
5667a9c456e0514a2d00d6475e7013748b4a7c1e
signals.py
264,829
5
31
rebuild_cablepaths
https://github.com/netbox-community/netbox.git
Refactor CablePath.from_origin()
46
0
77,847
10
3
11
def register_handler(key, handler):
sympy/assumptions/ask.py
32
""" Register a handler in the ask system. key must be a string and handler athe ask system. key must be a
sympy
{ "docstring": "\n Register a handler in the ask system. key must be a string and handler a", "language": "en", "n_whitespaces": 18, "n_words": 15, "vocab_size": 12 }
3
Python
3
ad766d1c02943e86f50559abfd0c72e582c9ca6a
ask.py
196,755
16
77
register_handler
https://github.com/sympy/sympy.git
Update the AskHandler deprecation warnings n.b., the issue number in the original warning message was wrong. It should have been #20837.
6
2
48,151
7
2
22
def test_float32_float64_equivalence(is_sparse):
    rng = np.random.RandomState(0)
    X = rng.rand(10, 2)

    if is_sparse:
        X[X < 0.8] = 0
        X = sp.csr_matrix(X)

    km64 = BisectingKMeans(n_clusters=3, random_state=0).fit(X)
    km32 = BisectingKMeans(n_clusters=3, random_state=0).fit(X.astype(np.float32))

    assert_allclose(km32.cluster_centers_, km64.cluster_centers_)
    assert_array_equal(km32.labels_, km64.labels_)
sklearn/cluster/tests/test_bisect_k_means.py
167
scikit-learn
{ "docstring": "Check that the results are the same between float32 and float64.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
31
Python
24
0822851f5cb17827939a7d7b4f8c84f43184ae89
test_bisect_k_means.py
259,765
10
108
test_float32_float64_equivalence
https://github.com/scikit-learn/scikit-learn.git
FEA Bisecting K-Means (#20031) Co-authored-by: Gael Varoquaux <[email protected]> Co-authored-by: Tom Dupré la Tour <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
69
0
75,910
11
1
23
def delete_tasks_annotations(project, queryset, **kwargs):
    task_ids = queryset.values_list('id', flat=True)
    annotations = Annotation.objects.filter(task__id__in=task_ids)
    count = annotations.count()
    annotations_ids = list(annotations.values('id'))
    annotations.delete()
    emit_webhooks_for_instance(project.organization, project, WebhookAction.ANNOTATIONS_DELETED, annotations_ids)
    bulk_update_stats_project_tasks(queryset)
    return {'processed_items': count, 'detail': 'Deleted ' + str(count) + ' annotations'}
label_studio/data_manager/actions/basic.py
158
label-studio
{ "docstring": " Delete all annotations by tasks ids\n\n :param project: project instance\n :param queryset: filtered tasks db queryset\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 14 }
34
Python
29
85152f2c8c7f8b301b28fcd771f13b5c166c59eb
basic.py
177,572
10
93
delete_tasks_annotations
https://github.com/heartexlabs/label-studio.git
fix: DEV-1486: fix dm action when deleting all annotations, finished state is not updated (#1923) Co-authored-by: Max Tkachenko <[email protected]>
72
0
42,445
11
4
17
def _check_processes(self):
    while True:
        with self.server_lock:
            for client_id, specific_server in list(self.servers.items()):
                if specific_server.poll() is not None:
                    logger.info(
                        f"Specific server {client_id} is no longer running"
                        f", freeing its port {specific_server.port}"
                    )
                    del self.servers[client_id]
                    # Port is available to use again.
                    self._free_ports.append(specific_server.port)

        time.sleep(CHECK_PROCESS_INTERVAL_S)
python/ray/util/client/server/proxier.py
133
ray
{ "docstring": "\n Keeps the internal servers dictionary up-to-date with running servers.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
41
Python
39
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
proxier.py
132,945
12
72
_check_processes
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
280
0
29,875
19
1
21
def bounding_box(self, frame_index, face_index, pnt_x, width, pnt_y, height, aligner="FAN"):
    logger.trace("frame_index: %s, face_index %s, pnt_x %s, width %s, pnt_y %s, height %s, "
                 "aligner: %s", frame_index, face_index, pnt_x, width, pnt_y, height, aligner)
    face = self._faces_at_frame_index(frame_index)[face_index]
    face.left = pnt_x
    face.width = width
    face.top = pnt_y
    face.height = height
    face._landmarks_xy = self._extractor.get_landmarks(frame_index, face_index, aligner)
    self._globals.tk_update.set(True)
tools/manual/detected_faces.py
150
faceswap
{ "docstring": " Update the bounding box for the :class:`~lib.align.DetectedFace` object at the\n given frame and face indices, with the given dimensions and update the 68 point landmarks\n from the :class:`~tools.manual.manual.Aligner` for the updated bounding box.\n\n Parameters\n ----------\n frame_index: int\n The frame that the face is being set for\n face_index: int\n The face index within the frame\n pnt_x: int\n The left point of the bounding box\n width: int\n The width of the bounding box\n pnt_y: int\n The top point of the bounding box\n height: int\n The height of the bounding box\n aligner: [\"cv2-dnn\", \"FAN\"], optional\n The aligner to use to generate the landmarks. Default: \"FAN\"\n ", "language": "en", "n_whitespaces": 264, "n_words": 102, "vocab_size": 55 }
52
Python
30
5e73437be47f2410439a3c6716de96354e6a0c94
detected_faces.py
101,252
10
100
bounding_box
https://github.com/deepfakes/faceswap.git
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
135
0
20,672
9
5
13
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
    if ext == '.rc' or ext == '.res':
        # gcc needs '.res' and '.rc' compiled to object files !!!
        try:
            self.spawn(["windres", "-i", src, "-o", obj])
        except DistutilsExecError as msg:
            raise CompileError(msg)
    else:  # for other files use the C-compiler
        try:
            self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
                       extra_postargs)
        except DistutilsExecError as msg:
            raise CompileError(msg)
python3.10.4/Lib/distutils/cygwinccompiler.py
149
XX-Net
{ "docstring": "Compiles the source by spawning GCC and windres if needed.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
63
Python
48
8198943edd73a363c266633e1aa5b2a9e9c9f526
cygwinccompiler.py
222,841
12
89
_compile
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
225
0
56,773
16
1
5
def __newobj_ex__(cls, args, kwargs):
    return cls.__new__(cls, *args, **kwargs)
python3.10.4/Lib/copyreg.py
36
XX-Net
{ "docstring": "Used by pickle protocol 4, instead of __newobj__ to allow classes with\n keyword-only arguments to be pickled correctly.\n ", "language": "en", "n_whitespaces": 24, "n_words": 18, "vocab_size": 17 }
8
Python
8
8198943edd73a363c266633e1aa5b2a9e9c9f526
copyreg.py
221,746
2
23
__newobj_ex__
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
14
0
56,500
8
1
7
def mellin_transform(f, x, s, **hints):
    return MellinTransform(f, x, s).doit(**hints)
sympy/integrals/transforms.py
43
sympy
{ "docstring": "\n Compute the Mellin transform `F(s)` of `f(x)`,\n\n .. math :: F(s) = \\int_0^\\infty x^{s-1} f(x) \\mathrm{d}x.\n\n For all \"sensible\" functions, this converges absolutely in a strip\n `a < \\operatorname{Re}(s) < b`.\n\n Explanation\n ===========\n\n The Mellin transform is related via change of variables to the Fourier\n transform, and also to the (bilateral) Laplace transform.\n\n This function returns ``(F, (a, b), cond)``\n where ``F`` is the Mellin transform of ``f``, ``(a, b)`` is the fundamental strip\n (as above), and ``cond`` are auxiliary convergence conditions.\n\n If the integral cannot be computed in closed form, this function returns\n an unevaluated :class:`MellinTransform` object.\n\n For a description of possible hints, refer to the docstring of\n :func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=False``,\n then only `F` will be returned (i.e. not ``cond``, and also not the strip\n ``(a, b)``).\n\n Examples\n ========\n\n >>> from sympy import mellin_transform, exp\n >>> from sympy.abc import x, s\n >>> mellin_transform(exp(-x), x, s)\n (gamma(s), (0, oo), True)\n\n See Also\n ========\n\n inverse_mellin_transform, laplace_transform, fourier_transform\n hankel_transform, inverse_hankel_transform\n ", "language": "en", "n_whitespaces": 245, "n_words": 158, "vocab_size": 117 }
10
Python
9
498015021131af4dbb07eb110e5badaba8250c7b
transforms.py
196,338
42
29
mellin_transform
https://github.com/sympy/sympy.git
Updated import locations
15
0
47,838
9
2
9
def get_verts(self):
    trans = self.get_transform()
    path = self.get_path()
    polygons = path.to_polygons(trans)
    if len(polygons):
        return polygons[0]
    return []
lib/matplotlib/patches.py
72
matplotlib
{ "docstring": "\n Return a copy of the vertices used in this patch.\n\n If the patch contains Bézier curves, the curves will be interpolated by\n line segments. To access the curves as curves, use `get_path`.\n ", "language": "en", "n_whitespaces": 62, "n_words": 32, "vocab_size": 27 }
17
Python
14
03a0b5ea238014ba87f74ef766928287726aa00a
patches.py
110,301
7
42
get_verts
https://github.com/matplotlib/matplotlib.git
Doc: Fix grammar and spelling
70
0
24,041
8
1
6
def test_missing_cpp_namespace(self) -> None:
    yaml_str = 
    output_error = self.get_errors_from_gen_backend_stubs(yaml_str)
    self.assertExpectedInline(output_error, )
tools/test/test_gen_backend_stubs.py
47
pytorch
{ "docstring": "\\\nbackend: XLA\nsupported:\n- absYou must provide a value for \"cpp_namespace\"", "language": "en", "n_whitespaces": 8, "n_words": 12, "vocab_size": 12 }
11
Python
10
bb5b4cceb6f737448eaaa6817cd773b6f4b0e77d
test_gen_backend_stubs.py
102,162
7
26
test_missing_cpp_namespace
https://github.com/pytorch/pytorch.git
Revert "Revert D32498569: allow external backend codegen to toggle whether to generate out= and inplace kernels" (#69950) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/69950 This reverts commit f6cad53443704dfe5a20cc62bee14d91e3bffcaa. Test Plan: Imported from OSS Reviewed By: albanD Differential Revision: D33113545 Pulled By: bdhirsh fbshipit-source-id: d6590294662588d36c09662dea65919ad4e1e288
32
0
21,477
8
1
2
def labelside(self):
    return self["labelside"]
packages/python/plotly/plotly/graph_objs/_parcoords.py
22
plotly.py
{ "docstring": "\n Specifies the location of the `label`. \"top\" positions labels\n above, next to the title \"bottom\" positions labels below the\n graph Tilted labels with \"labelangle\" may be positioned better\n inside margins when `labelposition` is set to \"bottom\".\n\n The 'labelside' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['top', 'bottom']\n\n Returns\n -------\n Any\n ", "language": "en", "n_whitespaces": 147, "n_words": 59, "vocab_size": 46 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_parcoords.py
227,541
2
11
labelside
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
59,214
7
1
5
async def test_find_in_range_altered_inverted(hass):
    mqtt_cover = MqttCover(
        hass,
        {
            "name": "cover.test",
            "state_topic": "state-topic",
            "get_position_topic": None,
            "command_topic": "command-topic",
            "availability_topic": None,
            "tilt_command_topic": "tilt-command-topic",
            "tilt_status_topic": "tilt-status-topic",
            "qos": 0,
            "retain": False,
            "state_open": "OPEN",
            "state_closed": "CLOSE",
            "position_open": 80,
            "position_closed": 180,
            "payload_open": "OPEN",
            "payload_close": "CLOSE",
            "payload_stop": "STOP",
            "payload_available": None,
            "payload_not_available": None,
            "optimistic": False,
            "value_template": None,
            "tilt_open_position": 180,
            "tilt_closed_position": 80,
            "tilt_min": 180,
            "tilt_max": 80,
            "tilt_optimistic": False,
            "set_position_topic": None,
            "set_position_template": None,
            "unique_id": None,
            "device_config": None,
        },
        None,
        None,
    )

    assert mqtt_cover.find_in_range_from_percent(60) == 120
    assert mqtt_cover.find_in_range_from_percent(60, "cover") == 120
tests/components/mqtt/test_cover.py
288
core
{ "docstring": "Test find in range with altered range and inverted.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
79
Python
58
52561ce0769ddcf1e8688c8909692b66495e524b
test_cover.py
302,086
39
156
test_find_in_range_altered_inverted
https://github.com/home-assistant/core.git
Update MQTT tests to use the config entry setup (#72373) * New testframework and tests for fan platform * Merge test_common_new to test_common * Add alarm_control_panel * Add binary_sensor * Add button * Add camera * Add climate * Add config_flow * Add cover * Add device_tracker_disovery * Add device_trigger * Add diagnostics * Add discovery * Add humidifier * Add init * Add lecacy_vacuum * Add light_json * Add light_template * Add light * Add lock * Add number * Add scene * Add select * Add sensor * Add siren * Add state_vacuum * Add subscription * Add switch * Add tag * Add trigger * Add missed tests * Add another missed test * Add device_tracker * Remove commented out code * Correct tests according comments * Improve mqtt_mock_entry and recover tests * Split fixtures with and without yaml setup * Update fixtures manual_mqtt * Update fixtures mqtt_json * Fix test tasmota * Update fixture mqtt_room * Revert fixture changes, improve test * re-add test
448
0
100,923
11
1
24
def test_message(self) -> None:
    room_id = self.helper.create_room_as(
        self.other_user_id, tok=self.other_access_token
    )

    # The user should be in the room.
    self.helper.join(room_id, self.banned_user_id, tok=self.banned_access_token)

    # Sending a message should complete successfully.
    result = self.helper.send_event(
        room_id=room_id,
        type=EventTypes.Message,
        content={"msgtype": "m.text", "body": "with right label"},
        tok=self.banned_access_token,
    )
    self.assertIn("event_id", result)
    event_id = result["event_id"]

    latest_events = self.get_success(
        self.store.get_latest_event_ids_in_room(room_id)
    )
    self.assertNotIn(event_id, latest_events)
tests/rest/client/test_shadow_banned.py
192
synapse
{ "docstring": "Messages from shadow-banned users don't actually get sent.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
53
Python
46
1901cb1d4a8b7d9af64493fbd336e9aa2561c20c
test_shadow_banned.py
247,069
18
118
test_message
https://github.com/matrix-org/synapse.git
Add type hints to `tests/rest/client` (#12084)
210
0
71,479
12
2
9
def vf2pp_isomorphism(G1, G2, node_label=None, default_label=None):
    try:
        mapping = next(vf2pp_all_isomorphisms(G1, G2, node_label, default_label))
        return mapping
    except StopIteration:
        return None
networkx/algorithms/isomorphism/vf2pp.py
61
networkx
{ "docstring": "Return an isomorphic mapping between `G1` and `G2` if it exists.\n\n Parameters\n ----------\n G1, G2 : NetworkX Graph or MultiGraph instances.\n The two graphs to check for isomorphism.\n\n node_label : str, optional\n The name of the node attribute to be used when comparing nodes.\n The default is `None`, meaning node attributes are not considered\n in the comparison. Any node that doesn't have the `node_labels`\n attribute uses `default_label` instead.\n\n default_label : scalar\n Default value to use when a node doesn't have an attribute\n named `node_label`. Default is `None`.\n\n Returns\n -------\n dict or None\n Node mapping if the two graphs are isomorphic. None otherwise.\n ", "language": "en", "n_whitespaces": 185, "n_words": 102, "vocab_size": 75 }
18
Python
15
bffcd74649fb95a57fb834846eb3c7d9693c55b8
vf2pp.py
177,240
6
40
vf2pp_isomorphism
https://github.com/networkx/networkx.git
Preliminary VF2++ Implementation (#5788) * Preliminary implementation of the candidate node pair ordering of VF2++ * Removed unused lines of code * Added todos * Added demo and pseudocode for VF2++ * Pointed out a problem with the pseudocode * Initialisation of the VF2++ basis structure * Initialise the GraphMatcher * Remove useless changes * Check labels for the node ordering + demo * Code to verify the ordering * Implement the ISO feasibility check * Implement the IND feasibility * Create State class * Fixed Dan's code for the ordering * Preliminary form of the node ordering * Add visualisation * Use list comprehension for the Ti computation * Remove function * Create Unit Tests * Add labels check + update unit tests * Add pre-computation of G-labels * Remove todo * First implementation of the candidate selection * Initial version of candidate selection * Remove unnecessary files * Merge candidate selection cases into one * Create a function to incrementally update Ti and Ti_out * Unit Test for the Ti updating * Implement the Ti/Ti_out restoring * Finish the restoring of Ti and create unit test * Update test file names * Uncommented test section * Replace redundant loop with for-any * Create unit test for candidate selection using the same label for all nodes * Create unit test for candidate selection using different labels for the nodes * Update feasibility tests without the use of the state class * Create more unit tests for the feasibility checking * Provide explanation for the unit tests * First successful test of the complete ISO VF2++ algorithm (except from the buggy ordering) * Fix bug: when popping a node to climb up the DFS tree we need the previous node ordering (containing the node that we just popped) * Create a separate file for the VF2++ ISO algorithm * Delete file * Remove redundant iteration and memory use * Demo for different labels * Add benchmark for the incremental Ti updating * Remove unnecessary class * Fix bug with the ordering WOOOHOOOOO * Unit tests for the node ordering * Add unit tests for the VF2++ ISO * Fix ordering * Probablly fix logic error in ordering * Reformatted with black * Test precommit * Test precommit * Test pre commit * Testing pre commit * Update networkx/algorithms/isomorphism/tests/VF2++/test_vf2pp.py Co-authored-by: Ross Barnowski <[email protected]> * Add unit tests for vf2++ * Added vf2++ unit test * Added precheck for VF2++ * Add unit tests for the precheck * Updated the benchmarking * Updated the benchmark * Apply hooks * Add documentation for the ordering * Add documentation for the candidate selection * Added documentation for the feasibility * Added documentation for vf2++ * Separate functions for ISO feasibility * Refine unit tests * Apply hooks * Force reformat all files * Remove redundant return statements from VF2__ * Apply hooks * Apply hooks * Format * Minor changes * Add unit tests * Adjusted benchmark * Fix benchmark * Isort * Isort benchmark * Apply optimization in the candidate selection * Track matched node with pointer * Adjust benchmark * Restructure in VF2 function * Make VF2++ EXTREMELY PRETTY * Removed sorting in feasibility rules * Get rid of visited set, check mapping instead * Update networkx/algorithms/isomorphism/tests/VF2++/test_vf2pp.py Co-authored-by: Dan Schult <[email protected]> * Made color assignement deterministic in VF2++ unit tests * Add keyword argument in unit tests * Hoepfully fix pipeline errors * Add vf2++ unit tests for multigraphs * Add Unit tests for Feasibility * Add unit tests for feasibility 
on multi graphs * Finalize feasibility tests for multigraph settings * Update documentation * Remove list comprehension and boost performance * Add unit tests for both graphs and multi graphs, using same labels * Isort * Optimized precheck * Replace loop with any * Optimize multigraph chceck * Transfer except statement * Check order consistency * Cache degrees and labels from the beginning * Delete benchmark to create new * Fix precheck bug * Adjust unit tests * Add benchmark for perofmance comparison between VF2 and VF2++ * Fix Ti computing tests * Hopefully fix isort * Add benchmark for the candidate selection methods * Rename modules: lower case, remove + * Refactor VF2++ arguments * Adjust VF2++ to work with multiple node labels * Add unit tests for multiple labels * Adjust for different number of labels per node * Finish arguments of VF2++ * Add user functions * Exported the two vf2++ functions * Added underscore prefix to private functions and fixed tests * Update networkx/algorithms/isomorphism/vf2pp.py Co-authored-by: Dan Schult <[email protected]> * Update networkx/algorithms/isomorphism/demo.py Co-authored-by: Dan Schult <[email protected]> * Update networkx/algorithms/isomorphism/vf2pp.py Co-authored-by: Dan Schult <[email protected]> * Apply suggested changes * Refactor rst files * Rm unnecessary toctree from isomorphism page. * Autodoc vf2pp module + public functions. * Rm dedicated vf2pp reference article. * Rm extra vf2pp listing from autosummaries. * Add summary of three functions to module docstring. * Make sure docstrings match their functions. * Refactor everything * Format code * Add unit test * Inline process level function in node ordering * Perform intersection first rather than last * Update networkx/algorithms/isomorphism/vf2pp_helpers/candidates.py Co-authored-by: Dan Schult <[email protected]> * Replace return statement with multiple operations and make it more readable * Update networkx/algorithms/isomorphism/vf2pp_helpers/feasibility.py Co-authored-by: Dan Schult <[email protected]> * Fix multigraph bug in update_Tinout * Abstract the argmax function * Add unit test for first case of candidate selection * Create unit test for all candidate selection cases * Remove re-definition of namedtuple parameters * Update doc/reference/algorithms/isomorphism.rst Co-authored-by: Ross Barnowski <[email protected]> * Update networkx/algorithms/__init__.py Co-authored-by: Ross Barnowski <[email protected]> * Delete benchmark file * Add demo file * Create util file containing the helper functions, common across all unit tests * Fix CI/CD * Make unit tests for Ti updating specific * Remove util functions from vf2pp tests * Remove utils functions from multivf2pp tests * Remove utils functions from candidate tests * Remove utils functions from ordering checks * Remove utils functions from Ti tests * Add example in docstring * Remove unused utils functions * Separate initialization of vf2pp * Inline functions and add new abstract function for pushing to stack * Inline push to stack * Add commentsa * Separate precheck functions * Replace method with existing networkx function * Include label initialization inside parameter initializer function * Rename Tiout to Titilde * Update networkx/algorithms/isomorphism/tests/vf2pp/test_Ti_computing.py Co-authored-by: Ross Barnowski <[email protected]> * Use canonical setitem for dictionary insertions * Update networkx/algorithms/isomorphism/tests/vf2pp/test_precheck.py Co-authored-by: Ross Barnowski <[email protected]> * Remove variable 
assignement * Merge unit tests of vf2pp for graphs and multigraphs into the same file * Update networkx/algorithms/isomorphism/vf2pp.py Co-authored-by: Ross Barnowski <[email protected]> * Update networkx/algorithms/isomorphism/vf2pp.py Co-authored-by: Ross Barnowski <[email protected]> * Update networkx/algorithms/isomorphism/vf2pp.py Co-authored-by: Ross Barnowski <[email protected]> * Change variable name * Update networkx/algorithms/isomorphism/vf2pp.py Co-authored-by: Ross Barnowski <[email protected]> * Re-write ordering unit tests * Rename vf2pp solver * Update networkx/algorithms/isomorphism/vf2pp_helpers/feasibility.py Co-authored-by: Dan Schult <[email protected]> * Replace abstractified argmax function with two loops for readability * Apply final changes * Fix mistake * Update ref guide to reflect new fn names. * Update docstrings * Fix line length in module docstring * Copy updated parameter section to all 3 public fns. * Add Yields section to all_isomorphisms fn. Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Dan Schult <[email protected]>
48
0
42,307
12
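Illustrative addition (not part of the dataset record above): a minimal usage sketch of the documented function, assuming a networkx release (3.0 or later) that exports vf2pp_isomorphism at the package top level.

import networkx as nx

# Two 4-cycles that differ only in node names; they are isomorphic.
G1 = nx.cycle_graph(4)
G2 = nx.relabel_nodes(nx.cycle_graph(4), {0: "a", 1: "b", 2: "c", 3: "d"})

mapping = nx.vf2pp_isomorphism(G1, G2, node_label=None)
print(mapping)  # e.g. {0: 'a', 1: 'b', 2: 'c', 3: 'd'}; None if no isomorphism exists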
8
15
def url_params_from_lookup_dict(lookups):
    params = {}
    if lookups and hasattr(lookups, "items"):
        for k, v in lookups.items():
            if callable(v):
                v = v()
            if isinstance(v, (tuple, list)):
                v = ",".join(str(x) for x in v)
            elif isinstance(v, bool):
                v = ("0", "1")[v]
            else:
                v = str(v)
            params[k] = v
    return params
django/contrib/admin/widgets.py
171
django
{ "docstring": "\n Convert the type of lookups specified in a ForeignKey limit_choices_to\n attribute to a dictionary of query parameters\n ", "language": "en", "n_whitespaces": 27, "n_words": 17, "vocab_size": 15 }
47
Python
31
9c19aff7c7561e3a82978a272ecdaad40dda5c00
widgets.py
203,569
14
103
url_params_from_lookup_dict
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
181
0
50,449
16
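Illustrative addition (not part of the record above): a minimal sketch of how the helper flattens a limit_choices_to-style dict; it assumes the call happens inside a configured Django project.

from django.contrib.admin.widgets import url_params_from_lookup_dict

lookups = {"is_active": True, "groups__in": [1, 2]}
params = url_params_from_lookup_dict(lookups)
# Booleans become "0"/"1" and sequences are comma-joined, per the function body above.
print(params)  # {'is_active': '1', 'groups__in': '1,2'}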
1
4
def test_single_gen_next(self) -> None:
    id_gen = self._create_id_generator()
tests/storage/test_id_generators.py
28
synapse
{ "docstring": "Check that we correctly increment the current token from the DB.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
7
Python
7
115f0eb2334b13665e5c112bd87f95ea393c9047
test_id_generators.py
249,828
5
26
test_single_gen_next
https://github.com/matrix-org/synapse.git
Reintroduce #14376, with bugfix for monoliths (#14468) * Add tests for StreamIdGenerator * Drive-by: annotate all defs * Revert "Revert "Remove slaved id tracker (#14376)" (#14463)" This reverts commit d63814fd736fed5d3d45ff3af5e6d3bfae50c439, which in turn reverted 36097e88c4da51fce6556a58c49bd675f4cf20ab. This restores the latter. * Fix StreamIdGenerator not handling unpersisted IDs Spotted by @erikjohnston. Closes #14456. * Changelog Co-authored-by: Nick Mills-Barrett <[email protected]> Co-authored-by: Erik Johnston <[email protected]>
21
0
73,158
8
13
59
def render(self):
    from dcim.models import Cable
    from wireless.models import WirelessLink

    traced_path = self.origin.trace()

    # Iterate through each (terms, cable, terms) segment in the path
    for i, segment in enumerate(traced_path):
        near_ends, connector, far_ends = segment

        # Near end parent
        if i == 0:
            # If this is the first segment, draw the originating termination's parent object
            parent_object = self._draw_box(
                x=0,
                width=self.width,
                color=self._get_color(near_ends[0].parent_object),
                url=near_ends[0].parent_object.get_absolute_url(),
                labels=self._get_labels(near_ends[0].parent_object)
            )
            self.parent_objects.append(parent_object)

        # Near end termination
        self.draw_terminations(near_ends)

        # Connector (a Cable or WirelessLink)
        connector = connector[0]  # Remove Cable from list
        if connector is not None:

            # Cable
            if type(connector) is Cable:
                connector_labels = [
                    f'Cable {connector}',
                    connector.get_status_display()
                ]
                if connector.type:
                    connector_labels.append(connector.get_type_display())
                if connector.length and connector.length_unit:
                    connector_labels.append(f'{connector.length} {connector.get_length_unit_display()}')
                cable = self.draw_cable(
                    color=connector.color or '000000',
                    url=connector.get_absolute_url(),
                    labels=connector_labels
                )
                self.connectors.append(cable)

            # WirelessLink
            elif type(connector) is WirelessLink:
                connector_labels = [
                    f'Wireless link {connector}',
                    connector.get_status_display()
                ]
                if connector.ssid:
                    connector_labels.append(connector.ssid)
                wirelesslink = self.draw_wirelesslink(
                    url=connector.get_absolute_url(),
                    labels=connector_labels
                )
                self.connectors.append(wirelesslink)

            # Far end termination
            self.draw_terminations(far_ends)

            # Far end parent
            parent_object = self._draw_box(
                x=0,
                width=self.width,
                color=self._get_color(far_ends[0].parent_object),
                url=far_ends[0].parent_object.get_absolute_url(),
                labels=self._get_labels(far_ends[0].parent_object),
            )
            self.parent_objects.append(parent_object)

        elif far_ends:

            # Attachment
            attachment = self.draw_attachment()
            self.connectors.append(attachment)

            # ProviderNetwork
            parent_object = self._draw_box(
                x=0,
                width=self.width,
                color=self._get_color(far_ends[0]),
                url=far_ends[0].get_absolute_url(),
                labels=self._get_labels(far_ends[0])
            )
            self.parent_objects.append(parent_object)

    # Determine drawing size
    self.drawing = svgwrite.Drawing(
        size=(self.width, self.cursor + 2)
    )

    # Attach CSS stylesheet
    with open(f'{settings.STATIC_ROOT}/cable_trace.css') as css_file:
        self.drawing.defs.add(self.drawing.style(css_file.read()))

    # Add elements to the drawing in order of depth (Z axis)
    for element in self.connectors + self.parent_objects + self.terminations:
        self.drawing.add(element)

    return self.drawing
netbox/dcim/svg/cables.py
832
netbox
{ "docstring": "\n Return an SVG document representing a cable trace.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
228
Python
141
bab6fb0de24d568371c8a55bcb22768b2d60f515
cables.py
265,020
73
504
render
https://github.com/netbox-community/netbox.git
Update SVG trace rendering to support multiple terminations per cable end
1,582
0
77,946
19
1
5
def _unquote_match(match):
    s = match.group(0)
    return unquote(s)


# Header decoding is done a bit differently
python3.10.4/Lib/email/quoprimime.py
35
XX-Net
{ "docstring": "Turn a match in the form =AB to the ASCII character with value 0xab", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
15
Python
15
8198943edd73a363c266633e1aa5b2a9e9c9f526
quoprimime.py
223,868
3
19
_unquote_match
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
23
0
57,119
8
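Illustrative addition (not part of the record above): the same quoted-printable decoding shown through the public quopri module rather than the private email.quoprimime helper.

import quopri

# "=41" is the quoted-printable escape for byte 0x41 ("A").
assert quopri.decodestring(b"=41BC") == b"ABC"
print(quopri.decodestring(b"Caf=C3=A9").decode("utf-8"))  # Café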
1
2
def backoff(self):
    return self["backoff"]
packages/python/plotly/plotly/graph_objs/scatter/_line.py
22
plotly.py
{ "docstring": "\n Sets the line back off from the end point of the nth line\n segment (in px). This option is useful e.g. to avoid overlap\n with arrowhead markers. With \"auto\" the lines would trim before\n markers if `marker.angleref` is set to \"previous\".\n\n The 'backoff' property is a number and may be specified as:\n - An int or float in the interval [0, inf]\n - A tuple, list, or one-dimensional numpy array of the above\n\n Returns\n -------\n int|float|numpy.ndarray\n ", "language": "en", "n_whitespaces": 158, "n_words": 76, "vocab_size": 64 }
4
Python
4
d5a345d01507f8b6792c51507d1d8f35d7386d29
_line.py
231,182
2
11
backoff
https://github.com/plotly/plotly.py.git
update to plotly.js 2.16.1
18
0
62,764
7
4
43
def _update_png_headers(self):
    to_update = [  # Items whose face index has changed
        x for x in self._items.file_list_sorted
        if x["face_index"] != self._items.items[x["source_filename"]].index(x["face_index"])]

    for file_info in tqdm(to_update, desc="Updating PNG Headers", leave=False):
        frame = file_info["source_filename"]
        face_index = file_info["face_index"]
        new_index = self._items.items[frame].index(face_index)

        fullpath = os.path.join(self._items.folder, file_info["current_filename"])
        logger.debug("Updating png header for '%s': face index from %s to %s",
                     fullpath, face_index, new_index)

        # Update file_list_sorted for rename task
        orig_filename = f"{os.path.splitext(frame)[0]}_{new_index}.png"
        file_info["face_index"] = new_index
        file_info["original_filename"] = orig_filename

        face = DetectedFace()
        face.from_alignment(self._alignments.get_faces_in_frame(frame)[new_index])
        meta = dict(alignments=face.to_png_meta(),
                    source=dict(alignments_version=file_info["alignments_version"],
                                original_filename=orig_filename,
                                face_index=new_index,
                                source_filename=frame,
                                source_is_video=file_info["source_is_video"],
                                source_frame_dims=file_info.get("source_frame_dims")))
        update_existing_metadata(fullpath, meta)

    logger.info("%s Extracted face(s) had their header information updated", len(to_update))
tools/alignments/jobs.py
387
faceswap
{ "docstring": " Update the EXIF iTXt field of any face PNGs that have had their face index changed.\n\n Notes\n -----\n This could be quicker if parellizing in threads, however, Windows (at least) does not seem\n to like this and has a tendency to throw permission errors, so this remains single threaded\n for now.\n ", "language": "en", "n_whitespaces": 94, "n_words": 51, "vocab_size": 48 }
95
Python
73
a9908b46f77dc66ac7efe7100ea0eed4b1f2b460
jobs.py
100,663
25
224
_update_png_headers
https://github.com/deepfakes/faceswap.git
Alignments tool - Replace 'extract-large' with 'min-size'
511
0
20,122
17
2
8
def copy(self, deep=True):  # noqa: PR01, RT01, D200
    if deep:
        return self.__constructor__(query_compiler=self._query_compiler.copy())
    new_obj = self.__constructor__(query_compiler=self._query_compiler)
    self._add_sibling(new_obj)
    return new_obj
modin/pandas/base.py
80
modin
{ "docstring": "\n Make a copy of the object's metadata.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
18
Python
16
605efa618e7994681f57b11d04d417f353ef8d50
base.py
153,569
6
48
copy
https://github.com/modin-project/modin.git
DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333) Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Alexander Myskov <[email protected]>
65
0
35,450
13
1
2
def z(self):
    return self["z"]
packages/python/plotly/plotly/graph_objs/_choropleth.py
22
plotly.py
{ "docstring": "\n Sets the color values.\n\n The 'z' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n ", "language": "en", "n_whitespaces": 76, "n_words": 26, "vocab_size": 26 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_choropleth.py
226,452
2
11
z
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
58,125
7
1
15
def test_self_hosted_rate_limit_check(self, default_rate_limit_mock):
    request = self.factory.get("/")

    default_rate_limit_mock.return_value = RateLimit(10, 100)
    self.middleware.process_view(request, self._test_endpoint, [], {})
    assert not request.will_be_rate_limited

    default_rate_limit_mock.return_value = RateLimit(1, 1)
    with freeze_time("2000-01-01") as frozen_time:
        self.middleware.process_view(request, self._test_endpoint, [], {})
        assert not request.will_be_rate_limited
        frozen_time.tick(1)
        self.middleware.process_view(request, self._test_endpoint, [], {})
        assert not request.will_be_rate_limited
tests/sentry/middleware/test_ratelimit_middleware.py
193
sentry
{ "docstring": "Check that for self hosted installs we don't rate limit", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
40
Python
23
2d33f7cba85abb192111733892f0e7ac49812054
test_ratelimit_middleware.py
98,629
12
121
test_self_hosted_rate_limit_check
https://github.com/getsentry/sentry.git
ref(rate limits): Move settings out of sentry (#33806) * ref(rate limits): Move settings out of sentry
144
0
19,592
11
3
13
def get_queryset(self, request):
    queryset = SavedFilter.objects.all()
    user = request.user
    if user.is_superuser:
        return queryset
    if user.is_anonymous:
        return queryset.filter(shared=True)
    return queryset.filter(
        Q(shared=True) | Q(user=user)
    )
netbox/extras/views.py
101
netbox
{ "docstring": "\n Return only shared SavedFilters, or those owned by the current user, unless\n this is a superuser.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 16 }
23
Python
18
484efdaf75f267a43f9321b938fda1bc967b9e53
views.py
265,983
10
62
get_queryset
https://github.com/netbox-community/netbox.git
Closes #9623: Implement saved filters (#10801) * Initial work on saved filters * Return only enabled/shared filters * Add tests * Clean up filtering of usable SavedFilters
105
0
78,254
11
1
2
def locations(self):
    return self["locations"]
packages/python/plotly/plotly/graph_objs/_choropleth.py
22
plotly.py
{ "docstring": "\n Sets the coordinates via location IDs or names. See\n `locationmode` for more info.\n\n The 'locations' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n ", "language": "en", "n_whitespaces": 92, "n_words": 35, "vocab_size": 34 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_choropleth.py
226,464
2
11
locations
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
58,137
7
5
21
def get_feature_names_out(self, input_features=None):
    powers = self.powers_
    input_features = _check_feature_names_in(self, input_features)
    feature_names = []
    for row in powers:
        inds = np.where(row)[0]
        if len(inds):
            name = " ".join(
                "%s^%d" % (input_features[ind], exp)
                if exp != 1
                else input_features[ind]
                for ind, exp in zip(inds, row[inds])
            )
        else:
            name = "1"
        feature_names.append(name)
    return np.asarray(feature_names, dtype=object)
sklearn/preprocessing/_polynomial.py
176
scikit-learn
{ "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input features.\n\n - If `input_features is None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n ", "language": "en", "n_whitespaces": 221, "n_words": 76, "vocab_size": 54 }
51
Python
41
279388d9ed2ea83194dd45a2d78161be30b43aa7
_polynomial.py
259,120
17
111
get_feature_names_out
https://github.com/scikit-learn/scikit-learn.git
DOC Improve get_feature_names_out docstrings (#22718) Co-authored-by: Thomas J. Fan <[email protected]>
258
0
75,579
16
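Illustrative addition (not part of the record above): a minimal sketch of the generated names, assuming scikit-learn 1.0 or later where PolynomialFeatures exposes get_feature_names_out.

import numpy as np
from sklearn.preprocessing import PolynomialFeatures

X = np.arange(6).reshape(3, 2)
poly = PolynomialFeatures(degree=2).fit(X)

# Names are assembled from the powers_ matrix, e.g. "a^2" and "a b" for quadratic terms.
print(poly.get_feature_names_out(["a", "b"]))
# ['1' 'a' 'b' 'a^2' 'a b' 'b^2']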
1
17
def squared_hinge(y_true, y_pred):
    y_pred = tf.convert_to_tensor(y_pred)
    y_true = tf.cast(y_true, y_pred.dtype)
    y_true = _maybe_convert_labels(y_true)
    return backend.mean(
        tf.square(tf.maximum(1.0 - y_true * y_pred, 0.0)), axis=-1
    )


@keras_export("keras.metrics.hinge", "keras.losses.hinge")
@tf.__internal__.dispatch.add_dispatch_support
keras/losses.py
125
@keras_export("keras.metrics.hinge", "keras.losses.hinge") @tf.__internal__.dispatch.add_dispatch_support
keras
{ "docstring": "Computes the squared hinge loss between `y_true` and `y_pred`.\n\n `loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)`\n\n Standalone usage:\n\n >>> y_true = np.random.choice([-1, 1], size=(2, 3))\n >>> y_pred = np.random.random(size=(2, 3))\n >>> loss = tf.keras.losses.squared_hinge(y_true, y_pred)\n >>> assert loss.shape == (2,)\n >>> assert np.array_equal(\n ... loss.numpy(),\n ... np.mean(np.square(np.maximum(1. - y_true * y_pred, 0.)), axis=-1))\n\n Args:\n y_true: The ground truth values. `y_true` values are expected to be -1 or 1.\n If binary (0 or 1) labels are provided we will convert them to -1 or 1.\n shape = `[batch_size, d0, .. dN]`.\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.\n\n Returns:\n Squared hinge loss values. shape = `[batch_size, d0, .. dN-1]`.\n ", "language": "en", "n_whitespaces": 187, "n_words": 113, "vocab_size": 73 }
26
Python
22
84afc5193d38057e2e2badf9c889ea87d80d8fbf
losses.py
274,560
7
66
squared_hinge
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
49
1
81,237
13
2
11
async def results_directory(self) -> Path:
    directory = Path(os.getcwd()) / ".prefect-results"
    os.makedirs(directory, exist_ok=True)
    for filename in os.listdir(directory):
        os.unlink(directory / filename)
    return directory
tests/flow_runners/test_kubernetes.py
85
prefect
{ "docstring": "In order to share results reliably with the Kubernetes cluster, we need to be\n somehwere in the user's directory tree for the most cross-platform\n compatibilty. It's challenging to coordinate /tmp/ directories across systems", "language": "en", "n_whitespaces": 46, "n_words": 33, "vocab_size": 29 }
21
Python
19
ab322ef9b1bb65887984854dc39b316f98da3b97
test_kubernetes.py
56,186
9
50
results_directory
https://github.com/PrefectHQ/prefect.git
Allow Kubernetes users to customize or replace the Job manifest for flow runs Adding support for either replacing the base `job=` for a KubernetesFlowRunner, applying a list of RFC 6902 JSON patches provided by `customizations=`, or both. This implements the core changes, while preserving backwards compatiblity with the current API. Users can still provide `image=`, `namepace=` and other top-level parameters, which are now considered "shortcuts" for generating JSON patches. This is most of the work for PrefectHQ/orion#1900, but does not include the planned CLI updates to allow users to preview their jobs. Those will come in a separate change. Also updating the Kubernetes integration tests to be more reliable, and adding docs about how to get set up for running them.
67
0
11,458
11
1
2
def b(self):
    return self["b"]
packages/python/plotly/plotly/graph_objs/_carpet.py
22
plotly.py
{ "docstring": "\n A two dimensional array of y coordinates at each carpet point.\n\n The 'b' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n ", "language": "en", "n_whitespaces": 83, "n_words": 33, "vocab_size": 32 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_carpet.py
226,416
2
11
b
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
58,089
7
1
2
def simplify(self):
    return self["simplify"]
packages/python/plotly/plotly/graph_objs/scatter/_line.py
22
plotly.py
{ "docstring": "\n Simplifies lines by removing nearly-collinear points. When\n transitioning lines, it may be desirable to disable this so\n that the number of points along the resulting SVG path is\n unaffected.\n\n The 'simplify' property must be specified as a bool\n (either True, or False)\n\n Returns\n -------\n bool\n ", "language": "en", "n_whitespaces": 116, "n_words": 45, "vocab_size": 42 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_line.py
233,420
2
11
simplify
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
64,864
7
8
27
def set_active(self, index):
    if index not in range(len(self.labels)):
        raise ValueError(f'Invalid CheckButton index: {index}')

    if colors.same_color(
            self._crosses.get_facecolor()[index], colors.to_rgba("none")
    ):
        self._crosses.get_facecolor()[index] = colors.to_rgba("k")
    else:
        self._crosses.get_facecolor()[index] = colors.to_rgba("none")

    if hasattr(self, "_rectangles"):
        for i, p in enumerate(self._rectangles):
            p.set_facecolor("k" if colors.same_color(
                p.get_facecolor(), colors.to_rgba("none")) else "none")

    if self.drawon:
        self.ax.figure.canvas.draw()

    if self.eventson:
        self._observers.process('clicked', self.labels[index].get_text())
lib/matplotlib/widgets.py
295
matplotlib
{ "docstring": "\n Toggle (activate or deactivate) a check button by index.\n\n Callbacks will be triggered if :attr:`eventson` is True.\n\n Parameters\n ----------\n index : int\n Index of the check button to toggle.\n\n Raises\n ------\n ValueError\n If *index* is invalid.\n ", "language": "en", "n_whitespaces": 122, "n_words": 36, "vocab_size": 33 }
47
Python
37
723cd86d7d7bdc14a4d3fc0e08c3a01e72d310b6
widgets.py
110,191
18
174
set_active
https://github.com/matplotlib/matplotlib.git
Use scatter for check boxes instead of Rectangle With the current implementation, the boxes get stretched into rectangles if the aspect ratio is not maintained. To overcome this, the boxes are now created using scatter instead to maintain their shapes.
249
0
23,965
17
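Illustrative addition (not part of the record above): a minimal sketch of toggling a check box programmatically, assuming a recent matplotlib release with the scatter-based CheckButtons shown above.

import matplotlib.pyplot as plt
from matplotlib.widgets import CheckButtons

fig, ax = plt.subplots()
check = CheckButtons(ax, labels=["grid", "legend"], actives=[True, False])

# Toggles the second box; callbacks registered via on_clicked() fire because
# eventson is True by default.
check.set_active(1)
plt.close(fig)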
4
10
def column_partitions(cls, partitions, full_axis=True):
    if not isinstance(partitions, list):
        partitions = [partitions]
    return [
        cls._column_partitions_class(col, full_axis=full_axis)
        for frame in partitions
        for col in frame.T
    ]
modin/core/dataframe/pandas/partitioning/partition_manager.py
74
modin
{ "docstring": "\n Get the list of `BaseDataframeAxisPartition` objects representing column-wise paritions.\n\n Parameters\n ----------\n partitions : list-like\n List of (smaller) partitions to be combined to column-wise partitions.\n full_axis : bool, default: True\n Whether or not this partition contains the entire column axis.\n\n Returns\n -------\n list\n A list of `BaseDataframeAxisPartition` objects.\n\n Notes\n -----\n Each value in this list will be an `BaseDataframeAxisPartition` object.\n `BaseDataframeAxisPartition` is located in `axis_partition.py`.\n ", "language": "en", "n_whitespaces": 189, "n_words": 64, "vocab_size": 48 }
24
Python
21
8d1004fdbdaa05700613c8e6287641a732acf606
partition_manager.py
153,178
8
49
column_partitions
https://github.com/modin-project/modin.git
FIX-#3675: Expand virtual partitioning utility (#3886) Co-authored-by: mvashishtha <[email protected]> Co-authored-by: jeffreykennethli <[email protected]> Co-authored-by: Anatoly Myachev <[email protected]> Co-authored-by: Vasily Litvinov <[email protected]> Co-authored-by: Alexey Prutskov <[email protected]> Co-authored-by: Mahesh Vashishtha <[email protected]> Co-authored-by: Naren Krishna <[email protected]> Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Dmitry Chigarev <[email protected]> Co-authored-by: Yaroslav Igoshev <[email protected]> Co-authored-by: Doris Lee <[email protected]> Co-authored-by: Aditya Parameswaran <[email protected]> Co-authored-by: Rehan Sohail Durrani <[email protected]> Co-authored-by: Susmit Vengurlekar <[email protected]> Signed-off-by: Devin Petersohn <[email protected]>
96
0
35,281
9
16
13
def token_kwargs(bits, parser, support_legacy=False):
    if not bits:
        return {}
    match = kwarg_re.match(bits[0])
    kwarg_format = match and match[1]
    if not kwarg_format:
        if not support_legacy:
            return {}
        if len(bits) < 3 or bits[1] != "as":
            return {}

    kwargs = {}
    while bits:
        if kwarg_format:
            match = kwarg_re.match(bits[0])
            if not match or not match[1]:
                return kwargs
            key, value = match.groups()
            del bits[:1]
        else:
            if len(bits) < 3 or bits[1] != "as":
                return kwargs
            key, value = bits[2], bits[0]
            del bits[:3]
        kwargs[key] = parser.compile_filter(value)
        if bits and not kwarg_format:
            if bits[0] != "and":
                return kwargs
            del bits[:1]
    return kwargs
django/template/base.py
303
django
{ "docstring": "\n Parse token keyword arguments and return a dictionary of the arguments\n retrieved from the ``bits`` token list.\n\n `bits` is a list containing the remainder of the token (split by spaces)\n that is to be checked for arguments. Valid arguments are removed from this\n list.\n\n `support_legacy` - if True, the legacy format ``1 as foo`` is accepted.\n Otherwise, only the standard ``foo=1`` format is allowed.\n\n There is no requirement for all remaining token ``bits`` to be keyword\n arguments, so return the dictionary as soon as an invalid argument format\n is reached.\n ", "language": "en", "n_whitespaces": 124, "n_words": 90, "vocab_size": 59 }
95
Python
40
9c19aff7c7561e3a82978a272ecdaad40dda5c00
base.py
206,187
29
188
token_kwargs
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
334
0
51,399
14
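Illustrative addition (not part of the record above): a rough sketch of the parsing behaviour. In real code the parser argument comes from the template engine inside a custom tag's compilation function; constructing Parser([]) directly here is only for demonstration.

from django.template.base import Parser, token_kwargs

# e.g. the tail of {% include "card.html" with title=heading count=3 %}
bits = ["title=heading", "count=3"]
kwargs = token_kwargs(bits, Parser([]))

# Values are FilterExpression objects, resolved later against a Context;
# the bits list is consumed in place.
print(sorted(kwargs), bits)  # ['count', 'title'] []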
1
2
def xpad(self):
    return self["xpad"]
packages/python/plotly/plotly/graph_objs/bar/marker/_colorbar.py
22
plotly.py
{ "docstring": "\n Sets the amount of padding (in px) along the x direction.\n\n The 'xpad' property is a number and may be specified as:\n - An int or float in the interval [0, inf]\n\n Returns\n -------\n int|float\n ", "language": "en", "n_whitespaces": 87, "n_words": 35, "vocab_size": 33 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_colorbar.py
228,737
2
11
xpad
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
60,410
7
6
6
async def load(self) -> bool:
    if not self.name and self.flow_name:
        raise ValueError("Both a deployment name and flow name must be provided.")
src/prefect/deployments.py
43
prefect
{ "docstring": "\n Queries the API for a deployment with this name for this flow, and if found, prepopulates\n settings. Returns a boolean specifying whether a load was successful or not.\n ", "language": "en", "n_whitespaces": 51, "n_words": 28, "vocab_size": 24 }
21
Python
19
f107fb0dcffae284cbefd7590274087b147c8483
deployments.py
58,295
34
159
load
https://github.com/PrefectHQ/prefect.git
Implement load and update methods on deployment objects
46
0
11,750
10
1
27
async def test_homeassistant_bridge_fan_setup(hass):
    accessories = await setup_accessories_from_file(
        hass, "home_assistant_bridge_fan.json"
    )
    await setup_test_accessories(hass, accessories)

    await assert_devices_and_entities_created(
        hass,
        DeviceTestInfo(
            unique_id=HUB_TEST_ACCESSORY_ID,
            name="Home Assistant Bridge",
            model="Bridge",
            manufacturer="Home Assistant",
            sw_version="0.104.0.dev0",
            hw_version="",
            serial_number="homekit.bridge",
            devices=[
                DeviceTestInfo(
                    name="Living Room Fan",
                    model="Fan",
                    manufacturer="Home Assistant",
                    sw_version="0.104.0.dev0",
                    hw_version="",
                    serial_number="fan.living_room_fan",
                    unique_id="00:00:00:00:00:00:aid:1256851357",
                    devices=[],
                    entities=[
                        EntityTestInfo(
                            entity_id="fan.living_room_fan",
                            friendly_name="Living Room Fan",
                            unique_id="00:00:00:00:00:00_1256851357_8",
                            supported_features=(
                                FanEntityFeature.DIRECTION
                                | FanEntityFeature.SET_SPEED
                                | FanEntityFeature.OSCILLATE
                            ),
                            capabilities={
                                "preset_modes": None,
                            },
                            state="off",
                        )
                    ],
                ),
            ],
            entities=[],
        ),
    )
tests/components/homekit_controller/specific_devices/test_homeassistant_bridge.py
257
core
{ "docstring": "Test that a SIMPLEconnect fan can be correctly setup in HA.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
66
Python
50
f23b1750e85f07091eb896a0b12b8f95e5646338
test_homeassistant_bridge.py
288,865
46
156
test_homeassistant_bridge_fan_setup
https://github.com/home-assistant/core.git
Migrate HomeKit Controller to use stable identifiers (#80064)
828
0
88,014
23
1
2
def get_progressbar_class():


@hookspec
src/ocrmypdf/pluginspec.py
16
@hookspec
OCRmyPDF
{ "docstring": "Called to obtain a class that can be used to monitor progress.\n\n A progress bar is assumed, but this could be used for any type of monitoring.\n\n The class should follow a tqdm-like protocol. Calling the class should return\n a new progress bar object, which is activated with ``__enter__`` and terminated\n ``__exit__``. An update method is called whenever the progress bar is updated.\n Progress bar objects will not be reused; a new one will be created for each\n group of tasks.\n\n The progress bar is held in the main process/thread and not updated by child\n process/threads. When a child notifies the parent of completed work, the\n parent updates the progress bar.\n\n The arguments are the same as `tqdm <https://github.com/tqdm/tqdm>`_ accepts.\n\n Progress bars should never write to ``sys.stdout``, or they will corrupt the\n output if OCRmyPDF writes a PDF to standard output.\n\n The type of events that OCRmyPDF reports to a progress bar may change in\n minor releases.\n\n Here is how OCRmyPDF will use the progress bar:\n\n Example:\n\n pbar_class = pm.hook.get_progressbar_class()\n with pbar_class(**tqdm_kwargs) as pbar:\n ...\n pbar.update(1)\n ", "language": "en", "n_whitespaces": 263, "n_words": 176, "vocab_size": 111 }
3
Python
3
1950acfbda3a659ca70658c848f900306ab2e35e
pluginspec.py
30,502
1
5
get_progressbar_class
https://github.com/ocrmypdf/OCRmyPDF.git
docs: proofread plugins
5
1
5,624
6
1
4
def test_context_placement_group():
    driver_code = 
    proc = run_string_as_driver_nonblocking(driver_code)
python/ray/data/tests/test_context_propagation.py
27
ray
{ "docstring": "\nimport ray\nfrom ray.data.context import DatasetContext\nfrom ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy\nfrom ray._private.test_utils import placement_group_assert_no_leak\n\nray.init(num_cpus=1)\n\ncontext = DatasetContext.get_current()\n# This placement group will take up all cores of the local cluster.\nplacement_group = ray.util.placement_group(\n name=\"core_hog\",\n strategy=\"SPREAD\",\n bundles=[\n {\"CPU\": 1},\n ],\n)\nray.get(placement_group.ready())\ncontext.scheduling_strategy = PlacementGroupSchedulingStrategy(placement_group)\npipe = ray.data.range(100, parallelism=2) \\\n .window(blocks_per_window=1) \\\n .map(lambda x: x + 1)\nassert pipe.take_all() == list(range(1, 101))\nplacement_group_assert_no_leak([placement_group])\nray.shutdown()\n ", "language": "en", "n_whitespaces": 78, "n_words": 64, "vocab_size": 55 }
7
Python
6
68d4dd3a8b2defa5549cfa70e59aa26f2d4825a3
test_context_propagation.py
139,754
30
23
test_context_placement_group
https://github.com/ray-project/ray.git
[Datasets] Add explicit resource allocation option via a top-level scheduling strategy (#24438) Instead of letting Datasets implicitly use cluster resources in the margins of explicit allocations of other libraries, such as Tune, Datasets should provide an option for explicitly allocating resources for a Datasets workload for users that want to box Datasets in. This PR adds such an explicit resource allocation option, via exposing a top-level scheduling strategy on the DatasetContext with which a placement group can be given.
13
0
31,769
8
4
13
def child_identifiers(self):
    used_names = set()
    result = []
    for panel in self.children:
        base_name = panel.clean_name or "panel"
        candidate_name = base_name
        suffix = 0
        while candidate_name in used_names:
            suffix += 1
            candidate_name = "%s%d" % (base_name, suffix)
        result.append(candidate_name)
        used_names.add(candidate_name)
    return result
wagtail/admin/panels.py
113
wagtail
{ "docstring": "\n A list of identifiers corresponding to child panels in ``self.children``, formed from the clean_name property\n but validated to be unique and non-empty.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 21 }
40
Python
29
5521e3b59f45af830ebac3c5686e092616eb82e4
panels.py
78,837
13
66
child_identifiers
https://github.com/wagtail/wagtail.git
Update panel templates for new designs (EditHandler rewrite) Co-authored-by: Thibaud Colas <[email protected]>
171
0
16,826
12
1
7
def standard_b64decode(s):
    return b64decode(s)


_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_')
_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/')
python3.10.4/Lib/base64.py
59
XX-Net
{ "docstring": "Decode bytes encoded with the standard Base64 alphabet.\n\n Argument s is a bytes-like object or ASCII string to decode. The result\n is returned as a bytes object. A binascii.Error is raised if the input\n is incorrectly padded. Characters that are not in the standard alphabet\n are discarded prior to the padding check.\n ", "language": "en", "n_whitespaces": 70, "n_words": 52, "vocab_size": 41 }
12
Python
11
8198943edd73a363c266633e1aa5b2a9e9c9f526
base64.py
221,074
2
11
standard_b64decode
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
16
0
56,186
7
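Illustrative addition (not part of the record above): a quick round-trip through the public base64 module.

import base64

encoded = base64.standard_b64encode(b"hello")
assert encoded == b"aGVsbG8="
# Incorrect padding would raise binascii.Error, as the docstring above notes.
assert base64.standard_b64decode(encoded) == b"hello"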
1
7
def test_logout_doesnt_cache(self):
    response = self.client.get("/logout/")
    self.assertIn("no-store", response.headers["Cache-Control"])
tests/auth_tests/test_views.py
54
django
{ "docstring": "\n The logout() view should send \"no-cache\" headers for reasons described\n in #25490.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 12 }
7
Python
7
9c19aff7c7561e3a82978a272ecdaad40dda5c00
test_views.py
201,610
3
29
test_logout_doesnt_cache
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
28
0
49,975
9
4
10
def reset_page_edit_handler_cache(**kwargs):
    if kwargs["setting"] == "WAGTAILADMIN_COMMENTS_ENABLED":
        set_default_page_edit_handlers(Page)
        for model in apps.get_models():
            if issubclass(model, Page):
                model.get_edit_handler.cache_clear()
wagtail/admin/edit_handlers.py
76
wagtail
{ "docstring": "\n Clear page edit handler cache when global WAGTAILADMIN_COMMENTS_ENABLED settings are changed\n ", "language": "en", "n_whitespaces": 18, "n_words": 11, "vocab_size": 11 }
15
Python
14
d10f15e55806c6944827d801cd9c2d53f5da4186
edit_handlers.py
71,107
6
43
reset_page_edit_handler_cache
https://github.com/wagtail/wagtail.git
Reformat with black
61
0
15,624
14
5
26
def coefficient_system(f, params):
    if isinstance(f, Eq):
        # got equation, so move all the
        # terms to the left hand side
        f = f.lhs - f.rhs
    syms = set(params)
    # e.g. A*exp(x) + B - (exp(x) + y)
    fex = _mexpand(f, recursive=True)
    # -(exp(x) + y), A*exp(x) + B
    _, dep = fex.as_independent(*syms)
    # {x} = {x, A, B} - {A, B}
    ex = dep.free_symbols - syms
    # {exp(x): A - 1, 1: B - y}
    gen_co = fex.as_coefficients_dict(*ex)
    # ignore those that are 0 and return None
    # if any coefficients are numbers
    eqs = []
    for k, v in gen_co.items():
        if v.is_zero:
            continue
        elif v.is_number:
            return
        eqs.append(v)
    return set(eqs)
sympy/solvers/solvers.py
180
sympy
{ "docstring": "Return a set of equations which can be solved to determine\n values for undetermined coefficients in an equation like\n $p(x; a_1, \\ldots, a_k) = q(x)$ where both\n $p$ and $q$ are univariate expressions (polynomial in generators of $x$\n but not necessarily in powers of $x$) that depend on $k$ parameters. If\n such a system cannot be determined or has no solution, return None.\n Return of a system does not imply that there is a solution to the\n system. No simplification of coefficients is done and there may be\n expressions which share a common factor.\n\n >>> from sympy import Eq\n >>> from sympy.solvers.solvers import coefficient_system\n >>> from sympy.abc import x, a, b, c\n >>> coefficient_system(Eq(3*a*x + b - 12*x, c), [a, b])\n {3*a - 12, b - c}\n >>> coefficient_system(a*x - x + b + c, [a, b])\n {a - 1, b + c}\n\n If a system is over-determined, it will still be returned. In the\n following, there are not 3 independent relationships for the\n 3 symbols:\n\n >>> coefficient_system(a*x + b + c, [a, b, c])\n {a, b + c}\n\n See Also\n ========\n solve_undetermined_coeffs\n ", "language": "en", "n_whitespaces": 255, "n_words": 183, "vocab_size": 116 }
112
Python
78
44b65804ef1e39f99a890cac82b6d5143251173b
solvers.py
198,556
45
107
coefficient_system
https://github.com/sympy/sympy.git
update free symbol idiom
223
0
48,999
10
4
8
def _iter_built_with_prepended(installed, infos):
    # type: (Candidate, Iterator[IndexCandidateInfo]) -> Iterator[Candidate]
    yield installed
    versions_found = {installed.version}  # type: Set[_BaseVersion]
    for version, func in infos:
        if version in versions_found:
            continue
        candidate = func()
        if candidate is None:
            continue
        yield candidate
        versions_found.add(version)
.venv/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/found_candidates.py
85
transferlearning
{ "docstring": "Iterator for ``FoundCandidates``.\n\n This iterator is used when the resolver prefers the already-installed\n candidate and NOT to upgrade. The installed candidate is therefore\n always yielded first, and candidates from index come later in their\n normal ordering, except skipped when the version is already installed.\n ", "language": "en", "n_whitespaces": 59, "n_words": 44, "vocab_size": 37 }
38
Python
29
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
found_candidates.py
61,120
11
49
_iter_built_with_prepended
https://github.com/jindongwang/transferlearning.git
upd; format
111
0
12,415
10
4
11
def patch_response_headers(response, cache_timeout=None):
    if cache_timeout is None:
        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
    if cache_timeout < 0:
        cache_timeout = 0  # Can't have max-age negative
    if not response.has_header("Expires"):
        response.headers["Expires"] = http_date(time.time() + cache_timeout)
    patch_cache_control(response, max_age=cache_timeout)
django/utils/cache.py
106
django
{ "docstring": "\n Add HTTP caching headers to the given HttpResponse: Expires and\n Cache-Control.\n\n Each header is only added if it isn't already set.\n\n cache_timeout is in seconds. The CACHE_MIDDLEWARE_SECONDS setting is used\n by default.\n ", "language": "en", "n_whitespaces": 51, "n_words": 32, "vocab_size": 30 }
32
Python
25
9c19aff7c7561e3a82978a272ecdaad40dda5c00
cache.py
206,576
8
62
patch_response_headers
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
69
0
51,574
13
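Illustrative addition (not part of the record above): a minimal sketch of the helper on a plain response, assuming it runs inside a configured Django project (Django 3.2 or later for response.headers).

from django.http import HttpResponse
from django.utils.cache import patch_response_headers

response = HttpResponse("cacheable body")
patch_response_headers(response, cache_timeout=300)

# Expires is set to now + 300s and Cache-Control gains max-age=300,
# unless those headers were already present.
print(response.headers["Expires"])
print(response.headers["Cache-Control"])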
1
5
def data(self) -> 'DataRequest._DataContent':
    return DataRequest._DataContent(self.proto.data)
jina/types/request/data.py
35
jina
{ "docstring": "Get the data contaned in this data request\n\n :return: the data content as an instance of _DataContent wrapping docs and groundtruths\n ", "language": "en", "n_whitespaces": 35, "n_words": 21, "vocab_size": 18 }
6
Python
6
933415bfa1f9eb89f935037014dfed816eb9815d
data.py
9,959
6
19
data
https://github.com/jina-ai/jina.git
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
20
0
1,807
9
1
12
def test_export_too_many_fields(self): payload = self.make_payload("discover", {"field": ["id"] * (MAX_FIELDS + 1)}) with self.feature("organizations:discover-query"): response = self.get_error_response(self.org.slug, status_code=400, **payload) assert response.data == { "non_field_errors": [ "You can export up to 20 fields at a time. Please delete some and try again." ] }
tests/sentry/data_export/endpoints/test_data_export.py
119
sentry
{ "docstring": "\n Ensures that if too many fields are requested, returns a 400 status code with the\n corresponding error message.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 18 }
42
Python
41
096b5511e244eecd8799b2a0324655207ce8985e
test_data_export.py
100,174
9
67
test_export_too_many_fields
https://github.com/getsentry/sentry.git
ref(tests): Remove `get_valid_response()` (#34822)
125
0
19,768
13
1
12
def test_delete_missing_current_version(self) -> None: e = self.get_failure(self.handler.delete_version(self.local_user), SynapseError) res = e.value.code self.assertEqual(res, 404)
tests/handlers/test_e2e_room_keys.py
68
synapse
{ "docstring": "Check that we get a 404 on deleting nonexistent current version", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
13
Python
12
652d1669c5a103b1c20478770c4aaf18849c09a3
test_e2e_room_keys.py
250,286
5
42
test_delete_missing_current_version
https://github.com/matrix-org/synapse.git
Add missing type hints to tests.handlers. (#14680) And do not allow untyped defs in tests.handlers.
41
0
73,364
11
3
20
def test_write_animation_L(tmp_path): with Image.open("Tests/images/iss634.gif") as orig: assert orig.n_frames > 1 temp_file = str(tmp_path / "temp.webp") orig.save(temp_file, save_all=True) with Image.open(temp_file) as im: assert im.n_frames == orig.n_frames # Compare first and last frames to the original animated GIF orig.load() im.load() assert_image_similar(im, orig.convert("RGBA"), 32.9) if is_big_endian(): webp = parse_version(features.version_module("webp")) if webp < parse_version("1.2.2"): return orig.seek(orig.n_frames - 1) im.seek(im.n_frames - 1) orig.load() im.load() assert_image_similar(im, orig.convert("RGBA"), 32.9)
Tests/test_file_webp_animated.py
261
Pillow
{ "docstring": "\n Convert an animated GIF to animated WebP, then compare the frame count, and first\n and last frames to ensure they're visually similar.\n ", "language": "en", "n_whitespaces": 32, "n_words": 22, "vocab_size": 19 }
62
Python
48
e05b8d74819fa18a908ea201a86138ea3168aba9
test_file_webp_animated.py
242,282
19
153
test_write_animation_L
https://github.com/python-pillow/Pillow.git
libwebp 1.2.2 fixed endian bugs
266
0
69,817
17
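Editor's note: a minimal standalone sketch of the GIF-to-animated-WebP round trip that the test_write_animation_L record above exercises. It assumes a Pillow build with WebP animation support and a hypothetical multi-frame input file named animation.gif; it mirrors the save_all / seek pattern from the test rather than the test's exact assertions.

from PIL import Image

# Save every frame of the GIF into an animated WebP (save_all keeps all frames).
with Image.open("animation.gif") as gif:          # hypothetical input file
    gif.save("animation.webp", save_all=True)

# Re-open both files, compare frame counts, then inspect the last frame,
# following the seek/load pattern used in the test above.
with Image.open("animation.gif") as gif, Image.open("animation.webp") as webp:
    print(gif.n_frames, webp.n_frames)
    webp.seek(webp.n_frames - 1)
    webp.load()
    print(webp.mode, webp.size)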
2
28
async def install_protected_system_blocks(session): for block in [ prefect.blocks.system.JSON, prefect.blocks.system.DateTime, prefect.blocks.system.Secret, prefect.filesystems.LocalFileSystem, prefect.infrastructure.Process, ]: block_type = block._to_block_type() block_type.is_protected = True block_type = await models.block_types.create_block_type( session=session, block_type=block_type, override=True ) block_schema = await models.block_schemas.create_block_schema( session=session, block_schema=block._to_block_schema(block_type_id=block_type.id), override=True, ) @router.post("/install_system_block_types")
src/prefect/orion/api/block_types.py
183
@router.post("/install_system_block_types")
prefect
{ "docstring": "Install block types that the system expects to be present", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
36
Python
29
1a3a3adf0bf4d83206f0367b98905a9db15cfec4
block_types.py
58,977
18
112
install_protected_system_blocks
https://github.com/PrefectHQ/prefect.git
Adds ability to delete block types via the CLI (#6849) * Ensure system blocks are protected on destructive API calls * Enable deleting block types * Ensure Block Types are protected against destructive API actions * Ensure updating protected Block Types on update doesn't impact saving * ⚫ * isort * Suppress status errors * ⚫
165
1
11,846
16
3
7
def pretty_duration(hours): seconds = int(3600 * hours) days, seconds = divmod(seconds, 86400) hours, seconds = divmod(seconds, 3600) minutes, seconds = divmod(seconds, 60) if days > 0: return "%dd %dh %dm" % (days, hours, minutes) if hours > 0: return "%dh %dm" % (hours, minutes) return "%dm" % minutes
homeassistant/components/history_stats/helpers.py
123
core
{ "docstring": "Format a duration in days, hours, minutes, seconds.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
48
Python
30
73a368c24246b081cdb98923ca3180937d436c3b
helpers.py
296,854
10
76
pretty_duration
https://github.com/home-assistant/core.git
Refactor history_stats to minimize database access (part 2) (#70255)
126
0
95,828
9
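Editor's note: a quick standalone check of the pretty_duration helper above, copied as-is and driven with a few sample inputs to make the expected formatting concrete. The sample values are invented.

def pretty_duration(hours):
    seconds = int(3600 * hours)
    days, seconds = divmod(seconds, 86400)
    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)
    if days > 0:
        return "%dd %dh %dm" % (days, hours, minutes)
    if hours > 0:
        return "%dh %dm" % (hours, minutes)
    return "%dm" % minutes

print(pretty_duration(0.5))    # -> 30m
print(pretty_duration(2.25))   # -> 2h 15m
print(pretty_duration(26.5))   # -> 1d 2h 30m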
1
8
def deserialize(name, custom_objects=None): return deserialize_keras_object( name, module_objects=globals(), custom_objects=custom_objects, printable_module_name="loss function", ) @keras_export("keras.losses.get")
keras/losses.py
59
@keras_export("keras.losses.get")
keras
{ "docstring": "Deserializes a serialized loss class/function instance.\n\n Args:\n name: Loss configuration.\n custom_objects: Optional dictionary mapping names (strings) to custom\n objects (classes and functions) to be considered during deserialization.\n\n Returns:\n A Keras `Loss` instance or a loss function.\n ", "language": "en", "n_whitespaces": 75, "n_words": 36, "vocab_size": 33 }
12
Python
12
84afc5193d38057e2e2badf9c889ea87d80d8fbf
losses.py
274,546
7
30
deserialize
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
48
1
81,224
10
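Editor's note: a hedged usage sketch for the deserialize helper above. It assumes a standard TensorFlow/Keras installation; the exact contents of the serialized config may differ across Keras versions, so this only illustrates the round trip and the string-lookup path.

from tensorflow import keras

# Serialize a built-in loss instance to a config dict, then rebuild it.
config = keras.losses.serialize(keras.losses.MeanSquaredError())
loss = keras.losses.deserialize(config)

# String identifiers go through the same registry via losses.get().
mse_fn = keras.losses.get("mean_squared_error")
print(type(loss).__name__, mse_fn.__name__)   # MeanSquaredError mean_squared_error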
1
10
def paths_check(app_configs, **kwargs): return ( path_check("PAPERLESS_DATA_DIR", settings.DATA_DIR) + path_check("PAPERLESS_TRASH_DIR", settings.TRASH_DIR) + path_check("PAPERLESS_MEDIA_ROOT", settings.MEDIA_ROOT) + path_check("PAPERLESS_CONSUMPTION_DIR", settings.CONSUMPTION_DIR) ) @register()
src/paperless/checks.py
88
@register()
paperless-ngx
{ "docstring": "\n Check the various paths for existence, readability and writeability\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
18
Python
16
fc695896dd8b0169001c438054a79e347053fac6
checks.py
318,780
7
47
paths_check
https://github.com/paperless-ngx/paperless-ngx.git
Format Python code with black
54
1
116,915
12
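Editor's note: the path_check helper called by paths_check above is not shown in this record. The sketch below is a hypothetical stand-in that illustrates the same existence/readability/writability idea with plain os calls and string messages instead of Django check objects; the function name and message wording are invented.

import os

def check_path(setting_name, path):
    # Hypothetical simplification: return plain strings rather than Django check messages.
    errors = []
    if not os.path.isdir(path):
        errors.append(f"{setting_name}: {path} does not exist")
    elif not os.access(path, os.R_OK | os.W_OK):
        errors.append(f"{setting_name}: {path} is not readable and writable")
    return errors

print(check_path("PAPERLESS_DATA_DIR", "/tmp"))  # usually []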
3
12
def extract_operations(self) -> List[str]: if not self.step: return [] try: operations = re.findall(r'[-+*^/]', self.step) except TypeError as e: print(f"TYPE: {type(self.step)}") print(f"STEP: {self.step}") raise e return operations
parlai/tasks/reasoning/reason_types/step_by_step.py
111
ParlAI
{ "docstring": "\n Finds all instances of the math operations: -, +, *, ^, / in the step.\n ", "language": "en", "n_whitespaces": 30, "n_words": 15, "vocab_size": 14 }
26
Python
24
0f129e9c38b6b10d80982ecc412785db62842938
step_by_step.py
195,437
13
54
extract_operations
https://github.com/facebookresearch/ParlAI.git
ROSCOE suite of metrics (#4839) * ROSCOE suite of metrics * updating tests * lint * fixing protobuf version to stop cleaninstall failures * updating requirements * convert to absolute path * moving tests because of the dependency issues * adding new dependencies in tests * add test dependencies * fixing deps * updating task list * checklist deps can't be installed on circleci * actually fix protobuf version * protobuf range * protobuf conflict with google-api-core * return tests * convert imports to absolute path * trying checklist again * trying to avoid checklist failures * checklist to teacher tests * add user option to avoid installation failure * jupiter as well * typo * moving into virtual env setup * user param not allowed in virtual env * move spacy to circleCI because it's big * replace local model with HF * fixes based on comments * remove unused nli scores, fix tests * Added path to BART model Co-authored-by: Spencer Poff <[email protected]>
116
0
47,260
15
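Editor's note: the regular expression in extract_operations is easy to check in isolation; this tiny sketch uses a made-up step string.

import re

step = "(7 * 8) + 12 / 4 - 2^3"    # hypothetical reasoning step
print(re.findall(r'[-+*^/]', step))  # -> ['*', '+', '/', '-', '^']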
2
17
def _metadata_reader(self) -> ImgMetaType: for filename, metadata in tqdm(read_image_meta_batch(self._loader.file_list), total=self._loader.count, desc=self._description, leave=False): alignments = self._get_alignments(metadata.get("itxt", {})) yield filename, None, alignments
tools/sort/sort_methods.py
102
faceswap
{ "docstring": " Load metadata from saved aligned faces\n\n Yields\n ------\n filename: str\n The filename that has been read\n image: None\n This will always be ``None`` with the metadata reader\n alignments: dict or ``None``\n The alignment data for the given face or ``None`` if no alignments found\n ", "language": "en", "n_whitespaces": 120, "n_words": 44, "vocab_size": 38 }
20
Python
18
98d01760e469fd2108eed8d0b0a1ba6297c3177c
sort_methods.py
101,620
18
65
_metadata_reader
https://github.com/deepfakes/faceswap.git
Overhaul sort: - Standardize image data reading and writing - Optimize loading (just one pass required) - Make all sort groups binnable (to greater or lesser results) - Add sort by pitch - Deprecate multiple options - linting, docs + locales
170
0
21,028
13
20
30
def _parse_jp2_header(fp): # Find the JP2 header box reader = BoxReader(fp) header = None mimetype = None while reader.has_next_box(): tbox = reader.next_box_type() if tbox == b"jp2h": header = reader.read_boxes() break elif tbox == b"ftyp": if reader.read_fields(">4s")[0] == b"jpx ": mimetype = "image/jpx" size = None mode = None bpc = None nc = None dpi = None # 2-tuple of DPI info, or None while header.has_next_box(): tbox = header.next_box_type() if tbox == b"ihdr": height, width, nc, bpc = header.read_fields(">IIHB") size = (width, height) if nc == 1 and (bpc & 0x7F) > 8: mode = "I;16" elif nc == 1: mode = "L" elif nc == 2: mode = "LA" elif nc == 3: mode = "RGB" elif nc == 4: mode = "RGBA" elif tbox == b"res ": res = header.read_boxes() while res.has_next_box(): tres = res.next_box_type() if tres == b"resc": vrcn, vrcd, hrcn, hrcd, vrce, hrce = res.read_fields(">HHHHBB") hres = _res_to_dpi(hrcn, hrcd, hrce) vres = _res_to_dpi(vrcn, vrcd, vrce) if hres is not None and vres is not None: dpi = (hres, vres) break if size is None or mode is None: raise SyntaxError("Malformed JP2 header") return size, mode, mimetype, dpi ## # Image plugin for JPEG2000 images.
src/PIL/Jpeg2KImagePlugin.py
476
Pillow
{ "docstring": "Parse the JP2 header box to extract size, component count,\n color space information, and optionally DPI information,\n returning a (size, mode, mimetype, dpi) tuple.", "language": "en", "n_whitespaces": 29, "n_words": 24, "vocab_size": 23 }
198
Python
101
ee85e387bab535e2339b9d3cd1ab87c61d23af15
Jpeg2KImagePlugin.py
242,745
46
285
_parse_jp2_header
https://github.com/python-pillow/Pillow.git
Remove redundant parentheses
658
0
69,908
18
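Editor's note: a simplified sketch of the box framing that _parse_jp2_header walks via its BoxReader: each JP2/ISO-style box starts with a 4-byte big-endian length and a 4-byte type. The byte stream below is synthetic, so it only demonstrates the framing, not a complete JP2 header.

import struct
from io import BytesIO

def walk_boxes(fp):
    # Yield (type, payload) pairs from a JP2/ISO-style box stream.
    while True:
        header = fp.read(8)
        if len(header) < 8:
            return
        length, box_type = struct.unpack(">I4s", header)
        if length == 0:            # box extends to the end of the stream
            payload = fp.read()
        elif length == 1:          # 64-bit extended length follows the header
            length = struct.unpack(">Q", fp.read(8))[0]
            payload = fp.read(length - 16)
        else:
            payload = fp.read(length - 8)
        yield box_type, payload

stream = BytesIO(
    struct.pack(">I4s", 12, b"ftyp") + b"jp2 "   # 'ftyp' box with a 4-byte payload
    + struct.pack(">I4s", 8, b"jp2h")            # empty 'jp2h' box
)
for box_type, payload in walk_boxes(stream):
    print(box_type, len(payload))                # b'ftyp' 4, then b'jp2h' 0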
1
7
def sparsemax_bisect(X, dim=-1, n_iter=50, ensure_sum_one=True): return SparsemaxBisectFunction.apply(X, dim, n_iter, ensure_sum_one)
ludwig/utils/entmax/root_finding.py
47
ludwig
{ "docstring": "sparsemax: normalizing sparse transform (a la softmax), via bisection.\n\n Solves the projection:\n\n min_p ||x - p||_2 s.t. p >= 0, sum(p) == 1.\n\n Parameters\n ----------\n X : torch.Tensor\n The input tensor.\n\n dim : int\n The dimension along which to apply sparsemax.\n\n n_iter : int\n Number of bisection iterations. For float32, 24 iterations should\n suffice for machine precision.\n\n ensure_sum_one : bool,\n Whether to divide the result by its sum. If false, the result might\n sum to close but not exactly 1, which might cause downstream problems.\n\n Note: This function does not yet support normalizing along anything except\n the last dimension. Please use transposing and views to achieve more\n general behavior.\n\n Returns\n -------\n P : torch tensor, same shape as X\n The projection result, such that P.sum(dim=dim) == 1 elementwise.\n ", "language": "en", "n_whitespaces": 231, "n_words": 128, "vocab_size": 107 }
10
Python
10
20a8a6fdb516e543d4598c852063ba0fb407f3ba
root_finding.py
6,299
2
32
sparsemax_bisect
https://github.com/ludwig-ai/ludwig.git
Removes dependency on entmax from PyPI, adds entmax source to utils (#1778) * Removes dependency on entmax from PyPi, add entmax source code into utils instead. * Removes build status and image from README * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix python formatting in docs for pre-commit. * Removes __main__ from test_losses.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update entmax imports. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: Daniel Treiman <[email protected]> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
16
0
957
7
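Editor's note: the projection described in the sparsemax_bisect docstring above also has a closed-form solution via sorting. This NumPy sketch computes it for a single 1-D vector purely to make the objective min_p ||x - p||^2 s.t. p >= 0, sum(p) == 1 concrete; it is not a replacement for the bisection implementation and handles only the last-dimension, single-vector case.

import numpy as np

def sparsemax_1d(z):
    # Closed-form Euclidean projection of z onto the probability simplex
    # with sparse output (Martins & Astudillo, 2016).
    z_sorted = np.sort(z)[::-1]
    cumsum = np.cumsum(z_sorted)
    k = np.arange(1, z.size + 1)
    support = 1 + k * z_sorted > cumsum       # coordinates kept in the support
    k_z = k[support][-1]
    tau = (cumsum[support][-1] - 1) / k_z     # threshold subtracted from z
    return np.maximum(z - tau, 0.0)

p = sparsemax_1d(np.array([1.5, 0.2, -0.3]))
print(p, p.sum())   # -> [1. 0. 0.] 1.0  (sparse probabilities summing to 1)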
2
7
async def async_load(self) -> _T | None: if self._load_task is None: self._load_task = self.hass.async_create_task(self._async_load()) return await self._load_task
homeassistant/helpers/storage.py
65
core
{ "docstring": "Load data.\n\n If the expected version and minor version do not match the given versions, the\n migrate function will be invoked with migrate_func(version, minor_version, config).\n\n Will ensure that when a call comes in while another one is in progress,\n the second call will wait and return the result of the first call.\n ", "language": "en", "n_whitespaces": 87, "n_words": 52, "vocab_size": 42 }
17
Python
14
16900dcef15bdb9016feabd12bfec94d61ed4df6
storage.py
316,852
12
38
async_load
https://github.com/home-assistant/core.git
Make Store a generic class (#74617)
49
0
115,428
12
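Editor's note: a self-contained sketch of the "share one in-flight load" pattern used by async_load above: concurrent callers all await the same cached task, so the underlying read happens once. The CachedLoader class and its fake disk read are invented for illustration and stand in for the Store internals.

import asyncio

class CachedLoader:
    def __init__(self):
        self._load_task = None

    async def _do_load(self):
        await asyncio.sleep(0.1)        # pretend to read from disk
        return {"answer": 42}

    async def async_load(self):
        if self._load_task is None:
            self._load_task = asyncio.create_task(self._do_load())
        return await self._load_task    # later callers await the same task

async def main():
    loader = CachedLoader()
    a, b = await asyncio.gather(loader.async_load(), loader.async_load())
    print(a is b)                       # True: both callers got the same result

asyncio.run(main())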
3
17
def get_ld_headers(file): # get_ld_headers parsing: # 1. Find a line that starts with /, ./, or ../ - set as ld_header # 2. If "INDEX" in occurs in a following line - return ld_header # 3. get info (lines starting with [0-9]) ldr_headers = [] p = Popen(["/usr/bin/dump", f"-X{AIX_ABI}", "-H", file], universal_newlines=True, stdout=PIPE, stderr=DEVNULL) # be sure to read to the end-of-file - getting all entries while True: ld_header = get_ld_header(p) if ld_header: ldr_headers.append((ld_header, get_ld_header_info(p))) else: break p.stdout.close() p.wait() return ldr_headers
python3.10.4/Lib/ctypes/_aix.py
139
XX-Net
{ "docstring": "\n Parse the header of the loader section of executable and archives\n This function calls /usr/bin/dump -H as a subprocess\n and returns a list of (ld_header, ld_header_info) tuples.\n ", "language": "en", "n_whitespaces": 40, "n_words": 27, "vocab_size": 22 }
81
Python
64
8198943edd73a363c266633e1aa5b2a9e9c9f526
_aix.py
221,795
13
79
get_ld_headers
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
167
0
56,514
14
6
24
def make_variant_item_code(template_item_code, template_item_name, variant): if variant.item_code: return abbreviations = [] for attr in variant.attributes: item_attribute = frappe.db.sql( , {"attribute": attr.attribute, "attribute_value": attr.attribute_value}, as_dict=True, ) if not item_attribute: continue # frappe.throw(_('Invalid attribute {0} {1}').format(frappe.bold(attr.attribute), # frappe.bold(attr.attribute_value)), title=_('Invalid Attribute'), # exc=InvalidItemAttributeValueError) abbr_or_value = ( cstr(attr.attribute_value) if item_attribute[0].numeric_values else item_attribute[0].abbr ) abbreviations.append(abbr_or_value) if abbreviations: variant.item_code = "{0}-{1}".format(template_item_code, "-".join(abbreviations)) variant.item_name = "{0}-{1}".format(template_item_name, "-".join(abbreviations)) @frappe.whitelist()
erpnext/controllers/item_variant.py
224
@frappe.whitelist()
erpnext
{ "docstring": "Uses template's item code and abbreviations to make variant's item codeselect i.numeric_values, v.abbr\n\t\t\tfrom `tabItem Attribute` i left join `tabItem Attribute Value` v\n\t\t\t\ton (i.name=v.parent)\n\t\t\twhere i.name=%(attribute)s and (v.attribute_value=%(attribute_value)s or i.numeric_values = 1)", "language": "en", "n_whitespaces": 29, "n_words": 33, "vocab_size": 30 }
60
Python
49
494bd9ef78313436f0424b918f200dab8fc7c20b
item_variant.py
65,630
22
128
make_variant_item_code
https://github.com/frappe/erpnext.git
style: format code with black
37
1
13,962
13
1
10
def Generate(self, request, context): context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!')
server/bloom_inference/pb/generate_pb2_grpc.py
55
text-generation-inference
{ "docstring": "/ Generate tokens for a batch without cache\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
12
Python
10
295831a481241d3d06b49f646a40f27b1297fab5
generate_pb2_grpc.py
338,490
4
31
Generate
https://github.com/huggingface/text-generation-inference.git
Init
40
0
121,221
9