Dataset columns (name: type, observed value or length range):
  complexity: int64, 1 to 139
  fun_name: string, length 1 to 80
  code: string, length 101 to 62.2k
  commit_id: string, length 40 to 40
  ast_errors: string, length 0 to 3.11k
  ast_levels: int64, 6 to 36
  file_name: string, length 5 to 79
  n_ast_nodes: int64, 17 to 19.2k
  commit_message: string, length 3 to 15.3k
  d_id: int64, 12 to 121k
  n_ast_errors: int64, 0 to 9
  n_whitespaces: int64, 4 to 10.8k
  token_counts: int64, 5 to 3.06k
  vocab_size: int64, 4 to 1.11k
  id: int64, 20 to 338k
  n_words: int64, 4 to 4.82k
  repo: string, length 3 to 22
  n_identifiers: int64, 2 to 176
  path: string, length 7 to 134
  language: string, 1 distinct value (Python)
  nloc: int64, 1 to 413
  documentation: dict
  url: string, length 31 to 59
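Several of the integer columns above (n_ast_nodes, ast_levels, nloc, n_identifiers) are per-function metrics derived from the code column. The exact definitions used to build the dataset are not stated here, so the following is only a minimal sketch of how comparable numbers could be approximated with Python's standard ast module; the helper name approx_metrics and its counting rules are illustrative assumptions, not the dataset's actual tooling.

import ast

def approx_metrics(source: str) -> dict:
    # Parse the function source and collect every node in its syntax tree.
    tree = ast.parse(source)
    nodes = list(ast.walk(tree))

    def depth(node: ast.AST) -> int:
        # Depth of the AST, counting the current node as one level.
        children = list(ast.iter_child_nodes(node))
        return 1 + max((depth(child) for child in children), default=0)

    return {
        "n_ast_nodes": len(nodes),                     # total AST nodes
        "ast_levels": depth(tree),                     # nesting depth of the tree
        "nloc": sum(1 for ln in source.splitlines() if ln.strip()),  # non-blank lines
        "n_identifiers": len({n.id for n in nodes if isinstance(n, ast.Name)}),
    }

print(approx_metrics("def double(x):\n    return x + x\n"))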
3
as_airbyte_message
def as_airbyte_message(self) -> AirbyteMessage:
    now_millis = datetime.now().timestamp() * 1000.0
    trace_exc = self._exception or self
    stack_trace_str = "".join(traceback.TracebackException.from_exception(trace_exc).format())
    trace_message = AirbyteTraceMessage(
        type=TraceType.ERROR,
        emitted_at=now_millis,
        error=AirbyteErrorTraceMessage(
            message=self.message or "Something went wrong in the connector. See the logs for more details.",
            internal_message=self.internal_message,
            failure_type=self.failure_type,
            stack_trace=stack_trace_str,
        ),
    )
    return AirbyteMessage(type=MessageType.TRACE, trace=trace_message)
73c7fad7fce952a8c3ba827ca858e4280bd846f3
14
traced_exception.py
166
CDK: emit `AirbyteTraceMessage` with exception trace information (#12593)
710
0
198
107
40
5,031
45
airbyte
30
airbyte-cdk/python/airbyte_cdk/utils/traced_exception.py
Python
18
{ "docstring": "\n Builds an AirbyteTraceMessage from the exception\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
https://github.com/airbytehq/airbyte.git
1
_get_model_info
def _get_model_info(self, model_name):  # noqa
    return self.storage.get('models')[model_name]
4b49bf89ad95fcc645a249983efd764c1a73e3bb
9
lightwood_handler.py
36
copy from /lightwood_handler branch: added generic serializer for dill support, lw integration functional
25,193
0
22
20
7
114,440
7
mindsdb
5
mindsdb/integrations/lightwood_handler/lightwood_handler/lightwood_handler.py
Python
2
{ "docstring": " Returns a dictionary with three keys: 'jsonai', 'predictor' (serialized), and 'code'. ", "language": "en", "n_whitespaces": 12, "n_words": 11, "vocab_size": 11 }
https://github.com/mindsdb/mindsdb.git
1
get_views
def get_views(self):
    query = f"SELECT * FROM information_schema.views WHERE table_schema NOT IN ('information_schema', 'pg_catalog')"
    result = self.run_native_query(query)
    return result
7e3da9157508a5eb38dbfabbd7f08ba8fa6c5a88
8
postgres_handler.py
36
Get tables, views, describe
25,222
0
47
20
17
114,587
19
mindsdb
5
mindsdb/integrations/postgres_handler/postgres_handler.py
Python
4
{ "docstring": "\n List all views in PostgreSQL without the system views information_schema and pg_catalog\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 11 }
https://github.com/mindsdb/mindsdb.git
10
mod_inverse
def mod_inverse(a, m):
    c = None
    try:
        a, m = as_int(a), as_int(m)
        if m != 1 and m != -1:
            x, _, g = igcdex(a, m)
            if g == 1:
                c = x % m
    except ValueError:
        a, m = sympify(a), sympify(m)
        if not (a.is_number and m.is_number):
            raise TypeError(filldedent('''
                Expected numbers for arguments; symbolic `mod_inverse`
                is not implemented
                but symbolic expressions can be handled with the
                similar function,
                sympy.polys.polytools.invert'''))
        big = (m > 1)
        if big not in (S.true, S.false):
            raise ValueError('m > 1 did not evaluate; try to simplify %s' % m)
        elif big:
            c = 1/a
    if c is None:
        raise ValueError('inverse of %s (mod %s) does not exist' % (a, m))
    return c
f3b08522003f40868afb20304fc0fa5b16d13f6a
15
numbers.py
240
Cleanup documentation
48,421
0
240
149
64
197,274
97
sympy
18
sympy/core/numbers.py
Python
63
{ "docstring": "\n Return the number $c$ such that, $a \\times c = 1 \\pmod{m}$\n where $c$ has the same sign as $m$. If no such value exists,\n a ValueError is raised.\n\n Examples\n ========\n\n >>> from sympy import mod_inverse, S\n\n Suppose we wish to find multiplicative inverse $x$ of\n 3 modulo 11. This is the same as finding $x$ such\n that $3x = 1 \\pmod{11}$. One value of x that satisfies\n this congruence is 4. Because $3 \\times 4 = 12$ and $12 = 1 \\pmod{11}$.\n This is the value returned by ``mod_inverse``:\n\n >>> mod_inverse(3, 11)\n 4\n >>> mod_inverse(-3, 11)\n 7\n\n When there is a common factor between the numerators of\n `a` and `m` the inverse does not exist:\n\n >>> mod_inverse(2, 4)\n Traceback (most recent call last):\n ...\n ValueError: inverse of 2 mod 4 does not exist\n\n >>> mod_inverse(S(2)/7, S(5)/2)\n 7/2\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Modular_multiplicative_inverse\n .. [2] https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm\n \n Expected numbers for arguments; symbolic `mod_inverse`\n is not implemented\n but symbolic expressions can be handled with the\n similar function,\n sympy.polys.polytools.invert", "language": "en", "n_whitespaces": 326, "n_words": 166, "vocab_size": 119 }
https://github.com/sympy/sympy.git
9
potentially_ragged_concat
def potentially_ragged_concat(tensors):
    if len(tensors) == 1:
        return tensors[0]
    if isinstance(tensors[0], tf.SparseTensor):
        return tf.sparse.concat(axis=0, sp_inputs=tensors)
    elif isinstance(tensors[0], tf.RaggedTensor):
        return tf.concat(tensors, axis=0)
    elif not tf.__internal__.tf2.enabled():
        return tf.concat(tensors, axis=0)

    non_batch_shapes = tf.stack([tf.shape(tensor)[1:] for tensor in tensors])
    constant_dims = tf.math.reduce_all(
        non_batch_shapes == non_batch_shapes[:1], axis=0
    )
    if tf.math.reduce_all(constant_dims).numpy().item():
        # All non-batch dims are constant
        return tf.concat(tensors, axis=0)

    # First, identify constant inner dimensions by finding the
    # rightmost dimension that is not constant
    constant_inner_dimensions = (
        constant_dims.numpy().tolist()[::-1].index(False)
    )
    # If there are constant inner dimensions, define a constant inner shape
    if constant_inner_dimensions == 0:
        constant_inner_shape = None
    else:
        constant_inner_shape = tensors[0].shape[-constant_inner_dimensions:]
    return tf.ragged.constant(
        [tensor.numpy() for tensor in tensors], inner_shape=constant_inner_shape
    ).merge_dims(0, 1)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
14
training.py
380
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,801
0
237
242
68
271,566
106
keras
31
keras/engine/training.py
Python
25
{ "docstring": "Concats `Tensor`s along their first dimension.\n\n Args:\n tensors: List of `Tensor`s.\n\n Returns:\n Concatenation of the inputs along the first dimension -- of type `Tensor`\n if all input shapes are compatible, or `RaggedTensor` if not.\n ", "language": "en", "n_whitespaces": 58, "n_words": 34, "vocab_size": 28 }
https://github.com/keras-team/keras.git
8
dag_longest_path
def dag_longest_path(G, weight="weight", default_weight=1, topo_order=None):
    if not G:
        return []

    if topo_order is None:
        topo_order = nx.topological_sort(G)

    dist = {}  # stores {v : (length, u)}
    for v in topo_order:
        us = [
            (dist[u][0] + data.get(weight, default_weight), u)
            for u, data in G.pred[v].items()
        ]

        # Use the best predecessor if there is one and its distance is
        # non-negative, otherwise terminate.
        maxu = max(us, key=lambda x: x[0]) if us else (0, v)
        dist[v] = maxu if maxu[0] >= 0 else (0, v)

    u = None
    v = max(dist, key=lambda x: dist[x][0])
    path = []
    while u != v:
        path.append(v)
        u = v
        v = dist[v][1]

    path.reverse()
    return path


@not_implemented_for("undirected")
304682fd71ba3bae9c456b004bbabafb96022add
@not_implemented_for("undirected")
14
dag.py
295
Add example of topo_order kwarg to dag_longest_path (#5728)
42,147
1
241
185
74
176,856
109
networkx
23
networkx/algorithms/dag.py
Python
22
{ "docstring": "Returns the longest path in a directed acyclic graph (DAG).\n\n If `G` has edges with `weight` attribute the edge data are used as\n weight values.\n\n Parameters\n ----------\n G : NetworkX DiGraph\n A directed acyclic graph (DAG)\n\n weight : str, optional\n Edge data key to use for weight\n\n default_weight : int, optional\n The weight of edges that do not have a weight attribute\n\n topo_order: list or tuple, optional\n A topological order for `G` (if None, the function will compute one)\n\n Returns\n -------\n list\n Longest path\n\n Raises\n ------\n NetworkXNotImplemented\n If `G` is not directed\n\n Examples\n --------\n >>> DG = nx.DiGraph([(0, 1, {'cost':1}), (1, 2, {'cost':1}), (0, 2, {'cost':42})])\n >>> list(nx.all_simple_paths(DG, 0, 2))\n [[0, 1, 2], [0, 2]]\n >>> nx.dag_longest_path(DG)\n [0, 1, 2]\n >>> nx.dag_longest_path(DG, weight=\"cost\")\n [0, 2]\n\n In the case where multiple valid topological orderings exist, `topo_order`\n can be used to specify a specific ordering:\n\n >>> DG = nx.DiGraph([(0, 1), (0, 2)])\n >>> sorted(nx.all_topological_sorts(DG)) # Valid topological orderings\n [[0, 1, 2], [0, 2, 1]]\n >>> nx.dag_longest_path(DG, topo_order=[0, 1, 2])\n [0, 1]\n >>> nx.dag_longest_path(DG, topo_order=[0, 2, 1])\n [0, 2]\n\n See also\n --------\n dag_longest_path_length\n\n ", "language": "en", "n_whitespaces": 331, "n_words": 180, "vocab_size": 114 }
https://github.com/networkx/networkx.git
6
test_conversation_part_filtering_based_on_conversation
def test_conversation_part_filtering_based_on_conversation(requests_mock, conversation_parts_responses):
    updated_at = 1650988200
    state = {"updated_at": updated_at}
    expected_record_ids = set()

    for response_tuple in conversation_parts_responses:
        requests_mock.register_uri('GET', response_tuple[0], json=response_tuple[1])
        if "conversation_parts" in response_tuple[1]:
            expected_record_ids.update([cp["id"] for cp in response_tuple[1]["conversation_parts"]["conversation_parts"]])

    records = []
    conversation_parts = ConversationParts(authenticator=NoAuth())
    for slice in conversation_parts.stream_slices(sync_mode=SyncMode.incremental, stream_state=state):
        records.extend(list(conversation_parts.read_records(sync_mode=SyncMode.incremental, stream_slice=slice, stream_state=state)))

    assert expected_record_ids == {r["id"] for r in records}
1642630461c25e447ede67bb42ba6ea6ec700e52
16
unit_test.py
239
🐛 Source Intercom: Fixed filtering of conversation_parts (#12374)
690
0
110
149
39
4,928
51
airbyte
28
airbyte-integrations/connectors/source-intercom/unit_tests/unit_test.py
Python
13
{ "docstring": "\n Test shows that conversation_parts filters conversations (from parent stream) correctly\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
https://github.com/airbytehq/airbyte.git
6
get_lexer_for_mimetype
def get_lexer_for_mimetype(_mime, **options):
    for modname, name, _, _, mimetypes in LEXERS.values():
        if _mime in mimetypes:
            if name not in _lexer_cache:
                _load_lexers(modname)
            return _lexer_cache[name](**options)
    for cls in find_plugin_lexers():
        if _mime in cls.mimetypes:
            return cls(**options)
    raise ClassNotFound('no lexer for mimetype %r found' % _mime)
f3166e673fe8d40277b804d35d77dcdb760fc3b3
13
__init__.py
123
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,380
0
116
77
31
20,452
42
pipenv
14
pipenv/patched/notpip/_vendor/pygments/lexers/__init__.py
Python
10
{ "docstring": "Get a lexer for a mimetype.\n\n Raises ClassNotFound if not found.\n ", "language": "en", "n_whitespaces": 17, "n_words": 11, "vocab_size": 10 }
https://github.com/pypa/pipenv.git
6
_extract_gradient_tags
def _extract_gradient_tags(self):
    tags = re.finditer(
        r'<gradient\s+from="([^"]+)"\s+to="([^"]+)"(\s+offset="([^"]+)")?>(.+?)</gradient>',
        self.original_text,
        re.S,
    )
    gradientmap = []
    for tag in tags:
        start = self._count_real_chars(self.original_text[: tag.start(0)])
        end = start + self._count_real_chars(tag.group(5))
        offsets = tag.group(4).split(",") if tag.group(4) else [0]
        start_offset = int(offsets[0]) if offsets[0] else 0
        end_offset = int(offsets[1]) if len(offsets) == 2 and offsets[1] else 0
        gradientmap.append(
            {
                "start": start,
                "end": end,
                "from": tag.group(1),
                "to": tag.group(2),
                "start_offset": start_offset,
                "end_offset": end_offset,
            },
        )
    self.text = re.sub("<gradient[^>]+>(.+?)</gradient>", r"\1", self.text, 0, re.S)
    return gradientmap
902e7eb4f0147b5882a613b67467e38a1d47f01e
14
text_mobject.py
311
Hide more private methods from the docs. (#2468) * hide privs from text_mobject.py * hide privs from tex_mobject.py * hide privs from code_mobject.py * hide privs from svg_mobject.py * remove SVGPath and utils from __init__.py * don't import string_to_numbers * hide privs from geometry.py * hide privs from matrix.py * hide privs from numbers.py * hide privs from three_dimensions.py * forgot underscore under set_stroke_width_from_length * there were more i missed * unhidea method that was used in docs * forgot other text2hash * remove svg_path from docs
46,096
0
378
197
60
189,496
75
manim
22
manim/mobject/svg/text_mobject.py
Python
25
{ "docstring": "Used to determine which parts (if any) of the string should be formatted\n with a gradient.\n\n Removes the ``<gradient>`` tag, as it is not part of Pango's markup and would cause an error.\n ", "language": "en", "n_whitespaces": 54, "n_words": 33, "vocab_size": 31 }
https://github.com/ManimCommunity/manim.git
5
get_feature_names_out
def get_feature_names_out(self, input_features=None):
    input_features = _check_feature_names_in(
        self, input_features, generate_names=True
    )
    est_name = self.__class__.__name__.lower()

    names_list = [f"{est_name}_{name}_sqrt" for name in input_features]

    for j in range(1, self.sample_steps):
        cos_names = [f"{est_name}_{name}_cos{j}" for name in input_features]
        sin_names = [f"{est_name}_{name}_sin{j}" for name in input_features]
        names_list.extend(cos_names + sin_names)

    return np.asarray(names_list, dtype=object)
67a3feed2fe4e82c1cc129c34b9e223b94a8d531
11
kernel_approximation.py
176
ENH Adds get_feature_names_out for AdditiveChi2Sampler (#22137) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
75,584
0
138
94
31
259,125
45
scikit-learn
21
sklearn/kernel_approximation.py
Python
11
{ "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Only used to validate feature names with the names seen in :meth:`fit`.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n ", "language": "en", "n_whitespaces": 110, "n_words": 39, "vocab_size": 32 }
https://github.com/scikit-learn/scikit-learn.git
3
scalar_potential_difference
def scalar_potential_difference(field, frame, point1, point2, origin):
    _check_frame(frame)
    if isinstance(field, Vector):
        # Get the scalar potential function
        scalar_fn = scalar_potential(field, frame)
    else:
        # Field is a scalar
        scalar_fn = field
    # Express positions in required frame
    position1 = express(point1.pos_from(origin), frame, variables=True)
    position2 = express(point2.pos_from(origin), frame, variables=True)
    # Get the two positions as substitution dicts for coordinate variables
    subs_dict1 = {}
    subs_dict2 = {}
    for i, x in enumerate(frame):
        subs_dict1[frame[i]] = x.dot(position1)
        subs_dict2[frame[i]] = x.dot(position2)
    return scalar_fn.subs(subs_dict2) - scalar_fn.subs(subs_dict1)
9a3ffc6781bd44c47cf49e128ef154389c32876a
10
fieldfunctions.py
208
Some pep8 cleanup of sympy.physics.vector.
48,541
0
155
133
56
197,433
77
sympy
23
sympy/physics/vector/fieldfunctions.py
Python
14
{ "docstring": "\n Returns the scalar potential difference between two points in a\n certain frame, wrt a given field.\n\n If a scalar field is provided, its values at the two points are\n considered. If a conservative vector field is provided, the values\n of its scalar potential function at the two points are used.\n\n Returns (potential at position 2) - (potential at position 1)\n\n Parameters\n ==========\n\n field : Vector/sympyfiable\n The field to calculate wrt\n\n frame : ReferenceFrame\n The frame to do the calculations in\n\n point1 : Point\n The initial Point in given frame\n\n position2 : Point\n The second Point in the given frame\n\n origin : Point\n The Point to use as reference point for position vector\n calculation\n\n Examples\n ========\n\n >>> from sympy.physics.vector import ReferenceFrame, Point\n >>> from sympy.physics.vector import scalar_potential_difference\n >>> R = ReferenceFrame('R')\n >>> O = Point('O')\n >>> P = O.locatenew('P', R[0]*R.x + R[1]*R.y + R[2]*R.z)\n >>> vectfield = 4*R[0]*R[1]*R.x + 2*R[0]**2*R.y\n >>> scalar_potential_difference(vectfield, R, O, P, O)\n 2*R_x**2*R_y\n >>> Q = O.locatenew('O', 3*R.x + R.y + 2*R.z)\n >>> scalar_potential_difference(vectfield, R, P, Q, O)\n -2*R_x**2*R_y + 18\n\n ", "language": "en", "n_whitespaces": 298, "n_words": 174, "vocab_size": 93 }
https://github.com/sympy/sympy.git
7
prev_lexicographic
def prev_lexicographic(self):
    i = self.superset_size - 1
    indices = Subset.subset_indices(self.subset, self.superset)

    while i >= 0 and i not in indices:
        i = i - 1

    if i == 0 or i - 1 in indices:
        indices.remove(i)
    else:
        if i >= 0:
            indices.remove(i)
            indices.append(i - 1)
        indices.append(self.superset_size - 1)

    ret_set = []
    super_set = self.superset
    for i in indices:
        ret_set.append(super_set[i])
    return Subset(ret_set, super_set)
498015021131af4dbb07eb110e5badaba8250c7b
13
subsets.py
192
Updated import locations
47,719
0
217
120
35
196,219
62
sympy
13
sympy/combinatorics/subsets.py
Python
17
{ "docstring": "\n Generates the previous lexicographically ordered subset.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Subset\n >>> a = Subset([], ['a', 'b', 'c', 'd'])\n >>> a.prev_lexicographic().subset\n ['d']\n >>> a = Subset(['c','d'], ['a', 'b', 'c', 'd'])\n >>> a.prev_lexicographic().subset\n ['c']\n\n See Also\n ========\n\n next_lexicographic\n ", "language": "en", "n_whitespaces": 138, "n_words": 39, "vocab_size": 27 }
https://github.com/sympy/sympy.git
3
add_base_argument
def add_base_argument(self, parser, *args, **kwargs):
    for arg in args:
        if arg in self.suppressed_base_arguments:
            kwargs["help"] = argparse.SUPPRESS
            break
    parser.add_argument(*args, **kwargs)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
base.py
73
Refs #33476 -- Reformatted code with Black.
50,806
0
81
45
17
204,596
19
django
10
django/core/management/base.py
Python
6
{ "docstring": "\n Call the parser's add_argument() method, suppressing the help text\n according to BaseCommand.suppressed_base_arguments.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 11 }
https://github.com/django/django.git
3
get_holiday
def get_holiday(holiday_list, month):
    holiday_map = frappe._dict()
    for d in holiday_list:
        if d:
            holiday_map.setdefault(
                d,
                frappe.db.sql(
                    """select day(holiday_date), weekly_off from `tabHoliday`
                    where parent=%s and month(holiday_date)=%s""",
                    (d, month),
                ),
            )

    return holiday_map


@frappe.whitelist()
494bd9ef78313436f0424b918f200dab8fc7c20b
@frappe.whitelist()
14
monthly_attendance_sheet.py
83
style: format code with black
14,150
1
10
47
22
66,259
23
erpnext
11
erpnext/hr/report/monthly_attendance_sheet/monthly_attendance_sheet.py
Python
13
{ "docstring": "select day(holiday_date), weekly_off from `tabHoliday`\n\t\t\t\twhere parent=%s and month(holiday_date)=%s", "language": "en", "n_whitespaces": 7, "n_words": 9, "vocab_size": 9 }
https://github.com/frappe/erpnext.git
1
add_hedge_option
def add_hedge_option(price, implied_volatility, strike, days, side):
    # Determine delta position given the option
    delta = calc_delta(price, implied_volatility, strike, days, 0, side)

    # Determine gamma position given the option
    gamma = calc_gamma(price, implied_volatility, strike, days, 0)

    # Determine vega position given the option
    vega = calc_vega(price, implied_volatility, strike, days, 0)

    return delta, gamma, vega
54a1b6f545a0016c576e9e00eef5c003d229dacf
8
hedge_model.py
88
Feature/hedge (#1768) * [Bug] Incorrect log for reddit keys. #1733 fix * Create new feature-hedge * Significantly improve code of hedge menu * More robust * Robustness * Fix tests * Fix can't multiply sequence by non-int of type 'numpy.float64' error * Temporary fix of singular matrix error. Return first feasible solution * Update Hugo Documentation * Combining menus and cleaning up code * Tidy up call_exp * Update tests Round 1 * Update tests Round 2 * Fix linting error * Fix linting? * Fixed glitch Co-authored-by: JerBouma <[email protected]> Co-authored-by: James Maslek <[email protected]> Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: colin99d <[email protected]> Co-authored-by: didierlopes.eth <[email protected]>
84,764
0
77
64
25
284,498
53
OpenBBTerminal
12
openbb_terminal/stocks/options/hedge/hedge_model.py
Python
5
{ "docstring": "Determine the delta, gamma and vega value of the portfolio and/or options.\n\n Parameters\n ----------\n price: int\n The price.\n implied_volatility: float\n The implied volatility.\n strike: float\n The strike price.\n days: float\n The amount of days until expiration. Use annual notation thus a month would be 30 / 360.\n sign: int\n Whether you have a long (1) or short (-1) position\n\n Returns\n -------\n delta: float\n gamma: float\n portfolio: float\n ", "language": "en", "n_whitespaces": 141, "n_words": 67, "vocab_size": 54 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
1
__radd__
def __radd__(self, other):
    # In analogy with __rsub__ and __rdiv__, use original order:
    # we get here from `other + self`.
    return np.add(other, self)
6d77c591c59b5678f14ae5af2127eebb7d2415bc
7
core.py
30
ENH: Adding __array_ufunc__ capability to MaskedArrays. This enables any ufunc numpy operations that are called on a MaskedArray to use the masked version of that function automatically without needing to resort to np.ma.func() calls.
38,785
0
52
17
23
160,889
24
numpy
5
numpy/ma/core.py
Python
2
{ "docstring": "\n Add other to self, and return a new masked array.\n\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/numpy/numpy.git
6
__getattribute__
def __getattribute__(self, attr):
    # All module metadata must be garnered from __spec__ in order to avoid
    # using mutated values.
    # Stop triggering this method.
    self.__class__ = types.ModuleType
    # Get the original name to make sure no object substitution occurred
    # in sys.modules.
    original_name = self.__spec__.name
    # Figure out exactly what attributes were mutated between the creation
    # of the module and now.
    attrs_then = self.__spec__.loader_state['__dict__']
    attrs_now = self.__dict__
    attrs_updated = {}
    for key, value in attrs_now.items():
        # Code that set the attribute may have kept a reference to the
        # assigned object, making identity more important than equality.
        if key not in attrs_then:
            attrs_updated[key] = value
        elif id(attrs_now[key]) != id(attrs_then[key]):
            attrs_updated[key] = value
    self.__spec__.loader.exec_module(self)
    # If exec_module() was used directly there is no guarantee the module
    # object was put into sys.modules.
    if original_name in sys.modules:
        if id(self) != id(sys.modules[original_name]):
            raise ValueError(f"module object for {original_name!r} "
                             "substituted in sys.modules during a lazy "
                             "load")
    # Update after loading since that's what would happen in an eager
    # loading situation.
    self.__dict__.update(attrs_updated)
    return getattr(self, attr)
8198943edd73a363c266633e1aa5b2a9e9c9f526
14
util.py
249
add python 3.10.4 for windows
55,258
0
494
143
123
218,361
174
XX-Net
25
python3.10.4/Lib/importlib/util.py
Python
19
{ "docstring": "Trigger the load of the module and return the attribute.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 8 }
https://github.com/XX-net/XX-Net.git
2
_reset_build_compile_trackers
def _reset_build_compile_trackers(model):
    # Reset build state
    model.built = False
    model.inputs = None
    model.outputs = None
    # Reset compile state
    model._is_compiled = False  # pylint:disable=protected-access
    if not tf.compat.v1.executing_eagerly_outside_functions():
        model._v1_compile_was_called = False
    model.optimizer = None


@keras_export(
    "keras.__internal__.models.in_place_subclassed_model_state_restoration",
    v1=[],
)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@keras_export( "keras.__internal__.models.in_place_subclassed_model_state_restoration", v1=[], )
10
cloning.py
103
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,325
1
76
48
24
275,169
37
keras
13
keras/models/cloning.py
Python
8
{ "docstring": "Reset state trackers for model.\n\n Note that we do not actually zero out attributes such as optimizer,\n but instead rely on the expectation that all of the attrs will be\n over-written on calling build/compile/etc. This is somewhat fragile,\n insofar as we check elsewhere for the presence of these attributes as\n evidence of having been built/compiled/etc. Pending a better way to do this,\n we reset key attributes here to allow building and compiling.\n\n Args:\n model: the model that is being reset\n ", "language": "en", "n_whitespaces": 109, "n_words": 80, "vocab_size": 61 }
https://github.com/keras-team/keras.git
5
compress
def compress(self):
    modules_to_compress = self.get_modules_to_compress()
    for layer, config in modules_to_compress:
        module = layer.module
        if "weight" in config.get("quant_types", []):
            scale, zero_point = self.calculate_qparams(layer.name, 'weight')
            module.register_buffer('weight_scale', scale.to(self.device))
            module.register_buffer('weight_zero_point', zero_point.to(self.device))
            weight = module.weight
            quantized_weight = self._quantize(weight,
                                              module.weight_scale,
                                              module.weight_zero_point,
                                              module.weight_qmin,
                                              module.weight_qmax)
            delattr(module, 'weight')
            module.register_buffer('weight', quantized_weight)
        if "input" in config.get("quant_types", []):
            scale, zero_point = self.calculate_qparams(layer.name, 'input')
            module.register_buffer('input_scale', scale.to(self.device))
            module.register_buffer('input_zero_point', zero_point.to(self.device))
        if "output" in config.get("quant_types", []):
            scale, zero_point = self.calculate_qparams(layer.name, 'output')
            module.register_buffer('output_scale', scale.to(self.device))
            module.register_buffer('output_zero_point', zero_point.to(self.device))
    self.compressed = True
    super().compress()
d68c786ff81bad19c04619d6a999ff34aaa724e7
14
observer_quantizer.py
421
[Compression] remove pruning v1 & refactor directory (#5228)
24,988
0
519
251
46
113,648
73
nni
25
nni/compression/pytorch/quantization/observer_quantizer.py
Python
26
{ "docstring": "\n Calculate quantization information of each tensor. Note that the inference of\n the compressed model will no longer update the corresponding. Instead, the quantization\n process will be simulated, which is used to test the accuracy of the quantization.\n ", "language": "en", "n_whitespaces": 66, "n_words": 37, "vocab_size": 28 }
https://github.com/microsoft/nni.git
1
change_logging
def change_logging(request):
    current_request.set(request)
    webhooks_queue.set([])

    # Connect our receivers to the post_save and post_delete signals.
    post_save.connect(handle_changed_object, dispatch_uid='handle_changed_object')
    m2m_changed.connect(handle_changed_object, dispatch_uid='handle_changed_object')
    pre_delete.connect(handle_deleted_object, dispatch_uid='handle_deleted_object')
    clear_webhooks.connect(clear_webhook_queue, dispatch_uid='clear_webhook_queue')

    yield

    # Disconnect change logging signals. This is necessary to avoid recording any errant
    # changes during test cleanup.
    post_save.disconnect(handle_changed_object, dispatch_uid='handle_changed_object')
    m2m_changed.disconnect(handle_changed_object, dispatch_uid='handle_changed_object')
    pre_delete.disconnect(handle_deleted_object, dispatch_uid='handle_deleted_object')
    clear_webhooks.disconnect(clear_webhook_queue, dispatch_uid='clear_webhook_queue')

    # Flush queued webhooks to RQ
    flush_webhooks(webhooks_queue.get())

    # Clear context vars
    current_request.set(None)
    webhooks_queue.set([])
cd8943144bec52ff608ddad3db5d0155832a4a23
9
context_managers.py
215
Use context vars instead of thread-local storage for change logging
78,284
0
122
121
49
266,089
62
netbox
17
netbox/extras/context_managers.py
Python
15
{ "docstring": "\n Enable change logging by connecting the appropriate signals to their receivers before code is run, and\n disconnecting them afterward.\n\n :param request: WSGIRequest object with a unique `id` set\n ", "language": "en", "n_whitespaces": 41, "n_words": 28, "vocab_size": 28 }
https://github.com/netbox-community/netbox.git
1
upgrade
def upgrade():
    with op.batch_alter_table("task_instance", schema=None) as batch_op:
        batch_op.alter_column("pool_slots", existing_type=sa.Integer, nullable=False, server_default='1')
66c342d033bd3cb959b4dc4e7e4b8aad597aab63
11
8646922c8a04_change_default_pool_slots_to_1.py
70
Support generating SQL script for upgrades (#20962) This PR attempts to add support for generating sql scripts for upgrade. Example command: `airflow db upgrade --revision-range e8d98d8ss99:78daisdu38d` `airflow db upgrade --range 2.0.0:2.2.3`
8,439
0
24
39
11
45,007
11
airflow
11
airflow/migrations/versions/8646922c8a04_change_default_pool_slots_to_1.py
Python
3
{ "docstring": "Change default pool_slots to 1 and make pool_slots not nullable", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
https://github.com/apache/airflow.git
3
gca
def gca(self, **kwargs):
    if kwargs:
        _api.warn_deprecated(
            "3.4",
            message="Calling gca() with keyword arguments was deprecated "
            "in Matplotlib %(since)s. Starting %(removal)s, gca() will "
            "take no keyword arguments. The gca() function should only be "
            "used to get the current axes, or if no axes exist, create "
            "new axes with default keyword arguments. To create a new "
            "axes with non-default arguments, use plt.axes() or "
            "plt.subplot().")
    ax = self._axstack.current()
    return ax if ax is not None else self.add_subplot(**kwargs)
8669c4636ce3b6ac6f4905c365ab41685186da56
12
figure.py
97
Rewrite AxesStack independently of cbook.Stack. AxesStack is fairly independent from cbook.Stack: cbook.Stack handles the forward/back/home buttons of the navbar, and therefore additionally maintains a movable "cursor" in the stack; AxesStack, on the other hand, needs to keep track both of "original" order and of "gca" order. Rewriting it from scratch, and using "original" order as main storage order (the "gca" stack being tracked using indices) shortens the implementation and simplifies it (as there's no more need to figure out what the super()calls do).
22,547
0
238
52
59
107,009
79
matplotlib
10
lib/matplotlib/figure.py
Python
13
{ "docstring": "\n Get the current Axes.\n\n If there is currently no Axes on this Figure, a new one is created\n using `.Figure.add_subplot`. (To test whether there is currently an\n Axes on a Figure, check whether ``figure.axes`` is empty. To test\n whether there is currently a Figure on the pyplot figure stack, check\n whether `.pyplot.get_fignums()` is empty.)\n\n The following kwargs are supported for ensuring the returned Axes\n adheres to the given projection etc., and for Axes creation if\n the active Axes does not exist:\n\n %(Axes:kwdoc)s\n ", "language": "en", "n_whitespaces": 162, "n_words": 82, "vocab_size": 54 }
https://github.com/matplotlib/matplotlib.git
2
run_report
def run_report(job_result, *args, **kwargs):
    module_name, report_name = job_result.name.split('.', 1)
    report = get_report(module_name, report_name)

    try:
        report.run(job_result)
    except Exception as e:
        print(e)
        job_result.set_status(JobResultStatusChoices.STATUS_ERRORED)
        job_result.save()
        logging.error(f"Error during execution of report {job_result.name}")
f13a00b2dd33bffc3048c861b494096df457f212
13
reports.py
127
Save old JobResults
77,780
0
78
108
26
264,668
28
netbox
20
netbox/extras/reports.py
Python
17
{ "docstring": "\n Helper function to call the run method on a report. This is needed to get around the inability to pickle an instance\n method for queueing into the background processor.\n ", "language": "en", "n_whitespaces": 39, "n_words": 29, "vocab_size": 24 }
https://github.com/netbox-community/netbox.git
4
location
def location(self) -> str:
    location = ""
    if self.storage:
        location = (
            self.storage.basepath + "/"
            if not self.storage.basepath.endswith("/")
            else ""
        )
    if self.path:
        location += self.path
    return location
0a59ec9279d929fe6a7199ff7ff7c0b58cffa100
15
deployments.py
94
Add location as a computed property of deployments
11,802
0
141
53
21
58,708
28
prefect
7
src/prefect/deployments.py
Python
17
{ "docstring": "\n The 'location' that this deployment points to is given by `path` alone\n in the case of no remote storage, and otherwise by `storage.basepath / path`.\n\n The underlying flow entrypoint is interpreted relative to this location.\n ", "language": "en", "n_whitespaces": 64, "n_words": 35, "vocab_size": 30 }
https://github.com/PrefectHQ/prefect.git
1
test_dispatch_key_raises_when_public_and_private_handlers
async def test_dispatch_key_raises_when_public_and_private_handlers():
    widget = DuplicateHandlersWidget()
    with pytest.raises(DuplicateKeyHandlers):
        await widget.dispatch_key(Key(widget, key="x", char="x"))
    assert widget.called_by is None
17bc375e080c30d8754dfeccf44f9a607e041e1b
14
test_message_pump.py
77
Support for key aliases, key handling tests
44,966
0
35
42
16
185,308
16
textual
11
tests/test_message_pump.py
Python
5
{ "docstring": "When both a public and private handler exists for one key, we fail fast via exception.", "language": "en", "n_whitespaces": 15, "n_words": 16, "vocab_size": 16 }
https://github.com/Textualize/textual.git
1
test_unschedule_view_post
def test_unschedule_view_post(self):
    # Post to the unschedule page
    response = self.client.post(
        reverse(
            "wagtailadmin_pages:revisions_unschedule",
            args=(self.christmas_event.id, self.this_christmas_revision.id),
        )
    )

    # Should be redirected to page history
    self.assertRedirects(
        response,
        reverse("wagtailadmin_pages:history", args=(self.christmas_event.id,)),
    )

    # Check that the page has no approved_schedule
    self.assertFalse(
        EventPage.objects.get(id=self.christmas_event.id).approved_schedule
    )

    # Check that the approved_go_live_at has been cleared from the revision
    self.assertIsNone(
        self.christmas_event.revisions.get(
            id=self.this_christmas_revision.id
        ).approved_go_live_at
    )
d10f15e55806c6944827d801cd9c2d53f5da4186
14
test_revisions.py
167
Reformat with black
15,720
0
269
103
40
71,728
56
wagtail
19
wagtail/admin/tests/pages/test_revisions.py
Python
19
{ "docstring": "\n This posts to the unschedule view and checks that the revision was unscheduled\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 12 }
https://github.com/wagtail/wagtail.git
1
_wrap_call_and_conditional_losses
def _wrap_call_and_conditional_losses(layer):
    # Create function that generates both outputs and losses
    layer_call = _get_layer_call_method(layer)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
8
save_impl.py
24
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,567
0
23
21
14
276,118
14
keras
4
keras/saving/saved_model/save_impl.py
Python
4
{ "docstring": "Wraps call function that returns a tuple of (outputs, losses).\n\n The losses returned are conditional on the inputs passed to the call function.\n Unconditional losses (e.g. weight regularizeration) are wrapped separately.\n\n Args:\n layer: a Keras layer object\n\n Returns:\n python call function that returns outputs and conditional losses -- excludes\n activity regularizer\n ", "language": "en", "n_whitespaces": 81, "n_words": 51, "vocab_size": 40 }
https://github.com/keras-team/keras.git
1
_get_no_autofield_sequence_name
def _get_no_autofield_sequence_name(self, table):
    name_length = self.max_name_length() - 3
    return "%s_SQ" % truncate_name(strip_quotes(table), name_length).upper()
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
operations.py
57
Refs #33476 -- Reformatted code with Black.
51,012
0
34
33
13
205,087
13
django
8
django/db/backends/oracle/operations.py
Python
3
{ "docstring": "\n Manually created sequence name to keep backward compatibility for\n AutoFields that aren't Oracle identity columns.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 15 }
https://github.com/django/django.git
6
_unify_values
def _unify_values(self, section, vars):
    sectiondict = {}
    try:
        sectiondict = self._sections[section]
    except KeyError:
        if section != self.default_section:
            raise NoSectionError(section) from None
    # Update with the entry specific variables
    vardict = {}
    if vars:
        for key, value in vars.items():
            if value is not None:
                value = str(value)
            vardict[self.optionxform(key)] = value
    return _ChainMap(vardict, sectiondict, self._defaults)
8198943edd73a363c266633e1aa5b2a9e9c9f526
14
configparser.py
147
add python 3.10.4 for windows
56,460
0
206
93
42
221,655
53
XX-Net
17
python3.10.4/Lib/configparser.py
Python
14
{ "docstring": "Create a sequence of lookups with 'vars' taking priority over\n the 'section' which takes priority over the DEFAULTSECT.\n\n ", "language": "en", "n_whitespaces": 32, "n_words": 18, "vocab_size": 15 }
https://github.com/XX-net/XX-Net.git
5
dag_state
def dag_state(args, session=NEW_SESSION):
    dag = DagModel.get_dagmodel(args.dag_id, session=session)

    if not dag:
        raise SystemExit(f"DAG: {args.dag_id} does not exist in 'dag' table")
    dr = session.query(DagRun).filter_by(dag_id=args.dag_id, execution_date=args.execution_date).one_or_none()
    out = dr.state if dr else None
    conf_out = ''
    if out and dr.conf:
        conf_out = ', ' + json.dumps(dr.conf)
    print(str(out) + conf_out)


@cli_utils.action_cli
b1ad017cee66f5e042144cc7baa2d44b23b47c4f
@cli_utils.action_cli
12
dag_command.py
179
pydocstyle D202 added (#24221)
7,780
1
84
101
36
42,996
47
airflow
25
airflow/cli/commands/dag_command.py
Python
10
{ "docstring": "\n Returns the state (and conf if exists) of a DagRun at the command line.\n >>> airflow dags state tutorial 2015-01-01T00:00:00.000000\n running\n >>> airflow dags state a_dag_with_conf_passed 2015-01-01T00:00:00.000000\n failed, {\"name\": \"bob\", \"age\": \"42\"}\n ", "language": "en", "n_whitespaces": 51, "n_words": 32, "vocab_size": 25 }
https://github.com/apache/airflow.git
1
_search
def _search(self, check_return_type=True) -> Union[SourceReadList, DestinationReadList, ConnectionReadList]:
    return self._search_fn(self.api_instance, self.search_payload, _check_return_type=check_return_type)
56bf982cb96f831fe04f5e44a92ee4a669b9e16a
8
resources.py
53
🐙 octavia-cli: `apply` connections (#10881)
641
0
25
36
11
4,247
11
airbyte
11
octavia-cli/octavia_cli/apply/resources.py
Python
7
{ "docstring": "Run search of a resources on the remote Airbyte instance.\n\n Returns:\n Union[SourceReadList, DestinationReadList, ConnectionReadList]: Search results\n ", "language": "en", "n_whitespaces": 41, "n_words": 16, "vocab_size": 16 }
https://github.com/airbytehq/airbyte.git
4
_resize
def _resize(self, image, shorter=800, longer=1333, size_divisor=32, resample=Image.BICUBIC):
    if not isinstance(image, Image.Image):
        image = self.to_pil_image(image)

    w, h = image.size
    min_size = shorter
    max_size = longer
    scale = min_size / min(w, h)
    if h < w:
        newh, neww = min_size, scale * w
    else:
        newh, neww = scale * h, min_size

    if max(newh, neww) > max_size:
        scale = max_size / max(newh, neww)
        newh = newh * scale
        neww = neww * scale

    newh, neww = int(newh + 0.5), int(neww + 0.5)
    newh, neww = newh // size_divisor * size_divisor, neww // size_divisor * size_divisor

    return self.resize(image, size=(neww, newh), resample=resample)
ac227093e41cecb07c7e0f2fc9a504850907bd06
11
feature_extraction_vilt.py
266
Add ViLT (#14895) * First commit * Add conversion script * Make conversion script work for base model * More improvements * Update conversion script, works for vqa * Add indexing argument to meshgrid * Make conversion script work for ViltForPreTraining * Add ViltForPreTraining to docs * Fix device issue * Add processor * Add MinMaxResize to feature extractor * Implement call method of ViltProcessor * Fix tests * Add integration test * Add loss calculation for VQA * Improve tests * Improve some more tests * Debug tests * Small improvements * Add support for attention_mask * Remove mask_it * Add pixel_mask * Add tests for ViltFeatureExtractor * Improve tests * Add ViltForNaturalLanguageVisualReasoning * Add ViltForNaturalLanguageVisualReasoning to conversion script * Minor fixes * Add support for image_embeds, update docstrings to markdown * Update docs to markdown * Improve conversion script * Rename ViltForPreTraining to ViltForMaskedLM * Improve conversion script * Convert docstrings to markdown * Fix code example of retrieval model * Properly convert masked language model * Add integration test for nlvr * Fix code quality * Apply suggestions from code review * Add copied from statements * Fix pretrained_config_archive_map * Fix docs * Add model to README * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> * Apply more suggestions from code review * Make code more readable * Add ViltForNaturalLanguageVisualReasoning to the tests * Rename ViltForVisualQuestionAnswering to ViltForQuestionAnswering * Replace pixel_values_2 by single tensor * Add hidden_states and attentions * Fix one more test * Fix all tests * Update year * Fix rebase issues * Fix another rebase issue * Remove ViltForPreTraining from auto mapping * Rename ViltForImageRetrievalTextRetrieval to ViltForImageAndTextRetrieval * Make it possible to use BertTokenizerFast in the processor * Use BertTokenizerFast by default * Rename ViltForNaturalLanguageVisualReasoning, define custom model output Co-authored-by: Sylvain Gugger <[email protected]>
6,252
0
247
169
52
34,302
97
transformers
23
src/transformers/models/vilt/feature_extraction_vilt.py
Python
18
{ "docstring": "\n Resizes the shorter edge of `image` to `shorter` and limits the longer edge to under `longer`, while preserving\n the aspect ratio. Also makes sure that both the height and width can be divided by `size_divisor`.\n\n Based on original implementation:\n https://github.com/dandelin/ViLT/blob/3db8b5035464afee84d951bf6322e1b27f1d072d/vilt/transforms/utils.py#L5\n\n Args:\n image (`PIL.Image`):\n The image to resize.\n shorter (`int`, *optional*, defaults to `800`):\n The size to which to resize the shorter side of the image.\n longer (`int`, *optional*, defaults to `1333`):\n The size by which to limit the longer side of the image, while preserving the aspect ratio.\n size_divisor (`int`, *optional*, defaults to `32`):\n The size by which both the height and the width must be divisible.\n resample (`int`, *optional*, defaults to `PIL.Image.BICUBIC`):\n An optional resampling filter.\n ", "language": "en", "n_whitespaces": 290, "n_words": 117, "vocab_size": 61 }
https://github.com/huggingface/transformers.git
1
test_instance_url_mismatch
def test_instance_url_mismatch(self):
    self.plugin.set_option("instance_url", "https://hellboy.atlassian.net", self.project)
    group = self.create_group(message="Hello world", culprit="foo.bar")
    plugin_issue = GroupMeta.objects.create(
        key=f"{self.plugin.slug}:tid", group_id=group.id, value="SEN-1"
    )
    with self.tasks():
        self.installation.migrate_issues()
    assert not ExternalIssue.objects.filter(
        organization_id=self.organization.id,
        integration_id=self.integration.id,
        key=plugin_issue.value,
    ).exists()
    assert GroupMeta.objects.filter(
        key=f"{self.plugin.slug}:tid", group_id=group.id, value="SEN-1"
    ).exists()
f5e5a3b1ed97383e0699aff9eb0363e9eb5db479
14
test_integration.py
237
feat(Jira): Plugin issue migration endpoint (#37577) * feat(jira): Plugin issue migration endpoint
19,082
0
169
132
27
94,414
33
sentry
28
tests/sentry/integrations/jira/test_integration.py
Python
16
{ "docstring": "Test that if the plugin's instance URL does not match the integration's base URL, we don't migrate the issues", "language": "en", "n_whitespaces": 18, "n_words": 19, "vocab_size": 17 }
https://github.com/getsentry/sentry.git
1
create_experiment
def create_experiment(workspace, experiment_name):
    logger.debug("create: experiment_name {}".format(experiment_name))
    exp = Experiment(workspace=workspace, name=experiment_name)
    return exp
f1b06e2f758b5b4a965f7bf428d006621d19c0b0
10
submit_azureml_pytest.py
56
changed folder structure for aml tests
7,141
0
24
33
11
39,219
12
recommenders
9
tests/ci/aml_tests_old/submit_azureml_pytest.py
Python
4
{ "docstring": "\n AzureML requires an experiment as a container of trials.\n This will either create a new experiment or use an\n existing one.\n\n Args:\n workspace (str) : name of AzureML workspace\n experiment_name (str) : AzureML experiment name\n Return:\n exp - AzureML experiment\n ", "language": "en", "n_whitespaces": 80, "n_words": 40, "vocab_size": 27 }
https://github.com/microsoft/recommenders.git
3
test_localtaskjob_essential_attr
def test_localtaskjob_essential_attr(self, dag_maker):
    with dag_maker('test_localtaskjob_essential_attr'):
        op1 = EmptyOperator(task_id='op1')

    dr = dag_maker.create_dagrun()

    ti = dr.get_task_instance(task_id=op1.task_id)
    job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())

    essential_attr = ["dag_id", "job_type", "start_date", "hostname"]

    check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
    assert all(check_result_1)

    check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
    assert all(check_result_2)
49e336ae0302b386a2f47269a6d13988382d975f
12
test_local_task_job.py
184
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
9,123
0
129
111
36
47,474
48
airflow
23
tests/jobs/test_local_task_job.py
Python
11
{ "docstring": "\n Check whether essential attributes\n of LocalTaskJob can be assigned with\n proper values without intervention\n ", "language": "en", "n_whitespaces": 43, "n_words": 14, "vocab_size": 14 }
https://github.com/apache/airflow.git
1
decode
def decode(s):
    return urllib.parse.parse_qsl(s, keep_blank_values=True, errors="surrogateescape")
b3587b52b25077f68116b9852b041d33e7fc6601
9
url.py
39
make it black!
73,725
0
12
23
6
251,414
6
mitmproxy
7
mitmproxy/net/http/url.py
Python
2
{ "docstring": "\n Takes a urlencoded string and returns a list of surrogate-escaped (key, value) tuples.\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 12 }
https://github.com/mitmproxy/mitmproxy.git
1
test_collapse_event
def test_collapse_event(self) -> None:
    client = self.get_client_descriptor()
    queue = client.event_queue
    queue.push({"type": "restart", "server_generation": 1, "timestamp": "1"})
    # Verify the server_generation event is stored as a virtual event
    self.assertEqual(
        queue.virtual_events,
        {"restart": {"id": 0, "type": "restart", "server_generation": 1, "timestamp": "1"}},
    )
    # And we can reconstruct newest_pruned_id etc.
    self.verify_to_dict_end_to_end(client)

    queue.push({"type": "unknown", "timestamp": "1"})
    self.assertEqual(list(queue.queue), [{"id": 1, "type": "unknown", "timestamp": "1"}])
    self.assertEqual(
        queue.virtual_events,
        {"restart": {"id": 0, "type": "restart", "server_generation": 1, "timestamp": "1"}},
    )
    # And we can still reconstruct newest_pruned_id etc. correctly
    self.verify_to_dict_end_to_end(client)

    # Verify virtual events are converted to real events by .contents()
    self.assertEqual(
        queue.contents(),
        [
            {"id": 0, "type": "restart", "server_generation": 1, "timestamp": "1"},
            {"id": 1, "type": "unknown", "timestamp": "1"},
        ],
    )

    # And now verify to_dict after pruning
    queue.prune(0)
    self.verify_to_dict_end_to_end(client)

    queue.prune(1)
    self.verify_to_dict_end_to_end(client)
b0ce4f1bce8031881addecb1e86073483517f392
12
test_event_queue.py
414
docs: Fix many spelling mistakes. Signed-off-by: Anders Kaseorg <[email protected]>
17,634
0
389
223
64
83,239
121
zulip
13
zerver/tests/test_event_queue.py
Python
34
{ "docstring": "\n This mostly focuses on the internals of\n how we store \"virtual_events\" that we\n can collapse if subsequent events are\n of the same form. See the code in\n EventQueue.push for more context.\n ", "language": "en", "n_whitespaces": 75, "n_words": 31, "vocab_size": 27 }
https://github.com/zulip/zulip.git
3
test_get_primary_key_column
def test_get_primary_key_column(self):
    testable_column_strings = (
        ("id", "id"),
        ("[id]", "id"),
        ("`id`", "id"),
        ('"id"', "id"),
        ("[id col]", "id col"),
        ("`id col`", "id col"),
        ('"id col"', "id col"),
    )
    with connection.cursor() as cursor:
        for column, expected_string in testable_column_strings:
            sql = "CREATE TABLE test_primary (%s int PRIMARY KEY NOT NULL)" % column
            with self.subTest(column=column):
                try:
                    cursor.execute(sql)
                    field = connection.introspection.get_primary_key_column(
                        cursor, "test_primary"
                    )
                    self.assertEqual(field, expected_string)
                finally:
                    cursor.execute("DROP TABLE test_primary")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
17
test_introspection.py
215
Refs #33476 -- Reformatted code with Black.
49,992
0
390
119
52
201,772
64
django
14
tests/backends/sqlite/test_introspection.py
Python
22
{ "docstring": "\n Get the primary key column regardless of whether or not it has\n quotation.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 13 }
https://github.com/django/django.git
2
print_help
def print_help(self):
    colored = self.ticker and self.selected_date
    help_text = f
    print(help_text)
5a5c7193db3cf15ee7bb881ba912b8289aa83a80
10
options_controller.py
140
Volatility surface (#1176) * Add a 3D volatility surface from yfinance data * Update _index.md * other tests
83,744
0
39
23
10
281,411
11
OpenBBTerminal
10
gamestonk_terminal/stocks/options/options_controller.py
Python
30
{ "docstring": "Print help.\n unu show unusual options activity [fdscanner.com]\n calc basic call/put PnL calculator\n\n load load new ticker\n exp see and set expiration dates\n\nTicker: {self.ticker or None}\nExpiry: {self.selected_date or None}\n{\"\" if self.ticker else Style.DIM}\n pcr display put call ratio for ticker [AlphaQuery.com]{Style.DIM if not colored else ''}\n info display option information (volatility, IV rank etc) [Barchart.com]\n chains display option chains with greeks [Tradier]\n oi plot open interest [Tradier/YF]\n vol plot volume [Tradier/YF]\n voi plot volume and open interest [Tradier/YF]\n hist plot option history [Tradier]\n vsurf show 3D volatility surface [Yfinance]\n grhist plot option greek history [Syncretism.io]\n plot plot variables provided by the user [Yfinance]\n parity shows whether options are above or below expected price [Yfinance]\n binom shows the value of an option using binomial options pricing [Yfinance]\n{Style.RESET_ALL if not colored else ''}\n> screen screens tickers based on preset [Syncretism.io]{\"\" if colored else Style.DIM}\n> payoff shows payoff diagram for a selection of options [Yfinance]\n> pricing shows options pricing and risk neutral valuation [Yfinance]\n{Style.RESET_ALL if not colored else ''}", "language": "en", "n_whitespaces": 382, "n_words": 174, "vocab_size": 109 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
2
list_installed_files
def list_installed_files(self):
    for result in self._get_records():
        yield result
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
8
database.py
31
upd; format
12,755
0
33
17
7
61,930
8
transferlearning
4
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py
Python
3
{ "docstring": "\n Iterates over the ``RECORD`` entries and returns a tuple\n ``(path, hash, size)`` for each line.\n\n :returns: iterator of (path, hash, size)\n ", "language": "en", "n_whitespaces": 50, "n_words": 21, "vocab_size": 20 }
https://github.com/jindongwang/transferlearning.git
2
test_dataset
def test_dataset(ray_start_4_cpus, use_local):
    model_creator = mlp_identity.model_creator
    optimizer_creator = mlp_identity.optimizer_creator
    dataset_creator = mlp_identity.dataset_creator

    DatasetOperator = TrainingOperator.from_creators(
        model_creator=model_creator,
        optimizer_creator=optimizer_creator,
        loss_creator=nn.MSELoss,
    )

    trainer = TorchTrainer(
        training_operator_cls=DatasetOperator,
        use_local=use_local,
        num_workers=2,
    )

    dataset = dataset_creator()
    for i in range(5):
        trainer.train(dataset=dataset, num_steps=100)

    x = mlp_identity.to_mat(0.5)
    prediction = float(trainer.get_model()(x)[0][0])
    assert 0.4 <= prediction <= 0.6
    trainer.shutdown()


@pytest.mark.parametrize("use_local", [True, False])
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
@pytest.mark.parametrize("use_local", [True, False])
13
test_torch_2.py
216
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,963
1
141
130
41
133,242
51
ray
31
python/ray/util/sgd/tests/test_torch_2.py
Python
21
{ "docstring": "\n This test tries training the mlp_identity example. We check the accuracy of\n the model as an all inclusive way of ensuring that we are properly sharding\n and iterating over the entire dataset (instead of repeating the first set\n of points for example).\n ", "language": "en", "n_whitespaces": 58, "n_words": 42, "vocab_size": 35 }
https://github.com/ray-project/ray.git
4
_is_current
def _is_current(self, file_path, zip_path):
    timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
    if not os.path.isfile(file_path):
        return False
    stat = os.stat(file_path)
    if stat.st_size != size or stat.st_mtime != timestamp:
        return False
    # check that the contents match
    zip_contents = self.loader.get_data(zip_path)
    with open(file_path, 'rb') as f:
        file_contents = f.read()
    return zip_contents == file_contents
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
11
__init__.py
152
upd; format
13,147
0
143
92
36
63,102
47
transferlearning
21
.venv/lib/python3.8/site-packages/pip/_vendor/pkg_resources/__init__.py
Python
11
{ "docstring": "\n Return True if the file_path is current for this zip_path\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/jindongwang/transferlearning.git
4
load
def load(self):
    # Try to scan config file
    model_config_fpaths = list(self.model_fpath.parent.rglob("*.json"))
    if len(model_config_fpaths) > 0 and model_config_fpaths[0].exists():
        with model_config_fpaths[0].open("r", encoding="utf-8") as f:
            hparams.loadJson(json.load(f))
    self._model = Tacotron(embed_dims=hparams.tts_embed_dims,
                           num_chars=len(symbols),
                           encoder_dims=hparams.tts_encoder_dims,
                           decoder_dims=hparams.tts_decoder_dims,
                           n_mels=hparams.num_mels,
                           fft_bins=hparams.num_mels,
                           postnet_dims=hparams.tts_postnet_dims,
                           encoder_K=hparams.tts_encoder_K,
                           lstm_dims=hparams.tts_lstm_dims,
                           postnet_K=hparams.tts_postnet_K,
                           num_highways=hparams.tts_num_highways,
                           dropout=hparams.tts_dropout,
                           stop_threshold=hparams.tts_stop_threshold,
                           speaker_embedding_size=hparams.speaker_embedding_size).to(self.device)

    self._model.load(self.model_fpath, self.device)
    self._model.eval()

    if self.verbose:
        print("Loaded synthesizer \"%s\" trained to step %d" % (self.model_fpath.name, self._model.state_dict()["step"]))
0536874dec68e68969502ce1774168552727fa17
15
inference.py
335
Add config file for pretrained
38,850
0
535
213
49
160,998
52
MockingBird
50
synthesizer/inference.py
Python
26
{ "docstring": "\n Instantiates and loads the model given the weights file that was passed in the constructor.\n ", "language": "en", "n_whitespaces": 30, "n_words": 15, "vocab_size": 13 }
https://github.com/babysor/MockingBird.git
4
paste
def paste(self, im, box=None):
    if box is not None:
        warnings.warn(
            "The box parameter is deprecated and will be removed in Pillow 10 "
            "(2023-07-01).",
            DeprecationWarning,
        )

    # convert to blittable
    im.load()
    image = im.im
    if image.isblock() and im.mode == self.__mode:
        block = image
    else:
        block = image.new_block(self.__mode, im.size)
        image.convert2(block, image)  # convert directly between buffers

    _pyimagingtkcall("PyImagingPhoto", self.__photo, block.id)


# --------------------------------------------------------------------
# BitmapImage
a724be66bef4b692994e5defa22ba3f1b2a1f771
12
ImageTk.py
154
Deprecated PhotoImage.paste() box parameter
69,895
0
217
92
50
242,703
62
Pillow
19
src/PIL/ImageTk.py
Python
15
{ "docstring": "\n Paste a PIL image into the photo image. Note that this can\n be very slow if the photo image is displayed.\n\n :param im: A PIL image. The size must match the target region. If the\n mode does not match, the image is converted to the mode of\n the bitmap image.\n ", "language": "en", "n_whitespaces": 117, "n_words": 50, "vocab_size": 36 }
https://github.com/python-pillow/Pillow.git
1
test_sensor_arrival_time_custom_timestamp
async def test_sensor_arrival_time_custom_timestamp(hass):
    assert hass.states.get("sensor.google_travel_time").state == "27"


@pytest.mark.usefixtures("mock_update")
beb30a1ff199596163c655e8ae745a0f1649b78a
@pytest.mark.usefixtures("mock_update")
10
test_sensor.py
55
Add google_travel_time sensor tests (#66568) Co-authored-by: Paulus Schoutsen <[email protected]>
91,334
1
13
19
8
292,234
8
core
8
tests/components/google_travel_time/test_sensor.py
Python
2
{ "docstring": "Test that sensor works for arrival time with a custom timestamp.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/home-assistant/core.git
3
col
def col (loc, strg): s = strg return 1 if 0 < loc < len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
10
pyparsing.py
72
upd; format
13,225
0
34
44
23
63,273
25
transferlearning
6
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
Python
3
{ "docstring": "Returns current column within a string, counting newlines as line separators.\n The first column is number 1.\n\n Note: the default parsing behavior is to expand tabs in the input string\n before starting the parsing process. See\n :class:`ParserElement.parseString` for more\n information on parsing strings containing ``<TAB>`` s, and suggested\n methods to maintain a consistent view of the parsed string, the parse\n location, and line and column positions within the parsed string.\n ", "language": "en", "n_whitespaces": 87, "n_words": 70, "vocab_size": 52 }
https://github.com/jindongwang/transferlearning.git
1
ensure_future
def ensure_future(coro_or_future, *, loop=None): return _ensure_future(coro_or_future, loop=loop)
8198943edd73a363c266633e1aa5b2a9e9c9f526
8
tasks.py
34
add python 3.10.4 for windows
56,137
0
13
21
7
220,829
7
XX-Net
4
python3.10.4/Lib/asyncio/tasks.py
Python
2
{ "docstring": "Wrap a coroutine or an awaitable in a future.\n\n If the argument is a Future, it is returned directly.\n ", "language": "en", "n_whitespaces": 25, "n_words": 19, "vocab_size": 16 }
https://github.com/XX-net/XX-Net.git
5
_discover_secrets_backends
def _discover_secrets_backends(self) -> None: for provider_package, provider in self._provider_dict.items(): if provider.data.get("secrets-backends"): for secrets_backends_class_name in provider.data["secrets-backends"]: if _sanity_check(provider_package, secrets_backends_class_name, provider): self._secrets_backend_class_name_set.add(secrets_backends_class_name)
b5a786b38148295c492da8ab731d5e2f6f86ccf7
16
providers_manager.py
97
Suppress import errors for providers from sources (#22579) When we are running airflow locally with providers installed from sources, often many providers will be discovered which we haven't installed the deps for. This generally results in a very large amount of traceback logging, which has a very negative effect on usefulness of terminal output. Here we suppress this error logging for providers that are installed from sources.
8,932
0
102
59
17
46,604
20
airflow
12
airflow/providers_manager.py
Python
7
{ "docstring": "Retrieves all secrets backends defined in the providers", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/apache/airflow.git
1
test_sends_note_notification
def test_sends_note_notification(self): # leave a comment url = f"/api/0/issues/{self.group.id}/comments/" with self.tasks(): response = self.client.post(url, format="json", data={"text": "blah blah"}) assert response.status_code == 201, response.content msg = mail.outbox[0] # check the txt version assert "blah blah" in msg.body # check the html version assert "blah blah</p></div>" in msg.alternatives[0][0] attachment, text = get_attachment() # check the Slack version assert text == f"New comment by {self.name}" assert attachment["title"] == f"{self.group.title}" assert ( attachment["title_link"] == f"http://testserver/organizations/{self.organization.slug}/issues/{self.group.id}/?referrer=note_activity-slack" ) assert attachment["text"] == "blah blah" assert ( attachment["footer"] == f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=note_activity-slack-user|Notification Settings>" )
1730c481f1a8a71446326fa1ff72e10663016385
14
test_notifications.py
271
fix(notifications): Use `metrics_key` (#34572)
19,673
0
274
122
53
99,595
86
sentry
26
tests/sentry/notifications/test_notifications.py
Python
20
{ "docstring": "\n Test that an email AND Slack notification are sent with\n the expected values when a comment is created on an issue.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 20 }
https://github.com/getsentry/sentry.git
2
connect
def connect(self) -> HandlerStatusResponse: url = self.connection_args.get('url') try: ckan = rc(url) self.is_connected = True self.ckan = ckan except Exception as e: return HandlerStatusResponse(False, f'Failed to connect to CKAN: {e}') self.connection = ckan return HandlerStatusResponse(True)
75686f43be1d82794e50277acefda517eced0b6c
12
ckan_handler.py
105
Add CKAN Handler for MindsDB
25,464
0
120
59
26
115,450
34
mindsdb
12
mindsdb/integrations/handlers/ckan_handler/ckan_handler.py
Python
13
{ "docstring": "\n Handles the connection to a CKAN remote portal instance.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
https://github.com/mindsdb/mindsdb.git
3
unbatch
def unbatch(batches_struct): flat_batches = tree.flatten(batches_struct) out = [] for batch_pos in range(len(flat_batches[0])): out.append( tree.unflatten_as( batches_struct, [flat_batches[i][batch_pos] for i in range(len(flat_batches))], ) ) return out
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
17
space_utils.py
104
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
33,102
0
105
66
19
144,060
24
ray
12
rllib/utils/spaces/space_utils.py
Python
11
{ "docstring": "Converts input from (nested) struct of batches to batch of structs.\n\n Input: Struct of different batches (each batch has size=3):\n {\"a\": [1, 2, 3], \"b\": ([4, 5, 6], [7.0, 8.0, 9.0])}\n Output: Batch (list) of structs (each of these structs representing a\n single action):\n [\n {\"a\": 1, \"b\": (4, 7.0)}, <- action 1\n {\"a\": 2, \"b\": (5, 8.0)}, <- action 2\n {\"a\": 3, \"b\": (6, 9.0)}, <- action 3\n ]\n\n Args:\n batches_struct (any): The struct of component batches. Each leaf item\n in this struct represents the batch for a single component\n (in case struct is tuple/dict).\n Alternatively, `batches_struct` may also simply be a batch of\n primitives (non tuple/dict).\n\n Returns:\n List[struct[components]]: The list of rows. Each item\n in the returned list represents a single (maybe complex) struct.\n ", "language": "en", "n_whitespaces": 274, "n_words": 126, "vocab_size": 85 }
https://github.com/ray-project/ray.git
8
sample
def sample(self, n_samples=1): check_is_fitted(self) if n_samples < 1: raise ValueError( "Invalid value for 'n_samples': %d . The sampling requires at " "least one sample." % (self.n_components) ) _, n_features = self.means_.shape rng = check_random_state(self.random_state) n_samples_comp = rng.multinomial(n_samples, self.weights_) if self.covariance_type == "full": X = np.vstack( [ rng.multivariate_normal(mean, covariance, int(sample)) for (mean, covariance, sample) in zip( self.means_, self.covariances_, n_samples_comp ) ] ) elif self.covariance_type == "tied": X = np.vstack( [ rng.multivariate_normal(mean, self.covariances_, int(sample)) for (mean, sample) in zip(self.means_, n_samples_comp) ] ) else: X = np.vstack( [ mean + rng.standard_normal(size=(sample, n_features)) * np.sqrt(covariance) for (mean, covariance, sample) in zip( self.means_, self.covariances_, n_samples_comp ) ] ) y = np.concatenate( [np.full(sample, j, dtype=int) for j, sample in enumerate(n_samples_comp)] ) return (X, y)
254ea8c453cd2100ade07644648f1f00392611a6
18
_base.py
372
ENH Replaced RandomState with Generator compatible calls (#22271)
75,335
0
657
245
73
258,624
118
scikit-learn
35
sklearn/mixture/_base.py
Python
41
{ "docstring": "Generate random samples from the fitted Gaussian distribution.\n\n Parameters\n ----------\n n_samples : int, default=1\n Number of samples to generate.\n\n Returns\n -------\n X : array, shape (n_samples, n_features)\n Randomly generated sample.\n\n y : array, shape (nsamples,)\n Component labels.\n ", "language": "en", "n_whitespaces": 126, "n_words": 37, "vocab_size": 32 }
https://github.com/scikit-learn/scikit-learn.git
1
test_adapter_recovery
async def test_adapter_recovery(hass, one_adapter): called_start = 0 called_stop = 0 _callback = None mock_discovered = []
0e2ebfe5c45716250280186234123f170e3bd08c
7
test_scanner.py
38
Move bluetooth watchdog into the scanner base class (#83888)
96,435
0
31
221
12
297,466
16
core
7
tests/components/bluetooth/test_scanner.py
Python
50
{ "docstring": "Test we can recover when the adapter stops responding.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
2
test_ohe_infrequent_two_levels_user_cats_one_frequent
def test_ohe_infrequent_two_levels_user_cats_one_frequent(kwargs): X_train = np.array([["a"] * 5 + ["e"] * 30], dtype=object).T ohe = OneHotEncoder( categories=[["c", "d", "a", "b"]], sparse=False, handle_unknown="infrequent_if_exist", **kwargs, ).fit(X_train) X_test = [["a"], ["b"], ["c"], ["d"], ["e"]] expected = np.array([[1, 0], [0, 1], [0, 1], [0, 1], [0, 1]]) X_trans = ohe.transform(X_test) assert_allclose(expected, X_trans) # 'a' is dropped drops = ["first", "if_binary", ["a"]] X_test = [["a"], ["c"]] for drop in drops: ohe.set_params(drop=drop).fit(X_train) assert_allclose([[0], [1]], ohe.transform(X_test))
7f0006c8aad1a09621ad19c3db19c3ff0555a183
14
test_encoders.py
324
ENH Adds infrequent categories to OneHotEncoder (#16018) * ENH Completely adds infrequent categories * STY Linting * STY Linting * DOC Improves wording * DOC Lint * BUG Fixes * CLN Address comments * CLN Address comments * DOC Uses math to description float min_frequency * DOC Adds comment regarding drop * BUG Fixes method name * DOC Clearer docstring * TST Adds more tests * FIX Fixes mege * CLN More pythonic * CLN Address comments * STY Flake8 * CLN Address comments * DOC Fix * MRG * WIP * ENH Address comments * STY Fix * ENH Use functiion call instead of property * ENH Adds counts feature * CLN Rename variables * DOC More details * CLN Remove unneeded line * CLN Less lines is less complicated * CLN Less diffs * CLN Improves readiabilty * BUG Fix * CLN Address comments * TST Fix * CLN Address comments * CLN Address comments * CLN Move docstring to userguide * DOC Better wrapping * TST Adds test to handle_unknown='error' * ENH Spelling error in docstring * BUG Fixes counter with nan values * BUG Removes unneeded test * BUG Fixes issue * ENH Sync with main * DOC Correct settings * DOC Adds docstring * DOC Immprove user guide * DOC Move to 1.0 * DOC Update docs * TST Remove test * DOC Update docstring * STY Linting * DOC Address comments * ENH Neater code * DOC Update explaination for auto * Update sklearn/preprocessing/_encoders.py Co-authored-by: Roman Yurchak <[email protected]> * TST Uses docstring instead of comments * TST Remove call to fit * TST Spelling error * ENH Adds support for drop + infrequent categories * ENH Adds infrequent_if_exist option * DOC Address comments for user guide * DOC Address comments for whats_new * DOC Update docstring based on comments * CLN Update test with suggestions * ENH Adds computed property infrequent_categories_ * DOC Adds where the infrequent column is located * TST Adds more test for infrequent_categories_ * DOC Adds docstring for _compute_drop_idx * CLN Moves _convert_to_infrequent_idx into its own method * TST Increases test coverage * TST Adds failing test * CLN Careful consideration of dropped and inverse_transform * STY Linting * DOC Adds docstrinb about dropping infrequent * DOC Uses only * DOC Numpydoc * TST Includes test for get_feature_names_out * DOC Move whats new * DOC Address docstring comments * DOC Docstring changes * TST Better comments * TST Adds check for handle_unknown='ignore' for infrequent * CLN Make _infrequent_indices private * CLN Change min_frequency default to None * DOC Adds comments * ENH adds support for max_categories=1 * ENH Describe lexicon ordering for ties * DOC Better docstring * STY Fix * CLN Error when explicity dropping an infrequent category * STY Grammar Co-authored-by: Joel Nothman <[email protected]> Co-authored-by: Roman Yurchak <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
75,663
0
146
201
54
259,229
68
scikit-learn
22
sklearn/preprocessing/tests/test_encoders.py
Python
17
{ "docstring": "'a' is the only frequent category, all other categories are infrequent.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/scikit-learn/scikit-learn.git
1
test_decrease_stock_multiple_lines_deallocate_stock_raises_error
def test_decrease_stock_multiple_lines_deallocate_stock_raises_error(order_with_lines): # given order_line_1 = order_with_lines.lines.first() order_line_2 = order_with_lines.lines.last() allocation_1 = order_line_1.allocations.first() allocation_2 = order_line_2.allocations.first() stock_quantity = 100 allocation_1_qty_allocated = 10 allocation_2_qty_allocated = 80 stock_1 = allocation_1.stock stock_2 = allocation_2.stock stock_1.quantity = stock_quantity stock_2.quantity = stock_quantity Stock.objects.bulk_update([stock_1, stock_2], ["quantity"]) allocation_1.quantity_allocated = allocation_1_qty_allocated allocation_1.order_line = order_line_1 warehouse_pk_1 = stock_1.warehouse.pk allocation_2.quantity_allocated = allocation_2_qty_allocated allocation_2.order_line = order_line_2 warehouse_pk_2 = stock_2.warehouse.pk Allocation.objects.bulk_update( [allocation_1, allocation_2], ["quantity_allocated", "order_line"] ) line_1_qty = 50 line_2_qty = 20 # when decrease_stock( [ OrderLineInfo( line=order_line_1, quantity=line_1_qty, variant=order_line_1.variant, warehouse_pk=warehouse_pk_1, ), OrderLineInfo( line=order_line_2, quantity=line_2_qty, variant=order_line_2.variant, warehouse_pk=warehouse_pk_2, ), ], manager=get_plugins_manager(), ) # then stock_1.refresh_from_db() assert stock_1.quantity == stock_quantity - line_1_qty stock_2.refresh_from_db() assert stock_2.quantity == stock_quantity - line_2_qty allocation_1.refresh_from_db() assert allocation_1.quantity_allocated == 0 allocation_2.refresh_from_db() assert allocation_2.quantity_allocated == allocation_2_qty_allocated - line_2_qty
c052f59016d82568a675e2c202ea1363f9e355ff
12
test_stock_management.py
399
Fix incorrect stock allocation (#8963) * Fix incorrect stock allocation * Drop unused restock_order_lines order utils
4,910
0
417
251
70
25,599
117
saleor
37
saleor/warehouse/tests/test_stock_management.py
Python
49
{ "docstring": "Ensure that when some of the lines raise an error during the deallocation\n quantity allocated value for all allocation will be updated.", "language": "en", "n_whitespaces": 24, "n_words": 22, "vocab_size": 21 }
https://github.com/saleor/saleor.git
27
verify_local_collection
def verify_local_collection(local_collection, remote_collection, artifacts_manager): # type: (Candidate, Candidate | None, ConcreteArtifactsManager) -> CollectionVerifyResult result = CollectionVerifyResult(local_collection.fqcn) b_collection_path = to_bytes(local_collection.src, errors='surrogate_or_strict') display.display("Verifying '{coll!s}'.".format(coll=local_collection)) display.display( u"Installed collection found at '{path!s}'". format(path=to_text(local_collection.src)), ) modified_content = [] # type: list[ModifiedContent] verify_local_only = remote_collection is None # partial away the local FS detail so we can just ask generically during validation get_json_from_validation_source = functools.partial(_get_json_from_installed_dir, b_collection_path) get_hash_from_validation_source = functools.partial(_get_file_hash, b_collection_path) if not verify_local_only: # Compare installed version versus requirement version if local_collection.ver != remote_collection.ver: err = ( "{local_fqcn!s} has the version '{local_ver!s}' but " "is being compared to '{remote_ver!s}'".format( local_fqcn=local_collection.fqcn, local_ver=local_collection.ver, remote_ver=remote_collection.ver, ) ) display.display(err) result.success = False return result manifest_file = os.path.join(to_text(b_collection_path, errors='surrogate_or_strict'), MANIFEST_FILENAME) signatures = list(local_collection.signatures) if verify_local_only and local_collection.source_info is not None: signatures = [info["signature"] for info in local_collection.source_info["signatures"]] + signatures elif not verify_local_only and remote_collection.signatures: signatures = list(remote_collection.signatures) + signatures keyring_configured = artifacts_manager.keyring is not None if not keyring_configured and signatures: display.warning( "The GnuPG keyring used for collection signature " "verification was not configured but signatures were " "provided by the Galaxy server. " "Configure a keyring for ansible-galaxy to verify " "the origin of the collection. " "Skipping signature verification." ) elif keyring_configured: if not verify_file_signatures( local_collection.fqcn, manifest_file, signatures, artifacts_manager.keyring, artifacts_manager.required_successful_signature_count, artifacts_manager.ignore_signature_errors, ): result.success = False return result display.vvvv(f"GnuPG signature verification succeeded, verifying contents of {local_collection}") if verify_local_only: # since we're not downloading this, just seed it with the value from disk manifest_hash = get_hash_from_validation_source(MANIFEST_FILENAME) elif keyring_configured and remote_collection.signatures: manifest_hash = get_hash_from_validation_source(MANIFEST_FILENAME) else: # fetch remote b_temp_tar_path = ( # NOTE: AnsibleError is raised on URLError artifacts_manager.get_artifact_path if remote_collection.is_concrete_artifact else artifacts_manager.get_galaxy_artifact_path )(remote_collection) display.vvv( u"Remote collection cached as '{path!s}'".format(path=to_text(b_temp_tar_path)) ) # partial away the tarball details so we can just ask generically during validation get_json_from_validation_source = functools.partial(_get_json_from_tar_file, b_temp_tar_path) get_hash_from_validation_source = functools.partial(_get_tar_file_hash, b_temp_tar_path) # Verify the downloaded manifest hash matches the installed copy before verifying the file manifest manifest_hash = get_hash_from_validation_source(MANIFEST_FILENAME) _verify_file_hash(b_collection_path, MANIFEST_FILENAME, manifest_hash, modified_content) display.display('MANIFEST.json hash: {manifest_hash}'.format(manifest_hash=manifest_hash)) manifest = get_json_from_validation_source(MANIFEST_FILENAME) # Use the manifest to verify the file manifest checksum file_manifest_data = manifest['file_manifest_file'] file_manifest_filename = file_manifest_data['name'] expected_hash = file_manifest_data['chksum_%s' % file_manifest_data['chksum_type']] # Verify the file manifest before using it to verify individual files _verify_file_hash(b_collection_path, file_manifest_filename, expected_hash, modified_content) file_manifest = get_json_from_validation_source(file_manifest_filename) collection_dirs = set() collection_files = { os.path.join(b_collection_path, b'MANIFEST.json'), os.path.join(b_collection_path, b'FILES.json'), } # Use the file manifest to verify individual file checksums for manifest_data in file_manifest['files']: name = manifest_data['name'] if manifest_data['ftype'] == 'file': collection_files.add( os.path.join(b_collection_path, to_bytes(name, errors='surrogate_or_strict')) ) expected_hash = manifest_data['chksum_%s' % manifest_data['chksum_type']] _verify_file_hash(b_collection_path, name, expected_hash, modified_content) if manifest_data['ftype'] == 'dir': collection_dirs.add( os.path.join(b_collection_path, to_bytes(name, errors='surrogate_or_strict')) ) # Find any paths not in the FILES.json for root, dirs, files in os.walk(b_collection_path): for name in files: full_path = os.path.join(root, name) path = to_text(full_path[len(b_collection_path) + 1::], errors='surrogate_or_strict') if full_path not in collection_files: modified_content.append( ModifiedContent(filename=path, expected='the file does not exist', installed='the file exists') ) for name in dirs: full_path = os.path.join(root, name) path = to_text(full_path[len(b_collection_path) + 1::], errors='surrogate_or_strict') if full_path not in collection_dirs: modified_content.append( ModifiedContent(filename=path, expected='the directory does not exist', installed='the directory exists') ) if modified_content: result.success = False display.display( 'Collection {fqcn!s} contains modified content ' 'in the following files:'. format(fqcn=to_text(local_collection.fqcn)), ) for content_change in modified_content: display.display(' %s' % content_change.filename) display.v(" Expected: %s\n Found: %s" % (content_change.expected, content_change.installed)) else: what = "are internally consistent with its manifest" if verify_local_only else "match the remote collection" display.display( "Successfully verified that checksums for '{coll!s}' {what!s}.". format(coll=local_collection, what=what), ) return result
b439e41a915ccec0ccbabecc966919ea406db74e
17
__init__.py
1,307
expand ansible-doc coverage (#74963) * Expand ansible-doc to tests/filters and fix existing issues enable filter/test docs if in single file or companion yaml add docs for several filters/tests plugins allow .yml companion for docs for other plugins, must be colocated verify plugins are valid (not modules, cannot) fix 'per collection' filtering limit old style deprecation (_ prefix) to builtin/legacy start move to pathlib for saner path handling moved some funcitons, kept backwards compat shims with deprecation notice Co-authored-by: Abhijeet Kasurde <[email protected]> Co-authored-by: Felix Fontein <[email protected]> Co-authored-by: Sandra McCann <[email protected]>
78,753
0
1,652
793
300
267,135
550
ansible
79
lib/ansible/galaxy/collection/__init__.py
Python
126
{ "docstring": "Verify integrity of the locally installed collection.\n\n :param local_collection: Collection being checked.\n :param remote_collection: Upstream collection (optional, if None, only verify local artifact)\n :param artifacts_manager: Artifacts manager.\n :return: a collection verify result object.\n ", "language": "en", "n_whitespaces": 48, "n_words": 33, "vocab_size": 29 }
https://github.com/ansible/ansible.git
6
traverse_by
def traverse_by(self, fixers, traversal): if not fixers: return for node in traversal: for fixer in fixers[node.type]: results = fixer.match(node) if results: new = fixer.transform(node, results) if new is not None: node.replace(new) node = new
8198943edd73a363c266633e1aa5b2a9e9c9f526
15
refactor.py
104
add python 3.10.4 for windows
55,531
0
191
66
24
218,888
34
XX-Net
12
python3.10.4/Lib/lib2to3/refactor.py
Python
11
{ "docstring": "Traverse an AST, applying a set of fixers to each node.\n\n This is a helper method for refactor_tree().\n\n Args:\n fixers: a list of fixer instances.\n traversal: a generator that yields AST nodes.\n\n Returns:\n None\n ", "language": "en", "n_whitespaces": 95, "n_words": 34, "vocab_size": 30 }
https://github.com/XX-net/XX-Net.git
3
test_appo_compilation_use_kl_loss
def test_appo_compilation_use_kl_loss(self): config = ppo.appo.DEFAULT_CONFIG.copy() config["num_workers"] = 1 config["use_kl_loss"] = True num_iterations = 2 for _ in framework_iterator(config, with_eager_tracing=True): trainer = ppo.APPOTrainer(config=config, env="CartPole-v0") for i in range(num_iterations): results = trainer.train() check_train_results(results) print(results) check_compute_single_action(trainer) trainer.stop()
8ebc50f844f42e283f125792d630ea6d0a2a7000
12
test_appo.py
152
[RLlib] Issue 21334: Fix APPO when kl_loss is enabled. (#21855)
29,010
0
165
90
27
129,737
34
ray
22
rllib/agents/ppo/tests/test_appo.py
Python
13
{ "docstring": "Test whether an APPOTrainer can be built with kl_loss enabled.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/ray-project/ray.git
2
intercept_unary_unary
async def intercept_unary_unary(self, continuation, client_call_details, request): if context.get_value(_SUPPRESS_INSTRUMENTATION_KEY): return await continuation(client_call_details, request) method = client_call_details.method.decode("utf-8") with self._start_span( method, end_on_exit=False, record_exception=False, set_status_on_exception=False, ) as span: new_details = self.propagate_trace_in_details(client_call_details) continuation_with_args = functools.partial( continuation, new_details, request ) return await self._wrap_unary_response(continuation_with_args, span)
107631e955b21db8a4ddb3bee02130de3650d032
11
_aio_client.py
143
feat(instrumentation): add OpenTelemetry tracing and metrics with basic configurations (#5175)
2,579
0
187
91
32
13,243
38
jina
21
jina/serve/instrumentation/_aio_client.py
Python
15
{ "docstring": "Intercepts a unary-unary invocation asynchronously.\n\n :param continuation: A coroutine that proceeds with the invocation by executing\n the next interceptor in the chain or invoking the actual RPC on the\n underlying Channel. It is the interceptor's responsibility to call it if\n it decides to move the RPC forward. The interceptor can use\n `call = await continuation(client_call_details, request)` to continue with\n the RPC. `continuation` returns the call to the RPC.\n :param client_call_details: A ClientCallDetails object describing the outgoing RPC.\n :param request: The request value for the RPC.\n\n :returns: An object with the RPC response.\n\n :raises: AioRpcError: Indicating that the RPC terminated with non-OK status.\n :raises: asyncio.CancelledError: Indicating that the RPC was canceled.\n ", "language": "en", "n_whitespaces": 215, "n_words": 110, "vocab_size": 70 }
https://github.com/jina-ai/jina.git
7
from_current_timezone
def from_current_timezone(value): if settings.USE_TZ and value is not None and timezone.is_naive(value): current_timezone = timezone.get_current_timezone() try: if not timezone._is_pytz_zone( current_timezone ) and timezone._datetime_ambiguous_or_imaginary(value, current_timezone): raise ValueError("Ambiguous or non-existent time.") return timezone.make_aware(value, current_timezone) except Exception as exc: raise ValidationError( _( "%(datetime)s couldn’t be interpreted " "in time zone %(current_timezone)s; it " "may be ambiguous or it may not exist." ), code="ambiguous_timezone", params={"datetime": value, "current_timezone": current_timezone}, ) from exc return value
9c19aff7c7561e3a82978a272ecdaad40dda5c00
16
utils.py
170
Refs #33476 -- Reformatted code with Black.
51,326
0
300
100
54
206,016
68
django
18
django/forms/utils.py
Python
20
{ "docstring": "\n When time zone support is enabled, convert naive datetimes\n entered in the current time zone to aware datetimes.\n ", "language": "en", "n_whitespaces": 28, "n_words": 18, "vocab_size": 16 }
https://github.com/django/django.git
1
active
def active(self) -> Optional[Scope]: ctx = current_context() return ctx.scope
6ad012ef89c966cbb3616c1be63d964db48d49ca
8
scopecontextmanager.py
35
More type hints for `synapse.logging` (#13103) Completes type hints for synapse.logging.scopecontextmanager and (partially) for synapse.logging.opentracing.
72,415
0
30
20
9
248,680
9
synapse
7
synapse/logging/scopecontextmanager.py
Python
13
{ "docstring": "\n Returns the currently active Scope which can be used to access the\n currently active Scope.span.\n If there is a non-null Scope, its wrapped Span\n becomes an implicit parent of any newly-created Span at\n Tracer.start_active_span() time.\n\n Return:\n The Scope that is active, or None if not available.\n ", "language": "en", "n_whitespaces": 107, "n_words": 46, "vocab_size": 40 }
https://github.com/matrix-org/synapse.git
1
isocalendar
def isocalendar(self): return self._get_values().isocalendar().set_index(self._parent.index)
5531195f6f0d87817a704b288008809a3c98a304
11
accessors.py
44
fix-ci-isocalendar (#46690)
39,746
0
18
25
4
165,949
4
pandas
6
pandas/core/indexes/accessors.py
Python
2
{ "docstring": "\n Calculate year, week, and day according to the ISO 8601 standard.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n DataFrame\n With columns year, week and day.\n\n See Also\n --------\n Timestamp.isocalendar : Function return a 3-tuple containing ISO year,\n week number, and weekday for the given Timestamp object.\n datetime.date.isocalendar : Return a named tuple object with\n three components: year, week and weekday.\n\n Examples\n --------\n >>> ser = pd.to_datetime(pd.Series([\"2010-01-01\", pd.NaT]))\n >>> ser.dt.isocalendar()\n year week day\n 0 2009 53 5\n 1 <NA> <NA> <NA>\n >>> ser.dt.isocalendar().week\n 0 53\n 1 <NA>\n Name: week, dtype: UInt32\n ", "language": "en", "n_whitespaces": 293, "n_words": 88, "vocab_size": 64 }
https://github.com/pandas-dev/pandas.git
9
update
def update(self, user=None, next_task=None): if self.status != self.STATUS_IN_PROGRESS: # Updating a completed or cancelled workflow should have no effect return try: current_status = self.current_task_state.status except AttributeError: current_status = None if current_status == TaskState.STATUS_REJECTED: self.status = self.STATUS_NEEDS_CHANGES self.save() workflow_rejected.send(sender=self.__class__, instance=self, user=user) else: if not next_task: next_task = self.get_next_task() if next_task: if ( (not self.current_task_state) or self.current_task_state.status != self.current_task_state.STATUS_IN_PROGRESS ): # if not on a task, or the next task to move to is not the current task (ie current task's status is # not STATUS_IN_PROGRESS), move to the next task self.current_task_state = next_task.specific.start(self, user=user) self.save() # if task has auto-approved, update the workflow again if ( self.current_task_state.status != self.current_task_state.STATUS_IN_PROGRESS ): self.update(user=user) # otherwise, continue on the current task else: # if there is no uncompleted task, finish the workflow. self.finish(user=user)
d10f15e55806c6944827d801cd9c2d53f5da4186
17
__init__.py
275
Reformat with black
16,154
0
630
168
71
73,874
129
wagtail
22
wagtail/core/models/__init__.py
Python
29
{ "docstring": "Checks the status of the current task, and progresses (or ends) the workflow if appropriate. If the workflow progresses,\n next_task will be used to start a specific task next if provided.", "language": "en", "n_whitespaces": 37, "n_words": 31, "vocab_size": 26 }
https://github.com/wagtail/wagtail.git
2
new_name
def new_name(self, template="xxx_todo_changeme"): name = template while name in self.used_names: name = template + str(next(self.numbers)) self.used_names.add(name) return name
8198943edd73a363c266633e1aa5b2a9e9c9f526
14
fixer_base.py
73
add python 3.10.4 for windows
55,411
0
64
43
13
218,598
18
XX-Net
9
python3.10.4/Lib/lib2to3/fixer_base.py
Python
6
{ "docstring": "Return a string suitable for use as an identifier\n\n The new name is guaranteed not to conflict with other identifiers.\n ", "language": "en", "n_whitespaces": 34, "n_words": 20, "vocab_size": 20 }
https://github.com/XX-net/XX-Net.git
4
getxmp
def getxmp(self): for segment, content in self.applist: if segment == "APP1": marker, xmp_tags = content.rsplit(b"\x00", 1) if marker == b"http://ns.adobe.com/xap/1.0/": return self._getxmp(xmp_tags) return {}
601c9d8515dba996af3f0b96d1a671619de37f10
13
JpegImagePlugin.py
83
Fix return in docs
69,830
0
105
49
21
242,323
24
Pillow
9
src/PIL/JpegImagePlugin.py
Python
7
{ "docstring": "\n Returns a dictionary containing the XMP tags.\n Requires defusedxml to be installed.\n\n :returns: XMP tags in a dictionary.\n ", "language": "en", "n_whitespaces": 47, "n_words": 18, "vocab_size": 16 }
https://github.com/python-pillow/Pillow.git
1
alias
def alias(self, *args, **kwargs): self._not_support_combined_queries("alias") return self._annotate(args, kwargs, select=False)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
8
query.py
51
Refs #33476 -- Reformatted code with Black.
51,184
0
30
31
9
205,739
9
django
7
django/db/models/query.py
Python
3
{ "docstring": "\n Return a query set with added aliases for extra data or aggregations.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
https://github.com/django/django.git
2
agg
def agg(self, agg): assert isinstance(agg, str) agg_exprs = OrderedDict() for col in self.columns: agg_exprs[col] = AggregateExpr(agg, self.ref(col)) return self.__constructor__( columns=self.columns, dtypes=self._dtypes_for_exprs(agg_exprs), op=GroupbyAggNode(self, [], agg_exprs, {"sort": False}), index_cols=None, force_execution_mode=self._force_execution_mode, )
e5b1888cd932909e49194d58035da34b210b91c4
13
dataframe.py
138
FEAT-#4946: Replace OmniSci with HDK (#4947) Co-authored-by: Iaroslav Igoshev <[email protected]> Signed-off-by: Andrey Pavlenko <[email protected]>
36,085
0
137
92
28
154,575
29
modin
18
modin/experimental/core/execution/native/implementations/hdk_on_native/dataframe/dataframe.py
Python
12
{ "docstring": "\n Perform specified aggregation along columns.\n\n Parameters\n ----------\n agg : str\n Name of the aggregation function to perform.\n\n Returns\n -------\n HdkOnNativeDataframe\n New frame containing the result of aggregation.\n ", "language": "en", "n_whitespaces": 106, "n_words": 27, "vocab_size": 24 }
https://github.com/modin-project/modin.git
5
get_feature_names_out
def get_feature_names_out(self, input_features=None): powers = self.powers_ input_features = _check_feature_names_in(self, input_features) feature_names = [] for row in powers: inds = np.where(row)[0] if len(inds): name = " ".join( "%s^%d" % (input_features[ind], exp) if exp != 1 else input_features[ind] for ind, exp in zip(inds, row[inds]) ) else: name = "1" feature_names.append(name) return np.asarray(feature_names, dtype=object)
279388d9ed2ea83194dd45a2d78161be30b43aa7
16
_polynomial.py
176
DOC Improve get_feature_names_out docstrings (#22718) Co-authored-by: Thomas J. Fan <[email protected]>
75,579
0
258
111
41
259,120
51
scikit-learn
21
sklearn/preprocessing/_polynomial.py
Python
17
{ "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Input features.\n\n - If `input_features is None`, then `feature_names_in_` is\n used as feature names in. If `feature_names_in_` is not defined,\n then the following input feature names are generated:\n `[\"x0\", \"x1\", ..., \"x(n_features_in_ - 1)\"]`.\n - If `input_features` is an array-like, then `input_features` must\n match `feature_names_in_` if `feature_names_in_` is defined.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n Transformed feature names.\n ", "language": "en", "n_whitespaces": 221, "n_words": 76, "vocab_size": 54 }
https://github.com/scikit-learn/scikit-learn.git
1
revert_challenge_config
def revert_challenge_config(self) -> None: self.revert_temporary_config() self.new_vhost = None self.parser.load()
16aad35d31a887dab157f9d4f5e0fe9218d06064
8
configurator.py
45
Fully type certbot-nginx module (#9124) * Work in progress * Fix type * Work in progress * Work in progress * Work in progress * Work in progress * Work in progress * Oups. * Fix typing in UnspacedList * Fix logic * Finish typing * List certbot-nginx as fully typed in tox * Fix lint * Fix checks * Organize imports * Fix typing for Python 3.6 * Fix checks * Fix lint * Update certbot-nginx/certbot_nginx/_internal/configurator.py Co-authored-by: alexzorin <[email protected]> * Update certbot-nginx/certbot_nginx/_internal/configurator.py Co-authored-by: alexzorin <[email protected]> * Fix signature of deploy_cert regarding the installer interface * Update certbot-nginx/certbot_nginx/_internal/obj.py Co-authored-by: alexzorin <[email protected]> * Fix types * Update certbot-nginx/certbot_nginx/_internal/parser.py Co-authored-by: alexzorin <[email protected]> * Precise type * Precise _coerce possible inputs/outputs * Fix type * Update certbot-nginx/certbot_nginx/_internal/http_01.py Co-authored-by: ohemorange <[email protected]> * Fix type * Remove an undesirable implementation. * Fix type Co-authored-by: alexzorin <[email protected]> Co-authored-by: ohemorange <[email protected]>
45,497
0
37
25
9
186,581
9
certbot
6
certbot-nginx/certbot_nginx/_internal/configurator.py
Python
9
{ "docstring": "Used to cleanup challenge configurations.\n\n :raises .errors.PluginError: If unable to revert the challenge config.\n\n ", "language": "en", "n_whitespaces": 28, "n_words": 14, "vocab_size": 12 }
https://github.com/certbot/certbot.git
7
execute
def execute(): frappe.reload_doc("Accounts", "doctype", "Salary Component Account") if frappe.db.has_column("Salary Component Account", "default_account"): rename_field("Salary Component Account", "default_account", "account") doctype_list = [ {"module": "HR", "doctype": "Employee Advance"}, {"module": "HR", "doctype": "Leave Encashment"}, {"module": "Payroll", "doctype": "Additional Salary"}, {"module": "Payroll", "doctype": "Employee Benefit Application"}, {"module": "Payroll", "doctype": "Employee Benefit Claim"}, {"module": "Payroll", "doctype": "Employee Incentive"}, {"module": "Payroll", "doctype": "Employee Tax Exemption Declaration"}, {"module": "Payroll", "doctype": "Employee Tax Exemption Proof Submission"}, {"module": "Payroll", "doctype": "Income Tax Slab"}, {"module": "Payroll", "doctype": "Payroll Entry"}, {"module": "Payroll", "doctype": "Retention Bonus"}, {"module": "Payroll", "doctype": "Salary Structure"}, {"module": "Payroll", "doctype": "Salary Structure Assignment"}, {"module": "Payroll", "doctype": "Salary Slip"}, ] for item in doctype_list: frappe.reload_doc(item["module"], "doctype", item["doctype"]) # update company in employee advance based on employee company for dt in [ "Employee Incentive", "Leave Encashment", "Employee Benefit Application", "Employee Benefit Claim", ]: frappe.db.sql( .format( doctype=dt ) ) # update exchange rate for employee advance frappe.db.sql("update `tabEmployee Advance` set exchange_rate=1") # get all companies and it's currency all_companies = frappe.db.get_all( "Company", fields=["name", "default_currency", "default_payroll_payable_account"] ) for d in all_companies: company = d.name company_currency = d.default_currency default_payroll_payable_account = d.default_payroll_payable_account if not default_payroll_payable_account: default_payroll_payable_account = frappe.db.get_value( "Account", { "account_name": _("Payroll Payable"), "company": company, "account_currency": company_currency, "is_group": 0, }, ) # update currency in following doctypes based on company currency doctypes_for_currency = [ "Employee Advance", "Leave Encashment", "Employee Benefit Application", "Employee Benefit Claim", "Employee Incentive", "Additional Salary", "Employee Tax Exemption Declaration", "Employee Tax Exemption Proof Submission", "Income Tax Slab", "Retention Bonus", "Salary Structure", ] for dt in doctypes_for_currency: frappe.db.sql( .format(doctype=dt), (company_currency, company), ) # update fields in payroll entry frappe.db.sql( , (company_currency, default_payroll_payable_account, company), ) # update fields in Salary Structure Assignment frappe.db.sql( , (company_currency, default_payroll_payable_account, company), ) # update fields in Salary Slip frappe.db.sql( , (company_currency, company), )
494bd9ef78313436f0424b918f200dab8fc7c20b
17
updates_for_multi_currency_payroll.py
773
style: format code with black
14,360
0
197
415
133
66,842
285
erpnext
24
erpnext/patches/v13_0/updates_for_multi_currency_payroll.py
Python
106
{ "docstring": "\n\t\t\tupdate `tab{doctype}`\n\t\t\tset company = (select company from tabEmployee where name=`tab{doctype}`.employee)\n\t\tupdate `tab{doctype}` set currency = %s where company=%s\n\t\t\tupdate `tabPayroll Entry`\n\t\t\tset currency = %s,\n\t\t\t\texchange_rate = 1,\n\t\t\t\tpayroll_payable_account=%s\n\t\t\twhere company=%s\n\t\t\n\t\t\tupdate `tabSalary Structure Assignment`\n\t\t\tset currency = %s,\n\t\t\t\tpayroll_payable_account=%s\n\t\t\twhere company=%s\n\t\t\n\t\t\tupdate `tabSalary Slip`\n\t\t\tset currency = %s,\n\t\t\t\texchange_rate = 1,\n\t\t\t\tbase_hour_rate = hour_rate,\n\t\t\t\tbase_gross_pay = gross_pay,\n\t\t\t\tbase_total_deduction = total_deduction,\n\t\t\t\tbase_net_pay = net_pay,\n\t\t\t\tbase_rounded_total = rounded_total,\n\t\t\t\tbase_total_in_words = total_in_words\n\t\t\twhere company=%s\n\t\t", "language": "en", "n_whitespaces": 51, "n_words": 73, "vocab_size": 35 }
https://github.com/frappe/erpnext.git
5
get_libdir
def get_libdir(self): # Module unavailable if not self.available: raise ValueError(f"Module {self.name} {self.version} is unavailable!") # Module has no associated shared libraries if not self.sharedlibs: return None for lib in self.sharedlibs: path = findSystemLibrary(lib) if path: return os.path.normpath(os.path.dirname(path)) raise ValueError(f"Could not resolve any shared library of {self.name} {self.version}: {self.sharedlibs}!")
684bfac8adcf254fec5777f212c13eb62181f900
14
gi.py
143
hooks: refactor GObject introspection (gi) hooks The modules imported from gi.repository are marked as runtime modules by their corresponding pre-safe-import-module hooks. Therefore, their standard hooks are always loaded and executed, regardless of whether the modue is actually importable or not. In PyInstaller v5, this behavior triggers errors in hooks for GI modules that are not importable, because the new `isolated` framework propagates the errors instead of swallowing them. While these errors could be caught and demoted to warnings to match the old behavior, it would be better hooks checked whether module is importable before doing any processing at all. To that end, we introduce new class, `GiModuleInfo` that, as part of its initialization, allows us to: - perform availability check - obtain data previously returned by `get_gi_typelibs` - obtain data previously returned by `get_gi_libdir` using a single isolated import attempt (instead of one being performed in each of those steps). In addition, if passed `hook_api` as an optional argument, the `GiModuleInfo` can use hook configuration API to override the GI module version to be collected (which allows the standard use pattern to be removed from the hook itself). The old `get_gi_typelibs` and `get_gi_libdir` functions now internally use `GiModuleInfo` to provide backward compatible behavior to (potential) exetnal user. All `gi` hooks are ported to the `GiModuleInfo` and now become no-op if the module is not available. In addition, hooks are cleaned up/refactored so that all processing is performed either in the loading stage ("simple" hooks that do not require access to hook configuration API) or in the `hook()` function (hooks that require access to hook configuration API), but not in the mixture of the two.
77,455
0
156
64
37
263,831
48
pyinstaller
13
PyInstaller/utils/hooks/gi.py
Python
10
{ "docstring": "\n Return the path to shared library used by the module. If no libraries are associated with the typelib, None is\n returned. If multiple library names are associated with the typelib, the path to the first resolved shared\n library is returned. Raises exception if module is unavailable or none of the shared libraries could be\n resolved.\n ", "language": "en", "n_whitespaces": 91, "n_words": 55, "vocab_size": 34 }
https://github.com/pyinstaller/pyinstaller.git
2
ratio
def ratio(self): matches = sum(triple[-1] for triple in self.get_matching_blocks()) return _calculate_ratio(matches, len(self.a) + len(self.b))
8198943edd73a363c266633e1aa5b2a9e9c9f526
11
difflib.py
70
add python 3.10.4 for windows
56,607
0
35
43
14
222,511
14
XX-Net
10
python3.10.4/Lib/difflib.py
Python
3
{ "docstring": "Return a measure of the sequences' similarity (float in [0,1]).\n\n Where T is the total number of elements in both sequences, and\n M is the number of matches, this is 2.0*M / T.\n Note that this is 1 if the sequences are identical, and 0 if\n they have nothing in common.\n\n .ratio() is expensive to compute if you haven't already computed\n .get_matching_blocks() or .get_opcodes(), in which case you may\n want to try .quick_ratio() or .real_quick_ratio() first to get an\n upper bound.\n\n >>> s = SequenceMatcher(None, \"abcd\", \"bcde\")\n >>> s.ratio()\n 0.75\n >>> s.quick_ratio()\n 0.75\n >>> s.real_quick_ratio()\n 1.0\n ", "language": "en", "n_whitespaces": 208, "n_words": 96, "vocab_size": 71 }
https://github.com/XX-net/XX-Net.git
2
_linear_eq_to_dict_inner
def _linear_eq_to_dict_inner(eqs, syms): syms = set(syms) eqsdict, ind = [], [] for eq in eqs: c, eqdict = _lin_eq2dict(eq, syms) eqsdict.append(eqdict) ind.append(c) return eqsdict, ind
041eeb41b6a083dd106a0e6316d6f07e2248cd61
10
linsolve.py
88
linear_coeffs has nonstrict option
49,076
0
61
54
21
198,995
25
sympy
11
sympy/polys/matrices/linsolve.py
Python
8
{ "docstring": "Convert a system Expr/Eq equations into dict form, returning\n the coefficient dictionaries and a list of syms-independent terms\n from each expression in ``eqs```.\n\n Examples\n ========\n\n >>> from sympy.polys.matrices.linsolve import _linear_eq_to_dict_inner as F\n >>> from sympy.abc import x\n >>> F([2*x + 3], {x})\n ([{x: 2}], [3])\n ", "language": "en", "n_whitespaces": 72, "n_words": 45, "vocab_size": 39 }
https://github.com/sympy/sympy.git
3
construct_actor_groups
def construct_actor_groups(actors): actor_groups = _group_actors_by_python_class(actors) stats_by_group = { name: _get_actor_group_stats(group) for name, group in actor_groups.items() } summarized_actor_groups = {} for name, group in actor_groups.items(): summarized_actor_groups[name] = { "entries": group, "summary": stats_by_group[name], } return summarized_actor_groups
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
11
actor_utils.py
110
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,041
0
98
67
24
129,861
34
ray
10
dashboard/modules/actor/actor_utils.py
Python
12
{ "docstring": "actors is a dict from actor id to an actor or an\n actor creation task The shared fields currently are\n \"actorClass\", \"actorId\", and \"state\" ", "language": "en", "n_whitespaces": 30, "n_words": 24, "vocab_size": 21 }
https://github.com/ray-project/ray.git
1
test_change_view_history_link
def test_change_view_history_link(self): url = reverse( "admin:%s_modelwithstringprimarykey_change" % ModelWithStringPrimaryKey._meta.app_label, args=(quote(self.pk),), ) response = self.client.get(url) self.assertEqual(response.status_code, 200) expected_link = reverse( "admin:%s_modelwithstringprimarykey_history" % ModelWithStringPrimaryKey._meta.app_label, args=(quote(self.pk),), ) self.assertContains( response, '<a href="%s" class="historylink"' % escape(expected_link) )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
13
tests.py
144
Refs #33476 -- Reformatted code with Black.
52,085
0
171
89
22
207,753
31
django
18
tests/admin_views/tests.py
Python
16
{ "docstring": "Object history button link should work and contain the pk value quoted.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/django/django.git
10
generate_dimino
def generate_dimino(self, af=False): idn = list(range(self.degree)) order = 0 element_list = [idn] set_element_list = {tuple(idn)} if af: yield idn else: yield _af_new(idn) gens = [p._array_form for p in self.generators] for i in range(len(gens)): # D elements of the subgroup G_i generated by gens[:i] D = element_list[:] N = [idn] while N: A = N N = [] for a in A: for g in gens[:i + 1]: ag = _af_rmul(a, g) if tuple(ag) not in set_element_list: # produce G_i*g for d in D: order += 1 ap = _af_rmul(d, ag) if af: yield ap else: p = _af_new(ap) yield p element_list.append(ap) set_element_list.add(tuple(ap)) N.append(ap) self._order = len(element_list)
498015021131af4dbb07eb110e5badaba8250c7b
23
perm_groups.py
305
Updated import locations
47,647
0
728
187
68
196,147
106
sympy
30
sympy/combinatorics/perm_groups.py
Python
32
{ "docstring": "Yield group elements using Dimino's algorithm.\n\n If ``af == True`` it yields the array form of the permutations.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation, PermutationGroup\n >>> a = Permutation([0, 2, 1, 3])\n >>> b = Permutation([0, 2, 3, 1])\n >>> g = PermutationGroup([a, b])\n >>> list(g.generate_dimino(af=True))\n [[0, 1, 2, 3], [0, 2, 1, 3], [0, 2, 3, 1],\n [0, 1, 3, 2], [0, 3, 2, 1], [0, 3, 1, 2]]\n\n References\n ==========\n\n .. [1] The Implementation of Various Algorithms for Permutation Groups in\n the Computer Algebra System: AXIOM, N.J. Doye, M.Sc. Thesis\n\n ", "language": "en", "n_whitespaces": 206, "n_words": 93, "vocab_size": 64 }
https://github.com/sympy/sympy.git
4
size
def size(self) -> DataFrame | Series: result = self.grouper.size() if self.axis == 1: return DataFrame( data=np.tile(result.values, (self.obj.shape[0], 1)), columns=result.index, index=self.obj.index, ) # GH28330 preserve subclassed Series/DataFrames through calls if issubclass(self.obj._constructor, Series): result = self._obj_1d_constructor(result, name=self.obj.name) else: result = self._obj_1d_constructor(result) if not self.as_index: # Item "None" of "Optional[Series]" has no attribute "reset_index" result = result.rename("size").reset_index() # type: ignore[union-attr] return self._reindex_output(result, fill_value=0)
15a06d3d9e7656afff239da7a295a7b684456680
16
groupby.py
214
BUG: groupby.size and groupby.transform('size') incorrect for axis=1 (#45987)
39,620
0
228
135
49
164,913
60
pandas
24
pandas/core/groupby/groupby.py
Python
24
{ "docstring": "\n Compute group sizes.\n\n Returns\n -------\n DataFrame or Series\n Number of rows in each group as a Series if as_index is True\n or a DataFrame if as_index is False.\n ", "language": "en", "n_whitespaces": 86, "n_words": 28, "vocab_size": 20 }
https://github.com/pandas-dev/pandas.git
7
_iter_requires_txt_entries
def _iter_requires_txt_entries(self) -> Iterator[RequiresEntry]: content = self._dist.read_text("requires.txt") if content is None: return extra = marker = "" # Section-less entries don't have markers. for line in content.splitlines(): line = line.strip() if not line or line.startswith("#"): # Comment; ignored. continue if line.startswith("[") and line.endswith("]"): # A section header. extra, _, marker = line.strip("[]").partition(":") continue yield RequiresEntry(requirement=line, extra=extra, marker=marker)
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
15
_dists.py
191
Vendor in pip 22.1.2
3,750
0
195
108
44
21,283
57
pipenv
17
pipenv/patched/notpip/_internal/metadata/importlib/_dists.py
Python
23
{ "docstring": "Parse a ``requires.txt`` in an egg-info directory.\n\n This is an INI-ish format where an egg-info stores dependencies. A\n section name describes extra other environment markers, while each entry\n is an arbitrary string (not a key-value pair) representing a dependency\n as a requirement string (no markers).\n\n There is a construct in ``importlib.metadata`` called ``Sectioned`` that\n does mostly the same, but the format is currently considered private.\n ", "language": "en", "n_whitespaces": 114, "n_words": 65, "vocab_size": 50 }
https://github.com/pypa/pipenv.git
6
named_parameters
def named_parameters(self, *args, **kwargs): arch = kwargs.pop('arch', False) for name, p in super().named_parameters(*args, **kwargs): if any(name == par_name for par_name in self._arch_parameter_names): if arch: yield name, p else: if not arch: yield name, p
14d2966b9e91ae16dcc39de8f41017a75cec8ff9
14
differentiable.py
117
Valuechoice oneshot lightning (#4602)
24,602
0
145
71
22
112,161
34
nni
12
nni/retiarii/oneshot/pytorch/supermodule/differentiable.py
Python
9
{ "docstring": "Named parameters excluding architecture parameters.", "language": "en", "n_whitespaces": 4, "n_words": 5, "vocab_size": 5 }
https://github.com/microsoft/nni.git
2
get_stats
def get_stats(self) -> SystemStats: deleted = self.db.query(Message.deleted, func.count()).group_by(Message.deleted) nthreads = self.db.query(None, func.count(Message.id)).filter(Message.parent_id.is_(None)) query = deleted.union_all(nthreads) result = {k: v for k, v in query.all()} return SystemStats( all=result.get(True, 0) + result.get(False, 0), active=result.get(False, 0), deleted=result.get(True, 0), message_trees=result.get(None, 0), )
ef3a35ff9c9da7aab45e6f40103f32558f861e9b
13
prompt_repository.py
222
fixes
54,687
0
131
146
31
216,799
38
Open-Assistant
23
backend/oasst_backend/prompt_repository.py
Python
15
{ "docstring": "\n Get data stats such as number of all messages in the system,\n number of deleted and active messages and number of message trees.\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 17 }
https://github.com/LAION-AI/Open-Assistant.git
1
make_sampling_table
def make_sampling_table(size, sampling_factor=1e-5): gamma = 0.577 rank = np.arange(size) rank[0] = 1 inv_fq = rank * (np.log(rank) + gamma) + 0.5 - 1. / (12. * rank) f = sampling_factor * inv_fq return np.minimum(1., f / np.sqrt(f)) @keras_export('keras.preprocessing.sequence.skipgrams')
f1aa8b7d2a0c89591c5c42eca5b6f013114a7bbd
@keras_export('keras.preprocessing.sequence.skipgrams')
13
sequence.py
125
Copy sequence utils from keras_preprocessing directly into core keras PiperOrigin-RevId: 424915569
79,742
1
44
81
27
268,874
38
keras
13
keras/preprocessing/sequence.py
Python
7
{ "docstring": "Generates a word rank-based probabilistic sampling table.\n\n Used for generating the `sampling_table` argument for `skipgrams`.\n `sampling_table[i]` is the probability of sampling\n the word i-th most common word in a dataset\n (more common words should be sampled less frequently, for balance).\n\n The sampling probabilities are generated according\n to the sampling distribution used in word2vec:\n\n ```\n p(word) = (min(1, sqrt(word_frequency / sampling_factor) /\n (word_frequency / sampling_factor)))\n ```\n\n We assume that the word frequencies follow Zipf's law (s=1) to derive\n a numerical approximation of frequency(rank):\n\n `frequency(rank) ~ 1/(rank * (log(rank) + gamma) + 1/2 - 1/(12*rank))`\n where `gamma` is the Euler-Mascheroni constant.\n\n Args:\n size: Int, number of possible words to sample.\n sampling_factor: The sampling factor in the word2vec formula.\n\n Returns:\n A 1D Numpy array of length `size` where the ith entry\n is the probability that a word of rank i should be sampled.\n ", "language": "en", "n_whitespaces": 181, "n_words": 140, "vocab_size": 97 }
https://github.com/keras-team/keras.git
3
execute
def execute(): company = frappe.get_all("Company", filters={"country": "India"}) if not company: return frappe.reload_doc("Payroll", "doctype", "payroll_period") frappe.reload_doc("Payroll", "doctype", "employee_tax_exemption_declaration") frappe.reload_doc("Payroll", "doctype", "employee_tax_exemption_proof_submission") frappe.reload_doc("Payroll", "doctype", "employee_tax_exemption_declaration_category") frappe.reload_doc("Payroll", "doctype", "employee_tax_exemption_proof_submission_detail") frappe.reload_doc("accounts", "doctype", "tax_category") for doctype in ["Sales Invoice", "Delivery Note", "Purchase Invoice"]: frappe.db.sql( , doctype, ) make_custom_fields() frappe.db.sql( ) frappe.db.sql( )
494bd9ef78313436f0424b918f200dab8fc7c20b
12
sync_india_custom_fields.py
241
style: format code with black
14,367
0
27
126
34
66,869
47
erpnext
10
erpnext/patches/v8_7/sync_india_custom_fields.py
Python
31
{ "docstring": "delete from `tabCustom Field` where dt = %s\n\t\t\tand fieldname in ('port_code', 'shipping_bill_number', 'shipping_bill_date')\n\t\tupdate `tabCustom Field`\n\t\tset reqd = 0, `default` = ''\n\t\twhere fieldname = 'reason_for_issuing_document'\n\t\n\t\tupdate tabAddress\n\t\tset gst_state_number=concat(\"0\", gst_state_number)\n\t\twhere ifnull(gst_state_number, '') != '' and gst_state_number<10\n\t", "language": "en", "n_whitespaces": 32, "n_words": 40, "vocab_size": 28 }
https://github.com/frappe/erpnext.git
3
run
def run(self, request, pk): # Check that the user has permission to run reports. if not request.user.has_perm('extras.run_report'): raise PermissionDenied("This user does not have permission to run reports.") # Check that at least one RQ worker is running if not Worker.count(get_connection('default')): raise RQWorkerNotRunningException() # Retrieve and run the Report. This will create a new JobResult. report = self._retrieve_report(pk) report_content_type = ContentType.objects.get(app_label='extras', model='report') job_result = JobResult.enqueue_job( run_report, report.full_name, report_content_type, request.user, job_timeout=report.job_timeout ) report.result = job_result serializer = serializers.ReportDetailSerializer(report, context={'request': request}) return Response(serializer.data) # # Scripts #
36d6ae33d15e93cc552827cdea363a9c00c7f823
12
views.py
201
Allow setting individual timeouts on scripts and reports
77,772
0
249
118
62
264,636
84
netbox
32
netbox/extras/api/views.py
Python
17
{ "docstring": "\n Run a Report identified as \"<module>.<script>\" and return the pending JobResult as the result\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 12 }
https://github.com/netbox-community/netbox.git
1
test_purge_room_and_not_block
def test_purge_room_and_not_block(self) -> None: # Test that room is not purged with self.assertRaises(AssertionError): self._is_purged(self.room_id) # Test that room is not blocked self._is_blocked(self.room_id, expect=False) # Assert one user in room self._is_member(room_id=self.room_id, user_id=self.other_user) channel = self.make_request( "DELETE", self.url.encode("ascii"), content={"block": False, "purge": True}, access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertEqual(None, channel.json_body["new_room_id"]) self.assertEqual(self.other_user, channel.json_body["kicked_users"][0]) self.assertIn("failed_to_kick_users", channel.json_body) self.assertIn("local_aliases", channel.json_body) self._is_purged(self.room_id) self._is_blocked(self.room_id, expect=False) self._has_no_members(self.room_id)
c97042f7eef3748e17c90e48a4122389a89c4735
12
test_room.py
298
Use literals in place of `HTTPStatus` constants in tests (#13469)
72,642
0
231
183
45
249,135
57
synapse
24
tests/rest/admin/test_room.py
Python
22
{ "docstring": "Test to purge a room and do not block it.\n Members will not be moved to a new room and will not receive a message.\n ", "language": "en", "n_whitespaces": 39, "n_words": 25, "vocab_size": 17 }
https://github.com/matrix-org/synapse.git
1
get_next_connection
async def get_next_connection(self): return await self._get_next_connection(num_retries=3)
d21870ac47fc1594b45a5f01ee48cddf5c18b2ff
9
networking.py
30
fix: fix reconnect issues (#4941)
2,365
0
20
16
6
12,659
6
jina
4
jina/serve/networking.py
Python
2
{ "docstring": "\n Returns a connection from the list. Strategy is round robin\n :returns: A connection from the pool\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 13 }
https://github.com/jina-ai/jina.git
3
load_data_for_viz
def load_data_for_viz(load_type, model_file_statistics, dtype=int, ground_truth_split=2) -> Dict[str, Any]: supported_load_types = dict( load_json=load_json, load_from_file=partial(load_from_file, dtype=dtype, ground_truth_split=ground_truth_split), ) loader = supported_load_types[load_type] # Loads training stats from JSON file(s). try: stats_per_model = [loader(stats_f) for stats_f in model_file_statistics] except (TypeError, AttributeError): logger.exception(f"Unable to open model statistics file {model_file_statistics}!") raise return stats_per_model
4f40ffec8e81eb3f6385243498babe1409a675be
12
visualize.py
131
Changes learning_curves to use "step" or "epoch" as x-axis label. (#2578) * Started building dataclasses for model training output. * Adds EvaluationFrequency to training stats, dataclasses for training results. * Adds x_label, x_step to learning_curves. * fix x axis when using checkpoints_per_epoch. * Fixes CLI test by making dataclass JSON-serializable and implementing __contains__. * Adds default value for EvaluationFrequency, maybe fixes test_learning_curves with only training metrics. * Fixes kfold CV. * Fixes viz tests, restoring original functionality of load_data_for_viz * Adds todos to deprecate.
1,405
0
106
84
44
8,364
47
ludwig
21
ludwig/visualize.py
Python
19
{ "docstring": "Load JSON files (training stats, evaluation stats...) for a list of models.\n\n :param load_type: type of the data loader to be used.\n :param model_file_statistics: JSON file or list of json files containing any\n model experiment stats.\n :return List of training statistics loaded as json objects.\n ", "language": "en", "n_whitespaces": 67, "n_words": 45, "vocab_size": 37 }
https://github.com/ludwig-ai/ludwig.git
7
check_input_folder
def check_input_folder(self) -> Optional[cv2.VideoCapture]: err = None loadtype = self.__class__.__name__ if not self.folder: err = f"ERROR: A {loadtype} folder must be specified" elif not os.path.exists(self.folder): err = f"ERROR: The {loadtype} location {self.folder} could not be found" if err: logger.error(err) sys.exit(0) if (loadtype == "Frames" and os.path.isfile(self.folder) and os.path.splitext(self.folder)[1].lower() in _video_extensions): logger.verbose("Video exists at: '%s'", self.folder) # type: ignore retval = cv2.VideoCapture(self.folder) # pylint: disable=no-member # TODO ImageIO single frame seek seems slow. Look into this # retval = imageio.get_reader(self.folder, "ffmpeg") else: logger.verbose("Folder exists at '%s'", self.folder) # type: ignore retval = None return retval
e2a77e7c6e84e81f642cb22f528e25e3f2d2dbc1
14
media.py
252
Alignments Tool - Typing, Documentation + Re-org
21,148
0
300
140
65
101,744
94
faceswap
23
tools/alignments/media.py
Python
27
{ "docstring": " makes sure that the frames or faces folder exists\n If frames folder contains a video file return imageio reader object\n\n Returns\n -------\n :class:`cv2.VideoCapture`\n Object for reading a video stream\n ", "language": "en", "n_whitespaces": 80, "n_words": 29, "vocab_size": 25 }
https://github.com/deepfakes/faceswap.git
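A stripped-down sketch of the folder/video check the faceswap helper above performs, assuming OpenCV (`opencv-python`) is installed. The extension set and function name are placeholders, not the tool's actual constants.

```python
import os
import cv2  # opencv-python

VIDEO_EXTENSIONS = {".mp4", ".avi", ".mkv", ".mov"}  # assumed subset

def open_frames_source(folder):
    # Return a cv2.VideoCapture when the path points at a video file,
    # or None when it is a directory of extracted frames.
    if not folder or not os.path.exists(folder):
        raise FileNotFoundError(f"location not found: {folder}")
    if os.path.isfile(folder) and os.path.splitext(folder)[1].lower() in VIDEO_EXTENSIONS:
        return cv2.VideoCapture(folder)
    return None
```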
2
generate_heatmap
def generate_heatmap(logits, num_classes): keypoint_colours = np.array([plt.cm.spectral(x) for x in np.linspace(0, 1, num_classes + 1)])[ ..., :3].astype(np.float32) prediction = tf.nn.softmax(logits) heatmap = tf.matmul(tf.reshape(prediction, (-1, num_classes + 1)), keypoint_colours) heatmap = tf.reshape(heatmap, (tf.shape(prediction)[0], tf.shape(prediction)[1], tf.shape(prediction)[2], 3)) return heatmap
7375ee364e0df2a417f92593e09557f1b2a3575a
16
utils.py
195
initialize ostec
1,586
0
129
131
29
9,335
36
insightface
21
reconstruction/ostec/external/landmark_detector/utils.py
Python
9
{ "docstring": "Generates a coloured heatmap from the keypoint logits.\n\n Args:\n features: A `Tensor` of dimensions [num_batch, height, width, FLAGS.n_landmarks + 1].\n ", "language": "en", "n_whitespaces": 33, "n_words": 20, "vocab_size": 20 }
https://github.com/deepinsight/insightface.git
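The same colour-blending idea as the heatmap record above, kept framework-free as a sketch: softmax over the class axis, then a matrix product with one RGB colour per class. The identity colour matrix and random logits are only example inputs.

```python
import numpy as np

def heatmap_from_logits(logits, colours):
    # logits: (H, W, C) keypoint scores; colours: (C, 3) RGB rows.
    exps = np.exp(logits - logits.max(axis=-1, keepdims=True))
    probs = exps / exps.sum(axis=-1, keepdims=True)  # per-pixel softmax
    return probs @ colours                           # (H, W, 3) blended colours

heatmap = heatmap_from_logits(np.random.randn(4, 4, 3), np.eye(3))
print(heatmap.shape)  # (4, 4, 3)
```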
1
update_worker_pea_args
def update_worker_pea_args(self): self.peas_args['peas'] = self._set_peas_args(self.args)
933415bfa1f9eb89f935037014dfed816eb9815d
9
__init__.py
38
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
1,761
0
19
21
5
9,894
5
jina
5
jina/peapods/pods/__init__.py
Python
2
{ "docstring": " Update args of all its worker peas based on Pod args. Does not touch head and tail", "language": "en", "n_whitespaces": 17, "n_words": 17, "vocab_size": 17 }
https://github.com/jina-ai/jina.git
1
to_local_object_without_private_data_child
def to_local_object_without_private_data_child(self) -> SingleEntityPhiTensor: public_shape = getattr(self, "public_shape", None) public_dtype = getattr(self, "public_dtype", None) return Tensor( child=SingleEntityPhiTensor( child=FixedPrecisionTensor(value=None), entity=self.entity, min_vals=self.min_vals, # type: ignore max_vals=self.max_vals, # type: ignore scalar_manager=self.scalar_manager, ), public_shape=public_shape, public_dtype=public_dtype, )
a90188ecc017971b64778aa0ff41127a9d5d9d44
14
single_entity_phi.py
121
working fpt for SMPC+DP
125
0
188
79
26
803
32
PySyft
14
packages/syft/src/syft/core/tensor/autodp/single_entity_phi.py
Python
16
{ "docstring": "Convert this pointer into a partial version of the SingleEntityPhiTensor but without\n any of the private data therein.", "language": "en", "n_whitespaces": 24, "n_words": 18, "vocab_size": 16 }
https://github.com/OpenMined/PySyft.git
6
_measurements
def _measurements(self): ismeasured = {} for i,g in enumerate(self._gates()): if getattr(g,'measurement',False): for target in g.targets: if target in ismeasured: if ismeasured[target] > i: ismeasured[target] = i else: ismeasured[target] = i return ismeasured
55c2eba0fc6eefa30c07e2e76795c1df89488b11
17
circuitplot.py
113
Update documentation
48,582
0
197
70
20
197,501
32
sympy
10
sympy/physics/quantum/circuitplot.py
Python
11
{ "docstring": "Return a dict ``{i:j}`` where i is the index of the wire that has\n been measured, and j is the gate where the wire is measured.\n ", "language": "en", "n_whitespaces": 40, "n_words": 26, "vocab_size": 19 }
https://github.com/sympy/sympy.git
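A self-contained sketch of the wire-to-earliest-measurement mapping built by `_measurements` above; the `Gate` stand-in class is hypothetical and only mimics the `measurement`/`targets` attributes the method relies on.

```python
class Gate:
    def __init__(self, targets, measurement=False):
        self.targets = targets
        self.measurement = measurement

def earliest_measurements(gates):
    # Map each measured wire to the index of the first gate that measures it.
    ismeasured = {}
    for i, g in enumerate(gates):
        if getattr(g, "measurement", False):
            for target in g.targets:
                if target not in ismeasured or ismeasured[target] > i:
                    ismeasured[target] = i
    return ismeasured

print(earliest_measurements([Gate([0]), Gate([1], measurement=True), Gate([1, 2], measurement=True)]))
# {1: 1, 2: 2}
```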
1
parse_example_proto_and_decode
def parse_example_proto_and_decode(example_serialized): image_buffer, label = _parse_example_proto(example_serialized) image_buffer = tf.reshape(image_buffer, shape=[]) image_buffer = tf.io.decode_jpeg(image_buffer, channels=NUM_CHANNELS) return image_buffer, label
02f911ce78137cb63ecb685a8ef8e56dcb60062c
10
tf_utils.py
73
Benchmarking Ray Data bulk ingest as input file size changes. (#29296) This PR adds a benchmark which takes work from https://github.com/anyscale/air-benchmarks and makes it run as a release test. Full metrics are stored in Databricks. Signed-off-by: Cade Daniel <[email protected]>
30,205
0
32
45
12
134,154
17
ray
12
release/air_tests/air_benchmarks/mlperf-train/tf_utils.py
Python
5
{ "docstring": "Parses an example and decodes the image to prepare for caching.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/ray-project/ray.git
3
get_currency
def get_currency(filters): company = get_appropriate_company(filters) company_currency = get_company_currency(company) presentation_currency = ( filters["presentation_currency"] if filters.get("presentation_currency") else company_currency ) report_date = filters.get("to_date") if not report_date: fiscal_year_to_date = get_from_and_to_date(filters.get("to_fiscal_year"))["to_date"] report_date = formatdate(get_datetime_str(fiscal_year_to_date), "dd-MM-yyyy") currency_map = dict( company=company, company_currency=company_currency, presentation_currency=presentation_currency, report_date=report_date, ) return currency_map
494bd9ef78313436f0424b918f200dab8fc7c20b
14
utils.py
161
style: format code with black
13,877
0
23
95
29
65,389
40
erpnext
15
erpnext/accounts/report/utils.py
Python
17
{ "docstring": "\n\tReturns a dictionary containing currency information. The keys of the dict are\n\t- company: The company for which we are fetching currency information. if no\n\tcompany is specified, it will fallback to the default company.\n\t- company currency: The functional currency of the said company.\n\t- presentation currency: The presentation currency to use. Only currencies that\n\thave been used for transactions will be allowed.\n\t- report date: The report date.\n\t:param filters: Report filters\n\t:type filters: dict\n\n\t:return: str - Currency\n\t", "language": "en", "n_whitespaces": 71, "n_words": 81, "vocab_size": 54 }
https://github.com/frappe/erpnext.git
1
test_query_order_fields_order_with_new_id_by_anonymous_user
def test_query_order_fields_order_with_new_id_by_anonymous_user(order, api_client): # given variables = {"id": graphene.Node.to_global_id("Order", order.pk)} # when response = api_client.post_graphql(QUERY_ORDER_FIELDS_BY_ID, variables) # then content = get_graphql_content(response) assert content["data"]["order"] assert ( content["data"]["order"]["billingAddress"]["streetAddress1"] == order.billing_address.street_address_1 ) assert ( content["data"]["order"]["shippingAddress"]["streetAddress1"] == order.shipping_address.street_address_1 ) assert content["data"]["order"]["userEmail"] == order.user_email
71c19c951bcfba66fa9a9ee5809a46ad3af8f11f
12
test_order.py
194
Allow fetching by id all order data for new orders (#9728)
5,103
0
106
109
28
27,234
39
saleor
17
saleor/graphql/order/tests/test_order.py
Python
14
{ "docstring": "Ensure that all fields that are available for order owner can be fetched with\n use of new id by the customer user.", "language": "en", "n_whitespaces": 24, "n_words": 22, "vocab_size": 21 }
https://github.com/saleor/saleor.git
1
mkdtemp
def mkdtemp(self): d = tempfile.mkdtemp() self.tempdirs.append(d) return d
8198943edd73a363c266633e1aa5b2a9e9c9f526
8
support.py
41
add python 3.10.4 for windows
56,849
0
36
23
7
223,013
8
XX-Net
6
python3.10.4/Lib/distutils/tests/support.py
Python
4
{ "docstring": "Create a temporary directory that will be cleaned up.\n\n Returns the path of the directory.\n ", "language": "en", "n_whitespaces": 29, "n_words": 15, "vocab_size": 14 }
https://github.com/XX-net/XX-Net.git
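A small sketch of the temp-dir bookkeeping pattern the test-support helper above follows, with an explicit cleanup step added; the class name and cleanup method are made up for illustration.

```python
import shutil
import tempfile

class TempdirMixin:
    def __init__(self):
        self.tempdirs = []

    def mkdtemp(self):
        # Create a temporary directory and remember it for later cleanup.
        d = tempfile.mkdtemp()
        self.tempdirs.append(d)
        return d

    def cleanup(self):
        for d in self.tempdirs:
            shutil.rmtree(d, ignore_errors=True)

helper = TempdirMixin()
path = helper.mkdtemp()
helper.cleanup()
```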
3
__str__
def __str__(self): call_str = f'db.{self.collection}' for step in self.pipeline: args_str = [] for arg in step['args']: args_str.append(MongoJSONEncoder().encode(arg)) call_str += f'.{step["method"]}({",".join(args_str)})' return call_str
d5b6d8cf5bd69a1f3427bac153fa09159dfe96d0
15
mongodb_query.py
113
added converters: - mongo to str - str to mongo creating predictor from sql using mongo data tests
25,455
0
98
48
17
115,416
22
mindsdb
12
mindsdb/integrations/handlers/mongodb_handler/utils/mongodb_query.py
Python
8
{ "docstring": "\n converts call to string\n\n {\n 'collection': 'fish',\n 'call': [ // call is sequence of methods\n {\n 'method': 'find',\n 'args': [{a:1}, {b:2}]\n },\n {\n 'method': 'sort',\n 'args': [{c:3}]\n },\n ]\n }\n\n to:\n\n \"db_test.fish.find({a:1}, {b:2}).sort({c:3})\"\n ", "language": "en", "n_whitespaces": 248, "n_words": 33, "vocab_size": 27 }
https://github.com/mindsdb/mindsdb.git
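A simplified, runnable version of the call-chain rendering that the `__str__` method above performs, using the standard-library `json` encoder in place of MindsDB's `MongoJSONEncoder` (an assumption made so the sketch stays dependency-free).

```python
import json

def pipeline_to_string(collection, pipeline):
    # Render a method pipeline as a mongo-shell style call chain.
    call_str = f"db.{collection}"
    for step in pipeline:
        args = ", ".join(json.dumps(arg) for arg in step["args"])
        call_str += f".{step['method']}({args})"
    return call_str

print(pipeline_to_string("fish", [
    {"method": "find", "args": [{"a": 1}, {"b": 2}]},
    {"method": "sort", "args": [{"c": 3}]},
]))
# db.fish.find({"a": 1}, {"b": 2}).sort({"c": 3})
```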
1
pairwise
def pairwise(iterable): # type: (Iterable[Any]) -> Iterator[Tuple[Any, Any]] iterable = iter(iterable) return zip_longest(iterable, iterable)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
8
misc.py
34
upd; format
12,448
0
26
19
14
61,223
14
transferlearning
4
.venv/lib/python3.8/site-packages/pip/_internal/utils/misc.py
Python
3
{ "docstring": "\n Return paired elements.\n\n For example:\n s -> (s0, s1), (s2, s3), (s4, s5), ...\n ", "language": "en", "n_whitespaces": 31, "n_words": 14, "vocab_size": 14 }
https://github.com/jindongwang/transferlearning.git
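A usage note for the `pairwise` helper above: because both arguments of `zip_longest` are the same iterator, consecutive elements are consumed two at a time, and an odd-length input is padded with `None`.

```python
from itertools import zip_longest

def pairwise(iterable):
    iterable = iter(iterable)
    return zip_longest(iterable, iterable)

print(list(pairwise("abcde")))
# [('a', 'b'), ('c', 'd'), ('e', None)]
```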
6
complete_code
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs): gen_token_dict = defaultdict(list) # dict of list of generated tokens for step, batch in tqdm(enumerate(dataloader)): with torch.no_grad(): gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1] generated_tokens = accelerator.unwrap_model(model).generate( input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs ) # each task is generated batch_size times generated_tasks = batch["task_id"].repeat(batch_size) generated_tokens = accelerator.pad_across_processes( generated_tokens, dim=1, pad_index=tokenizer.pad_token_id ) generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks)) generated_tokens = generated_tokens.cpu().numpy() generated_tasks = generated_tasks.cpu().numpy() for task, generated_tokens in zip(generated_tasks, generated_tokens): gen_token_dict[task].append(generated_tokens) code_gens = [[] for _ in range(n_tasks)] for task, generated_tokens in gen_token_dict.items(): for s in generated_tokens: gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True) code_gens[task].append(remove_last_block(gen_code)) return code_gens
4868a830db5f19f56712f540979d637368221d50
17
human_eval.py
387
Jia multi gpu eval (#16428) * add simple multi gpu complet * add human_eval_multi_gpu * use copy strategy to distribute across gpu, to avoid padding * add doc string * update code style * use task id to arrange output * truncate input to avoid zero pad * Stop the copy mechanism * update style * restore copies to scale better in distributed mode * update style * replace human eval * Apply suggestions from code review 1. Tokenize all input at the same time 2. use attention_mask to get the input length 3. other small fixes Co-authored-by: Leandro von Werra <[email protected]> * correct typo and update docstring * update code style * remove num sample division constraint * remove max len calculation * use accelerator.gather once to speed up * use accelerate set_seed; update accelerate version * correct gather bug Co-authored-by: Leandro von Werra <[email protected]>
6,716
0
317
246
66
37,029
96
transformers
46
examples/research_projects/codeparrot/scripts/human_eval.py
Python
23
{ "docstring": "Generate multiple codes for each task in the dataset. This function leverage accelerator to distribute\n the processing to multiple GPUs.\n dataloader, a wrapper around a TokenizeDataset objectm is supposed to send all the prompts from\n the evalution dataset to the modelm as the following:\n [p_0_0, p_0_1, ..., p_0_nc-1, p_1_0, ..., p_nt-1_nc-1]\n where nc is the number of copies of the prompt, and nt is the number of tasks.\n nc is such that num_sample = nc * batch_size\n\n Parameters\n ----------\n accelerator: Accelerator\n\n model: transformers.PreTrainedModel\n Code generation model. AutoTokenizer.from_pretrained(model_ckpt), ex model_ckpt = \"lvwerra/codeparrot\"\n\n tokenizer: transformers.AutoTokenizer\n The tokenizer used to train model\n\n dataloader: DataLoader\n The dataloader is a wrapper around a TokenizeDataset object. It is designed to be used with multiple GPUs.\n\n n_tasks: int\n The number of tasks in the dataset. It is used to determine the length of the output.\n Should be aligned with the number of tasks in the TokenizeDataset.\n\n batch_size: int\n num_return_sequences per copy of the prompt such that num_sample = batch_size * n_copies\n\n gen_kwargs: dict\n Keyword arguments for the generation function of the model.\n\n Returns\n -------\n code_gens: list of list of str, of length n_tasks\n List of generated codes for each task.\n Each element is a list of generated codes for each task, with length num_samples\n ", "language": "en", "n_whitespaces": 327, "n_words": 207, "vocab_size": 115 }
https://github.com/huggingface/transformers.git
16
delegate_command
def delegate_command(args, host_state, exclude, require): # type: (EnvironmentConfig, HostState, t.List[str], t.List[str]) -> None con = host_state.controller_profile.get_origin_controller_connection() working_directory = host_state.controller_profile.get_working_directory() host_delegation = not isinstance(args.controller, OriginConfig) if host_delegation: if data_context().content.collection: content_root = os.path.join(working_directory, data_context().content.collection.directory) else: content_root = os.path.join(working_directory, 'ansible') ansible_bin_path = os.path.join(working_directory, 'ansible', 'bin') with tempfile.NamedTemporaryFile(prefix='ansible-source-', suffix='.tgz') as payload_file: create_payload(args, payload_file.name) con.extract_archive(chdir=working_directory, src=payload_file) else: content_root = working_directory ansible_bin_path = ANSIBLE_BIN_PATH command = generate_command(args, host_state.controller_profile.python, ansible_bin_path, content_root, exclude, require) if isinstance(con, SshConnection): ssh = con.settings else: ssh = None options = [] if isinstance(args, IntegrationConfig) and args.controller.is_managed and all(target.is_managed for target in args.targets): if not args.allow_destructive: options.append('--allow-destructive') with support_container_context(args, ssh) as containers: # type: t.Optional[ContainerDatabase] if containers: options.extend(['--containers', json.dumps(containers.to_dict())]) # Run unit tests unprivileged to prevent stray writes to the source tree. # Also disconnect from the network once requirements have been installed. if isinstance(args, UnitsConfig) and isinstance(con, DockerConnection): pytest_user = 'pytest' writable_dirs = [ os.path.join(content_root, ResultType.JUNIT.relative_path), os.path.join(content_root, ResultType.COVERAGE.relative_path), ] con.run(['mkdir', '-p'] + writable_dirs) con.run(['chmod', '777'] + writable_dirs) con.run(['chmod', '755', working_directory]) con.run(['chmod', '644', os.path.join(content_root, args.metadata_path)]) con.run(['useradd', pytest_user, '--create-home']) con.run(insert_options(command, options + ['--requirements-mode', 'only'])) container = con.inspect() networks = container.get_network_names() if networks is not None: for network in networks: con.disconnect_network(network) else: display.warning('Network disconnection is not supported (this is normal under podman). ' 'Tests will not be isolated from the network. Network-related tests may misbehave.') options.extend(['--requirements-mode', 'skip']) con.user = pytest_user success = False try: con.run(insert_options(command, options)) success = True finally: if host_delegation: download_results(args, con, content_root, success)
a06fa496d3f837cca3c437ab6e9858525633d147
17
delegation.py
803
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
78,575
0
770
487
154
266,772
231
ansible
76
test/lib/ansible_test/_internal/delegation.py
Python
57
{ "docstring": "Delegate execution based on the provided host state.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/ansible/ansible.git
6
decrypt_string
def decrypt_string(self, content, key=0): # precondition assert isinstance(key, int) and isinstance(content, str) key = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned ans = "" for ch in content: ans += chr(ord(ch) ^ key) return ans
f0af0c43340763724f139fa68aa1e5a9ffe458b4
13
XOR_cipher.py
105
refactor: clean code Signed-off-by: slowy07 <[email protected]>
4,360
0
145
64
42
22,544
53
Python
12
XORcipher/XOR_cipher.py
Python
9
{ "docstring": "\n input: 'content' of type string and 'key' of type int\n output: decrypted string 'content'\n if key not passed the method uses the key by the constructor.\n otherwise key = 1\n ", "language": "en", "n_whitespaces": 66, "n_words": 30, "vocab_size": 22 }
https://github.com/geekcomputers/Python.git
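A round-trip sketch of the XOR scheme used by `decrypt_string` above, written as a standalone function; the class-level key handling (reduction below 256 and the `self.__key` fallback) is intentionally omitted, and the key value is arbitrary.

```python
def xor_transform(content, key=64):
    # XOR with the same key is its own inverse: applying it twice restores the input.
    return "".join(chr(ord(ch) ^ key) for ch in content)

cipher = xor_transform("hello world")
print(xor_transform(cipher))  # hello world
```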