column          type           min      max
complexity      int64          1        56
n_identifiers   int64          1        114
code            stringlengths  19       12.7k
path            stringlengths  8        134
n_ast_nodes     int64          12       2.35k
ast_errors      stringlengths  0        4.01k
repo            stringlengths  3        28
documentation   dict
n_words         int64          2        866
language        stringclasses  1 value
vocab_size      int64          2        323
commit_id       stringlengths  40       40
file_name       stringlengths  5        79
id              int64          243      338k
nloc            int64          1        228
token_counts    int64          5        1.4k
fun_name        stringlengths  1        77
url             stringlengths  31       60
commit_message  stringlengths  3        15.3k
n_whitespaces   int64          1        3.23k
n_ast_errors    int64          0        20
d_id            int64          74       121k
ast_levels      int64          4        29
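Each row below is one function-level record: the extracted code, its repository and commit metadata, and a docstring stored in the documentation dict. As a minimal sketch of how records with this schema could be inspected (assuming the dump is published as a Hugging Face dataset; the dataset path below is a hypothetical placeholder, not the real name):

from itertools import islice

from datasets import load_dataset

# "org/code-docstring-corpus" is a hypothetical placeholder dataset path.
ds = load_dataset("org/code-docstring-corpus", split="train", streaming=True)

# Peek at a few records: function name, repository, cyclomatic complexity,
# and the start of the stored docstring.
for row in islice(ds, 3):
    print(row["fun_name"], row["repo"], row["complexity"])
    print(row["documentation"]["docstring"][:80])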
3
8
def entry_points(group=None):
    eps = importlib.metadata.entry_points()
    if group:
        try:
            return eps.select(group=group)
        except AttributeError:
            return eps.get(group, [])
    return eps
dask/compatibility.py
77
dask
{ "docstring": "Returns an iterable of entrypoints.\n\n For compatibility with Python 3.8/3.9.\n In 3.10 the return type changed from a dict to an ``importlib.metadata.EntryPoints``.\n This compatibility utility can be removed once Python 3.10 is the minimum.\n ", "language": "en", "n_whitespaces": 46, "n_words": 34, "vocab_size": 29 }
17
Python
14
a9ee6c2fdf0a3093747e675997143e0dbe584bad
compatibility.py
156,894
8
46
entry_points
https://github.com/dask/dask.git
Add `entry_points` compatibility utility (#9388)
65
0
36,798
13
32
6
def eval_sum_residue(f, i_a_b):
    r
    i, a, b = i_a_b
sympy/concrete/summations.py
25
sympy
{ "docstring": "Compute the infinite summation with residues\n\n Notes\n =====\n\n If $f(n), g(n)$ are polynomials with $\\deg(g(n)) - \\deg(f(n)) \\ge 2$,\n some infinite summations can be computed by the following residue\n evaluations.\n\n .. math::\n \\sum_{n=-\\infty, g(n) \\ne 0}^{\\infty} \\frac{f(n)}{g(n)} =\n -\\pi \\sum_{\\alpha|g(\\alpha)=0}\n \\text{Res}(\\cot(\\pi x) \\frac{f(x)}{g(x)}, \\alpha)\n\n .. math::\n \\sum_{n=-\\infty, g(n) \\ne 0}^{\\infty} (-1)^n \\frac{f(n)}{g(n)} =\n -\\pi \\sum_{\\alpha|g(\\alpha)=0}\n \\text{Res}(\\csc(\\pi x) \\frac{f(x)}{g(x)}, \\alpha)\n\n Examples\n ========\n\n >>> from sympy import Sum, oo, Symbol\n >>> x = Symbol('x')\n\n Doubly infinite series of rational functions.\n\n >>> Sum(1 / (x**2 + 1), (x, -oo, oo)).doit()\n pi/tanh(pi)\n\n Doubly infinite alternating series of rational functions.\n\n >>> Sum((-1)**x / (x**2 + 1), (x, -oo, oo)).doit()\n pi/sinh(pi)\n\n Infinite series of even rational functions.\n\n >>> Sum(1 / (x**2 + 1), (x, 0, oo)).doit()\n 1/2 + pi/(2*tanh(pi))\n\n Infinite series of alternating even rational functions.\n\n >>> Sum((-1)**x / (x**2 + 1), (x, 0, oo)).doit()\n pi/(2*sinh(pi)) + 1/2\n\n This also have heuristics to transform arbitrarily shifted summand or\n arbitrarily shifted summation range to the canonical problem the\n formula can handle.\n\n >>> Sum(1 / (x**2 + 2*x + 2), (x, -1, oo)).doit()\n 1/2 + pi/(2*tanh(pi))\n >>> Sum(1 / (x**2 + 4*x + 5), (x, -2, oo)).doit()\n 1/2 + pi/(2*tanh(pi))\n >>> Sum(1 / (x**2 + 1), (x, 1, oo)).doit()\n -1/2 + pi/(2*tanh(pi))\n >>> Sum(1 / (x**2 + 1), (x, 2, oo)).doit()\n -1 + pi/(2*tanh(pi))\n\n References\n ==========\n\n .. [#] http://www.supermath.info/InfiniteSeriesandtheResidueTheorem.pdf\n\n .. [#] Asmar N.H., Grafakos L. (2018) Residue Theory.\n In: Complex Analysis with Applications.\n Undergraduate Texts in Mathematics. Springer, Cham.\n https://doi.org/10.1007/978-3-319-94063-2_5\n ", "language": "en", "n_whitespaces": 431, "n_words": 242, "vocab_size": 124 }
9
Python
9
df873af365fff5b89164ed8eb3a1b62b6180f1bb
summations.py
197,934
149
629
eval_sum_residue
https://github.com/sympy/sympy.git
replaced i with a dummy symbol with no assumptions for get_residue_factor
14
0
48,746
7
6
30
def test_vtrace(self):
    seq_len = 5
    batch_size = 10

    # Create log_rhos such that rho will span from near-zero to above the
    # clipping thresholds. In particular, calculate log_rhos in [-2.5, 2.5),
    # so that rho is in approx [0.08, 12.2).
    space_w_time = Box(-1.0, 1.0, (seq_len, batch_size), np.float32)
    space_only_batch = Box(-1.0, 1.0, (batch_size,), np.float32)
    log_rhos = space_w_time.sample() / (batch_size * seq_len)
    log_rhos = 5 * (log_rhos - 0.5)  # [0.0, 1.0) -> [-2.5, 2.5).

    values = {
        "log_rhos": log_rhos,
        # T, B where B_i: [0.9 / (i+1)] * T
        "discounts": np.array(
            [[0.9 / (b + 1) for b in range(batch_size)] for _ in range(seq_len)]
        ),
        "rewards": space_w_time.sample(),
        "values": space_w_time.sample() / batch_size,
        "bootstrap_value": space_only_batch.sample() + 1.0,
        "clip_rho_threshold": 3.7,
        "clip_pg_rho_threshold": 2.2,
    }

    for fw, sess in framework_iterator(frameworks=("torch", "tf"), session=True):
        vtrace = vtrace_tf if fw != "torch" else vtrace_torch
        output = vtrace.from_importance_weights(**values)
        if sess:
            output = sess.run(output)

        ground_truth_v = _ground_truth_calculation(vtrace, **values)
        check(output, ground_truth_v)
rllib/agents/impala/tests/test_vtrace.py
344
ray
{ "docstring": "Tests V-trace against ground truth data calculated in python.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
150
Python
109
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
test_vtrace.py
133,739
25
230
test_vtrace
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
433
0
30,090
15
1
19
def test_kb_valid_entities(nlp):
    mykb = InMemoryLookupKB(nlp.vocab, entity_vector_length=3)

    # adding entities
    mykb.add_entity(entity="Q1", freq=19, entity_vector=[8, 4, 3])
    mykb.add_entity(entity="Q2", freq=5, entity_vector=[2, 1, 0])
    mykb.add_entity(entity="Q3", freq=25, entity_vector=[-1, -6, 5])

    # adding aliases
    mykb.add_alias(alias="douglas", entities=["Q2", "Q3"], probabilities=[0.8, 0.2])
    mykb.add_alias(alias="adam", entities=["Q2"], probabilities=[0.9])

    # test the size of the corresponding KB
    assert mykb.get_size_entities() == 3
    assert mykb.get_size_aliases() == 2

    # test retrieval of the entity vectors
    assert mykb.get_vector("Q1") == [8, 4, 3]
    assert mykb.get_vector("Q2") == [2, 1, 0]
    assert mykb.get_vector("Q3") == [-1, -6, 5]

    # test retrieval of prior probabilities
    assert_almost_equal(mykb.get_prior_prob(entity="Q2", alias="douglas"), 0.8)
    assert_almost_equal(mykb.get_prior_prob(entity="Q3", alias="douglas"), 0.2)
    assert_almost_equal(mykb.get_prior_prob(entity="Q342", alias="douglas"), 0.0)
    assert_almost_equal(mykb.get_prior_prob(entity="Q3", alias="douglassssss"), 0.0)
spacy/tests/pipeline/test_entity_linker.py
423
spaCy
{ "docstring": "Test the valid construction of a KB with 3 entities and two aliases", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
94
Python
67
1f23c615d7a7326ca5a38a7d768b8b70caaa0e17
test_entity_linker.py
111,524
16
275
test_kb_valid_entities
https://github.com/explosion/spaCy.git
Refactor KB for easier customization (#11268) * Add implementation of batching + backwards compatibility fixes. Tests indicate issue with batch disambiguation for custom singular entity lookups. * Fix tests. Add distinction w.r.t. batch size. * Remove redundant and add new comments. * Adjust comments. Fix variable naming in EL prediction. * Fix mypy errors. * Remove KB entity type config option. Change return types of candidate retrieval functions to Iterable from Iterator. Fix various other issues. * Update spacy/pipeline/entity_linker.py Co-authored-by: Paul O'Leary McCann <[email protected]> * Update spacy/pipeline/entity_linker.py Co-authored-by: Paul O'Leary McCann <[email protected]> * Update spacy/kb_base.pyx Co-authored-by: Paul O'Leary McCann <[email protected]> * Update spacy/kb_base.pyx Co-authored-by: Paul O'Leary McCann <[email protected]> * Update spacy/pipeline/entity_linker.py Co-authored-by: Paul O'Leary McCann <[email protected]> * Add error messages to NotImplementedErrors. Remove redundant comment. * Fix imports. * Remove redundant comments. * Rename KnowledgeBase to InMemoryLookupKB and BaseKnowledgeBase to KnowledgeBase. * Fix tests. * Update spacy/errors.py Co-authored-by: Sofie Van Landeghem <[email protected]> * Move KB into subdirectory. * Adjust imports after KB move to dedicated subdirectory. * Fix config imports. * Move Candidate + retrieval functions to separate module. Fix other, small issues. * Fix docstrings and error message w.r.t. class names. Fix typing for candidate retrieval functions. * Update spacy/kb/kb_in_memory.pyx Co-authored-by: Sofie Van Landeghem <[email protected]> * Update spacy/ml/models/entity_linker.py Co-authored-by: Sofie Van Landeghem <[email protected]> * Fix typing. * Change typing of mentions to be Span instead of Union[Span, str]. * Update docs. * Update EntityLinker and _architecture docs. * Update website/docs/api/entitylinker.md Co-authored-by: Paul O'Leary McCann <[email protected]> * Adjust message for E1046. * Re-add section for Candidate in kb.md, add reference to dedicated page. * Update docs and docstrings. * Re-add section + reference for KnowledgeBase.get_alias_candidates() in docs. * Update spacy/kb/candidate.pyx * Update spacy/kb/kb_in_memory.pyx * Update spacy/pipeline/legacy/entity_linker.py * Remove canididate.md. Remove mistakenly added config snippet in entity_linker.py. Co-authored-by: Paul O'Leary McCann <[email protected]> Co-authored-by: Sofie Van Landeghem <[email protected]>
157
0
24,423
11
1
4
def delete(self, name):
    raise NotImplementedError(
        "subclasses of Storage must provide a delete() method"
    )
django/core/files/storage.py
25
django
{ "docstring": "\n Delete the specified file from the storage system.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 7 }
14
Python
14
9c19aff7c7561e3a82978a272ecdaad40dda5c00
storage.py
204,484
4
13
delete
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
46
0
50,745
8
2
24
def test_sort_indicators_admin_order(self):
    models = [
        (AdminOrderedField, "adminorderedfield"),
        (AdminOrderedModelMethod, "adminorderedmodelmethod"),
        (AdminOrderedAdminMethod, "adminorderedadminmethod"),
        (AdminOrderedCallable, "adminorderedcallable"),
    ]
    for model, url in models:
        model.objects.create(stuff="The Last Item", order=3)
        model.objects.create(stuff="The First Item", order=1)
        model.objects.create(stuff="The Middle Item", order=2)
        response = self.client.get(
            reverse("admin:admin_views_%s_changelist" % url), {}
        )
        # Should have 3 columns including action checkbox col.
        self.assertContains(response, '<th scope="col"', count=3, msg_prefix=url)
        # Check if the correct column was selected. 2 is the index of the
        # 'order' column in the model admin's 'list_display' with 0 being
        # the implicit 'action_checkbox' and 1 being the column 'stuff'.
        self.assertEqual(
            response.context["cl"].get_ordering_field_columns(), {2: "asc"}
        )
        # Check order of records.
        self.assertContentBefore(response, "The First Item", "The Middle Item")
        self.assertContentBefore(response, "The Middle Item", "The Last Item")
tests/admin_views/tests.py
266
django
{ "docstring": "\n The admin shows default sort indicators for all kinds of 'ordering'\n fields: field names, method on the model admin and model itself, and\n other callables. See #17252.\n ", "language": "en", "n_whitespaces": 56, "n_words": 27, "vocab_size": 24 }
112
Python
80
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,658
20
158
test_sort_indicators_admin_order
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
379
0
52,039
14
2
7
def __setattr__(self, name, value):
    if name in ("power_status", "status"):
        self._values[name] = value
        self._update()
    else:
        super().__setattr__(name, value)
tests/components/hdmi_cec/__init__.py
75
core
{ "docstring": "Set attributes in `_values` if not one of the known attributes.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
16
Python
16
7cd4be1310b3f76398b4404d3f4ecb26b9533cee
__init__.py
303,569
6
44
__setattr__
https://github.com/home-assistant/core.git
Add tests for the HDMI-CEC integration (#75094) * Add basic tests to the HDMI-CEC component * Add tests for the HDMI-CEC switch component * Add test for watchdog code * Start adding tests for the HDMI-CEC media player platform Also some cleanup and code move. * Add more tests for media_player And cleanup some switch tests. * Improve xfail message for features * Align test pyCEC dependency with main dependency * Make fixtures snake_case * Cleanup call asserts * Cleanup service tests * fix issues with media player tests * Cleanup MockHDMIDevice class * Cleanup watchdog tests * Add myself as code owner for the HDMI-CEC integration * Fix async fire time changed time jump * Fix event api sync context * Delint tests * Parametrize watchdog test Co-authored-by: Martin Hjelmare <[email protected]>
70
0
102,387
12
1
4
def remove_child_handler(self, pid):
    raise NotImplementedError()
python3.10.4/Lib/asyncio/unix_events.py
22
XX-Net
{ "docstring": "Removes the handler for process 'pid'.\n\n The function returns True if the handler was successfully removed,\n False if there was nothing to remove.", "language": "en", "n_whitespaces": 36, "n_words": 23, "vocab_size": 19 }
5
Python
5
8198943edd73a363c266633e1aa5b2a9e9c9f526
unix_events.py
220,945
2
12
remove_child_handler
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
19
0
56,172
7
5
23
def _set_modes_and_presets(self) -> None:
    all_modes: dict[HVACMode, int | None] = {}
    all_presets: dict[str, int | None] = {PRESET_NONE: None}

    # Z-Wave uses one list for both modes and presets.
    # Iterate over all Z-Wave ThermostatModes and extract the hvac modes and presets.
    if self._current_mode is None:
        self._hvac_modes = {
            ZW_HVAC_MODE_MAP[ThermostatMode.HEAT]: ThermostatMode.HEAT
        }
        return
    for mode_id, mode_name in self._current_mode.metadata.states.items():
        mode_id = int(mode_id)
        if mode_id in THERMOSTAT_MODES:
            # treat value as hvac mode
            if hass_mode := ZW_HVAC_MODE_MAP.get(mode_id):
                all_modes[hass_mode] = mode_id
        else:
            # treat value as hvac preset
            all_presets[mode_name] = mode_id
    self._hvac_modes = all_modes
    self._hvac_presets = all_presets
homeassistant/components/zwave_js/climate.py
197
core
{ "docstring": "Convert Z-Wave Thermostat modes into Home Assistant modes and presets.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
94
Python
62
24b4690e5d855be362613583a3ba6fd6f60e9929
climate.py
299,023
18
123
_set_modes_and_presets
https://github.com/home-assistant/core.git
Use climate enums in zwave_js (#70757)
317
0
97,961
13
5
14
def __next__(self) -> Any:
    if not len(self.loader_iters) == len(self.loaders):
        raise RuntimeError('loader_iters must have the same length as loaders.')
    for i, (loader_name, iterator) in enumerate(self.loader_iters.items()):
        try:
            return (self.request_next_batch(iterator), loader_name)
        except StopIteration:
            if i + 1 == len(self.loader_iters):
                raise
nni/retiarii/oneshot/pytorch/dataloader.py
128
nni
{ "docstring": "Fetches the next batch from multiple data loaders,\n by looking for the first iterator that isn't exhausted yet.\n ", "language": "en", "n_whitespaces": 32, "n_words": 18, "vocab_size": 17 }
37
Python
34
39ec21ca1118e7a8df533aa06bcb5e515a93aa02
dataloader.py
112,922
12
78
__next__
https://github.com/microsoft/nni.git
Multi-GPU support of one-shot NAS (#4603)
140
0
24,789
14
4
18
def register(self, cmap, *, name=None, force=False):
    name = name or cmap.name
    if name in self and not force:
        raise ValueError(
            f'A colormap named "{name}" is already registered.')
    register_cmap(name, cmap.copy())


_cmap_registry = _gen_cmap_registry()
globals().update(_cmap_registry)
# This is no longer considered public API
cmap_d = _DeprecatedCmapDictWrapper(_cmap_registry)
__builtin_cmaps = tuple(_cmap_registry)

# public access to the colormaps should be via `matplotlib.colormaps`. For now,
# we still create the registry here, but that should stay an implementation
# detail.
_colormaps = ColormapRegistry(_cmap_registry)
lib/matplotlib/cm.py
139
matplotlib
{ "docstring": "\n Register a new colormap.\n\n The colormap name can then be used as a string argument to any ``cmap``\n parameter in Matplotlib. It is also available in ``pyplot.get_cmap``.\n\n The colormap registry stores a copy of the given colormap, so that\n future changes to the original colormap instance do not affect the\n registered colormap. Think of this as the registry taking a snapshot\n of the colormap at registration.\n\n Parameters\n ----------\n cmap : matplotlib.colors.Colormap\n The colormap to register.\n\n name : str, optional\n The name for the colormap. If not given, ``cmap.name`` is used.\n\n force : bool, default: False\n If False, a ValueError is raised if trying to overwrite an already\n registered name. True supports overwriting registered colormaps\n other than the builtin colormaps.\n ", "language": "en", "n_whitespaces": 266, "n_words": 119, "vocab_size": 82 }
77
Python
64
917c7c8b2cecf3ff85ed7527bb2aca13779fac13
cm.py
107,934
6
49
register
https://github.com/matplotlib/matplotlib.git
Add missing space before : for parameters
122
0
22,976
11
3
11
def config_test(self) -> None:
    os_info = util.get_os_info()
    fedora = os_info[0].lower() == "fedora"
    try:
        super().config_test()
    except errors.MisconfigurationError:
        if fedora:
            self._try_restart_fedora()
        else:
            raise
certbot-apache/certbot_apache/_internal/override_centos.py
94
certbot
{ "docstring": "\n Override config_test to mitigate configtest error in vanilla installation\n of mod_ssl in Fedora. The error is caused by non-existent self-signed\n certificates referenced by the configuration, that would be autogenerated\n during the first (re)start of httpd.\n ", "language": "en", "n_whitespaces": 71, "n_words": 35, "vocab_size": 30 }
21
Python
20
7d9e9a49005de7961e84d2a7c608db57dbab3046
override_centos.py
186,661
16
52
config_test
https://github.com/certbot/certbot.git
Add typing to certbot.apache (#9071) * Add typing to certbot.apache Co-authored-by: Adrien Ferrand <[email protected]>
119
0
45,569
12
4
11
def find_location(self, root, path, prefix=None):
    if prefix:
        prefix = "%s%s" % (prefix, os.sep)
        if not path.startswith(prefix):
            return None
        path = path[len(prefix) :]
    path = safe_join(root, path)
    if os.path.exists(path):
        return path
django/contrib/staticfiles/finders.py
110
django
{ "docstring": "\n Find a requested static file in a location and return the found\n absolute path (or ``None`` if no match).\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 18 }
30
Python
23
9c19aff7c7561e3a82978a272ecdaad40dda5c00
finders.py
204,340
9
69
find_location
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
117
0
50,703
12
7
13
def del_store(name, store, saltenv="base"):
    ret = {"name": name, "result": True, "comment": "", "changes": {}}

    cert_file = __salt__["cp.cache_file"](name, saltenv)
    if cert_file is False:
        ret["comment"] = "Certificate file not found: {}".format(name)
        ret["result"] = False
        return ret

    cert_serial = __salt__["certutil.get_cert_serial"](name)
    if cert_serial is None:
        ret["comment"] = "Invalid certificate file: {}".format(name)
        ret["result"] = False
        return ret

    old_serials = __salt__["certutil.get_stored_cert_serials"](store=store)
    if cert_serial not in old_serials:
        ret["comment"] = "Certificate already absent: {}".format(name)
        return ret

    if __opts__["test"]:
        ret["comment"] = "Certificate will be removed: {}".format(name)
        ret["result"] = None
        return ret

    retcode = __salt__["certutil.del_store"](name, store, retcode=True)
    if retcode != 0:
        ret["comment"] = "Error removing certificate: {}".format(name)
        ret["result"] = False
        return ret

    new_serials = __salt__["certutil.get_stored_cert_serials"](store=store)
    if cert_serial not in new_serials:
        ret["changes"]["removed"] = name
        ret["comment"] = "Removed certificate: {}".format(name)
    else:
        ret["comment"] = "Failed to remove certificate: {}".format(name)
        ret["result"] = False

    return ret
salt/states/win_certutil.py
449
salt
{ "docstring": "\n Remove a certificate from the given certificate store\n\n Args:\n\n name (str):\n The path to the certificate to remove from the store. This is either\n the path to a local file or a file from the file server in the form\n of ``salt://path/to/file``\n\n store (str):\n The certificate store to remove the certificate from\n\n saltenv (str):\n The salt environment to use. This is ignored if the path is local\n\n Returns:\n dict: A dictionary containing the results\n\n CLI Example:\n\n .. code-block:: yaml\n\n remove_certificate:\n certutil.del_store:\n name: salt://web_cert.cer\n store: TrustedPublisher\n ", "language": "en", "n_whitespaces": 225, "n_words": 85, "vocab_size": 49 }
131
Python
63
a8d2d1e1397cdc79b2c5f1ad7f6e3b729dcf8857
win_certutil.py
215,895
33
252
del_store
https://github.com/saltstack/salt.git
Add tests, fix state module
302
0
54,230
12
3
12
def _load_raw(self, datapath):
    if not PathManager.exists(datapath):
        raise RuntimeError(
            f'Conversations at path {datapath} not found. '
            'Double check your path.'
        )
    with PathManager.open(datapath, 'r') as f:
        lines = f.read().splitlines()
        for line in lines:
            yield line
parlai/utils/conversations.py
101
ParlAI
{ "docstring": "\n Load the data as a raw, unparsed file.\n\n Useful for fast IO stuff like random indexing.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 16 }
34
Python
32
74e12d10bdf6e8f8abc82056e00d6e2360b871af
conversations.py
194,838
10
54
_load_raw
https://github.com/facebookresearch/ParlAI.git
[teachers] Speed up conversations teacher (#4404) * Speed up conversations teacher. * Whoops. * Okay let's try bringing back train mode. * Update test_conversations.py * Update conversations.py * Update test.py
144
0
47,108
12
4
20
def Error(filename, linenum, category, confidence, message):
    if _ShouldPrintError(category, confidence, linenum):
        _cpplint_state.IncrementErrorCount(category)
        if _cpplint_state.output_format == 'vs7':
            sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
                filename, linenum, message, category, confidence))
        elif _cpplint_state.output_format == 'eclipse':
            sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
                filename, linenum, message, category, confidence))
        else:
            sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
                filename, linenum, message, category, confidence))


# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
    r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Matches strings. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
# Matches characters. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
# Matches multi-line C++ comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removals tools so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
    r, re.VERBOSE)
code/deep/BJMMD/caffe/scripts/cpp_lint.py
241
transferlearning
{ "docstring": "Logs the fact we've found a lint error.\n\n We log where the error was found, and also our confidence in the error,\n that is, how certain we are this is a legitimate style regression, and\n not a misidentification or a use that's sometimes justified.\n\n False positives can be suppressed by the use of\n \"cpplint(category)\" comments on the offending line. These are\n parsed into _error_suppressions.\n\n Args:\n filename: The name of the file containing the error.\n linenum: The number of the line containing the error.\n category: A string used to describe the \"category\" this bug\n falls under: \"whitespace\", say, or \"runtime\". Categories\n may have a hierarchy separated by slashes: \"whitespace/indent\".\n confidence: A number from 1-5 representing a confidence score for\n the error, with 5 meaning that we are certain of the problem,\n and 1 meaning that it could be a legitimate construct.\n message: The error message.\n (\\s*/\\*.*\\*/\\s*$|\n /\\*.*\\*/\\s+|\n \\s+/\\*.*\\*/(?=\\W)|\n /\\*.*\\*/)", "language": "en", "n_whitespaces": 223, "n_words": 148, "vocab_size": 103 }
192
Python
120
cc4d0564756ca067516f71718a3d135996525909
cpp_lint.py
60,411
12
106
Error
https://github.com/jindongwang/transferlearning.git
Balanced joint maximum mean discrepancy for deep transfer learning
246
0
12,139
14
1
6
def _new_batch_builder(self, _) -> _PolicyCollectorGroup:
    return _PolicyCollectorGroup(self._worker.policy_map)
rllib/evaluation/env_runner_v2.py
32
ray
{ "docstring": "Create a new batch builder.\n\n We create a _PolicyCollectorGroup based on the full policy_map\n as the batch builder.\n ", "language": "en", "n_whitespaces": 39, "n_words": 18, "vocab_size": 14 }
7
Python
7
52bb8e47d483082e528fc8595005e0813a46efb8
env_runner_v2.py
124,015
7
19
_new_batch_builder
https://github.com/ray-project/ray.git
[RLlib] EnvRunnerV2 and EpisodeV2 that support Connectors. (#25922)
21
0
27,496
9
1
8
def test_huggingface_text_generation() -> None:
    llm = HuggingFaceHub(repo_id="gpt2", model_kwargs={"max_new_tokens": 10})
    output = llm("Say foo:")
    assert isinstance(output, str)
tests/integration_tests/llms/test_huggingface_hub.py
65
langchain
{ "docstring": "Test valid call to HuggingFace text generation model.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
16
Python
15
b9f61390e9cf7f4b8b809cba56392d1f7b3ef6e6
test_huggingface_hub.py
191,455
5
36
test_huggingface_text_generation
https://github.com/hwchase17/langchain.git
add text2text generation (#93) fixes issue #90
28
0
46,587
12
2
9
def parse_options(self, server_name=None, from_ip=None, ip=None):
    self.ServerName = server_name
    self.ip = ip
    if isinstance(from_ip, str):
        self.from_ip = Net(from_ip)
    else:
        self.from_ip = from_ip
scapy/layers/netbios.py
81
scapy
{ "docstring": "\n NBNS answering machine\n\n :param server_name: the netbios server name to match\n :param from_ip: an IP (can have a netmask) to filter on\n :param ip: the IP to answer with\n ", "language": "en", "n_whitespaces": 65, "n_words": 29, "vocab_size": 23 }
21
Python
17
dd7a5c97d68c00d1d03ecf8ac27c6c7038525065
netbios.py
209,498
7
51
parse_options
https://github.com/secdev/scapy.git
Answering machines improvements (NBNS/DNS/LLMNR) (#3699) * Minor NBNS improvements * Improve Netbios/LLMNR/DNS answering machines * DNS_am: support IPv6 * More customization of some answering machines
78
0
52,692
10
2
6
def check_classification_targets(y):
    y_type = type_of_target(y, input_name="y")
    if y_type not in [
        "binary",
        "multiclass",
        "multiclass-multioutput",
        "multilabel-indicator",
        "multilabel-sequences",
    ]:
        raise ValueError("Unknown label type: %r" % y_type)
sklearn/utils/multiclass.py
74
scikit-learn
{ "docstring": "Ensure that target y is of a non-regression type.\n\n Only the following target types (as defined in type_of_target) are allowed:\n 'binary', 'multiclass', 'multiclass-multioutput',\n 'multilabel-indicator', 'multilabel-sequences'\n\n Parameters\n ----------\n y : array-like\n Target values.\n ", "language": "en", "n_whitespaces": 68, "n_words": 32, "vocab_size": 30 }
24
Python
23
f9d74236e26f6169b32e23887f30879c32ac76c7
multiclass.py
259,173
10
40
check_classification_targets
https://github.com/scikit-learn/scikit-learn.git
fix docstring of dict_learning.sparse_encode and multiclass.check_classification_targets #21350 #pariswimlds (#22793) * fix docstring * fixed linting in multiclass * fixed linting in dict learning * fixed linting in dict learning * fixed linting in dict learning * fixed linting in dict learning * fixed linting in dict learning Co-authored-by: Sakina <[email protected]>
78
0
75,624
11
15
103
async def wma_command(ctx, ticker="", window="", offset="", start="", end=""):
    try:
        # Debug
        if cfg.DEBUG:
            logger.debug(
                "!stocks.ta.wma %s %s %s %s %s",
                ticker,
                window,
                offset,
                start,
                end,
            )

        # Check for argument
        if ticker == "":
            raise Exception("Stock ticker is required")

        if start == "":
            start = datetime.now() - timedelta(days=365)
        else:
            start = datetime.strptime(start, cfg.DATE_FORMAT)

        if end == "":
            end = datetime.now()
        else:
            end = datetime.strptime(end, cfg.DATE_FORMAT)

        l_legend = [ticker]

        if window == "":
            window = [20, 50]
        else:
            window_temp = list()
            for wind in window.split(","):
                try:
                    window_temp.append(float(wind))
                except Exception as e:
                    raise Exception("Window needs to be a float") from e
            window = window_temp

        if offset == "":
            offset = 0
        else:
            if not offset.lstrip("-").isnumeric():
                raise Exception("Number has to be an integer")
            offset = float(offset)

        ticker = ticker.upper()
        stock = discordbot.helpers.load(ticker, start)
        if stock.empty:
            raise Exception("Stock ticker is invalid")

        # Retrieve Data
        price_df = pd.DataFrame(
            stock["Adj Close"].values, columns=["Price"], index=stock.index
        )

        i = 1
        for win in window:
            wma_data = overlap_model.wma(
                s_interval="1440min", df_stock=stock, length=win, offset=offset
            )
            price_df = price_df.join(wma_data)
            l_legend.append(f"WMA {win}")
            i += 1

        # Output Data
        start = start.strftime("%Y-%m-%d")
        end = end.strftime("%Y-%m-%d")
        price_df = price_df.loc[(price_df.index >= start) & (price_df.index < end)]

        fig, ax = plt.subplots(figsize=plot_autoscale(), dpi=PLOT_DPI)
        ax.set_title(f"{ticker} WMA")
        ax.plot(price_df.index, price_df["Price"], lw=3, c="k")
        ax.set_xlabel("Time")
        ax.set_xlim([price_df.index[0], price_df.index[-1]])
        ax.set_ylabel(f"{ticker} Price")

        for idx in range(1, price_df.shape[1]):
            ax.plot(price_df.iloc[:, idx])

        ax.legend(l_legend)
        ax.grid(b=True, which="major", color="#666666", linestyle="-")
        plt.gcf().autofmt_xdate()
        fig.tight_layout(pad=1)
        plt.savefig("ta_wma.png")
        uploaded_image = gst_imgur.upload_image("ta_wma.png", title="something")
        image_link = uploaded_image.link
        if cfg.DEBUG:
            logger.debug("Image URL: %s", image_link)
        title = "Stocks: Weighted-Moving-Average " + ticker
        embed = discord.Embed(title=title, colour=cfg.COLOR)
        embed.set_author(
            name=cfg.AUTHOR_NAME,
            icon_url=cfg.AUTHOR_ICON_URL,
        )
        embed.set_image(url=image_link)
        os.remove("ta_wma.png")

        await ctx.send(embed=embed)

    except Exception as e:
        embed = discord.Embed(
            title="ERROR Stocks: Weighted-Moving-Average",
            colour=cfg.COLOR,
            description=e,
        )
        embed.set_author(
            name=cfg.AUTHOR_NAME,
            icon_url=cfg.AUTHOR_ICON_URL,
        )

        await ctx.send(embed=embed)
discordbot/stocks/technical_analysis/wma.py
1,097
OpenBBTerminal
{ "docstring": "Displays chart with weighted moving average [Yahoo Finance]", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
271
Python
169
f40ba0d256a78ab2b8461f0df3a9a52ca7dc5704
wma.py
281,206
93
653
wma_command
https://github.com/OpenBB-finance/OpenBBTerminal.git
Bot logging fix (#1105) * Write bot logs to stdout instead of a file Heroku's logging uses the stdout and has problems with files * Send "you snooze you lose" only if debug flag is enabled * Replace print statements with logger entries in the economy menu * Add logging to bot menu command calls * Silence bandit warnings about the REPLACE_ME token * Organize imports and update logging in economy menu * Organize imports and update logging in dps menu * Organize imports and update logging in dd menu * Organize imports and update logging in gov menu * Organize imports and update logging in options menu * Organize imports and update logging in screener menu * Organize imports and update logging in ta menu * Revert automatic import sorting * Add logging to the options reaction helper
1,162
0
83,612
18
1
20
def test_worker_group_replicas(group_index, expected_max_replicas, expected_replicas):
    raycluster = get_basic_ray_cr()

    # Add a worker group without maxReplicas to confirm behavior
    # when maxReplicas is not specified.
    no_max_replicas_group = copy.deepcopy(raycluster["spec"]["workerGroupSpecs"][0])
    no_max_replicas_group["groupName"] = "no-max-replicas"
    del no_max_replicas_group["maxReplicas"]
    # Also, replicas field, just for the sake of testing.
    no_max_replicas_group["replicas"] = 0
    raycluster["spec"]["workerGroupSpecs"].append(no_max_replicas_group)

    assert _worker_group_max_replicas(raycluster, group_index) == expected_max_replicas
    assert _worker_group_replicas(raycluster, group_index) == expected_replicas


@pytest.mark.skipif(sys.platform.startswith("win"), reason="Not relevant on Windows.")
@pytest.mark.parametrize(
    "attempted_target_replica_count,expected_target_replica_count",
    [(200, 200), (250, 250), (300, 300), (400, 300), (1000, 300)],
)
python/ray/tests/kuberay/test_kuberay_node_provider.py
229
@pytest.mark.skipif(sys.platform.startswith("win"), reason="Not relevant on Windows.") @pytest.mark.parametrize( "attempted_target_replica_count,expected_target_replica_count", [(200, 200), (250, 250), (300, 300), (400, 300), (1000, 300)], )
ray
{ "docstring": "Basic unit test for _worker_group_max_replicas and _worker_group_replicas\n\n Uses a RayCluster CR with worker groups with 300 maxReplicas, 200 maxReplicas,\n and unspecified maxReplicas, in that order.\n ", "language": "en", "n_whitespaces": 34, "n_words": 25, "vocab_size": 21 }
73
Python
63
c976799dfd96806ec9972a287835f7a034ec3d2c
test_kuberay_node_provider.py
136,642
9
79
test_worker_group_replicas
https://github.com/ray-project/ray.git
KubeRay node provider refactor (#30281) Implements KubeRay node provider as a "BatchingNodeProvider". Builds on #29933. Summary of design An autoscaler update now works like this: list pod data from k8s check if it's safe to proceed with update. Abort the update if not. do some internal calculation to determine desired scale submit a single patch to the RayCluster CR if a scale change is required Everything is single-threaded and there are O(1) K8s API calls per autoscaler update. Signed-off-by: Dmitri Gekhtman <[email protected]>
112
1
30,964
12
1
10
def test_resolve_forward_ref_dataclass(create_module):
    module = create_module(
        # language=Python
tests/test_forward_ref.py
47
module = create_module( # language=Python """@dataclass
pydantic
{ "docstring": "\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\n\nfrom pydantic import BaseModel\nfrom typing_extensions import Literal\n\n@dataclass", "language": "en", "n_whitespaces": 12, "n_words": 17, "vocab_size": 11 }
7
Python
7
594effa279668bd955e98f1cd5c036b37d3bbd40
test_forward_ref.py
14,420
20
36
test_resolve_forward_ref_dataclass
https://github.com/pydantic/pydantic.git
Switching to `pydantic_core` (#4516) * working on core schema generation * adapting main.py * getting tests to run * fix tests * disable pyright, fix mypy * moving to class-based model generation * working on validators * change how models are created * start fixing test_main.py * fixing mypy * SelfType * recursive models working, more tests fixed * fix tests on <3.10 * get docs build to pass * starting to cleanup types.py * starting works on custom types * working on using annotated-types * using annoated types for constraints * lots of cleanup, fixing network tests * network tests passing :tada: * working on types * working on types and cleanup * fixing UUID type, restructing again * more types and newer pydantic-core * working on Iterable * more test_types tests * support newer pydantic-core, fixing more test_types.py * working through more test_types.py * test_types.py at last passing locally :tada: * fixing more tests in test_types.py * fix datetime_parse tests and linting * get tests running again, rename to test_datetime.py * renaming internal modules * working through mypy errors * fixing mypy * refactoring _generate_schema.py * test_main.py passing * uprev deps * fix conftest and linting? * importing Annotated * ltining * import Annotated from typing_extensions * fixing 3.7 compatibility * fixing tests on 3.9 * fix linting * fixing SecretField and 3.9 tests * customising get_type_hints * ignore warnings on 3.11 * spliting repr out of utils * removing unused bits of _repr, fix tests for 3.7 * more cleanup, removing many type aliases * clean up repr * support namedtuples and typeddicts * test is_union * removing errors, uprev pydantic-core * fix tests on 3.8 * fixing private attributes and model_post_init * renaming and cleanup * remove unnecessary PydanticMetadata inheritance * fixing forward refs and mypy tests * fix signatures, change how xfail works * revert mypy tests to 3.7 syntax * correct model title * try to fix tests * fixing ClassVar forward refs * uprev pydantic-core, new error format * add "force" argument to model_rebuild * Apply suggestions from code review Suggestions from @tiangolo and @hramezani :pray: Co-authored-by: Hasan Ramezani <[email protected]> Co-authored-by: Sebastián Ramírez <[email protected]> * more suggestions from @tiangolo * extra -> json_schema_extra on Field Co-authored-by: Hasan Ramezani <[email protected]> Co-authored-by: Sebastián Ramírez <[email protected]>
24
2
2,848
4
7
27
def subplot2grid(shape, loc, rowspan=1, colspan=1, fig=None, **kwargs):
    if fig is None:
        fig = gcf()

    rows, cols = shape
    gs = GridSpec._check_gridspec_exists(fig, rows, cols)

    subplotspec = gs.new_subplotspec(loc, rowspan=rowspan, colspan=colspan)
    ax = fig.add_subplot(subplotspec, **kwargs)

    axes_to_delete = [other for other in fig.axes
                      if other != ax and ax.bbox.fully_overlaps(other.bbox)]
    if axes_to_delete:
        _api.warn_deprecated(
            "3.6", message="Auto-removal of overlapping axes is deprecated "
            "since %(since)s and will be removed %(removal)s; explicitly call "
            "ax.remove() as needed.")
        for ax_to_del in axes_to_delete:
            delaxes(ax_to_del)

    return ax
lib/matplotlib/pyplot.py
203
matplotlib
{ "docstring": "\n Create a subplot at a specific location inside a regular grid.\n\n Parameters\n ----------\n shape : (int, int)\n Number of rows and of columns of the grid in which to place axis.\n loc : (int, int)\n Row number and column number of the axis location within the grid.\n rowspan : int, default: 1\n Number of rows for the axis to span downwards.\n colspan : int, default: 1\n Number of columns for the axis to span to the right.\n fig : `.Figure`, optional\n Figure to place the subplot in. Defaults to the current figure.\n **kwargs\n Additional keyword arguments are handed to `~.Figure.add_subplot`.\n\n Returns\n -------\n `.axes.SubplotBase`, or another subclass of `~.axes.Axes`\n\n The axes of the subplot. The returned axes base class depends on the\n projection used. It is `~.axes.Axes` if rectilinear projection is used\n and `.projections.polar.PolarAxes` if polar projection is used. The\n returned axes is then a subplot subclass of the base class.\n\n Notes\n -----\n The following call ::\n\n ax = subplot2grid((nrows, ncols), (row, col), rowspan, colspan)\n\n is identical to ::\n\n fig = gcf()\n gs = fig.add_gridspec(nrows, ncols)\n ax = fig.add_subplot(gs[row:row+rowspan, col:col+colspan])\n ", "language": "en", "n_whitespaces": 332, "n_words": 179, "vocab_size": 103 }
76
Python
58
23338c7eb4b315cd4af0c57b61afc80f8c2086f9
pyplot.py
107,945
17
129
subplot2grid
https://github.com/matplotlib/matplotlib.git
Deprecate auto-removal of overlapping Axes by plt.subplot{,2grid}. In particular, note that the OO add_subplot does not have this behavior.
181
0
22,983
12
9
42
def migrate_product_tax_codes(apps, _schema_editor):
    Product = apps.get_model("product", "Product")
    ProductType = apps.get_model("product", "ProductType")
    TaxClass = apps.get_model("tax", "TaxClass")

    query = Q(metadata__has_key=VATLAYER_CODE_META_KEY) | Q(
        metadata__has_key=AVATAX_CODE_META_KEY
    )

    tax_class_metadata = {}

    product_types = (
        ProductType.objects.filter(query).values("id", "metadata").order_by("pk")
    )
    for batch_pks in queryset_in_batches(product_types):
        tax_classes_from_product_types = defaultdict(list)
        product_types = ProductType.objects.filter(pk__in=batch_pks)
        for product_type in product_types:
            tax_class_name, metadata = _populate_tax_class_name_and_metadata(
                product_type
            )
            if tax_class_name:
                tax_classes_from_product_types[tax_class_name].append(product_type.pk)
                tax_class_metadata[tax_class_name] = metadata

        for name, ids in tax_classes_from_product_types.items():
            tax_class, _ = TaxClass.objects.get_or_create(
                name=name, metadata=tax_class_metadata.get(name, {})
            )
            ProductType.objects.filter(id__in=ids).update(tax_class=tax_class)

    products = Product.objects.filter(query).values("id", "metadata").order_by("pk")
    tax_classes_from_products = defaultdict(list)
    for batch_pks in queryset_in_batches(products):
        products = Product.objects.filter(pk__in=batch_pks)
        for product in products:
            tax_class_name, metadata = _populate_tax_class_name_and_metadata(product)
            if tax_class_name:
                tax_classes_from_products[tax_class_name].append(product.pk)
                tax_class_metadata[tax_class_name] = metadata

    for name, ids in tax_classes_from_products.items():
        tax_class, _ = TaxClass.objects.get_or_create(
            name=name, metadata=tax_class_metadata.get(name, {})
        )
        Product.objects.filter(id__in=ids).update(tax_class=tax_class)
saleor/tax/migrations/0004_migrate_tax_classes.py
536
saleor
{ "docstring": "Create tax classes by migrating currently used tax codes.\n\n Tax codes are stored in metadata of products and product types. For each found code\n we get or create a TaxClass instance and assign the object to the tax class.\n If object has both Avalara and Vatlayer codes, keep only the Avalara code.\n ", "language": "en", "n_whitespaces": 64, "n_words": 52, "vocab_size": 44 }
113
Python
61
67df28935c555fdd673f17e8c9183e24dde7c51f
0004_migrate_tax_classes.py
29,532
40
329
migrate_product_tax_codes
https://github.com/saleor/saleor.git
Simple (flat rate) taxes API (#9784) * Add empty tax module * Add tax models (#9839) * Add tax API queries (#9856) * Add MANAGE_TAXES permission * Add tax configuration queries * Create tax configuration when channel is created * Drop sorters for now * Add TaxConfigurationPerCountry type * Update migration * Add metadata to TaxConfiguration type * Add tests for tax configuration queries * Add TaxClass types * Improve tests * Add queries for tax configuration per country * Fix query in tests * Update query cost map * Add tax API mutations (#9934) * Add taxConfigurationUpdate mutation * Update schema * Add tax class CRUD mutations * Add mutations to update/delete tax class rates per country * Review fixes * Add taxClass field to ProductType type (#9999) * Add taxClass field to ProductType type * Add taxClass field to Product type * Add taxClass field to shipping method type * Add displayGrossPrices to ProductPricingInfo (#10008) * Add displayGrossPrices to ProductPricingInfo * Add displayGrossPrices to Checkout * Add displayGrossPrices to Order * Add tests * Add ADDED_IN_35 label to new fields' descriptions * Use new display_gross_prices flag (#10121) * Use new display_gross_prices flag * Update tests * Add tests * Review fixes * Drop Vatlayer (#10335) * Add migration from Vatlayer to simple taxes * Review fixes * Review fixes * Drop usages of global include_taxes_in_prices flag (#10406) * Drop `include_taxes_in_prices` function from site settings * Adjust tests * Review fixes * Drop the `charge_taxes_on_shipping` flag from site settings. (#10466) * Include migrating Avatax tax codes in tax class migration * Drop `charge_taxes_on_shipping` function * Add tax_class to ShippingMethodData * Review fixes * Always calculate shipping tax with Avalara * Add default country rate (#10497) * Allow setting default tax rate for a country (without providing a tax class) * Add validation to allow settings only one default rate at once * Code review fixes * Add taxCalculationStrategy field * Add tests * CR fixes * Adjust resolver to use new tax configuration (#10533) * CR fixes * Add database router to fix false positives on relation mismatch. (#10524) * Add database router to fix false positives on relation mismatch. * The db router should have only 'allow_relation' implemented. * The 'db_for_write' part should stay. * Subscription for sync tax webooks (#10433) * Add proposed changes to schema * Add base implementation for sync tax subscription * Add test for empty order * Add clean up and missing part for tests * Use subscription for tax webhooks. Add more tests * Improve descriptions for tax objects * Adjust resolver to use new tax configuration (#10533) * Add taxCalculationStrategy field (#10532) * Add taxCalculationStrategy field * Add tests * CR fixes * CR fixes * Add datamigration to populate taxCalculationStrategy * Migrate Product.charge_taxes to new tax configuration (#10585) * Migrate Product.charge_taxes field to new tax configuration * Rename function * Fix tests * Change assign_tax_code_to_object_meta function to support tax classes * Update tax class fixtures * Improve dataloader * CR fixes * CR fixes * Add deprecation notice to dataloader * Allow removing country rates in the `taxCountryConfigurationUpdate` mutation. 
(#10647) * Allow deleting rates in taxCountryConfigurationUpdate mutation * Change tax rates ordering to keep default rates first (with null tax classes) * Update existing migration * Remove TaxClass.is_default field (#10660) * Change tax rates ordering to keep default rates first (with null tax classes) * Update existing migration * Drop is_default field from TaxClass model * Drop extra Avalara config (#10673) * Drop extra Avatax config options * Adjust tests * Use flat rates in tax calculations (#10747) * WIP Use new tax configuration in tax calculations * Use new tax calculations for checkout * Adjust tests * Add flat rates calculations for checkout and order * Calculate flat rates in product pricing objects * Adjust tests * Add tests for order calculations * Add tests for product queries tax calculations * Add tests for order calculations * Use base calculations to get default checkout shipping price * Add tests for using tax_class from product_type * Add tests for get_order_country * Adjust tests * Code review fixes * Drop update_taxes_for_order_lines (#11000) * Fix calls to Avalara not validating order (#11012) * Add validation to disallow creating negative rates (#11010) * Add missing recalculation of order.undiscounted_total (#11039) * Optimize getting tax class country rates (#11040) * Tax API adjustments for dashboard (#11042) * Ignore null rates in taxCountryConfigurationUpdate mutation * Allow to pass null rates in taxClassUpdate mutation * Improve tests * Update saleor/graphql/tax/mutations/tax_class_update.py Co-authored-by: Krzysztof Waliczek <[email protected]> * Update schema Co-authored-by: Krzysztof Waliczek <[email protected]> * Cleanup before release (#11049) * Update ADDED_IN labels * Fix skippeded test * Regenerate migrations * Deprecate CountryDisplay.vat field * Add changelog * Update order.undiscounted_total calculation to not include taxes (#11068) * Fix assigning rates to tax classes (#11105) * Allow all staff users and apps to query tax-related data (#11113) * Bump dependencies for origin/SALEOR-6391-simple-taxes (#11127) Bumps: - cryptography to 38.0.3 - pillow to 9.3.0 * Fix using tax code from product and product type's tax class (#11111) * Fix using tax code from product and product type's tax class * Extract function * Replace synchronous load_site with promise (#11165) * Denormalize tax class for order lines and orders (#11172) * WIP Denormalize tax class for order lines and orders * Add denormalized fields in GraphQL types * Add tests for denormalized API fields * Return 0 rate in API when rate is null * Add preview/version notes in new field descriptions * Update changelog Co-authored-by: Dominik Kozaczko <[email protected]> Co-authored-by: Maciej Korycinski <[email protected]> Co-authored-by: Krzysztof Waliczek <[email protected]> Co-authored-by: Mika <[email protected]> Co-authored-by: Krzysztof Kwaśniak <[email protected]>
441
0
5,234
16
6
32
async def async_inference_detector(model, imgs):
    if not isinstance(imgs, (list, tuple)):
        imgs = [imgs]

    cfg = model.cfg

    if isinstance(imgs[0], np.ndarray):
        cfg = cfg.copy()
        # set loading pipeline type
        cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'

    cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    test_pipeline = Compose(cfg.data.test.pipeline)

    datas = []
    for img in imgs:
        # prepare data
        if isinstance(img, np.ndarray):
            # directly add img
            data = dict(img=img)
        else:
            # add information into dict
            data = dict(img_info=dict(filename=img), img_prefix=None)
        # build the data pipeline
        data = test_pipeline(data)
        datas.append(data)

    for m in model.modules():
        assert not isinstance(
            m, RoIPool), 'CPU inference with RoIPool is not supported currently.'

    # We don't restore `torch.is_grad_enabled()` value during concurrent
    # inference since execution can overlap
    torch.set_grad_enabled(False)
    results = await model.aforward_test(data, rescale=True)
    return results
mmdet/apis/inference.py
314
mmdetection
{ "docstring": "Async inference image(s) with the detector.\n\n Args:\n model (nn.Module): The loaded detector.\n img (str | ndarray): Either image files or loaded images.\n\n Returns:\n Awaitable detection results.\n ", "language": "en", "n_whitespaces": 56, "n_words": 26, "vocab_size": 24 }
113
Python
80
2631e2879acf0bd20a64dfdd7039f37a8e6afbf6
inference.py
244,324
24
193
async_inference_detector
https://github.com/open-mmlab/mmdetection.git
Support Datasampler
298
0
70,325
17
6
7
def isGoalState(self):
    for i in range(self.size):
        for j in range(self.size):
            if i == j and j == self.size - 1:
                continue
            if self.state[i][j] != (i) * self.size + (j + 1):
                return False
    return True
Eight_Puzzle_Solver/eight_puzzle.py
108
Python
{ "docstring": "\n Parameters: State\n Returns: True if Goal State, otherwise False\n Restrictions: State is self.size x self.size Array\n ", "language": "en", "n_whitespaces": 45, "n_words": 16, "vocab_size": 14 }
35
Python
24
f0af0c43340763724f139fa68aa1e5a9ffe458b4
eight_puzzle.py
22,411
8
69
isGoalState
https://github.com/geekcomputers/Python.git
refactor: clean code Signed-off-by: slowy07 <[email protected]>
135
0
4,321
13
1
5
def combine_first(self, other):  # noqa: PR01, RT01, D200
    return self._binary_op("combine_first", other, _axis=0)
modin/pandas/base.py
36
modin
{ "docstring": "\n Update null elements with value in the same location in `other`.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 10 }
12
Python
12
605efa618e7994681f57b11d04d417f353ef8d50
base.py
153,568
2
21
combine_first
https://github.com/modin-project/modin.git
DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333) Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Alexander Myskov <[email protected]>
27
0
35,449
8
4
22
def testOnCheckpointOrdered(self):
    keep_checkpoints_num = 2
    checkpoint_manager = self.checkpoint_manager(keep_checkpoints_num)
    checkpoints = [
        Checkpoint(Checkpoint.PERSISTENT, {i}, self.mock_result(i))
        for i in range(3)
    ]

    with patch.object(checkpoint_manager, "delete") as delete_mock:
        for j in range(3):
            checkpoint_manager.on_checkpoint(checkpoints[j])
            expected_deletes = 0 if j != 2 else 1
            self.assertEqual(delete_mock.call_count, expected_deletes, j)
            self.assertEqual(
                checkpoint_manager.newest_persistent_checkpoint, checkpoints[j]
            )

        best_checkpoints = checkpoint_manager.best_checkpoints()
        self.assertEqual(len(best_checkpoints), keep_checkpoints_num)
        self.assertIn(checkpoints[1], best_checkpoints)
        self.assertIn(checkpoints[2], best_checkpoints)
python/ray/tune/tests/test_checkpoint_manager.py
231
ray
{ "docstring": "\n Tests increasing priorities. Also tests that that the worst checkpoints\n are deleted when necessary.\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
54
Python
45
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
test_checkpoint_manager.py
132,416
19
148
testOnCheckpointOrdered
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
251
0
29,752
12
1
7
def inverse_transform(self, X):
    check_is_fitted(self)
    X = check_array(X)

    return (X @ self.components_) + self.mean_
sklearn/decomposition/_sparse_pca.py
50
scikit-learn
{ "docstring": "Transform data from the latent space to the original space.\n\n This inversion is an approximation due to the loss of information\n induced by the forward decomposition.\n\n .. versionadded:: 1.2\n\n Parameters\n ----------\n X : ndarray of shape (n_samples, n_components)\n Data in the latent space.\n\n Returns\n -------\n X_original : ndarray of shape (n_samples, n_features)\n Reconstructed data in the original space.\n ", "language": "en", "n_whitespaces": 150, "n_words": 58, "vocab_size": 40 }
13
Python
13
01fcf8a0acc7e6517faa4fc6887eb45f5d2ea77b
_sparse_pca.py
260,502
4
30
inverse_transform
https://github.com/scikit-learn/scikit-learn.git
ENH add inverse_transform in *SparsePCA (#23905)
41
0
76,297
9
1
6
def get_student_group_strength(student_group):
    student_group_strength = frappe.db.sql(
        ,
        student_group,
    )[0][0]
    return student_group_strength
erpnext/education/report/student_batch_wise_attendance/student_batch_wise_attendance.py
41
erpnext
{ "docstring": "select count(*) from `tabStudent Group Student`\n\t\twhere parent = %s and active=1", "language": "en", "n_whitespaces": 10, "n_words": 12, "vocab_size": 12 }
10
Python
9
494bd9ef78313436f0424b918f200dab8fc7c20b
student_batch_wise_attendance.py
65,944
7
26
get_student_group_strength
https://github.com/frappe/erpnext.git
style: format code with black
4
0
14,064
11
1
11
def wait_for_text_to_equal(self, selector, text, timeout=None):
    return self._wait_for(
        method=text_to_equal,
        args=(selector, text),
        timeout=timeout,
        msg=f"text -> {text} not found within {timeout or self._wait_timeout}s",
    )
dash/testing/browser.py
73
dash
{ "docstring": "Explicit wait until the element's text equals the expected `text`.\n\n timeout if not set, equals to the fixture's `wait_timeout`\n shortcut to `WebDriverWait` with customized `text_to_equal`\n condition.\n ", "language": "en", "n_whitespaces": 54, "n_words": 26, "vocab_size": 22 }
21
Python
21
c3c84b9ecf16bcc61ed80ec39d511af92fe07f2c
browser.py
40,215
7
41
wait_for_text_to_equal
https://github.com/plotly/dash.git
f-strings everywhere! fffff
86
0
7,354
12
3
11
def stack_pos(self) -> str:
    try:
        idx = self.context.layers.index(self)
    except ValueError:
        return repr(self)
    else:
        return " >> ".join(repr(x) for x in self.context.layers[: idx + 1])
mitmproxy/proxy/layer.py
94
mitmproxy
{ "docstring": "repr() for this layer and all its parent layers, only useful for debugging.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 12 }
24
Python
22
b3587b52b25077f68116b9852b041d33e7fc6601
layer.py
251,443
8
56
stack_pos
https://github.com/mitmproxy/mitmproxy.git
make it black!
85
0
73,741
14
2
21
def apply_node_provider_config_updates(config, node_cfg, node_type_name, max_count):
    tags = node_provider_tags(config, node_type_name)
    tags[TAG_RAY_CLUSTER_NAME] = DEFAULT_CLUSTER_NAME
    user_tag_specs = node_cfg.get("TagSpecifications", [])
    tag_specs = [
        {
            "ResourceType": "instance",
            "Tags": [{"Key": k, "Value": v} for k, v in sorted(tags.items())],
        }
    ]
    node_provider_cfg_updates = {
        "MinCount": 1,
        "MaxCount": max_count,
        "TagSpecifications": tag_specs,
    }
    tags.pop(TAG_RAY_CLUSTER_NAME)
    node_cfg.update(node_provider_cfg_updates)
    # merge node provider tag specs with user overrides
    AWSNodeProvider._merge_tag_specs(tag_specs, user_tag_specs)
python/ray/tests/aws/utils/helpers.py
184
ray
{ "docstring": "\n Applies default updates made by AWSNodeProvider to node_cfg during node\n creation. This should only be used for testing purposes.\n\n Args:\n config: autoscaler config\n node_cfg: node config\n node_type_name: node type name\n max_count: max nodes of the given type to launch\n ", "language": "en", "n_whitespaces": 80, "n_words": 39, "vocab_size": 34 }
57
Python
50
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
helpers.py
131,089
18
110
apply_node_provider_config_updates
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
150
0
29,475
15
1
8
def test_relative_name(file):
    result = file.rename(name="..\\rel\\path\\test", source=str(source))
    assert "is not an absolute path" in result.filtered["comment"]
    assert result.filtered["result"] is False
tests/pytests/functional/states/file/test_rename.py
76
salt
{ "docstring": "\n Test file.rename when name is a relative path\n ", "language": "en", "n_whitespaces": 15, "n_words": 8, "vocab_size": 8 }
18
Python
17
a35b29b2651bf33c5d5b45e64bc7765ffde4aff4
test_rename.py
215,828
4
41
test_relative_name
https://github.com/saltstack/salt.git
Add some funtional tests Add functional tests for the following: - file.readlink - file.replace - file.symlink Remove unit tests for file.replace as they are duplicated in the added functional test
30
0
54,194
11
1
2
def ybingroup(self):
    return self["ybingroup"]
packages/python/plotly/plotly/graph_objs/_histogram2d.py
22
plotly.py
{ "docstring": "\n Set a group of histogram traces which will have compatible\n y-bin settings. Using `ybingroup`, histogram2d and\n histogram2dcontour traces (on axes of the same axis type) can\n have compatible y-bin settings. Note that the same `ybingroup`\n value can be used to set (1D) histogram `bingroup`\n\n The 'ybingroup' property is a string and must be specified as:\n - A string\n - A number that will be converted to a string\n\n Returns\n -------\n str\n ", "language": "en", "n_whitespaces": 161, "n_words": 71, "vocab_size": 49 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_histogram2d.py
227,054
2
11
ybingroup
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
58,727
7
1
11
def debug_mutated_model(base_model, evaluator, applied_mutators): base_model_ir, applied_mutators = preprocess_model(base_model, evaluator, applied_mutators) from ..strategy import _LocalDebugStrategy strategy = _LocalDebugStrategy() strategy.run(base_model_ir, applied_mutators) _logger.info('local debug completed!')
nni/retiarii/experiment/pytorch.py
78
nni
{ "docstring": "\n Locally run only one trial without launching an experiment for debug purpose, then exit.\n For example, it can be used to quickly check shape mismatch.\n\n Specifically, it applies mutators (default to choose the first candidate for the choices)\n to generate a new model, then run this model locally.\n\n The model will be parsed with graph execution engine.\n\n Parameters\n ----------\n base_model : nni.retiarii.nn.pytorch.nn.Module\n the base model\n evaluator : nni.retiarii.graph.Evaluator\n the training class of the generated models\n applied_mutators : list\n a list of mutators that will be applied on the base model for generating a new model\n ", "language": "en", "n_whitespaces": 150, "n_words": 95, "vocab_size": 67 }
22
Python
19
5b7dac5c4054115854b3684ba86b9a79fb18d5eb
pytorch.py
111,857
6
47
debug_mutated_model
https://github.com/microsoft/nni.git
Wrap one-shot algorithms as strategies (#4571)
40
0
24,492
8
5
11
def k_crust(G, k=None, core_number=None): # Default for k is one less than in _core_subgraph, so just inline. # Filter is c[v] <= k if core_number is None: core_number = nx.core_number(G) if k is None: k = max(core_number.values()) - 1 nodes = (v for v in core_number if core_number[v] <= k) return G.subgraph(nodes).copy()
networkx/algorithms/core.py
115
networkx
{ "docstring": "Returns the k-crust of G.\n\n The k-crust is the graph G with the edges of the k-core removed\n and isolated nodes found after the removal of edges are also removed.\n\n Parameters\n ----------\n G : NetworkX graph\n A graph or directed graph.\n k : int, optional\n The order of the shell. If not specified return the main crust.\n core_number : dictionary, optional\n Precomputed core numbers for the graph G.\n\n Returns\n -------\n G : NetworkX graph\n The k-crust subgraph\n\n Raises\n ------\n NetworkXError\n The k-crust is not implemented for graphs with self loops\n or parallel edges.\n\n Notes\n -----\n This definition of k-crust is different than the definition in [1]_.\n The k-crust in [1]_ is equivalent to the k+1 crust of this algorithm.\n\n Not implemented for graphs with parallel edges or self loops.\n\n For directed graphs the node degree is defined to be the\n in-degree + out-degree.\n\n Graph, node, and edge attributes are copied to the subgraph.\n\n See Also\n --------\n core_number\n\n References\n ----------\n .. [1] A model of Internet topology using k-shell decomposition\n Shai Carmi, Shlomo Havlin, Scott Kirkpatrick, Yuval Shavitt,\n and Eran Shir, PNAS July 3, 2007 vol. 104 no. 27 11150-11154\n http://www.pnas.org/content/104/27/11150.full\n ", "language": "en", "n_whitespaces": 334, "n_words": 190, "vocab_size": 121 }
52
Python
35
17fa9942568bfca34d4a68f8d93c538014f69389
core.py
176,278
7
71
k_crust
https://github.com/networkx/networkx.git
Fix functions appearing in variables `__all__` but not in docs for NX2.7 (#5289) * Adjust functions appearing in `__all__` but not in docs * clean up coloring: merge two modules make interchange private * fix duplicate name. Probably should be changed * fix "see also" doc of recursive_simple_cycles * Rm internal uses of deprecated . * Fixup warnings filters regex. * clean up a bit more, make Node & AdjList private classes Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Mridul Seth <[email protected]>
90
0
41,801
13
4
17
def native_unit_of_measurement(self) -> str | None: raw_units = self.raw_unit_of_measurement # Check if this is a known index pair UOM if isinstance(raw_units, dict) or raw_units in (UOM_ON_OFF, UOM_INDEX): return None if raw_units in ( UnitOfTemperature.FAHRENHEIT, UnitOfTemperature.CELSIUS, UOM_DOUBLE_TEMP, ): return self.hass.config.units.temperature_unit return raw_units
homeassistant/components/isy994/sensor.py
94
core
{ "docstring": "Get the Home Assistant unit of measurement for the device.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
41
Python
33
a6ddac9004b7f73633f2019f3b5267e1486756c1
sensor.py
297,698
12
61
native_unit_of_measurement
https://github.com/home-assistant/core.git
Use UnitOfTemperature in integrations (i-m) (#84307)
145
0
96,664
11
1
15
def test_get_repositories_all_and_pagination(self): with self.tasks(): self.assert_setup_flow() integration = Integration.objects.get(provider=self.provider.key) installation = integration.get_installation(self.organization) result = installation.get_repositories() assert result == [ {"name": "foo", "identifier": "Test-Organization/foo"}, {"name": "bar", "identifier": "Test-Organization/bar"}, {"name": "baz", "identifier": "Test-Organization/baz"}, ]
tests/sentry/integrations/github/test_integration.py
160
sentry
{ "docstring": "Fetch all repositories and test the pagination logic.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
31
Python
24
d6bcead1be02914e9734ab23f5e476b3d6f3f2cb
test_integration.py
86,734
11
86
test_get_repositories_all_and_pagination
https://github.com/getsentry/sentry.git
fix(github): Add pagination when fetching repositories (#39750) We are not using pagination for Github's repositories endpoint. This means we were getting up to a maximum of 100 repositories. I do not know how no one hit any issues in the past. This is work to support WOR-2234 and creating automatic code mappings.
124
0
18,162
11
11
26
def bivariate_type(f, x, y, *, first=True): u = Dummy('u', positive=True) if first: p = Poly(f, x, y) f = p.as_expr() _x = Dummy() _y = Dummy() rv = bivariate_type(Poly(f.subs({x: _x, y: _y}), _x, _y), _x, _y, first=False) if rv: reps = {_x: x, _y: y} return rv[0].xreplace(reps), rv[1].xreplace(reps), rv[2] return p = f f = p.as_expr() # f(x*y) args = Add.make_args(p.as_expr()) new = [] for a in args: a = _mexpand(a.subs(x, u/y)) free = a.free_symbols if x in free or y in free: break new.append(a) else: return x*y, Add(*new), u
sympy/solvers/bivariate.py
328
sympy
{ "docstring": "Given an expression, f, 3 tests will be done to see what type\n of composite bivariate it might be, options for u(x, y) are::\n\n x*y\n x+y\n x*y+x\n x*y+y\n\n If it matches one of these types, ``u(x, y)``, ``P(u)`` and dummy\n variable ``u`` will be returned. Solving ``P(u)`` for ``u`` and\n equating the solutions to ``u(x, y)`` and then solving for ``x`` or\n ``y`` is equivalent to solving the original expression for ``x`` or\n ``y``. If ``x`` and ``y`` represent two functions in the same\n variable, e.g. ``x = g(t)`` and ``y = h(t)``, then if ``u(x, y) - p``\n can be solved for ``t`` then these represent the solutions to\n ``P(u) = 0`` when ``p`` are the solutions of ``P(u) = 0``.\n\n Only positive values of ``u`` are considered.\n\n Examples\n ========\n\n >>> from sympy import solve\n >>> from sympy.solvers.bivariate import bivariate_type\n >>> from sympy.abc import x, y\n >>> eq = (x**2 - 3).subs(x, x + y)\n >>> bivariate_type(eq, x, y)\n (x + y, _u**2 - 3, _u)\n >>> uxy, pu, u = _\n >>> usol = solve(pu, u); usol\n [sqrt(3)]\n >>> [solve(uxy - s) for s in solve(pu, u)]\n [[{x: -y + sqrt(3)}]]\n >>> all(eq.subs(s).equals(0) for sol in _ for s in sol)\n True\n\n ", "language": "en", "n_whitespaces": 310, "n_words": 204, "vocab_size": 126 }
90
Python
60
59d22b6bb7287613d598611027f640d068ca5748
bivariate.py
196,418
43
412
bivariate_type
https://github.com/sympy/sympy.git
Moved imports to higher level
237
0
47,918
16
6
32
def get_table_description(self, cursor, table_name): cursor.execute( "PRAGMA table_info(%s)" % self.connection.ops.quote_name(table_name) ) table_info = cursor.fetchall() if not table_info: raise DatabaseError(f"Table {table_name} does not exist (empty pragma).") collations = self._get_column_collations(cursor, table_name) json_columns = set() if self.connection.features.can_introspect_json_field: for line in table_info: column = line[1] json_constraint_sql = '%%json_valid("%s")%%' % column has_json_constraint = cursor.execute( , [table_name, json_constraint_sql], ).fetchone() if has_json_constraint: json_columns.add(column) return [ FieldInfo( name, data_type, None, get_field_size(data_type), None, None, not notnull, default, collations.get(name), pk == 1, name in json_columns, ) for cid, name, data_type, notnull, default, pk in table_info ]
django/db/backends/sqlite3/introspection.py
257
django
{ "docstring": "\n Return a description of the table with the DB-API cursor.description\n interface.\n \n SELECT sql\n FROM sqlite_master\n WHERE\n type = 'table' AND\n name = %s AND\n sql LIKE %s\n ", "language": "en", "n_whitespaces": 191, "n_words": 27, "vocab_size": 22 }
85
Python
60
9c19aff7c7561e3a82978a272ecdaad40dda5c00
introspection.py
205,197
42
167
get_table_description
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
518
0
51,030
15
1
10
def test_render_none_as_context_variable(self): context = {"image": None, "image_node": "fake value"} node = ImageNode(Variable("image"), "original", "image_node") rendered = node.render(context) self.assertEqual(rendered, "") self.assertIsNone(context["image_node"])
wagtail/images/tests/test_templatetags.py
103
wagtail
{ "docstring": "\n Tests that an ImageNode without an image and a context variable name\n renders an empty string and puts None in the context variable\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 18 }
20
Python
18
d10f15e55806c6944827d801cd9c2d53f5da4186
test_templatetags.py
75,309
6
55
test_render_none_as_context_variable
https://github.com/wagtail/wagtail.git
Reformat with black
62
0
16,388
11
8
13
def sieveEr(N): from math import sqrt # precondition assert isinstance(N, int) and (N > 2), "'N' must been an int and > 2" primes = [True for x in range(N + 1)] for p in range(2, int(sqrt(N)) + 1): if primes[p]: for i in range(p * p, N + 1, p): primes[i] = False primes[0] = False primes[1] = False ret = [] for p in range(N + 1): if primes[p]: ret.append(p) return ret # --------------------------------
primelib/primelib.py
195
Python
{ "docstring": "\n input: positive integer 'N' > 2\n returns a list of prime numbers from 2 up to N.\n\n This function implements the algorithm called\n sieve of erathostenes.\n\n ", "language": "en", "n_whitespaces": 42, "n_words": 26, "vocab_size": 24 }
76
Python
52
f0af0c43340763724f139fa68aa1e5a9ffe458b4
primelib.py
22,743
15
125
sieveEr
https://github.com/geekcomputers/Python.git
refactor: clean code Signed-off-by: slowy07 <[email protected]>
159
0
4,434
13
10
29
def plotting_context(context=None, font_scale=1, rc=None): if context is None: context_dict = {k: mpl.rcParams[k] for k in _context_keys} elif isinstance(context, dict): context_dict = context else: contexts = ["paper", "notebook", "talk", "poster"] if context not in contexts: raise ValueError(f"context must be in {', '.join(contexts)}") # Set up dictionary of default parameters texts_base_context = { "font.size": 12, "axes.labelsize": 12, "axes.titlesize": 12, "xtick.labelsize": 11, "ytick.labelsize": 11, "legend.fontsize": 11, "legend.title_fontsize": 12, } base_context = { "axes.linewidth": 1.25, "grid.linewidth": 1, "lines.linewidth": 1.5, "lines.markersize": 6, "patch.linewidth": 1, "xtick.major.width": 1.25, "ytick.major.width": 1.25, "xtick.minor.width": 1, "ytick.minor.width": 1, "xtick.major.size": 6, "ytick.major.size": 6, "xtick.minor.size": 4, "ytick.minor.size": 4, } base_context.update(texts_base_context) # Scale all the parameters by the same factor depending on the context scaling = dict(paper=.8, notebook=1, talk=1.5, poster=2)[context] context_dict = {k: v * scaling for k, v in base_context.items()} # Now independently scale the fonts font_keys = texts_base_context.keys() font_dict = {k: context_dict[k] * font_scale for k in font_keys} context_dict.update(font_dict) # Override these settings with the provided rc dictionary if rc is not None: rc = {k: v for k, v in rc.items() if k in _context_keys} context_dict.update(rc) # Wrap in a _PlottingContext object so this can be used in a with statement context_object = _PlottingContext(context_dict) return context_object
seaborn/rcmod.py
472
seaborn
{ "docstring": "\n Get the parameters that control the scaling of plot elements.\n\n This affects things like the size of the labels, lines, and other elements\n of the plot, but not the overall style. This is accomplished using the\n :ref:`matplotlib rcParams system <matplotlib:matplotlib-rcparams>`.\n\n The base context is \"notebook\", and the other contexts are \"paper\", \"talk\",\n and \"poster\", which are version of the notebook parameters scaled by different\n values. Font elements can also be scaled independently of (but relative to)\n the other values.\n\n This function can also be used as a context manager to temporarily\n alter the global defaults. See :func:`set_theme` or :func:`set_context`\n to modify the global defaults for all plots.\n\n Parameters\n ----------\n context : None, dict, or one of {paper, notebook, talk, poster}\n A dictionary of parameters or the name of a preconfigured set.\n font_scale : float, optional\n Separate scaling factor to independently scale the size of the\n font elements.\n rc : dict, optional\n Parameter mappings to override the values in the preset seaborn\n context dictionaries. This only updates parameters that are\n considered part of the context definition.\n\n Examples\n --------\n\n .. include:: ../docstrings/plotting_context.rst\n\n ", "language": "en", "n_whitespaces": 283, "n_words": 180, "vocab_size": 115 }
195
Python
120
f7e25e18983f2f36a1529cd9e4bda6fa008cbd6d
rcmod.py
40,304
44
290
plotting_context
https://github.com/mwaskom/seaborn.git
Use f-strings for string formatting (#2800) Reformats all the text from the old "%-formatted" and .format(...) format to the newer f-string format, as defined in PEP 498. This requires Python 3.6+. Flynt 0.76 was used to reformat the strings. 45 f-strings were created in 13 files. F-strings are in general more readable, concise and performant. See also: https://www.python.org/dev/peps/pep-0498/#rationale
586
0
7,383
17
4
9
def getcoroutinestate(coroutine): if coroutine.cr_running: return CORO_RUNNING if coroutine.cr_suspended: return CORO_SUSPENDED if coroutine.cr_frame is None: return CORO_CLOSED return CORO_CREATED
Lib/inspect.py
52
cpython
{ "docstring": "Get current state of a coroutine object.\n\n Possible states are:\n CORO_CREATED: Waiting to start execution.\n CORO_RUNNING: Currently being executed by the interpreter.\n CORO_SUSPENDED: Currently suspended at an await expression.\n CORO_CLOSED: Execution has completed.\n ", "language": "en", "n_whitespaces": 59, "n_words": 33, "vocab_size": 32 }
18
Python
13
b04dfbbe4bd7071d46c8688c2263726ea31d33cd
inspect.py
175,794
8
31
getcoroutinestate
https://github.com/python/cpython.git
bpo-46409: Make generators in bytecode (GH-30633) * Add RETURN_GENERATOR and JUMP_NO_INTERRUPT opcodes. * Trim frame and generator by word each. * Minor refactor of frame.c * Update test.test_sys to account for smaller frames. * Treat generator functions as normal functions when evaluating and specializing.
54
0
41,653
7
15
34
def compare_caltech(self, thres): if self.dt_boxes is None or self.gt_boxes is None: return list() dtboxes = self.dt_boxes if self.dt_boxes is not None else list() gtboxes = self.gt_boxes if self.gt_boxes is not None else list() dt_matched = np.zeros(dtboxes.shape[0]) gt_matched = np.zeros(gtboxes.shape[0]) dtboxes = np.array(sorted(dtboxes, key=lambda x: x[-1], reverse=True)) gtboxes = np.array(sorted(gtboxes, key=lambda x: x[-1], reverse=True)) if len(dtboxes): overlap_iou = bbox_overlaps(dtboxes, gtboxes, mode='iou') overlap_ioa = bbox_overlaps(dtboxes, gtboxes, mode='iof') else: return list() score_list = list() for i, dt in enumerate(dtboxes): maxpos = -1 maxiou = thres for j, gt in enumerate(gtboxes): if gt_matched[j] == 1: continue if gt[-1] > 0: overlap = overlap_iou[i][j] if overlap > maxiou: maxiou = overlap maxpos = j else: if maxpos >= 0: break else: overlap = overlap_ioa[i][j] if overlap > thres: maxiou = overlap maxpos = j if maxpos >= 0: if gtboxes[maxpos, -1] > 0: gt_matched[maxpos] = 1 dt_matched[i] = 1 score_list.append((dt, 1, self.ID)) else: dt_matched[i] = -1 else: dt_matched[i] = 0 score_list.append((dt, 0, self.ID)) return score_list
mmdet/evaluation/metrics/crowdhuman_metric.py
539
mmdetection
{ "docstring": "Match the detection results with the ground_truth by Caltech\n matching strategy.\n\n Args:\n thres (float): IOU threshold.\n\n Returns:\n score_list(list[tuple[ndarray, int, str]]): Matching result.\n a list of tuples (dtbox, label, imgID) in the descending\n sort of dtbox.score.\n ", "language": "en", "n_whitespaces": 107, "n_words": 35, "vocab_size": 32 }
160
Python
73
6fca2160bd676cf011e10bdf4b622efb5688bae0
crowdhuman_metric.py
245,805
45
346
compare_caltech
https://github.com/open-mmlab/mmdetection.git
[Feature] Add CrowdHumanDataset and Metric (#8437) * [Fix] Fix UT to be compatible with pytorch 1.6 (#8707) * Update * Update * force reinstall pycocotools * Fix build_cuda * docker install git * Update * comment other job to speedup process * update * uncomment * Update * Update * Add comments for --force-reinstall * [WIP] Add CrowdHumanDataset * [WIP] Add CrowdHumanDataset * [WIP] Add CrowdHumanDataset * [WIP] Add CrowdHumanDataset * [WIP] Add CrowdHumanDataset * [WIP] Add CrowdHumanDataset * [WIP] Add CrowdHumanDataset * Add CrowdHumanDataset * [WIP] Add CrowdHumanDataset * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [WIP] Add CrowdHumanDataset * [WIP] Add CrowdHumanDataset * [WIP] Add CrowdHumanDataset * [WIP] Add CrowdHumanDataset * [WIP] Add CrowdHumanDataset * [WIP] Add CrowdHumanDataset * [WIP] Add CrowdHumanDataset * Add CrowdHumanDataset * [WIP] Add CrowdHumanDataset * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric * [Feature] Add CrowdHumanDataset and Metric Co-authored-by: jbwang1997 <[email protected]>
795
0
70,897
18
4
10
def get_offset_transform(self): if self._offset_transform is None: self._offset_transform = transforms.IdentityTransform() elif (not isinstance(self._offset_transform, transforms.Transform) and hasattr(self._offset_transform, '_as_mpl_transform')): self._offset_transform = \ self._offset_transform._as_mpl_transform(self.axes) return self._offset_transform
lib/matplotlib/collections.py
105
matplotlib
{ "docstring": "Return the `.Transform` instance used by this artist offset.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
22
Python
18
c25cf96cfb7e6fc9ad75347cb2a32193c501e82c
collections.py
106,990
8
65
get_offset_transform
https://github.com/matplotlib/matplotlib.git
Switch transOffset to offset_transform. Note that most APIs *previously* already accepted *offset_transform* as kwarg, due to the presence of the `set_offset_transform` setter. Prefer that name (shortening it to `offset_trf` for local variables). Backcompat for the old `transOffset` name is kept in most places by introducing a property alias.
100
0
22,540
12
1
5
def data(request): return request.param @pytest.fixture()
pandas/tests/arrays/masked/test_function.py
30
@pytest.fixture()
pandas
{ "docstring": "\n Fixture returning parametrized 'data' array with different integer and\n floating point types\n ", "language": "en", "n_whitespaces": 22, "n_words": 12, "vocab_size": 12 }
5
Python
5
89be1f053b695c4ce1c0569f737caf3f03c12128
test_function.py
166,960
2
10
data
https://github.com/pandas-dev/pandas.git
DOC: Added docstrings to fixtures defined in array module (#47211)
10
1
39,892
6
9
28
def get_lvm_facts(self): lvm_facts = {'lvm': 'N/A'} if os.getuid() == 0 and self.module.get_bin_path('vgs'): lvm_util_options = '--noheadings --nosuffix --units g --separator ,' vgs_path = self.module.get_bin_path('vgs') # vgs fields: VG #PV #LV #SN Attr VSize VFree vgs = {} if vgs_path: rc, vg_lines, err = self.module.run_command('%s %s' % (vgs_path, lvm_util_options)) for vg_line in vg_lines.splitlines(): items = vg_line.strip().split(',') vgs[items[0]] = {'size_g': items[-2], 'free_g': items[-1], 'num_lvs': items[2], 'num_pvs': items[1]} lvs_path = self.module.get_bin_path('lvs') # lvs fields: # LV VG Attr LSize Pool Origin Data% Move Log Copy% Convert lvs = {} if lvs_path: rc, lv_lines, err = self.module.run_command('%s %s' % (lvs_path, lvm_util_options)) for lv_line in lv_lines.splitlines(): items = lv_line.strip().split(',') lvs[items[0]] = {'size_g': items[3], 'vg': items[1]} pvs_path = self.module.get_bin_path('pvs') # pvs fields: PV VG #Fmt #Attr PSize PFree pvs = {} if pvs_path: rc, pv_lines, err = self.module.run_command('%s %s' % (pvs_path, lvm_util_options)) for pv_line in pv_lines.splitlines(): items = pv_line.strip().split(',') pvs[self._find_mapper_device_name(items[0])] = { 'size_g': items[4], 'free_g': items[5], 'vg': items[1]} lvm_facts['lvm'] = {'lvs': lvs, 'vgs': vgs, 'pvs': pvs} return lvm_facts
lib/ansible/module_utils/facts/hardware/linux.py
550
ansible
{ "docstring": " Get LVM Facts if running as root and lvm utils are available ", "language": "en", "n_whitespaces": 13, "n_words": 12, "vocab_size": 12 }
161
Python
104
b2cde3a8964373d51d24e66692394d98858a2b33
linux.py
267,673
33
325
get_lvm_facts
https://github.com/ansible/ansible.git
Add default value for lvm_facts when lvm or lvm2 is not installed or … (#75989) * Add default value for lvm_facts when lvm or lvm2 is not installed or there are no lvm facts
735
0
78,995
16
1
26
def test_first_event_without_minified_stack_trace_received(self, record_analytics): now = timezone.now() project = self.create_project(first_event=now) project_created.send(project=project, user=self.user, sender=type(project)) data = load_data("javascript") self.store_event( data=data, project_id=project.id, ) with pytest.raises(AssertionError): record_analytics.assert_called_with( "first_event_with_minified_stack_trace_for_project.sent", user_id=self.user.id, organization_id=project.organization_id, project_id=project.id, platform="javascript", url="http://localhost:3000", )
tests/sentry/receivers/test_onboarding.py
177
sentry
{ "docstring": "\n Test that an analytics event is NOT recorded when\n there no event with minified stack trace is received\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 16 }
29
Python
25
ce841204ef3b20d0f6ac812ebb06aebbc63547ac
test_onboarding.py
89,863
18
110
test_first_event_without_minified_stack_trace_received
https://github.com/getsentry/sentry.git
ref(onboarding): Add function to record first event per project with min stack trace -(#42208)
219
0
18,580
12
1
8
def get_yaxis(self): return self.yaxis get_xgridlines = _axis_method_wrapper("xaxis", "get_gridlines") get_xticklines = _axis_method_wrapper("xaxis", "get_ticklines") get_ygridlines = _axis_method_wrapper("yaxis", "get_gridlines") get_yticklines = _axis_method_wrapper("yaxis", "get_ticklines") # Adding and tracking artists
lib/matplotlib/axes/_base.py
84
matplotlib
{ "docstring": "\n [*Discouraged*] Return the YAxis instance.\n\n .. admonition:: Discouraged\n\n The use of this function is discouraged. You should instead\n directly access the attribute ``ax.yaxis``.\n ", "language": "en", "n_whitespaces": 67, "n_words": 23, "vocab_size": 22 }
25
Python
18
5af97515b3823b2efa1961253a11e2d77df88637
_base.py
109,257
2
10
get_yaxis
https://github.com/matplotlib/matplotlib.git
Add discouraged admonitions The [*Discouraged*] prefix in the summary line is added in analogy to the [*Deprecated*] prefix we add automatically. We do this so that these "labels" are prominently visible also in summary overviews of the functions in the docs. Since we rarely discourage whole functions, for now I just do this manually.
54
0
23,495
7
8
12
def read(self, size=-1): if self.closed: raise ValueError("I/O operation on closed file") if self.size_read >= self.chunksize: return b'' if size < 0: size = self.chunksize - self.size_read if size > self.chunksize - self.size_read: size = self.chunksize - self.size_read data = self.file.read(size) self.size_read = self.size_read + len(data) if self.size_read == self.chunksize and \ self.align and \ (self.chunksize & 1): dummy = self.file.read(1) self.size_read = self.size_read + len(dummy) return data
python3.10.4/Lib/chunk.py
215
XX-Net
{ "docstring": "Read at most size bytes from the chunk.\n If size is omitted or negative, read until the end\n of the chunk.\n ", "language": "en", "n_whitespaces": 42, "n_words": 21, "vocab_size": 17 }
67
Python
38
8198943edd73a363c266633e1aa5b2a9e9c9f526
chunk.py
221,321
17
136
read
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
216
0
56,344
11
5
9
def _get_cons(self): if self.__origin__ is None: raise TypeError("Cannot get the underlying type of a " "non-specialized Annotated type.") tree = self._subs_tree() while isinstance(tree, tuple) and tree[0] is Annotated: tree = tree[1] if isinstance(tree, tuple): return tree[0] else: return tree
pipenv/patched/notpip/_vendor/typing_extensions.py
107
pipenv
{ "docstring": "Return the class used to create instance of this type.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
39
Python
31
f3166e673fe8d40277b804d35d77dcdb760fc3b3
typing_extensions.py
20,881
11
64
_get_cons
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
196
0
3,608
11
6
13
def _task(cls): # type: () -> None log_runtime.debug("TimeoutScheduler Thread spawning @ %f", cls._time()) time_empty = None try: while 1: handle = cls._peek_next() if handle is None: now = cls._time() if time_empty is None: time_empty = now # 100 ms of grace time before killing the thread if cls.GRACE < now - time_empty: return else: time_empty = None cls._wait(handle) cls._poll() finally: # Worst case scenario: if this thread dies, the next scheduled # timeout will start a new one log_runtime.debug("TimeoutScheduler Thread died @ %f", cls._time()) cls._thread = None
scapy/contrib/isotp/isotp_soft_socket.py
174
scapy
{ "docstring": "Executed in a background thread, this thread will automatically\n start when the first timeout is added and stop when the last timeout\n is removed or executed.", "language": "en", "n_whitespaces": 39, "n_words": 26, "vocab_size": 22 }
87
Python
58
afa3a8051f6768a23895d72239de44f23c1c210c
isotp_soft_socket.py
208,901
19
98
_task
https://github.com/secdev/scapy.git
Refactoring of ISOTPSoftSockets
400
0
52,558
14
1
6
def test_nonconflicting_autogenerated_basename_different_models(self): self.router.register(r'notes', NoteViewSet) self.router.register(r'notes_basename', BasenameViewSet)
tests/test_routers.py
47
django-rest-framework
{ "docstring": "\n Ensure 2 routers with different models, and a distinct basename specified\n on each does not throw an exception\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 18 }
6
Python
6
48a21aa0eb3a95d32456c2a927eff9552a04231e
test_routers.py
48,738
3
28
test_nonconflicting_autogenerated_basename_different_models
https://github.com/encode/django-rest-framework.git
raise ImproperlyConfigured exception if `basename` is not unique (#8438) * raise ImproperlyConfigured if basename already exists * rename already_registered function; return True/False * additional basename tests * additional basename tests * Update rest_framework/routers.py Co-authored-by: David Graves <[email protected]> Co-authored-by: Asif Saif Uddin <[email protected]>
27
0
9,589
8
2
5
def cache_full(self) -> bool: if self._cache_info["cache_full"]: return self._cache_info["cache_full"] with self._lock: return self._cache_info["cache_full"]
lib/training/cache.py
64
faceswap
{ "docstring": "bool: ``True`` if the cache has been fully populated. ``False`` if there are items still\n to be cached. ", "language": "en", "n_whitespaces": 25, "n_words": 18, "vocab_size": 17 }
12
Python
10
2beceffad9b15c1fd78f06b9b272563321c5a41e
cache.py
101,282
7
35
cache_full
https://github.com/deepfakes/faceswap.git
Data Augmentation update (#1263) - lib.detected_face - Subclass Masks for Landmark based masks - Add training mask propery + methods to DetectedFace - lib.training_training - subclass TrainingDataGenerator for training and preview data - Split cache into own module - Reduce thread count to 1 to prevent image corruption + data re-use - Process on largest model input/output size rather than stored image size - Size and crop masks during caching stage - Implement ring buffer for data flow - Fix preview reload bug - augmentation - typing - switch color aug order - better initialization - Fix warp + landmark warp to correctly apply at different image scales - Slightly improved warp caching - Don't store whether image is_preview. Handle all data as training images implicitly - plugins.trainer: Typing and fixes to work with trainingdata refactor
55
0
20,701
9
2
18
def put_timestamp(self, feed_id, timestamp): self._fetch_data() with self._lock, open(self._data_file, "wb") as myfile: self._data.update({feed_id: timestamp}) _LOGGER.debug( "Overwriting feed %s timestamp in storage file %s", feed_id, self._data_file, ) try: pickle.dump(self._data, myfile) except Exception: # pylint: disable=broad-except _LOGGER.error("Error saving pickled data to %s", self._data_file) self._cache_outdated = True
homeassistant/components/feedreader/__init__.py
144
core
{ "docstring": "Update timestamp for given feed id (usually the url).", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
43
Python
41
62a5854e40cb554fecb1eec897d7bcb4c94628fe
__init__.py
301,868
14
86
put_timestamp
https://github.com/home-assistant/core.git
Fix bare except (#72906)
202
0
100,706
13
2
16
def test_link_error_on_chord_header(self, header): self.app.conf.task_allow_error_cb_on_chord_header = True c = chord(header, signature('body')) err = signature('err') errback = c.link_error(err) assert errback == err for header_task in c.tasks: assert header_task.options['link_error'] == [err] assert c.body.options['link_error'] == [err]
t/unit/tasks/test_canvas.py
128
celery
{ "docstring": " Test that link_error on a chord also links the header ", "language": "en", "n_whitespaces": 11, "n_words": 10, "vocab_size": 10 }
32
Python
22
3983484defb4564c9baf2a24a6b3af2d0b3c0df7
test_canvas.py
208,396
9
77
test_link_error_on_chord_header
https://github.com/celery/celery.git
Housekeeping for Canvas.py (#7942) * Removed pass from @abstractmethod StampingVisitor.on_signature() * Added unit test: test_repr_empty_group() * Added unit test: test_signature_on_error_adds_error_callback() * Cleaned chord.link_error() implementation * Added a new test suite: test_merge_dictionaries * Fixed bug in _merge_dictionaries() function when using None values, tested with test_none_values() * Added test case for "Signature | non-Signature" in unit test: test_OR() * Added new unit test: test_freezing_args_set_in_options() * Added new unit test: test_group_prepared(), for the inner method of group._prepared() * Added unit test for chord: test_link_error_on_chord_header(), using the task_allow_error_cb_on_chord_header flag * Added subtests explanation to test_OR() unit test for "sig | non-sig" test case * Added unit test: test_on_signature_gets_the_signature() * Matched (copied) the unit tests "Install tox" step to the integration tests to have the same command for both
99
0
52,316
11
2
20
def _gen_html_string(self): self.html_string = _hilite_me( self.code_string, self.language, self.style, self.insert_line_no, "border:solid gray;border-width:.1em .1em .1em .8em;padding:.2em .6em;", self.file_path, self.line_no_from, ) if self.generate_html_file: os.makedirs( os.path.join("assets", "codes", "generated_html_files"), exist_ok=True, ) with open( os.path.join( "assets", "codes", "generated_html_files", self.file_name + ".html", ), "w", ) as file: file.write(self.html_string)
manim/mobject/svg/code_mobject.py
170
manim
{ "docstring": "Function to generate html string with code highlighted and stores in variable html_string.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
41
Python
37
902e7eb4f0147b5882a613b67467e38a1d47f01e
code_mobject.py
189,448
25
103
gen_html_string
https://github.com/ManimCommunity/manim.git
Hide more private methods from the docs. (#2468) * hide privs from text_mobject.py * hide privs from tex_mobject.py * hide privs from code_mobject.py * hide privs from svg_mobject.py * remove SVGPath and utils from __init__.py * don't import string_to_numbers * hide privs from geometry.py * hide privs from matrix.py * hide privs from numbers.py * hide privs from three_dimensions.py * forgot underscore under set_stroke_width_from_length * there were more i missed * unhidea method that was used in docs * forgot other text2hash * remove svg_path from docs
356
0
46,056
16
1
5
def test_iterable_not_string(): constraint = _IterablesNotString() assert constraint.is_satisfied_by([1, 2, 3]) assert constraint.is_satisfied_by(range(10)) assert not constraint.is_satisfied_by("some string")
sklearn/utils/tests/test_param_validation.py
70
scikit-learn
{ "docstring": "Check that a string does not satisfy the _IterableNotString constraint.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
15
Python
13
b9f623cff0f61c43b194e794da45c81518c57f60
test_param_validation.py
260,597
5
41
test_iterable_not_string
https://github.com/scikit-learn/scikit-learn.git
MAINT Param validation: add helper constraint for cv object (#24010)
30
0
76,365
9
4
13
def get_mapped_key_strings_to_ints(self) -> MutableMapping[str, int]: cache_key_results: MutableMapping[str, int] = {} for org_id, result_dict in self.results.items(): for string, id in result_dict.items(): key = f"{org_id}:{string}" if id is not None: cache_key_results[key] = id return cache_key_results
src/sentry/sentry_metrics/indexer/base.py
111
sentry
{ "docstring": "\n Return the results, but formatted as the following:\n {\n \"1:a\": 10,\n \"1:b\": 11,\n \"1:c\", 12,\n \"2:e\": 13\n }\n This is for when we use indexer_cache.set_many()\n ", "language": "en", "n_whitespaces": 129, "n_words": 25, "vocab_size": 24 }
33
Python
26
c4cc0467974bcfb2b3c95120bd19c337aa977183
base.py
92,382
18
66
get_mapped_key_strings_to_ints
https://github.com/getsentry/sentry.git
feat(metrics_indexer): Add rate limits functionality to indexer [INGEST-1380] (#36263) * feat(metrics_indexer): Add rate limits functionality to indexer [INGEST-1380] The postgres string indexer now is able to rate limit writes using four sentry options. If that happens, `None` is returned in place of an integer, and the FetchType is RATE_LIMITED. The kafka consumer/message processor explicitly checks for those `None` values and throws away every message that references a rate-limited string. It logs a Sentry error for every dropped message just because that's already what we do for other kinds of dropped messages. Rate limiting and quota management currently creates a ton of dataclasses and that probably wastes time. There are a ton of low-hanging fruits: * the return value of _construct_quotas could be globally cached, as long as the cache is wiped when the sentry options change. * the same Quota object (for global limits) is referenced from multiple RequestedQuota instances (one for each org). `sentry.ratelimits.sliding_windows` could check the `id()` of the quota (if there is no prefix override) to avoid computing and checking the same quota multiple times. An even lower hanging fruit is that we're fetching the same keys from Redis multiple times, because multiple organizations (and therefore multiple RequestedQuota instances) adhere to the global quota. So that's been fixed, but as for the rest let's wait for timings from prod. * fix typo * fix typing * apply review feedback * fix typing, add test * fix tests * apply review feedback about logging too many msgs * fix leaking option in test * sike, more test failures
121
0
18,905
13
2
12
def copy(self, update_attrs=None, exclude_fields=None): exclude_fields = ( self.default_exclude_fields_in_copy + self.exclude_fields_in_copy + (exclude_fields or []) ) instance, child_object_map = _copy(self.specific, exclude_fields, update_attrs) instance.save() _copy_m2m_relations(self, instance, exclude_fields=exclude_fields) return instance
wagtail/core/models/__init__.py
97
wagtail
{ "docstring": "Copy this task state, excluding the attributes in the ``exclude_fields`` list and updating any attributes to values\n specified in the ``update_attrs`` dictionary of ``attribute``: ``new value`` pairs", "language": "en", "n_whitespaces": 33, "n_words": 27, "vocab_size": 23 }
27
Python
24
d10f15e55806c6944827d801cd9c2d53f5da4186
__init__.py
73,881
10
63
copy
https://github.com/wagtail/wagtail.git
Reformat with black
109
0
16,158
11
1
14
def extract_tags(self, sentence, topK=20, withWeight=False, allowPOS=(), withFlag=False): self.check_dependency() import jieba import jieba.analyse jieba.setLogLevel(logging.ERROR) res = jieba.analyse.extract_tags(sentence, topK=topK, withWeight=withWeight, allowPOS=allowPOS, withFlag=withFlag) return res
modules/text/lexical_analysis/jieba_paddle/module.py
108
PaddleHub
{ "docstring": "\n Extract keywords from sentence using TF-IDF algorithm.\n Args:\n topK(int): return how many top keywords. `None` for all possible words.\n withWeight(bool): if True, return a list of (word, weight);\n if False, return a list of words.\n allowPOS(tuple): the allowed POS list eg. ['ns', 'n', 'vn', 'v','nr'].\n if the POS of w is not in this list,it will be filtered.\n withFlag(bool): only work with allowPOS is not empty.\n if True, return a list of pair(word, weight) like posseg.cut\n if False, return a list of words\n Returns:\n result(list): The key words.\n ", "language": "en", "n_whitespaces": 266, "n_words": 88, "vocab_size": 61 }
22
Python
20
8468e1ac6cfe165aa1e3cf4f77ab6fb66ce98614
module.py
49,679
25
72
extract_tags
https://github.com/PaddlePaddle/PaddleHub.git
Remove fluid api in modules and pkg. (#1906)
231
0
9,867
9
1
6
def to_list(self): return list(convert_submodules(self.__dict__).values())
ludwig/schema/model_config.py
36
ludwig
{ "docstring": "Method for getting a list representation of the input features.\n\n Returns:\n List of input features specified.\n ", "language": "en", "n_whitespaces": 41, "n_words": 16, "vocab_size": 14 }
4
Python
4
4d2d81f9fdefc52eea6a9bf0826a6f2ffc8d681b
model_config.py
8,421
2
20
to_list
https://github.com/ludwig-ai/ludwig.git
Config Object (#2426) * Fixed loss instances across features * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed binary OneOfImplementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Flake 8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix custom loss components * Fix gbm category * Remove config object code, out of scope * Fixed more tests * Fixed incorrect text preproc default, added clip to category feature level * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes additional tests * Cache jsonschema validator to reduce memory pressure * Fix imports * Skip neuropod test * Added upgrade audio to default preproc back compat and cleaned up * Small nits * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Change backfill constant for audio * Add docstring to compute feature hash * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Unused import * Another backfill constant change * Unused import * remove default population functions * Added config object test * rewired build_inputs * rewired combiner in ecd, added logic to config object * Refactored ecd.py * Fixing up merge_with_defaults, need metadata changes in master * Refactored defaults section and mega upgraded config obj * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed some formatting * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed feature col, proc col, and render config from defaults.py * Fix duplicate import * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added config initializer to merge defaults flow * Refactored update_config_with_metadata * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added dict conversion method to config object and refactored merge config function in config_utils * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Refactored until preproc entrypoint * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed update_config_with_metadata * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed load config base feature method - no longer necessary * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Formatting * Fixed input size assignment * Temp fix * Fixed pretrained encoder path referencing temp until preproc refactor * Solved the WORST BUG EVER * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Switch reduce_input to None for sequence tagger * Fixed another one * Fixed typo * Various test fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Flake 8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed excess defaults params issue * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Minor fixes * [pre-commit.ci] 
auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed some defaults tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed more tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed more tests * Formatting * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * More test fixes * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed defaults tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix more tests * Flake 8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix more tests * Fixed more tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed more tests * Fixed more tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixing ghost tests attempt * Deep copy to smash the ghost failures * Copied top level modules now too * Started fixing hyperopt * Fixed Hyperopt Issues * Flake 8 * Remove commented out code * Address Piero feedback * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Flake 8 * Removed merge with defaults * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed various issues with preprocessing and splitting positioning * Fixed hyperopt issues * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Refactored api pipeline to use all config obj references * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed more tests * Flake 8 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix more tests * Fixed auto tune learning rate and batch size * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed sequence feature tests * Fixed image feature test * Fixed last test * flake 8 * Marshmallowify Config object, remove manual to dict method, add Factory method constructors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Validate config within config object * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * All Travis feedback addressed * Using all new constructors now * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * removed from class attributes * Added deep copies back and piped repr inheritance * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Format * Small error fix, moved back compat into Config Object * Flake8 * Docstring for hyperopt defaults method * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Address Joppe feedback * Revert "Address Joppe feedback" This reverts commit 42f1665ef917d062a010550bb960594c355285ff. 
* Fix tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Flake8 * fix test * Small improvement * Changed repr for input features, added feature enabling/disabling * Added feature enabling/disabling, and better reprs for SDK dev * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Flake 8 * Added rich to requirements.txt * Add some more CO tests and comment more on CO code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix explain issue * Julian feedback * Added TODOs for future refactor PRs * Fix explain test failure, test shared state improvement and bug fix, remove unncessary code from convert_submodules * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * implement Daniel's feedback * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix residual errors * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Error fix * Using mixins now so no loose attributes on defaults, fixed height width schema restrictions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed unnecessary filtering from defaults schema logic * Piero's simplification and cleanup * Flake 8 * Fix test and update docstrings from Pieros change * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Address most of Justin's feedback * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix tests and more feedback implementation * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Address feedback * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Renamed files to correspond to ModelConfig class name * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Missing constant import * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixed incorrect merge conflict resolution * Flake8 * Fix remaining tests (except old models training from trainer type removal) * Fixed old models not validating trainer type * Add output_feature=False to test_hyperopt_ray.py * Implement Kabir's feedback * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Travis Addair <[email protected]> Co-authored-by: w4nderlust <[email protected]>
18
0
1,429
12
1
2
def test_action_not_coerced_for_get_and_head(self):
tests/schemas/test_coreapi.py
13
django-rest-framework
{ "docstring": "\n Ensure that action name is preserved when action map contains \"head\".\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 10 }
2
Python
2
df584350b4f77143d84615f05000f71408aec9c0
test_coreapi.py
48,645
46
269
test_action_not_coerced_for_get_and_head
https://github.com/encode/django-rest-framework.git
Prevent head method mapping to coerce action name (#7729)
9
0
9,554
6
1
12
def get_cluster_metadata(gcs_client) -> dict: return json.loads( gcs_client.internal_kv_get( usage_constant.CLUSTER_METADATA_KEY, namespace=ray_constants.KV_NAMESPACE_CLUSTER, ).decode("utf-8") )
python/ray/_private/usage/usage_lib.py
57
ray
{ "docstring": "Get the cluster metadata from GCS.\n\n It is a blocking API.\n\n This will return None if `put_cluster_metadata` was never called.\n\n Params:\n gcs_client: The GCS client to perform KV operation GET.\n\n Returns:\n The cluster metadata in a dictinoary.\n\n Raises:\n RuntimeError if it fails to obtain cluster metadata from GCS.\n ", "language": "en", "n_whitespaces": 87, "n_words": 48, "vocab_size": 38 }
11
Python
11
3a48a79fd7d2ed9195baec275562e64e96596de4
usage_lib.py
125,358
22
34
get_cluster_metadata
https://github.com/ray-project/ray.git
[Usage stats] Report total number of running jobs for usage stats purpose. (#26787) - Report total number of running jobs - Fix total number of nodes to include only alive nodes Signed-off-by: Jiajun Yao <[email protected]>
56
0
27,847
13
2
19
def test_unblock_room_twice(self) -> None: self._block_room(self.room_id) for _ in range(2): channel = self.make_request( "PUT", self.url % self.room_id, content={"block": False}, access_token=self.admin_user_tok, ) self.assertEqual(200, channel.code, msg=channel.json_body) self.assertFalse(channel.json_body["block"]) self._is_blocked(self.room_id, expect=False)
tests/rest/admin/test_room.py
149
synapse
{ "docstring": "Test that unblock a room that is not blocked is successful.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 9 }
26
Python
26
c97042f7eef3748e17c90e48a4122389a89c4735
test_room.py
249,148
13
94
test_unblock_room_twice
https://github.com/matrix-org/synapse.git
Use literals in place of `HTTPStatus` constants in tests (#13469)
162
0
72,655
14
1
17
def test_limit_and_from(self) -> None: number_destinations = 20 self._create_destinations(number_destinations) channel = self.make_request( "GET", self.url + "?from=5&limit=10", access_token=self.admin_user_tok, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual(channel.json_body["total"], number_destinations) self.assertEqual(channel.json_body["next_token"], "15") self.assertEqual(len(channel.json_body["destinations"]), 10) self._check_fields(channel.json_body["destinations"])
tests/rest/admin/test_federation.py
180
synapse
{ "docstring": "Testing list of destinations with a defined starting point and limit", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
27
Python
26
6a72c910f180ee8b4bd78223775af48492769472
test_federation.py
246,112
14
109
test_limit_and_from
https://github.com/matrix-org/synapse.git
Add admin API to get a list of federated rooms (#11658)
130
0
71,018
11
1
6
def face(self) -> Optional[np.ndarray]: return self._face
lib/align/aligned_face.py
29
faceswap
{ "docstring": " :class:`numpy.ndarray`: The aligned face at the given :attr:`size` at the specified\n :attr:`coverage` in the given :attr:`dtype`. If an :attr:`image` has not been provided\n then an the attribute will return ``None``. ", "language": "en", "n_whitespaces": 45, "n_words": 30, "vocab_size": 24 }
6
Python
6
a2de4a97985dc62db3b140a924aeac2be733abf8
aligned_face.py
101,190
5
17
face
https://github.com/deepfakes/faceswap.git
lib.align.aligned_face updates - Typing - Legacy support for pre-aligned faces - Coverage support for pre-aligned faces - Standardized retrieval of sub-crops
20
0
20,611
6
3
15
def pyplot(self, fig=None, clear_figure=None, **kwargs): if not fig and config.get_option("deprecation.showPyplotGlobalUse"): self.dg.exception(PyplotGlobalUseWarning()) image_list_proto = ImageListProto() marshall( self.dg._get_delta_path_str(), image_list_proto, fig, clear_figure, **kwargs ) return self.dg._enqueue("imgs", image_list_proto)
lib/streamlit/elements/pyplot.py
118
streamlit
{ "docstring": "Display a matplotlib.pyplot figure.\n\n Parameters\n ----------\n fig : Matplotlib Figure\n The figure to plot. When this argument isn't specified, this\n function will render the global figure (but this is deprecated,\n as described below)\n\n clear_figure : bool\n If True, the figure will be cleared after being rendered.\n If False, the figure will not be cleared after being rendered.\n If left unspecified, we pick a default based on the value of `fig`.\n\n * If `fig` is set, defaults to `False`.\n\n * If `fig` is not set, defaults to `True`. This simulates Jupyter's\n approach to matplotlib rendering.\n\n **kwargs : any\n Arguments to pass to Matplotlib's savefig function.\n\n Example\n -------\n >>> import matplotlib.pyplot as plt\n >>> import numpy as np\n >>>\n >>> arr = np.random.normal(1, 1, size=100)\n >>> fig, ax = plt.subplots()\n >>> ax.hist(arr, bins=20)\n >>>\n >>> st.pyplot(fig)\n\n .. output::\n https://share.streamlit.io/streamlit/docs/main/python/api-examples-source/charts.pyplot.py\n height: 630px\n\n Notes\n -----\n .. note::\n Deprecation warning. After December 1st, 2020, we will remove the ability\n to specify no arguments in `st.pyplot()`, as that requires the use of\n Matplotlib's global figure object, which is not thread-safe. So\n please always pass a figure object as shown in the example section\n above.\n\n Matplotlib support several different types of \"backends\". If you're\n getting an error using Matplotlib with Streamlit, try setting your\n backend to \"TkAgg\"::\n\n echo \"backend: TkAgg\" >> ~/.matplotlib/matplotlibrc\n\n For more information, see https://matplotlib.org/faq/usage_faq.html.\n\n ", "language": "en", "n_whitespaces": 581, "n_words": 220, "vocab_size": 150 }
24
Python
24
72703b38029f9358a0ec7ca5ed875a6b438ece19
pyplot.py
118,742
8
73
pyplot
https://github.com/streamlit/streamlit.git
Replace static apps with live Cloud apps (#4317) Co-authored-by: kajarenc <[email protected]>
88
0
26,399
11
3
17
def print_tensor(x, message="", summarize=3):
    if isinstance(x, tf.Tensor) and hasattr(x, "graph"):
        with get_graph().as_default():
            op = tf.print(
                message, x, output_stream=sys.stdout, summarize=summarize
            )
            with tf.control_dependencies([op]):
                return tf.identity(x)
    else:
        tf.print(message, x, output_stream=sys.stdout, summarize=summarize)
        return x


# GRAPH MANIPULATION
keras/backend.py
160
keras
{ "docstring": "Prints `message` and the tensor value when evaluated.\n\n Note that `print_tensor` returns a new tensor identical to `x`\n which should be used in the following code. Otherwise the\n print operation is not taken into account during evaluation.\n\n Example:\n\n >>> x = tf.constant([[1.0, 2.0], [3.0, 4.0]])\n >>> tf.keras.backend.print_tensor(x)\n <tf.Tensor: shape=(2, 2), dtype=float32, numpy=\n array([[1., 2.],\n [3., 4.]], dtype=float32)>\n\n Args:\n x: Tensor to print.\n message: Message to print jointly with the tensor.\n summarize: The first and last `summarize` elements within each dimension\n are recursively printed per Tensor. If None, then the first 3 and last\n 3 elements of each dimension are printed for each tensor. If set to\n -1, it will print all elements of every tensor.\n\n Returns:\n The same tensor `x`, unchanged.\n ", "language": "en", "n_whitespaces": 229, "n_words": 121, "vocab_size": 92 }
34
Python
30
84afc5193d38057e2e2badf9c889ea87d80d8fbf
backend.py
269,468
11
99
print_tensor
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
126
0
80,103
14
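A minimal usage sketch for the print_tensor record above, illustrating the note in its docstring that downstream code should consume the returned tensor rather than the original one (values here are arbitrary):

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])

# print_tensor prints a message plus the value and returns an identical tensor;
# keep using the returned tensor so the print op is not dropped in graph mode.
y = K.print_tensor(x, message="activations =")
z = y * 2.0  # downstream ops should consume `y`, not the original `x`
print(z.numpy())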
7
4
def _reassign_keys(cls):
    rcParams["keymap.fullscreen"] = [k for k in rcParams["keymap.fullscreen"] if k != "f"]
    rcParams["keymap.save"] = [k for k in rcParams["keymap.save"] if k != "s"]
    rcParams["keymap.home"] = [k for k in rcParams["keymap.home"] if k != "r"]
scripts/train.py
112
faceswap
{ "docstring": " Remove `F`, 'S' and 'R' from their default bindings. ", "language": "en", "n_whitespaces": 10, "n_words": 9, "vocab_size": 9 }
35
Python
15
7b9fc0454d982a2425ec44e90e5b05a87d149953
train.py
101,031
4
63
_reassign_keys
https://github.com/deepfakes/faceswap.git
Live Preview - Replace cv2 with matplotlib viewer
63
0
20,471
10
6
12
def weight(self) -> int:
    weight = 0
    if self.models:
        weight += 401 - (1 if callable(self.models) else len(self.models))

    if self.manufacturers:
        weight += 301 - (
            1 if callable(self.manufacturers) else len(self.manufacturers)
        )

    weight += 10 * len(self.channel_names)
    weight += 5 * len(self.generic_ids)
    if isinstance(self.aux_channels, frozenset):
        weight += 1 * len(self.aux_channels)
    return weight
homeassistant/components/zha/core/registries.py
167
core
{ "docstring": "Return the weight of the matching rule.\n\n Most specific matches should be preferred over less specific. Model matching\n rules have a priority over manufacturer matching rules and rules matching a\n single model/manufacturer get a better priority over rules matching multiple\n models/manufacturers. And any model or manufacturers matching rules get better\n priority over rules matching only channels.\n But in case of a channel name/channel id matching, we give rules matching\n multiple channels a better priority over rules matching a single channel.\n ", "language": "en", "n_whitespaces": 136, "n_words": 80, "vocab_size": 46 }
51
Python
32
8bed2e6459bfc1efb25d6a55aaea2eb1b9953cf9
registries.py
314,673
23
105
weight
https://github.com/home-assistant/core.git
Remove zha from mypy ignore list (#73603)
166
0
113,278
14
4
35
def test_edit_cases(self) -> None: self.login("hamlet") hamlet = self.example_user("hamlet") msg_id = self.send_stream_message( self.example_user("hamlet"), "Denmark", topic_name="topic 1", content="content 1" ) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "content": "content 2", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0]["prev_content"], "content 1") self.assertEqual(history[0]["user_id"], hamlet.id) self.assertEqual( set(history[0].keys()), { "timestamp", "prev_content", "user_id", "prev_rendered_content", "prev_rendered_content_version", }, ) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "topic": "topic 2", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], "topic 1") self.assertEqual(history[0]["user_id"], hamlet.id) self.assertEqual(set(history[0].keys()), {"timestamp", LEGACY_PREV_TOPIC, "user_id"}) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "content": "content 3", "topic": "topic 3", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0]["prev_content"], "content 2") self.assertEqual(history[0][LEGACY_PREV_TOPIC], "topic 2") self.assertEqual(history[0]["user_id"], hamlet.id) self.assertEqual( set(history[0].keys()), { "timestamp", LEGACY_PREV_TOPIC, "prev_content", "user_id", "prev_rendered_content", "prev_rendered_content_version", }, ) result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "content": "content 4", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0]["prev_content"], "content 3") self.assertEqual(history[0]["user_id"], hamlet.id) self.login("iago") result = self.client_patch( f"/json/messages/{msg_id}", { "message_id": msg_id, "topic": "topic 4", }, ) self.assert_json_success(result) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], "topic 3") self.assertEqual(history[0]["user_id"], self.example_user("iago").id) history = orjson.loads(Message.objects.get(id=msg_id).edit_history) self.assertEqual(history[0][LEGACY_PREV_TOPIC], "topic 3") self.assertEqual(history[2][LEGACY_PREV_TOPIC], "topic 2") self.assertEqual(history[3][LEGACY_PREV_TOPIC], "topic 1") self.assertEqual(history[1]["prev_content"], "content 3") self.assertEqual(history[2]["prev_content"], "content 2") self.assertEqual(history[4]["prev_content"], "content 1") # Now, we verify that the edit history data sent back has the # correct filled-out fields message_edit_history = self.client_get(f"/json/messages/{msg_id}/history") json_response = orjson.loads(message_edit_history.content) # We reverse the message history view output so that the IDs line up with the above. 
message_history = list(reversed(json_response["message_history"])) i = 0 for entry in message_history: expected_entries = {"content", "rendered_content", "topic", "timestamp", "user_id"} if i in {0, 2, 3}: expected_entries.add("prev_topic") if i in {1, 2, 4}: expected_entries.add("prev_content") expected_entries.add("prev_rendered_content") expected_entries.add("content_html_diff") i += 1 self.assertEqual(expected_entries, set(entry.keys())) self.assert_length(message_history, 6) self.assertEqual(message_history[0]["prev_topic"], "topic 3") self.assertEqual(message_history[0]["topic"], "topic 4") self.assertEqual(message_history[1]["topic"], "topic 3") self.assertEqual(message_history[2]["topic"], "topic 3") self.assertEqual(message_history[2]["prev_topic"], "topic 2") self.assertEqual(message_history[3]["topic"], "topic 2") self.assertEqual(message_history[3]["prev_topic"], "topic 1") self.assertEqual(message_history[4]["topic"], "topic 1") self.assertEqual(message_history[0]["content"], "content 4") self.assertEqual(message_history[1]["content"], "content 4") self.assertEqual(message_history[1]["prev_content"], "content 3") self.assertEqual(message_history[2]["content"], "content 3") self.assertEqual(message_history[2]["prev_content"], "content 2") self.assertEqual(message_history[3]["content"], "content 2") self.assertEqual(message_history[4]["content"], "content 2") self.assertEqual(message_history[4]["prev_content"], "content 1") self.assertEqual(message_history[5]["content"], "content 1") self.assertEqual(message_history[5]["topic"], "topic 1")
zerver/tests/test_message_edit.py
1,737
zulip
{ "docstring": "This test verifies the accuracy of construction of Zulip's edit\n history data structures.", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 12 }
310
Python
136
d560d124a304a2f6dd467200aab7f070a78bf155
test_message_edit.py
83,152
128
1,019
test_edit_cases
https://github.com/zulip/zulip.git
python: Replace string concatenations with f-strings.
1,529
0
17,602
13
6
15
def close(self):
    # finish decoding
    if self.decoder:
        # get rid of what's left in the buffers
        self.feed(b"")
        self.data = self.decoder = None
        if not self.finished:
            msg = "image was incomplete"
            raise OSError(msg)
    if not self.image:
        msg = "cannot parse this image"
        raise OSError(msg)
    if self.data:
        # incremental parsing not possible; reopen the file
        # not that we have all data
        with io.BytesIO(self.data) as fp:
            try:
                self.image = Image.open(fp)
            finally:
                self.image.load()
    return self.image


# --------------------------------------------------------------------
src/PIL/ImageFile.py
174
Pillow
{ "docstring": "\n (Consumer) Close the stream.\n\n :returns: An image object.\n :exception OSError: If the parser failed to parse the image file either\n because it cannot be identified or cannot be\n decoded.\n ", "language": "en", "n_whitespaces": 112, "n_words": 29, "vocab_size": 24 }
74
Python
55
2ae55ccbdad9c842929fb238ea1eb81d1f999024
ImageFile.py
243,737
17
97
close
https://github.com/python-pillow/Pillow.git
Improve exception traceback readability
312
0
70,104
15
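The close() above belongs to PIL's incremental ImageFile.Parser; a hedged sketch of the usual feed/close cycle (the chunk size and file path are placeholders):

from PIL import ImageFile

# Hypothetical incremental decode: feed the parser fixed-size chunks, then
# close() finishes decoding and returns the assembled Image object.
parser = ImageFile.Parser()
with open("photo.jpg", "rb") as fp:  # placeholder path
    while True:
        chunk = fp.read(8192)
        if not chunk:
            break
        parser.feed(chunk)

image = parser.close()  # raises OSError if the data was incomplete or unparseable
print(image.size, image.mode)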
1
3
def transform_non_affine(self, values): return values
lib/matplotlib/transforms.py
18
matplotlib
{ "docstring": "\n Apply only the non-affine part of this transformation.\n\n ``transform(values)`` is always equivalent to\n ``transform_affine(transform_non_affine(values))``.\n\n In non-affine transformations, this is generally equivalent to\n ``transform(values)``. In affine transformations, this is\n always a no-op.\n\n Parameters\n ----------\n values : array\n The input values as NumPy array of length :attr:`input_dims` or\n shape (N x :attr:`input_dims`).\n\n Returns\n -------\n array\n The output values as NumPy array of length :attr:`output_dims` or\n shape (N x :attr:`output_dims`), depending on the input.\n ", "language": "en", "n_whitespaces": 208, "n_words": 71, "vocab_size": 45 }
5
Python
5
442e7082140f85de53349bf0bf0e3c98e2eaa44c
transforms.py
107,966
2
10
transform_non_affine
https://github.com/matplotlib/matplotlib.git
Correct cross-references in documentation
19
0
22,992
6
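A small sketch checking the decomposition contract described in the transform_non_affine docstring above, using a stock log-scale transform chosen purely for illustration:

import numpy as np
from matplotlib.scale import LogTransform

# A log-scale transform is purely non-affine, so its affine part is the identity.
t = LogTransform(base=10)
values = np.array([[1.0], [10.0], [100.0]])

full = t.transform(values)
split = t.transform_affine(t.transform_non_affine(values))
assert np.allclose(full, split)   # the two-step decomposition matches transform()
print(full.ravel())               # -> [0. 1. 2.]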
2
7
def collect(self) -> Generator[Metric, None, None]:
    for pre_update_hook in self._pre_update_hooks:
        pre_update_hook()

    yield from super().collect()
synapse/util/metrics.py
56
synapse
{ "docstring": "\n Collects metrics, calling pre-update hooks first.\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
14
Python
14
cf11919ddd4f48b2f59062542ba62969042f80aa
metrics.py
249,473
7
34
collect
https://github.com/matrix-org/synapse.git
Fix cache metrics not being updated when not using the legacy exposition module. (#13717)
46
0
72,940
10
4
11
def get_ranking(pairs):
    if len(pairs) == 1:
        return list(pairs[0])
    w = get_winner(pairs)
    # now remove the winner from the list of pairs
    p_new = np.array([(a, b) for a, b in pairs if a != w])
    return [w] + get_ranking(p_new)
backend/postprocessing/rankings.py
98
Open-Assistant
{ "docstring": "\n Abuses concordance property to get a (not necessarily unqiue) ranking.\n The lack of uniqueness is due to the potential existance of multiple\n equally ranked winners. We have to pick one, which is where\n the non-uniqueness comes from\n ", "language": "en", "n_whitespaces": 53, "n_words": 37, "vocab_size": 32 }
38
Python
33
38ca08446d560797522b7828720032799584d32a
rankings.py
216,630
6
61
get_ranking
https://github.com/LAION-AI/Open-Assistant.git
ran pre-commit hook
63
0
54,671
11
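get_winner is defined elsewhere in that module; below is a self-contained, hedged sketch that pairs the recursion above with a simple stand-in winner rule (the real project may use a different rule):

import numpy as np

def get_winner(pairs):
    # Hypothetical stand-in: pick the candidate with the most pairwise wins
    # (first element of each (winner, loser) pair).
    winners = [a for a, _ in pairs]
    return max(set(winners), key=winners.count)

def get_ranking(pairs):
    if len(pairs) == 1:
        return list(pairs[0])
    w = get_winner(pairs)
    p_new = np.array([(a, b) for a, b in pairs if a != w])
    return [w] + get_ranking(p_new)

# (winner, loser) pairs implying the ranking 0 > 1 > 2
pairs = np.array([(0, 1), (0, 2), (1, 2)])
print([int(v) for v in get_ranking(pairs)])  # -> [0, 1, 2]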
3
12
def flow_hierarchy(G, weight=None):
    if not G.is_directed():
        raise nx.NetworkXError("G must be a digraph in flow_hierarchy")
    scc = nx.strongly_connected_components(G)
    return 1 - sum(G.subgraph(c).size(weight) for c in scc) / G.size(weight)
networkx/algorithms/hierarchy.py
101
networkx
{ "docstring": "Returns the flow hierarchy of a directed network.\n\n Flow hierarchy is defined as the fraction of edges not participating\n in cycles in a directed graph [1]_.\n\n Parameters\n ----------\n G : DiGraph or MultiDiGraph\n A directed graph\n\n weight : key,optional (default=None)\n Attribute to use for node weights. If None the weight defaults to 1.\n\n Returns\n -------\n h : float\n Flow hierarchy value\n\n Notes\n -----\n The algorithm described in [1]_ computes the flow hierarchy through\n exponentiation of the adjacency matrix. This function implements an\n alternative approach that finds strongly connected components.\n An edge is in a cycle if and only if it is in a strongly connected\n component, which can be found in $O(m)$ time using Tarjan's algorithm.\n\n References\n ----------\n .. [1] Luo, J.; Magee, C.L. (2011),\n Detecting evolving patterns of self-organizing networks by flow\n hierarchy measurement, Complexity, Volume 16 Issue 6 53-61.\n DOI: 10.1002/cplx.20368\n http://web.mit.edu/~cmagee/www/documents/28-DetectingEvolvingPatterns_FlowHierarchy.pdf\n ", "language": "en", "n_whitespaces": 247, "n_words": 144, "vocab_size": 108 }
27
Python
26
2a05ccdb07cff88e56661dee8a9271859354027f
hierarchy.py
176,717
5
61
flow_hierarchy
https://github.com/networkx/networkx.git
Remove redundant py2 numeric conversions (#5661) * Remove redundant float conversion * Remove redundant int conversion * Use integer division Co-authored-by: Miroslav Šedivý <[email protected]>
46
0
42,052
13
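A short usage sketch for flow_hierarchy; the example digraph is made up:

import networkx as nx

# Three nodes in a cycle plus one dangling edge: 3 of the 4 edges sit inside a
# strongly connected component, so the flow hierarchy is 1 - 3/4 = 0.25.
G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4)])
print(nx.flow_hierarchy(G))  # -> 0.25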
11
15
def rot90(m, k=1, axes=(0, 1)):
    axes = tuple(axes)
    if len(axes) != 2:
        raise ValueError("len(axes) must be 2.")

    m = asanyarray(m)

    if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim:
        raise ValueError("Axes must be different.")

    if (axes[0] >= m.ndim or axes[0] < -m.ndim
            or axes[1] >= m.ndim or axes[1] < -m.ndim):
        raise ValueError("Axes={} out of range for array of ndim={}."
                         .format(axes, m.ndim))

    k %= 4

    if k == 0:
        return m[:]
    if k == 2:
        return flip(flip(m, axes[0]), axes[1])

    axes_list = arange(0, m.ndim)
    (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]],
                                                axes_list[axes[0]])

    if k == 1:
        return transpose(flip(m, axes[1]), axes_list)
    else:
        # k == 3
        return flip(transpose(m, axes_list), axes[1])
numpy/lib/function_base.py
377
numpy
{ "docstring": "\n Rotate an array by 90 degrees in the plane specified by axes.\n\n Rotation direction is from the first towards the second axis.\n\n Parameters\n ----------\n m : array_like\n Array of two or more dimensions.\n k : integer\n Number of times the array is rotated by 90 degrees.\n axes : (2,) array_like\n The array is rotated in the plane defined by the axes.\n Axes must be different.\n\n .. versionadded:: 1.12.0\n\n Returns\n -------\n y : ndarray\n A rotated view of `m`.\n\n See Also\n --------\n flip : Reverse the order of elements in an array along the given axis.\n fliplr : Flip an array horizontally.\n flipud : Flip an array vertically.\n\n Notes\n -----\n ``rot90(m, k=1, axes=(1,0))`` is the reverse of\n ``rot90(m, k=1, axes=(0,1))``\n\n ``rot90(m, k=1, axes=(1,0))`` is equivalent to\n ``rot90(m, k=-1, axes=(0,1))``\n\n Examples\n --------\n >>> m = np.array([[1,2],[3,4]], int)\n >>> m\n array([[1, 2],\n [3, 4]])\n >>> np.rot90(m)\n array([[2, 4],\n [1, 3]])\n >>> np.rot90(m, 2)\n array([[4, 3],\n [2, 1]])\n >>> m = np.arange(8).reshape((2,2,2))\n >>> np.rot90(m, 1, (1,2))\n array([[[1, 3],\n [0, 2]],\n [[5, 7],\n [4, 6]]])\n\n ", "language": "en", "n_whitespaces": 378, "n_words": 170, "vocab_size": 108 }
105
Python
68
f404e9e92e87a3990712d723d5c562a89300ac01
function_base.py
160,188
79
250
rot90
https://github.com/numpy/numpy.git
Add space after argument name
265
0
38,560
12
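A quick check of the equivalences stated in the rot90 docstring; the input array is arbitrary:

import numpy as np

m = np.arange(6).reshape(2, 3)

# Rotating in the (1, 0) plane is the same as rotating the opposite way in (0, 1).
assert np.array_equal(np.rot90(m, k=1, axes=(1, 0)),
                      np.rot90(m, k=-1, axes=(0, 1)))

# Four quarter turns return the original array (k is taken modulo 4).
assert np.array_equal(np.rot90(m, k=4), m)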
1
9
def test_export_resolves_empty_project(self):
    payload = self.make_payload(
        "discover",
        {"project": [], "start": "2020-05-18T14:00:00", "end": "2020-05-19T14:00:00"},
    )
    with self.feature("organizations:discover-query"):
        self.get_valid_response(self.org.slug, status_code=201, **payload)

    payload = self.make_payload(
        "issue",
        {"project": None, "start": "2020-05-18T14:00:00", "end": "2020-05-19T14:00:00"},
    )
    with self.feature("organizations:discover-query"):
        self.get_valid_response(self.org.slug, status_code=201, **payload)
tests/sentry/data_export/endpoints/test_data_export.py
186
sentry
{ "docstring": "\n Ensures that a request to this endpoint returns a 201 if projects\n is an empty list.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 15 }
34
Python
21
e6285db024d7af78f2022822abcde6f5d118af9e
test_data_export.py
97,025
12
102
test_export_resolves_empty_project
https://github.com/getsentry/sentry.git
ref(discover): Remove resolve_field_list from data export (#32547) - This removes the call to resolve_field_list to validate queries from the data export
138
0
19,363
11
2
11
def get_compat_v1_regularization_losses(self):
    return {
        name: regularizer()
        for name, regularizer in self._tf1_style_var_store._regularizers.items()  # pylint: disable=protected-access
    }


@test_combinations.generate(test_combinations.combine(mode=["eager"]))
keras/legacy_tf_layers/variable_scope_shim_test.py
76
@test_combinations.generate(test_combinations.combine(mode=["eager"]))
keras
{ "docstring": "Dict w/ regularization losses from `get_variable`&`compat.v1.layers`.", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
16
Python
16
84afc5193d38057e2e2badf9c889ea87d80d8fbf
variable_scope_shim_test.py
274,468
5
28
get_compat_v1_regularization_losses
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
59
1
81,207
11
1
8
def test_random_tree_n_zero():
    with pytest.raises(nx.NetworkXPointlessConcept):
        T = nx.random_tree(0, seed=1234)
networkx/generators/tests/test_trees.py
48
networkx
{ "docstring": "Tests if n = 0 then the NetworkXPointlessConcept exception is raised.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
8
Python
8
dc70d037f21c76c61e1aab02039c4ca87898f7c7
test_trees.py
177,372
3
27
test_random_tree_n_zero
https://github.com/networkx/networkx.git
added coverage in generators/tree.py (#6082)
21
0
42,365
11
1
4
def add(inputs, **kwargs): return Add(**kwargs)(inputs)
keras/layers/merging/add.py
32
keras
{ "docstring": "Functional interface to the `tf.keras.layers.Add` layer.\n\n Args:\n inputs: A list of input tensors with the same shape.\n **kwargs: Standard layer keyword arguments.\n\n Returns:\n A tensor as the sum of the inputs. It has the same shape as the inputs.\n\n Examples:\n\n >>> input_shape = (2, 3, 4)\n >>> x1 = tf.random.normal(input_shape)\n >>> x2 = tf.random.normal(input_shape)\n >>> y = tf.keras.layers.add([x1, x2])\n >>> print(y.shape)\n (2, 3, 4)\n\n Used in a functional model:\n\n >>> input1 = tf.keras.layers.Input(shape=(16,))\n >>> x1 = tf.keras.layers.Dense(8, activation='relu')(input1)\n >>> input2 = tf.keras.layers.Input(shape=(32,))\n >>> x2 = tf.keras.layers.Dense(8, activation='relu')(input2)\n >>> added = tf.keras.layers.add([x1, x2])\n >>> out = tf.keras.layers.Dense(4)(added)\n >>> model = tf.keras.models.Model(inputs=[input1, input2], outputs=out)\n\n ", "language": "en", "n_whitespaces": 177, "n_words": 102, "vocab_size": 62 }
5
Python
5
84afc5193d38057e2e2badf9c889ea87d80d8fbf
add.py
272,642
2
18
add
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
11
0
81,028
9
1
2
def get_cached_paths(self): return ["/"]
wagtail/core/models/__init__.py
21
wagtail
{ "docstring": "\n This returns a list of paths to invalidate in a frontend cache\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 11 }
4
Python
4
d10f15e55806c6944827d801cd9c2d53f5da4186
__init__.py
73,846
2
10
get_cached_paths
https://github.com/wagtail/wagtail.git
Reformat with black
18
0
16,141
7
20
50
def cleanup(self, onlyFileTbl=False, udfDict=None, web=False):
    if web and self.webBackdoorFilePath:
        logger.info("cleaning up the web files uploaded")

        self.delRemoteFile(self.webStagerFilePath)
        self.delRemoteFile(self.webBackdoorFilePath)

    if (not isStackingAvailable() or kb.udfFail) and not conf.direct:
        return

    if any((conf.osCmd, conf.osShell)) and Backend.isDbms(DBMS.PGSQL) and kb.copyExecTest:
        return

    if Backend.isOs(OS.WINDOWS):
        libtype = "dynamic-link library"
    elif Backend.isOs(OS.LINUX):
        libtype = "shared object"
    else:
        libtype = "shared library"

    if onlyFileTbl:
        logger.debug("cleaning up the database management system")
    else:
        logger.info("cleaning up the database management system")

    logger.debug("removing support tables")
    inject.goStacked("DROP TABLE %s" % self.fileTblName, silent=True)
    inject.goStacked("DROP TABLE %shex" % self.fileTblName, silent=True)

    if not onlyFileTbl:
        inject.goStacked("DROP TABLE %s" % self.cmdTblName, silent=True)

        if Backend.isDbms(DBMS.MSSQL):
            udfDict = {"master..new_xp_cmdshell": {}}

        if udfDict is None:
            udfDict = getattr(self, "sysUdfs", {})

        for udf, inpRet in udfDict.items():
            message = "do you want to remove UDF '%s'? [Y/n] " % udf

            if readInput(message, default='Y', boolean=True):
                dropStr = "DROP FUNCTION %s" % udf

                if Backend.isDbms(DBMS.PGSQL):
                    inp = ", ".join(i for i in inpRet["input"])
                    dropStr += "(%s)" % inp

                logger.debug("removing UDF '%s'" % udf)
                inject.goStacked(dropStr, silent=True)

        logger.info("database management system cleanup finished")

        warnMsg = "remember that UDF %s files " % libtype

        if conf.osPwn:
            warnMsg += "and Metasploit related files in the temporary "
            warnMsg += "folder "

        warnMsg += "saved on the file system can only be deleted "
        warnMsg += "manually"
        logger.warning(warnMsg)
plugins/generic/misc.py
594
sqlmap
{ "docstring": "\n Cleanup file system and database from sqlmap create files, tables\n and functions\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 11 }
203
Python
118
df4293473d2fb6e887e31522cab5aff95e201581
misc.py
123,734
45
344
cleanup
https://github.com/sqlmapproject/sqlmap.git
Fixing DeprecationWarning (logger.warn)
726
0
27,412
19
7
8
def func_load(code, defaults=None, closure=None, globs=None):
    if isinstance(code, (tuple, list)):  # unpack previous dump
        code, defaults, closure = code
        if isinstance(defaults, list):
            defaults = tuple(defaults)
keras/utils/generic_utils.py
78
keras
{ "docstring": "Deserializes a user defined function.\n\n Args:\n code: bytecode of the function.\n defaults: defaults of the function.\n closure: closure of the function.\n globs: dictionary of global objects.\n\n Returns:\n A function object.\n ", "language": "en", "n_whitespaces": 74, "n_words": 30, "vocab_size": 22 }
24
Python
22
84afc5193d38057e2e2badf9c889ea87d80d8fbf
generic_utils.py
276,843
18
147
func_load
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
56
0
81,753
12
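func_load is the counterpart of a func_dump serializer in the same utilities module; a hedged round-trip sketch (the import path assumes the Keras version this record comes from — treat both names as placeholders otherwise):

# Hedged sketch: assumes func_dump/func_load live side by side in
# keras.utils.generic_utils, as in the Keras version shown above.
from keras.utils.generic_utils import func_dump, func_load

def scale(x, factor=2):
    return x * factor

payload = func_dump(scale)       # -> (code_bytes, defaults, closure)
restored = func_load(payload)    # rebuild the function from the dumped tuple
print(restored(10))              # -> 20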
1
18
def assert_orientation_landscape_image_is_correct(self, rendition):
    from willow.plugins.pillow import PillowImage

    with rendition.get_willow_image() as willow_image:
        image = PillowImage.open(willow_image)
    # Check that the image is the correct size (and not rotated)
    self.assertEqual(image.get_size(), (600, 450))
    # Check that the red flower is in the bottom left
    # The JPEGs have compressed slightly differently so the colours won't be spot on
    colour = image.image.convert("RGB").getpixel((155, 282))
    self.assertAlmostEqual(colour[0], 217, delta=25)
    self.assertAlmostEqual(colour[1], 38, delta=25)
    self.assertAlmostEqual(colour[2], 46, delta=25)
    # Check that the water is at the bottom
    colour = image.image.convert("RGB").getpixel((377, 434))
    self.assertAlmostEqual(colour[0], 85, delta=25)
    self.assertAlmostEqual(colour[1], 93, delta=25)
    self.assertAlmostEqual(colour[2], 65, delta=25)
wagtail/images/tests/test_models.py
273
wagtail
{ "docstring": "\n Check that the image has the correct colored pixels in the right places\n so that we know the image did not physically rotate.\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 18 }
89
Python
61
d10f15e55806c6944827d801cd9c2d53f5da4186
test_models.py
75,292
13
177
assert_orientation_landscape_image_is_correct
https://github.com/wagtail/wagtail.git
Reformat with black
212
0
16,387
11
3
9
def get_user_model():
    try:
        return django_apps.get_model(settings.AUTH_USER_MODEL, require_ready=False)
    except ValueError:
        raise ImproperlyConfigured(
            "AUTH_USER_MODEL must be of the form 'app_label.model_name'"
        )
    except LookupError:
        raise ImproperlyConfigured(
            "AUTH_USER_MODEL refers to model '%s' that has not been installed"
            % settings.AUTH_USER_MODEL
        )
django/contrib/auth/__init__.py
71
django
{ "docstring": "\n Return the User model that is active in this project.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
35
Python
30
9c19aff7c7561e3a82978a272ecdaad40dda5c00
__init__.py
203,587
12
40
get_user_model
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
115
0
50,462
12
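A conventional usage sketch for get_user_model; the model and field names are illustrative:

from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models

# In an installed app's models.py: reference the user model via the setting so
# projects that swap AUTH_USER_MODEL keep working.
class Article(models.Model):  # illustrative model name
    author = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)

# At run time (views, forms, tests), resolve the concrete user class.
User = get_user_model()
admins = User.objects.filter(is_superuser=True)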
1
2
def visible(self): return self["visible"]
packages/python/plotly/plotly/graph_objs/_bar.py
22
plotly.py
{ "docstring": "\n Determines whether or not this trace is visible. If\n \"legendonly\", the trace is not drawn, but can appear as a\n legend item (provided that the legend itself is visible).\n\n The 'visible' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n [True, False, 'legendonly']\n\n Returns\n -------\n Any\n ", "language": "en", "n_whitespaces": 134, "n_words": 53, "vocab_size": 43 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_bar.py
226,157
2
11
visible
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
57,830
7
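A small usage sketch for the visible property; the figure contents are arbitrary:

import plotly.graph_objects as go

fig = go.Figure(
    data=[
        go.Bar(y=[2, 1, 3], name="shown"),
        go.Bar(y=[1, 4, 2], name="legend only"),
    ]
)
# Hide the second trace from the plot but keep its legend entry clickable.
fig.data[1].visible = "legendonly"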
2
11
def fgcolor(value, dark='000000', light='ffffff'):
    value = value.lower().strip('#')
    if not re.match('^[0-9a-f]{6}$', value):
        return ''
    return f'#{foreground_color(value, dark, light)}'


@register.filter()
netbox/utilities/templatetags/builtins/filters.py
102
@register.filter()
netbox
{ "docstring": "\n Return black (#000000) or white (#ffffff) given an arbitrary background color in RRGGBB format. The foreground\n color with the better contrast is returned.\n\n Args:\n value: The background color\n dark: The foreground color to use for light backgrounds\n light: The foreground color to use for dark backgrounds\n ", "language": "en", "n_whitespaces": 80, "n_words": 46, "vocab_size": 32 }
18
Python
17
7c105019d8ae9205051c302e7499b33a455f9176
filters.py
264,443
5
42
fgcolor
https://github.com/netbox-community/netbox.git
Closes #8600: Document built-in template tags & filters
36
1
77,729
10
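foreground_color is a helper outside this record; below is a hedged, self-contained stand-in that illustrates the contrast rule the docstring describes (the real helper may use a different formula, and the function name here is hypothetical):

import re

def fgcolor_sketch(value, dark='#000000', light='#ffffff'):
    # Hedged stand-in for the foreground_color helper used above: choose the
    # foreground by perceived background brightness.
    value = value.lower().strip('#')
    if not re.match('^[0-9a-f]{6}$', value):
        return ''
    r, g, b = (int(value[i:i + 2], 16) for i in (0, 2, 4))
    brightness = 0.299 * r + 0.587 * g + 0.114 * b  # ITU-R BT.601 luma
    return dark if brightness > 128 else light

print(fgcolor_sketch('ffcc00'))  # bright background -> '#000000'
print(fgcolor_sketch('1a1a1a'))  # dark background   -> '#ffffff'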
2
23
def test_image():
    # Test fails for matplotlib 1.5+ because the size of the image
    # generated by matplotlib has changed.
    if Version(matplotlib.__version__) == Version("3.4.1"):
        image_size = 432
    else:
        pytest.skip("Test fails for older matplotlib")

    np.random.seed(0)  # image size depends on the seed
    fig, ax = plt.subplots(figsize=(2, 2))
    ax.imshow(np.random.random((10, 10)), cmap=plt.cm.jet, interpolation="nearest")

    _assert_output_equal(
        fake_renderer_output(fig, FakeRenderer),
        f,
    )
packages/python/plotly/plotly/matplotlylib/mplexporter/tests/test_basic.py
159
plotly.py
{ "docstring": "\n opening figure\n opening axes\n draw image of size {image_size} \n closing axes\n closing figure\n ", "language": "en", "n_whitespaces": 159, "n_words": 13, "vocab_size": 9 }
55
Python
45
1d82b8822120db088bfeb6c8eae7ec8df9703783
test_basic.py
230,975
18
94
test_image
https://github.com/plotly/plotly.py.git
Updated distutils.Version to packaging.Version
110
0
62,617
11
2
15
def _build_tabs(self) -> None:
    logger.debug("Build Tabs")
    for section in self.config_tools.sections:
        tab = ttk.Notebook(self)
        self._tabs[section] = {"tab": tab}
        self.add(tab, text=section.replace("_", " ").title())
tools/preview/preview.py
110
faceswap
{ "docstring": " Build the notebook tabs for the each configuration section. ", "language": "en", "n_whitespaces": 10, "n_words": 9, "vocab_size": 8 }
21
Python
20
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
preview.py
101,448
7
64
_build_tabs
https://github.com/deepfakes/faceswap.git
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
75
0
20,861
15
3
4
def theme_use_custom_titlebar():
    if USE_CUSTOM_TITLEBAR is False:
        return False

    return USE_CUSTOM_TITLEBAR or pysimplegui_user_settings.get('-custom titlebar-', False)
PySimpleGUI.py
41
PySimpleGUI
{ "docstring": "\n Returns True if a custom titlebar will be / should be used.\n The setting is in the Global Settings window and can be overwridden\n using set_options call\n\n :return: True if a custom titlebar / custom menubar should be used\n :rtype: (bool)\n ", "language": "en", "n_whitespaces": 75, "n_words": 41, "vocab_size": 30 }
14
Python
12
66931d51e1a06797381d3c32c2b1a4400c033357
PySimpleGUI.py
212,994
4
23
theme_use_custom_titlebar
https://github.com/PySimpleGUI/PySimpleGUI.git
Release 4.60.0
30
0
53,561
9
1
16
def test_dataset_stats_shuffle(ray_start_regular_shared):
    context = DatasetContext.get_current()
    context.optimize_fuse_stages = True
    ds = ray.data.range(1000, parallelism=10)
    ds = ds.random_shuffle().repartition(1, shuffle=True)
    stats = canonicalize(ds.stats())
    assert (
        stats
        ==
    )
python/ray/data/tests/test_stats.py
103
ray
{ "docstring": "Stage N read->random_shuffle: executed in T\n\n Substage Z read->random_shuffle_map: N/N blocks executed\n * Remote wall time: T min, T max, T mean, T total\n * Remote cpu time: T min, T max, T mean, T total\n * Output num rows: N min, N max, N mean, N total\n * Output size bytes: N min, N max, N mean, N total\n * Tasks per node: N min, N max, N mean; N nodes used\n\n Substage N random_shuffle_reduce: N/N blocks executed\n * Remote wall time: T min, T max, T mean, T total\n * Remote cpu time: T min, T max, T mean, T total\n * Output num rows: N min, N max, N mean, N total\n * Output size bytes: N min, N max, N mean, N total\n * Tasks per node: N min, N max, N mean; N nodes used\n\nStage N repartition: executed in T\n\n Substage Z repartition_map: N/N blocks executed\n * Remote wall time: T min, T max, T mean, T total\n * Remote cpu time: T min, T max, T mean, T total\n * Output num rows: N min, N max, N mean, N total\n * Output size bytes: N min, N max, N mean, N total\n * Tasks per node: N min, N max, N mean; N nodes used\n\n Substage N repartition_reduce: N/N blocks executed\n * Remote wall time: T min, T max, T mean, T total\n * Remote cpu time: T min, T max, T mean, T total\n * Output num rows: N min, N max, N mean, N total\n * Output size bytes: N min, N max, N mean, N total\n * Tasks per node: N min, N max, N mean; N nodes used\n", "language": "en", "n_whitespaces": 350, "n_words": 280, "vocab_size": 35 }
24
Python
18
ea791ab0a0f176c94c911ef0eb06ca8fa568de0c
test_stats.py
138,510
41
63
test_dataset_stats_shuffle
https://github.com/ray-project/ray.git
[Datasets] Print hierarchical stats for multi-stage operations. (#24119) The total execution time for multi-stage operations being logged twice in the dataset stats is [confusing to users](https://github.com/ray-project/ray/issues/23915), making it seem like each stage in the operation took the same amount of time. This PR modifies the stats output for multi-stage operations, such that the total execution time is printed out once as a top-level op stats line, with the stats for each of the (sub)stages indented and devoid of the total execution time repeat. This also opens the door for other op-level stats (e.g. peak memory utilization) and per-substage stats (e.g. total substage execution time).
59
0
31,449
10
16
53
def rewrite(e, Omega, x, wsym): if not isinstance(Omega, SubsSet): raise TypeError("Omega should be an instance of SubsSet") if len(Omega) == 0: raise ValueError("Length cannot be 0") # all items in Omega must be exponentials for t in Omega.keys(): if not isinstance(t, exp): raise ValueError("Value should be exp") rewrites = Omega.rewrites Omega = list(Omega.items()) nodes = build_expression_tree(Omega, rewrites) Omega.sort(key=lambda x: nodes[x[1]].ht(), reverse=True) # make sure we know the sign of each exp() term; after the loop, # g is going to be the "w" - the simplest one in the mrv set for g, _ in Omega: sig = sign(g.exp, x) if sig != 1 and sig != -1: raise NotImplementedError('Result depends on the sign of %s' % sig) if sig == 1: wsym = 1/wsym # if g goes to oo, substitute 1/w # O2 is a list, which results by rewriting each item in Omega using "w" O2 = [] denominators = [] for f, var in Omega: c = limitinf(f.exp/g.exp, x) if c.is_Rational: denominators.append(c.q) arg = f.exp if var in rewrites: if not isinstance(rewrites[var], exp): raise ValueError("Value should be exp") arg = rewrites[var].args[0] O2.append((var, exp((arg - c*g.exp).expand())*wsym**c)) # Remember that Omega contains subexpressions of "e". So now we find # them in "e" and substitute them for our rewriting, stored in O2 # the following powsimp is necessary to automatically combine exponentials, # so that the .xreplace() below succeeds: # TODO this should not be necessary from sympy.simplify.powsimp import powsimp f = powsimp(e, deep=True, combine='exp') for a, b in O2: f = f.xreplace({a: b}) for _, var in Omega: assert not f.has(var) # finally compute the logarithm of w (logw). logw = g.exp if sig == 1: logw = -logw # log(w)->log(1/w)=-log(w) # Some parts of SymPy have difficulty computing series expansions with # non-integral exponents. The following heuristic improves the situation: exponent = reduce(ilcm, denominators, 1) f = f.subs({wsym: wsym**exponent}) logw /= exponent return f, logw
sympy/series/gruntz.py
589
sympy
{ "docstring": "e(x) ... the function\n Omega ... the mrv set\n wsym ... the symbol which is going to be used for w\n\n Returns the rewritten e in terms of w and log(w). See test_rewrite1()\n for examples and correct results.\n ", "language": "en", "n_whitespaces": 53, "n_words": 38, "vocab_size": 30 }
319
Python
189
f757f3daae6e11ea0cfb7dadc133274d8d74315f
gruntz.py
196,819
43
365
rewrite
https://github.com/sympy/sympy.git
Reordered imports 2
594
0
48,197
19
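rewrite is an internal step of the Gruntz limit algorithm; a hedged sketch that exercises it through the public gruntz entry point instead of calling it directly (the example limits are standard Gruntz-style cases):

from sympy import Symbol, exp, oo
from sympy.series.gruntz import gruntz

x = Symbol('x', positive=True)

# Both limits force the algorithm to rewrite nested exponentials in terms of
# the most-rapidly-varying subexpression, i.e. the code path shown above.
print(gruntz(exp(x + exp(-x)) - exp(x), x, oo))                  # -> 1
print(gruntz(exp(x) * (exp(1/x - exp(-x)) - exp(1/x)), x, oo))   # -> -1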
2
41
def register_converter_cb(key) -> None: from pandas.plotting import ( deregister_matplotlib_converters, register_matplotlib_converters, ) if cf.get_option(key): register_matplotlib_converters() else: deregister_matplotlib_converters() with cf.config_prefix("plotting.matplotlib"): cf.register_option( "register_converters", "auto", register_converter_doc, validator=is_one_of_factory(["auto", True, False]), cb=register_converter_cb, ) # ------ # Styler # ------ styler_sparse_index_doc = styler_sparse_columns_doc = styler_render_repr = styler_max_elements = styler_max_rows = styler_max_columns = styler_precision = styler_decimal = styler_thousands = styler_na_rep = styler_escape = styler_formatter = styler_multirow_align = styler_multicol_align = r styler_hrules = styler_environment = styler_encoding = styler_mathjax = with cf.config_prefix("styler"): cf.register_option("sparse.index", True, styler_sparse_index_doc, validator=is_bool) cf.register_option( "sparse.columns", True, styler_sparse_columns_doc, validator=is_bool ) cf.register_option( "render.repr", "html", styler_render_repr, validator=is_one_of_factory(["html", "latex"]), ) cf.register_option( "render.max_elements", 2**18, styler_max_elements, validator=is_nonnegative_int, ) cf.register_option( "render.max_rows", None, styler_max_rows, validator=is_nonnegative_int, ) cf.register_option( "render.max_columns", None, styler_max_columns, validator=is_nonnegative_int, ) cf.register_option("render.encoding", "utf-8", styler_encoding, validator=is_str) cf.register_option("format.decimal", ".", styler_decimal, validator=is_str) cf.register_option( "format.precision", 6, styler_precision, validator=is_nonnegative_int ) cf.register_option( "format.thousands", None, styler_thousands, validator=is_instance_factory([type(None), str]), ) cf.register_option( "format.na_rep", None, styler_na_rep, validator=is_instance_factory([type(None), str]), ) cf.register_option( "format.escape", None, styler_escape, validator=is_one_of_factory([None, "html", "latex"]), ) cf.register_option( "format.formatter", None, styler_formatter, validator=is_instance_factory([type(None), dict, Callable, str]), ) cf.register_option("html.mathjax", True, styler_mathjax, validator=is_bool) cf.register_option( "latex.multirow_align", "c", styler_multirow_align, validator=is_one_of_factory(["c", "t", "b", "naive"]), ) val_mca = ["r", "|r|", "|r", "r|", "c", "|c|", "|c", "c|", "l", "|l|", "|l", "l|"] val_mca += ["naive-l", "naive-r"] cf.register_option( "latex.multicol_align", "r", styler_multicol_align, validator=is_one_of_factory(val_mca), ) cf.register_option("latex.hrules", False, styler_hrules, validator=is_bool) cf.register_option( "latex.environment", None, styler_environment, validator=is_instance_factory([type(None), str]), )
pandas/core/config_init.py
854
pandas
{ "docstring": "\n: bool\n Whether to sparsify the display of a hierarchical index. Setting to False will\n display each explicit level element in a hierarchical key for each row.\n\n: bool\n Whether to sparsify the display of hierarchical columns. Setting to False will\n display each explicit level element in a hierarchical key for each column.\n\n: str\n Determine which output to use in Jupyter Notebook in {\"html\", \"latex\"}.\n\n: int\n The maximum number of data-cell (<td>) elements that will be rendered before\n trimming will occur over columns, rows or both if needed.\n\n: int, optional\n The maximum number of rows that will be rendered. May still be reduced to\n satsify ``max_elements``, which takes precedence.\n\n: int, optional\n The maximum number of columns that will be rendered. May still be reduced to\n satsify ``max_elements``, which takes precedence.\n\n: int\n The precision for floats and complex numbers.\n\n: str\n The character representation for the decimal separator for floats and complex.\n\n: str, optional\n The character representation for thousands separator for floats, int and complex.\n\n: str, optional\n The string representation for values identified as missing.\n\n: str, optional\n Whether to escape certain characters according to the given context; html or latex.\n\n: str, callable, dict, optional\n A formatter object to be used as default within ``Styler.format``.\n\n: {\"c\", \"t\", \"b\"}\n The specifier for vertical alignment of sparsified LaTeX multirows.\n\n: {\"r\", \"c\", \"l\", \"naive-l\", \"naive-r\"}\n The specifier for horizontal alignment of sparsified LaTeX multicolumns. Pipe\n decorators can also be added to non-naive values to draw vertical\n rules, e.g. \"\\|r\" will draw a rule on the left side of right aligned merged cells.\n\n: bool\n Whether to add horizontal rules on top and bottom and below the headers.\n\n: str\n The environment to replace ``\\\\begin{table}``. If \"longtable\" is used results\n in a specific longtable environment format.\n\n: str\n The encoding used for output HTML and LaTeX files.\n\n: bool\n If False will render special CSS classes to table attributes that indicate Mathjax\n will not be used in Jupyter Notebook.\n", "language": "en", "n_whitespaces": 397, "n_words": 334, "vocab_size": 162 }
200
Python
127
9612375ca28ade056f15d4338f1bfde5d045c9fc
config_init.py
167,698
9
34
register_converter_cb
https://github.com/pandas-dev/pandas.git
TYP: return values in core/*.py (#47587) * TYP: return values in core/*.py * fix test * to_html * to_html part 2 * DataFrame.query * more overloads * fix query? * increase stacklevel by one * fix rename_axis * and an overload for DataFrame.eval * address comments * fix typevar
695
0
40,081
13
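A sketch of how the options registered above are consumed downstream; the option names follow the registrations, the values are illustrative:

import pandas as pd

# The options registered above are addressed by their full dotted names.
pd.set_option("styler.format.precision", 4)
pd.set_option("styler.sparse.index", False)
pd.set_option("plotting.matplotlib.register_converters", True)

df = pd.DataFrame({"x": [1.23456, 2.34567]})
html = df.style.to_html()  # Styler output now uses 4-digit precision
print(pd.get_option("styler.format.precision"))  # -> 4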
2
7
def get_accounts_in_mappers(mapping_names): return frappe.db.sql(, (', '.join('%s' % d for d in mapping_names)))
erpnext/accounts/report/cash_flow/custom_cash_flow.py
50
erpnext
{ "docstring": "\n\t\tselect cfma.name, cfm.label, cfm.is_working_capital, cfm.is_income_tax_liability,\n\t\tcfm.is_income_tax_expense, cfm.is_finance_cost, cfm.is_finance_cost_adjustment, cfma.account\n\t\tfrom `tabCash Flow Mapping Accounts` cfma\n\t\tjoin `tabCash Flow Mapping` cfm on cfma.parent=cfm.name\n\t\twhere cfma.parent in (%s)\n\t\torder by cfm.is_working_capital\n\t", "language": "en", "n_whitespaces": 23, "n_words": 29, "vocab_size": 27 }
12
Python
11
119273e633ec8e56c7d5c4649ef81c3deeb5f7d2
custom_cash_flow.py
64,634
9
29
get_accounts_in_mappers
https://github.com/frappe/erpnext.git
fix: custom cash flow mapper doesn't show any data
10
0
13,684
12
1
2
def circle(self): return self["circle"]
packages/python/plotly/plotly/graph_objs/layout/mapbox/_layer.py
22
plotly.py
{ "docstring": "\n The 'circle' property is an instance of Circle\n that may be specified as:\n - An instance of :class:`plotly.graph_objs.layout.mapbox.layer.Circle`\n - A dict of string/value properties that will be passed\n to the Circle constructor\n\n Supported dict properties:\n\n radius\n Sets the circle radius\n (mapbox.layer.paint.circle-radius). Has an\n effect only when `type` is set to \"circle\".\n\n Returns\n -------\n plotly.graph_objs.layout.mapbox.layer.Circle\n ", "language": "en", "n_whitespaces": 209, "n_words": 54, "vocab_size": 41 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_layer.py
232,039
2
11
circle
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
63,483
7