Dataset schema (column name, dtype, observed min/max as reported by the viewer):

    complexity      int64     1 to 139
    fun_name        string    length 1 to 80
    code            string    length 101 to 62.2k
    commit_id       string    length 40 to 40
    ast_errors      string    length 0 to 3.11k
    ast_levels      int64     6 to 36
    file_name       string    length 5 to 79
    n_ast_nodes     int64     17 to 19.2k
    commit_message  string    length 3 to 15.3k
    d_id            int64     12 to 121k
    n_ast_errors    int64     0 to 9
    n_whitespaces   int64     4 to 10.8k
    token_counts    int64     5 to 3.06k
    vocab_size      int64     4 to 1.11k
    id              int64     20 to 338k
    n_words         int64     4 to 4.82k
    repo            string    length 3 to 22
    n_identifiers   int64     2 to 176
    path            string    length 7 to 134
    language        string    1 class
    nloc            int64     1 to 413
    documentation   dict
    url             string    length 31 to 59
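The rows below are printed as flattened records in the column order above (empty ast_errors fields are simply omitted). As an illustration only, here is a minimal sketch of how records with this schema could be loaded and inspected with the Hugging Face datasets library; the file name code_metrics.jsonl is a hypothetical placeholder, not the actual dataset identifier.

    from datasets import load_dataset

    # Hypothetical local export of the rows shown below, one JSON object per line.
    ds = load_dataset("json", data_files="code_metrics.jsonl", split="train")

    # Each record pairs a function's source with commit metadata and AST-derived metrics.
    for row in ds.select(range(3)):
        print(row["repo"], row["path"], row["fun_name"], row["complexity"], row["nloc"])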
1
test_kivy_log_mode_marker_on
def test_kivy_log_mode_marker_on():
    from kivy.logger import previous_stderr

    assert sys.stderr == previous_stderr, "Kivy.logging override stderr"
    assert logging.root.parent is None, "Kivy.logging override root logger"
2d9755ad8a82ba0777299cbc1666bed25278db94
8
test_logger.py
50
Support KivyLogMode environment variable for logging testing (#7971) * Support KivyLogMode for logging testing Also: Remove unused imports. Remove Python 2 only code Run through Black to canonicalize formatting * Undo formatting changes Undo black.
47,024
0
33
29
18
194,668
21
kivy
9
kivy/tests/test_logger.py
Python
4
{ "docstring": "\n This is a test of the pytest marker \"logmodetest\".\n This should only be invoked if the environment variable is properly set\n (before pytest is run).\n\n Also, tests that kivy.logger paid attention to the environment variable\n ", "language": "en", "n_whitespaces": 51, "n_words": 35, "vocab_size": 27 }
https://github.com/kivy/kivy.git
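Several columns above (n_ast_nodes, ast_levels, n_identifiers) are AST-derived. Below is a rough sketch, using only the standard-library ast module, of how such metrics can be computed for a snippet like the test above; the exact counting rules used by this dataset are an assumption and may differ.

    import ast

    source = (
        "def f(x):\n"
        "    return x + 1\n"
    )

    tree = ast.parse(source)
    nodes = list(ast.walk(tree))  # every AST node, depth-first
    identifiers = {n.id for n in nodes if isinstance(n, ast.Name)}

    def depth(node):
        # longest path from this node down to a leaf
        children = list(ast.iter_child_nodes(node))
        return 1 + max((depth(c) for c in children), default=0)

    print(len(nodes), depth(tree), sorted(identifiers))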
3
_get_checkfiles_linux
def _get_checkfiles_linux(self):
    chk = os.popen("ldconfig -p | grep -P \"libcudnn.so.\\d+\" | head -n 1").read()
    chk = chk.strip().replace("libcudnn.so.", "")
    if not chk:
        return []
    cudnn_vers = chk[0]
    header_files = [f"cudnn_v{cudnn_vers}.h"] + self._cudnn_header_files
    cudnn_path = os.path.realpath(chk[chk.find("=>") + 3:chk.find("libcudnn") - 1])
    cudnn_path = cudnn_path.replace("lib", "include")
    cudnn_checkfiles = [os.path.join(cudnn_path, header) for header in header_files]
    return cudnn_checkfiles
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
14
setup.py
202
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
19,893
0
133
114
40
100,410
52
faceswap
18
setup.py
Python
11
{ "docstring": " Return the the files to check for cuDNN locations for Linux by querying\n the dynamic link loader.\n\n Returns\n -------\n list\n List of header file locations to scan for cuDNN versions\n ", "language": "en", "n_whitespaces": 77, "n_words": 30, "vocab_size": 23 }
https://github.com/deepfakes/faceswap.git
1
set_default_options
def set_default_options(self) -> None:
    default = self.cli_opts.get_option_values()
    logger.debug(default)
    self._gui_objects.default_options = default
    self.project.set_default_options()
dc18c74eea0c7837a820d27628cb12b0824fa30e
9
utils.py
64
Bugfix: Preview for extract in batch mode
20,926
0
47
37
10
101,515
12
faceswap
10
lib/gui/utils.py
Python
12
{ "docstring": " Set the default options for :mod:`lib.gui.projects`\n\n The Default GUI options are stored on Faceswap startup.\n\n Exposed as the :attr:`_default_opts` for a project cannot be set until after the main\n Command Tabs have been loaded.\n ", "language": "en", "n_whitespaces": 63, "n_words": 34, "vocab_size": 30 }
https://github.com/deepfakes/faceswap.git
5
do_lint
def do_lint() -> Set[str]:
    failures = set()

    with monkeypatch_pydantic():
        logger.debug("Importing synapse")
        try:
            # TODO: make "synapse" an argument so we can target this script at
            # a subpackage
            module = importlib.import_module("synapse")
        except ModelCheckerException as e:
            logger.warning("Bad annotation found when importing synapse")
            failures.add(format_model_checker_exception(e))
            return failures

        try:
            logger.debug("Fetching subpackages")
            module_infos = list(
                pkgutil.walk_packages(module.__path__, f"{module.__name__}.")
            )
        except ModelCheckerException as e:
            logger.warning("Bad annotation found when looking for modules to import")
            failures.add(format_model_checker_exception(e))
            return failures

        for module_info in module_infos:
            logger.debug("Importing %s", module_info.name)
            try:
                importlib.import_module(module_info.name)
            except ModelCheckerException as e:
                logger.warning(
                    f"Bad annotation found when importing {module_info.name}"
                )
                failures.add(format_model_checker_exception(e))

    return failures
ba8938b090c7e1908cfa4feac75f08f3bc1183e8
17
check_pydantic_models.py
285
Reject non-strict types in Pydantic models (#13502)
72,885
0
406
152
61
249,389
93
synapse
24
scripts-dev/check_pydantic_models.py
Python
30
{ "docstring": "Try to import all of Synapse and see if we spot any Pydantic type coercions.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 15 }
https://github.com/matrix-org/synapse.git
17
kernS
def kernS(s):
    hit = False
    quoted = '"' in s or "'" in s
    if '(' in s and not quoted:
        if s.count('(') != s.count(")"):
            raise SympifyError('unmatched left parenthesis')

        # strip all space from s
        s = ''.join(s.split())
        olds = s
        # now use space to represent a symbol that
        # will
        # step 1. turn potential 2-arg Muls into 3-arg versions
        # 1a. *( -> * *(
        s = s.replace('*(', '* *(')
        # 1b. close up exponentials
        s = s.replace('** *', '**')
        # 2. handle the implied multiplication of a negated
        # parenthesized expression in two steps
        # 2a: -(...) --> -( *(...)
        target = '-( *('
        s = s.replace('-(', target)
        # 2b: double the matching closing parenthesis
        # -( *(...) --> -( *(...))
        i = nest = 0
        assert target.endswith('(')  # assumption below
        while True:
            j = s.find(target, i)
            if j == -1:
                break
            j += len(target) - 1
            for j in range(j, len(s)):
                if s[j] == "(":
                    nest += 1
                elif s[j] == ")":
                    nest -= 1
                if nest == 0:
                    break
            s = s[:j] + ")" + s[j:]
            i = j + 2  # the first char after 2nd )
        if ' ' in s:
            # get a unique kern
            kern = '_'
            while kern in s:
                kern += choice(string.ascii_letters + string.digits)
            s = s.replace(' ', kern)
            hit = kern in s
        else:
            hit = False

    for i in range(2):
        try:
            expr = sympify(s)
            break
        except TypeError:  # the kern might cause unknown errors...
            if hit:
                s = olds  # maybe it didn't like the kern; use un-kerned s
                hit = False
                continue
            expr = sympify(s)  # let original error raise

    if not hit:
        return expr

    from .symbol import Symbol
    rep = {Symbol(kern): 1}
65be461082dda54c8748922f9c29a19af1279fe1
16
sympify.py
535
Remove abbreviations in documentation
48,459
0
868
307
166
197,316
288
sympy
29
sympy/core/sympify.py
Python
53
{ "docstring": "Use a hack to try keep autosimplification from distributing a\n a number into an Add; this modification does not\n prevent the 2-arg Mul from becoming an Add, however.\n\n Examples\n ========\n\n >>> from sympy.core.sympify import kernS\n >>> from sympy.abc import x, y\n\n The 2-arg Mul distributes a number (or minus sign) across the terms\n of an expression, but kernS will prevent that:\n\n >>> 2*(x + y), -(x + 1)\n (2*x + 2*y, -x - 1)\n >>> kernS('2*(x + y)')\n 2*(x + y)\n >>> kernS('-(x + 1)')\n -(x + 1)\n\n If use of the hack fails, the un-hacked string will be passed to sympify...\n and you get what you get.\n\n XXX This hack should not be necessary once issue 4596 has been resolved.\n ", "language": "en", "n_whitespaces": 175, "n_words": 121, "vocab_size": 82 }
https://github.com/sympy/sympy.git
1
test_install_fail_dnf_try_fileset
def test_install_fail_dnf_try_fileset():
    bos_net_fake_error = dnf_installp_call = MagicMock(
        side_effect=[
            {"retcode": 1, "stdout": "", "stderr": bos_net_fake_error},
            {"retcode": 0, "stdout": ""},
        ]
    )
    fileset_pkg_name = "/cecc/repos/aix72/TL3/BASE/installp/ppc/bos.net"
    list_pkgs_mock = MagicMock(
        side_effect=[
            {"bos.net.tcp.tcpdump": "7.1.6.3"},
            {"bos.net.tcp.tcpdump": "7.2.4.1"},
        ]
    )
    with patch("pathlib.Path.is_file", return_value=True):
        with patch.dict(
            aixpkg.__salt__,
            {
                "cmd.run_all": dnf_installp_call,
                "config.get": MagicMock(return_value=False),
            },
        ), patch.object(aixpkg, "list_pkgs", list_pkgs_mock):
            result = aixpkg.install(fileset_pkg_name)
            assert dnf_installp_call.call_count == 2
            libpath_env = {"LIBPATH": "/opt/freeware/lib:/usr/lib"}
            dnf_installp_call.assert_any_call(
                f"/opt/freeware/bin/dnf install --allowerasing --assumeyes {fileset_pkg_name}",
                env=libpath_env,
                ignore_retcode=True,
                python_shell=False,
            )
            dnf_installp_call.assert_called_with(
                "/usr/sbin/installp -acYXg -d /cecc/repos/aix72/TL3/BASE/installp/ppc bos.net",
                python_shell=False,
            )
            expected = {"bos.net.tcp.tcpdump": {"old": "7.1.6.3", "new": "7.2.4.1"}}
            assert result == expected
f1c37893caf90738288e789c3233ab934630254f
16
test_aixpkg.py
331
Working tests for install
53,810
0
453
184
67
215,093
89
salt
23
tests/pytests/unit/modules/test_aixpkg.py
Python
43
{ "docstring": "\n Test install of non-recognized extension, first dnf then fileset\n AIX generic repository 12 kB/s | 2.6 kB 00:00\nAIX noarch repository 12 kB/s | 2.5 kB 00:00 \nAIX 7.2 specific repository 12 kB/s | 2.5 kB 00:00 \nNo match for argument: bos.net\nError: Unable to find a match: bos.net\n", "language": "en", "n_whitespaces": 709, "n_words": 49, "vocab_size": 33 }
https://github.com/saltstack/salt.git
10
plot_resources
def plot_resources(results, palette="Viridis", **kwargs):
    bp = import_required("bokeh.plotting", _BOKEH_MISSING_MSG)
    from bokeh import palettes
    from bokeh.models import LinearAxis, Range1d

    defaults = dict(
        title="Profile Results",
        tools="save,reset,xwheel_zoom,xpan",
        toolbar_location="above",
        width=800,
        height=300,
    )
    # Support plot_width and plot_height for backwards compatibility
    if "plot_width" in kwargs:
        kwargs["width"] = kwargs.pop("plot_width")
        if BOKEH_VERSION().major >= 3:
            warnings.warn("Use width instead of plot_width with Bokeh >= 3")
    if "plot_height" in kwargs:
        kwargs["height"] = kwargs.pop("plot_height")
        if BOKEH_VERSION().major >= 3:
            warnings.warn("Use height instead of plot_height with Bokeh >= 3")

    # Drop `label_size` to match `plot_cache` and `plot_tasks` kwargs
    if "label_size" in kwargs:
        kwargs.pop("label_size")

    defaults.update(**kwargs)

    if results:
        t, mem, cpu = zip(*results)
        left, right = min(t), max(t)
        t = [i - left for i in t]
        p = bp.figure(
            y_range=fix_bounds(0, max(cpu), 100),
            x_range=fix_bounds(0, right - left, 1),
            **defaults,
        )
    else:
        t = mem = cpu = []
        p = bp.figure(y_range=(0, 100), x_range=(0, 1), **defaults)
    colors = palettes.all_palettes[palette][6]
    p.line(
        t,
        cpu,
        color=colors[0],
        line_width=4,
        legend_label="% CPU",
    )
    p.yaxis.axis_label = "% CPU"
    p.extra_y_ranges = {
        "memory": Range1d(
            *fix_bounds(min(mem) if mem else 0, max(mem) if mem else 100, 100)
        )
    }
    p.line(
        t,
        mem,
        color=colors[2],
        y_range_name="memory",
        line_width=4,
        legend_label="Memory",
    )
    p.add_layout(LinearAxis(y_range_name="memory", axis_label="Memory (MB)"), "right")
    p.xaxis.axis_label = "Time (s)"
    return p
6193b9de78798fc9b2d934e3317debc9bb5d8af5
16
profile_visualize.py
615
Handle plot_width / plot_height deprecations (#8544) Bokeh 3.0 will finally deprecates `plot_width` and `plot_height` and expose only `width` and `height` (consistent with every other layout-able).
36,429
0
541
378
124
155,568
190
dask
51
dask/diagnostics/profile_visualize.py
Python
59
{ "docstring": "Plot resource usage in a bokeh plot.\n\n Parameters\n ----------\n results : sequence\n Output of ResourceProfiler.results\n palette : string, optional\n Name of the bokeh palette to use, must be a member of\n bokeh.palettes.all_palettes.\n **kwargs\n Other keyword arguments, passed to bokeh.figure. These will override\n all defaults set by plot_resources.\n\n Returns\n -------\n The completed bokeh plot object.\n ", "language": "en", "n_whitespaces": 116, "n_words": 54, "vocab_size": 46 }
https://github.com/dask/dask.git
9
get_mode_of_payment_details
def get_mode_of_payment_details(filters):
    mode_of_payment_details = {}
    invoice_list = get_invoices(filters)
    invoice_list_names = ",".join('"' + invoice['name'] + '"' for invoice in invoice_list)
    if invoice_list:
        inv_mop_detail = frappe.db.sql(.format(invoice_list_names=invoice_list_names), as_dict=1)
        inv_change_amount = frappe.db.sql(.format(invoice_list_names=invoice_list_names), as_dict=1)
        for d in inv_change_amount:
            for det in inv_mop_detail:
                if det["owner"] == d["owner"] and det["posting_date"] == d["posting_date"] and det["mode_of_payment"] == d["mode_of_payment"]:
                    paid_amount = det["paid_amount"] - d["change_amount"]
                    det["paid_amount"] = paid_amount
        for d in inv_mop_detail:
            mode_of_payment_details.setdefault(d["owner"]+cstr(d["posting_date"]), []).append((d.mode_of_payment, d.paid_amount))
    return mode_of_payment_details
3eb5440aa968960528379930cc3c2ba4a4ee544a
18
sales_payment_summary.py
299
fix: linters erros on report sales payments summary (#30345) * fix: wrong values for report and get change amout based on payment TYPE. * charcase for select field. * fix: linter check erros * fix: linters errors Co-authored-by: Ankush Menat <[email protected]>
13,689
0
50
177
41
64,660
65
erpnext
22
erpnext/accounts/report/sales_payment_summary/sales_payment_summary.py
Python
55
{ "docstring": "\n\t\t\tselect t.owner,\n\t\t\t t.posting_date,\n\t\t\t\t t.mode_of_payment,\n\t\t\t\t sum(t.paid_amount) as paid_amount\n\t\t\tfrom (\n\t\t\t\tselect a.owner, a.posting_date,\n\t\t\t\tifnull(b.mode_of_payment, '') as mode_of_payment, sum(b.base_amount) as paid_amount\n\t\t\t\tfrom `tabSales Invoice` a, `tabSales Invoice Payment` b\n\t\t\t\twhere a.name = b.parent\n\t\t\t\tand a.docstatus = 1\n\t\t\t\tand a.name in ({invoice_list_names})\n\t\t\t\tgroup by a.owner, a.posting_date, mode_of_payment\n\t\t\t\tunion\n\t\t\t\tselect a.owner,a.posting_date,\n\t\t\t\tifnull(b.mode_of_payment, '') as mode_of_payment, sum(c.allocated_amount) as paid_amount\n\t\t\t\tfrom `tabSales Invoice` a, `tabPayment Entry` b,`tabPayment Entry Reference` c\n\t\t\t\twhere a.name = c.reference_name\n\t\t\t\tand b.name = c.parent\n\t\t\t\tand b.docstatus = 1\n\t\t\t\tand a.name in ({invoice_list_names})\n\t\t\t\tgroup by a.owner, a.posting_date, mode_of_payment\n\t\t\t\tunion\n\t\t\t\tselect a.owner, a.posting_date,\n\t\t\t\tifnull(a.voucher_type,'') as mode_of_payment, sum(b.credit)\n\t\t\t\tfrom `tabJournal Entry` a, `tabJournal Entry Account` b\n\t\t\t\twhere a.name = b.parent\n\t\t\t\tand a.docstatus = 1\n\t\t\t\tand b.reference_type = \"Sales Invoice\"\n\t\t\t\tand b.reference_name in ({invoice_list_names})\n\t\t\t\tgroup by a.owner, a.posting_date, mode_of_payment\n\t\t\t) t\n\t\t\tgroup by t.owner, t.posting_date, t.mode_of_payment\n\t\t\tselect a.owner, a.posting_date,\n\t\t\tifnull(b.mode_of_payment, '') as mode_of_payment, sum(a.base_change_amount) as change_amount\n\t\t\tfrom `tabSales Invoice` a, `tabSales Invoice Payment` b\n\t\t\twhere a.name = b.parent\n\t\t\tand a.name in ({invoice_list_names})\n\t\t\tand b.type = 'Cash'\n\t\t\tand a.base_change_amount > 0\n\t\t\tgroup by a.owner, a.posting_date, mode_of_payment", "language": "en", "n_whitespaces": 142, "n_words": 169, "vocab_size": 64 }
https://github.com/frappe/erpnext.git
3
adv_search_text
def adv_search_text(q, include_inputs, exclude_inputs, data_value):
    for inp in include_inputs:
        q = q.filter(db.Books.data.any(data_value == inp))
    for excl in exclude_inputs:
        q = q.filter(not_(db.Books.data.any(data_value == excl)))
    return q
4545f4a20d9ff90b99bbd4e3e34b6de4441d6367
'''
17
web.py
206
Better epub cover parsing with multiple cover-image items Code cosmetics renamed variables refactored xml page generation refactored prepare author
40,820
1
47
64
19
172,806
25
calibre-web
19
cps/web.py
Python
6
{ "docstring": "def adv_search_extension(q, include_extension_inputs, exclude_extension_inputs):\n for extension in include_extension_inputs:\n q = q.filter(db.Books.data.any(db.Data.format == extension))\n for extension in exclude_extension_inputs:\n q = q.filter(not_(db.Books.data.any(db.Data.format == extension)))\n return q\n\n", "language": "en", "n_whitespaces": 46, "n_words": 24, "vocab_size": 17 }
https://github.com/janeczku/calibre-web.git
1
test_api_create_invalid_storage_path
def test_api_create_invalid_storage_path(self):
    response = self.client.post(
        self.ENDPOINT,
        json.dumps(
            {
                "name": "Another storage path",
                "path": "Somewhere/{correspdent}",
            },
        ),
        content_type="application/json",
    )
    self.assertEqual(response.status_code, 400)
    self.assertEqual(StoragePath.objects.count(), 1)
d7f7d839f8a6b7d0378dda1e0744739748d71b9c
13
test_api.py
108
Adds invalid storage path format test
117,006
0
169
64
22
319,842
22
paperless-ngx
14
src/documents/tests/test_api.py
Python
13
{ "docstring": "\n GIVEN:\n - API request to create a storage paths\n - Storage path format is incorrect\n WHEN:\n - API is called\n THEN:\n - Correct HTTP 400 response\n - No storage path is created\n ", "language": "en", "n_whitespaces": 116, "n_words": 32, "vocab_size": 23 }
https://github.com/paperless-ngx/paperless-ngx.git
14
resolve_template_files
def resolve_template_files(self) -> None:
    if self.template_ext:
        for field in self.template_fields:
            content = getattr(self, field, None)
            if content is None:
                continue
            elif isinstance(content, str) and any(content.endswith(ext) for ext in self.template_ext):
                env = self.get_template_env()
                try:
                    setattr(self, field, env.loader.get_source(env, content)[0])  # type: ignore
                except Exception:
                    self.log.exception("Failed to resolve template field %r", field)
            elif isinstance(content, list):
                env = self.get_template_env()
                for i, item in enumerate(content):
                    if isinstance(item, str) and any(item.endswith(ext) for ext in self.template_ext):
                        try:
                            content[i] = env.loader.get_source(env, item)[0]  # type: ignore
                        except Exception as e:
                            self.log.exception(e)
    self.prepare_template()
ff3bbc3db24f9f3f4f88033d48859fb08fc3237b
23
base.py
298
Implement enough interface for MappedOperator to be baggable (#20945)
8,182
0
476
189
54
44,163
83
airflow
26
airflow/models/base.py
Python
22
{ "docstring": "Getting the content of files for template_field / template_ext.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/apache/airflow.git
1
autocast_box_type
def autocast_box_type(dst_box_type='hbox') -> Callable:
    _, box_type_cls = get_box_type(dst_box_type)
af063a6f25ddae4de90646f86b2db824f3d00138
8
box_type.py
34
[Refactor] Refactor pipelines with boxlist. (#8562) * Refactor pipelines and data_preprocesser by boxlist * Refactor browse_dataset.py * Update * Update * Update * Update * update * Update * Change with_box_wrapped to with_boxlist * Fix comments * Fix commits * Update UT
70,810
0
14
22
8
245,504
8
mmdetection
6
mmdet/structures/bbox/box_type.py
Python
18
{ "docstring": "A decorator which automatically casts results['gt_bboxes'] to the\n destination box type.\n\n It commenly used in mmdet.datasets.transforms to make the transforms up-\n compatible with the np.ndarray type of results['gt_bboxes'].\n\n The speed of processing of np.ndarray and BaseBoxes data are the same:\n\n - np.ndarray: 0.0509 img/s\n - BaseBoxes: 0.0551 img/s\n\n Args:\n dst_box_type (str): Destination box type.\n ", "language": "en", "n_whitespaces": 85, "n_words": 54, "vocab_size": 43 }
https://github.com/open-mmlab/mmdetection.git
2
__next__
def __next__(self):
    if self._leftover:
        output = self._leftover
        self._leftover = b""
    else:
        output = next(self._producer)
        self._unget_history = []
    self.position += len(output)
    return output
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
multipartparser.py
82
Refs #33476 -- Reformatted code with Black.
51,343
0
101
48
16
206,055
22
django
9
django/http/multipartparser.py
Python
9
{ "docstring": "\n Used when the exact number of bytes to read is unimportant.\n\n Return whatever chunk is conveniently returned from the iterator.\n Useful to avoid unnecessary bookkeeping if performance is an issue.\n ", "language": "en", "n_whitespaces": 59, "n_words": 30, "vocab_size": 26 }
https://github.com/django/django.git
7
get_power_utilization
def get_power_utilization(self):
    powerfeeds = PowerFeed.objects.filter(rack=self)
    available_power_total = sum(pf.available_power for pf in powerfeeds)
    print(f'available_power_total: {available_power_total}')
    if not available_power_total:
        return 0

    powerports = []
    for powerfeed in powerfeeds:
        powerports.extend([
            peer for peer in powerfeed.link_peers if isinstance(peer, PowerPort)
        ])

    allocated_draw = 0
    for powerport in powerports:
        allocated_draw += powerport.get_power_draw()['allocated']
    print(f'allocated_draw: {allocated_draw}')

    return int(allocated_draw / available_power_total * 100)
fcd1daaf798d62023f999c3e09e035f7b3f47c8f
13
racks.py
175
Update power utilization calculations for new cabling model
78,027
0
190
103
39
265,205
54
netbox
23
netbox/dcim/models/racks.py
Python
16
{ "docstring": "\n Determine the utilization rate of power in the rack and return it as a percentage.\n ", "language": "en", "n_whitespaces": 30, "n_words": 15, "vocab_size": 14 }
https://github.com/netbox-community/netbox.git
11
k_edge_augmentation
def k_edge_augmentation(G, k, avail=None, weight=None, partial=False):
    try:
        if k <= 0:
            raise ValueError(f"k must be a positive integer, not {k}")
        elif G.number_of_nodes() < k + 1:
            msg = f"impossible to {k} connect in graph with less than {k + 1} nodes"
            raise nx.NetworkXUnfeasible(msg)
        elif avail is not None and len(avail) == 0:
            if not nx.is_k_edge_connected(G, k):
                raise nx.NetworkXUnfeasible("no available edges")
            aug_edges = []
        elif k == 1:
            aug_edges = one_edge_augmentation(
                G, avail=avail, weight=weight, partial=partial
            )
        elif k == 2:
            aug_edges = bridge_augmentation(G, avail=avail, weight=weight)
        else:
            # raise NotImplementedError(f'not implemented for k>2. k={k}')
            aug_edges = greedy_k_edge_augmentation(
                G, k=k, avail=avail, weight=weight, seed=0
            )
        # Do eager evaulation so we can catch any exceptions
        # Before executing partial code.
        yield from list(aug_edges)
    except nx.NetworkXUnfeasible:
        if partial:
            # Return all available edges
            if avail is None:
                aug_edges = complement_edges(G)
            else:
                # If we can't k-edge-connect the entire graph, try to
                # k-edge-connect as much as possible
                aug_edges = partial_k_edge_augmentation(
                    G, k=k, avail=avail, weight=weight
                )
            yield from aug_edges
        else:
            raise
26b7de005ac562786f72b24a73af5a59bbab6953
17
edge_augmentation.py
342
doc: fix typos in docstring and comment (#5647)
42,026
0
566
207
108
176,658
165
networkx
21
networkx/algorithms/connectivity/edge_augmentation.py
Python
33
{ "docstring": "Finds set of edges to k-edge-connect G.\n\n Adding edges from the augmentation to G make it impossible to disconnect G\n unless k or more edges are removed. This function uses the most efficient\n function available (depending on the value of k and if the problem is\n weighted or unweighted) to search for a minimum weight subset of available\n edges that k-edge-connects G. In general, finding a k-edge-augmentation is\n NP-hard, so solutions are not guaranteed to be minimal. Furthermore, a\n k-edge-augmentation may not exist.\n\n Parameters\n ----------\n G : NetworkX graph\n An undirected graph.\n\n k : integer\n Desired edge connectivity\n\n avail : dict or a set of 2 or 3 tuples\n The available edges that can be used in the augmentation.\n\n If unspecified, then all edges in the complement of G are available.\n Otherwise, each item is an available edge (with an optional weight).\n\n In the unweighted case, each item is an edge ``(u, v)``.\n\n In the weighted case, each item is a 3-tuple ``(u, v, d)`` or a dict\n with items ``(u, v): d``. The third item, ``d``, can be a dictionary\n or a real number. If ``d`` is a dictionary ``d[weight]``\n correspondings to the weight.\n\n weight : string\n key to use to find weights if ``avail`` is a set of 3-tuples where the\n third item in each tuple is a dictionary.\n\n partial : boolean\n If partial is True and no feasible k-edge-augmentation exists, then all\n a partial k-edge-augmentation is generated. Adding the edges in a\n partial augmentation to G, minimizes the number of k-edge-connected\n components and maximizes the edge connectivity between those\n components. For details, see :func:`partial_k_edge_augmentation`.\n\n Yields\n ------\n edge : tuple\n Edges that, once added to G, would cause G to become k-edge-connected.\n If partial is False, an error is raised if this is not possible.\n Otherwise, generated edges form a partial augmentation, which\n k-edge-connects any part of G where it is possible, and maximally\n connects the remaining parts.\n\n Raises\n ------\n NetworkXUnfeasible\n If partial is False and no k-edge-augmentation exists.\n\n NetworkXNotImplemented\n If the input graph is directed or a multigraph.\n\n ValueError:\n If k is less than 1\n\n Notes\n -----\n When k=1 this returns an optimal solution.\n\n When k=2 and ``avail`` is None, this returns an optimal solution.\n Otherwise when k=2, this returns a 2-approximation of the optimal solution.\n\n For k>3, this problem is NP-hard and this uses a randomized algorithm that\n produces a feasible solution, but provides no guarantees on the\n solution weight.\n\n Examples\n --------\n >>> # Unweighted cases\n >>> G = nx.path_graph((1, 2, 3, 4))\n >>> G.add_node(5)\n >>> sorted(nx.k_edge_augmentation(G, k=1))\n [(1, 5)]\n >>> sorted(nx.k_edge_augmentation(G, k=2))\n [(1, 5), (5, 4)]\n >>> sorted(nx.k_edge_augmentation(G, k=3))\n [(1, 4), (1, 5), (2, 5), (3, 5), (4, 5)]\n >>> complement = list(nx.k_edge_augmentation(G, k=5, partial=True))\n >>> G.add_edges_from(complement)\n >>> nx.edge_connectivity(G)\n 4\n\n >>> # Weighted cases\n >>> G = nx.path_graph((1, 2, 3, 4))\n >>> G.add_node(5)\n >>> # avail can be a tuple with a dict\n >>> avail = [(1, 5, {\"weight\": 11}), (2, 5, {\"weight\": 10})]\n >>> sorted(nx.k_edge_augmentation(G, k=1, avail=avail, weight=\"weight\"))\n [(2, 5)]\n >>> # or avail can be a 3-tuple with a real number\n >>> avail = [(1, 5, 11), (2, 5, 10), (4, 3, 1), (4, 5, 51)]\n >>> sorted(nx.k_edge_augmentation(G, k=2, avail=avail))\n [(1, 5), 
(2, 5), (4, 5)]\n >>> # or avail can be a dict\n >>> avail = {(1, 5): 11, (2, 5): 10, (4, 3): 1, (4, 5): 51}\n >>> sorted(nx.k_edge_augmentation(G, k=2, avail=avail))\n [(1, 5), (2, 5), (4, 5)]\n >>> # If augmentation is infeasible, then a partial solution can be found\n >>> avail = {(1, 5): 11}\n >>> sorted(nx.k_edge_augmentation(G, k=2, avail=avail, partial=True))\n [(1, 5)]\n ", "language": "en", "n_whitespaces": 971, "n_words": 592, "vocab_size": 262 }
https://github.com/networkx/networkx.git
16
block_parser
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
    block = []
    lines = part.split('\n')
    N = len(lines)
    i = 0
    decorator = None
    while 1:
        if i==N:
            # nothing left to parse -- the last line
            break

        line = lines[i]
        i += 1
        line_stripped = line.strip()
        if line_stripped.startswith('#'):
            block.append((COMMENT, line))
            continue

        if any(
            line_stripped.startswith('@' + pseudo_decorator) for pseudo_decorator in PSEUDO_DECORATORS
        ):
            if decorator:
                raise RuntimeError("Applying multiple pseudo-decorators on one line is not supported")
            else:
                decorator = line_stripped
                continue

        # does this look like an input line?
        matchin = rgxin.match(line)
        if matchin:
            lineno, inputline = int(matchin.group(1)), matchin.group(2)

            # the ....: continuation string
            continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
            Nc = len(continuation)
            # input lines can continue on for more than one line, if
            # we have a '\' line continuation char or a function call
            # echo line 'print'. The input line can only be
            # terminated by the end of the block or an output line, so
            # we parse out the rest of the input line if it is
            # multiline as well as any echo text

            rest = []
            while i<N:
                # look ahead; if the next line is blank, or a comment, or
                # an output line, we're done

                nextline = lines[i]
                matchout = rgxout.match(nextline)
                #print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
                if matchout or nextline.startswith('#'):
                    break
                elif nextline.startswith(continuation):
                    # The default ipython_rgx* treat the space following the colon as optional.
                    # However, If the space is there we must consume it or code
                    # employing the cython_magic extension will fail to execute.
                    #
                    # This works with the default ipython_rgx* patterns,
                    # If you modify them, YMMV.
                    nextline = nextline[Nc:]
                    if nextline and nextline[0] == ' ':
                        nextline = nextline[1:]

                    inputline += '\n' + nextline
                else:
                    rest.append(nextline)
                i+= 1

            block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
            continue

        # if it looks like an output line grab all the text to the end
        # of the block
        matchout = rgxout.match(line)
        if matchout:
            lineno, output = int(matchout.group(1)), matchout.group(2)
            if i<N-1:
                output = '\n'.join([output] + lines[i:])

            block.append((OUTPUT, output))
            break

    return block
a9b523c7047fe12c49373972c6b092ed5fc29e99
20
ipython_directive.py
598
match only pseudo-decorators
52,443
0
1,178
345
184
208,657
334
ipython
39
IPython/sphinxext/ipython_directive.py
Python
52
{ "docstring": "\n part is a string of ipython text, comprised of at most one\n input, one output, comments, and blank lines. The block parser\n parses the text into a list of::\n\n blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]\n\n where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and\n data is, depending on the type of token::\n\n COMMENT : the comment string\n\n INPUT: the (DECORATOR, INPUT_LINE, REST) where\n DECORATOR: the input decorator (or None)\n INPUT_LINE: the input as string (possibly multi-line)\n REST : any stdout generated by the input line (not OUTPUT)\n\n OUTPUT: the output string, possibly multi-line\n\n ", "language": "en", "n_whitespaces": 162, "n_words": 98, "vocab_size": 76 }
https://github.com/ipython/ipython.git
6
update_direct_sparsity
def update_direct_sparsity(self, node):
    # this name is consistent with the name returned by named_modules()
    module_name = node.name
    _logger.info('Update mask for %s', module_name)
    unique_name = node.unique_name
    dummy_input, input_debugname = self._prepare_dummy_input(node)
    # get the input mask from self.masks
    # Note: the input mask of the successor nodes are
    # already created by the predecessor node
    in_masks = [self.masks[debugname] for debugname in input_debugname]
    in_constants = [self.constant[debugname] for debugname in input_debugname]
    if node.type == 'func':
        # we cannot get the runable function directly from the jit traced
        # graph, so we translate it back to python function, Note: the function
        # is appliable to both cpu/gpu devices, the output tensors will be on the
        # same device of the input tensors
        func = jit_to_python_function(node, self)
        if func is None:
            # no need to infer the sparsity for this node
            self.auto_inferences[unique_name] = None
            return
        # function doesn't have weights
        _auto_infer = AutoMaskInference(
            func, dummy_input, self, in_masks, in_constants=in_constants)
    else:
        weight_mask = None
        if module_name in self.masks:
            weight_mask = self.masks[module_name]
        _, module = get_module_by_name(self.bound_model, module_name)
        _auto_infer = AutoMaskInference(
            module, dummy_input, self, in_masks, weight_mask, in_constants=in_constants,
            state_dict=copy.deepcopy(module.state_dict()))
    self.auto_inferences[unique_name] = _auto_infer
    _auto_infer.name = node.unique_name

    _auto_infer.update_direct_sparsity()
    # also save the input debug names into the auto_infer
    _auto_infer.input_debugname = input_debugname
    # update the mask tensor and the internal output of the submodules
    # after manually unpack the tuple/list of tensors, the number of the outputs
    # of each node should always be one(Except for the TupleUnpack node at the end
    # of the whole model)
    assert len(
        node.outputs) == 1, 'The number of the output should be one after the Tuple unpacked manually'
    out_debugname = node.outputs[0]
    # update the output mask into self.masks
    self.masks[out_debugname] = _auto_infer.output_mask
    self.constant[out_debugname] = _auto_infer.out_constant
    # update the output result into self.internal_result, so that
    # the successor nodes can take these output tensors as inputs.
    self.internal_result[out_debugname] = _auto_infer.output
    # update the parameter mask of the node
    self.masks[module_name] = _auto_infer.weight_mask
97d067e614243f06ed1f8e2d389512977fff8828
16
compressor.py
411
Speedup enhancement (#4925)
24,873
0
806
256
164
113,264
311
nni
37
nni/compression/pytorch/speedup/compressor.py
Python
34
{ "docstring": "\n Update the direct sparsity for the target node. Here the direct sparsity\n means that the sparsity in the output tensor that caused by the sparsity\n in the input tensors/weight tensors.\n ", "language": "en", "n_whitespaces": 59, "n_words": 30, "vocab_size": 18 }
https://github.com/microsoft/nni.git
4
read_graph6
def read_graph6(path):
    glist = []
    for line in path:
        line = line.strip()
        if not len(line):
            continue
        glist.append(from_graph6_bytes(line))
    if len(glist) == 1:
        return glist[0]
    else:
        return glist


@not_implemented_for("directed")
@not_implemented_for("multigraph")
@open_file(1, mode="wb")
9b63ca1a0d46a1f50bcc59eda52be02721a134db
@not_implemented_for("directed") @not_implemented_for("multigraph") @open_file(1, mode="wb")
11
graph6.py
133
Remove old Appveyor cruft (#5924) * Remove old Appveyor cruft * Fix Windows issue
42,277
1
88
56
25
177,122
30
networkx
11
networkx/readwrite/graph6.py
Python
11
{ "docstring": "Read simple undirected graphs in graph6 format from path.\n\n Parameters\n ----------\n path : file or string\n File or filename to write.\n\n Returns\n -------\n G : Graph or list of Graphs\n If the file contains multiple lines then a list of graphs is returned\n\n Raises\n ------\n NetworkXError\n If the string is unable to be parsed in graph6 format\n\n Examples\n --------\n You can read a graph6 file by giving the path to the file::\n\n >>> import tempfile\n >>> with tempfile.NamedTemporaryFile(delete=False) as f:\n ... _ = f.write(b\">>graph6<<A_\\\\n\")\n ... _ = f.seek(0)\n ... G = nx.read_graph6(f.name)\n >>> list(G.edges())\n [(0, 1)]\n\n You can also read a graph6 file by giving an open file-like object::\n\n >>> import tempfile\n >>> with tempfile.NamedTemporaryFile() as f:\n ... _ = f.write(b\">>graph6<<A_\\\\n\")\n ... _ = f.seek(0)\n ... G = nx.read_graph6(f)\n >>> list(G.edges())\n [(0, 1)]\n\n See Also\n --------\n from_graph6_bytes, write_graph6\n\n References\n ----------\n .. [1] Graph6 specification\n <http://users.cecs.anu.edu.au/~bdm/data/formats.html>\n\n ", "language": "en", "n_whitespaces": 356, "n_words": 145, "vocab_size": 83 }
https://github.com/networkx/networkx.git
7
split_ref_from_uri
def split_ref_from_uri(uri):
    # type: (AnyStr) -> Tuple[AnyStr, Optional[AnyStr]]
    if not isinstance(uri, str):
        raise TypeError("Expected a string, received {0!r}".format(uri))
    parsed = _get_parsed_url(uri)
    path = parsed.path if parsed.path else ""
    scheme = parsed.scheme if parsed.scheme else ""
    ref = None
    if scheme != "file" and (re.match("^.*@[^/@]*$", path) or path.count("@") >= 2):
        path, _, ref = path.rpartition("@")
        parsed = parsed._replace(path=path)
    return (parsed.url, ref)
2f6a04b89a70879f40a42d7d2ce662468f6e87ca
12
utils.py
188
5132 Vendor in latest requirementslib. (#5151) * 5132 Vendor in latest requirementslib.
3,737
0
104
111
45
21,253
60
pipenv
18
pipenv/vendor/requirementslib/models/utils.py
Python
11
{ "docstring": "Given a path or URI, check for a ref and split it from the path if it is\n present, returning a tuple of the original input and the ref or None.\n\n :param AnyStr uri: The path or URI to split\n :returns: A 2-tuple of the path or URI and the ref\n :rtype: Tuple[AnyStr, Optional[AnyStr]]\n ", "language": "en", "n_whitespaces": 69, "n_words": 54, "vocab_size": 34 }
https://github.com/pypa/pipenv.git
1
mock_update_duration_fixture
def mock_update_duration_fixture(mock_update):
    mock_update.return_value = {
        "rows": [
            {
                "elements": [
                    {
                        "duration": {
                            "value": 1560,
                            "text": "26 mins",
                        },
                        "distance": {"text": "21.3 km"},
                    }
                ]
            }
        ]
    }
    yield mock_update


@pytest.fixture(name="mock_update_empty")
beb30a1ff199596163c655e8ae745a0f1649b78a
@pytest.fixture(name="mock_update_empty")
17
test_sensor.py
108
Add google_travel_time sensor tests (#66568) Co-authored-by: Paulus Schoutsen <[email protected]>
91,329
1
269
47
24
292,229
31
core
6
tests/components/google_travel_time/test_sensor.py
Python
17
{ "docstring": "Mock an update to the sensor returning no duration_in_traffic.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
1
async_process_entity_map
async def async_process_entity_map(self) -> None:
    # Ensure the Pairing object has access to the latest version of the entity map. This
    # is especially important for BLE, as the Pairing instance relies on the entity map
    # to map aid/iid to GATT characteristics. So push it to there as well.
    self.async_detect_workarounds()

    # Migrate to new device ids
    self.async_migrate_devices()

    # Remove any of the legacy serial numbers from the device registry
    self.async_remove_legacy_device_serial_numbers()

    self.async_create_devices()

    # Load any triggers for this config entry
    await async_setup_triggers_for_entry(self.hass, self.config_entry)
f23b1750e85f07091eb896a0b12b8f95e5646338
9
connection.py
76
Migrate HomeKit Controller to use stable identifiers (#80064)
87,991
0
167
39
60
288,842
83
core
9
homeassistant/components/homekit_controller/connection.py
Python
12
{ "docstring": "\n Process the entity map and load any platforms or entities that need adding.\n\n This is idempotent and will be called at startup and when we detect metadata changes\n via the c# counter on the zeroconf record.\n ", "language": "en", "n_whitespaces": 65, "n_words": 36, "vocab_size": 32 }
https://github.com/home-assistant/core.git
4
patch_mac_app
def patch_mac_app() -> None:
    dist_path = pathlib.Path('dist')
    app_path = dist_path / 'qutebrowser.app'
    contents_path = app_path / 'Contents'
    macos_path = contents_path / 'MacOS'
    resources_path = contents_path / 'Resources'
    pyqt_path = macos_path / 'PyQt5'

    # Replace some duplicate files by symlinks
    framework_path = pyqt_path / 'Qt5' / 'lib' / 'QtWebEngineCore.framework'

    core_lib = framework_path / 'Versions' / '5' / 'QtWebEngineCore'
    core_lib.unlink()
    core_target = pathlib.Path(*[os.pardir] * 7, 'MacOS', 'QtWebEngineCore')
    core_lib.symlink_to(core_target)

    framework_resource_path = framework_path / 'Resources'
    for file_path in framework_resource_path.iterdir():
        target = pathlib.Path(*[os.pardir] * 5, file_path.name)
        if file_path.is_dir():
            shutil.rmtree(file_path)
        else:
            file_path.unlink()
        file_path.symlink_to(target)

    # Move stuff around to make things signable on macOS
    # See https://github.com/pyinstaller/pyinstaller/issues/6612
    pyqt_path_dest = resources_path / pyqt_path.name
    shutil.move(pyqt_path, pyqt_path_dest)
    pyqt_path_target = pathlib.Path("..") / pyqt_path_dest.relative_to(contents_path)
    pyqt_path.symlink_to(pyqt_path_target)
    for path in macos_path.glob("Qt*"):
        link_path = resources_path / path.name
        target_path = pathlib.Path("..") / path.relative_to(contents_path)
        link_path.symlink_to(target_path)
660e776a15c02f5577d7aca075bb0e3f8b142831
14
build_release.py
395
build-release: Sign macOS .app properly Based on https://github.com/pyinstaller/pyinstaller/issues/6612 Might help with #6771.
117,462
0
265
221
81
320,956
128
qutebrowser
32
scripts/dev/build_release.py
Python
29
{ "docstring": "Patch .app to save some space and make it signable.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/qutebrowser/qutebrowser.git
5
expand
def expand(image, border=0, fill=0):
    left, top, right, bottom = _border(border)
    width = left + image.size[0] + right
    height = top + image.size[1] + bottom
    color = _color(fill, image.mode)
    if image.mode == "P" and image.palette:
        palette = ImagePalette.ImagePalette(palette=image.getpalette())
        if isinstance(color, tuple):
            color = palette.getcolor(color)
    else:
        palette = None
    out = Image.new(image.mode, (width, height), color)
    if palette:
        out.putpalette(palette.palette)
    out.paste(image, (left, top))
    return out
279ddf4ce6c76498ac29df2552a3023b9aaa76c1
13
ImageOps.py
230
Use getpalette() in ImageOps
70,030
0
133
149
45
243,427
61
Pillow
26
src/PIL/ImageOps.py
Python
16
{ "docstring": "\n Add border to the image\n\n :param image: The image to expand.\n :param border: Border width, in pixels.\n :param fill: Pixel fill value (a color value). Default is 0 (black).\n :return: An image.\n ", "language": "en", "n_whitespaces": 52, "n_words": 32, "vocab_size": 28 }
https://github.com/python-pillow/Pillow.git
8
get_policy_data_from_agent_data
def get_policy_data_from_agent_data(agent_data, policy_map_fn):
    policy_data = {}
    for agent_id, data in agent_data.items():
        policy_id = policy_map_fn(agent_id)
        policy_data.setdefault(policy_id, {})
        policy_data[policy_id].setdefault("agent_id", [])

        if data["obs"].ndim == 1:
            policy_data[policy_id]["agent_id"].append(agent_id)
        else:
            policy_data[policy_id]["agent_id"] += [agent_id] * len(data["obs"])

        for k, v in data.items():
            policy_data[policy_id].setdefault(k, [])
            if v.ndim == 1:
                v = v[None]
            policy_data[policy_id][k].append(v)

    for policy_id in policy_data:
        policy_data[policy_id] = {
            k: np.concatenate(v) if k != "agent_id" else v
            for k, v in policy_data[policy_id].items()
        }

    return policy_data
30058267363b8de16b809c987bb1f7d7befad24d
16
test_torch_marl_module.py
291
[RLlib] MARLModule, RLModule PR 4/N (N=4) (#29449) Signed-off-by: Kourosh Hakhamaneshi <[email protected]>
30,625
0
230
182
47
135,458
67
ray
16
rllib/core/rl_module/torch/tests/test_torch_marl_module.py
Python
21
{ "docstring": "Utility function to get policy data from agent data and policy map function.\n\n It also keeps track of agent_id for each row so that we can retreive the agent\n level information after the forward pass.\n\n Returns:\n dict of module_id to module data\n ", "language": "en", "n_whitespaces": 61, "n_words": 42, "vocab_size": 35 }
https://github.com/ray-project/ray.git
1
cli
def cli():
    ...


option_verbose = click.option(
    "--verbose",
    is_flag=True,
    help="Print verbose information about performed steps",
)

option_assume_yes = click.option(
    "--assume-yes",
    is_flag=True,
    help="Assume yes answer to question",
)

option_previous_release = click.option(
    "--previous-release",
    type=str,
    required=True,
    help="commit reference (for example hash or tag) of the previous release.",
)

option_current_release = click.option(
    "--current-release",
    type=str,
    required=True,
    help="commit reference (for example hash or tag) of the current release.",
)

option_github_token = click.option(
    "--github-token",
    type=str,
    required=True,
    help=textwrap.dedent(
    ),
    envvar='GITHUB_TOKEN',
)

option_limit_pr_count = click.option(
    "--limit-pr-count",
    type=int,
    default=None,
    help="Limit PR count processes (useful for testing small subset of PRs).",
)

option_dry_run = click.option(
    "--dry-run",
    is_flag=True,
    help="Do not make any changes, just show what would have been done",
)

option_skip_assigned = click.option(
    "--skip-assigned",
    is_flag=True,
    help="Skip PRs already correctly assigned to the right milestone",
)

option_milestone_number = click.option(
    "--milestone-number",
    type=int,
    required=True,
    help="Milestone number to set. See https://github.com/apache/airflow/milestones to find milestone id",
)

option_print_summary = click.option(
    "--print-summary",
    is_flag=True,
    help="Produce summary of the changes cherry-picked in the file specified. Implies --skip-assigned",
)

option_output_folder = click.option(
    "--output-folder",
    type=str,
    help="Folder where files with commit hashes will be store. Implies --print-summary and --skip-assigned",
)
bc1f062bdebd5a92b650e2316d4d98d2097388ca
10
assign_cherry_picked_prs_with_milestone.py
358
Add dev tool to review and classify cherry-picked commits (#21032) Until we have Towncrier, this is a useful tool to classify commits to one of three categories (in v*-test) branches 1) a/add - add to milestone 2) d/doc - doc-only change 3) e/excluded - change that is skipped from changelog (dev tools) This is done via label and milestone assignment. We can also skip the PR or quit. Information about the PR is nicely printed including its current labels and URL that allows to quickly review the PR in question.
8,197
0
285
5
116
44,213
177
airflow
24
dev/assign_cherry_picked_prs_with_milestone.py
Python
2
{ "docstring": "\n Github token used to authenticate.\n You can set omit it if you have GITHUB_TOKEN env variable set\n Can be generated with:\n https://github.com/settings/tokens/new?description=Read%20Write%20isssues&scopes=repo", "language": "en", "n_whitespaces": 50, "n_words": 22, "vocab_size": 21 }
https://github.com/apache/airflow.git
4
delete_systemd_cgroup_v1
def delete_systemd_cgroup_v1(self) -> None:
    # Privileged mode is required to remove the cgroup directories on some hosts, such as Fedora 36 and RHEL 9.0.
    # The BusyBox find utility will report "Permission denied" otherwise, although it still exits with a status code of 0.
    options = ['--volume', '/sys/fs/cgroup/systemd:/sys/fs/cgroup/systemd:rw', '--privileged']
    cmd = ['sh', '-c', f'>&2 echo {shlex.quote(self.MARKER)} && {shlex.join(self.delete_systemd_cgroup_v1_command)}']

    try:
        run_utility_container(self.args, f'ansible-test-cgroup-delete-{self.label}', cmd, options)
    except SubprocessError as ex:
        if error := self.extract_error(ex.stderr):
            if error.endswith(': No such file or directory'):
                return

        display.error(str(ex))
cda16cc5e9aa8703fb4e1ac0a0be6b631d9076cc
13
host_profiles.py
173
ansible-test - Improve container management. (#78550) See changelogs/fragments/ansible-test-container-management.yml for details.
79,632
0
196
77
75
268,732
80
ansible
20
test/lib/ansible_test/_internal/host_profiles.py
Python
11
{ "docstring": "Delete a previously created ansible-test cgroup in the v1 systemd hierarchy.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/ansible/ansible.git
3
iterate_tree_cache_items
def iterate_tree_cache_items(key, value):
    if isinstance(value, TreeCacheNode):
        for sub_key, sub_value in value.items():
            yield from iterate_tree_cache_items((*key, sub_key), sub_value)
    else:
        # we've reached a leaf of the tree.
        yield key, value
0b87eb8e0c8e2dd4a426005dce53dfdd57282475
14
treecache.py
75
Make DictionaryCache have better expiry properties (#13292)
72,514
0
69
46
27
248,927
28
synapse
8
synapse/util/caches/treecache.py
Python
6
{ "docstring": "Helper function to iterate over the leaves of a tree, i.e. a dict of that\n can contain dicts.\n\n The provided key is a tuple that will get prepended to the returned keys.\n\n Example:\n\n cache = TreeCache()\n cache[(1, 1)] = \"a\"\n cache[(1, 2)] = \"b\"\n cache[(2, 1)] = \"c\"\n\n tree_node = cache.get((1,))\n\n items = iterate_tree_cache_items((1,), tree_node)\n assert list(items) == [((1, 1), \"a\"), ((1, 2), \"b\")]\n\n Returns:\n A generator yielding key/value pairs.\n ", "language": "en", "n_whitespaces": 141, "n_words": 70, "vocab_size": 57 }
https://github.com/matrix-org/synapse.git
1
deserialize
def deserialize(config, custom_objects=None):
    return deserialize_keras_object(
        config,
        module_objects=globals(),
        custom_objects=custom_objects,
        printable_module_name='metric function')


@keras_export('keras.metrics.get')
b4dca51d0558e788f62a96d1009a07f773a202f4
@keras_export('keras.metrics.get')
10
__init__.py
58
Refactor disparate metrics-related files into a single metrics folder. Further work may be needed to split up the long file with individual metric definitions. However having a single file per metric may be too granular. TBD. PiperOrigin-RevId: 425248502
79,747
1
32
29
11
268,880
11
keras
8
keras/metrics/__init__.py
Python
6
{ "docstring": "Deserializes a serialized metric class/function instance.\n\n Args:\n config: Metric configuration.\n custom_objects: Optional dictionary mapping names (strings) to custom\n objects (classes and functions) to be considered during deserialization.\n\n Returns:\n A Keras `Metric` instance or a metric function.\n ", "language": "en", "n_whitespaces": 55, "n_words": 36, "vocab_size": 33 }
https://github.com/keras-team/keras.git
4
get_top_k_scored_items
def get_top_k_scored_items(scores, top_k, sort_top_k=False):
    # ensure we're working with a dense ndarray
    if isinstance(scores, sparse.spmatrix):
        scores = scores.todense()

    if scores.shape[1] < top_k:
        logger.warning(
            "Number of items is less than top_k, limiting top_k to number of items"
        )
    k = min(top_k, scores.shape[1])

    test_user_idx = np.arange(scores.shape[0])[:, None]

    # get top K items and scores
    # this determines the un-ordered top-k item indices for each user
    top_items = np.argpartition(scores, -k, axis=1)[:, -k:]
    top_scores = scores[test_user_idx, top_items]

    if sort_top_k:
        sort_ind = np.argsort(-top_scores)
        top_items = top_items[test_user_idx, sort_ind]
        top_scores = top_scores[test_user_idx, sort_ind]

    return np.array(top_items), np.array(top_scores)
1d7341e93d1f03387699fb3c6ae0b6c0e464296f
11
python_utils.py
231
Add new item similarity metrics for SAR (#1754) * Add mutual information similarity in SAR * Add lexicographers mutual information similarity for SAR * Add cosine similarity for SAR * Add inclusion index for SAR * Typos * Change SARSingleNode to SAR * Convert item similarity matrix to np.array * Update * Update SAR tests * Remove unused imports * Add explanations for new similarity metrics
7,236
0
178
148
71
39,444
89
recommenders
23
recommenders/utils/python_utils.py
Python
16
{ "docstring": "Extract top K items from a matrix of scores for each user-item pair, optionally sort results per user.\n\n Args:\n scores (numpy.ndarray): Score matrix (users x items).\n top_k (int): Number of top items to recommend.\n sort_top_k (bool): Flag to sort top k results.\n\n Returns:\n numpy.ndarray, numpy.ndarray:\n - Indices into score matrix for each user's top items.\n - Scores corresponding to top items.\n\n ", "language": "en", "n_whitespaces": 112, "n_words": 61, "vocab_size": 45 }
https://github.com/microsoft/recommenders.git
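The get_top_k_scored_items row above relies on np.argpartition to pull an unordered top-k per row before an optional argsort. Here is a small standalone demo of that trick on a made-up score matrix; the numbers are illustrative only.

    import numpy as np

    scores = np.array([[0.1, 0.9, 0.3, 0.7],
                       [0.8, 0.2, 0.6, 0.4]])
    k = 2

    # Unordered indices of the k largest entries in each row (linear time per row).
    top_items = np.argpartition(scores, -k, axis=1)[:, -k:]
    rows = np.arange(scores.shape[0])[:, None]
    top_scores = scores[rows, top_items]

    # Optional: order the k picks from best to worst within each row.
    order = np.argsort(-top_scores, axis=1)
    print(top_items[rows, order])   # [[1 3] [0 2]]
    print(top_scores[rows, order])  # [[0.9 0.7] [0.8 0.6]]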
19
curse_add_stat
def curse_add_stat(self, key, width=None, header='', display_key=True, separator='', trailer=''):
    if key not in self.stats:
        return []

    # Check if a shortname is defined
    if key in self.fields_description and 'short_name' in self.fields_description[key]:
        key_name = self.fields_description[key]['short_name']
    else:
        key_name = key
    if not display_key:
        key_name = ''

    # Check if unit is defined and get the short unit char in the unit_sort dict
    if (
        key in self.fields_description
        and 'unit' in self.fields_description[key]
        and self.fields_description[key]['unit'] in fields_unit_short
    ):
        # Get the shortname
        unit_short = fields_unit_short[self.fields_description[key]['unit']]
    else:
        unit_short = ''

    # Check if unit is defined and get the unit type unit_type dict
    if (
        key in self.fields_description
        and 'unit' in self.fields_description[key]
        and self.fields_description[key]['unit'] in fields_unit_type
    ):
        # Get the shortname
        unit_type = fields_unit_type[self.fields_description[key]['unit']]
    else:
        unit_type = 'float'

    # Is it a rate ? Yes, compute it thanks to the time_since_update key
    if (
        key in self.fields_description
        and 'rate' in self.fields_description[key]
        and self.fields_description[key]['rate'] is True
    ):
        value = self.stats[key] // self.stats['time_since_update']
    else:
        value = self.stats[key]

    if width is None:
        msg_item = header + '{}'.format(key_name) + separator
        if unit_type == 'float':
            msg_value = '{:.1f}{}'.format(value, unit_short) + trailer
        elif 'min_symbol' in self.fields_description[key]:
            msg_value = (
                '{}{}'.format(
                    self.auto_unit(int(value), min_symbol=self.fields_description[key]['min_symbol']), unit_short
                )
                + trailer
            )
        else:
            msg_value = '{}{}'.format(int(value), unit_short) + trailer
    else:
        # Define the size of the message
        # item will be on the left
        # value will be on the right
        msg_item = header + '{:{width}}'.format(key_name, width=width - 7) + separator
        if unit_type == 'float':
            msg_value = '{:5.1f}{}'.format(value, unit_short) + trailer
        elif 'min_symbol' in self.fields_description[key]:
            msg_value = (
                '{:>5}{}'.format(
                    self.auto_unit(int(value), min_symbol=self.fields_description[key]['min_symbol']), unit_short
                )
                + trailer
            )
        else:
            msg_value = '{:>5}{}'.format(int(value), unit_short) + trailer

    decoration = self.get_views(key=key, option='decoration')
    optional = self.get_views(key=key, option='optional')

    return [
        self.curse_add_line(msg_item, optional=optional),
        self.curse_add_line(msg_value, decoration=decoration, optional=optional),
    ]
586ebd7099fb6fca47cf04f632c8cbf7f0450500
22
glances_plugin.py
797
Refactor comment
15,223
0
1,104
475
116
69,984
282
glances
27
glances/plugins/glances_plugin.py
Python
65
{ "docstring": "Return a list of dict messages with the 'key: value' result\n\n <=== width ===>\n __key : 80.5%__\n | | | | |_ trailer\n | | | |_ self.stats[key]\n | | |_ separator\n | |_ key (if display_key is True)\n |_ header\n\n Instead of:\n msg = ' {:8}'.format('idle:')\n ret.append(self.curse_add_line(msg, optional=self.get_views(key='idle', option='optional')))\n msg = '{:5.1f}%'.format(self.stats['idle'])\n ret.append(self.curse_add_line(msg, optional=self.get_views(key='idle', option='optional')))\n\n Use:\n ret.extend(self.curse_add_stat('idle', width=15, header=' '))\n\n ", "language": "en", "n_whitespaces": 215, "n_words": 61, "vocab_size": 43 }
https://github.com/nicolargo/glances.git
3
loss_labels
def loss_labels(self, outputs, targets, indices, num_boxes):
    if "logits" not in outputs:
        raise KeyError("No logits were found in the outputs")
    src_logits = outputs["logits"]

    idx = self._get_src_permutation_idx(indices)
    target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)])
    target_classes = torch.full(
        src_logits.shape[:2], self.num_classes, dtype=torch.int64, device=src_logits.device
    )
    target_classes[idx] = target_classes_o

    loss_ce = nn.functional.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
    losses = {"loss_ce": loss_ce}

    return losses
cc034f72eb6137f4c550e911fba67f8a0e1e98fa
12
modeling_detr.py
213
Replace assertion with exception (#16720) * Updated assertions to exceptions * updated assertions to exceptions * bug fixes * fix-copies * Update modeling_ctrl.py * Update src/transformers/models/ctrl/modeling_tf_ctrl.py Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/models/gpt_neo/modeling_gpt_neo.py Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/models/gptj/modeling_gptj.py Co-authored-by: Sylvain Gugger <[email protected]> * Update src/transformers/models/gptj/modeling_tf_gptj.py Co-authored-by: Sylvain Gugger <[email protected]> * Update modeling_led.py * Update modeling_led.py * Update modeling_led.py Co-authored-by: Sylvain Gugger <[email protected]>
6,726
0
157
138
48
37,071
58
transformers
31
src/transformers/models/detr/modeling_detr.py
Python
13
{ "docstring": "\n Classification loss (NLL) targets dicts must contain the key \"class_labels\" containing a tensor of dim\n [nb_target_boxes]\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 16 }
https://github.com/huggingface/transformers.git
3
get_func
def get_func(cls, key, **kwargs):
    if "agg_func" in kwargs:
        return cls.inplace_applyier_builder(key, kwargs["agg_func"])
    elif "func_dict" in kwargs:
        return cls.inplace_applyier_builder(key, kwargs["func_dict"])
    else:
        return cls.inplace_applyier_builder(key)
1e65a4afd191cf61ba05b80545d23f9b88962f41
12
groupby.py
92
FIX-#3197: do not pass lambdas to the backend in GroupBy (#3373) Signed-off-by: Dmitry Chigarev <[email protected]>
35,257
0
82
54
16
153,097
21
modin
5
modin/core/dataframe/algebra/default2pandas/groupby.py
Python
7
{ "docstring": "\n Extract aggregation function from groupby arguments.\n\n Parameters\n ----------\n key : callable or str\n Default aggregation function. If aggregation function is not specified\n via groupby arguments, then `key` function is used.\n **kwargs : dict\n GroupBy arguments that may contain aggregation function.\n\n Returns\n -------\n callable\n Aggregation function.\n\n Notes\n -----\n There are two ways of how groupby aggregation can be invoked:\n 1. Explicitly with query compiler method: `qc.groupby_sum()`.\n 2. By passing aggregation function as an argument: `qc.groupby_agg(\"sum\")`.\n Both are going to produce the same result, however in the first case actual aggregation\n function can be extracted from the method name, while for the second only from the method arguments.\n ", "language": "en", "n_whitespaces": 271, "n_words": 106, "vocab_size": 78 }
https://github.com/modin-project/modin.git
11
check_validation_split_arg
def check_validation_split_arg(validation_split, subset, shuffle, seed):
    if validation_split and not 0 < validation_split < 1:
        raise ValueError(
            '`validation_split` must be between 0 and 1, received: %s' % (validation_split,))
    if (validation_split or subset) and not (validation_split and subset):
        raise ValueError(
            'If `subset` is set, `validation_split` must be set, and inversely.')
    if subset not in ('training', 'validation', 'both', None):
        raise ValueError('`subset` must be either "training", '
                         '"validation" or "both", received: %s' % (subset,))
    if validation_split and shuffle and seed is None:
        raise ValueError(
            'If using `validation_split` and shuffling the data, you must provide '
            'a `seed` argument, to make sure that there is no overlap between the '
            'training and validation subset.')
c52c11968b096580577c75b169f51c5b39002106
12
dataset_utils.py
159
Updated tests for subset="both"
79,952
0
188
92
68
269,208
109
keras
6
keras/utils/dataset_utils.py
Python
16
{ "docstring": "Raise errors in case of invalid argument values.\n\n Args:\n validation_split: float between 0 and 1, fraction of data to reserve for\n validation.\n subset: One of \"training\", \"validation\" or \"both\". Only used if `validation_split`\n is set.\n shuffle: Whether to shuffle the data. Either True or False.\n seed: random seed for shuffling and transformations.\n ", "language": "en", "n_whitespaces": 76, "n_words": 52, "vocab_size": 46 }
https://github.com/keras-team/keras.git
2
prepare_test_img
def prepare_test_img(self, idx):
    img_info = self.data_infos[idx]
    results = dict(img_info=img_info)
    if self.proposals is not None:
        results['proposals'] = self.proposals[idx]
    self.pre_pipeline(results)
    return self.pipeline(results)
1516986a616fee8bb741d0ab2be40683045efccd
10
custom.py
92
[Feature] Support OpenImages Dataset (#6331) * [Feature] support openimage group of eval * [Feature] support openimage group of eval * support openimage dataset * support openimage challenge dataset * fully support OpenImages-V6 and OpenImages Challenge 2019 * Fix some logic error * update config file * fix get data_infos error * fully support OpenImages evaluation * update OpenImages config files * [Feature] support OpenImages datasets * fix bug * support load image metas from pipeline * fix bug * fix get classes logic error * update code * support get image metas * support openimags * support collect image metas * support Open Images * fix openimages logic * minor fix * add a new function to compute openimages tpfp * minor fix * fix ci error * minor fix * fix indication * minor fix * fix returns * fix returns * fix returns * fix returns * fix returns * minor fix * update readme * support loading image level labels and fix some logic * minor fix * minor fix * add class names * minor fix * minor fix * minor fix * add openimages test unit * minor fix * minor fix * fix test unit * minor fix * fix logic error * minor fix * fully support openimages * minor fix * fix docstring * fix docstrings in readthedocs * update get image metas script * label_description_file -> label_file * update openimages readme * fix test unit * fix test unit * minor fix * update readme file * Update get_image_metas.py
70,180
0
73
56
18
243,990
20
mmdetection
10
mmdet/datasets/custom.py
Python
7
{ "docstring": "Get testing data after pipeline.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Testing data after pipeline with new keys introduced by \\\n pipeline.\n ", "language": "en", "n_whitespaces": 82, "n_words": 24, "vocab_size": 21 }
https://github.com/open-mmlab/mmdetection.git
2
get_redirect_target
def get_redirect_target(self, resp):
    # Due to the nature of how requests processes redirects this method will
    # be called at least once upon the original response and at least twice
    # on each subsequent redirect response (if any).

    # If a custom mixin is used to handle this logic, it may be advantageous
    # to cache the redirect location onto the response object as a private
    # attribute.
    if resp.is_redirect:
        location = resp.headers["location"]
        # Currently the underlying http module on py3 decode headers
        # in latin1, but empirical evidence suggests that latin1 is very
        # rarely used with non-ASCII characters in HTTP headers.
        # It is more likely to get UTF8 header rather than latin1.
        # This causes incorrect handling of UTF8 encoded location headers.
        # To solve this, we re-encode the location in latin1.
        location = location.encode("latin1")
        return to_native_string(location, "utf8")
    return None
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
11
sessions.py
79
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
4,187
0
305
38
100
22,111
143
pipenv
8
pipenv/patched/pip/_vendor/requests/sessions.py
Python
6
{ "docstring": "Receives a Response. Returns a redirect URI or ``None``", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
https://github.com/pypa/pipenv.git
4
get_type_hints
def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
    if hasattr(typing, "Annotated"):
        hint = typing.get_type_hints(
            obj, globalns=globalns, localns=localns, include_extras=True
        )
    else:
        hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
    if include_extras:
        return hint
    return {k: _strip_extras(t) for k, t in hint.items()}


# Python 3.9+ has PEP 593 (Annotated)
if hasattr(typing, 'Annotated'):
    Annotated = typing.Annotated
    # Not exported and not a public API, but needed for get_origin() and get_args()
    # to work.
    _AnnotatedAlias = typing._AnnotatedAlias
# 3.7-3.8
else:
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
12
typing_extensions.py
172
Vendor in pip 22.1.2
3,948
0
172
88
54
21,600
70
pipenv
14
pipenv/patched/notpip/_vendor/typing_extensions.py
Python
10
{ "docstring": "Return type hints for an object.\n\n This is often the same as obj.__annotations__, but it handles\n forward references encoded as string literals, adds Optional[t] if a\n default value equal to None is set and recursively replaces all\n 'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T'\n (unless 'include_extras=True').\n\n The argument may be a module, class, method, or function. The annotations\n are returned as a dictionary. For classes, annotations include also\n inherited members.\n\n TypeError is raised if the argument is not of a type that can contain\n annotations, and an empty dictionary is returned if no annotations are\n present.\n\n BEWARE -- the behavior of globalns and localns is counterintuitive\n (unless you are familiar with how eval() and exec() work). The\n search order is locals first, then globals.\n\n - If no dict arguments are passed, an attempt is made to use the\n globals from obj (or the respective module's globals for classes),\n and these are also used as the locals. If the object does not appear\n to have globals, an empty dictionary is used.\n\n - If one dict argument is passed, it is used for both globals and\n locals.\n\n - If two dict arguments are passed, they specify globals and\n locals, respectively.\n ", "language": "en", "n_whitespaces": 371, "n_words": 198, "vocab_size": 123 }
https://github.com/pypa/pipenv.git
3
color_temp_supported
def color_temp_supported(self) -> bool:
    return (
        self.color_capabilities is not None
        and lighting.Color.ColorCapabilities.Color_temperature
        in self.color_capabilities
    ) or self.color_temperature is not None
df67a8cd4f8df91a153778009a74be1e3876ca53
12
lighting.py
57
Fix ZHA light color temp support (#76305)
102,289
0
74
36
16
303,469
20
core
9
homeassistant/components/zha/core/channels/lighting.py
Python
7
{ "docstring": "Return True if the channel supports color temperature.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
8
partial_fit
def partial_fit(self, X, y, classes=None):
    if _check_partial_fit_first_call(self, classes):
        self._validate_params()
        if not hasattr(self.estimator, "partial_fit"):
            raise ValueError(
                ("Base estimator {0}, doesn't have partial_fit method").format(
                    self.estimator
                )
            )
        self.estimators_ = [clone(self.estimator) for _ in range(self.n_classes_)]

        # A sparse LabelBinarizer, with sparse_output=True, has been
        # shown to outperform or match a dense label binarizer in all
        # cases and has also resulted in less or equal memory consumption
        # in the fit_ovr function overall.
        self.label_binarizer_ = LabelBinarizer(sparse_output=True)
        self.label_binarizer_.fit(self.classes_)

    if len(np.setdiff1d(y, self.classes_)):
        raise ValueError(
            (
                "Mini-batch contains {0} while classes " + "must be subset of {1}"
            ).format(np.unique(y), self.classes_)
        )

    Y = self.label_binarizer_.transform(y)
    Y = Y.tocsc()
    columns = (col.toarray().ravel() for col in Y.T)

    self.estimators_ = Parallel(n_jobs=self.n_jobs)(
        delayed(_partial_fit_binary)(estimator, X, column)
        for estimator, column in zip(self.estimators_, columns)
    )

    if hasattr(self.estimators_[0], "n_features_in_"):
        self.n_features_in_ = self.estimators_[0].n_features_in_

    return self
1aa38c44f60cb729faf217ef85f5cb7d1dd30b46
15
multiclass.py
372
MAINT Parameters validation for OneVsRest estimator (#24290) Co-authored-by: Jérémie du Boisberranger <[email protected]>
76,491
0
493
232
100
260,786
129
scikit-learn
40
sklearn/multiclass.py
Python
28
{ "docstring": "Partially fit underlying estimators.\n\n Should be used when memory is inefficient to train all data.\n Chunks of data can be passed in several iteration.\n\n Parameters\n ----------\n X : (sparse) array-like of shape (n_samples, n_features)\n Data.\n\n y : (sparse) array-like of shape (n_samples,) or (n_samples, n_classes)\n Multi-class targets. An indicator matrix turns on multilabel\n classification.\n\n classes : array, shape (n_classes, )\n Classes across all calls to partial_fit.\n Can be obtained via `np.unique(y_all)`, where y_all is the\n target vector of the entire dataset.\n This argument is only required in the first call of partial_fit\n and can be omitted in the subsequent calls.\n\n Returns\n -------\n self : object\n Instance of partially fitted estimator.\n ", "language": "en", "n_whitespaces": 286, "n_words": 110, "vocab_size": 84 }
https://github.com/scikit-learn/scikit-learn.git
12
_get_items
def _get_items(self):
    postprocess_items = {}
    # Debug Landmarks
    if (hasattr(self._args, 'debug_landmarks') and self._args.debug_landmarks):
        postprocess_items["DebugLandmarks"] = None

    # Face Filter post processing
    if ((hasattr(self._args, "filter") and self._args.filter is not None) or
            (hasattr(self._args, "nfilter") and self._args.nfilter is not None)):

        if hasattr(self._args, "detector"):
            detector = self._args.detector.replace("-", "_").lower()
        else:
            detector = "cv2_dnn"
        if hasattr(self._args, "aligner"):
            aligner = self._args.aligner.replace("-", "_").lower()
        else:
            aligner = "cv2_dnn"

        face_filter = dict(detector=detector,
                           aligner=aligner,
                           multiprocess=not self._args.singleprocess)
        filter_lists = {}
        if hasattr(self._args, "ref_threshold"):
            face_filter["ref_threshold"] = self._args.ref_threshold
        for filter_type in ('filter', 'nfilter'):
            filter_args = getattr(self._args, filter_type, None)
            filter_args = None if not filter_args else filter_args
            filter_lists[filter_type] = filter_args
        face_filter["filter_lists"] = filter_lists
        postprocess_items["FaceFilter"] = {"kwargs": face_filter}

    logger.debug("Postprocess Items: %s", postprocess_items)
    return postprocess_items
9e503bdaa2bfe2baaea50ad2e4bf742f309d9d10
16
fsmedia.py
422
bugfix: debug landmarks
20,730
0
496
249
67
101,311
108
faceswap
23
scripts/fsmedia.py
Python
29
{ "docstring": " Check the passed in command line arguments for requested actions,\n\n For any requested actions, add the item to the actions list along with\n any relevant arguments and keyword arguments.\n\n Returns\n -------\n dict\n The name of the action to be performed as the key. Any action specific\n arguments and keyword arguments as the value.\n ", "language": "en", "n_whitespaces": 118, "n_words": 53, "vocab_size": 37 }
https://github.com/deepfakes/faceswap.git
3
_apply_media_retention_rules
async def _apply_media_retention_rules(self) -> None:
    # Purge remote media
    if self._media_retention_remote_media_lifetime_ms is not None:
        # Calculate a threshold timestamp derived from the configured lifetime. Any
        # media that has not been accessed since this timestamp will be removed.
        remote_media_threshold_timestamp_ms = (
            self.clock.time_msec() - self._media_retention_remote_media_lifetime_ms
        )

        logger.info(
            "Purging remote media last accessed before"
            f" {remote_media_threshold_timestamp_ms}"
        )

        await self.delete_old_remote_media(
            before_ts=remote_media_threshold_timestamp_ms
        )

    # And now do the same for local media
    if self._media_retention_local_media_lifetime_ms is not None:
        # This works the same as the remote media threshold
        local_media_threshold_timestamp_ms = (
            self.clock.time_msec() - self._media_retention_local_media_lifetime_ms
        )

        logger.info(
            "Purging local media last accessed before"
            f" {local_media_threshold_timestamp_ms}"
        )

        await self.delete_old_local_media(
            before_ts=local_media_threshold_timestamp_ms,
            keep_profiles=True,
        )
2fc787c341ff540e5880932f116498ec0ed7a2c2
13
media_repository.py
170
Add config options for media retention (#12732)
72,294
0
440
93
62
248,466
105
synapse
14
synapse/rest/media/v1/media_repository.py
Python
28
{ "docstring": "\n Purge old local and remote media according to the media retention rules\n defined in the homeserver config.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 15 }
https://github.com/matrix-org/synapse.git
11
_url
def _url(self, hashed_name_func, name, force=False, hashed_files=None):
    if settings.DEBUG and not force:
        hashed_name, fragment = name, ""
    else:
        clean_name, fragment = urldefrag(name)
        if urlsplit(clean_name).path.endswith("/"):  # don't hash paths
            hashed_name = name
        else:
            args = (clean_name,)
            if hashed_files is not None:
                args += (hashed_files,)
            hashed_name = hashed_name_func(*args)

    final_url = super().url(hashed_name)

    # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
    # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
    query_fragment = "?#" in name  # [sic!]
    if fragment or query_fragment:
        urlparts = list(urlsplit(final_url))
        if fragment and not urlparts[4]:
            urlparts[4] = fragment
        if query_fragment and not urlparts[3]:
            urlparts[2] += "?"
        final_url = urlunsplit(urlparts)

    return unquote(final_url)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
15
storage.py
261
Refs #33476 -- Reformatted code with Black.
50,711
0
356
156
60
204,364
94
django
24
django/contrib/staticfiles/storage.py
Python
22
{ "docstring": "\n Return the non-hashed URL in DEBUG mode.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
https://github.com/django/django.git
8
parseString
def parseString(self, instring, parseAll=False):
    ParserElement.resetCache()
    if not self.streamlined:
        self.streamline()
        # ~ self.saveAsList = True
    for e in self.ignoreExprs:
        e.streamline()
    if not self.keepTabs:
        instring = instring.expandtabs()
    try:
        loc, tokens = self._parse(instring, 0)
        if parseAll:
            loc = self.preParse(instring, loc)
            se = Empty() + StringEnd()
            se._parse(instring, loc)
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            # catch and re-raise exception from here, clearing out pyparsing internal stack trace
            if getattr(exc, '__traceback__', None) is not None:
                exc.__traceback__ = self._trim_traceback(exc.__traceback__)
            raise exc
    else:
        return tokens
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
17
pyparsing.py
233
upd; format
13,288
0
359
141
64
63,403
80
transferlearning
25
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
Python
23
{ "docstring": "\n Execute the parse expression with the given string.\n This is the main interface to the client code, once the complete\n expression has been built.\n\n Returns the parsed data as a :class:`ParseResults` object, which may be\n accessed as a list, or as a dict or object with attributes if the given parser\n includes results names.\n\n If you want the grammar to require that the entire input string be\n successfully parsed, then set ``parseAll`` to True (equivalent to ending\n the grammar with ``StringEnd()``).\n\n Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string,\n in order to report proper column numbers in parse actions.\n If the input string contains tabs and\n the grammar uses parse actions that use the ``loc`` argument to index into the\n string being parsed, you can ensure you have a consistent view of the input\n string by:\n\n - calling ``parseWithTabs`` on your grammar before calling ``parseString``\n (see :class:`parseWithTabs`)\n - define your parse action using the full ``(s, loc, toks)`` signature, and\n reference the input string using the parse action's ``s`` argument\n - explictly expand the tabs in your input string before calling\n ``parseString``\n\n Example::\n\n Word('a').parseString('aaaaabaaa') # -> ['aaaaa']\n Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text\n ", "language": "en", "n_whitespaces": 389, "n_words": 197, "vocab_size": 121 }
https://github.com/jindongwang/transferlearning.git
4
show_actual_vendor_versions
def show_actual_vendor_versions(vendor_txt_versions):
    # type: (Dict[str, str]) -> None
    for module_name, expected_version in vendor_txt_versions.items():
        extra_message = ''
        actual_version = get_vendor_version_from_module(module_name)
        if not actual_version:
            extra_message = ' (Unable to locate actual module version, using'\
                            ' vendor.txt specified version)'
            actual_version = expected_version
        elif parse_version(actual_version) != parse_version(expected_version):
            extra_message = ' (CONFLICT: vendor.txt suggests version should'\
                            ' be {})'.format(expected_version)
        logger.info('%s==%s%s', module_name, actual_version, extra_message)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
15
debug.py
126
upd; format
12,219
0
189
71
45
60,607
58
transferlearning
12
.venv/lib/python3.8/site-packages/pip/_internal/commands/debug.py
Python
12
{ "docstring": "Log the actual version and print extra info if there is\n a conflict or if the actual version could not be imported.\n ", "language": "en", "n_whitespaces": 28, "n_words": 22, "vocab_size": 18 }
https://github.com/jindongwang/transferlearning.git
1
test_sends_resolution_notification
def test_sends_resolution_notification(self, record_analytics):
    url = f"/api/0/issues/{self.group.id}/"
    with self.tasks():
        response = self.client.put(url, format="json", data={"status": "resolved"})
    assert response.status_code == 200, response.content
    msg = mail.outbox[0]
    # check the txt version
    assert f"{self.user.username} marked {self.short_id} as resolved" in msg.body
    # check the html version
    assert f"{self.short_id}</a> as resolved</p>" in msg.alternatives[0][0]

    attachment, text = get_attachment()
    assert (
        text
        == f"{self.name} marked <http://testserver/organizations/{self.organization.slug}/issues/{self.group.id}/?referrer=activity_notification|{self.short_id}> as resolved"
    )
    assert attachment["title"] == self.group.title
    assert (
        attachment["footer"]
        == f"{self.project.slug} | <http://testserver/settings/account/notifications/workflow/?referrer=resolved_activity-slack-user|Notification Settings>"
    )
    assert analytics_called_with_args(
        record_analytics,
        "integrations.email.notification_sent",
        user_id=self.user.id,
        actor_id=self.user.actor_id,
        organization_id=self.organization.id,
    )
    assert analytics_called_with_args(
        record_analytics,
        "integrations.slack.notification_sent",
        user_id=self.user.id,
        actor_id=self.user.actor_id,
        organization_id=self.organization.id,
    )
afbf9a3334ce9cad1a62fced372d7fcee40a3133
14
test_notifications.py
360
chore(notification): Pass User ID into notification analytics (#38924) We pass in the actor_id to notification analytics events but we should also include a user_id if the recipient is a user
18,056
0
387
178
57
85,881
89
sentry
34
tests/sentry/notifications/test_notifications.py
Python
32
{ "docstring": "\n Test that an email AND Slack notification are sent with\n the expected values when an issue is resolved.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
https://github.com/getsentry/sentry.git
1
get
def get(self):
    raise NotImplementedError


@keras_export("keras.utils.OrderedEnqueuer")
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@keras_export("keras.utils.OrderedEnqueuer")
7
data_utils.py
27
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,716
1
18
8
5
276,742
5
keras
4
keras/utils/data_utils.py
Python
2
{ "docstring": "Creates a generator to extract data from the queue.\n\n Skip the data if it is `None`.\n # Returns\n Generator yielding tuples `(inputs, targets)`\n or `(inputs, targets, sample_weights)`.\n ", "language": "en", "n_whitespaces": 74, "n_words": 27, "vocab_size": 24 }
https://github.com/keras-team/keras.git
2
_check_data
def _check_data(self, xp, rs):
    xp_lines = xp.get_lines()
    rs_lines = rs.get_lines()

    assert len(xp_lines) == len(rs_lines)
    for xpl, rsl in zip(xp_lines, rs_lines):
        xpdata = xpl.get_xydata()
        rsdata = rsl.get_xydata()
        tm.assert_almost_equal(xpdata, rsdata)

    tm.close()
03fef5f0e35200aa5828975b62782bcf11faa0d2
10
common.py
119
TST: Clean tests/plotting (#45992)
39,621
0
104
73
26
164,924
29
pandas
17
pandas/tests/plotting/common.py
Python
9
{ "docstring": "\n Check each axes has identical lines\n\n Parameters\n ----------\n xp : matplotlib Axes object\n rs : matplotlib Axes object\n ", "language": "en", "n_whitespaces": 61, "n_words": 18, "vocab_size": 14 }
https://github.com/pandas-dev/pandas.git
4
get_1x_lr_params
def get_1x_lr_params(model):
    b = [model.xception_features]
    for i in range(len(b)):
        for k in b[i].parameters():
            if k.requires_grad:
                yield k
2e5d23ee0e7fc1fdd7ad2e615fd651655aeb0f5b
12
deeplab_xception.py
71
Graphonomy Face/Hair Segmentation added
1,642
0
59
43
14
9,617
17
insightface
10
reconstruction/ostec/external/graphonomy/FaceHairMask/deeplab_xception.py
Python
6
{ "docstring": "\n This generator returns all the parameters of the net except for\n the last classification layer. Note that for each batchnorm layer,\n requires_grad is set to False in deeplab_resnet.py, therefore this function does not return\n any batchnorm parameter\n ", "language": "en", "n_whitespaces": 53, "n_words": 37, "vocab_size": 33 }
https://github.com/deepinsight/insightface.git
2
parse_sysconfig_var
def parse_sysconfig_var(self) -> None:
    defines = apache_util.parse_define_file(self.sysconfig_filep, "OPTIONS")
    for k, v in defines.items():
        self.variables[k] = v
7d9e9a49005de7961e84d2a7c608db57dbab3046
10
override_centos.py
65
Add typing to certbot.apache (#9071) * Add typing to certbot.apache Co-authored-by: Adrien Ferrand <[email protected]>
45,572
0
48
39
14
186,664
16
certbot
10
certbot-apache/certbot_apache/_internal/override_centos.py
Python
5
{ "docstring": " Parses Apache CLI options from CentOS configuration file ", "language": "en", "n_whitespaces": 9, "n_words": 8, "vocab_size": 8 }
https://github.com/certbot/certbot.git
69
max_weight_matching
def max_weight_matching(G, maxcardinality=False, weight="weight"):
    #
    # The algorithm is taken from "Efficient Algorithms for Finding Maximum
    # Matching in Graphs" by Zvi Galil, ACM Computing Surveys, 1986.
    # It is based on the "blossom" method for finding augmenting paths and
    # the "primal-dual" method for finding a matching of maximum weight, both
    # methods invented by Jack Edmonds.
    #
    # A C program for maximum weight matching by Ed Rothberg was used
    # extensively to validate this new code.
    #
    # Many terms used in the code comments are explained in the paper
    # by Galil. You will probably need the paper to make sense of this code.
    #
853fb4b27b547bf11761d73c1a62648701f3679f
6
matching.py
38
Added docstring examples to matching functions (#5617) Co-authored-by: Dan Schult <[email protected]> Co-authored-by: Ross Barnowski <[email protected]>
42,021
0
151
1,118
74
176,653
109
networkx
4
networkx/algorithms/matching.py
Python
165
{ "docstring": "Compute a maximum-weighted matching of G.\n\n A matching is a subset of edges in which no node occurs more than once.\n The weight of a matching is the sum of the weights of its edges.\n A maximal matching cannot add more edges and still be a matching.\n The cardinality of a matching is the number of matched edges.\n\n Parameters\n ----------\n G : NetworkX graph\n Undirected graph\n\n maxcardinality: bool, optional (default=False)\n If maxcardinality is True, compute the maximum-cardinality matching\n with maximum weight among all maximum-cardinality matchings.\n\n weight: string, optional (default='weight')\n Edge data key corresponding to the edge weight.\n If key not found, uses 1 as weight.\n\n\n Returns\n -------\n matching : set\n A maximal matching of the graph.\n\n Examples\n --------\n >>> G = nx.Graph()\n >>> edges = [(1, 2, 6), (1, 3, 2), (2, 3, 1), (2, 4, 7), (3, 5, 9), (4, 5, 3)]\n >>> G.add_weighted_edges_from(edges)\n >>> sorted(nx.max_weight_matching(G))\n [(2, 4), (5, 3)]\n\n Notes\n -----\n If G has edges with weight attributes the edge data are used as\n weight values else the weights are assumed to be 1.\n\n This function takes time O(number_of_nodes ** 3).\n\n If all edge weights are integers, the algorithm uses only integer\n computations. If floating point weights are used, the algorithm\n could return a slightly suboptimal matching due to numeric\n precision errors.\n\n This method is based on the \"blossom\" method for finding augmenting\n paths and the \"primal-dual\" method for finding a matching of maximum\n weight, both methods invented by Jack Edmonds [1]_.\n\n Bipartite graphs can also be matched using the functions present in\n :mod:`networkx.algorithms.bipartite.matching`.\n\n References\n ----------\n .. [1] \"Efficient Algorithms for Finding Maximum Matching in Graphs\",\n Zvi Galil, ACM Computing Surveys, 1986.\n ", "language": "en", "n_whitespaces": 429, "n_words": 274, "vocab_size": 173 }
https://github.com/networkx/networkx.git
4
make_action_immutable
def make_action_immutable(obj):
    if isinstance(obj, np.ndarray):
        obj.setflags(write=False)
        return obj
    elif isinstance(obj, OrderedDict):
        return MappingProxyType(dict(obj))
    elif isinstance(obj, dict):
        return MappingProxyType(obj)
    else:
        return obj
242706922b44d4ba4e395deaf6e98b745474863b
12
numpy.py
96
[rllib] Fix linting (#24335) #24262 broke linting. This fixes this.
31,541
0
71
59
14
138,874
21
ray
10
rllib/utils/numpy.py
Python
10
{ "docstring": "Flags actions immutable to notify users when trying to change\n them.\n\n Can also be used with any tree-like structure containing either\n dictionaries, numpy arrays or already immutable objects per se.\n Note, however that `tree.map_structure()` will in general not\n include the shallow object containing all others and therefore\n immutability will hold only for all objects contained in it.\n Use `tree.traverse(fun, action, top_down=False)` to include\n also the containing object.\n\n Args:\n obj: The object to be made immutable.\n\n Returns:\n The immutable object.\n\n Examples:\n >>> import tree\n >>> import numpy as np\n >>> arr = np.arange(1,10)\n >>> d = dict(a = 1, b = (arr, arr))\n >>> tree.traverse(make_action_immutable, d, top_down=False)\n ", "language": "en", "n_whitespaces": 191, "n_words": 106, "vocab_size": 79 }
https://github.com/ray-project/ray.git
13
execute_list_role
def execute_list_role(self):
    path_found = False
    role_found = False
    warnings = []
    roles_search_paths = context.CLIARGS['roles_path']
    role_name = context.CLIARGS['role']

    for path in roles_search_paths:
        role_path = GalaxyCLI._resolve_path(path)
        if os.path.isdir(path):
            path_found = True
        else:
            warnings.append("- the configured path {0} does not exist.".format(path))
            continue

        if role_name:
            # show the requested role, if it exists
            gr = GalaxyRole(self.galaxy, self.lazy_role_api, role_name, path=os.path.join(role_path, role_name))
            if os.path.isdir(gr.path):
                role_found = True
                display.display('# %s' % os.path.dirname(gr.path))
                _display_role(gr)
                break
            warnings.append("- the role %s was not found" % role_name)
        else:
            if not os.path.exists(role_path):
                warnings.append("- the configured path %s does not exist." % role_path)
                continue

            if not os.path.isdir(role_path):
                warnings.append("- the configured path %s, exists, but it is not a directory." % role_path)
                continue

            display.display('# %s' % role_path)
            path_files = os.listdir(role_path)
            for path_file in path_files:
                gr = GalaxyRole(self.galaxy, self.lazy_role_api, path_file, path=path)
                if gr.metadata:
                    _display_role(gr)

    # Do not warn if the role was found in any of the search paths
    if role_found and role_name:
        warnings = []

    for w in warnings:
        display.warning(w)

    if not path_found:
        raise AnsibleOptionsError("- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type']))

    return 0
cb2e434dd2359a9fe1c00e75431f4abeff7381e8
17
galaxy.py
465
ansible-galaxy install - fix unnecessary api check when installing a role from git repo (#79090) * delay server api evaluation until a GalaxyRole needs to make an api call for info, list, and install
79,564
0
742
278
97
268,616
177
ansible
33
lib/ansible/cli/galaxy.py
Python
41
{ "docstring": "\n List all roles installed on the local system or a specific role\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
https://github.com/ansible/ansible.git
10
localize_input
def localize_input(value, default=None):
    if isinstance(value, str):  # Handle strings first for performance reasons.
        return value
    elif isinstance(value, bool):  # Don't treat booleans as numbers.
        return str(value)
    elif isinstance(value, (decimal.Decimal, float, int)):
        return number_format(value)
    elif isinstance(value, datetime.datetime):
        format = default or get_format("DATETIME_INPUT_FORMATS")[0]
        format = sanitize_strftime_format(format)
        return value.strftime(format)
    elif isinstance(value, datetime.date):
        format = default or get_format("DATE_INPUT_FORMATS")[0]
        format = sanitize_strftime_format(format)
        return value.strftime(format)
    elif isinstance(value, datetime.time):
        format = default or get_format("TIME_INPUT_FORMATS")[0]
        return value.strftime(format)
    return value


@functools.lru_cache
9c19aff7c7561e3a82978a272ecdaad40dda5c00
@functools.lru_cache
14
formats.py
250
Refs #33476 -- Reformatted code with Black.
51,614
1
174
152
40
206,662
72
django
20
django/utils/formats.py
Python
19
{ "docstring": "\n Check if an input value is a localizable type and return it\n formatted with the appropriate formatting string of the current locale.\n ", "language": "en", "n_whitespaces": 32, "n_words": 22, "vocab_size": 21 }
https://github.com/django/django.git
3
_process_state
def _process_state(self, entity_observation):
    entity = entity_observation.entity_id

    try:
        if condition.state(self.hass, entity, [STATE_UNKNOWN, STATE_UNAVAILABLE]):
            return None
        return condition.state(self.hass, entity, entity_observation.to_state)
    except ConditionError:
        return None
dd1463da287f591652e47b00eee0c5b77f5f5b7c
10
binary_sensor.py
84
Refactor bayesian observations using dataclass (#79590) * refactor * remove some changes * remove typehint * improve codestyle * move docstring to comment * < 88 chars * avoid short var names * more readable * fix rename * Update homeassistant/components/bayesian/helpers.py Co-authored-by: epenet <[email protected]> * Update homeassistant/components/bayesian/binary_sensor.py Co-authored-by: epenet <[email protected]> * Update homeassistant/components/bayesian/binary_sensor.py Co-authored-by: epenet <[email protected]> * no intermediate * comment why set before list Co-authored-by: epenet <[email protected]>
87,639
0
98
55
17
288,481
22
core
12
homeassistant/components/bayesian/binary_sensor.py
Python
8
{ "docstring": "Return True if state conditions are met, return False if they are not.\n\n Returns None if the state is unavailable.\n ", "language": "en", "n_whitespaces": 34, "n_words": 20, "vocab_size": 16 }
https://github.com/home-assistant/core.git
3
pi
def pi(self):
    total = 0.0
    label_freqs = FreqDist(x["labels"] for x in self.data)
    for k, f in label_freqs.items():
        total += f**2
    Ae = total / ((len(self.I) * len(self.C)) ** 2)
    return (self.avg_Ao() - Ae) / (1 - Ae)
0fac0c0f8e4618c2bdd3d2137d5fb8a80f581246
14
agreement.py
128
Update black to 22.3.0 The most recent release of Click (8.1.0) was breaking Black. See psf/black#2964
7,551
0
90
81
28
42,462
37
nltk
15
nltk/metrics/agreement.py
Python
7
{ "docstring": "Scott 1955; here, multi-pi.\n Equivalent to K from Siegel and Castellan (1988).\n\n ", "language": "en", "n_whitespaces": 26, "n_words": 12, "vocab_size": 12 }
https://github.com/nltk/nltk.git
1
test_user_logout_all
def test_user_logout_all(self) -> None:
    # Login in as the user
    puppet_token = self._get_token()

    # Test that we can successfully make a request
    channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token)
    self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)

    # Logout all with the real user token
    channel = self.make_request(
        "POST", "logout/all", b"{}", access_token=self.other_user_tok
    )
    self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)

    # The puppet token should still work
    channel = self.make_request("GET", "devices", b"{}", access_token=puppet_token)
    self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body)

    # .. but the real user's tokens shouldn't
    channel = self.make_request(
        "GET", "devices", b"{}", access_token=self.other_user_tok
    )
    self.assertEqual(HTTPStatus.UNAUTHORIZED, channel.code, msg=channel.json_body)
901b264c0c88f39cbfb8b2229e0dc57968882658
10
test_user.py
255
Add type hints to `tests/rest/admin` (#11851)
71,059
0
226
159
51
246,165
85
synapse
15
tests/rest/admin/test_user.py
Python
17
{ "docstring": "Tests that the target user calling `/logout/all` does *not* expire\n the token.\n ", "language": "en", "n_whitespaces": 26, "n_words": 12, "vocab_size": 11 }
https://github.com/matrix-org/synapse.git
2
create_vae_diffusers_config
def create_vae_diffusers_config(original_config):
    vae_params = original_config.model.params.first_stage_config.params.ddconfig
    _ = original_config.model.params.first_stage_config.params.embed_dim

    block_out_channels = [vae_params.ch * mult for mult in vae_params.ch_mult]
    down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels)
    up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels)

    config = dict(
        sample_size=vae_params.resolution,
        in_channels=vae_params.in_channels,
        out_channels=vae_params.out_ch,
        down_block_types=tuple(down_block_types),
        up_block_types=tuple(up_block_types),
        block_out_channels=tuple(block_out_channels),
        latent_channels=vae_params.z_channels,
        layers_per_block=vae_params.num_res_blocks,
    )
    return config
039958eae55ff0700cfb42a7e72739575ab341f1
11
convert_original_stable_diffusion_to_diffusers.py
193
Stable diffusion text2img conversion script. (#154) * begin text2img conversion script * add fn to convert config * create config if not provided * update imports and use UNet2DConditionModel * fix imports, layer names * fix unet coversion * add function to convert VAE * fix vae conversion * update main * create text model * update config creating logic for unet * fix config creation * update script to create and save pipeline * remove unused imports * fix checkpoint loading * better name * save progress * finish * up * up Co-authored-by: Patrick von Platen <[email protected]>
120,913
0
124
125
31
336,767
41
diffusers
28
scripts/convert_original_stable_diffusion_to_diffusers.py
Python
17
{ "docstring": "\n Creates a config for the diffusers based on the config of the LDM model.\n ", "language": "en", "n_whitespaces": 21, "n_words": 14, "vocab_size": 11 }
https://github.com/huggingface/diffusers.git
5
__isub__
def __isub__(self, other):
    m = getmask(other)
    if self._mask is nomask:
        if m is not nomask and m.any():
            self._mask = make_mask_none(self.shape, self.dtype)
            self._mask += m
    elif m is not nomask:
        self._mask += m
    other_data = getdata(other)
    other_data = np.where(self._mask, other_data.dtype.type(0), other_data)
    self._data.__isub__(other_data)
    return self
8fced79a8c60d86aaaaf997aa861589336f7899c
13
core.py
158
MAINT: Fortify masked in-place ops against promotion warnings These warnings are probably optional in the future. They should not matter much (since the following is an in-place op), but the `np.where` could upcast currently!
38,627
0
151
100
26
160,410
43
numpy
17
numpy/ma/core.py
Python
12
{ "docstring": "\n Subtract other from self in-place.\n\n ", "language": "en", "n_whitespaces": 20, "n_words": 5, "vocab_size": 5 }
https://github.com/numpy/numpy.git
1
mixin_head_parser
def mixin_head_parser(parser):
    gp = add_arg_group(parser, title='Head')

    gp.add_argument(
        '--uses-before-address',
        type=str,
        help='The address of the uses-before runtime',
    )

    gp.add_argument(
        '--uses-after-address',
        type=str,
        help='The address of the uses-before runtime',
    )

    gp.add_argument(
        '--connection-list',
        type=str,
        help='dictionary JSON with a list of connections to configure',
    )

    gp.add_argument(
        '--disable-reduce',
        action='store_true',
        default=False,
        help='Disable the built-in reduce mechanism, set this if the reduction is to be handled by the Executor connected to this Head',
    )
c7ad27e5614dfb2b1684f4718c5508840cd55de0
10
head.py
137
refactor: add disable_reduce args (#4424)
2,054
0
186
80
44
11,483
65
jina
11
jina/parsers/orchestrate/runtimes/head.py
Python
23
{ "docstring": "Mixing in arguments required by head pods and runtimes into the given parser.\n :param parser: the parser instance to which we add arguments\n ", "language": "en", "n_whitespaces": 29, "n_words": 23, "vocab_size": 21 }
https://github.com/jina-ai/jina.git
3
get_pe_matching_query
def get_pe_matching_query(amount_condition, account_from_to, transaction):
    # get matching payment entries query
    from_date = frappe.db.get_single_value('Bank Reconciliation Tool','bank_statement_from_date')
    to_date = frappe.db.get_single_value('Bank Reconciliation Tool','bank_statement_to_date')
    from_reference_date = frappe.db.get_single_value('Bank Reconciliation Tool','from_reference_date')
    to_reference_date = frappe.db.get_single_value('Bank Reconciliation Tool','to_reference_date')
    filtered_by_reference_date = frappe.db.get_single_value('Bank Reconciliation Tool','filtered_by_reference_date')
    if transaction.deposit > 0:
        currency_field = "paid_to_account_currency as currency"
    else:
        currency_field = "paid_from_account_currency as currency"
    if (filtered_by_reference_date):
        pe_data= f
    else:
        pe_data= f
    return pe_data
e5a1189becad071f54c727bc6c0dba16bea2a12f
11
bank_reconciliation_tool.py
222
Update bank_reconciliation_tool.py Applying date filter on transactions and all the bank entries and also gives the filter the bank entries as per reference date. Sorted all transactions and entries as per date in ascending order. Also added posting date columns in all bank entries and default checkbox tick of journal entry, hide the sales invoice and purchase invoice checkbox.
15,082
0
45
101
38
69,682
59
erpnext
15
erpnext/accounts/doctype/bank_reconciliation_tool/bank_reconciliation_tool.py
Python
65
{ "docstring": "\n\t\t\tSELECT\n\t\t\t\t(CASE WHEN reference_no=%(reference_no)s THEN 1 ELSE 0 END\n\t\t\t\t+ CASE WHEN (party_type = %(party_type)s AND party = %(party)s ) THEN 1 ELSE 0 END\n\t\t\t\t+ 1 ) AS rank,\n\t\t\t\t'Payment Entry' as doctype,\n\t\t\t\tname,\n\t\t\t\tpaid_amount,\n\t\t\t\treference_no,\n\t\t\t\treference_date,\n\t\t\t\tparty,\n\t\t\t\tparty_type,\n\t\t\t\tposting_date,\n\t\t\t\t{currency_field}\n\t\t\tFROM\n\t\t\t\t`tabPayment Entry`\n\t\t\tWHERE\n\t\t\t\tpaid_amount {amount_condition} %(amount)s\n\t\t\t\tAND docstatus = 1\n\t\t\t\tAND payment_type IN (%(payment_type)s, 'Internal Transfer')\n\t\t\t\tAND ifnull(clearance_date, '') = \"\"\n\t\t\t\tAND {account_from_to} = %(bank_account)s\n\t\t\t\tAND reference_date >= '{from_reference_date}'\n\t\t\t\tAND reference_date <= '{to_reference_date}'\t \n\t\t\t\t\n\t\t\t\n\t\t\tSELECT\n\t\t\t\t(CASE WHEN reference_no=%(reference_no)s THEN 1 ELSE 0 END\n\t\t\t\t+ CASE WHEN (party_type = %(party_type)s AND party = %(party)s ) THEN 1 ELSE 0 END\n\t\t\t\t+ 1 ) AS rank,\n\t\t\t\t'Payment Entry' as doctype,\n\t\t\t\tname,\n\t\t\t\tpaid_amount,\n\t\t\t\treference_no,\n\t\t\t\treference_date,\n\t\t\t\tparty,\n\t\t\t\tparty_type,\n\t\t\t\tposting_date,\n\t\t\t\t{currency_field}\n\t\t\tFROM\n\t\t\t\t`tabPayment Entry`\n\t\t\tWHERE\n\t\t\t\tpaid_amount {amount_condition} %(amount)s\n\t\t\t\tAND docstatus = 1\n\t\t\t\tAND payment_type IN (%(payment_type)s, 'Internal Transfer')\n\t\t\t\tAND ifnull(clearance_date, '') = \"\"\n\t\t\t\tAND {account_from_to} = %(bank_account)s\n\t\t\t\tAND posting_date >= '{from_date}'\n\t\t\t\tAND posting_date <= '{to_date}'\t \n\t\t\t\t\n\t\t\t", "language": "en", "n_whitespaces": 110, "n_words": 152, "vocab_size": 58 }
https://github.com/frappe/erpnext.git
2
set_client_cli_parser
def set_client_cli_parser(parser=None):
    if not parser:
        from jina.parsers.base import set_base_parser

        parser = set_base_parser()

    from jina.parsers.peapods.runtimes.remote import mixin_client_gateway_parser
    from jina.parsers.client import (
        mixin_client_features_parser,
        mixin_comm_protocol_parser,
    )

    mixin_client_gateway_parser(parser)
    mixin_client_features_parser(parser)
    mixin_comm_protocol_parser(parser)

    return parser
cea300655ed8be70d74c390ca12e8b09fb741665
10
__init__.py
99
refactor: use absolute imports (#4167)
1,853
0
83
64
23
10,563
28
jina
13
jina/parsers/__init__.py
Python
13
{ "docstring": "Set the parser for the cli client\n\n :param parser: an optional existing parser to build upon\n :return: the parser\n ", "language": "en", "n_whitespaces": 28, "n_words": 19, "vocab_size": 15 }
https://github.com/jina-ai/jina.git
3
get_delivered_items_cost
def get_delivered_items_cost():
    dn_items = frappe.db.sql(
        ,
        as_dict=1,
    )

    si_items = frappe.db.sql(
        ,
        as_dict=1,
    )

    dn_item_map = {}
    for item in dn_items:
        dn_item_map.setdefault(item.project, item.amount)

    for item in si_items:
        dn_item_map.setdefault(item.project, item.amount)

    return dn_item_map
494bd9ef78313436f0424b918f200dab8fc7c20b
10
project_wise_stock_tracking.py
116
style: format code with black
14,413
0
16
74
19
67,035
31
erpnext
12
erpnext/projects/report/project_wise_stock_tracking/project_wise_stock_tracking.py
Python
22
{ "docstring": "select dn.project, sum(dn_item.base_net_amount) as amount\n\t\tfrom `tabDelivery Note` dn, `tabDelivery Note Item` dn_item\n\t\twhere dn.name = dn_item.parent and dn.docstatus = 1 and ifnull(dn.project, '') != ''\n\t\tgroup by dn.projectselect si.project, sum(si_item.base_net_amount) as amount\n\t\tfrom `tabSales Invoice` si, `tabSales Invoice Item` si_item\n\t\twhere si.name = si_item.parent and si.docstatus = 1 and si.update_stock = 1\n\t\tand si.is_pos = 1 and ifnull(si.project, '') != ''\n\t\tgroup by si.project", "language": "en", "n_whitespaces": 57, "n_words": 65, "vocab_size": 40 }
https://github.com/frappe/erpnext.git
5
set_location
def set_location(self, location):
    # This puts the rectangle into figure-relative coordinates.
    if isinstance(location, str):
        _api.check_in_list(self._locstrings, location=location)
        self._pos = 1. if location in ('top', 'right') else 0.
    elif isinstance(location, numbers.Real):
        self._pos = location
    else:
        raise ValueError(
            f"location must be {self._locstrings[0]!r}, "
            f"{self._locstrings[1]!r}, or a float, not {location!r}")

    self._loc = location

    if self._orientation == 'x':
        # An x-secondary axes is like an inset axes from x = 0 to x = 1 and
        # from y = pos to y = pos + eps, in the parent's transAxes coords.
        bounds = [0, self._pos, 1., 1e-10]
    else:  # 'y'
        bounds = [self._pos, 0, 1e-10, 1]

    # this locator lets the axes move in the parent axes coordinates.
    # so it never needs to know where the parent is explicitly in
    # figure coordinates.
    # it gets called in ax.apply_aspect() (of all places)
    self.set_axes_locator(
        _TransformedBoundsLocator(bounds, self._parent.transAxes))
8387676bc049d7b3e071846730c632e6ced137ed
15
_secondary_axes.py
230
Clean up code in SecondaryAxis
23,720
0
363
130
97
109,724
142
matplotlib
19
lib/matplotlib/axes/_secondary_axes.py
Python
17
{ "docstring": "\n Set the vertical or horizontal location of the axes in\n parent-normalized coordinates.\n\n Parameters\n ----------\n location : {'top', 'bottom', 'left', 'right'} or float\n The position to put the secondary axis. Strings can be 'top' or\n 'bottom' for orientation='x' and 'right' or 'left' for\n orientation='y'. A float indicates the relative position on the\n parent axes to put the new axes, 0.0 being the bottom (or left)\n and 1.0 being the top (or right).\n ", "language": "en", "n_whitespaces": 170, "n_words": 71, "vocab_size": 51 }
https://github.com/matplotlib/matplotlib.git
8
load_and_dump
def load_and_dump(self) -> None:
    with ExitStack() as stack:
        # set env vars
        stack.enter_context(change_env('JINA_FULL_CLI', 'true'))

        # change directory to `workspace`
        stack.enter_context(change_cwd(get_workspace_path(self.workspace_id)))

        # load and build
        f: Flow = Flow.load_config(
            str(self.localpath()), substitute=True, context=self.envs
        ).build()

        # get & set the ports mapping, set `runs_in_docker`
        port_mapping = []
        port_mapping.append(
            PortMapping(
                deployment_name='gateway',
                pod_name='gateway',
                ports=Ports(port_expose=f.port_expose),
            )
        )
        for deployment_name, deployment in f._deployment_nodes.items():
            runtime_cls = update_runtime_cls(deployment.args, copy=True).runtime_cls
            if runtime_cls in ['WorkerRuntime'] + list(
                GATEWAY_RUNTIME_DICT.values()
            ):
                current_ports = Ports()
                for port_name in Ports.__fields__:
                    setattr(
                        current_ports,
                        port_name,
                        getattr(deployment.args, port_name, None),
                    )

                port_mapping.append(
                    PortMapping(
                        deployment_name=deployment_name,
                        pod_name='',
                        ports=current_ports,
                    )
                )
            elif (
                runtime_cls in ['ContainerRuntime']
                and hasattr(deployment.args, 'replicas')
                and deployment.args.replicas > 1
            ):
                for pod_args in [deployment.pod_args['head']]:
                    self._update_port_mapping(
                        pod_args, deployment_name, port_mapping
                    )

        self.ports = port_mapping
        # save to a new file & set it for partial-daemon
        f.save_config(filename=self.newfile)
        self.params.uses = self.newname
13edc16d806fb5d77a6849551178ccc75937f25f
18
dependencies.py
436
refactor: rename pod to deployment (#4230) * refactor: rename pod to deployment * style: fix overload and cli autocomplete * fix: undo daemon mistake * refactor: leftover cleanup * fix: more test fixes * fix: more fixes * fix: more fixes * fix: more fixes * fix: more tests * fix: fix more tests * refactor: fix more tests * refactor: more tests fixes * refactor: rename pea to pod * refactor: adjust docs * refactor: complete pea renaming * refactor: more fixes * fix: pea_type in k8s yamls * fix: adjust pod args name * refactor: rename peapods parser folder * fix: da init Co-authored-by: Jina Dev Bot <[email protected]>
1,916
0
994
267
92
10,804
129
jina
51
daemon/api/dependencies.py
Python
63
{ "docstring": "\n every Flow created inside JinaD lives inside a container. It is important to know the\n list of ports to be published with localhost before actually starting the container.\n\n 1. `load` the flow yaml here.\n - yaml is stored in `workspace` directory, so we'll `cd` there\n - yaml might include env vars. so we'll set them (passed via query params)\n 2. `build` the Flow so that `gateway` gets added.\n - get the list of ports to be published (port_expose, port_in, port_out, port_ctrl)\n - ports need to be published for gateway & executors that are not `ContainerRuntime` or `JinadRuntime` based\n - Deployment level args for ports are enough, as we don't need to publish Pod ports\n 3. `save` the Flow config.\n - saves port configs of all `executors` into the new yaml.\n - set `JINA_FULL_CLI` envvar, so that `gateway` args are also added.\n - save the config into a new file.\n 4. pass this new file as filename to `partial-daemon` to start the Flow\n ", "language": "en", "n_whitespaces": 300, "n_words": 162, "vocab_size": 103 }
https://github.com/jina-ai/jina.git
7
search
def search(self, content):
    result = []
    length = len(content)
    for start in range(length):
        for end in range(start + 1, length + 1):
            pos = self.tree.get(content[start:end], -1)
            if pos == -1:
                break
            if pos and (len(result) == 0 or end > result[-1][1]):
                result.append((start, end))
    return result
621357338437ee420eabbbf5ab19065bc85e73a5
16
utils.py
156
Update neural search readme and Add Paddle Serving Support (#1558) * add recall inference similarity * update examples * updatea readme * update dir name * update neural search readme * update milvus readme * update domain adaptive pretraining readme * fix the mistakes * update readme * add recall Paddle Serving Support * update readme * update readme and format the code * reformat the files * move the files * reformat the code * remove redundant code Co-authored-by: Zeyu Chen <[email protected]> Co-authored-by: tianxin <[email protected]>
118,098
0
174
100
33
322,216
45
PaddleNLP
13
paddlenlp/taskflow/utils.py
Python
11
{ "docstring": "Backward maximum matching\n\n Args:\n content (str): string to be searched\n Returns:\n List[Tuple]: list of maximum matching words, each element represents \n the starting and ending position of the matching string.\n ", "language": "en", "n_whitespaces": 88, "n_words": 29, "vocab_size": 24 }
https://github.com/PaddlePaddle/PaddleNLP.git
2
send_gstin_reminder
def send_gstin_reminder(party_type, party):
    frappe.has_permission(party_type, throw=True)
    email = _send_gstin_reminder(party_type, party)
    if email:
        frappe.msgprint(_("Reminder to update GSTIN Sent"), title="Reminder sent", indicator="green")
494bd9ef78313436f0424b918f200dab8fc7c20b
12
gst_settings.py
78
style: format code with black
14,421
0
14
46
19
67,056
19
erpnext
12
erpnext/regional/doctype/gst_settings/gst_settings.py
Python
5
{ "docstring": "Send GSTIN reminder to one party (called from Customer, Supplier form)", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/frappe/erpnext.git
7
serialize
def serialize(items):
    data = QByteArray()
    stream = QDataStream(data, QIODevice.OpenModeFlag.ReadWrite)

    cur_user_data = None
    current_idx = None

    for i, item in enumerate(items):
        if item.active:
            if current_idx is not None:
                raise ValueError("Multiple active items ({} and {}) "
                                 "found!".format(current_idx, i))
            current_idx = i
            cur_user_data = item.user_data

    if items:
        if current_idx is None:
            raise ValueError("No active item found!")
    else:
        current_idx = -1

    ### src/core/web_contents_adapter.cpp serializeNavigationHistory
    # sample data:
    # kHistoryStreamVersion
    stream.writeInt(HISTORY_STREAM_VERSION)  # \x00\x00\x00\x03
    # count
    stream.writeInt(len(items))  # \x00\x00\x00\x01
    # currentIndex
    stream.writeInt(current_idx)  # \x00\x00\x00\x00

    for item in items:
        _serialize_item(item, stream)

    stream.device().reset()
    qtutils.check_qdatastream(stream)
    return stream, data, cur_user_data
0877fb0d78635692e481c8bde224fac5ad0dd430
17
tabhistory.py
246
Run scripts/dev/rewrite_enums.py
117,553
0
337
143
60
321,126
91
qutebrowser
26
qutebrowser/browser/webengine/tabhistory.py
Python
25
{ "docstring": "Serialize a list of TabHistoryItems to a data stream.\n\n Args:\n items: An iterable of TabHistoryItems.\n\n Return:\n A (stream, data, user_data) tuple.\n stream: The reset QDataStream.\n data: The QByteArray with the raw data.\n cur_user_data: The user data for the current item or None.\n\n Warning:\n If 'data' goes out of scope, reading from 'stream' will result in a\n segfault!\n ", "language": "en", "n_whitespaces": 130, "n_words": 57, "vocab_size": 49 }
https://github.com/qutebrowser/qutebrowser.git
2
check_query_parameters
def check_query_parameters(self, queryset):
    query_parameters = set(self.request.GET.keys())

    # All query parameters must be either a database field or an operation
    allowed_query_parameters = set(
        self.get_available_fields(queryset.model, db_fields_only=True)
    ).union(self.known_query_parameters)
    unknown_parameters = query_parameters - allowed_query_parameters
    if unknown_parameters:
        raise BadRequestError(
            "query parameter is not an operation or a recognised field: %s"
            % ", ".join(sorted(unknown_parameters))
        )
d10f15e55806c6944827d801cd9c2d53f5da4186
14
views.py
117
Reformat with black
15,918
0
161
69
41
72,960
49
wagtail
18
wagtail/api/v2/views.py
Python
11
{ "docstring": "\n Ensure that only valid query parameters are included in the URL.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
https://github.com/wagtail/wagtail.git
2
request
def request(self, method, path, data=None, params=None, **kwargs):
    request_spec = self.request_hook(method, path, data, params, **kwargs)
    if "headers" not in request_spec:
        request_spec["headers"] = {}

    # Force adherence to the GDPR compliant API conventions.
    # See
    # https://developer.atlassian.com/cloud/jira/platform/deprecation-notice-user-privacy-api-migration-guide
    request_spec["headers"]["x-atlassian-force-account-id"] = "true"
    return self._request(**request_spec)
2fbf550ec05c8501cbc9eca62e73526e717dcbdf
10
client.py
113
ref(Jira): Split Jira Cloud and Jira Server (#37034) * Split Jira Cloud and Jira Server
19,008
0
107
68
35
93,686
40
sentry
10
src/sentry/integrations/jira/client.py
Python
6
{ "docstring": "\n Use the request_hook method for our specific style of Jira to\n add authentication data and transform parameters.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 17 }
https://github.com/getsentry/sentry.git
1
nullify_connected_endpoints
def nullify_connected_endpoints(instance, **kwargs):
    model = instance.termination_type.model_class()
    model.objects.filter(pk=instance.termination_id).update(_link_peer_type=None, _link_peer_id=None)
8bc6d8cb231ad45cd8b97ffb26cc3d989c60c277
11
signals.py
67
Introduce CablePath.retrace() to handle deleted cables
77,858
0
17
41
8
264,840
8
netbox
13
netbox/dcim/signals.py
Python
3
{ "docstring": "\n Disassociate the Cable from the termination object.\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 6 }
https://github.com/netbox-community/netbox.git
7
getModuleImportableFilesHash
def getModuleImportableFilesHash(full_name):
    package_name = full_name.getPackageName()

    paths = getPackageSearchPath(None)

    if package_name is not None:
        paths += getPackageSearchPath(package_name)

    all_suffixes = getAllModuleSuffixes()

    result_hash = Hash()

    for path in paths:
        if not os.path.isdir(path):
            continue

        for fullname, filename in listDir(path):
            if isPackageDir(fullname) or filename.endswith(all_suffixes):
                result_hash.updateFromValues(filename, b"\0")

    return result_hash.asHexDigest()
840959fbec6d897aa7e51f63e1c34e46402ced8b
14
Caching.py
159
Optimization: Make experimental caching bytecode demotion work.
42,780
0
125
96
33
178,646
43
Nuitka
20
nuitka/Caching.py
Python
14
{ "docstring": "Calculate hash value of packages importable for a module of this name.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
https://github.com/Nuitka/Nuitka.git
2
get_summary_stats
def get_summary_stats(self) -> List[dict]:
    logger.debug("Compiling sessions summary data")
    self._get_time_stats()
    self._get_per_session_stats()
    if not self._per_session_stats:
        return self._per_session_stats

    total_stats = self._total_stats()
    retval = self._per_session_stats + [total_stats]
    retval = self._format_stats(retval)
    logger.debug("Final stats: %s", retval)
    return retval
afec52309326304f4323029039e49bfcf928ef43
8
stats.py
122
Bugfixes: - Stats graph - Handle NaNs in data - logger - de-elevate matplotlib font messages
20,168
0
113
71
26
100,713
32
faceswap
13
lib/gui/analysis/stats.py
Python
22
{ "docstring": " Compile the individual session statistics and calculate the total.\n\n Format the stats for display\n\n Returns\n -------\n list\n A list of summary statistics dictionaries containing the Session ID, start time, end\n time, elapsed time, rate, batch size and number of iterations for each session id\n within the loaded data as well as the totals.\n ", "language": "en", "n_whitespaces": 122, "n_words": 53, "vocab_size": 39 }
https://github.com/deepfakes/faceswap.git
2
zero_module
def zero_module(module):
    for p in module.parameters():
        p.detach().zero_()
    return module
f4d6e64cdc132ae868699a0ba442f4ab1d304a14
11
nn.py
46
add disco_diffusion_cnclip_vitb16 module
9,933
0
25
26
9
49,823
9
PaddleHub
6
modules/image/text_to_image/disco_diffusion_cnclip_vitb16/reverse_diffusion/model/nn.py
Python
4
{ "docstring": "\n Zero out the parameters of a module and return it.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
https://github.com/PaddlePaddle/PaddleHub.git
6
normalize_drive
def normalize_drive(path):
    if os.name != "nt" or not isinstance(path, str):
        return path

    drive, tail = os.path.splitdrive(path)
    # Only match (lower cased) local drives (e.g. 'c:'), not UNC mounts.
    if drive.islower() and len(drive) == 2 and drive[1] == ":":
        return f"{drive.upper()}{tail}"

    return path


@contextmanager
3387881a6d4fc2d8bdc0f05c484cb2f7222acfb8
@contextmanager
11
shell.py
123
Code reorg utils into utils module reduces complexity (#4990) * Split apart the massive utils.py into a utils module
3,025
1
74
61
36
19,569
43
pipenv
13
pipenv/utils/shell.py
Python
7
{ "docstring": "Normalize drive in path so they stay consistent.\n\n This currently only affects local drives on Windows, which can be\n identified with either upper or lower cased drive names. The case is\n always converted to uppercase because it seems to be preferred.\n\n See: <https://github.com/pypa/pipenv/issues/1218>\n ", "language": "en", "n_whitespaces": 58, "n_words": 43, "vocab_size": 40 }
https://github.com/pypa/pipenv.git
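An illustrative call with hypothetical paths; the upper-casing only happens on Windows (os.name == "nt"), elsewhere the input is returned untouched.

# On Windows: lower-cased local drive letters are upper-cased.
normalize_drive("c:\\users\\dev\\project")   # -> "C:\\users\\dev\\project"
# UNC mounts and non-Windows paths pass through unchanged.
normalize_drive("//server/share/project")    # -> "//server/share/project"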
6
check_models_are_tested
def check_models_are_tested(module, test_file):
    # XxxPreTrainedModel are not tested
    defined_models = get_models(module)
    tested_models = find_tested_models(test_file)
    if tested_models is None:
        if test_file.replace(os.path.sep, "/") in TEST_FILES_WITH_NO_COMMON_TESTS:
            return
        return [
            f"{test_file} should define `all_model_classes` to apply common tests to the models it tests. "
            + "If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file "
            + "`utils/check_repo.py`."
        ]
    failures = []
    for model_name, _ in defined_models:
        if model_name not in tested_models and model_name not in IGNORE_NON_TESTED:
            failures.append(
                f"{model_name} is defined in {module.__name__} but is not tested in "
                + f"{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file."
                + "If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED`"
                + "in the file `utils/check_repo.py`."
            )
    return failures
1f9e862507704774334dc22a84724e74f52232b7
19
check_repo.py
185
Update check_models_are_tested to deal with Windows path (#16973) * fix * Apply suggestions from code review Co-authored-by: ydshieh <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]>
6,780
0
299
89
74
37,451
121
transformers
20
utils/check_repo.py
Python
21
{ "docstring": "Check models defined in module are tested in test_file.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
https://github.com/huggingface/transformers.git
3
test_with_slicing
def test_with_slicing(metrics_message, setup_slicing) -> None:
    org_id = metrics_message.payload.routing_header.get("org_id")
    router = SlicingRouter("sliceable")
    route = router.get_route_for_message(metrics_message)
    if int(org_id) % SENTRY_SLICING_LOGICAL_PARTITION_COUNT < 128:
        assert route.topic.name == "sliced_topic_0"
    elif int(org_id) % SENTRY_SLICING_LOGICAL_PARTITION_COUNT < 256:
        assert route.topic.name == "sliced_topic_1"
    else:
        assert False, "unexpected org_id"
    router.shutdown()
e1c5b9ca3bdeadcfdb328dc42740923038d6eb94
11
test_slicing_router.py
144
feat(indexer): Allow routing of messages (#40776) ### Context In order to support slicing in Snuba, the upstream dependency of Snuba i.e. Sentry metrics indexer needs to route messages to different slices. The current implementation is hardcoded to send messages on a single destination topic. ### Implementation The way to support sending messages to different slices is by creating a new abstraction called RoutingProducer. The routing producer handles all the kafka specifics and delegates the routing decision to an abstract `MessageRouter`. The `MessageRouter` returns a route which encapsulates the producer and topic on which the data would be sent. The `SlicingRouter` will use information about slicing from config and populate all the sliced producers and topics. It will then rely on the presence of `org_id` in the Kafkapayload header to make decisions on how to route the message. The routing decision is similar to what is made in Snuba. **The current PR does not plug in the code with the consumer. There would be a separate PR which would handle that.** Co-authored-by: getsantry[bot] <66042841+getsantry[bot]@users.noreply.github.com>
18,423
0
85
83
30
88,664
40
sentry
16
tests/sentry/sentry_metrics/consumers/test_slicing_router.py
Python
15
{ "docstring": "\n With partitioning settings, the SlicingRouter should route to the correct topic\n based on the org_id header.\n ", "language": "en", "n_whitespaces": 26, "n_words": 16, "vocab_size": 14 }
https://github.com/getsentry/sentry.git
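The routing decision exercised above reduces to modular arithmetic. A standalone sketch, assuming 256 logical partitions split evenly over two slices (as the 128/256 thresholds in the test imply):

SENTRY_SLICING_LOGICAL_PARTITION_COUNT = 256  # assumed to match the test above

def topic_for_org(org_id: int) -> str:
    # Map the org to a logical partition, then to one of two sliced topics.
    logical_partition = org_id % SENTRY_SLICING_LOGICAL_PARTITION_COUNT
    return "sliced_topic_0" if logical_partition < 128 else "sliced_topic_1"

assert topic_for_org(5) == "sliced_topic_0"     # 5 % 256 = 5     -> slice 0
assert topic_for_org(200) == "sliced_topic_1"   # 200 % 256 = 200 -> slice 1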
3
get_dependencies
def get_dependencies(dsk, key=None, task=no_default, as_list=False):
    if key is not None:
        arg = dsk[key]
    elif task is not no_default:
        arg = task
    else:
        raise ValueError("Provide either key or task")

    return keys_in_tasks(dsk, [arg], as_list=as_list)
cccb9d8d8e33a891396b1275c2448c352ef40c27
11
core.py
92
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
36,541
0
68
59
26
156,079
32
dask
9
dask/core.py
Python
8
{ "docstring": "Get the immediate tasks on which this task depends\n\n Examples\n --------\n >>> inc = lambda x: x + 1\n >>> add = lambda x, y: x + y\n >>> dsk = {'x': 1,\n ... 'y': (inc, 'x'),\n ... 'z': (add, 'x', 'y'),\n ... 'w': (inc, 'z'),\n ... 'a': (add, (inc, 'x'), 1)}\n\n >>> get_dependencies(dsk, 'x')\n set()\n\n >>> get_dependencies(dsk, 'y')\n {'x'}\n\n >>> get_dependencies(dsk, 'z') # doctest: +SKIP\n {'x', 'y'}\n\n >>> get_dependencies(dsk, 'w') # Only direct dependencies\n {'z'}\n\n >>> get_dependencies(dsk, 'a') # Ignore non-keys\n {'x'}\n\n >>> get_dependencies(dsk, task=(inc, 'x')) # provide tasks directly\n {'x'}\n ", "language": "en", "n_whitespaces": 190, "n_words": 92, "vocab_size": 61 }
https://github.com/dask/dask.git
3
start
def start(self, page, user):
    state = WorkflowState(
        page=page,
        workflow=self,
        status=WorkflowState.STATUS_IN_PROGRESS,
        requested_by=user,
    )
    state.save()
    state.update(user=user)
    workflow_submitted.send(sender=state.__class__, instance=state, user=user)

    next_task_data = None
    if state.current_task_state:
        next_task_data = {
            "id": state.current_task_state.task.id,
            "title": state.current_task_state.task.name,
        }
    log(
        instance=page,
        action="wagtail.workflow.start",
        data={
            "workflow": {
                "id": self.id,
                "title": self.name,
                "status": state.status,
                "next": next_task_data,
                "task_state_id": state.current_task_state.id if state.current_task_state else None,
            }
        },
        revision=page.get_latest_revision(),
        user=user,
    )
    return state
d10f15e55806c6944827d801cd9c2d53f5da4186
15
__init__.py
260
Reformat with black
16,126
0
459
166
47
73,815
57
wagtail
27
wagtail/core/models/__init__.py
Python
34
{ "docstring": "Initiates a workflow by creating an instance of ``WorkflowState``", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/wagtail/wagtail.git
2
unpack
def unpack(self, location, url):
    # type: (str, HiddenText) -> None
    if os.path.exists(location):
        rmtree(location)
    self.obtain(location, url=url)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
9
versioncontrol.py
56
upd; format
12,546
0
54
34
15
61,400
15
transferlearning
9
.venv/lib/python3.8/site-packages/pip/_internal/vcs/versioncontrol.py
Python
4
{ "docstring": "\n Clean up current location and download the url repository\n (and vcs infos) into location\n\n :param url: the repository URL starting with a vcs prefix.\n ", "language": "en", "n_whitespaces": 53, "n_words": 24, "vocab_size": 20 }
https://github.com/jindongwang/transferlearning.git
5
mutual_info_score
def mutual_info_score(labels_true, labels_pred, *, contingency=None):
    if contingency is None:
        labels_true, labels_pred = check_clusterings(labels_true, labels_pred)
        contingency = contingency_matrix(labels_true, labels_pred, sparse=True)
    else:
        contingency = check_array(
            contingency,
            accept_sparse=["csr", "csc", "coo"],
            dtype=[int, np.int32, np.int64],
        )

    if isinstance(contingency, np.ndarray):
        # For an array
        nzx, nzy = np.nonzero(contingency)
        nz_val = contingency[nzx, nzy]
    else:
        # For a sparse matrix
        nzx, nzy, nz_val = sp.find(contingency)

    contingency_sum = contingency.sum()
    pi = np.ravel(contingency.sum(axis=1))
    pj = np.ravel(contingency.sum(axis=0))

    # Since MI <= min(H(X), H(Y)), any labelling with zero entropy, i.e. containing a
    # single cluster, implies MI = 0
    if pi.size == 1 or pj.size == 1:
        return 0.0

    log_contingency_nm = np.log(nz_val)
    contingency_nm = nz_val / contingency_sum
    # Don't need to calculate the full outer product, just for non-zeroes
    outer = pi.take(nzx).astype(np.int64, copy=False) * pj.take(nzy).astype(
        np.int64, copy=False
    )
    log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
    mi = (
        contingency_nm * (log_contingency_nm - log(contingency_sum))
        + contingency_nm * log_outer
    )
    mi = np.where(np.abs(mi) < np.finfo(mi.dtype).eps, 0.0, mi)
    return np.clip(mi.sum(), 0.0, None)
8256a48519b7ff0a29c46862de533cefa9ad7f48
14
_supervised.py
488
MAINT Parameters validation for metrics.mutual_info_score (#25243)
77,010
0
344
312
111
261,808
157
scikit-learn
43
sklearn/metrics/cluster/_supervised.py
Python
32
{ "docstring": "Mutual Information between two clusterings.\n\n The Mutual Information is a measure of the similarity between two labels\n of the same data. Where :math:`|U_i|` is the number of the samples\n in cluster :math:`U_i` and :math:`|V_j|` is the number of the\n samples in cluster :math:`V_j`, the Mutual Information\n between clusterings :math:`U` and :math:`V` is given as:\n\n .. math::\n\n MI(U,V)=\\\\sum_{i=1}^{|U|} \\\\sum_{j=1}^{|V|} \\\\frac{|U_i\\\\cap V_j|}{N}\n \\\\log\\\\frac{N|U_i \\\\cap V_j|}{|U_i||V_j|}\n\n This metric is independent of the absolute values of the labels:\n a permutation of the class or cluster label values won't change the\n score value in any way.\n\n This metric is furthermore symmetric: switching :math:`U` (i.e\n ``label_true``) with :math:`V` (i.e. ``label_pred``) will return the\n same score value. This can be useful to measure the agreement of two\n independent label assignments strategies on the same dataset when the\n real ground truth is not known.\n\n Read more in the :ref:`User Guide <mutual_info_score>`.\n\n Parameters\n ----------\n labels_true : array-like of shape (n_samples,), dtype=integral\n A clustering of the data into disjoint subsets, called :math:`U` in\n the above formula.\n\n labels_pred : array-like of shape (n_samples,), dtype=integral\n A clustering of the data into disjoint subsets, called :math:`V` in\n the above formula.\n\n contingency : {array-like, sparse matrix} of shape \\\n (n_classes_true, n_classes_pred), default=None\n A contingency matrix given by the :func:`contingency_matrix` function.\n If value is ``None``, it will be computed, otherwise the given value is\n used, with ``labels_true`` and ``labels_pred`` ignored.\n\n Returns\n -------\n mi : float\n Mutual information, a non-negative value, measured in nats using the\n natural logarithm.\n\n See Also\n --------\n adjusted_mutual_info_score : Adjusted against chance Mutual Information.\n normalized_mutual_info_score : Normalized Mutual Information.\n\n Notes\n -----\n The logarithm used is the natural logarithm (base-e).\n ", "language": "en", "n_whitespaces": 446, "n_words": 267, "vocab_size": 152 }
https://github.com/scikit-learn/scikit-learn.git
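Typical use goes through the public scikit-learn API. The score is invariant to label permutation, and a single-cluster labelling hits the zero-entropy early return shown above:

from sklearn.metrics import mutual_info_score

# Same partition under a label swap: MI = ln(2) ≈ 0.693 nats.
mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0])

# One labelling is a single cluster (zero entropy), so MI is 0.0.
mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3])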
2
_load_own_variables
def _load_own_variables(self, store):
    for i, variable in enumerate(self.variables()):
        variable.assign(store[str(i)])


base_optimizer_keyword_args = 


@keras_export(
    "keras.optimizers.Optimizer",
    "keras.optimizers.experimental.Optimizer",
    v1=[],
)
2851235d5bc1c6603a97d7efffc7649b0a84b826
@keras_export( "keras.optimizers.Optimizer", "keras.optimizers.experimental.Optimizer", v1=[], )
12
optimizer.py
86
Use a single h5 file for all numerical state in the model. The modular design enables us to easily swap out the h5 file storage with any other form of storage (e.g. npz or tensorstore) in the future. Just implement a new IOHandler for the new storage system. PiperOrigin-RevId: 479718541
83,263
1
48
34
16
280,118
16
keras
12
keras/optimizers/optimizer_experimental/optimizer.py
Python
3
{ "docstring": "Set the state of this optimizer object.name: String. The name to use\n for momentum accumulator weights created by\n the optimizer.\n weight_decay: Float, defaults to None. If set, weight decay is applied.\n clipnorm: Float. If set, the gradient of each weight is individually\n clipped so that its norm is no higher than this value.\n clipvalue: Float. If set, the gradient of each weight is clipped to be no\n higher than this value.\n global_clipnorm: Float. If set, the gradient of all weights is clipped so\n that their global norm is no higher than this value.\n use_ema: Boolean, defaults to False. If True, exponential moving average\n (EMA) is applied. EMA consists of computing an exponential moving\n average of the weights of the model (as the weight values change after\n each training batch), and periodically overwriting the weights with\n their moving average.\n ema_momentum: Float, defaults to 0.99. Only used if `use_ema=True`. This is # noqa: E501\n the momentum to use when computing the EMA of the model's weights:\n `new_average = ema_momentum * old_average + (1 - ema_momentum) *\n current_variable_value`.\n ema_overwrite_frequency: Int or None, defaults to None. Only used if\n `use_ema=True`. Every `ema_overwrite_frequency` steps of iterations, we\n overwrite the model variable by its moving average. If None, the optimizer # noqa: E501\n does not overwrite model variables in the middle of training, and you\n need to explicitly overwrite the variables at the end of training\n by calling `optimizer.finalize_variable_values()` (which updates the model # noqa: E501\n variables in-place). When using the built-in `fit()` training loop, this\n happens automatically after the last epoch, and you don't need to do\n anything.\n jit_compile: Boolean, defaults to True. If True, the optimizer will use XLA # noqa: E501\n compilation. If no GPU device is found, this flag will be ignored.\n **kwargs: keyword arguments only used for backward compatibility.", "language": "en", "n_whitespaces": 494, "n_words": 298, "vocab_size": 151 }
https://github.com/keras-team/keras.git
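For context, a hedged sketch of the symmetric save path that would produce the store read above. This is not copied from the Keras source; the method name and the numpy() call are assumptions.

def _save_own_variables(self, store):
    # Mirror of _load_own_variables: key each optimizer variable by its
    # stringified index so loading can assign the values back in order.
    for i, variable in enumerate(self.variables()):
        store[str(i)] = variable.numpy()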
2
test_sessions_metrics_equal_num_keys
def test_sessions_metrics_equal_num_keys(self):
    empty_groupbyes = ["project", "release", "environment", "session.status"]
    interval_days = "1d"

    for groupby in empty_groupbyes:
        with patch(
            "sentry.api.endpoints.organization_sessions.release_health",
            SessionsReleaseHealthBackend(),
        ):
            sessions_data = result_sorted(self.get_sessions_data(groupby, interval_days))

        with patch(
            "sentry.release_health.metrics_sessions_v2.indexer.resolve", MockIndexer().resolve
        ), patch(
            "sentry.api.endpoints.organization_sessions.release_health",
            MetricsReleaseHealthBackend(),
        ):
            metrics_data = result_sorted(self.get_sessions_data(groupby, interval_days))

        errors = compare_results(
            sessions=sessions_data,
            metrics=metrics_data,
            rollup=interval_days * 24 * 60 * 60,  # days to seconds
        )
        assert len(errors) == 0
cfdb7fdc1fef7f8a364bbfef050cdcfc66c82371
14
test_metrics_sessions_v2.py
195
fix(metrics): Zero-fill response when there's no data [INGEST-941] (#32157) When there isn't any metrics data, the `groups` of the response is empty. However, the absence of data must be represented with an appropriate value. For example, `count_unique(user)` must return `0` when there aren't any users, instead of returning no data. The value representing the absence of data is `0` for sums and counts, and `None` for everything else (such as `p50`).
19,338
0
325
114
45
96,684
58
sentry
20
tests/sentry/release_health/test_metrics_sessions_v2.py
Python
22
{ "docstring": "\n Tests whether the number of keys in the metrics implementation of\n sessions data is the same as in the sessions implementation.\n\n Runs twice. Firstly, against sessions implementation to populate the\n cache. Then, against the metrics implementation, and compares with\n cached results.\n ", "language": "en", "n_whitespaces": 84, "n_words": 41, "vocab_size": 29 }
https://github.com/getsentry/sentry.git
1
get_linear_schedule_with_warmup
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
71289ba06ea897270ad6de0ea7ff641f4a7b246c
6
optimization.py
24
add lr schedule utils
120,727
0
8
26
5
335,246
5
diffusers
5
src/diffusers/optimization.py
Python
3
{ "docstring": "\n Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after\n a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer.\n\n Args:\n optimizer ([`~torch.optim.Optimizer`]):\n The optimizer for which to schedule the learning rate.\n num_warmup_steps (`int`):\n The number of steps for the warmup phase.\n num_training_steps (`int`):\n The total number of training steps.\n last_epoch (`int`, *optional*, defaults to -1):\n The index of the last epoch when resuming training.\n\n Return:\n `torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.\n ", "language": "en", "n_whitespaces": 185, "n_words": 90, "vocab_size": 57 }
https://github.com/huggingface/diffusers.git
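The record above keeps only the signature, so here is a hedged approximation of how such a schedule is commonly built on torch.optim.lr_scheduler.LambdaLR. The helper name and the exact decay arithmetic are assumptions, not the diffusers source.

from torch.optim.lr_scheduler import LambdaLR

def linear_schedule_with_warmup_sketch(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int) -> float:
        # Ramp from 0 to the base lr over the warmup steps...
        if current_step < num_warmup_steps:
            return current_step / max(1, num_warmup_steps)
        # ...then decay linearly back to 0 by the end of training.
        remaining = num_training_steps - current_step
        return max(0.0, remaining / max(1, num_training_steps - num_warmup_steps))

    return LambdaLR(optimizer, lr_lambda, last_epoch)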
6
_dedupe_indices
def _dedupe_indices(new, exclude):
    exclude = set(exclude)
    dums_new = set(get_dummy_indices(new))

    conflicts = dums_new.intersection(exclude)
    if len(conflicts) == 0:
        return None

    exclude.update(dums_new)
    self_args_free = [(i, None) for i in exclude]
    gen = _IndexStructure._get_generator_for_dummy_indices(self_args_free)
    repl = {}
    for d in conflicts:
        if -d in repl.keys():
            continue
        newname = gen(d.tensor_index_type)
        new_d = d.func(newname, *d.args[1:])
        repl[d] = new_d
        repl[-d] = -new_d

    if len(repl) == 0:
        return None

    new_renamed = new._replace_indices(repl)
    return new_renamed
22174995eac1f437c5f4abe0232760877daf586f
13
tensor.py
240
TensMul._dedupe_indices: remove index_structure arg _get_generator_for_dummy_indices is a staticmethod, and so I can just call _IndexStructure._get_generator_for_dummy_indices
49,714
0
257
148
44
200,579
66
sympy
25
sympy/tensor/tensor.py
Python
26
{ "docstring": "\n exclude: set\n new: TensExpr\n\n If ``new`` has any dummy indices that are in ``exclude``, return a version\n of new with those indices replaced. If no replacements are needed,\n return None\n\n \n ``self_args_free`` is to be passed to ``_IndexStructure._get_generator_for_dummy_indices()``.\n Since the latter does not use the index position for anything, we just\n set it as ``None`` here.\n ", "language": "en", "n_whitespaces": 127, "n_words": 55, "vocab_size": 48 }
https://github.com/sympy/sympy.git
9
tls_session_update
def tls_session_update(self, msg_str):
    super(TLS13ClientHello, self).tls_session_update(msg_str)

    s = self.tls_session
    if self.sidlen and self.sidlen > 0:
        s.sid = self.sid
        s.middlebox_compatibility = True

    self.random_bytes = msg_str[6:38]
    s.client_random = self.random_bytes
    if self.ext:
        for e in self.ext:
            if isinstance(e, TLS_Ext_SupportedVersion_CH):
                for ver in sorted(e.versions, reverse=True):
                    # RFC 8701: GREASE of TLS will send unknown versions
                    # here. We have to ignore them
                    if ver in _tls_version:
                        self.tls_session.advertised_tls_version = ver
                        break
            if isinstance(e, TLS_Ext_SignatureAlgorithms):
                s.advertised_sig_algs = e.sig_algs


###############################################################################
#                                ServerHello                                  #
###############################################################################
eb1e56d676c78ccbd5a3c820b931ac50f6a5a4f8
18
handshake.py
201
TLS1.3: wrong parsing size of random_bytes (#3539) Co-authored-by: dim0x69 <[email protected]>
52,590
0
410
126
53
209,068
76
scapy
25
scapy/layers/tls/handshake.py
Python
17
{ "docstring": "\n Either for parsing or building, we store the client_random\n along with the raw string representing this handshake message.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
https://github.com/secdev/scapy.git
1
_reshape_tensor
def _reshape_tensor(self, new_len, tensor, indices):
    reshaped_tensor = torch.zeros(new_len, device=tensor.device, dtype=tensor.dtype)
    reshaped_tensor[indices] = tensor
    return reshaped_tensor
b1acb681207559da56a787ba96e16f0e23697d92
10
director_bb2.py
60
Patch 8322 (#4709) * add dafetymix teacher * safety_mix teacher * safety_mix teacher pos and neg teachers * add tests for teacher * add license info * improvement * add task list * add task list and lint * add init.py * adding some patch to director * seeker changes * th * 3 * jing * changes * z and r * remove .opts * fix docs * add contrractions * lint Co-authored-by: Dexter Ju <[email protected]> Co-authored-by: Jing Xu <[email protected]>
47,242
0
43
40
13
195,279
15
ParlAI
10
projects/fits/agents/director_bb2.py
Python
4
{ "docstring": "\n This method reshapes the tensor back to the batch size.\n\n Args:\n batch: batch being processed in this iteration.\n tensor: vector (shape: b' X 1), where b' <= b.\n indices: indices of (either classification or generation) exs for which the loss was computed.\n\n Returns:\n reshaped tensor of shape: b X 1.\n ", "language": "en", "n_whitespaces": 123, "n_words": 50, "vocab_size": 43 }
https://github.com/facebookresearch/ParlAI.git
6
getphraselist
def getphraselist(self):
    plist = []

    while self.pos < len(self.field):
        if self.field[self.pos] in self.FWS:
            self.pos += 1
        elif self.field[self.pos] == '"':
            plist.append(self.getquote())
        elif self.field[self.pos] == '(':
            self.commentlist.append(self.getcomment())
        elif self.field[self.pos] in self.phraseends:
            break
        else:
            plist.append(self.getatom(self.phraseends))

    return plist
8198943edd73a363c266633e1aa5b2a9e9c9f526
15
_parseaddr.py
196
add python 3.10.4 for windows
57,004
0
193
119
26
223,611
35
XX-Net
13
python3.10.4/Lib/email/_parseaddr.py
Python
14
{ "docstring": "Parse a sequence of RFC 2822 phrases.\n\n A phrase is a sequence of words, which are in turn either RFC 2822\n atoms or quoted-strings. Phrases are canonicalized by squeezing all\n runs of continuous whitespace into one space.\n ", "language": "en", "n_whitespaces": 66, "n_words": 37, "vocab_size": 30 }
https://github.com/XX-net/XX-Net.git
1
test_login_appservice_wrong_as
def test_login_appservice_wrong_as(self) -> None:
    self.register_appservice_user(AS_USER, self.service.token)

    params = {
        "type": login.LoginRestServlet.APPSERVICE_TYPE,
        "identifier": {"type": "m.id.user", "user": AS_USER},
    }
    channel = self.make_request(
        b"POST", LOGIN_URL, params, access_token=self.another_service.token
    )

    self.assertEquals(channel.result["code"], b"403", channel.result)
64c73c6ac88a740ee480a0ad1f9afc8596bccfa4
11
test_login.py
136
Add type hints to `tests/rest/client` (#12066)
71,281
0
110
83
27
246,588
28
synapse
17
tests/rest/client/test_login.py
Python
11
{ "docstring": "Test that as users cannot login with wrong as token", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
https://github.com/matrix-org/synapse.git
19
__exit__
def __exit__(self, exc_type, exc_val, traceback):
    if exc_type == ModuleNotFoundError:
        missing_module = self._pkg_name or exc_val.name
        with open(os.path.join(__resources_path__, 'extra-requirements.txt')) as fp:
            for v in fp:
                if (
                    v.strip()
                    and not v.startswith('#')
                    and v.startswith(missing_module)
                    and ':' in v
                ):
                    missing_module, install_tags = v.split(':')
                    self._tags.append(missing_module)
                    self._tags.extend(vv.strip() for vv in install_tags.split(','))
                    break

        if self._tags:
            from jina.helper import colored

            req_msg = colored('fallback to default behavior', color='yellow')
            if self._required:
                req_msg = colored('and it is required', color='red')
            err_msg = f
            avail_tags = ' '.join(
                colored(f'[{tag}]', attrs='bold') for tag in self._tags
            )
            err_msg += (
                f'\n\nTo enable this feature, use {colored("pip install jina[TAG]", attrs="bold")}, '
                f'where {colored("[TAG]", attrs="bold")} is one of {avail_tags}.\n'
            )
        else:
            err_msg = f'{exc_val.msg}'

        if self._required:
            if self._verbose:
                if self._logger:
                    self._logger.critical(err_msg)
                    if self._help_text:
                        self._logger.error(self._help_text)
                else:
                    warnings.warn(err_msg, RuntimeWarning, stacklevel=2)
            raise exc_val
        else:
            if self._verbose:
                if self._logger:
                    self._logger.warning(err_msg)
                    if self._help_text:
                        self._logger.info(self._help_text)
                else:
                    warnings.warn(err_msg, RuntimeWarning, stacklevel=2)

    return True  # suppress the error
cea300655ed8be70d74c390ca12e8b09fb741665
19
importer.py
565
refactor: use absolute imports (#4167)
1,848
0
992
296
101
10,555
143
jina
46
jina/importer.py
Python
49
{ "docstring": "Python package \"{colored(missing_module, attrs='bold')}\" is not installed, {req_msg}.\n You are trying to use a feature not enabled by your current Jina installation.", "language": "en", "n_whitespaces": 40, "n_words": 22, "vocab_size": 21 }
https://github.com/jina-ai/jina.git
1
_patch_app_session
def _patch_app_session(self):
    return mock.patch(
        "streamlit.server.server.AppSession",
        # new_callable must return a function, not an object, or else
        # there will only be a single AppSession mock. Hence the lambda.
        new_callable=lambda: self._create_mock_app_session,
    )
704eab3478cf69847825b23dabf15813a8ac9fa2
10
server_test_case.py
40
Rename and refactor `Report` machinery (#4141) This refactor renames (almost) everything related to the outdated "report" concept with more precise concepts that we use throughout our code, primarily "script run", "session", and "app".
26,307
0
96
22
28
118,584
31
streamlit
6
lib/tests/server_test_case.py
Python
5
{ "docstring": "Mock the Server's AppSession import. We don't want\n actual sessions to be instantiated, or scripts to be run.\n ", "language": "en", "n_whitespaces": 32, "n_words": 18, "vocab_size": 16 }
https://github.com/streamlit/streamlit.git
9
get_sales_orders
def get_sales_orders(self):
    so_filter = item_filter = ""
    bom_item = "bom.item = so_item.item_code"

    date_field_mapper = {
        "from_date": (">=", "so.transaction_date"),
        "to_date": ("<=", "so.transaction_date"),
        "from_delivery_date": (">=", "so_item.delivery_date"),
        "to_delivery_date": ("<=", "so_item.delivery_date"),
    }

    for field, value in date_field_mapper.items():
        if self.get(field):
            so_filter += f" and {value[1]} {value[0]} %({field})s"

    for field in ["customer", "project", "sales_order_status"]:
        if self.get(field):
            so_field = "status" if field == "sales_order_status" else field
            so_filter += f" and so.{so_field} = %({field})s"

    if self.item_code and frappe.db.exists("Item", self.item_code):
        bom_item = self.get_bom_item() or bom_item
        item_filter += " and so_item.item_code = %(item_code)s"

    open_so = frappe.db.sql(
        f,
        self.as_dict(),
        as_dict=1,
    )

    return open_so


@frappe.whitelist()
494bd9ef78313436f0424b918f200dab8fc7c20b
@frappe.whitelist()
13
production_plan.py
329
style: format code with black
14,183
1
67
158
59
66,418
93
erpnext
20
erpnext/manufacturing/doctype/production_plan/production_plan.py
Python
38
{ "docstring": "\n\t\tselect distinct so.name, so.transaction_date, so.customer, so.base_grand_total\n\t\tfrom `tabSales Order` so, `tabSales Order Item` so_item\n\t\twhere so_item.parent = so.name\n\t\t\tand so.docstatus = 1 and so.status not in (\"Stopped\", \"Closed\")\n\t\t\tand so.company = %(company)s\n\t\t\tand so_item.qty > so_item.work_order_qty {so_filter} {item_filter}\n\t\t\tand (exists (select name from `tabBOM` bom where {bom_item}\n\t\t\t\t\tand bom.is_active = 1)\n\t\t\t\tor exists (select name from `tabPacked Item` pi\n\t\t\t\t\twhere pi.parent = so.name and pi.parent_item = so_item.item_code\n\t\t\t\t\t\tand exists (select name from `tabBOM` bom where bom.item=pi.item_code\n\t\t\t\t\t\t\tand bom.is_active = 1)))\n\t\t", "language": "en", "n_whitespaces": 68, "n_words": 80, "vocab_size": 49 }
https://github.com/frappe/erpnext.git
4
_create_examples
def _create_examples(self, lines, set_type):
    examples = []
    for i, line in enumerate(lines):
        if i == 0:
            continue
        guid = f"{set_type}-{i}"
        text_a = line[3]
        text_b = line[4]
        label = None if set_type == "test" else line[0]
        examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
afe5d42d8d1d80af911ed980c2936bfe887078f6
12
glue.py
137
Black preview (#17217) * Black preview * Fixup too! * Fix check copies * Use the same version as the CI * Bump black
6,924
0
150
83
34
38,166
41
transformers
14
src/transformers/data/processors/glue.py
Python
11
{ "docstring": "Creates examples for the training, dev and test sets.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/huggingface/transformers.git
8
_wait
async def _wait(fs, timeout, return_when, loop):
    assert fs, 'Set of Futures is empty.'
    waiter = loop.create_future()
    timeout_handle = None
    if timeout is not None:
        timeout_handle = loop.call_later(timeout, _release_waiter, waiter)
    counter = len(fs)
8198943edd73a363c266633e1aa5b2a9e9c9f526
10
tasks.py
80
add python 3.10.4 for windows
56,138
0
57
132
27
220,831
32
XX-Net
12
python3.10.4/Lib/asyncio/tasks.py
Python
24
{ "docstring": "Internal helper for wait().\n\n The fs argument must be a collection of Futures.\n ", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 13 }
https://github.com/XX-net/XX-Net.git
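_wait() is the internal helper behind the public asyncio.wait(); a typical call of the public API that ends up in this helper looks like:

import asyncio

async def main():
    tasks = [asyncio.ensure_future(asyncio.sleep(d)) for d in (0.1, 0.2, 0.3)]
    # asyncio.wait() delegates to the internal _wait() helper shown above.
    done, pending = await asyncio.wait(
        tasks, timeout=0.25, return_when=asyncio.FIRST_COMPLETED
    )
    print(len(done), len(pending))

asyncio.run(main())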
5
get_timestamps
def get_timestamps(self, session_id=None):
    logger.debug("Getting timestamps: (session_id: %s, is_training: %s)",
                 session_id, self._is_training)
    retval = {}
    for idx in [session_id] if session_id else self.session_ids:
        self._check_cache(idx)
        data = self._cache.get_data(idx, "timestamps")
        if not data:
            continue
        retval[idx] = data[idx]["timestamps"]
    logger.debug({k: v.shape for k, v in retval.items()})
    return retval
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
11
event_reader.py
157
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
19,800
0
164
98
37
100,303
43
faceswap
17
lib/gui/analysis/event_reader.py
Python
12
{ "docstring": " Read the timestamps from the TensorBoard logs.\n\n As loss timestamps are slightly different for each loss, we collect the timestamp from the\n `batch_loss` key.\n\n Parameters\n ----------\n session_id: int, optional\n The Session ID to return the timestamps for. Set to ``None`` to return all session\n timestamps. Default ``None``\n\n Returns\n -------\n dict\n The session id(s) as key with list of timestamps per step as value\n ", "language": "en", "n_whitespaces": 160, "n_words": 63, "vocab_size": 48 }
https://github.com/deepfakes/faceswap.git
1
test_querysets_related_name
def test_querysets_related_name(self):
    self.assertQuerysetEqual(
        self.business.employees.all(),
        [
            "Fran Bones",
            "Dan Jones",
        ],
        str,
    )
    self.assertQuerysetEqual(
        self.fran.business_set.all(),
        [
            "Sears",
        ],
        lambda b: b.name,
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
tests.py
93
Refs #33476 -- Reformatted code with Black.
50,147
0
189
57
17
202,528
21
django
11
tests/custom_pk/tests.py
Python
16
{ "docstring": "\n Custom pk doesn't affect related_name based lookups\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
https://github.com/django/django.git
2
draw_bboxes
def draw_bboxes(ax, bboxes, color='g', alpha=0.8, thickness=2):
    polygons = []
    for i, bbox in enumerate(bboxes):
        bbox_int = bbox.astype(np.int32)
        poly = [[bbox_int[0], bbox_int[1]], [bbox_int[0], bbox_int[3]],
                [bbox_int[2], bbox_int[3]], [bbox_int[2], bbox_int[1]]]
        np_poly = np.array(poly).reshape((4, 2))
        polygons.append(Polygon(np_poly))
    p = PatchCollection(
        polygons,
        facecolor='none',
        edgecolors=color,
        linewidths=thickness,
        alpha=alpha)
    ax.add_collection(p)

    return ax
301d4a2d4cfe1cdb62608e2892924be3e67e3098
12
image.py
222
[Feature] Support visualization for Panoptic Segmentation (#7041) * First commit of v2 * split the functions * Support to show panoptic result * temp * Support to show gt * support show gt * fix lint * Support to browse datasets * Fix unit tests * Fix findContours * fix comments * Fix pre-commit * fix lint * Add the type of an argument
70,168
0
139
153
37
243,966
43
mmdetection
26
mmdet/core/visualization/image.py
Python
16
{ "docstring": "Draw bounding boxes on the axes.\n\n Args:\n ax (matplotlib.Axes): The input axes.\n bboxes (ndarray): The input bounding boxes with the shape\n of (n, 4).\n color (list[tuple] | matplotlib.color): the colors for each\n bounding boxes.\n alpha (float): Transparency of bounding boxes. Default: 0.8.\n thickness (int): Thickness of lines. Default: 2.\n\n Returns:\n matplotlib.Axes: The result axes.\n ", "language": "en", "n_whitespaces": 127, "n_words": 54, "vocab_size": 39 }
https://github.com/open-mmlab/mmdetection.git
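A small usage sketch with synthetic boxes (the blank image and the coordinates are made up for illustration):

import matplotlib.pyplot as plt
import numpy as np

fig, ax = plt.subplots()
ax.imshow(np.zeros((200, 200, 3), dtype=np.uint8))
# Two synthetic boxes in (x1, y1, x2, y2) format.
bboxes = np.array([[10, 10, 80, 60], [100, 120, 180, 190]], dtype=np.float32)
draw_bboxes(ax, bboxes, color='r', alpha=0.9, thickness=1)
plt.show()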
19
prepare_coco_detection
def prepare_coco_detection(self, image, target, return_segmentation_masks=False):
    w, h = image.size

    image_id = target["image_id"]
    image_id = np.asarray([image_id], dtype=np.int64)

    # get all COCO annotations for the given image
    anno = target["annotations"]
    anno = [obj for obj in anno if "iscrowd" not in obj or obj["iscrowd"] == 0]

    boxes = [obj["bbox"] for obj in anno]
    # guard against no boxes via resizing
    boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
    boxes[:, 2:] += boxes[:, :2]
    boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=w)
    boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=h)

    classes = [obj["category_id"] for obj in anno]
    classes = np.asarray(classes, dtype=np.int64)

    if return_segmentation_masks:
        segmentations = [obj["segmentation"] for obj in anno]
        masks = self.convert_coco_poly_to_mask(segmentations, h, w)

    keypoints = None
    if anno and "keypoints" in anno[0]:
        keypoints = [obj["keypoints"] for obj in anno]
        keypoints = np.asarray(keypoints, dtype=np.float32)
        num_keypoints = keypoints.shape[0]
        if num_keypoints:
            keypoints = keypoints.reshape((-1, 3))

    keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
    boxes = boxes[keep]
    classes = classes[keep]
    if return_segmentation_masks:
        masks = masks[keep]
    if keypoints is not None:
        keypoints = keypoints[keep]

    target = {}
    target["boxes"] = boxes
    target["class_labels"] = classes
    if return_segmentation_masks:
        target["masks"] = masks
    target["image_id"] = image_id
    if keypoints is not None:
        target["keypoints"] = keypoints

    # for conversion to coco api
    area = np.asarray([obj["area"] for obj in anno], dtype=np.float32)
    iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno], dtype=np.int64)
    target["area"] = area[keep]
    target["iscrowd"] = iscrowd[keep]

    target["orig_size"] = np.asarray([int(h), int(w)], dtype=np.int64)
    target["size"] = np.asarray([int(h), int(w)], dtype=np.int64)

    return image, target


# Copied from transformers.models.detr.feature_extraction_detr.DetrFeatureExtractor.prepare_coco_panoptic
1ac698744c4dbdf1495d303246d08ffacdf4f5b8
14
feature_extraction_yolos.py
832
Add YOLOS (#16848) * First draft * Add YolosForObjectDetection * Make forward pass work * Add mid position embeddings * Add interpolation of position encodings * Add expected values * Add YOLOS to tests * Add integration test * Support tiny model as well * Support all models in conversion script * Remove mid_pe_size attribute * Make more tests pass * Add model to README and fix config * Add copied from statements * Rename base_model_prefix to vit * Add missing YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP * Apply suggestions from code review * Apply more suggestions from code review * Convert remaining checkpoints * Improve docstrings * Add YolosFeatureExtractor * Add feature extractor to docs * Add corresponding tests * Fix style * Fix docs * Apply suggestion from code review * Fix bad rebase * Fix some more bad rebase * Fix missing character * Improve docs and variable names Co-authored-by: Niels Rogge <[email protected]>
6,843
0
629
528
121
37,637
242
transformers
32
src/transformers/models/yolos/feature_extraction_yolos.py
Python
45
{ "docstring": "\n Convert the target in COCO format into the format expected by DETR.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 10 }
https://github.com/huggingface/transformers.git
2
secure_popen
def secure_popen(cmd):
    ret = ''

    # Split by multiple commands '&&'
    for c in cmd.split('&&'):
        ret += __secure_popen(c)

    return ret
4046fbb18cf16be684ada228314c1f328779a3c1
10
secure.py
51
Fix typos Found via `codespell -S ./venv,./glances/outputs,*.svg -L hart,bu,te,statics`
15,402
0
42
27
18
70,176
20
glances
6
glances/secure.py
Python
5
{ "docstring": "A more or less secure way to execute system commands\n\n Multiple command should be separated with a &&\n\n :return: the result of the commands\n ", "language": "en", "n_whitespaces": 33, "n_words": 24, "vocab_size": 22 }
https://github.com/nicolargo/glances.git
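The per-command helper __secure_popen is not included in this record; a hedged stand-in for the general idea (run each sub-command without spawning a shell, so shell metacharacters are not interpreted) might look like:

import shlex
import subprocess

def __secure_popen(cmd):
    # Hypothetical single-command runner, not the actual Glances implementation:
    # tokenize the command ourselves and avoid shell=True entirely.
    result = subprocess.run(shlex.split(cmd), capture_output=True, text=True)
    return result.stdout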
1
at
def at(self, axis=None):  # noqa: PR01, RT01, D200
    from .indexing import _LocIndexer

    return _LocIndexer(self)
605efa618e7994681f57b11d04d417f353ef8d50
7
base.py
37
DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333) Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Alexander Myskov <[email protected]>
35,484
0
36
20
14
153,603
14
modin
5
modin/pandas/base.py
Python
3
{ "docstring": "\n Get a single value for a row/column label pair.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 8 }
https://github.com/modin-project/modin.git
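A quick usage sketch with a hypothetical frame; since .at resolves to a _LocIndexer, access is by row/column label:

import modin.pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3, 4]}, index=["x", "y"])
df.at["y", "b"]        # 4
df.at["x", "a"] = 10   # scalar assignment by label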
2
update
def update(self, **kwargs):
    self._not_support_combined_queries("update")
    if self.query.is_sliced:
        raise TypeError("Cannot update a query once a slice has been taken.")
    self._for_write = True
    query = self.query.chain(sql.UpdateQuery)
    query.add_update_values(kwargs)
    # Clear any annotations so that they won't be present in subqueries.
    query.annotations = {}
    with transaction.mark_for_rollback_on_error(using=self.db):
        rows = query.get_compiler(self.db).execute_sql(CURSOR)
    self._result_cache = None
    return rows

update.alters_data = True
9c19aff7c7561e3a82978a272ecdaad40dda5c00
13
query.py
162
Refs #33476 -- Reformatted code with Black.
51,201
0
154
90
43
205,763
52
django
23
django/db/models/query.py
Python
12
{ "docstring": "\n Update all elements in the current QuerySet, setting all the given\n fields to the appropriate values.\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 13 }
https://github.com/django/django.git
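Typical usage against a hypothetical model (assumes a configured Django project); the call issues a single SQL UPDATE and returns the number of rows matched:

from django.db import models

class Article(models.Model):
    status = models.CharField(max_length=20)
    reviewed = models.BooleanField(default=False)

# e.g. in a view or management command:
rows = Article.objects.filter(status="draft").update(reviewed=True)
print(f"{rows} draft articles marked as reviewed")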
6
_identity_from_extracted
def _identity_from_extracted(cls, filename) -> Tuple[np.ndarray, bool]:
    if os.path.splitext(filename)[-1].lower() != ".png":
        logger.info("'%s' not a png. Returning empty array", filename)
        return np.array([]), False

    meta = read_image_meta(filename)
    if "itxt" not in meta or "alignments" not in meta["itxt"]:
        logger.debug("'%s' does not contain faceswap data. Returning empty array", filename)
        return np.array([]), False

    align: "PNGHeaderAlignmentsDict" = meta["itxt"]["alignments"]
    if "identity" not in align or "vggface2" not in align["identity"]:
        logger.debug("'%s' does not contain identity data. Returning empty array", filename)
        return np.array([]), True

    retval = np.array(align["identity"]["vggface2"])
    logger.debug("Obtained identity for '%s'. Shape: %s", filename, retval.shape)
    return retval, True
1d1face00d9476896e7857d3976afce383585d1b
12
extract.py
285
Update Face Filter - Remove old face filter - plugins.extract.pipeline: Expose plugins directly - Change `is_aligned` from plugin level to ExtractMedia level - Allow extract pipeline to take faceswap aligned images - Add ability for recognition plugins to accept aligned faces as input - Add face filter to recognition plugin - Move extractor pipeline IO ops to own class
21,385
0
217
166
52
102,011
88
faceswap
20
scripts/extract.py
Python
31
{ "docstring": " Test whether the given image is a faceswap extracted face and contains identity\n information. If so, return the identity embedding\n\n Parameters\n ----------\n filename: str\n Full path to the image file to load\n\n Returns\n -------\n :class:`numpy.ndarray`\n The identity embeddings, if they can be obtained from the image header, otherwise an\n empty array\n bool\n ``True`` if the image is a faceswap extracted image otherwise ``False``\n ", "language": "en", "n_whitespaces": 171, "n_words": 63, "vocab_size": 46 }
https://github.com/deepfakes/faceswap.git