The dataset schema (column name, dtype, and the observed range of values or string lengths):

| Column         | Type   | Observed range           |
|----------------|--------|--------------------------|
| complexity     | int64  | 1 - 139                  |
| fun_name       | string | lengths 1 - 80           |
| code           | string | lengths 101 - 62.2k      |
| commit_id      | string | length 40 (fixed)        |
| ast_errors     | string | lengths 0 - 3.11k        |
| ast_levels     | int64  | 6 - 36                   |
| file_name      | string | lengths 5 - 79           |
| n_ast_nodes    | int64  | 17 - 19.2k               |
| commit_message | string | lengths 3 - 15.3k        |
| d_id           | int64  | 12 - 121k                |
| n_ast_errors   | int64  | 0 - 9                    |
| n_whitespaces  | int64  | 4 - 10.8k                |
| token_counts   | int64  | 5 - 3.06k                |
| vocab_size     | int64  | 4 - 1.11k                |
| id             | int64  | 20 - 338k                |
| n_words        | int64  | 4 - 4.82k                |
| repo           | string | lengths 3 - 22           |
| n_identifiers  | int64  | 2 - 176                  |
| path           | string | lengths 7 - 134          |
| language       | string | 1 class (Python)         |
| nloc           | int64  | 1 - 413                  |
| documentation  | dict   | docstring plus metadata  |
| url            | string | lengths 31 - 59          |
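The sample rows below follow this schema, one field per line, with empty `ast_errors` values omitted. As a quick orientation, here is a minimal sketch of how rows with this schema could be loaded and filtered with the Hugging Face `datasets` library; the dataset identifier `user/code-functions-corpus` and the `train` split name are placeholders, since the actual repository id is not given here.

```python
# Minimal sketch, not the canonical loader for this dataset.
# "user/code-functions-corpus" and the "train" split are hypothetical placeholders.
from datasets import load_dataset

ds = load_dataset("user/code-functions-corpus", split="train")

# Inspect one record: the function source, where it lives, and its parsed docstring.
row = ds[0]
print(row["fun_name"], row["repo"], row["path"])
print(row["code"])
print(row["documentation"]["docstring"])

# Example filter: keep short, low-complexity functions for a quick experiment.
small = ds.filter(lambda r: r["complexity"] <= 3 and r["nloc"] <= 10)
print(len(small), "functions kept")
```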
9
get_snuba_column_name
def get_snuba_column_name(name, dataset=Dataset.Events):
    no_conversion = {"group_id", "group_ids", "project_id", "start", "end"}
    if name in no_conversion:
        return name

    if not name or name.startswith("tags[") or QUOTED_LITERAL_RE.match(name):
        return name

    measurement_name = get_measurement_name(name)
    span_op_breakdown_name = get_span_op_breakdown_name(name)
    if "measurements_key" in DATASETS[dataset] and measurement_name:
        default = f"measurements[{measurement_name}]"
    elif "span_op_breakdowns_key" in DATASETS[dataset] and span_op_breakdown_name:
        default = f"span_op_breakdowns[{span_op_breakdown_name}]"
    else:
        default = f"tags[{name}]"

    return DATASETS[dataset].get(name, default)
1bcb129d69a6c4e481b950ebc5871e9c118db74f
11
snuba.py
200
perf(issue-search): optimize querying on perf issues by using hasAny on transaction.group_id (#41685) Special optimization for searching on transactions for performance issues by doing a `hasAny` check instead of arrayJoining `group_ids` column and applying a filter after. before query: ``` SELECT (arrayJoin((group_ids AS _snuba_group_ids)) AS _snuba_group_id), _snuba_group_id, (multiply(toUInt64(max((finish_ts AS _snuba_timestamp))), 1000) AS _snuba_last_seen), (ifNull(uniq(_snuba_group_id), 0) AS _snuba_total) FROM transactions_local SAMPLE 1.0 WHERE greaterOrEquals((finish_ts AS _snuba_finish_ts), toDateTime('2022-08-26T02:02:49', 'Universal')) AND less(_snuba_finish_ts, toDateTime('2022-11-24T02:03:49', 'Universal')) AND in((project_id AS _snuba_project_id), tuple(4550959674425346)) AND equals(('transaction' AS _snuba_type), 'transaction') AND in(_snuba_project_id, tuple(4550959674425346)) AND in(_snuba_group_id, (1, 2)) GROUP BY _snuba_group_id WITH TOTALS ORDER BY _snuba_last_seen DESC, _snuba_group_id ASC LIMIT 150 OFFSET 0 ``` after query: ``` SELECT (arrayJoin(arrayIntersect([1, 2], (group_ids AS _snuba_group_ids))) AS _snuba_group_id), (multiply(toUInt64(max((finish_ts AS _snuba_timestamp))), 1000) AS _snuba_last_seen), (ifNull(uniq(_snuba_group_id), 0) AS _snuba_total), _snuba_group_id FROM transactions_local SAMPLE 1.0 WHERE greaterOrEquals((finish_ts AS _snuba_finish_ts), toDateTime('2022-08-26T02:01:32', 'Universal')) AND less(_snuba_finish_ts, toDateTime('2022-11-24T02:02:32', 'Universal')) AND in((project_id AS _snuba_project_id), tuple(4550959669379074)) AND equals(hasAny(_snuba_group_ids, [1, 2]), 1) AND equals(('transaction' AS _snuba_type), 'transaction') AND in(_snuba_project_id, tuple(4550959669379074)) GROUP BY _snuba_group_id WITH TOTALS ORDER BY _snuba_last_seen DESC, _snuba_group_id ASC LIMIT 150 OFFSET 0 ```
18,492
0
121
110
37
89,031
56
sentry
16
src/sentry/utils/snuba.py
Python
15
{ "docstring": "\n Get corresponding Snuba column name from Sentry snuba map, if not found\n the column is assumed to be a tag. If name is falsy or name is a quoted literal\n (e.g. \"'name'\"), leave unchanged.\n ", "language": "en", "n_whitespaces": 47, "n_words": 34, "vocab_size": 28 }
https://github.com/getsentry/sentry.git
1
test_skip_noarchive_withtext
def test_skip_noarchive_withtext(self):
    parser = RasterisedDocumentParser(None)

    parser.parse(
        os.path.join(self.SAMPLE_FILES, "multi-page-digital.pdf"),
        "application/pdf",
    )

    self.assertIsNone(parser.archive_path)
    self.assertContainsStrings(
        parser.get_text().lower(),
        ["page 1", "page 2", "page 3"],
    )
b3b2519bf03185aa12028fa68d3b8f8860555e6e
11
test_parser.py
109
Fixes the creation of an archive file, even if noarchive was specified
117,019
0
113
63
18
319,924
20
paperless-ngx
14
src/paperless_tesseract/tests/test_parser.py
Python
11
{ "docstring": "\n GIVEN:\n - File with existing text layer\n - OCR mode set to skip_noarchive\n WHEN:\n - Document is parsed\n THEN:\n - Text from images is extracted\n - No archive file is created\n ", "language": "en", "n_whitespaces": 115, "n_words": 31, "vocab_size": 25 }
https://github.com/paperless-ngx/paperless-ngx.git
1
test_createcachetable_with_table_argument
def test_createcachetable_with_table_argument(self):
    self.drop_table()
    out = io.StringIO()
    management.call_command(
        "createcachetable",
        "test cache table",
        verbosity=2,
        stdout=out,
    )
    self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
9
tests.py
83
Refs #33476 -- Reformatted code with Black.
50,030
0
107
47
20
201,975
21
django
12
tests/cache/tests.py
Python
10
{ "docstring": "\n Delete and recreate cache table with legacy behavior (explicitly\n specifying the table name).\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 12 }
https://github.com/django/django.git
1
get_engle_granger_two_step_cointegration_test
def get_engle_granger_two_step_cointegration_test(dependent_series, independent_series):
    warnings.simplefilter(action="ignore", category=FutureWarning)
    long_run_ols = sm.OLS(dependent_series, sm.add_constant(independent_series))
    warnings.simplefilter(action="default", category=FutureWarning)
    long_run_ols_fit = long_run_ols.fit()

    c, gamma = long_run_ols_fit.params
    z = long_run_ols_fit.resid

    short_run_ols = sm.OLS(dependent_series.diff().iloc[1:], (z.shift().iloc[1:]))
    short_run_ols_fit = short_run_ols.fit()

    alpha = short_run_ols_fit.params[0]

    # NOTE: The p-value returned by the adfuller function assumes we do not estimate z
    # first, but test stationarity of an unestimated series directly. This assumption
    # should have limited effect for high N, however. Critical values taking this into
    # account more accurately are provided in e.g. McKinnon (1990) and Engle & Yoo (1987).
    adfstat, pvalue, _, _, _ = adfuller(z, maxlag=1, autolag=None)

    return c, gamma, alpha, z, adfstat, pvalue
9e1a58e2dbedec4e4a9f9c2e32ddf091776c606b
13
econometrics_model.py
230
Here we merge all API Refactor related branches (#2236) * Update api.py * Updated forex menu * refactor ycrv command * refactor ycrv command black * refactor ecocal command * Minh changes * Adding space to test pushing * title fix ecocal df * get economic calendar annotation * fix investingcom tests * refactor index command * refactor overview command * give defaults to wsj view function args * rename date args investincom * refacto bigmac command * fix ecocal typo * refactor rtps command * alphavantage gdp * alphavantage gdp per capita * alphavantage cpi * alphavantage tyld * alphavantage inf * refactor macro command * refactor macro command w helpers * refactor treasury command * fix macro on terminal * treasury labels * refactor maturities * update treasury maturities doc strings * refactor get economic calendar finhub * refactor map command api * display map filter choices * route economy api to performance map * route economy api to performance map * display group choices on valuation command * refactor performance and valuation commands * refactor spectrum model and view * add choices to spectrum controller * delete image after view * fix model tests finviz * fix finciz view tests * refactor futures * fix some tests * fix more tests * fix controller test * refactor fred series notes * update fred notes docstring * refacto fred series ids * fix pred and qa when empty datasets * refactor fred * uncomment stuff * refacto get series data * fix some tests * set defaults on args * refactor fred yield curve * black * fix spell and remove ecocal names * fix linting * linting * pylint fix * change dangerous defaults * Working through crypto fixes (#2256) * Working through crypto fixes * Continued adding crypto stuff * Added crypto overview * Added test fixes * Added fixtures * Fixed tests * Fixed charting issue * Removed broken APIs * Final adjustments * Added test fixes * map get groups and get ycrv countries into old api * exposed econdb helper funcs * remove helpers * refactor search indices * linting * refactor arg currency * pylint from currency * Started switching crpyto ascending to ascend * Merging * Portfolio model arguements, params, and docstring * Refactored for etf commands (#2292) * Refactored for etf commands * Fixed tests * Added load command * Fixed menu * Portfolio logic fixes * Added econometrics (#2260) * Added econometrics * Fixed tests * Simplified API * Added test fixes * Added test csv * Allowed examples to be loaded * Fund refactor (#2291) * Fund refactor * Changed fund_name and fund to name * Changed ascending to ascend * Stock menu refactoring for easier API usage (#2194) * Stocks refactoring for easier API usage * Linting * Refactor newly added features * Linting * Fixing tests * Refactor common files used by stocks menu * Fixing flake8 * Fix linting and tests * Linting * Fix flake8 * refactor insider_data * refactor mentions * refactor watchlist * refactor sentiment * refactor sentiment * fix yahoofinance tests * refactor load and candle * refactor get_news and display_news * refactor stocks.ins.act * candle default matplotlib * fix yahoofinance_view tests * fix ark model tests * fix ark view tests * fix business insider model * fix business insider view * refactor csimarket model * fix tests csi market model * update dd controller * fix get suppliers tests * fix dd controller tests * fix finhub tests * fix finviz tests * fix fmp tests * fix marketwatch tests * corrected argument keywords in test_bt_model * corrected argument keywords in test_bt_view * refactor fa 
controller * refactor marketwatch view * refactor gov controller * fix tests fa av * fix tests elect * fix dcf tests * fix polygon tests * fix fmp tests * fix quiverquant tests * fix yahoofinance fa tests * fix more fa tests * fix insider tests * fix more tests * fix more tests * fix options tests * fix stock gov tests * fix tests test_ba_controller * fix tests for test_finviz_compare_model.py * fixed 2 tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fixed tests * fix final tests * fixed tests * fixed tests * Fix tests * black * forgot to black tests * fixed tests * fixed tests * fixed tests * fixed tests * flakefix * Tests + code : Stocks / Discovery * fix tests * added recorder * fixed tests * fixed tests * black * black * remove unused imports * refactor display raw * sia dicts fix * pylint * linting * remove dangerous default * fix tests * fix beta model test * black * skip screener qa test * change sector path to sectors * update tests readme * fix metric defaults * black * substitute lost ticker * defaults cpic * another round on sia * refactor cramer * reduce default tweets on sentiment * refactor yf hist, corr, volume * arkorders default * refactor income, balance, cashflow * refacto scorr, screener, getfinnhub * refactor stockgrid * ibkr refactor * another round on stockgrid * add dividens end point * refactor discovery endpoints * update docstrings with similar input * refactor messages * refactor ba * refactor regioons * refactor twitter sentiment * refactor hist * refactor regions * give default to timeframe * refactor bunch of defaults and arg names * remove leftover imports * refactor vwap * let tests run * fix tests * fix stock tests * fix stockanalysis tests * flake * MYPY * Made important changes * added fixes * Fixed big issue * Added fixes to tests * fix qa tests * fix tests * fix 1 more test * last stocks failing * fix crypto test Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: montezdesousa <[email protected]> Co-authored-by: hjoaquim <[email protected]> Co-authored-by: montezdesousa <[email protected]> Co-authored-by: colin99d <[email protected]> * fix portfolio tests * change period to window * update ca docstrings * refactor get_similar_companies func * Fixed * Update CI * Update CI 2 * Update CI 3 * Update dependencies Co-authored-by: colin99d <[email protected]> Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: montezdesousa <[email protected]> Co-authored-by: James Simmons <[email protected]> Co-authored-by: Theodore Aptekarev <[email protected]> Co-authored-by: minhhoang1023 <[email protected]> Co-authored-by: jose-donato <[email protected]> Co-authored-by: montezdesousa <[email protected]> Co-authored-by: northern-64bit <[email protected]> Co-authored-by: hjoaquim <[email protected]>
85,239
0
151
147
88
285,199
103
OpenBBTerminal
31
openbb_terminal/econometrics/econometrics_model.py
Python
12
{ "docstring": "Estimates long-run and short-run cointegration relationship for series y and x and apply\n the two-step Engle & Granger test for cointegration.\n\n Uses a 2-step process to first estimate coefficients for the long-run relationship\n y_t = c + gamma * x_t + z_t\n\n and then the short-term relationship,\n y_t - y_(t-1) = alpha * z_(t-1) + epsilon_t,\n\n with z the found residuals of the first equation.\n\n Then tests cointegration by Dickey-Fuller phi=1 vs phi < 1 in\n z_t = phi * z_(t-1) + eta_t\n\n If this implies phi < 1, the z series is stationary is concluded to be\n stationary, and thus the series y and x are concluded to be cointegrated.\n\n Parameters\n ----------\n dependent_series : pd.Series\n The first time series of the pair to analyse.\n\n independent_series : pd.Series\n The second time series of the pair to analyse.\n\n Returns\n -------\n c : float\n The constant term in the long-run relationship y_t = c + gamma * x_t + z_t. This\n describes the static shift of y with respect to gamma * x.\n\n gamma : float\n The gamma term in the long-run relationship y_t = c + gamma * x_t + z_t. This\n describes the ratio between the const-shifted y and x.\n\n alpha : float\n The alpha term in the short-run relationship y_t - y_(t-1) = alpha * z_(t-1) + epsilon. This\n gives an indication of the strength of the error correction toward the long-run mean.\n\n z : pd.Series\n Series of residuals z_t from the long-run relationship y_t = c + gamma * x_t + z_t, representing\n the value of the error correction term.\n\n dfstat : float\n The Dickey Fuller test-statistic for phi = 1 vs phi < 1 in the second equation. A more\n negative value implies the existence of stronger cointegration.\n\n pvalue : float\n The p-value corresponding to the Dickey Fuller test-statistic. A lower value implies\n stronger rejection of no-cointegration, thus stronger evidence of cointegration.\n\n ", "language": "en", "n_whitespaces": 494, "n_words": 315, "vocab_size": 129 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
12
naive_all_pairs_lowest_common_ancestor
def naive_all_pairs_lowest_common_ancestor(G, pairs=None):
    if not nx.is_directed_acyclic_graph(G):
        raise nx.NetworkXError("LCA only defined on directed acyclic graphs.")
    if len(G) == 0:
        raise nx.NetworkXPointlessConcept("LCA meaningless on null graphs.")

    ancestor_cache = {}

    if pairs is None:
        pairs = combinations_with_replacement(G, 2)
    for v, w in pairs:
        if v not in ancestor_cache:
            ancestor_cache[v] = nx.ancestors(G, v)
            ancestor_cache[v].add(v)
        if w not in ancestor_cache:
            ancestor_cache[w] = nx.ancestors(G, w)
            ancestor_cache[w].add(w)

        common_ancestors = ancestor_cache[v] & ancestor_cache[w]

        if common_ancestors:
            common_ancestor = next(iter(common_ancestors))
            while True:
                successor = None
                for lower_ancestor in G.successors(common_ancestor):
                    if lower_ancestor in common_ancestors:
                        successor = lower_ancestor
                        break
                if successor is None:
                    break
                common_ancestor = successor
            yield ((v, w), common_ancestor)


@not_implemented_for("undirected")
@not_implemented_for("multigraph")
8b6c25ec952b80539a38e7a884c0fdc3fd735b28
@not_implemented_for("undirected") @not_implemented_for("multigraph")
16
lowest_common_ancestors.py
319
Improve LCA input validation (#5877) * Remove explicit checks for None nodes. The graph objects already do not allow None nodes. * Change elif to if for raising branches.
42,256
1
387
188
60
177,062
101
networkx
22
networkx/algorithms/lowest_common_ancestors.py
Python
28
{ "docstring": "Return the lowest common ancestor of all pairs or the provided pairs\n\n Parameters\n ----------\n G : NetworkX directed graph\n\n pairs : iterable of pairs of nodes, optional (default: all pairs)\n The pairs of nodes of interest.\n If None, will find the LCA of all pairs of nodes.\n\n Yields\n ------\n ((node1, node2), lca) : 2-tuple\n Where lca is least common ancestor of node1 and node2.\n Note that for the default case, the order of the node pair is not considered,\n e.g. you will not get both ``(a, b)`` and ``(b, a)``\n\n Raises\n ------\n NetworkXPointlessConcept\n If `G` is null.\n NetworkXError\n If `G` is not a DAG.\n\n Examples\n --------\n The default behavior is to yield the lowest common ancestor for all\n possible combinations of nodes in `G`, including self-pairings:\n\n >>> G = nx.DiGraph([(0, 1), (0, 3), (1, 2)])\n >>> dict(nx.naive_all_pairs_lowest_common_ancestor(G))\n {(0, 0): 0, (0, 1): 0, (0, 3): 0, (0, 2): 0, (1, 1): 1, (1, 3): 0, (1, 2): 1, (3, 3): 3, (3, 2): 0, (2, 2): 2}\n\n The pairs argument can be used to limit the output to only the\n specified node pairings:\n\n >>> dict(nx.naive_all_pairs_lowest_common_ancestor(G, pairs=[(1, 2), (2, 3)]))\n {(1, 2): 1, (2, 3): 0}\n\n Notes\n -----\n Only defined on non-null directed acyclic graphs.\n\n See Also\n --------\n naive_lowest_common_ancestor\n ", "language": "en", "n_whitespaces": 344, "n_words": 208, "vocab_size": 126 }
https://github.com/networkx/networkx.git
6
unquote_unreserved
def unquote_unreserved(uri):
    parts = uri.split("%")
    for i in range(1, len(parts)):
        h = parts[i][0:2]
        if len(h) == 2 and h.isalnum():
            try:
                c = chr(int(h, 16))
            except ValueError:
                raise InvalidURL(f"Invalid percent-escape sequence: '{h}'")

            if c in UNRESERVED_SET:
                parts[i] = c + parts[i][2:]
            else:
                parts[i] = f"%{parts[i]}"
        else:
            parts[i] = f"%{parts[i]}"
    return "".join(parts)
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
16
utils.py
215
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
4,233
0
198
119
37
22,163
50
pipenv
16
pipenv/patched/pip/_vendor/requests/utils.py
Python
16
{ "docstring": "Un-escape any percent-escape sequences in a URI that are unreserved\n characters. This leaves all reserved, illegal and non-ASCII bytes encoded.\n\n :rtype: str\n ", "language": "en", "n_whitespaces": 31, "n_words": 22, "vocab_size": 22 }
https://github.com/pypa/pipenv.git
2
is_named_tuple
def is_named_tuple(obj) -> bool:
    return isinstance(obj, abc.Sequence) and hasattr(obj, "_fields")
bce995817caf00ab5e82cb4cf1b540f1530cf4ea
9
inference.py
41
Fix some dosctring RT02 error (#50197)
40,758
0
16
24
10
172,104
10
pandas
7
pandas/core/dtypes/inference.py
Python
25
{ "docstring": "\n Check if the object is a named tuple.\n\n Parameters\n ----------\n obj : The object to check\n\n Returns\n -------\n bool\n Whether `obj` is a named tuple.\n\n Examples\n --------\n >>> from collections import namedtuple\n >>> Point = namedtuple(\"Point\", [\"x\", \"y\"])\n >>> p = Point(1, 2)\n >>>\n >>> is_named_tuple(p)\n True\n >>> is_named_tuple((1, 2))\n False\n ", "language": "en", "n_whitespaces": 113, "n_words": 51, "vocab_size": 40 }
https://github.com/pandas-dev/pandas.git
9
_laplace_rule_timescale
def _laplace_rule_timescale(f, t, s, doit=True, **hints):
    r
    _simplify = hints.pop('simplify', True)
    b = Wild('b', exclude=[t])
    g = WildFunction('g', nargs=1)
    ma1 = f.match(g)
    if ma1:
        arg = ma1[g].args[0].collect(t)
        ma2 = arg.match(b*t)
        if ma2 and ma2[b]>0:
            debug('_laplace_apply_rules match:')
            debug(' f: %s ( %s, %s )'%(f, ma1, ma2))
            debug(' rule: amplitude and time scaling (1.1, 1.2)')
            if ma2[b]==1:
                if doit==True and not any(func.has(t) for func
                                          in ma1[g].atoms(AppliedUndef)):
                    return _laplace_transform(ma1[g].func(t), t, s,
                                              simplify=_simplify)
                else:
                    return LaplaceTransform(ma1[g].func(t), t, s, **hints)
            else:
                L = _laplace_apply_rules(ma1[g].func(t), t, s/ma2[b],
                                         doit=doit, **hints)
                try:
                    r, p, c = L
                    return (1/ma2[b]*r, p, c)
                except TypeError:
                    return 1/ma2[b]*L
    return None
af443377dd48c2caf440d2b6dd76830dbe84712f
19
transforms.py
407
Moved all constant-factor calculations to `_laplace_apply_rules`
49,677
0
506
266
71
200,509
99
sympy
35
sympy/integrals/transforms.py
Python
37
{ "docstring": "\n This internal helper function tries to apply the time-scaling rule of the\n Laplace transform and returns `None` if it cannot do it.\n\n Time-scaling means the following: if $F(s)$ is the Laplace transform of,\n $f(t)$, then, for any $a>0$, the Laplace transform of $f(at)$ will be\n $\\frac1a F(\\frac{s}{a})$. This scaling will also affect the transform's\n convergence plane.\n ", "language": "en", "n_whitespaces": 78, "n_words": 56, "vocab_size": 43 }
https://github.com/sympy/sympy.git
7
_read
def _read(cls, path_or_buf, **kwargs):
    path_or_buf = cls.get_path_or_buffer(path_or_buf)
    if isinstance(path_or_buf, str):
        if not cls.file_exists(path_or_buf):
            return cls.single_worker_read(path_or_buf, **kwargs)
        path_or_buf = cls.get_path(path_or_buf)
    elif not cls.pathlib_or_pypath(path_or_buf):
        return cls.single_worker_read(path_or_buf, **kwargs)
    if not kwargs.get("lines", False):
        return cls.single_worker_read(path_or_buf, **kwargs)
    with OpenFile(path_or_buf, "rb") as f:
        columns = pandas.read_json(BytesIO(b"" + f.readline()), lines=True).columns
    kwargs["columns"] = columns
    empty_pd_df = pandas.DataFrame(columns=columns)

    with OpenFile(path_or_buf, "rb", kwargs.get("compression", "infer")) as f:
        partition_ids = []
        index_ids = []
        dtypes_ids = []

        column_widths, num_splits = cls._define_metadata(empty_pd_df, columns)

        args = {"fname": path_or_buf, "num_splits": num_splits, **kwargs}

        splits = cls.partitioned_file(
            f,
            num_partitions=NPartitions.get(),
        )
        for start, end in splits:
            args.update({"start": start, "end": end})
            partition_id = cls.deploy(cls.parse, num_returns=num_splits + 3, **args)
            partition_ids.append(partition_id[:-3])
            index_ids.append(partition_id[-3])
            dtypes_ids.append(partition_id[-2])

    # partition_id[-1] contains the columns for each partition, which will be useful
    # for implementing when `lines=False`.
    row_lengths = cls.materialize(index_ids)
    new_index = pandas.RangeIndex(sum(row_lengths))

    dtypes = cls.get_dtypes(dtypes_ids)
    partition_ids = cls.build_partition(partition_ids, row_lengths, column_widths)

    if isinstance(dtypes, pandas.Series):
        dtypes.index = columns
    else:
        dtypes = pandas.Series(dtypes, index=columns)

    new_frame = cls.frame_cls(
        np.array(partition_ids),
        new_index,
        columns,
        row_lengths,
        column_widths,
        dtypes=dtypes,
    )
    new_frame.synchronize_labels(axis=0)
    return cls.query_compiler_cls(new_frame)
97769988a6f19e4b76f34238c97bf159ee7626a5
16
json_dispatcher.py
641
REFACTOR-#3853: interacting with Dask interface through 'DaskWrapper' class (#3854) Co-authored-by: Devin Petersohn <[email protected]> Co-authored-by: Dmitry Chigarev <[email protected]> Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Anatoly Myachev <[email protected]>
35,438
0
655
398
106
153,549
157
modin
58
modin/core/io/text/json_dispatcher.py
Python
48
{ "docstring": "\n Read data from `path_or_buf` according to the passed `read_json` `kwargs` parameters.\n\n Parameters\n ----------\n path_or_buf : str, path object or file-like object\n `path_or_buf` parameter of `read_json` function.\n **kwargs : dict\n Parameters of `read_json` function.\n\n Returns\n -------\n BaseQueryCompiler\n Query compiler with imported data for further processing.\n ", "language": "en", "n_whitespaces": 141, "n_words": 44, "vocab_size": 35 }
https://github.com/modin-project/modin.git
5
_store
def _store(self, messages, response, remove_oldest=True, *args, **kwargs):
    unstored_messages = []
    encoded_data = self._encode(messages)
    if self.max_cookie_size:
        # data is going to be stored eventually by SimpleCookie, which
        # adds its own overhead, which we must account for.
        cookie = SimpleCookie()  # create outside the loop
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
cookie.py
70
Refs #33476 -- Reformatted code with Black.
50,673
0
106
112
39
204,181
44
django
13
django/contrib/messages/storage/cookie.py
Python
16
{ "docstring": "\n Store the messages to a cookie and return a list of any messages which\n could not be stored.\n\n If the encoded data is larger than ``max_cookie_size``, remove\n messages until the data fits (these are the messages which are\n returned), and add the not_finished sentinel value to indicate as much.\n ", "language": "en", "n_whitespaces": 92, "n_words": 49, "vocab_size": 36 }
https://github.com/django/django.git
11
handle_out
def handle_out(out, result):
    if isinstance(out, tuple):
        if len(out) == 1:
            out = out[0]
        elif len(out) > 1:
            raise NotImplementedError("The out parameter is not fully supported")
        else:
            out = None

    # Notice, we use .__class__ as opposed to type() in order to support
    # object proxies see <https://github.com/dask/dask/pull/6981>
    if out is not None and out.__class__ != result.__class__:
        raise TypeError(
            "Mismatched types between result and out parameter. "
            "out=%s, result=%s" % (str(type(out)), str(type(result)))
        )

    if isinstance(out, DataFrame):
        if len(out.columns) != len(result.columns):
            raise ValueError(
                "Mismatched columns count between result and out parameter. "
                "out=%s, result=%s" % (str(len(out.columns)), str(len(result.columns)))
            )

    if isinstance(out, (Series, DataFrame, Scalar)):
        out._meta = result._meta
        out._name = result._name
        out.dask = result.dask

        if not isinstance(out, Scalar):
            out._divisions = result.divisions
    elif out is not None:
        msg = (
            "The out parameter is not fully supported."
            " Received type %s, expected %s "
            % (typename(type(out)), typename(type(result)))
        )
        raise NotImplementedError(msg)
    else:
        return result
bc43d692e8f5fc55e6d81c91053598bd89f79267
18
core.py
373
Add sanity checks to divisions setter (#8806) Adds a few sanity checks to the divisions property setter - New divisions are compatible with the existing `npartitions` - New divisions don't mix None and non-None values - New divisions are sorted (except for ordered categorical dtypes, where that's hard)
36,599
0
424
227
90
156,197
148
dask
23
dask/dataframe/core.py
Python
34
{ "docstring": "Handle out parameters\n\n If out is a dask.DataFrame, dask.Series or dask.Scalar then\n this overwrites the contents of it with the result\n ", "language": "en", "n_whitespaces": 30, "n_words": 21, "vocab_size": 19 }
https://github.com/dask/dask.git
1
test_regressor_predict_on_arraylikes
def test_regressor_predict_on_arraylikes():
    X = [[5, 1], [3, 1], [4, 3], [0, 3]]
    y = [2, 3, 5, 6]
742d39ca38f713027091324c4555f9b4e1b9da05
8
test_neighbors.py
60
FIX Fixes KNeighborsRegressor.predict with array-likes (#22687) Co-authored-by: Thomas J. Fan <[email protected]>
75,566
0
27
90
16
259,100
18
scikit-learn
3
sklearn/neighbors/tests/test_neighbors.py
Python
7
{ "docstring": "Ensures that `predict` works for array-likes when `weights` is a callable.\n\n Non-regression test for #22687.\n ", "language": "en", "n_whitespaces": 21, "n_words": 15, "vocab_size": 14 }
https://github.com/scikit-learn/scikit-learn.git
9
_send_msg
def _send_msg(self, msg):
    self.logger.info(f"Sending discord message: {msg}")

    # TODO: handle other message types
    if msg['type'] == RPCMessageType.EXIT_FILL:
        profit_ratio = msg.get('profit_ratio')
        open_date = msg.get('open_date').strftime('%Y-%m-%d %H:%M:%S')
        close_date = msg.get('close_date').strftime('%Y-%m-%d %H:%M:%S') if msg.get('close_date') else ''

        embeds = [{
            'title': '{} Trade: {}'.format(
                'Profit' if profit_ratio > 0 else 'Loss',
                msg.get('pair')),
            'color': (0x00FF00 if profit_ratio > 0 else 0xFF0000),
            'fields': [
                {'name': 'Trade ID', 'value': msg.get('id'), 'inline': True},
                {'name': 'Exchange', 'value': msg.get('exchange').capitalize(), 'inline': True},
                {'name': 'Pair', 'value': msg.get('pair'), 'inline': True},
                {'name': 'Direction', 'value': 'Short' if msg.get('is_short') else 'Long', 'inline': True},
                {'name': 'Open rate', 'value': msg.get('open_rate'), 'inline': True},
                {'name': 'Close rate', 'value': msg.get('close_rate'), 'inline': True},
                {'name': 'Amount', 'value': msg.get('amount'), 'inline': True},
                {'name': 'Open order', 'value': msg.get('open_order_id'), 'inline': True},
                {'name': 'Open date', 'value': open_date, 'inline': True},
                {'name': 'Close date', 'value': close_date, 'inline': True},
                {'name': 'Profit', 'value': msg.get('profit_amount'), 'inline': True},
                {'name': 'Profitability', 'value': '{:.2f}%'.format(profit_ratio * 100), 'inline': True},
                {'name': 'Stake currency', 'value': msg.get('stake_currency'), 'inline': True},
                {'name': 'Fiat currency', 'value': msg.get('fiat_display_currency'), 'inline': True},
                {'name': 'Buy Tag', 'value': msg.get('enter_tag'), 'inline': True},
                {'name': 'Sell Reason', 'value': msg.get('exit_reason'), 'inline': True},
                {'name': 'Strategy', 'value': self.strategy, 'inline': True},
                {'name': 'Timeframe', 'value': self.timeframe, 'inline': True},
            ],
        }]

        # convert all value in fields to string for discord
        for embed in embeds:
            for field in embed['fields']:
                field['value'] = str(field['value'])

        # Send the message to discord channel
        payload = {
            'embeds': embeds,
        }
        headers = {
            'Content-Type': 'application/json',
        }
        try:
            requests.post(self.config['discord']['webhook_url'], data=json.dumps(payload), headers=headers)
        except Exception as e:
            self.logger.error(f"Failed to send discord message: {e}")
45c47bda6000b2b57026fdedffaaa69f8fc1797e
19
discord.py
994
refactor into discord rpc module
34,602
0
953
535
130
149,948
237
freqtrade
31
freqtrade/rpc/discord.py
Python
45
{ "docstring": "\n msg = {\n 'type': (RPCMessageType.EXIT_FILL if fill\n else RPCMessageType.EXIT),\n 'trade_id': trade.id,\n 'exchange': trade.exchange.capitalize(),\n 'pair': trade.pair,\n 'leverage': trade.leverage,\n 'direction': 'Short' if trade.is_short else 'Long',\n 'gain': gain,\n 'limit': profit_rate,\n 'order_type': order_type,\n 'amount': trade.amount,\n 'open_rate': trade.open_rate,\n 'close_rate': trade.close_rate,\n 'current_rate': current_rate,\n 'profit_amount': profit_trade,\n 'profit_ratio': profit_ratio,\n 'buy_tag': trade.enter_tag,\n 'enter_tag': trade.enter_tag,\n 'sell_reason': trade.exit_reason, # Deprecated\n 'exit_reason': trade.exit_reason,\n 'open_date': trade.open_date,\n 'close_date': trade.close_date or datetime.utcnow(),\n 'stake_currency': self.config['stake_currency'],\n 'fiat_currency': self.config.get('fiat_display_currency', None),\n }\n ", "language": "en", "n_whitespaces": 359, "n_words": 63, "vocab_size": 59 }
https://github.com/freqtrade/freqtrade.git
4
_get_arithmetic_result_freq
def _get_arithmetic_result_freq(self, other) -> BaseOffset | None:
    # Adding or subtracting a Timedelta/Timestamp scalar is freq-preserving
    # whenever self.freq is a Tick
    if is_period_dtype(self.dtype):
        return self.freq

    elif not lib.is_scalar(other):
        return None

    elif isinstance(self.freq, Tick):
        # In these cases
        return self.freq

    return None
8f26ab7c184c7f6fc7337bfed47ac9d86ee4551f
9
datetimelike.py
85
REF: helpers to de-duplicate datetimelike arithmetic (#48844)
40,423
0
136
51
31
169,366
42
pandas
11
pandas/core/arrays/datetimelike.py
Python
11
{ "docstring": "\n Check if we can preserve self.freq in addition or subtraction.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/pandas-dev/pandas.git
3
__reset_trigger_counts
def __reset_trigger_counts(self):
    for trigger_id in self.trigger_alert_counts:
        self.trigger_alert_counts[trigger_id] = 0

    for trigger_id in self.trigger_resolve_counts:
        self.trigger_resolve_counts[trigger_id] = 0

    self.update_alert_rule_stats()
524c8579d4d87954d993cd3fb7a29e0422058e6a
10
subscription_processor.py
66
feat(alerts): Clear counters for null subscription updates (#30831) Adds logic that clears trigger count and resolve count for crash rate alerts and metrics crash rate alerts in the case an empty subscription update is sent or if a min session count threshold is set and the count in the subscription update is lower than the min threshold
19,202
0
67
41
12
95,336
17
sentry
6
src/sentry/incidents/subscription_processor.py
Python
6
{ "docstring": "\n Helper method that clears both the trigger alert and the trigger resolve counts\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 11 }
https://github.com/getsentry/sentry.git
1
wide_resnet50_2
def wide_resnet50_2(pretrained=False, **kwargs):
    kwargs['width'] = 64 * 2
    return _resnet('wide_resnet50_2', BottleneckBlock, 50, pretrained, **kwargs)
ffcde21305c61d950a9f93e57e6180c9a9665b87
8
resnet.py
54
add disco_diffusion_ernievil_base
10,081
0
23
33
14
50,301
14
PaddleHub
5
modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/resnet.py
Python
3
{ "docstring": "Wide ResNet-50-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n\n Examples:\n .. code-block:: python\n\n import paddle\n from paddle.vision.models import wide_resnet50_2\n\n # build model\n model = wide_resnet50_2()\n\n # build model and load imagenet pretrained weight\n # model = wide_resnet50_2(pretrained=True)\n\n x = paddle.rand([1, 3, 224, 224])\n out = model(x)\n\n print(out.shape)\n ", "language": "en", "n_whitespaces": 182, "n_words": 57, "vocab_size": 43 }
https://github.com/PaddlePaddle/PaddleHub.git
1
piecewise_linear
def piecewise_linear(t, deltas, k, m, changepoint_ts):
    deltas_t = (changepoint_ts[None, :] <= t[..., None]) * deltas
    k_t = deltas_t.sum(axis=1) + k
    m_t = (deltas_t * -changepoint_ts).sum(axis=1) + m
    return k_t * t + m_t
8fbf8ba2a5bfcdb892e8ca596e338894614000b5
12
forecaster.py
103
Speed Up Uncertainty Predictions (#2186)
440
0
68
68
25
3,305
33
prophet
11
python/prophet/forecaster.py
Python
5
{ "docstring": "Evaluate the piecewise linear function.\n\n Parameters\n ----------\n t: np.array of times on which the function is evaluated.\n deltas: np.array of rate changes at each changepoint.\n k: Float initial rate.\n m: Float initial offset.\n changepoint_ts: np.array of changepoint times.\n\n Returns\n -------\n Vector y(t).\n ", "language": "en", "n_whitespaces": 119, "n_words": 42, "vocab_size": 35 }
https://github.com/facebook/prophet.git
3
test_cache_metric
def test_cache_metric(self):
    CACHE_NAME = "cache_metrics_test_fgjkbdfg"
    cache: DeferredCache[str, str] = DeferredCache(CACHE_NAME, max_entries=777)

    items = {
        x.split(b"{")[0].decode("ascii"): x.split(b" ")[1].decode("ascii")
        for x in filter(
            lambda x: b"cache_metrics_test_fgjkbdfg" in x,
            generate_latest(REGISTRY).split(b"\n"),
        )
    }

    self.assertEqual(items["synapse_util_caches_cache_size"], "0.0")
    self.assertEqual(items["synapse_util_caches_cache_max_size"], "777.0")

    cache.prefill("1", "hi")

    items = {
        x.split(b"{")[0].decode("ascii"): x.split(b" ")[1].decode("ascii")
        for x in filter(
            lambda x: b"cache_metrics_test_fgjkbdfg" in x,
            generate_latest(REGISTRY).split(b"\n"),
        )
    }

    self.assertEqual(items["synapse_util_caches_cache_size"], "1.0")
    self.assertEqual(items["synapse_util_caches_cache_max_size"], "777.0")
2bb2c32e8ed5642a5bf3ba1e8c49e10cecc88905
14
test_metrics.py
336
Avoid incrementing bg process utime/stime counters by negative durations (#14323)
73,114
0
267
198
33
249,768
57
synapse
16
tests/metrics/test_metrics.py
Python
22
{ "docstring": "\n Caches produce metrics reflecting their state when scraped.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
https://github.com/matrix-org/synapse.git
1
test_multilabel_y_explicit_zeros
def test_multilabel_y_explicit_zeros(tmp_path):
    save_path = str(tmp_path / "svm_explicit_zero")
    rng = np.random.RandomState(42)
    X = rng.randn(3, 5).astype(np.float64)
    indptr = np.array([0, 2, 3, 6])
    indices = np.array([0, 2, 2, 0, 1, 2])
    # The first and last element are explicit zeros.
    data = np.array([0, 1, 1, 1, 1, 0])
    y = sp.csr_matrix((data, indices, indptr), shape=(3, 3))
    # y as a dense array would look like
    # [[0, 0, 1],
    #  [0, 0, 1],
    #  [1, 1, 0]]
    dump_svmlight_file(X, y, save_path, multilabel=True)

    _, y_load = load_svmlight_file(save_path, multilabel=True)
    y_true = [(2.0,), (2.0,), (0.0, 1.0)]
    assert y_load == y_true
2e3a0474a6195161e7e3ff02be9d1ff18591ca7b
10
test_svmlight_format.py
244
ENH Cythonize `dump_svmlight_file` (#23127) Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]>
76,118
0
145
174
64
260,207
92
scikit-learn
26
sklearn/datasets/tests/test_svmlight_format.py
Python
12
{ "docstring": "\n Ensure that if y contains explicit zeros (i.e. elements of y.data equal to\n 0) then those explicit zeros are not encoded.\n ", "language": "en", "n_whitespaces": 31, "n_words": 21, "vocab_size": 19 }
https://github.com/scikit-learn/scikit-learn.git
1
handler_name
def handler_name(self) -> str:
    # Property to make it read only
    return self._handler_name
1013f84ffc67bdce04bf13eaf401ab5ced572bb0
6
message.py
23
diagrams
44,890
0
34
12
13
185,071
13
textual
4
src/textual/message.py
Python
3
{ "docstring": "The name of the handler associated with this message.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/Textualize/textual.git
4
load_sample_image
def load_sample_image(image_name):
    images = load_sample_images()
    index = None
    for i, filename in enumerate(images.filenames):
        if filename.endswith(image_name):
            index = i
            break
    if index is None:
        raise AttributeError("Cannot find sample image: %s" % image_name)
    return images.images[index]
9d7220b57ccac4ac12268a96281940667a4de1d8
11
_base.py
96
DOC changed some typo of _shrunk_covariance.ledoit_wolf_shrinkage (#22798) * Changed from 3rd to 1st person the 1st word and remote blanck space. * Add two periods in the content
75,626
0
87
57
28
259,175
33
scikit-learn
11
sklearn/datasets/_base.py
Python
10
{ "docstring": "Load the numpy array of a single sample image.\n\n Read more in the :ref:`User Guide <sample_images>`.\n\n Parameters\n ----------\n image_name : {`china.jpg`, `flower.jpg`}\n The name of the sample image loaded\n\n Returns\n -------\n img : 3D array\n The image as a numpy array: height x width x color.\n\n Examples\n --------\n\n >>> from sklearn.datasets import load_sample_image\n >>> china = load_sample_image('china.jpg') # doctest: +SKIP\n >>> china.dtype # doctest: +SKIP\n dtype('uint8')\n >>> china.shape # doctest: +SKIP\n (427, 640, 3)\n >>> flower = load_sample_image('flower.jpg') # doctest: +SKIP\n >>> flower.dtype # doctest: +SKIP\n dtype('uint8')\n >>> flower.shape # doctest: +SKIP\n (427, 640, 3)\n ", "language": "en", "n_whitespaces": 288, "n_words": 95, "vocab_size": 58 }
https://github.com/scikit-learn/scikit-learn.git
6
test_vr_connector_with_multiple_buffers
def test_vr_connector_with_multiple_buffers(self):
    context_len = 5
    # This view requirement simulates the use-case of a decision transformer
    # without reward-to-go.
    view_rq_dict = {
        # obs[t-context_len+1:t]
        "context_obs": ViewRequirement("obs", shift=f"-{context_len-1}:0"),
        # next_obs[t-context_len+1:t]
        "context_next_obs": ViewRequirement(
            "obs", shift=f"-{context_len}:1", used_for_compute_actions=False
        ),
        # act[t-context_len+1:t]
        "context_act": ViewRequirement(
            SampleBatch.ACTIONS, shift=f"-{context_len-1}:-1"
        ),
    }

    obs_arrs = np.arange(10)[:, None] + 1
    act_arrs = (np.arange(10)[:, None] + 1) * 100
    n_steps = obs_arrs.shape[0]
    config = PPOConfig().to_dict()
    ctx = ConnectorContext(
        view_requirements=view_rq_dict, config=config, is_policy_recurrent=True
    )
    c = ViewRequirementAgentConnector(ctx)

    # keep a queue of length ctx_len of observations
    obs_list, act_list = [], []
    for t in range(n_steps):
        # next state and action at time t-1 are the following
        timestep_data = {
            SampleBatch.NEXT_OBS: obs_arrs[t],
            SampleBatch.ACTIONS: (
                np.zeros_like(act_arrs[0]) if t == 0 else act_arrs[t - 1]
            ),
            SampleBatch.T: t - 1,
        }
        data = AgentConnectorDataType(0, 1, timestep_data)
        processed = c([data])
        sample_batch = processed[0].data.sample_batch

        if t == 0:
            obs_list.extend([obs_arrs[0] for _ in range(context_len)])
            act_list.extend(
                [np.zeros_like(act_arrs[0]) for _ in range(context_len)]
            )
        else:
            obs_list.pop(0)
            act_list.pop(0)
            obs_list.append(obs_arrs[t])
            act_list.append(act_arrs[t - 1])

        self.assertTrue("context_next_obs" not in sample_batch)
        check(sample_batch["context_obs"], np.stack(obs_list)[None])
        check(sample_batch["context_act"], np.stack(act_list[:-1])[None])
5bf9d5084fe38d864c1b5075145d2e778c431a5b
15
test_agent.py
572
[RLlib] Pass input dict into action connectors, because sometimes input data is useful for adapting output actions. (#28588) Based on user feedback. Input data is sometimes useful for adapting outputs, for example action masks. Signed-off-by: Jun Gong <[email protected]>
28,520
0
731
349
122
127,777
166
ray
43
rllib/connectors/tests/test_agent.py
Python
44
{ "docstring": "Test that the ViewRequirementAgentConnector can handle slice shifts correctly\n when it has multiple buffers to shift.", "language": "en", "n_whitespaces": 22, "n_words": 16, "vocab_size": 16 }
https://github.com/ray-project/ray.git
4
reversed
def reversed(G):
    msg = (
        "context manager reversed is deprecated and to be removed in 3.0."
        "Use G.reverse(copy=False) if G.is_directed() else G instead."
    )
    warnings.warn(msg, DeprecationWarning)

    directed = G.is_directed()
    if directed:
        G._pred, G._succ = G._succ, G._pred
        G._adj = G._succ

    try:
        yield
    finally:
        if directed:
            # Reverse the reverse.
            G._pred, G._succ = G._succ, G._pred
            G._adj = G._succ
cc1db275efc709cb964ce88abbfa877798d58c10
13
contextmanagers.py
141
Minor improvements from general code readthrough (#5414) * Add deprecated directive to reversed docstring. * Add missing dep directives to shpfiles. * Remove defn of INF sentinel. * typo. * str -> comment in forloop. * STY: appropriate casing for var name.
41,913
0
155
82
40
176,452
56
networkx
11
networkx/utils/contextmanagers.py
Python
16
{ "docstring": "A context manager for temporarily reversing a directed graph in place.\n\n .. deprecated:: 2.6\n\n This context manager is deprecated and will be removed in 3.0.\n Use ``G.reverse(copy=False) if G.is_directed() else G`` instead.\n\n This is a no-op for undirected graphs.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph.\n\n Warning\n -------\n The reversed context manager is deprecated in favor\n of G.reverse(copy=False). The view allows multiple threads to use the\n same graph without confusion while the context manager does not.\n This context manager is scheduled to be removed in version 3.0.\n ", "language": "en", "n_whitespaces": 143, "n_words": 88, "vocab_size": 60 }
https://github.com/networkx/networkx.git
5
train_epoch_ch3
def train_epoch_ch3(net, train_iter, loss, updater):
    # Sum of training loss, sum of training accuracy, no. of examples
    metric = Accumulator(3)
    for X, y in train_iter:
        # Compute gradients and update parameters
        with tf.GradientTape() as tape:
            y_hat = net(X)
            # Keras implementations for loss takes (labels, predictions)
            # instead of (predictions, labels) that users might implement
            # in this book, e.g. `cross_entropy` that we implemented above
            if isinstance(loss, tf.keras.losses.Loss):
                l = loss(y, y_hat)
            else:
                l = loss(y_hat, y)
        if isinstance(updater, tf.keras.optimizers.Optimizer):
            params = net.trainable_variables
            grads = tape.gradient(l, params)
            updater.apply_gradients(zip(grads, params))
        else:
            updater(X.shape[0], tape.gradient(l, updater.params))
        # Keras loss by default returns the average loss in a batch
        l_sum = l * float(tf.size(y)) if isinstance(
            loss, tf.keras.losses.Loss) else tf.reduce_sum(l)
        metric.add(l_sum, accuracy(y_hat, y), tf.size(y))
    # Return training loss and training accuracy
    return metric[0] / metric[2], metric[1] / metric[2]
b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2
15
tensorflow.py
324
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘ * 修改部分语义表述 (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * 94行typo: (“bert.mall”)->(“bert.small”) (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) 修改部分语义表述 * Update self-attention-and-positional-encoding.md (#1133) 依照本书的翻译习惯,将pooling翻译成汇聚 * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) 一个笔误 # 假设batch_size=2,num_pred_positions=3 # 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <[email protected]> * Update weight-decay.md (#1150) * Update weight-decay.md 关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解 关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。 并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释 解释为何会增加复杂性以及为何需要细粒度工具。 * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. 翻译错误 * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md 语句不通顺 * Update environment.md 语序异常 * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that 原译文把who也直接翻译出来了。 * Update mlp.md (#1117) * Update mlp.md 修改部分语义表述 * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Correct a translation error. (#1091) * Correct a translation error. 
* Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <[email protected]> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <[email protected]> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <[email protected]> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: Aston Zhang <[email protected]> * 重复语句 (#1188) Co-authored-by: Aston Zhang <[email protected]> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <[email protected]> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <[email protected]> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <[email protected]> Co-authored-by: zhou201505013 <[email protected]> Co-authored-by: Xinwei Liu <[email protected]> Co-authored-by: Anirudh Dagar <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: hugo_han <[email protected]> 
Co-authored-by: gyro永不抽风 <[email protected]> Co-authored-by: CanChengZheng <[email protected]> Co-authored-by: linlin <[email protected]> Co-authored-by: iuk <[email protected]> Co-authored-by: yoos <[email protected]> Co-authored-by: Mr. Justice Lawrence John Wargrave <[email protected]> Co-authored-by: Chiyuan Fu <[email protected]> Co-authored-by: Sunhuashan <[email protected]> Co-authored-by: Haiker Sun <[email protected]> Co-authored-by: Ming Liu <[email protected]> Co-authored-by: goldmermaid <[email protected]> Co-authored-by: silenceZheng66 <[email protected]> Co-authored-by: Wenchao Yan <[email protected]> Co-authored-by: Kiki2049 <[email protected]> Co-authored-by: Krahets <[email protected]> Co-authored-by: friedmainfunction <[email protected]> Co-authored-by: Jameson <[email protected]> Co-authored-by: P. Yao <[email protected]> Co-authored-by: Yulv-git <[email protected]> Co-authored-by: Liu,Xiao <[email protected]> Co-authored-by: YIN, Gang <[email protected]> Co-authored-by: Joe-HZ <[email protected]> Co-authored-by: lybloveyou <[email protected]> Co-authored-by: VigourJiang <[email protected]> Co-authored-by: zxhd863943427 <[email protected]> Co-authored-by: LYF <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: xiaotinghe <[email protected]> Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Holly-Max <[email protected]> Co-authored-by: HinGwenWoong <[email protected]> Co-authored-by: Shuai Zhang <[email protected]>
37,440
0
352
207
98
158,287
134
d2l-zh
33
d2l/tensorflow.py
Python
19
{ "docstring": "The training loop defined in Chapter 3.\n\n Defined in :numref:`sec_softmax_scratch`", "language": "en", "n_whitespaces": 12, "n_words": 10, "vocab_size": 9 }
https://github.com/d2l-ai/d2l-zh.git
9
_laplace_apply_rules
def _laplace_apply_rules(f, t, s, doit=True, **hints):
    k, func = f.as_independent(t, as_Add=False)

    simple_rules = _laplace_build_rules(t, s)
    for t_dom, s_dom, check, plane, prep in simple_rules:
        ma = prep(func).match(t_dom)
        if ma:
            debug('_laplace_apply_rules match:')
            debug(' f: %s'%(func,))
            debug(' rule: %s o---o %s'%(t_dom, s_dom))
            try:
                debug(' try %s'%(check,))
                c = check.xreplace(ma)
                debug(' check %s -> %s'%(check, c))
                if c==True:
                    return _laplace_cr(k*s_dom.xreplace(ma),
                                       plane.xreplace(ma), S.true, **hints)
            except Exception:
                debug('_laplace_apply_rules did not match.')
    if f.has(DiracDelta):
        return None

    prog_rules = [_laplace_rule_timescale, _laplace_rule_heaviside,
                  _laplace_rule_exp, _laplace_rule_trig, _laplace_rule_diff]
    for p_rule in prog_rules:
        LT = p_rule(func, t, s, doit=doit, **hints)
        if LT is not None:
            try:
                r, p, c = LT
                return (k*r, p, c)
            except TypeError:
                return k*LT
            return LT
    return None
af443377dd48c2caf440d2b6dd76830dbe84712f
18
transforms.py
365
Moved all constant-factor calculations to `_laplace_apply_rules`
49,680
0
469
237
77
200,512
110
sympy
39
sympy/integrals/transforms.py
Python
32
{ "docstring": "\n Helper function for the class LaplaceTransform.\n\n This function does a Laplace transform based on rules and, after\n applying the rules, hands the rest over to `_laplace_transform`, which\n will attempt to integrate.\n\n If it is called with `doit=False`, then it will instead return\n `LaplaceTransform` objects.\n ", "language": "en", "n_whitespaces": 66, "n_words": 44, "vocab_size": 38 }
https://github.com/sympy/sympy.git
1
_object2proto
def _object2proto(self) -> SMPCActionSeqBatchMessage_PB:
    return SMPCActionSeqBatchMessage_PB(
        smpc_actions=list(map(lambda x: serialize(x), self.smpc_actions)),
        address=serialize(self.address),
        msg_id=serialize(self.id),
    )
e272ed2fa4c58e0a89e273a3e85da7d13a85e04c
15
smpc_action_seq_batch_message.py
79
[syft.core.node.common.action] Change syft import absolute -> relative
349
0
67
50
13
2,719
13
PySyft
11
packages/syft/src/syft/core/node/common/action/smpc_action_seq_batch_message.py
Python
20
{ "docstring": "Returns a protobuf serialization of self.\n\n As a requirement of all objects which inherit from Serializable,\n this method transforms the current object into the corresponding\n Protobuf object so that it can be further serialized.\n\n :return: returns a protobuf object\n :rtype: SMPCActionSeqBatchMessage_PB\n\n .. note::\n This method is purely an internal method. Please use serialize(object) or one of\n the other public serialization methods if you wish to serialize an\n object.\n ", "language": "en", "n_whitespaces": 150, "n_words": 68, "vocab_size": 56 }
https://github.com/OpenMined/PySyft.git
3
postprocessing
def postprocessing(data):
    if type_to_string(type(data)) == "torch.Tensor":
        try:
            import torch
            from torchvision import transforms

            # By default Torch tensors are displayed as images. To display them as JSON,
            # the user can simply convert them to numpy arrays.
            transformer = transforms.ToPILImage()
            return transformer(torch.squeeze(data))
        except ModuleNotFoundError:
            logger.warning(
                "Module `torchvision` isn't installed, unable to process torch tensor."
            )
            return data

    return data
203253321d34543aa25483803ebc21e3903679b6
13
gradio_visualize_graph.py
101
[serve] Add additional features to DAG visualization with Gradio (#28246)
28,435
0
196
55
50
127,405
59
ray
13
python/ray/serve/experimental/gradio_visualize_graph.py
Python
13
{ "docstring": "Add support for types that are not supported by Gradio.\n\n Some data types like PyTorch tensors, cannot be processed and displayed through\n Gradio. Thus we extend support to these data types by transforming them into a form\n that Gradio can process and display.\n ", "language": "en", "n_whitespaces": 55, "n_words": 43, "vocab_size": 35 }
https://github.com/ray-project/ray.git
1
get_anchor_yield_reserve
def get_anchor_yield_reserve() -> pd.DataFrame:
    df = get_history_asset_from_terra_address(
        address="terra1tmnqgvg567ypvsvk6rwsga3srp7e3lg6u0elp8"
    )
    return df
72b0a9f1ee8b91ad9fd9e76d80d2ccab51ee6d21
10
terraengineer_model.py
36
Next release : reports on steroids (#2349) * fix gov tests * refactor insider * new virtual path extraction * removed some symbol default params as they're considered critical * little adjustments * portfolio refactor * merge API factory * add helpers, stocks, crypto, forex * minor forex changes * include forex api paths * add 2 missing forex funcs * portfolio brokers refactor * display help on api func call * add econometrics virtual paths to api * add api unit test * fixed report for the new api * minor portfolio refactorings * added gdapps * anchor_yield path * some more crypto path fixes * small change * fixed wrong param * minor fixes * wip - inital commit for forex report * add bw as a model, we'll get better solution afterwards * added ema with dummy model as it adds great functionality to the report * minor fixes * wip - added functions to forex report * add feedparser news path * add new virtual paths to api * adding commands to equity report * revert to old paths, new ones were breaking * Add in very basic ETF report * Add candle chart to ETF report * add etf load * allow use of candle without data * add raw to candle * added forex report * ongoing equity report * equity report change * fix some portfolio bugs and add docstrings * include portfolio paths and coin class * add crypto paths * change event dates to str * starting economy report * window for limit * equity report and refactor newsapi * add helper to api * update on economy report * equity report * update economy report * refactor some docstrings * change maturities helper * refactor newsapi * refactor futures command * add some sauce to ycrv plot * black * update report * refactor alphavantage * refactor wsj * update economy report * ycrv tenor * map avaiable_indices * map economy helpers * fix econdb docstring * add plots on economy report * minor fixes * wip - crypto report * update economy report * added same default args as view * added view to explicity use chart=True when suing the api * adjustments - removed rich tables to use only df * final version economy report * change report name * equity report for review * linting * add etf symbols endpoint * incorporate feedback economy report * fix reports launch by adding tag to economy report * fix equity bug * remove analyst name * fix * fix news * make links hyperlinks for equity * click links * fixed arg name * improved news * small improves * Fix light terminal stylesheet that would prevent using it in notebooks (#2473) * improved report * run reports in installer * fix #2209 * minor ycrv refactoring * refactor portfolio/holdv virtual path * refactor benchmark trades * fix events args * adapt economy report to changes * fix portfolio controller bug * holdv refactor * refactor perf command * start portfolio report * remove perf view * refactor holp * add textwrap3 to poetry (doesn't solve the error) * fix equity after merge * add some rolling commands * fix equity after save button * improved crypto report, plus minor fixes * minor fixes on the reports * add maxdd and distr * refactor qa * var command * refactor qa expected shortfall * add es command * add es command * fix qa percentile bug * fix economy rendering * refactor qa omega * add om command * add summary command * add dret command * add mret command * add yret command * add metrics * add allocs to report * remove bro and po commands, add later * fixed some tests * adjustments to crypto report * Fix docstring for VSCode Added a note about installing Jupyter PowerToys extension for optimal API usage in 
Jupyter VSCode, in the API_README.md. * minor adjustment * remove nft calendar model virtual paths * Add in Portfolio report * fix external axes portfolio view * Update portfolio report with rolling plots * Details for ETF and Portfolio * fix economy report * change analyst to openbb * floppy * fixed unmatched axis in reports * Speed up tests * fix file and load on po * get_news output * add some po paths * Add integration tests for Reports menu * refactor maxsharpe * open maxsharpe * open minrisk * open maxutil * open maxret * Added fixes * black * remove useless views * Fixed small issue * refactor ef * open ef api * portfolio optimization report * Added fixes * unblock api loading * add more endpoints * update po report * unblock api loading * update po report * expose herc * expose property endpoint * Added fixes * More api fixes * flake8 * Fixed some mypy * news api model * flake8 * mypy fix * mypy * black * pylint * fix tests * markdown * markdown * Added fixes * fix economy report * merge * fix economy report * remove empty notebook * expose nco * remove jupyter notebook * expose plot endpoint * remove po report, just used for tests * api v paths plot * remove api_old * change loading msg Co-authored-by: montezdesousa <[email protected]> Co-authored-by: hjoaquim <[email protected]> Co-authored-by: montezdesousa <[email protected]> Co-authored-by: Om Gupta <[email protected]> Co-authored-by: minhhoang1023 <[email protected]> Co-authored-by: JerBouma <[email protected]> Co-authored-by: Theodore Aptekarev <[email protected]> Co-authored-by: Om Gupta <[email protected]> Co-authored-by: Diogo Sousa <[email protected]> Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: northern-64bit <[email protected]> Co-authored-by: colin99d <[email protected]> Co-authored-by: Minh Hoang <[email protected]>
85,377
0
30
19
10
285,679
11
OpenBBTerminal
6
openbb_terminal/cryptocurrency/defi/terraengineer_model.py
Python
13
{ "docstring": "Displays the 30-day history of the Anchor Yield Reserve.\n [Source: https://terra.engineer/]\n\n Returns\n ----------\n pd.DataFrame\n Dataframe containing historical data\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
1
disable_telemetry
def disable_telemetry(): os.environ[HAYSTACK_TELEMETRY_ENABLED] = "False" logger.info("Telemetry has been disabled.")
ac5617e757e9ace6f30b7291686d9dbbc339f433
8
telemetry.py
38
Add basic telemetry features (#2314) * add basic telemetry features * change pipeline_config to _component_config * Update Documentation & Code Style * add super().__init__() calls to error classes * make posthog mock work with python 3.7 * Update Documentation & Code Style * update link to docs web page * log exceptions, send event for raised HaystackErrors, refactor Path(CONFIG_PATH) * add comment on send_event in BaseComponent.init() and fix mypy * mock NonPrivateParameters and fix pylint undefined-variable * Update Documentation & Code Style * check model path contains multiple / * add test for writing to file * add test for en-/disable telemetry * Update Documentation & Code Style * merge file deletion methods and ignore pylint global statement * Update Documentation & Code Style * set env variable in demo to activate telemetry * fix mock of HAYSTACK_TELEMETRY_ENABLED * fix mypy and linter * add CI as env variable to execution contexts * remove threading, add test for custom error event * Update Documentation & Code Style * simplify config/log file deletion * add test for final event being sent * force writing config file in test * make test compatible with python 3.7 * switch to posthog production server * Update Documentation & Code Style Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
74,975
0
18
19
9
256,958
9
haystack
6
haystack/telemetry.py
Python
3
{ "docstring": "\n Disables telemetry so that no events are sent anymore, except for one final event.\n ", "language": "en", "n_whitespaces": 21, "n_words": 14, "vocab_size": 14 }
https://github.com/deepset-ai/haystack.git
3
cyclic_reduction
def cyclic_reduction(self, removed=False): word = self.copy() g = self.group.identity while not word.is_cyclically_reduced(): exp1 = abs(word.exponent_syllable(0)) exp2 = abs(word.exponent_syllable(-1)) exp = min(exp1, exp2) start = word[0]**abs(exp) end = word[-1]**abs(exp) word = start**-1*word*end**-1 g = g*start if removed: return word, g return word
498015021131af4dbb07eb110e5badaba8250c7b
13
free_groups.py
184
Updated import locations
47,569
0
171
113
28
196,069
41
sympy
17
sympy/combinatorics/free_groups.py
Python
14
{ "docstring": "Return a cyclically reduced version of the word. Unlike\n `identity_cyclic_reduction`, this will not cyclically permute\n the reduced word - just remove the \"unreduced\" bits on either\n side of it. Compare the examples with those of\n `identity_cyclic_reduction`.\n\n When `removed` is `True`, return a tuple `(word, r)` where\n self `r` is such that before the reduction the word was either\n `r*word*r**-1`.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import free_group\n >>> F, x, y = free_group(\"x, y\")\n >>> (x**2*y**2*x**-1).cyclic_reduction()\n x*y**2\n >>> (x**-3*y**-1*x**5).cyclic_reduction()\n y**-1*x**2\n >>> (x**-3*y**-1*x**5).cyclic_reduction(removed=True)\n (y**-1*x**2, x**-3)\n\n ", "language": "en", "n_whitespaces": 209, "n_words": 83, "vocab_size": 66 }
https://github.com/sympy/sympy.git
2
clear_bpbynumber
def clear_bpbynumber(self, arg): try: bp = self.get_bpbynumber(arg) except ValueError as err: return str(err) bp.deleteMe() self._prune_breaks(bp.file, bp.line) return None
8198943edd73a363c266633e1aa5b2a9e9c9f526
10
bdb.py
79
add python 3.10.4 for windows
56,223
0
82
47
17
221,122
18
XX-Net
12
python3.10.4/Lib/bdb.py
Python
8
{ "docstring": "Delete a breakpoint by its index in Breakpoint.bpbynumber.\n\n If arg is invalid, return an error message.\n ", "language": "en", "n_whitespaces": 30, "n_words": 16, "vocab_size": 16 }
https://github.com/XX-net/XX-Net.git
3
test_mapped_dag_pre_existing_tis
def test_mapped_dag_pre_existing_tis(self, dag_maker, session): from airflow.decorators import task from airflow.operators.python import PythonOperator list_result = [[1], [2], [{'a': 'b'}]]
de3bf06863714c6c02ad0c7f5999c961b382bf4a
11
test_backfill_job.py
68
Ensure that BackfillJob re-runs existing mapped task instances (#22952) * Ensure that BackfillJob re-runs existing mapped task instances `expand_mapped_task` only returns _new_ TaskInstances, so if a backfill job was run for a dag that already existed the mapped task would never be executed. * Fix tests for changed interface This interface also isn't the best, and we could probably do with refactoring it. Not now though
9,086
0
46
487
16
47,369
18
airflow
11
tests/jobs/test_backfill_job.py
Python
61
{ "docstring": "If the DagRun already some mapped TIs, ensure that we re-run them successfully", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
https://github.com/apache/airflow.git
4
_collate_rejected
def _collate_rejected(self, pair, row): # It could be fun to enable hyperopt mode to write # a loss function to reduce rejected signals if (self.config.get('export', 'none') == 'signals' and self.dataprovider.runmode == RunMode.BACKTEST): if pair not in self.rejected_dict: self.rejected_dict[pair] = [] self.rejected_dict[pair].append([row[DATE_IDX], row[ENTER_TAG_IDX]])
5a4e99b413f84f818662cc3012819db76aec47c1
12
backtesting.py
120
Add support for collating and analysing rejected trades in backtest
35,136
0
122
74
37
151,789
42
freqtrade
14
freqtrade/optimize/backtesting.py
Python
6
{ "docstring": "\n Temporarily store rejected trade information for downstream use in backtesting_analysis\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/freqtrade/freqtrade.git
2
not_found
def not_found(error): return ( render_template( 'airflow/not_found.html', hostname=get_hostname() if conf.getboolean('webserver', 'EXPOSE_HOSTNAME', fallback=True) else 'redact', ), 404, )
e29543ec00c0a3eae7a789cb49350499c3b584c2
13
views.py
65
Use `get_hostname` instead of `socket.getfqdn` (#24260) We allow users to configure a different function to determine the hostname, so we should use that consistently when we need the hostname.
7,853
0
90
38
16
43,181
16
airflow
8
airflow/www/views.py
Python
10
{ "docstring": "Show Not Found on screen for any error in the Webserver", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/apache/airflow.git
1
test_http_proxy_relative_request_no_host_header
def test_http_proxy_relative_request_no_host_header(tctx): assert ( Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False) >> DataReceived(tctx.client, b"GET / HTTP/1.1\r\n\r\n") << SendData(tctx.client, BytesMatching(b"400 Bad Request.+" b"HTTP request has no host header, destination unknown.")) << CloseConnection(tctx.client) )
56eea20f6389b751d38079fb09b29237a0d2b262
14
test_http.py
96
tutils: add BytesMatching placeholder
73,596
0
134
61
27
251,116
28
mitmproxy
13
test/mitmproxy/proxy/layers/http/test_http.py
Python
8
{ "docstring": "Test handling of a relative-form \"GET /\" in regular proxy mode, but without a host header.", "language": "en", "n_whitespaces": 15, "n_words": 16, "vocab_size": 15 }
https://github.com/mitmproxy/mitmproxy.git
1
project
def project(x): return torch.cat([torch.sqrt(1.0 + torch.sum(x * x, 1, keepdim=True)), x], 1)
04eb35c234de59d4a47848a4f85cb5be3c58c24e
14
hyperbolic.py
60
Added prototypical decoder
53,731
0
18
41
12
214,304
12
flair
7
flair/models/sandbox/prototypical_decoder/distance/hyperbolic.py
Python
2
{ "docstring": "Project onto the hyeprboloid embedded in in n+1 dimensions.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 8 }
https://github.com/flairNLP/flair.git
1
_create_method
def _create_method(cls, op, coerce_to_dtype=True, result_dtype=None): # NOTE(Clark): This overrides, but coerce_to_dtype, result_dtype might # not be needed
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
6
tensor_extension.py
27
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,321
0
38
31
16
130,594
17
ray
5
python/ray/data/extensions/tensor_extension.py
Python
4
{ "docstring": "\n Add support for binary operators by unwrapping, applying, and\n rewrapping.\n ", "language": "en", "n_whitespaces": 32, "n_words": 10, "vocab_size": 10 }
https://github.com/ray-project/ray.git
2
_collect_tcl_modules
def _collect_tcl_modules(tcl_root): # Obtain Tcl major version. tcl_major_version = tcl_version.split('.')[0] modules_dirname = f"tcl{tcl_major_version}" modules_path = os.path.join(tcl_root, '..', modules_dirname) if not os.path.isdir(modules_path): logger.warning('Tcl modules directory %s does not exist.', modules_path) return [] return Tree(modules_path, prefix=modules_dirname)
2b2559af1c7790596e7b2040f48e56baef608f9d
10
tcl_tk.py
116
hookutils: tcl/tk: port to PyInstaller.isolated framework
77,582
0
69
66
30
264,061
34
pyinstaller
15
PyInstaller/utils/hooks/tcl_tk.py
Python
8
{ "docstring": "\n Get a list of TOC-style 3-tuples describing Tcl modules. The modules directory is separate from the library/data\n one, and is located at $tcl_root/../tclX, where X is the major Tcl version.\n\n Returns\n -------\n Tree\n Such list, if the modules directory exists.\n ", "language": "en", "n_whitespaces": 66, "n_words": 40, "vocab_size": 33 }
https://github.com/pyinstaller/pyinstaller.git
2
policy_scope
def policy_scope(policy): old_policy = _global_policy try: set_global_policy(policy) yield finally: set_global_policy(old_policy)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
10
policy.py
43
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,316
0
43
22
10
275,141
10
keras
5
keras/mixed_precision/policy.py
Python
7
{ "docstring": "A context manager that sets the global Policy under it.\n\n Args:\n policy: A Policy, or a string that will be converted to a Policy..\n\n Yields:\n Nothing.\n ", "language": "en", "n_whitespaces": 45, "n_words": 26, "vocab_size": 23 }
https://github.com/keras-team/keras.git
15
directed_edge_swap
def directed_edge_swap(G, *, nswap=1, max_tries=100, seed=None): if nswap > max_tries: raise nx.NetworkXError("Number of swaps > number of tries allowed.") if len(G) < 4: raise nx.NetworkXError("DiGraph has fewer than four nodes.") if len(G.edges) < 3: raise nx.NetworkXError("DiGraph has fewer than 3 edges") # Instead of choosing uniformly at random from a generated edge list, # this algorithm chooses nonuniformly from the set of nodes with # probability weighted by degree. tries = 0 swapcount = 0 keys, degrees = zip(*G.degree()) # keys, degree cdf = nx.utils.cumulative_distribution(degrees) # cdf of degree discrete_sequence = nx.utils.discrete_sequence while swapcount < nswap: # choose source node index from discrete distribution start_index = discrete_sequence(1, cdistribution=cdf, seed=seed)[0] start = keys[start_index] tries += 1 if tries > max_tries: msg = f"Maximum number of swap attempts ({tries}) exceeded before desired swaps achieved ({nswap})." raise nx.NetworkXAlgorithmError(msg) # If the given node doesn't have any out edges, then there isn't anything to swap if G.out_degree(start) == 0: continue second = seed.choice(list(G.succ[start])) if start == second: continue if G.out_degree(second) == 0: continue third = seed.choice(list(G.succ[second])) if second == third: continue if G.out_degree(third) == 0: continue fourth = seed.choice(list(G.succ[third])) if third == fourth: continue if ( third not in G.succ[start] and fourth not in G.succ[second] and second not in G.succ[third] ): # Swap nodes G.add_edge(start, third) G.add_edge(third, second) G.add_edge(second, fourth) G.remove_edge(start, second) G.remove_edge(second, third) G.remove_edge(third, fourth) swapcount += 1 return G @py_random_state(3)
247231f0154badc4a07b6a4ceb40148ea18f264b
@py_random_state(3)
14
swap.py
549
Bug fix in swap: directed_edge_swap and double_edge_swap (#6149) * raise exception if graph has no edges and test for that * Simplify code: raise exception if G has less than 3 edges * add correction * Solved bug in double_edge_swap and added tests for that. Also updated the doc entry * Update networkx/algorithms/swap.py * Added some final suggestions * add merge suggestions Co-authored-by: Ross Barnowski <[email protected]>
42,431
1
620
332
139
177,539
228
networkx
34
networkx/algorithms/swap.py
Python
47
{ "docstring": "Swap three edges in a directed graph while keeping the node degrees fixed.\n\n A directed edge swap swaps three edges such that a -> b -> c -> d becomes\n a -> c -> b -> d. This pattern of swapping allows all possible states with the\n same in- and out-degree distribution in a directed graph to be reached.\n\n If the swap would create parallel edges (e.g. if a -> c already existed in the\n previous example), another attempt is made to find a suitable trio of edges.\n\n Parameters\n ----------\n G : DiGraph\n A directed graph\n\n nswap : integer (optional, default=1)\n Number of three-edge (directed) swaps to perform\n\n max_tries : integer (optional, default=100)\n Maximum number of attempts to swap edges\n\n seed : integer, random_state, or None (default)\n Indicator of random number generation state.\n See :ref:`Randomness<randomness>`.\n\n Returns\n -------\n G : DiGraph\n The graph after the edges are swapped.\n\n Raises\n ------\n NetworkXError\n If `G` is not directed, or\n If nswap > max_tries, or\n If there are fewer than 4 nodes or 3 edges in `G`.\n NetworkXAlgorithmError\n If the number of swap attempts exceeds `max_tries` before `nswap` swaps are made\n\n Notes\n -----\n Does not enforce any connectivity constraints.\n\n The graph G is modified in place.\n\n References\n ----------\n .. [1] Erdős, Péter L., et al. “A Simple Havel-Hakimi Type Algorithm to Realize\n Graphical Degree Sequences of Directed Graphs.” ArXiv:0905.4913 [Math],\n Jan. 2010. https://doi.org/10.48550/arXiv.0905.4913.\n Published 2010 in Elec. J. Combinatorics (17(1)). R66.\n http://www.combinatorics.org/Volume_17/PDF/v17i1r66.pdf\n .. [2] “Combinatorics - Reaching All Possible Simple Directed Graphs with a given\n Degree Sequence with 2-Edge Swaps.” Mathematics Stack Exchange,\n https://math.stackexchange.com/questions/22272/. Accessed 30 May 2022.\n ", "language": "en", "n_whitespaces": 472, "n_words": 264, "vocab_size": 176 }
https://github.com/networkx/networkx.git
3
open
def open(fp, mode="r"): if mode != "r": raise ValueError("bad mode") try: return GdImageFile(fp) except SyntaxError as e: raise UnidentifiedImageError("cannot identify this image file") from e
965df6df6f806887fca3f7b5c6dd6c70a20e803e
11
GdImageFile.py
73
Add missing paramters to docstrings
69,912
0
58
39
24
242,760
25
Pillow
8
src/PIL/GdImageFile.py
Python
7
{ "docstring": "\n Load texture from a GD image file.\n\n :param fp: GD file name, or an opened file handle.\n :param mode: Optional mode. In this version, if the mode argument\n is given, it must be \"r\".\n :returns: An image instance.\n :raises OSError: If the image could not be read.\n ", "language": "en", "n_whitespaces": 74, "n_words": 47, "vocab_size": 40 }
https://github.com/python-pillow/Pillow.git
2
_make_relation_requests
def _make_relation_requests(self) -> Tuple[List[str], JsonDict]: # Request the relations of the event. channel = self.make_request( "GET", f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) event_ids = [ev["event_id"] for ev in channel.json_body["chunk"]] # Fetch the bundled aggregations of the event. channel = self.make_request( "GET", f"/_matrix/client/unstable/rooms/{self.room}/event/{self.parent_id}", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) bundled_relations = channel.json_body["unsigned"].get("m.relations", {}) return event_ids, bundled_relations
f63bedef07360216a8de71dc38f00f1aea503903
11
test_relations.py
214
Invalidate caches when an event with a relation is redacted. (#12121) The caches for the target of the relation must be cleared so that the bundled aggregations are re-calculated after the redaction is processed.
71,671
0
204
117
36
247,439
54
synapse
19
tests/rest/client/test_relations.py
Python
24
{ "docstring": "\n Makes requests and ensures they result in a 200 response, returns a\n tuple of results:\n\n 1. `/relations` -> Returns a list of event IDs.\n 2. `/event` -> Returns the response's m.relations field (from unsigned),\n if it exists.\n ", "language": "en", "n_whitespaces": 84, "n_words": 37, "vocab_size": 32 }
https://github.com/matrix-org/synapse.git
3
at_time
def at_time(self, time, asof=False, axis=None): # noqa: PR01, RT01, D200 axis = self._get_axis_number(axis) idx = self.index if axis == 0 else self.columns indexer = pandas.Series(index=idx).at_time(time, asof=asof).index return self.loc[indexer] if axis == 0 else self.loc[:, indexer]
605efa618e7994681f57b11d04d417f353ef8d50
12
base.py
118
DOCS-#3099: Fix `BasePandasDataSet` docstrings warnings (#4333) Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Alexander Myskov <[email protected]>
35,498
0
71
78
27
153,617
35
modin
13
modin/pandas/base.py
Python
5
{ "docstring": "\n Select values at particular time of day (e.g., 9:30AM).\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
https://github.com/modin-project/modin.git
6
_check_fields
def _check_fields(self, obj): if obj.fields is None: return [] elif not isinstance(obj.fields, (list, tuple)): return must_be("a list or tuple", option="fields", obj=obj, id="admin.E004") elif obj.fieldsets: return [ checks.Error( "Both 'fieldsets' and 'fields' are specified.", obj=obj.__class__, id="admin.E005", ) ] fields = flatten(obj.fields) if len(fields) != len(set(fields)): return [ checks.Error( "The value of 'fields' contains duplicate field(s).", obj=obj.__class__, id="admin.E006", ) ] return list( chain.from_iterable( self._check_field_spec(obj, field_name, "fields") for field_name in obj.fields ) )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
13
checks.py
228
Refs #33476 -- Reformatted code with Black.
50,313
0
418
142
55
203,337
70
django
21
django/contrib/admin/checks.py
Python
28
{ "docstring": "Check that `fields` only refer to existing fields, doesn't contain\n duplicates. Check if at most one of `fields` and `fieldsets` is defined.\n ", "language": "en", "n_whitespaces": 36, "n_words": 22, "vocab_size": 20 }
https://github.com/django/django.git
3
register_for_auto_class
def register_for_auto_class(cls, auto_class="TFAutoModel"): if not isinstance(auto_class, str): auto_class = auto_class.__name__ import transformers.models.auto as auto_module if not hasattr(auto_module, auto_class): raise ValueError(f"{auto_class} is not a valid auto class.") cls._auto_class = auto_class
811c4c9f79758235762b4f70ffae00deae494fb1
11
modeling_tf_utils.py
89
fix bug: register_for_auto_class should be defined on TFPreTrainedModel instead of TFSequenceSummary (#18607)
6,074
0
86
52
24
33,222
29
transformers
13
src/transformers/modeling_tf_utils.py
Python
7
{ "docstring": "\n Register this class with a given auto class. This should only be used for custom models as the ones in the\n library are already mapped with an auto class.\n\n <Tip warning={true}>\n\n This API is experimental and may have some slight breaking changes in the next releases.\n\n </Tip>\n\n Args:\n auto_class (`str` or `type`, *optional*, defaults to `\"TFAutoModel\"`):\n The auto class to register this new model with.\n ", "language": "en", "n_whitespaces": 141, "n_words": 65, "vocab_size": 54 }
https://github.com/huggingface/transformers.git
10
get
def get(self, request): model = self.queryset.model content_type = ContentType.objects.get_for_model(model) if self.filterset: self.queryset = self.filterset(request.GET, self.queryset).qs # Compile a dictionary indicating which permissions are available to the current user for this model permissions = {} for action in ('add', 'change', 'delete', 'view'): perm_name = get_permission_for_model(model, action) permissions[action] = request.user.has_perm(perm_name) if 'export' in request.GET: # Export the current table view if request.GET['export'] == 'table': table = self.get_table(request, permissions) columns = [name for name, _ in table.selected_columns] return self.export_table(table, columns) # Render an ExportTemplate elif request.GET['export']: template = get_object_or_404(ExportTemplate, content_type=content_type, name=request.GET['export']) return self.export_template(template, request) # Check for YAML export support on the model elif hasattr(model, 'to_yaml'): response = HttpResponse(self.export_yaml(), content_type='text/yaml') filename = 'netbox_{}.yaml'.format(self.queryset.model._meta.verbose_name_plural) response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename) return response # Fall back to default table/YAML export else: table = self.get_table(request, permissions) return self.export_table(table) # Render the objects table table = self.get_table(request, permissions) configure_table(table, request) # If this is an HTMX request, return only the rendered table HTML if is_htmx(request): return render(request, 'htmx/table.html', { 'table': table, }) context = { 'content_type': content_type, 'table': table, 'permissions': permissions, 'action_buttons': self.action_buttons, 'filter_form': self.filterset_form(request.GET, label_suffix='') if self.filterset_form else None, } context.update(self.get_extra_context(request)) return render(request, self.template_name, context) # # Export methods #
54834c47f8870e7faabcd847c3270da0bd3d2884
17
bulk_views.py
565
Refactor generic views; add plugins dev documentation
77,669
0
698
342
123
264,296
192
netbox
47
netbox/netbox/views/generic/bulk_views.py
Python
40
{ "docstring": "\n GET request handler.\n\n Args:\n request: The current request\n ", "language": "en", "n_whitespaces": 41, "n_words": 8, "vocab_size": 7 }
https://github.com/netbox-community/netbox.git
1
load_audio
def load_audio(file_path): x, sr = torchaudio.load(file_path) assert (x > 1).sum() + (x < -1).sum() == 0 return x, sr
00c7600103ee34ac50506af88f1b34b713f849e7
12
vits.py
72
Update Vits model API
77,159
0
31
43
16
262,249
19
TTS
7
TTS/tts/models/vits.py
Python
4
{ "docstring": "Load the audio file normalized in [-1, 1]\n\n Return Shapes:\n - x: :math:`[1, T]`\n ", "language": "en", "n_whitespaces": 27, "n_words": 14, "vocab_size": 14 }
https://github.com/coqui-ai/TTS.git
2
state
def state(self) -> Mapping[str, Any]: if self._cursor_value: return { self.cursor_field: self._cursor_value, "include_deleted": self._include_deleted, } return {}
a3aae8017a0a40ff2006e2567f71dccb04c997a5
10
base_streams.py
61
🎉 🎉 Source FB Marketing: performance and reliability fixes (#9805) * Facebook Marketing performance improvement * add comments and little refactoring * fix integration tests with the new config * improve job status handling, limit concurrency to 10 * fix campaign jobs, refactor manager * big refactoring of async jobs, support random order of slices * update source _read_incremental to hook new state logic * fix issues with timeout * remove debugging and clean up, improve retry logic * merge changes from #8234 * fix call super _read_increment * generalize batch execution, add use_batch flag * improve coverage, do some refactoring of spec * update test, remove overrides of source * add split by AdSet * add smaller insights * fix end_date < start_date case * add account_id to PK * add notes * fix new streams * fix reversed incremental stream * update spec.json for SAT * upgrade CDK and bump version Co-authored-by: Dmytro Rezchykov <[email protected]> Co-authored-by: Eugene Kulak <[email protected]>
553
0
89
38
15
3,767
16
airbyte
8
airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/base_streams.py
Python
8
{ "docstring": "State getter, get current state and serialize it to emmit Airbyte STATE message", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
https://github.com/airbytehq/airbyte.git
4
create_nested_bom
def create_nested_bom(tree, prefix="_Test bom "): def create_items(bom_tree): for item_code, subtree in bom_tree.items(): bom_item_code = prefix + item_code if not frappe.db.exists("Item", bom_item_code): frappe.get_doc(doctype="Item", item_code=bom_item_code, item_group="_Test Item Group").insert() create_items(subtree) create_items(tree) def dfs(tree, node): for node_name, subtree in tree.items(): if node_name == node: return subtree else: result = dfs(subtree, node) if result is not None: return result order_of_creating_bom = reversed(level_order_traversal(tree)) for item in order_of_creating_bom: child_items = dfs(tree, item) if child_items: bom_item_code = prefix + item bom = frappe.get_doc(doctype="BOM", item=bom_item_code) for child_item in child_items.keys(): bom.append("items", {"item_code": prefix + child_item}) bom.company = "_Test Company" bom.currency = "INR" bom.insert() bom.submit() return bom # parent bom is last bom
8dff4d66a4a13def61276c2b46cbf0984c469819
17
test_bom.py
346
fix: bom valuation - handle lack of LPP (#30454)
13,699
0
74
107
65
64,713
102
erpnext
32
erpnext/manufacturing/doctype/bom/test_bom.py
Python
17
{ "docstring": " Helper function to create a simple nested bom from tree describing item names. (along with required items)\n\tnaive implementation for searching right subtree", "language": "en", "n_whitespaces": 22, "n_words": 23, "vocab_size": 23 }
https://github.com/frappe/erpnext.git
3
_make_historical_mat_time
def _make_historical_mat_time(deltas, changepoints_t, t_time, n_row=1, single_diff=None): if single_diff is None: single_diff = np.diff(t_time).mean() prev_time = np.arange(0, 1 + single_diff, single_diff) idxs = [] for changepoint in changepoints_t: idxs.append(np.where(prev_time > changepoint)[0][0]) prev_deltas = np.zeros(len(prev_time)) prev_deltas[idxs] = deltas prev_deltas = np.repeat(prev_deltas.reshape(1, -1), n_row, axis=0) return prev_deltas, prev_time
8fbf8ba2a5bfcdb892e8ca596e338894614000b5
14
forecaster.py
183
Speed Up Uncertainty Predictions (#2186)
441
0
130
120
37
3,306
45
prophet
21
python/prophet/forecaster.py
Python
11
{ "docstring": "\n Creates a matrix of slope-deltas where these changes occured in training data according to the trained prophet obj\n ", "language": "en", "n_whitespaces": 33, "n_words": 18, "vocab_size": 18 }
https://github.com/facebook/prophet.git
1
test_lm_generate_gpt2_xla
def test_lm_generate_gpt2_xla(self): model = TFGPT2LMHeadModel.from_pretrained("gpt2") input_ids = tf.convert_to_tensor([[464, 3290]], dtype=tf.int32) # The dog # The dog was found in a field near the intersection of West and West Streets.\n\nThe dog # fmt: off expected_output_ids = [464, 3290, 373, 1043, 287, 257, 2214, 1474, 262, 16246, 286, 2688, 290, 2688, 27262, 13, 198, 198, 464, 3290] # fmt: on xla_generate = tf.function(model.generate, jit_compile=True) output_ids = xla_generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
cd4c5c90605b2e23879fcca484f7079b0fc0c361
12
test_modeling_tf_gpt2.py
173
TF XLA greedy generation (#15786) * First attempt at TF XLA generation * Fix comments * Update XLA greedy generate with direct XLA calls * Support attention mask, prepare_inputs_for_generation no longer hardcoded for greedy * Handle position_ids correctly * make xla generate work for non xla case * force using xla generate * refactor * more fixes * finish cleaning * finish * finish * clean gpt2 tests * add gpt2 tests * correct more cases * up * finish * finish * more fixes * flake 8 stuff * final rag fix * Update src/transformers/models/rag/modeling_tf_rag.py * finish t5 as well * finish * Update src/transformers/generation_utils.py Co-authored-by: Patrick von Platen <[email protected]>
6,584
0
139
120
54
36,194
68
transformers
20
tests/gpt2/test_modeling_tf_gpt2.py
Python
7
{ "docstring": "This test gives the exact same results as the non-xla test above", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 10 }
https://github.com/huggingface/transformers.git
4
Error
def Error(filename, linenum, category, confidence, message): if _ShouldPrintError(category, confidence, linenum): _cpplint_state.IncrementErrorCount(category) if _cpplint_state.output_format == 'vs7': sys.stderr.write('%s(%s): %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) elif _cpplint_state.output_format == 'eclipse': sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) else: sys.stderr.write('%s:%s: %s [%s] [%d]\n' % ( filename, linenum, message, category, confidence)) # Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard. _RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile( r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)') # Matches strings. Escape codes should already be removed by ESCAPES. _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"') # Matches characters. Escape codes should already be removed by ESCAPES. _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'") # Matches multi-line C++ comments. # This RE is a little bit more complicated than one might expect, because we # have to take care of space removals tools so we can handle comments inside # statements better. # The current rule is: We only clear spaces from both sides when we're at the # end of the line. Otherwise, we try to remove spaces from the right side, # if this doesn't work we try on left side but only if there's a non-character # on the right. _RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile( r, re.VERBOSE)
cc4d0564756ca067516f71718a3d135996525909
14
cpp_lint.py
241
Balanced joint maximum mean discrepancy for deep transfer learning
12,139
0
246
106
120
60,411
192
transferlearning
20
code/deep/BJMMD/caffe/scripts/cpp_lint.py
Python
12
{ "docstring": "Logs the fact we've found a lint error.\n\n We log where the error was found, and also our confidence in the error,\n that is, how certain we are this is a legitimate style regression, and\n not a misidentification or a use that's sometimes justified.\n\n False positives can be suppressed by the use of\n \"cpplint(category)\" comments on the offending line. These are\n parsed into _error_suppressions.\n\n Args:\n filename: The name of the file containing the error.\n linenum: The number of the line containing the error.\n category: A string used to describe the \"category\" this bug\n falls under: \"whitespace\", say, or \"runtime\". Categories\n may have a hierarchy separated by slashes: \"whitespace/indent\".\n confidence: A number from 1-5 representing a confidence score for\n the error, with 5 meaning that we are certain of the problem,\n and 1 meaning that it could be a legitimate construct.\n message: The error message.\n (\\s*/\\*.*\\*/\\s*$|\n /\\*.*\\*/\\s+|\n \\s+/\\*.*\\*/(?=\\W)|\n /\\*.*\\*/)", "language": "en", "n_whitespaces": 223, "n_words": 148, "vocab_size": 103 }
https://github.com/jindongwang/transferlearning.git
1
write_gexf
def write_gexf(G, path, encoding="utf-8", prettyprint=True, version="1.2draft"): writer = GEXFWriter(encoding=encoding, prettyprint=prettyprint, version=version) writer.add_graph(G) writer.write(path)
54e36acb36c75e09bc53dfcb81c73386b82a20c9
9
gexf.py
77
Update gexf website link in documentation (#5275) Hi, we've recently put the GEXF website again into its own domain http://gexf.net/ so this documentation should be updated. Thanks!
41,769
0
25
48
13
176,216
13
networkx
10
networkx/readwrite/gexf.py
Python
4
{ "docstring": "Write G in GEXF format to path.\n\n \"GEXF (Graph Exchange XML Format) is a language for describing\n complex networks structures, their associated data and dynamics\" [1]_.\n\n Node attributes are checked according to the version of the GEXF\n schemas used for parameters which are not user defined,\n e.g. visualization 'viz' [2]_. See example for usage.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n path : file or string\n File or file name to write.\n File names ending in .gz or .bz2 will be compressed.\n encoding : string (optional, default: 'utf-8')\n Encoding for text data.\n prettyprint : bool (optional, default: True)\n If True use line breaks and indenting in output XML.\n version: string (optional, default: '1.2draft')\n The version of GEXF to be used for nodes attributes checking\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n >>> nx.write_gexf(G, \"test.gexf\")\n\n # visualization data\n >>> G.nodes[0][\"viz\"] = {\"size\": 54}\n >>> G.nodes[0][\"viz\"][\"position\"] = {\"x\": 0, \"y\": 1}\n >>> G.nodes[0][\"viz\"][\"color\"] = {\"r\": 0, \"g\": 0, \"b\": 256}\n\n\n Notes\n -----\n This implementation does not support mixed graphs (directed and undirected\n edges together).\n\n The node id attribute is set to be the string of the node label.\n If you want to specify an id use set it as node data, e.g.\n node['a']['id']=1 to set the id of node 'a' to 1.\n\n References\n ----------\n .. [1] GEXF File Format, http://gexf.net/\n .. [2] GEXF schema, http://gexf.net/schema.html\n ", "language": "en", "n_whitespaces": 355, "n_words": 223, "vocab_size": 147 }
https://github.com/networkx/networkx.git
1
test_user_supplied_base_job_with_labels
def test_user_supplied_base_job_with_labels(self): manifest = KubernetesJob( command=["echo", "hello"], job={ "apiVersion": "batch/v1", "kind": "Job", "metadata": {"labels": {"my-custom-label": "sweet"}}, "spec": { "template": { "spec": { "parallelism": 1, "completions": 1, "restartPolicy": "Never", "containers": [ { "name": "prefect-job", "env": [], } ], } } }, }, ).build_job() assert manifest["metadata"]["labels"] == { # the labels provided in the user's job base "my-custom-label": "sweet", }
daddc2985f0cba6c6e0ae3903232cbca155e7e91
23
test_kubernetes_job.py
201
Port KubernetesFlowRunner tests to KubernetesJob tests
11,669
0
574
104
47
57,490
58
prefect
7
tests/infrastructure/test_kubernetes_job.py
Python
27
{ "docstring": "The user can supply a custom base job with labels and they will be\n included in the final manifest", "language": "en", "n_whitespaces": 25, "n_words": 19, "vocab_size": 19 }
https://github.com/PrefectHQ/prefect.git
1
require_torch_bf16_gpu
def require_torch_bf16_gpu(test_case): return unittest.skipUnless( is_torch_bf16_gpu_available(), "test requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0", )(test_case)
a2d34b7c040723b92823310e3b8fd66874c9d667
10
testing_utils.py
38
deprecate is_torch_bf16_available (#17738) * deprecate is_torch_bf16_available * address suggestions
5,727
0
40
21
17
31,379
17
transformers
5
src/transformers/testing_utils.py
Python
5
{ "docstring": "Decorator marking a test that requires torch>=1.10, using Ampere GPU or newer arch with cuda>=11.0", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 15 }
https://github.com/huggingface/transformers.git
1
test_enable_disable_conflict_with_config
def test_enable_disable_conflict_with_config(): nlp = English() nlp.add_pipe("tagger") nlp.add_pipe("senter") nlp.add_pipe("sentencizer") with make_tempdir() as tmp_dir: nlp.to_disk(tmp_dir) # Expected to fail, as config and arguments conflict. with pytest.raises(ValueError): spacy.load( tmp_dir, enable=["tagger"], config={"nlp": {"disabled": ["senter"]}} ) # Expected to succeed without warning due to the lack of a conflicting config option. spacy.load(tmp_dir, enable=["tagger"]) # Expected to succeed with a warning, as disable=[] should override the config setting. with pytest.warns(UserWarning): spacy.load( tmp_dir, enable=["tagger"], disable=[], config={"nlp": {"disabled": ["senter"]}}, )
aea16719be04d4d6ab889cd20fe0e323b2c7ffee
18
test_pipe_methods.py
235
Simplify and clarify enable/disable behavior of spacy.load() (#11459) * Change enable/disable behavior so that arguments take precedence over config options. Extend error message on conflict. Add warning message in case of overwriting config option with arguments. * Fix tests in test_serialize_pipeline.py to reflect changes to handling of enable/disable. * Fix type issue. * Move comment. * Move comment. * Issue UserWarning instead of printing wasabi message. Adjust test. * Added pytest.warns(UserWarning) for expected warning to fix tests. * Update warning message. * Move type handling out of fetch_pipes_status(). * Add global variable for default value. Use id() to determine whether used values are default value. * Fix default value for disable. * Rename DEFAULT_PIPE_STATUS to _DEFAULT_EMPTY_PIPES.
24,437
0
258
127
49
111,556
72
spaCy
17
spacy/tests/pipeline/test_pipe_methods.py
Python
19
{ "docstring": "Test conflict between enable/disable w.r.t. `nlp.disabled` set in the config.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/explosion/spaCy.git
14
broadcast_object_list
def broadcast_object_list(object_list, src=0, group=None, device=None): if _rank_not_in_group(group): _warn_not_in_group("broadcast_object_list") return my_rank = get_rank() # Serialize object_list elements to tensors on src rank. if my_rank == src: tensor_list, size_list = zip(*[_object_to_tensor(obj) for obj in object_list]) object_sizes_tensor = torch.cat(size_list) else: object_sizes_tensor = torch.empty(len(object_list), dtype=torch.long) # Current device selection. # To preserve backwards compatibility, ``device`` is default to ``None`` # in which case we run current logic of device selection, i.e. # ``current_device`` is CUDA if backend is NCCL otherwise CPU device. In the # case it is not ``None`` we move the size and object tensors to be # broadcasted to this device. is_nccl_backend = _check_for_nccl_backend(group) current_device = None if device is not None: if is_nccl_backend and device.type != "cuda": raise ValueError("device type must be cuda for nccl backend") current_device = device else: current_device = torch.device("cpu") if is_nccl_backend: # See note about using torch.cuda.current_device() here in # docstring. We cannot simply use my_rank since rank == device is # not necessarily true. current_device = torch.device("cuda", torch.cuda.current_device()) if is_nccl_backend: object_sizes_tensor = object_sizes_tensor.to(current_device) # Broadcast object sizes broadcast(object_sizes_tensor, src=src, group=group) # Concatenate and broadcast serialized object tensors if my_rank == src: object_tensor = torch.cat(tensor_list) else: object_tensor = torch.empty( torch.sum(object_sizes_tensor).item(), # type: ignore[arg-type] dtype=torch.uint8, ) if is_nccl_backend: object_tensor = object_tensor.to(current_device) broadcast(object_tensor, src=src, group=group) # Deserialize objects using their stored sizes. offset = 0 if my_rank != src: for i, obj_size in enumerate(object_sizes_tensor): obj_view = object_tensor[offset : offset + obj_size] obj_view = obj_view.type(torch.uint8) if obj_view.device != torch.device("cpu"): obj_view = obj_view.cpu() offset += obj_size object_list[i] = _tensor_to_object(obj_view, obj_size)
e1e43c4e710389a3fcf54cd7f3537336e21d3ae5
16
distributed_c10d.py
502
Prevent sum overflow in broadcast_object_list (#70605) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/70605 broadcast_object_list casted the sum of all object lengths to int from long causing overflows. Test Plan: Add a Tensor with >2GB storage requirement (in distributed_test.py) to object broadcast. This Tensor is only added if test are running at Meta as github tests will oom. Without fix the length will overflow and the program will request a negative sized Tensor: ``` RuntimeError: Trying to create tensor with negative dimension -2147482417: [-2147482417] ``` With fix it will pass the test. Test used on server with GPUs: buck test mode/dev-nosan //caffe2/test/distributed:distributed_nccl_spawn --local -- broadcast_object buck test mode/dev-nosan //caffe2/test/distributed:distributed_gloo_spawn --local -- broadcast_object Reviewed By: r-barnes Differential Revision: D33405741 fbshipit-source-id: 972165f8297b3f5d475636e6127ed4a49adacab1
21,501
0
584
301
149
102,230
250
pytorch
40
torch/distributed/distributed_c10d.py
Python
42
{ "docstring": "\n Broadcasts picklable objects in ``object_list`` to the whole group. Similar\n to :func:`broadcast`, but Python objects can be passed in.\n Note that all objects in ``object_list`` must be picklable in order to be\n broadcasted.\n\n Args:\n object_list (List[Any]): List of input objects to broadcast.\n Each object must be picklable. Only objects on the ``src`` rank will\n be broadcast, but each rank must provide lists of equal sizes.\n src (int): Source rank from which to broadcast ``object_list``.\n group: (ProcessGroup, optional): The process group to work on. If None,\n the default process group will be used. Default is ``None``.\n device (``torch.device``, optional): If not None, the objects are\n serialized and converted to tensors which are moved to the\n ``device`` before broadcasting. Default is ``None``.\n\n Returns:\n ``None``. If rank is part of the group, ``object_list`` will contain the\n broadcasted objects from ``src`` rank.\n\n .. note:: For NCCL-based processed groups, internal tensor representations\n of objects must be moved to the GPU device before communication takes\n place. In this case, the device used is given by\n ``torch.cuda.current_device()`` and it is the user's responsiblity to\n ensure that this is set so that each rank has an individual GPU, via\n ``torch.cuda.set_device()``.\n\n .. note:: Note that this API differs slightly from the :func:`all_gather`\n collective since it does not provide an ``async_op`` handle and thus\n will be a blocking call.\n\n .. warning::\n :func:`broadcast_object_list` uses ``pickle`` module implicitly, which\n is known to be insecure. It is possible to construct malicious pickle\n data which will execute arbitrary code during unpickling. Only call this\n function with data you trust.\n\n Example::\n >>> # Note: Process group initialization omitted on each rank.\n >>> import torch.distributed as dist\n >>> if dist.get_rank() == 0:\n >>> # Assumes world_size of 3.\n >>> objects = [\"foo\", 12, {1: 2}] # any picklable object\n >>> else:\n >>> objects = [None, None, None]\n >>> # Assumes backend is not NCCL\n >>> device = torch.device(\"cpu\")\n >>> dist.broadcast_object_list(objects, src=0, device=device)\n >>> objects\n ['foo', 12, {1: 2}]\n ", "language": "en", "n_whitespaces": 626, "n_words": 322, "vocab_size": 187 }
https://github.com/pytorch/pytorch.git
3
forward
def forward(self, data, optimizer=None, return_loss=False, **kwargs): batch_inputs, batch_data_samples = self.preprocss_data(data) if torch.onnx.is_in_onnx_export(): # TODO: Delete assert len(batch_inputs) == 1 return self.onnx_export(batch_inputs, batch_data_samples) if return_loss: losses = self.forward_train(batch_inputs, batch_data_samples, **kwargs) loss, log_vars = self._parse_losses(losses) outputs = dict( loss=loss, log_vars=log_vars, num_samples=len(batch_data_samples)) return outputs else: # TODO: refactor and support aug test later assert isinstance(data[0]['inputs'], torch.Tensor), \ 'Only support simple test currently. Aug-test is ' \ 'not supported yet' return self.forward_simple_test(batch_inputs, batch_data_samples, **kwargs)
924c381a78eb70cede198e042ef34e038e05c15a
13
base.py
209
Modify RetinaNet model interface
70,395
0
372
135
55
244,473
70
mmdetection
25
mmdet/models/detectors/base.py
Python
20
{ "docstring": "The iteration step during training and testing. This method defines\n an iteration step during training and testing, except for the back\n propagation and optimizer updating during training, which are done in\n an optimizer hook.\n\n Args:\n data (list[dict]): The output of dataloader.\n optimizer (:obj:`torch.optim.Optimizer`, dict, Optional): The\n optimizer of runner. This argument is unused and reserved.\n Default to None.\n return_loss (bool): Whether to return loss. In general,\n it will be set to True during training and False\n during testing. Default to False.\n\n Returns:\n during training\n dict: It should contain at least 3 keys: ``loss``,\n ``log_vars``, ``num_samples``.\n - ``loss`` is a tensor for back propagation, which can be a\n weighted sum of multiple losses.\n - ``log_vars`` contains all the variables to be sent to the\n logger.\n - ``num_samples`` indicates the batch size (when the model\n is DDP, it means the batch size on each GPU), which is\n used for averaging the logs.\n\n during testing\n list(obj:`DetDataSample`): Detection results of the\n input images. Each DetDataSample usually contains\n ``pred_instances`` or ``pred_panoptic_seg`` or\n ``pred_sem_seg``.\n ", "language": "en", "n_whitespaces": 562, "n_words": 168, "vocab_size": 111 }
https://github.com/open-mmlab/mmdetection.git
8
mode
def mode(a, axis=0, nan_policy='propagate'): a, axis = _chk_asarray(a, axis) if a.size == 0: return ModeResult(np.array([]), np.array([])) contains_nan, nan_policy = _contains_nan(a, nan_policy) if contains_nan and nan_policy == 'omit': a = ma.masked_invalid(a) return mstats_basic.mode(a, axis) if a.dtype == object and np.nan in set(a.ravel()): # Fall back to a slower method since np.unique does not work with NaN scores = set(np.ravel(a)) # get ALL unique values testshape = list(a.shape) testshape[axis] = 1 oldmostfreq = np.zeros(testshape, dtype=a.dtype) oldcounts = np.zeros(testshape, dtype=int) for score in scores: template = (a == score) counts = np.sum(template, axis, keepdims=True) mostfrequent = np.where(counts > oldcounts, score, oldmostfreq) oldcounts = np.maximum(counts, oldcounts) oldmostfreq = mostfrequent return ModeResult(mostfrequent, oldcounts)
7438fe5edfb565ff341fa6ab054461fcdd504aa2
13
_stats_py.py
336
MAINT: stats: mode: fix negative axis issue with np.moveaxis instead of custom code (#15421)
69,724
0
259
340
78
241,885
108
scipy
35
scipy/stats/_stats_py.py
Python
31
{ "docstring": "Return an array of the modal (most common) value in the passed array.\n\n If there is more than one such value, only the smallest is returned.\n The bin-count for the modal bins is also returned.\n\n Parameters\n ----------\n a : array_like\n n-dimensional array of which to find mode(s).\n axis : int or None, optional\n Axis along which to operate. Default is 0. If None, compute over\n the whole array `a`.\n nan_policy : {'propagate', 'raise', 'omit'}, optional\n Defines how to handle when input contains nan.\n The following options are available (default is 'propagate'):\n\n * 'propagate': returns nan\n * 'raise': throws an error\n * 'omit': performs the calculations ignoring nan values\n\n Returns\n -------\n mode : ndarray\n Array of modal values.\n count : ndarray\n Array of counts for each mode.\n\n Examples\n --------\n >>> a = np.array([[6, 8, 3, 0],\n ... [3, 2, 1, 7],\n ... [8, 1, 8, 4],\n ... [5, 3, 0, 5],\n ... [4, 7, 5, 9]])\n >>> from scipy import stats\n >>> stats.mode(a)\n ModeResult(mode=array([[3, 1, 0, 0]]), count=array([[1, 1, 1, 1]]))\n\n To get mode of whole array, specify ``axis=None``:\n\n >>> stats.mode(a, axis=None)\n ModeResult(mode=array([3]), count=array([3]))\n\n ", "language": "en", "n_whitespaces": 390, "n_words": 183, "vocab_size": 131 }
https://github.com/scipy/scipy.git
3
ascents
def ascents(self): a = self.array_form pos = [i for i in range(len(a) - 1) if a[i] < a[i + 1]] return pos
498015021131af4dbb07eb110e5badaba8250c7b
13
permutations.py
67
Updated import locations
47,664
0
50
42
20
196,164
22
sympy
8
sympy/combinatorics/permutations.py
Python
4
{ "docstring": "\n Returns the positions of ascents in a permutation, ie, the location\n where p[i] < p[i+1]\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation\n >>> p = Permutation([4, 0, 1, 3, 2])\n >>> p.ascents()\n [1, 2]\n\n See Also\n ========\n\n descents, inversions, min, max\n ", "language": "en", "n_whitespaces": 126, "n_words": 41, "vocab_size": 37 }
https://github.com/sympy/sympy.git
20
_orbit
def _orbit(degree, generators, alpha, action='tuples'): r if not hasattr(alpha, '__getitem__'): alpha = [alpha] gens = [x._array_form for x in generators] if len(alpha) == 1 or action == 'union': orb = alpha used = [False]*degree for el in alpha: used[el] = True for b in orb: for gen in gens: temp = gen[b] if used[temp] == False: orb.append(temp) used[temp] = True return set(orb) elif action == 'tuples': alpha = tuple(alpha) orb = [alpha] used = {alpha} for b in orb: for gen in gens: temp = tuple([gen[x] for x in b]) if temp not in used: orb.append(temp) used.add(temp) return set(orb) elif action == 'sets': alpha = frozenset(alpha) orb = [alpha] used = {alpha} for b in orb: for gen in gens: temp = frozenset([gen[x] for x in b]) if temp not in used: orb.append(temp) used.add(temp) return {tuple(x) for x in orb}
498015021131af4dbb07eb110e5badaba8250c7b
17
perm_groups.py
405
Updated import locations
47,623
0
517
258
56
196,123
140
sympy
21
sympy/combinatorics/perm_groups.py
Python
74
{ "docstring": "Compute the orbit of alpha `\\{g(\\alpha) | g \\in G\\}` as a set.\n\n Explanation\n ===========\n\n The time complexity of the algorithm used here is `O(|Orb|*r)` where\n `|Orb|` is the size of the orbit and ``r`` is the number of generators of\n the group. For a more detailed analysis, see [1], p.78, [2], pp. 19-21.\n Here alpha can be a single point, or a list of points.\n\n If alpha is a single point, the ordinary orbit is computed.\n if alpha is a list of points, there are three available options:\n\n 'union' - computes the union of the orbits of the points in the list\n 'tuples' - computes the orbit of the list interpreted as an ordered\n tuple under the group action ( i.e., g((1, 2, 3)) = (g(1), g(2), g(3)) )\n 'sets' - computes the orbit of the list interpreted as a sets\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation, PermutationGroup\n >>> from sympy.combinatorics.perm_groups import _orbit\n >>> a = Permutation([1, 2, 0, 4, 5, 6, 3])\n >>> G = PermutationGroup([a])\n >>> _orbit(G.degree, G.generators, 0)\n {0, 1, 2}\n >>> _orbit(G.degree, G.generators, [0, 4], 'union')\n {0, 1, 2, 3, 4, 5, 6}\n\n See Also\n ========\n\n orbit, orbit_transversal\n\n ", "language": "en", "n_whitespaces": 272, "n_words": 194, "vocab_size": 119 }
https://github.com/sympy/sympy.git
1
_obj_reference_counts
def _obj_reference_counts(self): self._maybe_create_attribute( "_obj_reference_counts_dict", object_identity.ObjectIdentityDictionary(), ) return self._obj_reference_counts_dict
84afc5193d38057e2e2badf9c889ea87d80d8fbf
9
base_layer.py
41
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,532
0
58
23
8
270,708
8
keras
6
keras/engine/base_layer.py
Python
6
{ "docstring": "A dictionary counting the number of attributes referencing an object.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/keras-team/keras.git
2
on_idle
async def on_idle(self) -> None: if self._require_styles_update: await self.post_message(messages.StylesUpdated(self)) self._require_styles_update = False
ada31e68de9065d5d407b16868fe1a2ddb88c548
12
app.py
53
docstring
43,997
0
48
30
12
182,890
12
textual
6
src/textual/app.py
Python
5
{ "docstring": "Perform actions when there are no messages in the queue.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/Textualize/textual.git
4
unset_outlier_removal
def unset_outlier_removal(self): if self.ft_params.get('use_SVM_to_remove_outliers', False): self.ft_params.update({'use_SVM_to_remove_outliers': False}) logger.warning('User tried to use SVM with RL. Deactivating SVM.') if self.ft_params.get('use_DBSCAN_to_remove_outliers', False): self.ft_params.update({'use_SVM_to_remove_outliers': False}) logger.warning('User tried to use DBSCAN with RL. Deactivating DBSCAN.') if self.freqai_info['data_split_parameters'].get('shuffle', False): self.freqai_info['data_split_parameters'].update('shuffle', False) logger.warning('User tried to shuffle training data. Setting shuffle to False')
8aac644009dd7a8ab8f006594b547abddad5aca9
12
BaseReinforcementLearningModel.py
180
add tests. add guardrails.
34,968
0
139
100
27
151,174
45
freqtrade
8
freqtrade/freqai/RL/BaseReinforcementLearningModel.py
Python
10
{ "docstring": "\n If user has activated any function that may remove training points, this\n function will set them to false and warn them\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 19 }
https://github.com/freqtrade/freqtrade.git
2
log_has_when
def log_has_when(line, logs, when): return any(line == message.message for message in logs.get_records(when))
866a5649588c77ec686ba65e581d1306d8ea3751
10
conftest.py
44
update emc start/shutdown, initial emc tests
34,960
0
18
28
12
151,131
12
freqtrade
7
tests/conftest.py
Python
2
{ "docstring": "Check if line is found in caplog's messages during a specified stage", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/freqtrade/freqtrade.git
1
bcoo_dot_general_sampled
def bcoo_dot_general_sampled(A, B, indices, *, dimension_numbers): (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers cdims = (api_util._ensure_index_tuple(lhs_contract), api_util._ensure_index_tuple(rhs_contract)) bdims = (api_util._ensure_index_tuple(lhs_batch), api_util._ensure_index_tuple(rhs_batch)) return bcoo_dot_general_sampled_p.bind(A, B, indices, dimension_numbers=(cdims, bdims)) @bcoo_dot_general_sampled_p.def_impl
3184dd65a222354bffa2466d9a375162f5649132
@bcoo_dot_general_sampled_p.def_impl
9
bcoo.py
124
[sparse] Update docstrings for bcoo primitives. PiperOrigin-RevId: 438685829
26,736
1
91
80
23
119,984
27
jax
16
jax/experimental/sparse/bcoo.py
Python
8
{ "docstring": "A contraction operation with output computed at given sparse indices.\n\n Args:\n lhs: An ndarray.\n rhs: An ndarray.\n indices: BCOO indices.\n dimension_numbers: a tuple of tuples of the form\n `((lhs_contracting_dims, rhs_contracting_dims),\n (lhs_batch_dims, rhs_batch_dims))`.\n\n Returns:\n BCOO data, an ndarray containing the result.\n ", "language": "en", "n_whitespaces": 68, "n_words": 40, "vocab_size": 34 }
https://github.com/google/jax.git
2
setup_args
def setup_args(parser=None) -> ParlaiParser: if parser is None: parser = ParlaiParser(True, True, 'Train a model') train = parser.add_argument_group('Training Loop Arguments') train.add_argument( '-et', '--evaltask', help='task to use for valid/test (defaults to the one used for training)', ) train.add_argument( '--final-extra-opt', type=str, default='', help="A '.opt' file that is used for final eval. Useful for setting skip-generation to false. 'datatype' must be included as part of the opt.", ) train.add_argument( '--eval-batchsize', type=int, hidden=True, help='Eval time batch size (defaults to same as -bs)', ) train.add_argument( '--eval-dynamic-batching', # FIXME: see https://github.com/facebookresearch/ParlAI/issues/3367 default=None, type='nonestr', choices={None, 'off', 'full', 'batchsort'}, help=( 'Set dynamic batching at evaluation time. Set to off for ' 'train-only dynamic batching. Set to none (default) to use same ' 'setting as --dynamic-batching.' ), ) train.add_argument( '--num-workers', default=0, type=int, help='Number of background workers (training only)', ) train.add_argument('--display-examples', type='bool', default=False, hidden=True) train.add_argument('-eps', '--num-epochs', type=float, default=-1) train.add_argument('-ttim', '--max-train-time', type=float, default=-1) train.add_argument( '-tstep', '--max-train-steps', '--max-lr-steps', type=int, default=-1, help='End training after n model updates', ) train.add_argument('-ltim', '--log-every-n-secs', type=float, default=-1) train.add_argument( '-lstep', '--log-every-n-steps', type=int, default=50, help='Log every n training steps', ) train.add_argument( '-vtim', '--validation-every-n-secs', type=float, default=-1, help='Validate every n seconds. Saves model to model_file ' '(if set) whenever best val metric is found', ) train.add_argument( '-vstep', '--validation-every-n-steps', type=int, default=-1, help='Validate every n training steps. Saves model to model_file ' '(if set) whenever best val metric is found', ) train.add_argument( '-stim', '--save-every-n-secs', type=float, default=-1, help='Saves the model to model_file.checkpoint after ' 'every n seconds (default -1, never).', ) train.add_argument( '-sval', '--save-after-valid', type='bool', default=False, help='Saves the model to model_file.checkpoint after ' 'every validation (default %(default)s).', ) train.add_argument( '-veps', '--validation-every-n-epochs', type=float, default=-1, help='Validate every n epochs. Saves model to model_file ' '(if set) whenever best val metric is found', ) train.add_argument( '-vme', '--validation-max-exs', type=int, default=-1, hidden=True, help='max examples to use during validation (default -1 uses all)', ) train.add_argument( '--short-final-eval', default=False, hidden=True, type='bool', help='If true, obeys --validation-max-exs in the final ' 'validation and test evaluations.', ) train.add_argument( '-vp', '--validation-patience', type=int, default=10, help=( 'number of iterations of validation where result' ' does not improve before we stop training' ), ) train.add_argument( '-vmt', '--validation-metric', default='accuracy', help='key into report table for selecting best validation', ) train.add_argument( '-vmm', '--validation-metric-mode', type=str, choices=['max', 'min'], help='the direction in which to optimize the validation metric, i.e. 
maximize or minimize', ) train.add_argument( '-vcut', '--validation-cutoff', type=float, default=1.0, hidden=True, help='value at which training will stop if exceeded by metric', ) train.add_argument( '-lfc', '--load-from-checkpoint', type='bool', default=True, hidden=True, help='load model from checkpoint if available', ) train.add_argument( '-vshare', '--validation-share-agent', default=False, hidden=True, help='use a shared copy of the agent for validation. ' 'this will eventually default to True, but ' 'currently defaults to False.', ) train.add_argument( '-mcs', '--metrics', type=str, default='default', help='list of metrics to show/compute, e.g. all, default,' 'or give a list split by , like ' 'ppl,f1,accuracy,hits@1,rouge,bleu' 'the rouge metrics will be computed as rouge-1, rouge-2 and rouge-l', ) train.add_argument( '-micro', '--aggregate-micro', type='bool', default=False, help='Report micro-averaged metrics instead of macro averaged metrics.', recommended=False, ) train.add_argument( '--world-logs', type=str, default='', help='Saves a jsonl file of the world logs.' 'Set to the empty string to not save at all.', ) train.add_argument( '--save-format', type=str, default='conversations', choices=['conversations', 'parlai'], ) WorldLogger.add_cmdline_args(parser, partial_opt=None) TensorboardLogger.add_cmdline_args(parser, partial_opt=None) WandbLogger.add_cmdline_args(parser, partial_opt=None) parser = setup_dict_args(parser) return parser
7c2b199d0b315c9016072897d849811cfc8a5073
11
train_model.py
1,162
Add WorldLogger to train_model script. (#4369) * Add WorldLogger to train_model ParlAI script. * Fix test_save_multiple_world_logs in test_train_model.py
47,086
0
1,676
687
289
194,794
516
ParlAI
21
parlai/scripts/train_model.py
Python
206
{ "docstring": "\n Build the ParlAI parser, adding command line args if necessary.\n\n :param ParlaiParser parser:\n Preexisting parser to append options to. Will be created if needed.\n\n :returns:\n the ParlaiParser with CLI options added.\n ", "language": "en", "n_whitespaces": 58, "n_words": 31, "vocab_size": 27 }
https://github.com/facebookresearch/ParlAI.git
2
test_messages_filter_not_labels
def test_messages_filter_not_labels(self) -> None: self._send_labelled_messages_in_room() token = "s0_0_0_0_0_0_0_0_0" channel = self.make_request( "GET", "/rooms/%s/messages?access_token=%s&from=%s&filter=%s" % (self.room_id, self.tok, token, json.dumps(self.FILTER_NOT_LABELS)), ) events = channel.json_body["chunk"] self.assertEqual(len(events), 4, [event["content"] for event in events]) self.assertEqual(events[0]["content"]["body"], "without label", events[0]) self.assertEqual(events[1]["content"]["body"], "without label", events[1]) self.assertEqual(events[2]["content"]["body"], "with wrong label", events[2]) self.assertEqual( events[3]["content"]["body"], "with two wrong labels", events[3] )
2ffaf30803f93273a4d8a65c9e6c3110c8433488
13
test_rooms.py
272
Add type hints to `tests/rest/client` (#12108) * Add type hints to `tests/rest/client` * newsfile * fix imports * add `test_account.py` * Remove one type hint in `test_report_event.py` * change `on_create_room` to `async` * update new functions in `test_third_party_rules.py` * Add `test_filter.py` * add `test_rooms.py` * change to `assertEquals` to `assertEqual` * lint
71,573
0
178
166
42
247,290
50
synapse
16
tests/rest/client/test_rooms.py
Python
17
{ "docstring": "Test that we can filter by the absence of a label on a /messages request.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 14 }
https://github.com/matrix-org/synapse.git
4
json
def json(self, body): import streamlit as st if isinstance(body, LazySessionState): body = body.to_dict() if not isinstance(body, str): try: body = json.dumps(body, default=repr) except TypeError as err: st.warning( "Warning: this data structure was not fully serializable as " "JSON due to one or more unexpected keys. (Error was: %s)" % err ) body = json.dumps(body, skipkeys=True, default=repr) json_proto = JsonProto() json_proto.body = body return self.dg._enqueue("json", json_proto)
72703b38029f9358a0ec7ca5ed875a6b438ece19
15
json.py
168
Replace static apps with live Cloud apps (#4317) Co-authored-by: kajarenc <[email protected]>
26,392
0
246
101
51
118,735
65
streamlit
20
lib/streamlit/elements/json.py
Python
16
{ "docstring": "Display object or string as a pretty-printed JSON string.\n\n Parameters\n ----------\n body : Object or str\n The object to print as JSON. All referenced objects should be\n serializable to JSON as well. If object is a string, we assume it\n contains serialized JSON.\n\n Example\n -------\n >>> st.json({\n ... 'foo': 'bar',\n ... 'baz': 'boz',\n ... 'stuff': [\n ... 'stuff 1',\n ... 'stuff 2',\n ... 'stuff 3',\n ... 'stuff 5',\n ... ],\n ... })\n\n .. output::\n https://share.streamlit.io/streamlit/docs/main/python/api-examples-source/data.json.py\n height: 385px\n\n ", "language": "en", "n_whitespaces": 297, "n_words": 77, "vocab_size": 57 }
https://github.com/streamlit/streamlit.git
4
set_kill_child_on_death_win32
def set_kill_child_on_death_win32(child_proc): if isinstance(child_proc, subprocess.Popen): child_proc = child_proc._handle assert isinstance(child_proc, subprocess.Handle) if detect_fate_sharing_support_win32(): if not win32_AssignProcessToJobObject(win32_job, int(child_proc)): import ctypes raise OSError(ctypes.get_last_error(), "AssignProcessToJobObject() failed") else: assert False, "AssignProcessToJobObject used despite being unavailable"
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
13
utils.py
111
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,222
0
89
66
27
130,302
31
ray
14
python/ray/_private/utils.py
Python
10
{ "docstring": "Ensures the child process dies if this process dies (fate-sharing).\n\n Windows-only. Must be called by the parent, after spawning the child.\n\n Args:\n child_proc: The subprocess.Popen or subprocess.Handle object.\n ", "language": "en", "n_whitespaces": 44, "n_words": 28, "vocab_size": 24 }
https://github.com/ray-project/ray.git
4
unflatten_superdims
def unflatten_superdims(assignment): def check(cond): if cond: return raise NotImplementedError("Failed to convert OpSharding into a ShardingSpec. " "Please open a bug report!") flat_assignment = np.asarray(assignment, dtype=np.int64) check(flat_assignment[0] == 0) dims = [] while flat_assignment.size > 1: stride = flat_assignment[1] for i in range(len(flat_assignment)): if flat_assignment[i] != i * stride: break else: # After this loop i should point to an "element after the sequence", so # we have to increment it if the whole array is a strided sequence. i += 1 size = i dims.append((size, stride)) assert size > 1 # Ensure progress flat_assignment = flat_assignment[::size] return dims
4b587fa1f0049db5366fd04812ab940d80a71a22
11
pjit.py
192
Move `pjit.py` to `jax/_src` in preparation for merging the `jit` and `pjit` frontend APIs PiperOrigin-RevId: 495944279
27,261
0
182
101
74
122,886
98
jax
17
jax/_src/pjit.py
Python
16
{ "docstring": "Unflatten a list of dimension sizes and their strides that generates assignment.\n\n If this function succeeds for a given ``assignment``, then the following property\n should be satisfied::\n\n dims_with_strides = unflatten_superdims(assignment)\n base_array = np.arange(map(fst, sorted(dims_with_strides, key=snd, reverse=True)))\n assignment == base_array.transpose(argsort(dims_with_strides, key=snd, reverse=True)).flatten()\n\n That is, the returned dimensions list all sizes of the base array (with strides\n indicating their initial order). The order of dimensions in the list corresponds\n to the permutation that applied to the base array generates the assignment.\n ", "language": "en", "n_whitespaces": 94, "n_words": 79, "vocab_size": 56 }
https://github.com/google/jax.git
4
_get_storage_by_url
def _get_storage_by_url(self, url, storage_objects): from io_storages.models import get_storage_classes for storage_object in storage_objects: # check url is string because task can have int, float, dict, list # and 'can_resolve_url' will fail if isinstance(url, str) and storage_object.can_resolve_url(url): return storage_object
6293c3226e3713bdae678603d6c1300e09c41448
10
models.py
61
fix: DEV-1476: Resolving performance for project storages (#1910) * Fix: DEV-1476: Resolving performance for project storages * Rewrite cache * Remove cache completely
42,439
0
106
38
34
177,558
37
label-studio
11
label_studio/tasks/models.py
Python
5
{ "docstring": "Find the first compatible storage and returns pre-signed URL", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/heartexlabs/label-studio.git
1
swap_memory
def swap_memory(): mem = cext.virtual_mem() total_phys = mem[0] free_phys = mem[1] total_system = mem[2] free_system = mem[3] # Despite the name PageFile refers to total system memory here # thus physical memory values need to be subtracted to get swap values total = total_system - total_phys free = min(total, free_system - free_phys) used = total - free percent = usage_percent(used, total, round_=1) return _common.sswap(total, used, free, percent, 0, 0) # ===================================================================== # --- disk # ===================================================================== disk_io_counters = cext.disk_io_counters
471b19d2aa799cd73bded23379e864dd35bec2b6
9
_pswindows.py
142
Fix typos
45,962
0
114
85
53
188,999
79
psutil
18
psutil/_pswindows.py
Python
11
{ "docstring": "Swap system memory as a (total, used, free, sin, sout) tuple.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/giampaolo/psutil.git
1
test_default_disabling_entity
async def test_default_disabling_entity(hass, create_registrations, webhook_client): webhook_id = create_registrations[1]["webhook_id"] webhook_url = f"/api/webhook/{webhook_id}" reg_resp = await webhook_client.post( webhook_url, json={ "type": "register_sensor", "data": { "name": "Battery State", "type": "sensor", "unique_id": "battery_state", "default_disabled": True, }, }, ) assert reg_resp.status == HTTPStatus.CREATED json = await reg_resp.json() assert json == {"success": True} await hass.async_block_till_done() entity = hass.states.get("sensor.test_1_battery_state") assert entity is None assert ( er.async_get(hass).async_get("sensor.test_1_battery_state").disabled_by == er.RegistryEntryDisabler.INTEGRATION )
539ce7ff0e9d9bc59cd8f028f245c09f802c89cb
15
test_sensor.py
230
Allow mobile app to disable entities by default (#71562)
99,145
0
228
129
45
300,279
61
core
21
tests/components/mobile_app/test_sensor.py
Python
25
{ "docstring": "Test that sensors can be disabled by default upon registration.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/home-assistant/core.git
6
RGS
def RGS(self): rgs = {} partition = self.partition for i, part in enumerate(partition): for j in part: rgs[j] = i return tuple([rgs[i] for i in sorted( [i for p in partition for i in p], key=default_sort_key)])
498015021131af4dbb07eb110e5badaba8250c7b
13
partitions.py
102
Updated import locations
47,606
0
108
67
23
196,106
36
sympy
13
sympy/combinatorics/partitions.py
Python
8
{ "docstring": "\n Returns the \"restricted growth string\" of the partition.\n\n Explanation\n ===========\n\n The RGS is returned as a list of indices, L, where L[i] indicates\n the block in which element i appears. For example, in a partition\n of 3 elements (a, b, c) into 2 blocks ([c], [a, b]) the RGS is\n [1, 1, 0]: \"a\" is in block 1, \"b\" is in block 1 and \"c\" is in block 0.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Partition\n >>> a = Partition([1, 2], [3], [4, 5])\n >>> a.members\n (1, 2, 3, 4, 5)\n >>> a.RGS\n (0, 0, 1, 2, 2)\n >>> a + 1\n Partition({3}, {4}, {5}, {1, 2})\n >>> _.RGS\n (0, 0, 1, 2, 3)\n ", "language": "en", "n_whitespaces": 255, "n_words": 114, "vocab_size": 81 }
https://github.com/sympy/sympy.git
2
pause_time
def pause_time(self) -> int | None: pause_time = self._status.get("timer_target") if pause_time is not None: return wilight_to_hass_pause_time(pause_time) return pause_time
34984a8af8efc5ef6d1d204404c517e7f7c2d1bb
9
switch.py
57
Add switch to wilight (#62873) * Created switch.py and support * updated support.py * test for wilight switch * Update for Test * Updated test_switch.py * Trigger service with index * Updated support.py and switch.py * Updated support.py * Updated switch.py as PR#63614 * Updated switch.py * add type hints * Updated support.py * Updated switch.py * Updated switch.py and services.yaml * Updated pywilight * Update homeassistant/components/wilight/switch.py Co-authored-by: Martin Hjelmare <[email protected]> * Update homeassistant/components/wilight/switch.py Co-authored-by: Martin Hjelmare <[email protected]> * Update homeassistant/components/wilight/switch.py Co-authored-by: Martin Hjelmare <[email protected]> * Update homeassistant/components/wilight/switch.py Co-authored-by: Martin Hjelmare <[email protected]> * Update ci.yaml * Update ci.yaml * Updated as pywilight Renamed Device as PyWiLightDevice in pywilight. * Updated as pywilight Renamed Device as PyWiLightDevice in pywilight. * Updated as pywilight Renamed Device as PyWiLightDevice in pywilight. * Updated as pywilight Renamed Device as PyWiLightDevice in pywilight. * Update switch.py * Update homeassistant/components/wilight/support.py Co-authored-by: Martin Hjelmare <[email protected]> * Update support.py * Update switch.py * Update support.py * Update support.py * Update switch.py * Update switch.py * Update services.yaml * Update switch.py * Update services.yaml * Update switch.py * Update homeassistant/components/wilight/switch.py Co-authored-by: Martin Hjelmare <[email protected]> * Update homeassistant/components/wilight/switch.py Co-authored-by: Martin Hjelmare <[email protected]> * Update homeassistant/components/wilight/switch.py Co-authored-by: Martin Hjelmare <[email protected]> * Update switch.py * Update switch.py * Update switch.py * Update test_switch.py * Update test_switch.py * Update test_switch.py * Decrease exception scope * Clean up Co-authored-by: Martin Hjelmare <[email protected]>
102,317
0
57
33
14
303,498
18
core
6
homeassistant/components/wilight/switch.py
Python
9
{ "docstring": "Return pause time of valve switch.\n\n None is unknown, 1 is minimum, 24 is maximum.\n ", "language": "en", "n_whitespaces": 29, "n_words": 15, "vocab_size": 13 }
https://github.com/home-assistant/core.git
7
set_param_recursive
def set_param_recursive(pipeline_steps, parameter, value): for (_, obj) in pipeline_steps: recursive_attrs = ["steps", "transformer_list", "estimators"] for attr in recursive_attrs: if hasattr(obj, attr): set_param_recursive(getattr(obj, attr), parameter, value) if hasattr(obj, "estimator"): # nested estimator est = getattr(obj, "estimator") if hasattr(est, parameter): setattr(est, parameter, value) if hasattr(obj, parameter): setattr(obj, parameter, value)
388616b6247ca4ea8de4e2f340d6206aee523541
15
export_utils.py
159
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
43,655
0
156
102
33
181,902
47
tpot
12
tpot/export_utils.py
Python
12
{ "docstring": "Recursively iterate through all objects in the pipeline and set a given parameter.\n\n Parameters\n ----------\n pipeline_steps: array-like\n List of (str, obj) tuples from a scikit-learn pipeline or related object\n parameter: str\n The parameter to assign a value for in each pipeline object\n value: any\n The value to assign the parameter to in each pipeline object\n Returns\n -------\n None\n\n ", "language": "en", "n_whitespaces": 106, "n_words": 58, "vocab_size": 41 }
https://github.com/EpistasisLab/tpot.git
2
embedding_dim
def embedding_dim(self) -> int: emedding_dim = self.model.get_sentence_embedding_dimension() if not emedding_dim: logger.warning( "Can't find the output embedding dimensions for '%s'. Some checks will not run as intended.", self.model_name_or_path, ) return emedding_dim
101d2bc86cab29f5e93cccd58730c5932acf8ca3
10
sentence_transformers.py
59
feat: `MultiModalRetriever` (#2891) * Adding Data2VecVision and Data2VecText to the supported models and adapt Tokenizers accordingly * content_types * Splitting classes into respective folders * small changes * Fix EOF * eof * black * API * EOF * whitespace * api * improve multimodal similarity processor * tokenizer -> feature extractor * Making feature vectors come out of the feature extractor in the similarity head * embed_queries is now self-sufficient * couple trivial errors * Implemented separate language model classes for multimodal inference * Document embedding seems to work * removing batch_encode_plus, is deprecated anyway * Realized the base Data2Vec models are not trained on retrieval tasks * Issue with the generated embeddings * Add batching * Try to fit CLIP in * Stub of CLIP integration * Retrieval goes through but returns noise only * Still working on the scores * Introduce temporary adapter for CLIP models * Image retrieval now works with sentence-transformers * Tidying up the code * Refactoring is now functional * Add MPNet to the supported sentence transformers models * Remove unused classes * pylint * docs * docs * Remove the method renaming * mpyp first pass * docs * tutorial * schema * mypy * Move devices setup into get_model * more mypy * mypy * pylint * Move a few params in HaystackModel's init * make feature extractor work with squadprocessor * fix feature_extractor_kwargs forwarding * Forgotten part of the fix * Revert unrelated ES change * Revert unrelated memdocstore changes * comment * Small corrections * mypy and pylint * mypy * typo * mypy * Refactor the call * mypy * Do not make FARMReader use the new FeatureExtractor * mypy * Detach DPR tests from FeatureExtractor too * Detach processor tests too * Add end2end marker * extract end2end feature extractor tests * temporary disable feature extraction tests * Introduce end2end tests for tokenizer tests * pylint * Fix model loading from folder in FeatureExtractor * working o n end2end * end2end keeps failing * Restructuring retriever tests * Restructuring retriever tests * remove covert_dataset_to_dataloader * remove comment * Better check sentence-transformers models * Use embed_meta_fields properly * rename passage into document * Embedding dims can't be found * Add check for models that support it * pylint * Split all retriever tests into suites, running mostly on InMemory only * fix mypy * fix tfidf test * fix weaviate tests * Parallelize on every docstore * Fix schema and specify modality in base retriever suite * tests * Add first image tests * remove comment * Revert to simpler tests * Update docs/_src/api/api/primitives.md Co-authored-by: Agnieszka Marzec <[email protected]> * Apply suggestions from code review Co-authored-by: Agnieszka Marzec <[email protected]> * Apply suggestions from code review Co-authored-by: Agnieszka Marzec <[email protected]> * Update haystack/modeling/model/multimodal/__init__.py Co-authored-by: Agnieszka Marzec <[email protected]> * Apply suggestions from code review Co-authored-by: Agnieszka Marzec <[email protected]> * Apply suggestions from code review Co-authored-by: Agnieszka Marzec <[email protected]> * Apply suggestions from code review Co-authored-by: Agnieszka Marzec <[email protected]> * Apply suggestions from code review Co-authored-by: Agnieszka Marzec <[email protected]> * Apply suggestions from code review * Apply suggestions from code review Co-authored-by: Agnieszka Marzec <[email protected]> * get_args * mypy * Update 
haystack/modeling/model/multimodal/__init__.py * Update haystack/modeling/model/multimodal/base.py * Update haystack/modeling/model/multimodal/base.py Co-authored-by: Agnieszka Marzec <[email protected]> * Update haystack/modeling/model/multimodal/sentence_transformers.py * Update haystack/modeling/model/multimodal/sentence_transformers.py Co-authored-by: Agnieszka Marzec <[email protected]> * Update haystack/modeling/model/multimodal/transformers.py * Update haystack/modeling/model/multimodal/transformers.py Co-authored-by: Agnieszka Marzec <[email protected]> * Update haystack/modeling/model/multimodal/transformers.py Co-authored-by: Agnieszka Marzec <[email protected]> * Apply suggestions from code review Co-authored-by: Agnieszka Marzec <[email protected]> * Apply suggestions from code review Co-authored-by: Agnieszka Marzec <[email protected]> * Apply suggestions from code review Co-authored-by: Agnieszka Marzec <[email protected]> * Update haystack/nodes/retriever/multimodal/retriever.py Co-authored-by: Agnieszka Marzec <[email protected]> * mypy * mypy * removing more ContentTypes * more contentypes * pylint * add to __init__ * revert end2end workflow for now * missing integration markers * Update haystack/nodes/retriever/multimodal/embedder.py Co-authored-by: bogdankostic <[email protected]> * review feedback, removing HaystackImageTransformerModel * review feedback part 2 * mypy & pylint * mypy * mypy * fix multimodal docs also for Pinecone * add note on internal constants * Fix pinecone write_documents * schemas * keep support for sentence-transformers only * fix pinecone test * schemas * fix pinecone again * temporarily disable some tests, need to understand if they're still relevant Co-authored-by: Agnieszka Marzec <[email protected]> Co-authored-by: bogdankostic <[email protected]>
75,174
0
110
34
28
257,944
30
haystack
9
haystack/modeling/model/multimodal/sentence_transformers.py
Python
11
{ "docstring": "\n Finds out the output embedding dim by running the model on a minimal amount of mock data.\n ", "language": "en", "n_whitespaces": 32, "n_words": 17, "vocab_size": 16 }
https://github.com/deepset-ai/haystack.git
2
blobprotovector_str_to_arraylist
def blobprotovector_str_to_arraylist(str): vec = caffe_pb2.BlobProtoVector() vec.ParseFromString(str) return [blobproto_to_array(blob) for blob in vec.blobs]
cc4d0564756ca067516f71718a3d135996525909
8
io.py
54
Balanced joint maximum mean discrepancy for deep transfer learning
12,054
0
24
32
12
60,263
12
transferlearning
9
code/deep/BJMMD/caffe/python/caffe/io.py
Python
4
{ "docstring": "Converts a serialized blobprotovec to a list of arrays.\n ", "language": "en", "n_whitespaces": 12, "n_words": 9, "vocab_size": 8 }
https://github.com/jindongwang/transferlearning.git
2
dag_bag_ext
def dag_bag_ext(): clear_db_runs() dag_bag = DagBag(dag_folder=DEV_NULL, include_examples=False) dag_0 = DAG("dag_0", start_date=DEFAULT_DATE, schedule_interval=None) task_a_0 = EmptyOperator(task_id="task_a_0", dag=dag_0) task_b_0 = ExternalTaskMarker( task_id="task_b_0", external_dag_id="dag_1", external_task_id="task_a_1", recursion_depth=3, dag=dag_0 ) task_a_0 >> task_b_0 dag_1 = DAG("dag_1", start_date=DEFAULT_DATE, schedule_interval=None) task_a_1 = ExternalTaskSensor( task_id="task_a_1", external_dag_id=dag_0.dag_id, external_task_id=task_b_0.task_id, dag=dag_1 ) task_b_1 = ExternalTaskMarker( task_id="task_b_1", external_dag_id="dag_2", external_task_id="task_a_2", recursion_depth=2, dag=dag_1 ) task_a_1 >> task_b_1 dag_2 = DAG("dag_2", start_date=DEFAULT_DATE, schedule_interval=None) task_a_2 = ExternalTaskSensor( task_id="task_a_2", external_dag_id=dag_1.dag_id, external_task_id=task_b_1.task_id, dag=dag_2 ) task_b_2 = ExternalTaskMarker( task_id="task_b_2", external_dag_id="dag_3", external_task_id="task_a_3", recursion_depth=1, dag=dag_2 ) task_a_2 >> task_b_2 dag_3 = DAG("dag_3", start_date=DEFAULT_DATE, schedule_interval=None) task_a_3 = ExternalTaskSensor( task_id="task_a_3", external_dag_id=dag_2.dag_id, external_task_id=task_b_2.task_id, dag=dag_3 ) task_b_3 = EmptyOperator(task_id="task_b_3", dag=dag_3) task_a_3 >> task_b_3 for dag in [dag_0, dag_1, dag_2, dag_3]: dag_bag.bag_dag(dag=dag, root_dag=dag) yield dag_bag clear_db_runs() @pytest.fixture
49e336ae0302b386a2f47269a6d13988382d975f
@pytest.fixture
10
test_external_task_sensor.py
460
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
9,197
1
243
290
69
47,660
111
airflow
36
tests/sensors/test_external_task_sensor.py
Python
35
{ "docstring": "\n Create a DagBag with DAGs looking like this. The dotted lines represent external dependencies\n set up using ExternalTaskMarker and ExternalTaskSensor.\n\n dag_0: task_a_0 >> task_b_0\n |\n |\n dag_1: ---> task_a_1 >> task_b_1\n |\n |\n dag_2: ---> task_a_2 >> task_b_2\n |\n |\n dag_3: ---> task_a_3 >> task_b_3\n ", "language": "en", "n_whitespaces": 480, "n_words": 45, "vocab_size": 35 }
https://github.com/apache/airflow.git
3
clone_model
def clone_model(model, input_tensors=None, clone_function=None): with serialization.DisableSharedObjectScope(): if clone_function is None: clone_function = _clone_layer if isinstance(model, Sequential): return _clone_sequential_model( model, input_tensors=input_tensors, layer_fn=clone_function ) else: return _clone_functional_model( model, input_tensors=input_tensors, layer_fn=clone_function ) # "Clone" a subclassed model by resetting all of the attributes.
c269e3cd8fed713fb54d2971319df0bfe6e1bf10
13
cloning.py
103
Move serialization-related logic in utils/generic_utils.py to saving/legacy/serialization.py. PiperOrigin-RevId: 479688207
83,239
0
151
65
33
280,071
40
keras
12
keras/models/cloning.py
Python
12
{ "docstring": "Clone a Functional or Sequential `Model` instance.\n\n Model cloning is similar to calling a model on new inputs,\n except that it creates new layers (and thus new weights) instead\n of sharing the weights of the existing layers.\n\n Note that\n `clone_model` will not preserve the uniqueness of shared objects within the\n model (e.g. a single variable attached to two distinct layers will be\n restored as two separate variables).\n\n Args:\n model: Instance of `Model`\n (could be a Functional model or a Sequential model).\n input_tensors: optional list of input tensors or InputLayer objects\n to build the model upon. If not provided,\n new `Input` objects will be created.\n clone_function: Callable to be used to clone each layer in the target\n model (except `InputLayer` instances). It takes as argument the\n layer instance to be cloned, and returns the corresponding layer\n instance to be used in the model copy. If unspecified, this callable\n defaults to the following serialization/deserialization function:\n `lambda layer: layer.__class__.from_config(layer.get_config())`.\n By passing a custom callable, you can customize your copy of the\n model, e.g. by wrapping certain layers of interest (you might want\n to replace all `LSTM` instances with equivalent\n `Bidirectional(LSTM(...))` instances, for example).\n\n Returns:\n An instance of `Model` reproducing the behavior\n of the original model, on top of new inputs tensors,\n using newly instantiated weights. The cloned model may behave\n differently from the original model if a custom `clone_function`\n modifies the layer.\n\n Example:\n\n ```python\n # Create a test Sequential model.\n model = keras.Sequential([\n keras.Input(shape=(728,)),\n keras.layers.Dense(32, activation='relu'),\n keras.layers.Dense(1, activation='sigmoid'),\n ])\n # Create a copy of the test model (with freshly initialized weights).\n new_model = clone_model(model)\n ```\n\n Note that subclassed models cannot be cloned, since their internal\n layer structure is not known. To achieve equivalent functionality\n as `clone_model` in the case of a subclassed model, simply make sure\n that the model class implements `get_config()`\n (and optionally `from_config()`), and call:\n\n ```python\n new_model = model.__class__.from_config(model.get_config())\n ```\n ", "language": "en", "n_whitespaces": 586, "n_words": 309, "vocab_size": 192 }
https://github.com/keras-team/keras.git
11
update_billed_amount_based_on_so
def update_billed_amount_based_on_so(so_detail, update_modified=True): # Billed against Sales Order directly billed_against_so = frappe.db.sql(, so_detail) billed_against_so = billed_against_so and billed_against_so[0][0] or 0 # Get all Delivery Note Item rows against the Sales Order Item row dn_details = frappe.db.sql(, so_detail, as_dict=1) updated_dn = [] for dnd in dn_details: billed_amt_agianst_dn = 0 # If delivered against Sales Invoice if dnd.si_detail: billed_amt_agianst_dn = flt(dnd.amount) billed_against_so -= billed_amt_agianst_dn else: # Get billed amount directly against Delivery Note billed_amt_agianst_dn = frappe.db.sql(, dnd.name) billed_amt_agianst_dn = billed_amt_agianst_dn and billed_amt_agianst_dn[0][0] or 0 # Distribute billed amount directly against SO between DNs based on FIFO if billed_against_so and billed_amt_agianst_dn < dnd.amount: if dnd.returned_qty: pending_to_bill = flt(dnd.amount) * (dnd.stock_qty - dnd.returned_qty) / dnd.stock_qty else: pending_to_bill = flt(dnd.amount) pending_to_bill -= billed_amt_agianst_dn if pending_to_bill <= billed_against_so: billed_amt_agianst_dn += pending_to_bill billed_against_so -= pending_to_bill else: billed_amt_agianst_dn += billed_against_so billed_against_so = 0 frappe.db.set_value("Delivery Note Item", dnd.name, "billed_amt", billed_amt_agianst_dn, update_modified=update_modified) updated_dn.append(dnd.parent) return updated_dn
b50036c04a116b2a3aa1784daf161a2f618765a8
17
delivery_note.py
331
fix: consider returned_qty while updating billed_amt (cherry picked from commit 63aaa1e357280b24c537a502a479f7bb7a6654e4)
13,584
0
112
205
75
64,240
145
erpnext
22
erpnext/stock/doctype/delivery_note/delivery_note.py
Python
44
{ "docstring": "select sum(si_item.amount)\n\t\tfrom `tabSales Invoice Item` si_item, `tabSales Invoice` si\n\t\twhere\n\t\t\tsi_item.parent = si.name\n\t\t\tand si_item.so_detail=%s\n\t\t\tand (si_item.dn_detail is null or si_item.dn_detail = '')\n\t\t\tand si_item.docstatus=1\n\t\t\tand si.update_stock = 0\n\t\tselect dn_item.name, dn_item.amount, dn_item.si_detail, dn_item.parent, dn_item.stock_qty, dn_item.returned_qty\n\t\tfrom `tabDelivery Note Item` dn_item, `tabDelivery Note` dn\n\t\twhere\n\t\t\tdn.name = dn_item.parent\n\t\t\tand dn_item.so_detail=%s\n\t\t\tand dn.docstatus=1\n\t\t\tand dn.is_return = 0\n\t\torder by dn.posting_date asc, dn.posting_time asc, dn.name ascselect sum(amount) from `tabSales Invoice Item`\n\t\t\t\twhere dn_detail=%s and docstatus=1", "language": "en", "n_whitespaces": 57, "n_words": 74, "vocab_size": 49 }
https://github.com/frappe/erpnext.git
4
floyd_warshall_numpy
def floyd_warshall_numpy(G, nodelist=None, weight="weight"): import numpy as np if nodelist is not None: if not (len(nodelist) == len(G) == len(set(nodelist))): raise nx.NetworkXError( "nodelist must contain every node in G with no repeats." "If you wanted a subgraph of G use G.subgraph(nodelist)" ) # To handle cases when an edge has weight=0, we must make sure that # nonedges are not given the value 0 as well. A = nx.to_numpy_array( G, nodelist, multigraph_weight=min, weight=weight, nonedge=np.inf ) n, m = A.shape np.fill_diagonal(A, 0) # diagonal elements should be zero for i in range(n): # The second term has the same shape as A due to broadcasting A = np.minimum(A, A[i, :][np.newaxis, :] + A[:, i][:, np.newaxis]) return A
930121ffb89d01077b9888abbd5e810a7e5e16a4
14
dense.py
220
More numpy.matrix cleanups for NX2.7 (#5319) * Fix return type in docstring of internal function. * Rm explicit mention of numpy matrix from class docstrings. * Fix return type of floyd_warshall_numpy docstring. * Remove mention of numpy matrix from code comment. * Fix simrank similarity internal docstring. * Rm explicit mention of matrix in favor of 2D array. * Update to_networkx_graph array exception wording. * Remove extraneous mention of numpy matrix. * Don't print array in exception message.
41,826
0
230
139
96
176,311
116
networkx
24
networkx/algorithms/shortest_paths/dense.py
Python
16
{ "docstring": "Find all-pairs shortest path lengths using Floyd's algorithm.\n\n This algorithm for finding shortest paths takes advantage of\n matrix representations of a graph and works well for dense\n graphs where all-pairs shortest path lengths are desired.\n The results are returned as a NumPy array, distance[i, j],\n where i and j are the indexes of two nodes in nodelist.\n The entry distance[i, j] is the distance along a shortest\n path from i to j. If no path exists the distance is Inf.\n\n Parameters\n ----------\n G : NetworkX graph\n\n nodelist : list, optional (default=G.nodes)\n The rows and columns are ordered by the nodes in nodelist.\n If nodelist is None then the ordering is produced by G.nodes.\n Nodelist should include all nodes in G.\n\n weight: string, optional (default='weight')\n Edge data key corresponding to the edge weight.\n\n Returns\n -------\n distance : 2D numpy.ndarray\n A numpy array of shortest path distances between nodes.\n If there is no path between two nodes the value is Inf.\n\n Notes\n -----\n Floyd's algorithm is appropriate for finding shortest paths in\n dense graphs or graphs with negative weights when Dijkstra's\n algorithm fails. This algorithm can still fail if there are negative\n cycles. It has running time $O(n^3)$ with running space of $O(n^2)$.\n\n Raises\n ------\n NetworkXError\n If nodelist is not a list of the nodes in G.\n ", "language": "en", "n_whitespaces": 335, "n_words": 215, "vocab_size": 127 }
https://github.com/networkx/networkx.git
3
__getattr__
def __getattr__(cls, name): if _is_dunder(name): raise AttributeError(name) try: return cls._member_map_[name] except KeyError: raise AttributeError(name) from None
8198943edd73a363c266633e1aa5b2a9e9c9f526
10
enum.py
62
add python 3.10.4 for windows
54,713
0
77
38
14
217,315
16
XX-Net
7
python3.10.4/Lib/enum.py
Python
7
{ "docstring": "\n Return the enum member matching `name`\n\n We use __getattr__ instead of descriptors or inserting into the enum\n class' __dict__ in order to support `name` and `value` being both\n properties for enum members (which live in the class' __dict__) and\n enum members themselves.\n ", "language": "en", "n_whitespaces": 85, "n_words": 42, "vocab_size": 32 }
https://github.com/XX-net/XX-Net.git
2
index_subdirectory
def index_subdirectory(directory, class_indices, follow_links, formats): dirname = os.path.basename(directory) valid_files = iter_valid_files(directory, follow_links, formats) labels = [] filenames = [] for root, fname in valid_files: labels.append(class_indices[dirname]) absolute_path = tf.io.gfile.join(root, fname) relative_path = tf.io.gfile.join( dirname, os.path.relpath(absolute_path, directory)) filenames.append(relative_path) return filenames, labels
b3f1f645238c93ec3b283f64f73a02df3aea6b53
13
dataset_utils.py
158
use tf.io.gfile instead of os.path to support cloud paths. PiperOrigin-RevId: 430975400
79,856
0
65
103
31
269,045
39
keras
23
keras/preprocessing/dataset_utils.py
Python
12
{ "docstring": "Recursively walks directory and list image paths and their class index.\n\n Args:\n directory: string, target directory.\n class_indices: dict mapping class names to their index.\n follow_links: boolean, whether to recursively follow subdirectories\n (if False, we only list top-level images in `directory`).\n formats: Allowlist of file extensions to index (e.g. \".jpg\", \".txt\").\n\n Returns:\n tuple `(filenames, labels)`. `filenames` is a list of relative file\n paths, and `labels` is a list of integer labels corresponding to these\n files.\n ", "language": "en", "n_whitespaces": 107, "n_words": 74, "vocab_size": 58 }
https://github.com/keras-team/keras.git
1
test_loading_race_condition
async def test_loading_race_condition(hass): store = auth_store.AuthStore(hass) with patch( "homeassistant.helpers.entity_registry.async_get" ) as mock_ent_registry, patch( "homeassistant.helpers.device_registry.async_get" ) as mock_dev_registry, patch( "homeassistant.helpers.storage.Store.async_load", return_value=None ) as mock_load: results = await asyncio.gather(store.async_get_users(), store.async_get_users()) mock_ent_registry.assert_called_once_with(hass) mock_dev_registry.assert_called_once_with(hass) mock_load.assert_called_once_with() assert results[0] == results[1]
69cc6ab5f1d58adc586c3b300a4f7f0cde2cd0c2
13
test_auth_store.py
152
Clean up accessing entity_registry.async_get_registry helper via hass (#72005)
99,666
0
109
86
28
300,810
35
core
15
tests/auth/test_auth_store.py
Python
14
{ "docstring": "Test only one storage load called when concurrent loading occurred .", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/home-assistant/core.git
2
test_fcos_head_loss
def test_fcos_head_loss(self): s = 256 img_metas = [{ 'img_shape': (s, s, 3), 'scale_factor': 1, }] fcos_head = FCOSHead( num_classes=4, in_channels=1, feat_channels=1, stacked_convs=1, norm_cfg=None) # Fcos head expects a multiple levels of features per image feats = ( torch.rand(1, 1, s // stride[1], s // stride[0]) for stride in fcos_head.prior_generator.strides) cls_scores, bbox_preds, centernesses = fcos_head.forward(feats) # Test that empty ground truth encourages the network to # predict background gt_instances = InstanceData() gt_instances.bboxes = torch.empty((0, 4)) gt_instances.labels = torch.LongTensor([]) empty_gt_losses = fcos_head.loss(cls_scores, bbox_preds, centernesses, [gt_instances], img_metas) # When there is no truth, the cls loss should be nonzero but # box loss and centerness loss should be zero empty_cls_loss = empty_gt_losses['loss_cls'].item() empty_box_loss = empty_gt_losses['loss_bbox'].item() empty_ctr_loss = empty_gt_losses['loss_centerness'].item() self.assertGreater(empty_cls_loss, 0, 'cls loss should be non-zero') self.assertEqual( empty_box_loss, 0, 'there should be no box loss when there are no true boxes') self.assertEqual( empty_ctr_loss, 0, 'there should be no centerness loss when there are no true boxes') # When truth is non-empty then all cls, box loss and centerness loss # should be nonzero for random inputs gt_instances = InstanceData() gt_instances.bboxes = torch.Tensor( [[23.6667, 23.8757, 238.6326, 151.8874]]) gt_instances.labels = torch.LongTensor([2]) one_gt_losses = fcos_head.loss(cls_scores, bbox_preds, centernesses, [gt_instances], img_metas) onegt_cls_loss = one_gt_losses['loss_cls'].item() onegt_box_loss = one_gt_losses['loss_bbox'].item() onegt_ctr_loss = one_gt_losses['loss_centerness'].item() self.assertGreater(onegt_cls_loss, 0, 'cls loss should be non-zero') self.assertGreater(onegt_box_loss, 0, 'box loss should be non-zero') self.assertGreater(onegt_ctr_loss, 0, 'centerness loss should be non-zero') # Test the `center_sampling` works fine. fcos_head.center_sampling = True ctrsamp_losses = fcos_head.loss(cls_scores, bbox_preds, centernesses, [gt_instances], img_metas) ctrsamp_cls_loss = ctrsamp_losses['loss_cls'].item() ctrsamp_box_loss = ctrsamp_losses['loss_bbox'].item() ctrsamp_ctr_loss = ctrsamp_losses['loss_centerness'].item() self.assertGreater(ctrsamp_cls_loss, 0, 'cls loss should be non-zero') self.assertGreater(ctrsamp_box_loss, 0, 'box loss should be non-zero') self.assertGreater(ctrsamp_ctr_loss, 0, 'centerness loss should be non-zero') # Test the `norm_on_bbox` works fine. fcos_head.norm_on_bbox = True normbox_losses = fcos_head.loss(cls_scores, bbox_preds, centernesses, [gt_instances], img_metas) normbox_cls_loss = normbox_losses['loss_cls'].item() normbox_box_loss = normbox_losses['loss_bbox'].item() normbox_ctr_loss = normbox_losses['loss_centerness'].item() self.assertGreater(normbox_cls_loss, 0, 'cls loss should be non-zero') self.assertGreater(normbox_box_loss, 0, 'box loss should be non-zero') self.assertGreater(normbox_ctr_loss, 0, 'centerness loss should be non-zero')
015f8a9bafe808fbe3db673d629f126a804a9207
11
test_fcos_head.py
780
Refactor interface of base dense free head and fcos head
70,419
0
1,066
484
153
244,526
314
mmdetection
50
tests/test_models/test_dense_heads/test_fcos_head.py
Python
64
{ "docstring": "Tests fcos head loss when truth is empty and non-empty.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/open-mmlab/mmdetection.git
1
test_cancellation_while_holding_read_lock
def test_cancellation_while_holding_read_lock(self): rwlock = ReadWriteLock() key = "key" # 1. A reader takes the lock and blocks. reader_d, _, _ = self._start_blocking_reader(rwlock, key, "read completed") # 2. A writer waits for the reader to complete. writer_d, _ = self._start_nonblocking_writer(rwlock, key, "write completed") self.assertFalse(writer_d.called) # 3. The reader is cancelled. reader_d.cancel() self.failureResultOf(reader_d, CancelledError) # 4. The writer should take the lock and complete. self.assertTrue( writer_d.called, "Writer is stuck waiting for a cancelled reader" ) self.assertEqual("write completed", self.successResultOf(writer_d))
605d161d7d585847fd1bb98d14d5281daeac8e86
9
test_rwlock.py
152
Add cancellation support to `ReadWriteLock` (#12120) Also convert `ReadWriteLock` to use async context managers. Signed-off-by: Sean Quah <[email protected]>
71,759
0
192
88
55
247,584
76
synapse
18
tests/util/test_rwlock.py
Python
12
{ "docstring": "Test cancellation while holding a read lock.\n\n A waiting writer should be given the lock when the reader holding the lock is\n cancelled.\n ", "language": "en", "n_whitespaces": 44, "n_words": 23, "vocab_size": 19 }
https://github.com/matrix-org/synapse.git
4
description_of
def description_of(lines, name="stdin"): u = UniversalDetector() for line in lines: line = bytearray(line) u.feed(line) # shortcut out of the loop to save reading further - particularly useful if we read a BOM. if u.done: break u.close() result = u.result if result["encoding"]: return f'{name}: {result["encoding"]} with confidence {result["confidence"]}' return f"{name}: no result"
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
11
chardetect.py
133
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
4,090
0
118
60
45
21,896
51
pipenv
11
pipenv/patched/pip/_vendor/chardet/cli/chardetect.py
Python
12
{ "docstring": "\n Return a string describing the probable encoding of a file or\n list of strings.\n\n :param lines: The lines to get the encoding of.\n :type lines: Iterable of bytes\n :param name: Name of file or collection of lines\n :type name: str\n ", "language": "en", "n_whitespaces": 62, "n_words": 40, "vocab_size": 26 }
https://github.com/pypa/pipenv.git
1
load
def load(self, loader): loader.add_option( "connection_strategy", str, "eager", "Determine when server connections should be established. When set to lazy, mitmproxy " "tries to defer establishing an upstream connection as long as possible. This makes it possible to " "use server replay while being offline. When set to eager, mitmproxy can detect protocols with " "server-side greetings, as well as accurately mirror TLS ALPN negotiation.", choices=("eager", "lazy"), ) loader.add_option( "stream_large_bodies", Optional[str], None, , ) loader.add_option( "body_size_limit", Optional[str], None, , ) loader.add_option( "keep_host_header", bool, False, , ) loader.add_option( "proxy_debug", bool, False, "Enable debug logs in the proxy core.", ) loader.add_option( "normalize_outbound_headers", bool, True, , ) loader.add_option( "validate_inbound_headers", bool, True, , ) loader.add_option( "connect_addr", Optional[str], None, , )
42ccc85b6f1881d92b55e411ba9719f26459b720
10
proxyserver.py
217
[quic] work on eventify layers
73,982
0
621
144
74
252,727
114
mitmproxy
8
mitmproxy/addons/proxyserver.py
Python
76
{ "docstring": "\n Stream data to the client if response body exceeds the given\n threshold. If streamed, the body will not be stored in any way.\n Understands k/m/g suffixes, i.e. 3m for 3 megabytes.\n \n Byte size limit of HTTP request and response bodies. Understands\n k/m/g suffixes, i.e. 3m for 3 megabytes.\n \n Reverse Proxy: Keep the original host header instead of rewriting it\n to the reverse proxy target.\n \n Normalize outgoing HTTP/2 header names, but emit a warning when doing so.\n HTTP/2 does not allow uppercase header names. This option makes sure that HTTP/2 headers set\n in custom scripts are lowercased before they are sent.\n \n Make sure that incoming HTTP requests are not malformed.\n Disabling this option makes mitmproxy vulnerable to HTTP smuggling attacks.\n Set the local IP address that mitmproxy should use when connecting to upstream servers.", "language": "en", "n_whitespaces": 324, "n_words": 133, "vocab_size": 96 }
https://github.com/mitmproxy/mitmproxy.git
5
parse_etags
def parse_etags(etag_str): if etag_str.strip() == "*": return ["*"] else: # Parse each ETag individually, and return any that are valid. etag_matches = (ETAG_MATCH.match(etag.strip()) for etag in etag_str.split(",")) return [match[1] for match in etag_matches if match]
9c19aff7c7561e3a82978a272ecdaad40dda5c00
14
http.py
99
Refs #33476 -- Reformatted code with Black.
51,638
0
72
57
29
206,695
35
django
8
django/utils/http.py
Python
6
{ "docstring": "\n Parse a string of ETags given in an If-None-Match or If-Match header as\n defined by RFC 7232. Return a list of quoted ETags, or ['*'] if all ETags\n should be matched.\n ", "language": "en", "n_whitespaces": 44, "n_words": 31, "vocab_size": 27 }
https://github.com/django/django.git
17
print_
def print_(*args, **kwargs): fp = kwargs.pop("file", sys.stdout) if fp is None: return
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
9
six.py
49
upd; format
13,497
0
44
208
11
63,755
12
transferlearning
7
.venv/lib/python3.8/site-packages/pip/_vendor/six.py
Python
40
{ "docstring": "The new-style print function for Python 2.4 and 2.5.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/jindongwang/transferlearning.git
5
_solve_least_squares
def _solve_least_squares(M, rhs, method='CH'): if method == 'CH': return M.cholesky_solve(rhs) elif method == 'QR': return M.QRsolve(rhs) elif method == 'LDL': return M.LDLsolve(rhs) elif method == 'PINV': return M.pinv_solve(rhs) else: t = M.H return (t * M).solve(t * rhs, method=method)
59d22b6bb7287613d598611027f640d068ca5748
12
solvers.py
143
Moved imports to higher level
47,906
0
99
84
25
196,406
39
sympy
11
sympy/matrices/solvers.py
Python
12
{ "docstring": "Return the least-square fit to the data.\n\n Parameters\n ==========\n\n rhs : Matrix\n Vector representing the right hand side of the linear equation.\n\n method : string or boolean, optional\n If set to ``'CH'``, ``cholesky_solve`` routine will be used.\n\n If set to ``'LDL'``, ``LDLsolve`` routine will be used.\n\n If set to ``'QR'``, ``QRsolve`` routine will be used.\n\n If set to ``'PINV'``, ``pinv_solve`` routine will be used.\n\n Otherwise, the conjugate of ``M`` will be used to create a system\n of equations that is passed to ``solve`` along with the hint\n defined by ``method``.\n\n Returns\n =======\n\n solutions : Matrix\n Vector representing the solution.\n\n Examples\n ========\n\n >>> from sympy import Matrix, ones\n >>> A = Matrix([1, 2, 3])\n >>> B = Matrix([2, 3, 4])\n >>> S = Matrix(A.row_join(B))\n >>> S\n Matrix([\n [1, 2],\n [2, 3],\n [3, 4]])\n\n If each line of S represent coefficients of Ax + By\n and x and y are [2, 3] then S*xy is:\n\n >>> r = S*Matrix([2, 3]); r\n Matrix([\n [ 8],\n [13],\n [18]])\n\n But let's add 1 to the middle value and then solve for the\n least-squares value of xy:\n\n >>> xy = S.solve_least_squares(Matrix([8, 14, 18])); xy\n Matrix([\n [ 5/3],\n [10/3]])\n\n The error is given by S*xy - r:\n\n >>> S*xy - r\n Matrix([\n [1/3],\n [1/3],\n [1/3]])\n >>> _.norm().n(2)\n 0.58\n\n If a different xy is used, the norm will be higher:\n\n >>> xy += ones(2, 1)/10\n >>> (S*xy - r).norm().n(2)\n 1.5\n\n ", "language": "en", "n_whitespaces": 427, "n_words": 232, "vocab_size": 143 }
https://github.com/sympy/sympy.git
3
get_bound_panel
def get_bound_panel(self, instance=None, request=None, form=None): if self.model is None: raise ImproperlyConfigured( "%s.bind_to_model(model) must be called before get_bound_panel" % type(self).__name__ ) if not issubclass(self.BoundPanel, EditHandler.BoundPanel): raise ImproperlyConfigured( "%s.BoundPanel must be a subclass of EditHandler.BoundPanel" % type(self).__name__ ) return self.BoundPanel( panel=self, instance=instance, request=request, form=form )
37784643e9207380abaed3c5720dbbcaf69b473b
13
panels.py
130
API docs for Panel
16,647
0
193
83
34
77,267
43
wagtail
13
wagtail/admin/panels.py
Python
14
{ "docstring": "\n Return a ``BoundPanel`` instance that can be rendered onto the template as a component. By default, this creates an instance\n of the panel class's inner ``BoundPanel`` class, which must inherit from ``Panel.BoundPanel``.\n ", "language": "en", "n_whitespaces": 54, "n_words": 32, "vocab_size": 28 }
https://github.com/wagtail/wagtail.git
3
ols_ridge_dataset
def ols_ridge_dataset(global_random_seed, request): # Make larger dim more than double as big as the smaller one. # This helps when constructing singular matrices like (X, X). if request.param == "long": n_samples, n_features = 12, 4 else: n_samples, n_features = 4, 12 k = min(n_samples, n_features) rng = np.random.RandomState(global_random_seed) X = make_low_rank_matrix( n_samples=n_samples, n_features=n_features, effective_rank=k ) X[:, -1] = 1 # last columns acts as intercept U, s, Vt = linalg.svd(X) assert np.all(s) > 1e-3 # to be sure U1, U2 = U[:, :k], U[:, k:] Vt1, _ = Vt[:k, :], Vt[k:, :] if request.param == "long": # Add a term that vanishes in the product X'y coef_ols = rng.uniform(low=-10, high=10, size=n_features) y = X @ coef_ols y += U2 @ rng.normal(size=n_samples - n_features) ** 2 else: y = rng.uniform(low=-10, high=10, size=n_samples) # w = X'(XX')^-1 y = V s^-1 U' y coef_ols = Vt1.T @ np.diag(1 / s) @ U1.T @ y # Add penalty alpha * ||coef||_2^2 for alpha=1 and solve via normal equations. # Note that the problem is well conditioned such that we get accurate results. alpha = 1 d = alpha * np.identity(n_features) d[-1, -1] = 0 # intercept gets no penalty coef_ridge = linalg.solve(X.T @ X + d, X.T @ y) # To be sure R_OLS = y - X @ coef_ols R_Ridge = y - X @ coef_ridge assert np.linalg.norm(R_OLS) < np.linalg.norm(R_Ridge) return X, y, coef_ols, coef_ridge @pytest.mark.parametrize("solver", SOLVERS) @pytest.mark.parametrize("fit_intercept", [True, False])
6528e14085d059f9d0c94f93378e7e3c0b967f27
@pytest.mark.parametrize("solver", SOLVERS) @pytest.mark.parametrize("fit_intercept", [True, False])
15
test_ridge.py
524
TST tight and clean tests for Ridge (#22910) * MNT replace pinvh by solve * DOC more info for svd solver * TST rewrite test_ridge * MNT remove test_ridge_singular * MNT restructure into several tests * MNT remove test_toy_ridge_object * MNT remove test_ridge_sparse_svd This is tested in test_ridge_fit_intercept_sparse_error. * TST exclude cholesky from singular problem * CLN two fixes * MNT parametrize test_ridge_sample_weights * MNT restructure test_ridge_sample_weights * CLN tighten tolerance for sag solver * CLN try to fix saga tolerance * CLN make test_ridge_sample_weights nicer * MNT remove test_ridge_regression_sample_weights * MNT rename to test_ridge_regression_sample_weights * CLN make test_ridge_regression_unpenalized pass for all random seeds * CLN make tests pass for all random seeds * DOC fix typos * TST skip cholesky for singular problems * MNT move up test_ridge_regression_sample_weights * CLN set skip reason as comment
75,817
1
390
306
154
259,553
238
scikit-learn
46
sklearn/linear_model/tests/test_ridge.py
Python
30
{ "docstring": "Dataset with OLS and Ridge solutions, well conditioned X.\n\n The construction is based on the SVD decomposition of X = U S V'.\n\n Parameters\n ----------\n type : {\"long\", \"wide\"}\n If \"long\", then n_samples > n_features.\n If \"wide\", then n_features > n_samples.\n\n For \"wide\", we return the minimum norm solution w = X' (XX')^-1 y:\n\n min ||w||_2 subject to X w = y\n\n Returns\n -------\n X : ndarray\n Last column of 1, i.e. intercept.\n y : ndarray\n coef_ols : ndarray of shape\n Minimum norm OLS solutions, i.e. min ||X w - y||_2_2 (with mininum ||w||_2 in\n case of ambiguity)\n Last coefficient is intercept.\n coef_ridge : ndarray of shape (5,)\n Ridge solution with alpha=1, i.e. min ||X w - y||_2_2 + ||w||_2^2.\n Last coefficient is intercept.\n ", "language": "en", "n_whitespaces": 223, "n_words": 124, "vocab_size": 78 }
https://github.com/scikit-learn/scikit-learn.git
2
get_supplier_details
def get_supplier_details(suppliers): supplier_details = {} for supp in frappe.db.sql( % ", ".join(["%s"] * len(suppliers)), tuple(suppliers), as_dict=1, ): supplier_details.setdefault(supp.name, supp.supplier_group) return supplier_details
494bd9ef78313436f0424b918f200dab8fc7c20b
13
purchase_register.py
96
style: format code with black
13,854
0
12
59
20
65,320
21
erpnext
14
erpnext/accounts/report/purchase_register/purchase_register.py
Python
11
{ "docstring": "select name, supplier_group from `tabSupplier`\n\t\twhere name in (%s)", "language": "en", "n_whitespaces": 7, "n_words": 9, "vocab_size": 9 }
https://github.com/frappe/erpnext.git
1
test_get_whois_admin
def test_get_whois_admin(self) -> None: channel = self.make_request( "GET", self.url, access_token=self.admin_user_tok, ) self.assertEqual(HTTPStatus.OK, channel.code, msg=channel.json_body) self.assertEqual(self.other_user, channel.json_body["user_id"]) self.assertIn("devices", channel.json_body)
901b264c0c88f39cbfb8b2229e0dc57968882658
10
test_user.py
112
Add type hints to `tests/rest/admin` (#11851)
71,103
0
93
70
18
246,209
18
synapse
15
tests/rest/admin/test_user.py
Python
12
{ "docstring": "\n The lookup should succeed for an admin.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
https://github.com/matrix-org/synapse.git
8
compute_configs
def compute_configs(organization_id=None, project_id=None, public_key=None): from sentry.models import Project, ProjectKey validate_args(organization_id, project_id, public_key) configs = {} if organization_id: # We want to re-compute all projects in an organization, instead of simply # removing the configs and rely on relay requests to lazily re-compute them. This # is done because we do want want to delete project configs in `invalidate_project_config` # which might cause the key to disappear and trigger the task again. Without this behavior # it could be possible that refrequent invalidations cause the task to take excessive time # to complete. projects = list(Project.objects.filter(organization_id=organization_id)) for key in ProjectKey.objects.filter(project__in=projects): # If we find the config in the cache it means it was active. As such we want to # recalculate it. If the config was not there at all, we leave it and avoid the # cost of re-computation. if projectconfig_cache.get(key.public_key) is not None: configs[key.public_key] = compute_projectkey_config(key) elif project_id: for key in ProjectKey.objects.filter(project_id=project_id): configs[key.public_key] = compute_projectkey_config(key) elif public_key: try: key = ProjectKey.objects.get(public_key=public_key) except ProjectKey.DoesNotExist: # The invalidation task was triggered for a deletion and the # ProjectKey should be deleted from the cache. # # This used to delete the cache entry instead of disabling it. The # reason for that was to work around a bug in our model signal # handlers that sent off the invalidation tasks before the DB # transaction was committed, causing us to write stale caches. That # bug was fixed in https://github.com/getsentry/sentry/pull/35671 configs[public_key] = {"disabled": True} else: configs[public_key] = compute_projectkey_config(key) else: raise TypeError("One of the arguments must not be None") return configs
d2fdbaf39e38bacf1fc5924b3feeb46f26de2171
15
relay.py
291
feat(relay): Eagerly recalculate project options in compute_configs (#36059)
18,859
0
572
172
148
92,047
259
sentry
21
src/sentry/tasks/relay.py
Python
22
{ "docstring": "Computes all configs for the org, project or single public key.\n\n You must only provide one single argument, not all.\n\n :returns: A dict mapping all affected public keys to their config. The dict will not\n contain keys which should be retained in the cache unchanged.\n ", "language": "en", "n_whitespaces": 61, "n_words": 45, "vocab_size": 38 }
https://github.com/getsentry/sentry.git
4
_signature_get_bound_param
def _signature_get_bound_param(spec): assert spec.startswith('($') pos = spec.find(',') if pos == -1: pos = spec.find(')') cpos = spec.find(':') assert cpos == -1 or cpos > pos cpos = spec.find('=') assert cpos == -1 or cpos > pos return spec[2:pos]
8198943edd73a363c266633e1aa5b2a9e9c9f526
11
inspect.py
134
add python 3.10.4 for windows
55,271
0
72
76
19
218,383
38
XX-Net
6
python3.10.4/Lib/inspect.py
Python
10
{ "docstring": " Private helper to get first parameter name from a\n __text_signature__ of a builtin method, which should\n be in the following format: '($param1, ...)'.\n Assumptions are that the first argument won't have\n a default value or an annotation.\n ", "language": "en", "n_whitespaces": 53, "n_words": 37, "vocab_size": 33 }
https://github.com/XX-net/XX-Net.git
6
get_data
def get_data(report_filters): from_date = get_unsync_date(report_filters) if not from_date: return [] result = [] voucher_wise_dict = {} data = frappe.db.sql( , (from_date, report_filters.company), as_dict=1, ) for d in data: voucher_wise_dict.setdefault((d.item_code, d.warehouse), []).append(d) closing_date = add_days(from_date, -1) for key, stock_data in voucher_wise_dict.items(): prev_stock_value = get_stock_value_on( posting_date=closing_date, item_code=key[0], warehouse=key[1] ) for data in stock_data: expected_stock_value = prev_stock_value + data.stock_value_difference if abs(data.stock_value - expected_stock_value) > 0.1: data.difference_value = abs(data.stock_value - expected_stock_value) data.expected_stock_value = expected_stock_value result.append(data) return result
494bd9ef78313436f0424b918f200dab8fc7c20b
16
incorrect_stock_value_report.py
258
style: format code with black
14,647
0
48
169
50
67,870
73
erpnext
30
erpnext/stock/report/incorrect_stock_value_report/incorrect_stock_value_report.py
Python
36
{ "docstring": "\n\t\t\tSELECT\n\t\t\t\tname, posting_date, posting_time, voucher_type, voucher_no,\n\t\t\t\tstock_value_difference, stock_value, warehouse, item_code\n\t\t\tFROM\n\t\t\t\t`tabStock Ledger Entry`\n\t\t\tWHERE\n\t\t\t\tposting_date\n\t\t\t\t= %s and company = %s\n\t\t\t\tand is_cancelled = 0\n\t\t\tORDER BY timestamp(posting_date, posting_time) asc, creation asc\n\t\t", "language": "en", "n_whitespaces": 23, "n_words": 33, "vocab_size": 29 }
https://github.com/frappe/erpnext.git