Dataset columns (name, dtype, and value range; for string columns the min/max refer to string length):

column          dtype          min      max
complexity      int64          1        139
fun_name        stringlengths  1        80
code            stringlengths  101      62.2k
commit_id       stringlengths  40       40
ast_errors      stringlengths  0        3.11k
ast_levels      int64          6        36
file_name       stringlengths  5        79
n_ast_nodes     int64          17       19.2k
commit_message  stringlengths  3        15.3k
d_id            int64          12       121k
n_ast_errors    int64          0        9
n_whitespaces   int64          4        10.8k
token_counts    int64          5        3.06k
vocab_size      int64          4        1.11k
id              int64          20       338k
n_words         int64          4        4.82k
repo            stringlengths  3        22
n_identifiers   int64          2        176
path            stringlengths  7        134
language        stringclasses  1 value
nloc            int64          1        413
documentation   dict           -        -
url             stringlengths  31       59
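A minimal sketch of how rows with this schema might be loaded and inspected, assuming the data is published as a Hugging Face `datasets` dataset; the dataset identifier below is a placeholder, not the real name.

# Hypothetical usage sketch: "some-org/python-code-metrics" is a placeholder id, not the actual dataset name.
from datasets import load_dataset

ds = load_dataset("some-org/python-code-metrics", split="train")
row = ds[0]
print(row["repo"], row["path"], row["fun_name"])                # repository, file path, function name
print("complexity:", row["complexity"], "| nloc:", row["nloc"])  # cyclomatic complexity and lines of code
print(row["code"])                                               # raw function source
print(row["documentation"]["docstring"])                         # extracted docstring and its stats

Example rows follow, one field value per line in the column order above (empty fields such as ast_errors are omitted).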
1
alembic_stamp
def alembic_stamp(revision):
    # lazy import for performance
    import alembic.command

    alembic.command.stamp(alembic_config(), revision=revision)
36e7e0838aeaffc9492b330297e4905f3ab4b11f
9
alembic_commands.py
42
code review revisions pt3
10,767
0
23
24
10
53,272
11
prefect
6
src/prefect/orion/database/alembic_commands.py
Python
3
{ "docstring": "\n Stamp the revision table with the given revision; don’t run any migrations\n\n Args:\n revision: The revision passed to `alembic stamp`.\n ", "language": "en", "n_whitespaces": 37, "n_words": 20, "vocab_size": 18 }
https://github.com/PrefectHQ/prefect.git
2
compat_system
def compat_system(source_dir):
    try:
        system = load_system(source_dir)
    except (FileNotFoundError, KeyError):
        system = {}
    system.setdefault(
        'build-backend',
        'setuptools.build_meta:__legacy__',
    )
    system.setdefault('requires', ['setuptools', 'wheel'])
    return system
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
10
build.py
87
upd; format
13,077
0
70
48
18
62,961
21
transferlearning
7
.venv/lib/python3.8/site-packages/pip/_vendor/pep517/build.py
Python
11
{ "docstring": "\n Given a source dir, attempt to get a build system backend\n and requirements from pyproject.toml. Fallback to\n setuptools but only if the file was not found or a build\n system was not indicated.\n ", "language": "en", "n_whitespaces": 49, "n_words": 33, "vocab_size": 26 }
https://github.com/jindongwang/transferlearning.git
2
get_static_upper_page
def get_static_upper_page(with_shutdown):
    template = 
    if with_shutdown:
        shutdown_link = '<a href="SHUTDOWN THE SERVER">Shutdown</a>'
    else:
        shutdown_link = ""
    return template % shutdown_link
b0e85694107992e00a2f9fb48e6410c50fe1f1f6
10
wordnet_app.py
49
Updated Copyright year to 2022 (#2928)
7,529
0
50
24
15
42,431
20
nltk
4
nltk/app/wordnet_app.py
Python
30
{ "docstring": "\n Return the upper frame page,\n\n If with_shutdown is True then a 'shutdown' button is also provided\n to shutdown the server.\n \n<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01//EN\" \"http://www.w3.org/TR/html4/strict.dtd\">\n<html>\n <!-- Natural Language Toolkit: Wordnet Interface: Graphical Wordnet Browser\n Copyright (C) 2001-2022 NLTK Project\n Author: Jussi Salmela <[email protected]>\n URL: <https://www.nltk.org/>\n For license information, see LICENSE.TXT -->\n <head>\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=iso-8859-1\" />\n <title>Untitled Document</title>\n </head>\n <body>\n <form method=\"GET\" action=\"search\" target=\"body\">\n Current Word:&nbsp;<input type=\"text\" id=\"currentWord\" size=\"10\" disabled>\n Next Word:&nbsp;<input type=\"text\" id=\"nextWord\" name=\"nextWord\" size=\"10\">\n <input name=\"searchButton\" type=\"submit\" value=\"Search\">\n </form>\n <a target=\"body\" href=\"web_help.html\">Help</a>\n %s\n\n</body>\n</html>\n", "language": "en", "n_whitespaces": 215, "n_words": 91, "vocab_size": 85 }
https://github.com/nltk/nltk.git
1
test_add_battery_later
async def test_add_battery_later(hass, aioclient_mock, mock_deconz_websocket):
    data = {
        "sensors": {
            "1": {
                "name": "Switch 1",
                "type": "ZHASwitch",
                "state": {"buttonevent": 1000},
                "config": {},
                "uniqueid": "00:00:00:00:00:00:00:00-00-0000",
            },
            "2": {
                "name": "Switch 2",
                "type": "ZHASwitch",
                "state": {"buttonevent": 1000},
                "config": {},
                "uniqueid": "00:00:00:00:00:00:00:00-00-0001",
            },
        }
    }
    with patch.dict(DECONZ_WEB_REQUEST, data):
        await setup_deconz_integration(hass, aioclient_mock)

    assert len(hass.states.async_all()) == 0

    event_changed_sensor = {
        "t": "event",
        "e": "changed",
        "r": "sensors",
        "id": "2",
        "config": {"battery": 50},
    }
    await mock_deconz_websocket(data=event_changed_sensor)
    await hass.async_block_till_done()

    assert len(hass.states.async_all()) == 0

    event_changed_sensor = {
        "t": "event",
        "e": "changed",
        "r": "sensors",
        "id": "1",
        "config": {"battery": 50},
    }
    await mock_deconz_websocket(data=event_changed_sensor)
    await hass.async_block_till_done()

    assert len(hass.states.async_all()) == 1
    assert hass.states.get("sensor.switch_1_battery").state == "50"


@pytest.mark.parametrize("model_id", ["0x8030", "0x8031", "0x8034", "0x8035"])
61ff52c93acd2f274d24469494b51356c88bb66f
@pytest.mark.parametrize("model_id", ["0x8030", "0x8031", "0x8034", "0x8035"])
15
test_sensor.py
466
Normalize deCONZ sensor unique IDs (#76357) * Normalize deCONZ sensor unique IDs * Handle battery sensors properly * Fix daylight sensor unique ID
104,097
1
439
229
59
305,307
107
core
19
tests/components/deconz/test_sensor.py
Python
43
{ "docstring": "Test that a battery sensor can be created later on.\n\n Without an initial battery state a battery sensor\n can be created once a value is reported.\n ", "language": "en", "n_whitespaces": 35, "n_words": 26, "vocab_size": 18 }
https://github.com/home-assistant/core.git
6
_get_text_value
def _get_text_value(self, row) -> Tuple[str, Dict[str, str]]:
    # construct text query from the hf_text_fields specified
    text_dict = {}
    for col in self.hf_text_fields:
        text_part = row.get(col)
        if text_part is None:
            raise KeyError(f'Feature "{col}" not found in data.')
        text_dict[col] = text_part
    query = '\n'.join(text_dict.values())

    if hasattr(self, "hf_message_fields"):
        for col in self.hf_message_fields:
            text_part = row.get(col)
            if text_part is None:
                raise KeyError(f'Feature "{col}" not found in data.')
            text_dict[col] = text_part

    return query, text_dict
4f7e4b60975ff73be4227d7c8c428319366e1fe9
15
agents.py
189
[tasks/huggingface] Way to add to message but not "text" (#4516) I want to be able to add my own mutators to grab information from other fields out of a huggingface dataset. Since I don't want it to show up in the text itself, I do it in a new attribute. I also delete some code that was written at some point but isn't used anywhere while I'm here. Test Plan: Run `parlai dd -t` with a new dataset that I'm working on that uses this. Also run `parlai dd -t glue; parlai dd -t superglue` to validate that I don't break anything that already exists. (Former dies, but it's cause of a dataset issues on the HF side where the size of the datasets have changed, so not worrying about it)
47,158
0
241
112
40
195,032
69
ParlAI
17
parlai/tasks/huggingface/agents.py
Python
18
{ "docstring": "\n return the constructed text query and dict mapping text field names to values.\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 12 }
https://github.com/facebookresearch/ParlAI.git
2
_postprocess
def _postprocess(self, inputs):
    final_results = []
    for text, similarity in zip(inputs['text'], inputs['result']):
        result = {}
        result['text1'] = text[0]
        result['text2'] = text[1]
        result['similarity'] = similarity
        final_results.append(result)
    return final_results
621357338437ee420eabbbf5ab19065bc85e73a5
10
text_similarity.py
112
Update neural search readme and Add Paddle Serving Support (#1558) * add recall inference similarity * update examples * updatea readme * update dir name * update neural search readme * update milvus readme * update domain adaptive pretraining readme * fix the mistakes * update readme * add recall Paddle Serving Support * update readme * update readme and format the code * reformat the files * move the files * reformat the code * remove redundant code Co-authored-by: Zeyu Chen <[email protected]> Co-authored-by: tianxin <[email protected]>
118,095
0
110
66
21
322,212
27
PaddleNLP
9
paddlenlp/taskflow/text_similarity.py
Python
9
{ "docstring": "\n The model output is tag ids, this function will convert the model output to raw text.\n ", "language": "en", "n_whitespaces": 31, "n_words": 16, "vocab_size": 14 }
https://github.com/PaddlePaddle/PaddleNLP.git
10
_most_frequent
def _most_frequent(array, extra_value, n_repeat):
    # Compute the most frequent value in array only
    if array.size > 0:
        if array.dtype == object:
            # scipy.stats.mode is slow with object dtype array.
            # Python Counter is more efficient
            counter = Counter(array)
            most_frequent_count = counter.most_common(1)[0][1]
            # tie breaking similarly to scipy.stats.mode
            most_frequent_value = min(
                value
                for value, count in counter.items()
                if count == most_frequent_count
            )
        else:
            mode = _mode(array)
            most_frequent_value = mode[0][0]
            most_frequent_count = mode[1][0]
    else:
        most_frequent_value = 0
        most_frequent_count = 0

    # Compare to array + [extra_value] * n_repeat
    if most_frequent_count == 0 and n_repeat == 0:
        return np.nan
    elif most_frequent_count < n_repeat:
        return extra_value
    elif most_frequent_count > n_repeat:
        return most_frequent_value
    elif most_frequent_count == n_repeat:
        # tie breaking similarly to scipy.stats.mode
        return min(most_frequent_value, extra_value)
02a4b342181e5ff0226081691308414e53c3107b
15
_base.py
221
MAINT fix the way to call stats.mode (#23633) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Meekail Zain <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]>
76,401
0
366
137
67
260,662
121
scikit-learn
20
sklearn/impute/_base.py
Python
25
{ "docstring": "Compute the most frequent value in a 1d array extended with\n [extra_value] * n_repeat, where extra_value is assumed to be not part\n of the array.", "language": "en", "n_whitespaces": 30, "n_words": 25, "vocab_size": 24 }
https://github.com/scikit-learn/scikit-learn.git
2
order
def order(self):
    return reduce(lcm, [len(cycle) for cycle in self.cyclic_form], 1)
498015021131af4dbb07eb110e5badaba8250c7b
10
permutations.py
41
Updated import locations
47,658
0
24
26
10
196,158
10
sympy
7
sympy/combinatorics/permutations.py
Python
2
{ "docstring": "\n Computes the order of a permutation.\n\n When the permutation is raised to the power of its\n order it equals the identity permutation.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation\n >>> from sympy import init_printing\n >>> init_printing(perm_cyclic=False, pretty_print=False)\n >>> p = Permutation([3, 1, 5, 2, 4, 0])\n >>> p.order()\n 4\n >>> (p**(p.order()))\n Permutation([], size=6)\n\n See Also\n ========\n\n identity, cardinality, length, rank, size\n ", "language": "en", "n_whitespaces": 181, "n_words": 61, "vocab_size": 47 }
https://github.com/sympy/sympy.git
1
test_filename_date_parse_valid_ymd
def test_filename_date_parse_valid_ymd(self, *args):
    self.assertEqual(
        parse_date("/tmp/Scan-2022-04-01.pdf", "No date in here"),
        datetime.datetime(2022, 4, 1, 0, 0, tzinfo=tz.gettz(settings.TIME_ZONE)),
    )
8a6aaf4e2d05021a14adc681c66dff6a815aa2a0
13
test_date_parsing.py
73
Adds additional testing for both date parsing and consumed document created date
116,952
0
59
47
15
319,512
16
paperless-ngx
11
src/documents/tests/test_date_parsing.py
Python
5
{ "docstring": "\n GIVEN:\n - Date parsing from the filename is enabled\n - Filename date format is with Year Month Day (YMD)\n - Filename contains date matching the format\n\n THEN:\n - Should parse the date from the filename\n ", "language": "en", "n_whitespaces": 101, "n_words": 35, "vocab_size": 22 }
https://github.com/paperless-ngx/paperless-ngx.git
8
install_artifact
def install_artifact(b_coll_targz_path, b_collection_path, b_temp_path, signatures, keyring):
    try:
        with tarfile.open(b_coll_targz_path, mode='r') as collection_tar:
            # Verify the signature on the MANIFEST.json before extracting anything else
            _extract_tar_file(collection_tar, MANIFEST_FILENAME, b_collection_path, b_temp_path)

            if signatures and keyring is not None:
                manifest_file = os.path.join(to_text(b_collection_path, errors='surrogate_or_strict'), MANIFEST_FILENAME)
                verify_artifact_manifest(manifest_file, signatures, keyring)

            files_member_obj = collection_tar.getmember('FILES.json')
            with _tarfile_extract(collection_tar, files_member_obj) as (dummy, files_obj):
                files = json.loads(to_text(files_obj.read(), errors='surrogate_or_strict'))

            _extract_tar_file(collection_tar, 'FILES.json', b_collection_path, b_temp_path)

            for file_info in files['files']:
                file_name = file_info['name']
                if file_name == '.':
                    continue

                if file_info['ftype'] == 'file':
                    _extract_tar_file(
                        collection_tar, file_name, b_collection_path, b_temp_path,
                        expected_hash=file_info['chksum_sha256'])
                else:
                    _extract_tar_dir(collection_tar, file_name, b_collection_path)

    except Exception:
        # Ensure we don't leave the dir behind in case of a failure.
        shutil.rmtree(b_collection_path)

        b_namespace_path = os.path.dirname(b_collection_path)
        if not os.listdir(b_namespace_path):
            os.rmdir(b_namespace_path)

        raise
43e55db20821a1341d21ffa1e4e7e6185b244105
18
__init__.py
353
ansible-galaxy - add signature verification of the MANIFEST.json (#76681) * ansible-galaxy collection install|verify: - Support verifying the origin of the MANIFEST.json when the Galaxy server has provided signatures. - Allow supplemental signatures to use during verification on the CLI/requirements file. * ansible-galaxy collection install: - Support disabling signature verification. This silences the warning provided by ansible-galaxy if the Galaxy server provided signatures it cannot use because no keyring is configured. - Store Galaxy server metadata alongside installed collections for provenance. This is used by 'ansible-galaxy collection verify --offline'. * Add unit tests for method that gets signatures from a Galaxy server * Add integration tests for user-provided signature sources - Test CLI option combinations - Test installing collections with valid/invalid signature sources - Test disabling GPG verification when installing collections - Test verifying collections with valid/invalid signature sources * Make signature verification advisory-by-default if signatures are provided by the Galaxy server - Make the default keyring None - Warn if the keyring is None but the Galaxy server provided signatures - Error if the keyring is None but the user supplied signatures - Error if the keyring is not None but is invalid * changelog * add ansible-galaxy user documentation for new options Co-authored-by: Matt Martz <[email protected]> Co-authored-by: Sviatoslav Sydorenko <[email protected]> Co-authored-by: Martin Krizek <[email protected]> Co-authored-by: Sandra McCann <[email protected]> Co-authored-by: Andy Mott <[email protected]> Co-authored-by: John R Barker <[email protected]>
78,488
0
446
216
83
266,590
108
ansible
39
lib/ansible/galaxy/collection/__init__.py
Python
26
{ "docstring": "Install a collection from tarball under a given path.\n\n :param b_coll_targz_path: Collection tarball to be installed.\n :param b_collection_path: Collection dirs layout path.\n :param b_temp_path: Temporary dir path.\n :param signatures: frozenset of signatures to verify the MANIFEST.json\n :param keyring: The keyring used during GPG verification\n ", "language": "en", "n_whitespaces": 62, "n_words": 44, "vocab_size": 34 }
https://github.com/ansible/ansible.git
1
test_create_version
def test_create_version(self) -> None:
    version = self.get_success(
        self.handler.create_version(
            self.local_user,
            {
                "algorithm": "m.megolm_backup.v1",
                "auth_data": "first_version_auth_data",
            },
        )
    )
    self.assertEqual(version, "1")

    # check we can retrieve it as the current version
    res = self.get_success(self.handler.get_version_info(self.local_user))
    version_etag = res["etag"]
    self.assertIsInstance(version_etag, str)
    del res["etag"]
    self.assertDictEqual(
        res,
        {
            "version": "1",
            "algorithm": "m.megolm_backup.v1",
            "auth_data": "first_version_auth_data",
            "count": 0,
        },
    )

    # check we can retrieve it as a specific version
    res = self.get_success(self.handler.get_version_info(self.local_user, "1"))
    self.assertEqual(res["etag"], version_etag)
    del res["etag"]
    self.assertDictEqual(
        res,
        {
            "version": "1",
            "algorithm": "m.megolm_backup.v1",
            "auth_data": "first_version_auth_data",
            "count": 0,
        },
    )

    # upload a new one...
    version = self.get_success(
        self.handler.create_version(
            self.local_user,
            {
                "algorithm": "m.megolm_backup.v1",
                "auth_data": "second_version_auth_data",
            },
        )
    )
    self.assertEqual(version, "2")

    # check we can retrieve it as the current version
    res = self.get_success(self.handler.get_version_info(self.local_user))
    del res["etag"]
    self.assertDictEqual(
        res,
        {
            "version": "2",
            "algorithm": "m.megolm_backup.v1",
            "auth_data": "second_version_auth_data",
            "count": 0,
        },
    )
652d1669c5a103b1c20478770c4aaf18849c09a3
13
test_e2e_room_keys.py
448
Add missing type hints to tests.handlers. (#14680) And do not allow untyped defs in tests.handlers.
73,370
0
802
253
52
250,292
131
synapse
14
tests/handlers/test_e2e_room_keys.py
Python
58
{ "docstring": "Check that we can create and then retrieve versions.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/matrix-org/synapse.git
4
is_parametric_dtype
def is_parametric_dtype(dtype):
    if dtype.itemsize == 0:
        return True
    if issubclass(dtype.type, (np.datetime64, np.timedelta64)):
        if dtype.name.endswith("64"):
            # Generic time units
            return True
    return False
7332a698c8194c6e680510da086678fe07d9cf9d
10
test_array_coercion.py
78
Fix some typos.
38,598
0
70
47
17
160,332
22
numpy
10
numpy/core/tests/test_array_coercion.py
Python
7
{ "docstring": "Returns True if the dtype is a parametric legacy dtype (itemsize\n is 0, or a datetime without units)\n ", "language": "en", "n_whitespaces": 24, "n_words": 18, "vocab_size": 15 }
https://github.com/numpy/numpy.git
1
_handle_default_message
def _handle_default_message(self, type, data):
    logger.debug(f"Received message from Leader of type {type}: {data}")
2b5f0678772bea0abaf4abe93efc55de43ea3e0e
9
rpc.py
37
Refactoring, minor improvements, data provider improvements
34,858
0
26
17
12
150,868
12
freqtrade
6
freqtrade/rpc/rpc.py
Python
2
{ "docstring": "\n Default leader message handler, just logs it. We should never have to\n run this unless the leader sends us some weird message.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 21 }
https://github.com/freqtrade/freqtrade.git
3
borrowed
def borrowed(self) -> float:
    if self.has_no_leverage:
        return 0.0
    elif not self.is_short:
        return (self.amount * self.open_rate) * ((self.leverage - 1) / self.leverage)
    else:
        return self.amount
baefda80d19e3eaf14e2ec6a600dcb8c456a3ccf
14
models.py
83
Enable flake8 E226 rule
34,448
0
85
53
21
149,544
24
freqtrade
8
freqtrade/persistence/models.py
Python
12
{ "docstring": "\n The amount of currency borrowed from the exchange for leverage trades\n If a long trade, the amount is in base currency\n If a short trade, the amount is in the other currency being traded\n ", "language": "en", "n_whitespaces": 75, "n_words": 34, "vocab_size": 22 }
https://github.com/freqtrade/freqtrade.git
5
fit_transform
def fit_transform(self, y):
    if self.classes is not None:
        return self.fit(y).transform(y)

    self._validate_params()

    self._cached_dict = None

    # Automatically increment on new class
    class_mapping = defaultdict(int)
    class_mapping.default_factory = class_mapping.__len__
    yt = self._transform(y, class_mapping)

    # sort classes and reorder columns
    tmp = sorted(class_mapping, key=class_mapping.get)

    # (make safe for tuples)
    dtype = int if all(isinstance(c, int) for c in tmp) else object
    class_mapping = np.empty(len(tmp), dtype=dtype)
    class_mapping[:] = tmp
    self.classes_, inverse = np.unique(class_mapping, return_inverse=True)

    # ensure yt.indices keeps its current dtype
    yt.indices = np.array(inverse[yt.indices], dtype=yt.indices.dtype, copy=False)

    if not self.sparse_output:
        yt = yt.toarray()

    return yt
fd60379f95f5c0d3791b2f54c4d070c0aa2ac576
11
_label.py
269
MAINT parameter validation for Multilabel Binarizer (#23802) Co-authored-by: Jérémie du Boisberranger <[email protected]>
76,245
0
244
171
65
260,432
89
scikit-learn
36
sklearn/preprocessing/_label.py
Python
17
{ "docstring": "Fit the label sets binarizer and transform the given label sets.\n\n Parameters\n ----------\n y : iterable of iterables\n A set of labels (any orderable and hashable object) for each\n sample. If the `classes` parameter is set, `y` will not be\n iterated.\n\n Returns\n -------\n y_indicator : {ndarray, sparse matrix} of shape (n_samples, n_classes)\n A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]`\n is in `y[i]`, and 0 otherwise. Sparse matrix will be of CSR\n format.\n ", "language": "en", "n_whitespaces": 190, "n_words": 75, "vocab_size": 61 }
https://github.com/scikit-learn/scikit-learn.git
1
select_query
def select_query(self, query):
    renderer = SqlalchemyRender('mysql')
    query_str = renderer.get_string(query, with_failback=True)
    return self.native_query(query_str)
e00014335bbda3623ee1ccac02c9427ae324141d
9
mysql_handler.py
56
Update mysql handler
25,255
0
42
33
11
114,701
12
mindsdb
9
mindsdb/integrations/mysql_handler/mysql_handler.py
Python
4
{ "docstring": "\n Retrieve the data from the SQL statement.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 6 }
https://github.com/mindsdb/mindsdb.git
1
test_multigraph_add_edges_from_four_tuple_misordered
def test_multigraph_add_edges_from_four_tuple_misordered(self):
    G = nx.MultiGraph()
    with pytest.raises(TypeError):
        # key/data values flipped in 4-tuple
        G.add_edges_from([(0, 1, {"color": "red"}, 0)])
15614a4e2736752b0200d3a770d83f2be2d130b9
14
test_multigraph.py
73
Update tests in base class and simple rename in convert.py (#5848)
42,237
0
61
41
18
177,027
18
networkx
9
networkx/classes/tests/test_multigraph.py
Python
4
{ "docstring": "add_edges_from expects 4-tuples of the format (u, v, key, data_dict).\n\n Ensure 4-tuples of form (u, v, data_dict, key) raise exception.\n ", "language": "en", "n_whitespaces": 34, "n_words": 20, "vocab_size": 16 }
https://github.com/networkx/networkx.git
1
test_get_previous_start_date_none
def test_get_previous_start_date_none(self, dag_maker):
    with dag_maker("test_get_previous_start_date_none", schedule_interval=None) as dag:
        task = EmptyOperator(task_id="op")

    day_1 = DEFAULT_DATE
    day_2 = DEFAULT_DATE + datetime.timedelta(days=1)

    # Create a DagRun for day_1 and day_2. Calling ti_2.get_previous_start_date()
    # should return the start_date of ti_1 (which is None because ti_1 was not run).
    # It should not raise an error.
    dagrun_1 = dag_maker.create_dagrun(
        execution_date=day_1,
        state=State.RUNNING,
        run_type=DagRunType.MANUAL,
    )
    dagrun_2 = dag.create_dagrun(
        execution_date=day_2,
        state=State.RUNNING,
        run_type=DagRunType.MANUAL,
    )
    ti_1 = dagrun_1.get_task_instance(task.task_id)
    ti_2 = dagrun_2.get_task_instance(task.task_id)
    ti_1.task = task
    ti_2.task = task

    assert ti_2.get_previous_start_date() == ti_1.start_date
    assert ti_1.start_date is None
49e336ae0302b386a2f47269a6d13988382d975f
12
test_taskinstance.py
219
Replace usage of `DummyOperator` with `EmptyOperator` (#22974) * Replace usage of `DummyOperator` with `EmptyOperator`
9,179
0
281
135
59
47,625
85
airflow
29
tests/models/test_taskinstance.py
Python
21
{ "docstring": "\n Test that get_previous_start_date() can handle TaskInstance with no start_date.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
https://github.com/apache/airflow.git
2
print_mathml
def print_mathml(expr, printer='content', **settings):
    if printer == 'presentation':
        s = MathMLPresentationPrinter(settings)
    else:
        s = MathMLContentPrinter(settings)
    xml = s._print(sympify(expr))
    s.apply_patch()
    pretty_xml = xml.toprettyxml()
    s.restore_patch()
    print(pretty_xml)


# For backward compatibility
MathMLPrinter = MathMLContentPrinter
59d22b6bb7287613d598611027f640d068ca5748
11
mathml.py
119
Moved imports to higher level
47,914
0
67
64
26
196,414
31
sympy
16
sympy/printing/mathml.py
Python
10
{ "docstring": "\n Prints a pretty representation of the MathML code for expr. If printer is\n presentation then prints Presentation MathML else prints content MathML.\n\n Examples\n ========\n\n >>> ##\n >>> from sympy import print_mathml\n >>> from sympy.abc import x\n >>> print_mathml(x+1) #doctest: +NORMALIZE_WHITESPACE\n <apply>\n <plus/>\n <ci>x</ci>\n <cn>1</cn>\n </apply>\n >>> print_mathml(x+1, printer='presentation')\n <mrow>\n <mi>x</mi>\n <mo>+</mo>\n <mn>1</mn>\n </mrow>\n\n ", "language": "en", "n_whitespaces": 138, "n_words": 53, "vocab_size": 45 }
https://github.com/sympy/sympy.git
2
sync_to_numpy_or_python_type
def sync_to_numpy_or_python_type(tensors):
    if isinstance(tensors, tf.distribute.experimental.coordinator.RemoteValue):
        tensors = tensors.fetch()
84afc5193d38057e2e2badf9c889ea87d80d8fbf
11
tf_utils.py
49
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,872
0
21
42
8
277,101
8
keras
9
keras/utils/tf_utils.py
Python
5
{ "docstring": "Syncs and converts a structure of `Tensor`s to `NumPy` arrays or Python scalar types.\n\n For each tensor, it calls `tensor.numpy()`. If the result is a scalar value,\n it converts it to a Python type, such as a float or int, by calling\n `result.item()`.\n\n Numpy scalars are converted, as Python types are often more convenient to deal\n with. This is especially useful for bfloat16 Numpy scalars, which don't\n support as many operations as other Numpy values.\n\n Async strategies (such as `TPUStrategy` and `ParameterServerStrategy`) are\n forced to\n sync during this process.\n\n Args:\n tensors: A structure of tensors.\n\n Returns:\n `tensors`, but scalar tensors are converted to Python types and non-scalar\n tensors are converted to Numpy arrays.\n ", "language": "en", "n_whitespaces": 164, "n_words": 113, "vocab_size": 77 }
https://github.com/keras-team/keras.git
2
test_setup_permanent_error
async def test_setup_permanent_error(hass, aioclient_mock):
    fake_async_add_entities = MagicMock()

    errors = [HTTPStatus.BAD_REQUEST, HTTPStatus.UNAUTHORIZED, HTTPStatus.FORBIDDEN]
    for error in errors:
        aioclient_mock.get(re.compile("api.foobot.io/v2/owner/.*"), status=error)
        result = await foobot.async_setup_platform(
            hass, VALID_CONFIG, fake_async_add_entities
        )
        assert result is None
8896229ea641a558161d8caed796895e9a78f457
12
test_sensor.py
103
Improve type hint in foobot sensor entity (#77164)
103,613
0
81
65
26
304,818
30
core
19
tests/components/foobot/test_sensor.py
Python
9
{ "docstring": "Expected failures caused by permanent errors in API response.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
9
integrate
def integrate(self, comet_ml=None, wandb=None, mlflow=None) -> None:
    analytics_integration = ""
    if comet_ml is not None:
        analytics_integration = "CometML"
        comet_ml.log_other("Created from", "Gradio")
        if self.share_url is not None:
            comet_ml.log_text("gradio: " + self.share_url)
            comet_ml.end()
        else:
            comet_ml.log_text("gradio: " + self.local_url)
            comet_ml.end()
    if wandb is not None:
        analytics_integration = "WandB"
        if self.share_url is not None:
            wandb.log(
                {
                    "Gradio panel": wandb.Html(
                        '<iframe src="'
                        + self.share_url
                        + '" width="'
                        + str(self.width)
                        + '" height="'
                        + str(self.height)
                        + '" frameBorder="0"></iframe>'
                    )
                }
            )
        else:
            print(
                "The WandB integration requires you to "
                "`launch(share=True)` first."
            )
    if mlflow is not None:
        analytics_integration = "MLFlow"
        if self.share_url is not None:
            mlflow.log_param("Gradio Interface Share Link", self.share_url)
        else:
            mlflow.log_param("Gradio Interface Local Link", self.local_url)
    if self.analytics_enabled and analytics_integration:
        data = {"integration": analytics_integration}
        utils.integration_analytics(data)
cc0cff893f9d7d472788adc2510c123967b384fe
22
interface.py
356
Format The Codebase - black formatting - isort formatting
42,929
0
755
202
66
179,258
120
gradio
22
gradio/interface.py
Python
52
{ "docstring": "\n A catch-all method for integrating with other libraries.\n Should be run after launch()\n Parameters:\n comet_ml (Experiment): If a comet_ml Experiment object is provided,\n will integrate with the experiment and appear on Comet dashboard\n wandb (module): If the wandb module is provided, will integrate\n with it and appear on WandB dashboard\n mlflow (module): If the mlflow module is provided, will integrate\n with the experiment and appear on ML Flow dashboard\n ", "language": "en", "n_whitespaces": 165, "n_words": 69, "vocab_size": 39 }
https://github.com/gradio-app/gradio.git
2
require_huggingface_suite
def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(),
        "test requires the Hugging Face suite",
    )(test_case)
8944975a3c8db55f57774e9e566e7a791be766cb
11
testing.py
44
Reenable Gather for Metrics (#590) * Clean and finish Co-authored-by: Sylvain Gugger <[email protected]>
121,157
0
30
24
14
338,046
14
accelerate
6
src/accelerate/test_utils/testing.py
Python
4
{ "docstring": "\n Decorator marking a test that requires transformers and datasets. These tests are skipped when they are not.\n ", "language": "en", "n_whitespaces": 24, "n_words": 17, "vocab_size": 16 }
https://github.com/huggingface/accelerate.git
14
_type
def _type(string, has_invisible=True, numparse=True):
    if has_invisible and (
        isinstance(string, _text_type) or isinstance(string, _binary_type)
    ):
        string = _strip_invisible(string)

    if string is None:
        return _none_type
    elif hasattr(string, "isoformat"):  # datetime.datetime, date, and time
        return _text_type
    elif _isbool(string):
        return _bool_type
    elif _isint(string) and numparse:
        return int
    elif _isint(string, _long_type) and numparse:
        return int
    elif _isnumber(string) and numparse:
        return float
    elif isinstance(string, _binary_type):
        return _binary_type
    else:
        return _text_type
adf24bfa9723b0621183bb27f0c889b813c06e8a
10
tabulate.py
179
[State Observability] Use a table format by default (#26159) NOTE: tabulate is copied/pasted to the codebase for table formatting. This PR changes the default layout to be the table format for both summary and list APIs.
27,792
0
168
110
40
125,175
64
ray
17
python/ray/_private/thirdparty/tabulate/tabulate.py
Python
21
{ "docstring": "The least generic type (type(None), int, float, str, unicode).\n\n >>> _type(None) is type(None)\n True\n >>> _type(\"foo\") is type(\"\")\n True\n >>> _type(\"1\") is type(1)\n True\n >>> _type('\\x1b[31m42\\x1b[0m') is type(42)\n True\n >>> _type('\\x1b[31m42\\x1b[0m') is type(42)\n True\n\n ", "language": "en", "n_whitespaces": 67, "n_words": 34, "vocab_size": 20 }
https://github.com/ray-project/ray.git
9
multi_gpu_train_one_step
def multi_gpu_train_one_step(algorithm, train_batch) -> Dict:
    config = algorithm.config
    workers = algorithm.workers
    local_worker = workers.local_worker()
    num_sgd_iter = config.get("num_sgd_iter", 1)
    sgd_minibatch_size = config.get("sgd_minibatch_size", config["train_batch_size"])

    # Determine the number of devices (GPUs or 1 CPU) we use.
    num_devices = int(math.ceil(config["num_gpus"] or 1))

    # Make sure total batch size is dividable by the number of devices.
    # Batch size per tower.
    per_device_batch_size = sgd_minibatch_size // num_devices
    # Total batch size.
    batch_size = per_device_batch_size * num_devices
    assert batch_size % num_devices == 0
    assert batch_size >= num_devices, "Batch size too small!"

    # Handle everything as if multi-agent.
    train_batch = train_batch.as_multi_agent()

    # Load data into GPUs.
    load_timer = algorithm._timers[LOAD_BATCH_TIMER]
    with load_timer:
        num_loaded_samples = {}
        for policy_id, batch in train_batch.policy_batches.items():
            # Not a policy-to-train.
            if not local_worker.is_policy_to_train(policy_id, train_batch):
                continue

            # Decompress SampleBatch, in case some columns are compressed.
            batch.decompress_if_needed()

            # Load the entire train batch into the Policy's only buffer
            # (idx=0). Policies only have >1 buffers, if we are training
            # asynchronously.
            num_loaded_samples[policy_id] = local_worker.policy_map[
                policy_id
            ].load_batch_into_buffer(batch, buffer_index=0)

    # Execute minibatch SGD on loaded data.
    learn_timer = algorithm._timers[LEARN_ON_BATCH_TIMER]
    with learn_timer:
        # Use LearnerInfoBuilder as a unified way to build the final
        # results dict from `learn_on_loaded_batch` call(s).
        # This makes sure results dicts always have the same structure
        # no matter the setup (multi-GPU, multi-agent, minibatch SGD,
        # tf vs torch).
        learner_info_builder = LearnerInfoBuilder(num_devices=num_devices)

        for policy_id, samples_per_device in num_loaded_samples.items():
            policy = local_worker.policy_map[policy_id]
            num_batches = max(1, int(samples_per_device) // int(per_device_batch_size))
            logger.debug("== sgd epochs for {} ==".format(policy_id))
            for _ in range(num_sgd_iter):
                permutation = np.random.permutation(num_batches)
                for batch_index in range(num_batches):
                    # Learn on the pre-loaded data in the buffer.
                    # Note: For minibatch SGD, the data is an offset into
                    # the pre-loaded entire train batch.
                    results = policy.learn_on_loaded_batch(
                        permutation[batch_index] * per_device_batch_size, buffer_index=0
                    )

                    learner_info_builder.add_learn_on_batch_results(results, policy_id)

        # Tower reduce and finalize results.
        learner_info = learner_info_builder.finalize()

    load_timer.push_units_processed(train_batch.count)
    learn_timer.push_units_processed(train_batch.count)

    # TODO: Move this into Trainer's `training_iteration` method for
    # better transparency.
    algorithm._counters[NUM_ENV_STEPS_TRAINED] += train_batch.count
    algorithm._counters[NUM_AGENT_STEPS_TRAINED] += train_batch.agent_steps()

    if algorithm.reward_estimators:
        learner_info[DEFAULT_POLICY_ID]["off_policy_estimation"] = {}
        for name, estimator in algorithm.reward_estimators.items():
            learner_info[DEFAULT_POLICY_ID]["off_policy_estimation"][
                name
            ] = estimator.train(train_batch)

    return learner_info
38c9e1d52aeada536a0460fba01b2633973bc989
18
train_ops.py
616
[RLlib]: Fix OPE trainables (#26279) Co-authored-by: Kourosh Hakhamaneshi <[email protected]>
27,750
0
892
362
216
125,012
330
ray
63
rllib/execution/train_ops.py
Python
66
{ "docstring": "Multi-GPU version of train_one_step.\n\n Uses the policies' `load_batch_into_buffer` and `learn_on_loaded_batch` methods\n to be more efficient wrt CPU/GPU data transfers. For example, when doing multiple\n passes through a train batch (e.g. for PPO) using `config.num_sgd_iter`, the\n actual train batch is only split once and loaded once into the GPU(s).\n\n Examples:\n >>> from ray.rllib.execution.rollout_ops import synchronous_parallel_sample\n >>> algo = [...] # doctest: +SKIP\n >>> train_batch = synchronous_parallel_sample(algo.workers) # doctest: +SKIP\n >>> # This trains the policy on one batch.\n >>> results = multi_gpu_train_one_step(algo, train_batch)) # doctest: +SKIP\n {\"default_policy\": ...}\n\n Updates the NUM_ENV_STEPS_TRAINED and NUM_AGENT_STEPS_TRAINED counters as well as\n the LOAD_BATCH_TIMER and LEARN_ON_BATCH_TIMER timers of the Algorithm instance.\n ", "language": "en", "n_whitespaces": 171, "n_words": 105, "vocab_size": 78 }
https://github.com/ray-project/ray.git
11
get_keys
def get_keys(self, subpath, method, view):
    if hasattr(view, 'action'):
        # Viewsets have explicitly named actions.
        action = view.action
    else:
        # Views have no associated action, so we determine one from the method.
        if is_list_view(subpath, method, view):
            action = 'list'
        else:
            action = self.default_mapping[method.lower()]

    named_path_components = [
        component for component
        in subpath.strip('/').split('/')
        if '{' not in component
    ]

    if is_custom_action(action):
        # Custom action, eg "/users/{pk}/activate/", "/users/active/"
        mapped_methods = {
            # Don't count head mapping, e.g. not part of the schema
            method for method in view.action_map if method != 'head'
        }
        if len(mapped_methods) > 1:
            action = self.default_mapping[method.lower()]
            if action in self.coerce_method_names:
                action = self.coerce_method_names[action]
            return named_path_components + [action]
        else:
            return named_path_components[:-1] + [action]

    if action in self.coerce_method_names:
        action = self.coerce_method_names[action]

    # Default action, eg "/users/", "/users/{pk}/"
    return named_path_components + [action]
df584350b4f77143d84615f05000f71408aec9c0
15
coreapi.py
287
Prevent head method mapping to coerce action name (#7729)
9,553
0
484
174
73
48,644
128
django-rest-framework
19
rest_framework/schemas/coreapi.py
Python
27
{ "docstring": "\n Return a list of keys that should be used to layout a link within\n the schema document.\n\n /users/ (\"users\", \"list\"), (\"users\", \"create\")\n /users/{pk}/ (\"users\", \"read\"), (\"users\", \"update\"), (\"users\", \"delete\")\n /users/enabled/ (\"users\", \"enabled\") # custom viewset list action\n /users/{pk}/star/ (\"users\", \"star\") # custom viewset detail action\n /users/{pk}/groups/ (\"users\", \"groups\", \"list\"), (\"users\", \"groups\", \"create\")\n /users/{pk}/groups/{pk}/ (\"users\", \"groups\", \"read\"), (\"users\", \"groups\", \"update\"), (\"users\", \"groups\", \"delete\")\n ", "language": "en", "n_whitespaces": 187, "n_words": 62, "vocab_size": 36 }
https://github.com/encode/django-rest-framework.git
3
set_pickradius
def set_pickradius(self, pickradius):
    if not isinstance(pickradius, Number) or pickradius < 0:
        raise ValueError("pick radius should be a distance")
    self._pickradius = pickradius

pickradius = property(get_pickradius, set_pickradius)
91f47d6eff63187f582c395c007d0152980be6b3
10
lines.py
65
Unify set_pickradius argument
23,452
0
60
31
22
109,136
25
matplotlib
9
lib/matplotlib/lines.py
Python
4
{ "docstring": "\n Set the pick radius used for containment tests.\n\n See `.contains` for more details.\n\n Parameters\n ----------\n pickradius : float\n Pick radius, in points.\n ", "language": "en", "n_whitespaces": 76, "n_words": 22, "vocab_size": 21 }
https://github.com/matplotlib/matplotlib.git
1
test_missing_config
def test_missing_config(self):
    with patch("streamlit.config.os.path.exists") as path_exists:
        path_exists.return_value = False
        config.get_config_options()

        self.assertEqual(True, config.get_option("client.caching"))
        self.assertIsNone(config.get_option("theme.font"))
dd9084523e365e637443ea351eaaaa25f52d8412
12
config_test.py
90
Report sharing removal (#4260) The report sharing feature is a substantial but completely unused portion of the code in Streamlit's underlying machinery. The feature was created early on, used by just a few groups, and has not been used by anyone for a while, as indicated by no activity in the associated S3 buckets. This commit removes that code to make the remaining code easier to navigate and understand.
26,360
0
71
48
13
118,685
13
streamlit
10
lib/tests/streamlit/config_test.py
Python
6
{ "docstring": "Test that we can initialize our config even if the file is missing.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
https://github.com/streamlit/streamlit.git
3
get_last_runtime
def get_last_runtime(self, file_path):
    stat = self._file_stats.get(file_path)
    return stat.last_duration.total_seconds() if stat and stat.last_duration else None
1507ca48d7c211799129ce7956c11f4c45fee5bc
9
manager.py
55
Fix StatD timing metric units (#21106) Co-authored-by: Tzu-ping Chung <[email protected]> Co-authored-by: Tzu-ping Chung <[email protected]>
7,740
0
35
34
13
42,816
14
airflow
8
airflow/dag_processing/manager.py
Python
3
{ "docstring": "\n :param file_path: the path to the file that was processed\n :return: the runtime (in seconds) of the process of the last run, or\n None if the file was never processed.\n :rtype: float\n ", "language": "en", "n_whitespaces": 72, "n_words": 32, "vocab_size": 24 }
https://github.com/apache/airflow.git
4
turn_on
def turn_on(self) -> None:
    if DPCode.SWITCH in self.device.function:
        self._send_command([{"code": DPCode.SWITCH, "value": True}])
        return

    # Fake turn on
    for mode in (HVACMode.HEAT_COOL, HVACMode.HEAT, HVACMode.COOL):
        if mode not in self.hvac_modes:
            continue
        self.set_hvac_mode(mode)
        break
e688f6b315a2edcab6c7e6bd1dfe8a77bf715fed
13
climate.py
116
Use climate enums in tuya (#70747)
97,746
0
129
71
27
298,805
31
core
14
homeassistant/components/tuya/climate.py
Python
10
{ "docstring": "Turn the device on, retaining current HVAC (if supported).", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
13
train
def train(self):
    if self._warmup_time is None:
        self._warmup_time = time.time() - self._start_time
    start = time.time()
    try:
        result = self.step()
    except Exception as e:
        raise skip_exceptions(e) from None

    assert isinstance(result, dict), "step() needs to return a dict."

    # We do not modify internal state nor update this result if duplicate.
    if RESULT_DUPLICATE in result:
        return result

    result = result.copy()

    self._iteration += 1
    self._iterations_since_restore += 1

    if result.get(TIME_THIS_ITER_S) is not None:
        time_this_iter = result[TIME_THIS_ITER_S]
    else:
        time_this_iter = time.time() - start
    self._time_total += time_this_iter
    self._time_since_restore += time_this_iter

    result.setdefault(DONE, False)

    # self._timesteps_total should only be tracked if increments provided
    if result.get(TIMESTEPS_THIS_ITER) is not None:
        if self._timesteps_total is None:
            self._timesteps_total = 0
        self._timesteps_total += result[TIMESTEPS_THIS_ITER]
        self._timesteps_since_restore += result[TIMESTEPS_THIS_ITER]

    # self._episodes_total should only be tracked if increments provided
    if result.get(EPISODES_THIS_ITER) is not None:
        if self._episodes_total is None:
            self._episodes_total = 0
        self._episodes_total += result[EPISODES_THIS_ITER]

    # self._timesteps_total should not override user-provided total
    result.setdefault(TIMESTEPS_TOTAL, self._timesteps_total)
    result.setdefault(EPISODES_TOTAL, self._episodes_total)
    result.setdefault(TRAINING_ITERATION, self._iteration)

    # Provides auto-filled neg_mean_loss for avoiding regressions
    if result.get("mean_loss"):
        result.setdefault("neg_mean_loss", -result["mean_loss"])

    now = datetime.today()
    result.update(self.get_auto_filled_metrics(now, time_this_iter))

    monitor_data = self._monitor.get_data()
    if monitor_data:
        result.update(monitor_data)

    self.log_result(result)

    if self._stdout_context:
        self._stdout_stream.flush()
    if self._stderr_context:
        self._stderr_stream.flush()

    self._last_result = result

    return result
d0678b80edf7ab50ffde93c13f4a8cdbd7dbba99
12
trainable.py
536
[rfc] [air/tune/train] Improve trial/training failure error printing (#27946) When training fails, the console output is currently cluttered with tracebacks which are hard to digest. This problem is exacerbated when running multiple trials in a tuning run. The main problems here are: 1. Tracebacks are printed multiple times: In the remote worker and on the driver 2. Tracebacks include many internal wrappers The proposed solution for 1 is to only print tracebacks once (on the driver) or never (if configured). The proposed solution for 2 is to shorten the tracebacks to include mostly user-provided code. ### Deduplicating traceback printing The solution here is to use `logger.error` instead of `logger.exception` in the `function_trainable.py` to avoid printing a traceback in the trainable. Additionally, we introduce an environment variable `TUNE_PRINT_ALL_TRIAL_ERRORS` which defaults to 1. If set to 0, trial errors will not be printed at all in the console (only the error.txt files will exist). To be discussed: We could also default this to 0, but I think the expectation is to see at least some failure output in the console logs per default. ### Removing internal wrappers from tracebacks The solution here is to introcude a magic local variable `_ray_start_tb`. In two places, we use this magic local variable to reduce the stacktrace. A utility `shorten_tb` looks for the last occurence of `_ray_start_tb` in the stacktrace and starts the traceback from there. This takes only linear time. If the magic variable is not present, the full traceback is returned - this means that if the error does not come up in user code, the full traceback is returned, giving visibility in possible internal bugs. Additionally there is an env variable `RAY_AIR_FULL_TRACEBACKS` which disables traceback shortening. Signed-off-by: Kai Fricke <[email protected]>
28,398
0
623
328
104
127,235
183
ray
47
python/ray/tune/trainable/trainable.py
Python
47
{ "docstring": "Runs one logical iteration of training.\n\n Calls ``step()`` internally. Subclasses should override ``step()``\n instead to return results.\n This method automatically fills the following fields in the result:\n\n `done` (bool): training is terminated. Filled only if not provided.\n\n `time_this_iter_s` (float): Time in seconds this iteration\n took to run. This may be overridden in order to override the\n system-computed time difference.\n\n `time_total_s` (float): Accumulated time in seconds for this\n entire experiment.\n\n `experiment_id` (str): Unique string identifier\n for this experiment. This id is preserved\n across checkpoint / restore calls.\n\n `training_iteration` (int): The index of this\n training iteration, e.g. call to train(). This is incremented\n after `step()` is called.\n\n `pid` (str): The pid of the training process.\n\n `date` (str): A formatted date of when the result was processed.\n\n `timestamp` (str): A UNIX timestamp of when the result\n was processed.\n\n `hostname` (str): Hostname of the machine hosting the training\n process.\n\n `node_ip` (str): Node ip of the machine hosting the training\n process.\n\n Returns:\n A dict that describes training progress.\n ", "language": "en", "n_whitespaces": 429, "n_words": 163, "vocab_size": 104 }
https://github.com/ray-project/ray.git
3
legacy_plugin_dir_to_plugin_type
def legacy_plugin_dir_to_plugin_type(legacy_plugin_dir_name):
    legacy_plugin_dir_name = to_text(legacy_plugin_dir_name)

    plugin_type = legacy_plugin_dir_name.removesuffix(u'_plugins')

    if plugin_type == u'library':
        plugin_type = u'modules'

    if plugin_type not in AnsibleCollectionRef.VALID_REF_TYPES:
        raise ValueError('{0} cannot be mapped to a valid collection ref type'.format(to_native(legacy_plugin_dir_name)))

    return plugin_type
884244f1b2da3c3f367e064ef4ac0123fcb12675
13
_collection_finder.py
93
Python 3.9 min for controller (#77566)
79,008
0
97
54
26
267,700
33
ansible
10
lib/ansible/utils/collection_loader/_collection_finder.py
Python
8
{ "docstring": "\n Utility method to convert from a PluginLoader dir name to a plugin ref_type\n :param legacy_plugin_dir_name: PluginLoader dir name (eg, 'action_plugins', 'library')\n :return: the corresponding plugin ref_type (eg, 'action', 'role')\n ", "language": "en", "n_whitespaces": 58, "n_words": 29, "vocab_size": 21 }
https://github.com/ansible/ansible.git
1
test_fs_checkpoint_dict
def test_fs_checkpoint_dict(self):
    checkpoint = self._prepare_fs_checkpoint()

    # Convert into dict checkpoint
    data_dict = checkpoint.to_dict()
    self.assertIsInstance(data_dict, dict)

    # Create from dict
    checkpoint = Checkpoint.from_dict(data_dict)
    self.assertTrue(checkpoint._data_dict)

    self._assert_fs_checkpoint(checkpoint)
b267be475863a66e9feedb2be5f0a30a2ed8c493
8
test_checkpoints.py
87
[ml] Add Ray ML / AIR checkpoint implementation (#22691) This PR splits up the changes in #22393 and introduces an implementation of the ML Checkpoint interface used by Ray Tune. This means, the TuneCheckpoint class implements the to/from_[bytes|dict|directory|object_ref|uri] conversion functions, as well as more high-level functions to transition between the different TuneCheckpoint classes. It also includes test cases for Tune's main conversion modes, i.e. dict - intermediate - dict and fs - intermediate - fs. These changes will be the basis for refactoring the tune interface to use TuneCheckpoint objects instead of TrialCheckpoints (externally) and instead of paths/objects (internally).
33,588
0
87
50
18
146,015
24
ray
13
python/ray/ml/tests/test_checkpoints.py
Python
7
{ "docstring": "Test conversion from fs to dict checkpoint and back.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/ray-project/ray.git
6
_validate_index_level
def _validate_index_level(self, level) -> None:
    if isinstance(level, int):
        if level < 0 and level != -1:
            raise IndexError(
                "Too many levels: Index has only 1 level, "
                f"{level} is not a valid level number"
            )
        if level > 0:
            raise IndexError(
                f"Too many levels: Index has only 1 level, not {level + 1}"
            )
    elif level != self.name:
        raise KeyError(
            f"Requested level ({level}) does not match index name ({self.name})"
        )
d13c9e034ce8a1d738766c4b1cec80c76f5523be
14
base.py
121
STYLE: fix pylint: no-else-raise (#49520) * fix pylint: no-else-raise * fix possible imbalanced tuple unpacking warning Co-authored-by: carlotta <[email protected]>
40,622
0
267
59
48
170,932
70
pandas
8
pandas/core/indexes/base.py
Python
22
{ "docstring": "\n Validate index level.\n\n For single-level Index getting level number is a no-op, but some\n verification must be done like in MultiIndex.\n\n ", "language": "en", "n_whitespaces": 50, "n_words": 21, "vocab_size": 21 }
https://github.com/pandas-dev/pandas.git
15
make_sl_entries
def make_sl_entries(sl_entries, allow_negative_stock=False, via_landed_cost_voucher=False):
    from erpnext.controllers.stock_controller import future_sle_exists

    if sl_entries:
        cancel = sl_entries[0].get("is_cancelled")
        if cancel:
            validate_cancellation(sl_entries)
            set_as_cancel(sl_entries[0].get("voucher_type"), sl_entries[0].get("voucher_no"))

        args = get_args_for_future_sle(sl_entries[0])
        future_sle_exists(args, sl_entries)

        for sle in sl_entries:
            if sle.serial_no and not via_landed_cost_voucher:
                validate_serial_no(sle)

            if cancel:
                sle["actual_qty"] = -flt(sle.get("actual_qty"))

                if sle["actual_qty"] < 0 and not sle.get("outgoing_rate"):
                    sle["outgoing_rate"] = get_incoming_outgoing_rate_for_cancel(
                        sle.item_code, sle.voucher_type, sle.voucher_no, sle.voucher_detail_no
                    )
                    sle["incoming_rate"] = 0.0

                if sle["actual_qty"] > 0 and not sle.get("incoming_rate"):
                    sle["incoming_rate"] = get_incoming_outgoing_rate_for_cancel(
                        sle.item_code, sle.voucher_type, sle.voucher_no, sle.voucher_detail_no
                    )
                    sle["outgoing_rate"] = 0.0

            if sle.get("actual_qty") or sle.get("voucher_type") == "Stock Reconciliation":
                sle_doc = make_entry(sle, allow_negative_stock, via_landed_cost_voucher)

                args = sle_doc.as_dict()

                if sle.get("voucher_type") == "Stock Reconciliation":
                    # preserve previous_qty_after_transaction for qty reposting
                    args.previous_qty_after_transaction = sle.get("previous_qty_after_transaction")

                is_stock_item = frappe.get_cached_value("Item", args.get("item_code"), "is_stock_item")
                if is_stock_item:
                    bin_name = get_or_make_bin(args.get("item_code"), args.get("warehouse"))
                    repost_current_voucher(args, allow_negative_stock, via_landed_cost_voucher)
                    update_bin_qty(bin_name, args)
                else:
                    frappe.msgprint(
                        _("Item {0} ignored since it is not a stock item").format(args.get("item_code"))
                    )
494bd9ef78313436f0424b918f200dab8fc7c20b
19
stock_ledger.py
563
style: format code with black
14,689
0
93
336
85
67,967
132
erpnext
37
erpnext/stock/stock_ledger.py
Python
38
{ "docstring": "Create SL entries from SL entry dicts\n\n\targs:\n\t - allow_negative_stock: disable negative stock valiations if true\n\t - via_landed_cost_voucher: landed cost voucher cancels and reposts\n\t entries of purchase document. This flag is used to identify if\n\t cancellation and repost is happening via landed cost voucher, in\n\t such cases certain validations need to be ignored (like negative\n\t stock)\n\t", "language": "en", "n_whitespaces": 112, "n_words": 56, "vocab_size": 46 }
https://github.com/frappe/erpnext.git
1
get_feature_names_out
def get_feature_names_out(self, input_features=None):
    class_name = self.__class__.__name__.lower()
    return np.asarray([f"{class_name}0"], dtype=object)
8991c3d7870df692fe01510e0fe6de62ea550cad
10
isotonic.py
61
ENH Adds get_feature_names to isotonic module (#22249)
75,349
0
30
35
9
258,647
9
scikit-learn
11
sklearn/isotonic.py
Python
3
{ "docstring": "Get output feature names for transformation.\n\n Parameters\n ----------\n input_features : array-like of str or None, default=None\n Ignored.\n\n Returns\n -------\n feature_names_out : ndarray of str objects\n An ndarray with one string i.e. [\"isotonicregression0\"].\n ", "language": "en", "n_whitespaces": 103, "n_words": 32, "vocab_size": 28 }
https://github.com/scikit-learn/scikit-learn.git
2
config_test
def config_test(self) -> None:
    try:
        util.run_script([self.conf('ctl'), "-c", self.nginx_conf, "-t"])
    except errors.SubprocessError as err:
        raise errors.MisconfigurationError(str(err))
16aad35d31a887dab157f9d4f5e0fe9218d06064
13
configurator.py
84
Fully type certbot-nginx module (#9124) * Work in progress * Fix type * Work in progress * Work in progress * Work in progress * Work in progress * Work in progress * Oups. * Fix typing in UnspacedList * Fix logic * Finish typing * List certbot-nginx as fully typed in tox * Fix lint * Fix checks * Organize imports * Fix typing for Python 3.6 * Fix checks * Fix lint * Update certbot-nginx/certbot_nginx/_internal/configurator.py Co-authored-by: alexzorin <[email protected]> * Update certbot-nginx/certbot_nginx/_internal/configurator.py Co-authored-by: alexzorin <[email protected]> * Fix signature of deploy_cert regarding the installer interface * Update certbot-nginx/certbot_nginx/_internal/obj.py Co-authored-by: alexzorin <[email protected]> * Fix types * Update certbot-nginx/certbot_nginx/_internal/parser.py Co-authored-by: alexzorin <[email protected]> * Precise type * Precise _coerce possible inputs/outputs * Fix type * Update certbot-nginx/certbot_nginx/_internal/http_01.py Co-authored-by: ohemorange <[email protected]> * Fix type * Remove an undesirable implementation. * Fix type Co-authored-by: alexzorin <[email protected]> Co-authored-by: ohemorange <[email protected]>
45,498
0
58
48
15
186,582
15
certbot
11
certbot-nginx/certbot_nginx/_internal/configurator.py
Python
10
{ "docstring": "Check the configuration of Nginx for errors.\n\n :raises .errors.MisconfigurationError: If config_test fails\n\n ", "language": "en", "n_whitespaces": 26, "n_words": 12, "vocab_size": 12 }
https://github.com/certbot/certbot.git
2
_is_arraylike_not_scalar
def _is_arraylike_not_scalar(array):
    return _is_arraylike(array) and not np.isscalar(array)
5d1571db5c1f5b65b1dfc47c21d08050238a60f7
9
validation.py
34
FIX Supports numpy strings and array-like in KMean.init (#22154) Co-authored-by: Jérémie du Boisberranger <[email protected]>
75,501
0
13
19
7
258,972
7
scikit-learn
5
sklearn/utils/validation.py
Python
2
{ "docstring": "Return True if array is array-like and not a scalar", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/scikit-learn/scikit-learn.git
2
get_result_type
def get_result_type(self) -> str:
    if "result_type_string" in self._bound_other_args_to_resolve:
        return self._bound_other_args_to_resolve["result_type_string"]


@DeveloperAPI
203253321d34543aa25483803ebc21e3903679b6
@DeveloperAPI
9
input_node.py
45
[serve] Add additional features to DAG visualization with Gradio (#28246)
28,431
1
35
22
11
127,398
11
ray
5
python/ray/dag/input_node.py
Python
7
{ "docstring": "Get type of the output of this DAGNode.\n\n Generated by ray.experimental.gradio_utils.type_to_string().\n ", "language": "en", "n_whitespaces": 25, "n_words": 11, "vocab_size": 10 }
https://github.com/ray-project/ray.git
1
test_album_from_url
def test_album_from_url():
    album = Album.from_url("https://open.spotify.com/album/4MQnUDGXmHOvnsWCpzeqWT")

    assert album.name == "NCS: The Best of 2017"
    assert album.url == "https://open.spotify.com/album/4MQnUDGXmHOvnsWCpzeqWT"
    assert album.artist["name"] == "Various Artists"
    assert len(album.tracks) == 16


@pytest.mark.vcr()
fa2ad657482aca9dc628e6d7062b8badf2706bb6
@pytest.mark.vcr()
9
test_album.py
94
v4 init
5,332
1
44
43
21
30,131
27
spotify-downloader
12
tests/types/test_album.py
Python
6
{ "docstring": "\n Test if Album class can be initialized from url.\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
https://github.com/spotDL/spotify-downloader.git
7
link_pyqt
def link_pyqt(executable, venv_path, *, version='5'):
    if version not in ["5", "6"]:
        raise ValueError(f"Invalid version {version}")

    try:
        get_lib_path(executable, f'PyQt{version}.sip')
    except Error:
        # There is no PyQt5.sip, so we need to copy the toplevel sip.
        sip_file = get_lib_path(executable, 'sip')
    else:
        # There is a PyQt5.sip, it'll get copied with the PyQt5 dir.
        sip_file = None

    sipconfig_file = get_lib_path(executable, 'sipconfig', required=False)
    pyqt_dir = os.path.dirname(get_lib_path(executable, f'PyQt{version}.QtCore'))

    for path in [sip_file, sipconfig_file, pyqt_dir]:
        if path is None:
            continue

        fn = os.path.basename(path)
        dest = os.path.join(venv_path, fn)

        if os.path.exists(dest):
            if needs_update(path, dest):
                remove(dest)
            else:
                continue

        copy_or_link(path, dest)
9212ba94d651e4bed5724dea28e90ca2e190b007
13
link_pyqt.py
262
scripts: Allow linking/installing pyqt-5 or pyqt-6
117,879
0
254
153
68
321,744
90
qutebrowser
22
scripts/link_pyqt.py
Python
22
{ "docstring": "Symlink the systemwide PyQt/sip into the venv.\n\n Args:\n executable: The python executable where the source files are present.\n venv_path: The path to the virtualenv site-packages.\n version: The PyQt version to use.\n ", "language": "en", "n_whitespaces": 58, "n_words": 31, "vocab_size": 25 }
https://github.com/qutebrowser/qutebrowser.git
3
get_next_unordered
def get_next_unordered(self, timeout=None):
    if not self.has_next():
        raise StopIteration("No more results to get")
    # TODO(ekl) bulk wait for performance
    res, _ = ray.wait(list(self._future_to_actor), num_returns=1, timeout=timeout)
    if res:
        [future] = res
    else:
        raise TimeoutError("Timed out waiting for result")
    i, a = self._future_to_actor.pop(future)
    self._return_actor(a)
    del self._index_to_future[i]
    self._next_return_index = max(self._next_return_index, i + 1)
    return ray.get(future)
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
11
actor_pool.py
175
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,857
0
161
107
45
132,898
51
ray
22
python/ray/util/actor_pool.py
Python
13
{ "docstring": "Returns any of the next pending results.\n\n This returns some result produced by submit(), blocking for up to\n the specified timeout until it is available. Unlike get_next(), the\n results are not always returned in same order as submitted, which can\n improve performance.\n\n Returns:\n The next result.\n\n Raises:\n TimeoutError if the timeout is reached.\n\n Examples:\n >>> pool = ActorPool(...)\n >>> pool.submit(lambda a, v: a.double.remote(v), 1)\n >>> pool.submit(lambda a, v: a.double.remote(v), 2)\n >>> print(pool.get_next_unordered())\n 4\n >>> print(pool.get_next_unordered())\n 2\n ", "language": "en", "n_whitespaces": 231, "n_words": 76, "vocab_size": 61 }
https://github.com/ray-project/ray.git
3
normalize_eols
def normalize_eols(raw_contents):
    lines_list = raw_contents.splitlines()
    # Ensure last line has its EOL
    if lines_list and lines_list[-1]:
        lines_list.append("")
    return "\n".join(lines_list)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
makemessages.py
67
Refs #33476 -- Reformatted code with Black.
50,833
0
41
35
18
204,660
19
django
6
django/core/management/commands/makemessages.py
Python
5
{ "docstring": "\n Take a block of raw text that will be passed through str.splitlines() to\n get universal newlines treatment.\n\n Return the resulting block of text with normalized `\\n` EOL sequences ready\n to be written to disk using current platform's native EOLs.\n ", "language": "en", "n_whitespaces": 55, "n_words": 39, "vocab_size": 33 }
https://github.com/django/django.git
5
tree_data
def tree_data(G, root, ident="id", children="children"):
    if G.number_of_nodes() != G.number_of_edges() + 1:
        raise TypeError("G is not a tree.")
    if not G.is_directed():
        raise TypeError("G is not directed.")
    if not nx.is_weakly_connected(G):
        raise TypeError("G is not weakly connected.")

    if ident == children:
        raise nx.NetworkXError("The values for `id` and `children` must be different.")
5b7d549bf1369440151708fa6b988eb36e1a1bde
10
tree.py
134
remove old attr keyword from json_graph/tree (#5785)
42,160
0
91
116
34
176,891
48
networkx
12
networkx/readwrite/json_graph/tree.py
Python
13
{ "docstring": "Returns data in tree format that is suitable for JSON serialization\n and use in Javascript documents.\n\n Parameters\n ----------\n G : NetworkX graph\n G must be an oriented tree\n\n root : node\n The root of the tree\n\n ident : string\n Attribute name for storing NetworkX-internal graph data. `ident` must\n have a different value than `children`. The default is 'id'.\n\n children : string\n Attribute name for storing NetworkX-internal graph data. `children`\n must have a different value than `ident`. The default is 'children'.\n\n Returns\n -------\n data : dict\n A dictionary with node-link formatted data.\n\n Raises\n ------\n NetworkXError\n If `children` and `ident` attributes are identical.\n\n Examples\n --------\n >>> from networkx.readwrite import json_graph\n >>> G = nx.DiGraph([(1, 2)])\n >>> data = json_graph.tree_data(G, root=1)\n\n To serialize with json\n\n >>> import json\n >>> s = json.dumps(data)\n\n Notes\n -----\n Node attributes are stored in this format but keys\n for attributes must be strings if you want to serialize with JSON.\n\n Graph and edge attributes are not stored.\n\n See Also\n --------\n tree_graph, node_link_data, adjacency_data\n ", "language": "en", "n_whitespaces": 308, "n_words": 165, "vocab_size": 103 }
https://github.com/networkx/networkx.git
15
_check_cg_simp
def _check_cg_simp(expr, simp, sign, lt, term_list, variables, dep_variables, build_index_expr, index_expr):
    other_part = 0
    i = 0
    while i < len(term_list):
        sub_1 = _check_cg(term_list[i], expr, len(variables))
        if sub_1 is None:
            i += 1
            continue
        if not build_index_expr.subs(sub_1).is_number:
            i += 1
            continue
        sub_dep = [(x, sub_1[x]) for x in dep_variables]
        cg_index = [None]*build_index_expr.subs(sub_1)
        for j in range(i, len(term_list)):
            sub_2 = _check_cg(term_list[j], expr.subs(sub_dep), len(variables) - len(dep_variables), sign=(sign.subs(sub_1), sign.subs(sub_dep)))
            if sub_2 is None:
                continue
            if not index_expr.subs(sub_dep).subs(sub_2).is_number:
                continue
            cg_index[index_expr.subs(sub_dep).subs(sub_2)] = j, expr.subs(lt, 1).subs(sub_dep).subs(sub_2), lt.subs(sub_2), sign.subs(sub_dep).subs(sub_2)
        if not any(i is None for i in cg_index):
            min_lt = min(*[ abs(term[2]) for term in cg_index ])
            indices = [ term[0] for term in cg_index]
            indices.sort()
            indices.reverse()
            [ term_list.pop(j) for j in indices ]
            for term in cg_index:
                if abs(term[2]) > min_lt:
                    term_list.append( (term[2] - min_lt*term[3])*term[1] )
            other_part += min_lt*(sign*simp).subs(sub_1)
        else:
            i += 1
    return term_list, other_part
2a1afca9477eb781f16d5d6b63fa37abed7740a3
20
cg.py
557
Use sympify less
48,858
0
450
366
82
198,289
139
sympy
33
sympy/physics/quantum/cg.py
Python
33
{ "docstring": " Checks for simplifications that can be made, returning a tuple of the\n simplified list of terms and any terms generated by simplification.\n\n Parameters\n ==========\n\n expr: expression\n The expression with Wild terms that will be matched to the terms in\n the sum\n\n simp: expression\n The expression with Wild terms that is substituted in place of the CG\n terms in the case of simplification\n\n sign: expression\n The expression with Wild terms denoting the sign that is on expr that\n must match\n\n lt: expression\n The expression with Wild terms that gives the leading term of the\n matched expr\n\n term_list: list\n A list of all of the terms is the sum to be simplified\n\n variables: list\n A list of all the variables that appears in expr\n\n dep_variables: list\n A list of the variables that must match for all the terms in the sum,\n i.e. the dependent variables\n\n build_index_expr: expression\n Expression with Wild terms giving the number of elements in cg_index\n\n index_expr: expression\n Expression with Wild terms giving the index terms have when storing\n them to cg_index\n\n ", "language": "en", "n_whitespaces": 317, "n_words": 172, "vocab_size": 72 }
https://github.com/sympy/sympy.git
1
test_filtering
def test_filtering(self):
    self.assertQuerysetEqual(
        Book.published_objects.all(),
        [
            "How to program",
        ],
        lambda b: b.title,
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
tests.py
52
Refs #33476 -- Reformatted code with Black.
50,134
0
93
31
13
202,491
13
django
8
tests/custom_managers/tests.py
Python
8
{ "docstring": "\n Custom managers respond to usual filtering methods\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
https://github.com/django/django.git
1
test_process_datetime_to_timestamp_freeze_time
def test_process_datetime_to_timestamp_freeze_time(time_zone, hass):
    hass.config.set_time_zone(time_zone)
    utc_now = dt_util.utcnow()
    with freeze_time(utc_now):
        epoch = utc_now.timestamp()
        assert process_datetime_to_timestamp(dt_util.utcnow()) == epoch
        now = dt_util.now()
        assert process_datetime_to_timestamp(now) == epoch

@pytest.mark.parametrize(
    "time_zone", ["Europe/Berlin", "America/Chicago", "US/Hawaii", "UTC"]
)
1d9fb4bca871f97109684419f0f9526a0c151f2d
@pytest.mark.parametrize( "time_zone", ["Europe/Berlin", "America/Chicago", "US/Hawaii", "UTC"] )
12
test_models.py
141
Fix process_datetime_to_timestamp and add test coverage (#71755)
99,327
1
71
61
24
300,467
30
core
16
tests/components/recorder/test_models.py
Python
8
{ "docstring": "Test we can handle processing database datatimes to timestamps.\n\n This test freezes time to make sure everything matches.\n ", "language": "en", "n_whitespaces": 24, "n_words": 18, "vocab_size": 17 }
https://github.com/home-assistant/core.git
1
test_null_localpart
def test_null_localpart(self) -> None:
    userinfo = {
        "sub": "tester",
        "username": None,
    }
    self.get_success(_make_callback_with_userinfo(self.hs, userinfo))
    self.assertRenderedError("mapping_error", "localpart is invalid: ")
5dd949bee6158a8b651db9f2ae417a62c8184bfd
10
test_oidc.py
75
Add type hints to some tests/handlers files. (#12224)
71,794
0
76
41
19
247,630
19
synapse
7
tests/handlers/test_oidc.py
Python
8
{ "docstring": "Mapping onto a null localpart via an empty OIDC attribute should be rejected", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
https://github.com/matrix-org/synapse.git
3
points2polygon
def points2polygon(points):
    if isinstance(points, list):
        points = np.array(points)

    assert isinstance(points, np.ndarray)
    assert (points.size % 2 == 0) and (points.size >= 8)

    point_mat = points.reshape([-1, 2])
    return plg.Polygon(point_mat)
9f62b610dea6161627200ed85d92e19b1923279a
10
fce_postprocess.py
109
add fcenet
4,551
0
52
69
23
23,234
27
PaddleOCR
12
ppocr/postprocess/fce_postprocess.py
Python
7
{ "docstring": "Convert k points to 1 polygon.\n\n Args:\n points (ndarray or list): A ndarray or a list of shape (2k)\n that indicates k points.\n\n Returns:\n polygon (Polygon): A polygon object.\n ", "language": "en", "n_whitespaces": 63, "n_words": 29, "vocab_size": 24 }
https://github.com/PaddlePaddle/PaddleOCR.git
1
import_pandas_dataframe
def import_pandas_dataframe(cls, df, name=None):
    return cls.import_arrow_table(pa.Table.from_pandas(df))
1c0935c1bc0856d43f69c1e32498636ee24ebc85
10
base_worker.py
42
FEAT-#4913: Enabling pyhdk (#4900) Co-authored-by: ienkovich <[email protected]> Signed-off-by: izamyati <[email protected]>
35,959
0
20
26
6
154,398
6
modin
8
modin/experimental/core/execution/native/implementations/omnisci_on_native/base_worker.py
Python
2
{ "docstring": "\n Import ``pandas.DataFrame`` to the worker.\n\n Parameters\n ----------\n df : pandas.DataFrame\n A frame to import.\n name : str, optional\n A table name to use. None to generate a unique name.\n\n Returns\n -------\n str\n Imported table name.\n ", "language": "en", "n_whitespaces": 132, "n_words": 35, "vocab_size": 27 }
https://github.com/modin-project/modin.git
9
get_operation_id_base
def get_operation_id_base(self, path, method, action):
    model = getattr(getattr(self.view, 'queryset', None), 'model', None)

    if self.operation_id_base is not None:
        name = self.operation_id_base

    # Try to deduce the ID from the view's model
    elif model is not None:
        name = model.__name__

    # Try with the serializer class name
    elif self.get_serializer(path, method) is not None:
        name = self.get_serializer(path, method).__class__.__name__
        if name.endswith('Serializer'):
            name = name[:-10]

    # Fallback to the view name
    else:
        name = self.view.__class__.__name__
        if name.endswith('APIView'):
            name = name[:-7]
        elif name.endswith('View'):
            name = name[:-4]

        # Due to camel-casing of classes and `action` being lowercase, apply title in order to find if action truly
        # comes at the end of the name
        if name.endswith(action.title()):  # ListView, UpdateAPIView, ThingDelete ...
            name = name[:-len(action)]

    if action == 'list':
        name = pluralize(name)

    return name
9e328a9549b8cc5fbeff7b70cf2523fa3d97518f
16
openapi.py
295
Fix OpenAPI operation name plural appropriately (#8017)
9,574
0
385
177
72
48,712
126
django-rest-framework
17
rest_framework/schemas/openapi.py
Python
21
{ "docstring": "\n Compute the base part for operation ID from the model, serializer or view name.\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 13 }
https://github.com/encode/django-rest-framework.git
2
workflow_logging_context
def workflow_logging_context(job_id) -> None:
    node = ray.worker._global_node
    original_out_file, original_err_file = node.get_log_file_handles(
        get_worker_log_file_name("WORKER")
    )
    out_file, err_file = node.get_log_file_handles(
        get_worker_log_file_name("WORKER", job_id)
    )
    try:
        configure_log_file(out_file, err_file)
        yield
    finally:
        configure_log_file(original_out_file, original_err_file)
e8fc66af348f2afd2b578fe1c6776cc88ea82499
11
workflow_context.py
104
[Workflow]Make workflow logs publish to the correct driver. (#24089) All workflow tasks are executed as remote functions that submitted from WorkflowManagmentActor. WorkflowManagmentActor is a detached long-running actor whose owner is the first driver in the cluster that runs the very first workflow execution. Therefore, for new drivers that run workflows, the loggings won't be properly published back to the driver because loggings are saved and published based on job_id and the job_id is always the first driver's job_id as the ownership goes like: first_driver -> WorkflowManagmentActor -> workflow executions using remote functions. To solve this, during workflow execution, we pass the actual driver's job_id along with execution, and re-configure the logging files on each worker that runs the remote functions. Notice that we need to do this in multiple places as a workflow task is executed with more than one remote functions that are running in different workers.
31,619
0
86
60
23
139,165
27
ray
13
python/ray/workflow/workflow_context.py
Python
27
{ "docstring": "Initialize the workflow logging context.\n\n Workflow executions are running as remote functions from\n WorkflowManagementActor. Without logging redirection, workflow\n inner execution logs will be pushed to the driver that initially\n created WorkflowManagementActor rather than the driver that\n actually submits the current workflow execution.\n We use this conext manager to re-configure the log files to send\n the logs to the correct driver, and to restore the log files once\n the execution is done.\n\n Args:\n job_id: The ID of the job that submits the workflow execution.\n ", "language": "en", "n_whitespaces": 120, "n_words": 83, "vocab_size": 56 }
https://github.com/ray-project/ray.git
4
load_model_from_db
def load_model_from_db(cls, instance_or_id, allow_cache=True):
    if isinstance(instance_or_id, int):
        if hasattr(cls.objects, "get_from_cache") and allow_cache:
            return cls.objects.get_from_cache(pk=instance_or_id)
        return cls.objects.get(pk=instance_or_id)
    return instance_or_id
8c83731694d352c5832ef6dd26ec57333bc8d5b5
12
base.py
89
feat(celery): Disallow complex pickle for new tasks (#35811)
18,819
0
52
56
15
91,851
18
sentry
11
src/sentry/tasks/base.py
Python
6
{ "docstring": "Utility function to allow a task to transition to passing ids rather than model instances.", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 13 }
https://github.com/getsentry/sentry.git
2
ndcg_score
def ndcg_score(y_true, y_score, *, k=None, sample_weight=None, ignore_ties=False):
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)

    if y_true.min() < 0:
        # TODO(1.4): Replace warning w/ ValueError
        warnings.warn(
            "ndcg_score should not be used on negative y_true values. ndcg_score will"
            " raise a ValueError on negative y_true values starting from version 1.4.",
            FutureWarning,
        )
    _check_dcg_target_type(y_true)
    gain = _ndcg_sample_scores(y_true, y_score, k=k, ignore_ties=ignore_ties)
    return np.average(gain, weights=sample_weight)
9bb209817a978d6da0fec392ed212685efe08694
11
_ranking.py
154
ENH Added warning for `ndcg_score` when used w/ negative `y_true` values (#23461) Co-authored-by: trinhcon <[email protected]> Co-authored-by: Victor Ko <[email protected]> Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Jérémie du Boisberranger <[email protected]>
76,130
0
142
100
54
260,239
64
scikit-learn
19
sklearn/metrics/_ranking.py
Python
13
{ "docstring": "Compute Normalized Discounted Cumulative Gain.\n\n Sum the true scores ranked in the order induced by the predicted scores,\n after applying a logarithmic discount. Then divide by the best possible\n score (Ideal DCG, obtained for a perfect ranking) to obtain a score between\n 0 and 1.\n\n This ranking metric returns a high value if true labels are ranked high by\n ``y_score``.\n\n Parameters\n ----------\n y_true : ndarray of shape (n_samples, n_labels)\n True targets of multilabel classification, or true scores of entities\n to be ranked. Negative values in `y_true` may result in an output\n that is not between 0 and 1.\n\n .. versionchanged:: 1.2\n These negative values are deprecated, and will raise an error in v1.4.\n\n y_score : ndarray of shape (n_samples, n_labels)\n Target scores, can either be probability estimates, confidence values,\n or non-thresholded measure of decisions (as returned by\n \"decision_function\" on some classifiers).\n\n k : int, default=None\n Only consider the highest k scores in the ranking. If `None`, use all\n outputs.\n\n sample_weight : ndarray of shape (n_samples,), default=None\n Sample weights. If `None`, all samples are given the same weight.\n\n ignore_ties : bool, default=False\n Assume that there are no ties in y_score (which is likely to be the\n case if y_score is continuous) for efficiency gains.\n\n Returns\n -------\n normalized_discounted_cumulative_gain : float in [0., 1.]\n The averaged NDCG scores for all samples.\n\n See Also\n --------\n dcg_score : Discounted Cumulative Gain (not normalized).\n\n References\n ----------\n `Wikipedia entry for Discounted Cumulative Gain\n <https://en.wikipedia.org/wiki/Discounted_cumulative_gain>`_\n\n Jarvelin, K., & Kekalainen, J. (2002).\n Cumulated gain-based evaluation of IR techniques. ACM Transactions on\n Information Systems (TOIS), 20(4), 422-446.\n\n Wang, Y., Wang, L., Li, Y., He, D., Chen, W., & Liu, T. Y. (2013, May).\n A theoretical analysis of NDCG ranking measures. In Proceedings of the 26th\n Annual Conference on Learning Theory (COLT 2013)\n\n McSherry, F., & Najork, M. (2008, March). Computing information retrieval\n performance measures efficiently in the presence of tied scores. In\n European conference on information retrieval (pp. 414-421). 
Springer,\n Berlin, Heidelberg.\n\n Examples\n --------\n >>> import numpy as np\n >>> from sklearn.metrics import ndcg_score\n >>> # we have groud-truth relevance of some answers to a query:\n >>> true_relevance = np.asarray([[10, 0, 0, 1, 5]])\n >>> # we predict some scores (relevance) for the answers\n >>> scores = np.asarray([[.1, .2, .3, 4, 70]])\n >>> ndcg_score(true_relevance, scores)\n 0.69...\n >>> scores = np.asarray([[.05, 1.1, 1., .5, .0]])\n >>> ndcg_score(true_relevance, scores)\n 0.49...\n >>> # we can set k to truncate the sum; only top k answers contribute.\n >>> ndcg_score(true_relevance, scores, k=4)\n 0.35...\n >>> # the normalization takes k into account so a perfect answer\n >>> # would still get 1.0\n >>> ndcg_score(true_relevance, true_relevance, k=4)\n 1.0\n >>> # now we have some ties in our prediction\n >>> scores = np.asarray([[1, 0, 0, 0, 1]])\n >>> # by default ties are averaged, so here we get the average (normalized)\n >>> # true relevance of our top predictions: (10 / 10 + 5 / 10) / 2 = .75\n >>> ndcg_score(true_relevance, scores, k=1)\n 0.75\n >>> # we can choose to ignore ties for faster results, but only\n >>> # if we know there aren't ties in our scores, otherwise we get\n >>> # wrong results:\n >>> ndcg_score(true_relevance,\n ... scores, k=1, ignore_ties=True)\n 0.5\n ", "language": "en", "n_whitespaces": 835, "n_words": 525, "vocab_size": 314 }
https://github.com/scikit-learn/scikit-learn.git
1
get_supplier_block_status
def get_supplier_block_status(party_name):
    supplier = frappe.get_doc("Supplier", party_name)
    info = {
        "on_hold": supplier.on_hold,
        "release_date": supplier.release_date,
        "hold_type": supplier.hold_type,
    }
    return info
494bd9ef78313436f0424b918f200dab8fc7c20b
9
accounts_controller.py
70
style: format code with black
13,959
0
10
40
16
65,623
18
erpnext
9
erpnext/controllers/accounts_controller.py
Python
8
{ "docstring": "\n\tReturns a dict containing the values of `on_hold`, `release_date` and `hold_type` of\n\ta `Supplier`\n\t", "language": "en", "n_whitespaces": 12, "n_words": 14, "vocab_size": 12 }
https://github.com/frappe/erpnext.git
1
compare_total
def compare_total(self, a, b):
    a = _convert_other(a, raiseit=True)
    return a.compare_total(b)
8198943edd73a363c266633e1aa5b2a9e9c9f526
9
_pydecimal.py
43
add python 3.10.4 for windows
55,795
0
31
27
10
219,780
10
XX-Net
6
python3.10.4/Lib/_pydecimal.py
Python
3
{ "docstring": "Compares two operands using their abstract representation.\n\n This is not like the standard compare, which use their numerical\n value. Note that a total ordering is defined for all possible abstract\n representations.\n\n >>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9'))\n Decimal('-1')\n >>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12'))\n Decimal('-1')\n >>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3'))\n Decimal('-1')\n >>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30'))\n Decimal('0')\n >>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300'))\n Decimal('1')\n >>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN'))\n Decimal('-1')\n >>> ExtendedContext.compare_total(1, 2)\n Decimal('-1')\n >>> ExtendedContext.compare_total(Decimal(1), 2)\n Decimal('-1')\n >>> ExtendedContext.compare_total(1, Decimal(2))\n Decimal('-1')\n ", "language": "en", "n_whitespaces": 224, "n_words": 67, "vocab_size": 46 }
https://github.com/XX-net/XX-Net.git
1
step_pred
def step_pred(self, score, x, t):
    # TODO(Patrick) better comments + non-PyTorch
    t = self.repeat_scalar(t, x.shape[0])
    timesteps = self.long((t * (len(self.timesteps) - 1)))

    sigma = self.discrete_sigmas[timesteps]
    adjacent_sigma = self.get_adjacent_sigma(timesteps, t)
    drift = self.zeros_like(x)
    diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

    # equation 6 in the paper: the score modeled by the network is grad_x log pt(x)
    # also equation 47 shows the analog from SDE models to ancestral sampling methods
    drift = drift - diffusion[:, None, None, None] ** 2 * score

    # equation 6: sample noise for the diffusion term of
    noise = self.randn_like(x)
    x_mean = x - drift  # subtract because `dt` is a small negative timestep
    # TODO is the variable diffusion the correct scaling term for the noise?
    x = x_mean + diffusion[:, None, None, None] * noise  # add impact of diffusion field g

    return x, x_mean
63c68d979a8e4a9c9fea306bdf63e73812843453
15
scheduling_sde_ve.py
215
VE/VP SDE updates (#90) * improve comments for sde_ve scheduler, init tests * more comments, tweaking pipelines * timesteps --> num_training_timesteps, some comments * merge cpu test, add m1 data * fix scheduler tests with num_train_timesteps * make np compatible, add tests for sde ve * minor default variable fixes * make style and fix-copies Co-authored-by: Patrick von Platen <[email protected]>
120,850
0
263
140
87
336,027
141
diffusers
20
src/diffusers/schedulers/scheduling_sde_ve.py
Python
12
{ "docstring": "\n Predict the sample at the previous timestep by reversing the SDE.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 9 }
https://github.com/huggingface/diffusers.git
1
test_get_bad_image
def test_get_bad_image(self):
    # Get
    response = self.client.get(
        reverse(
            "wagtailimages:generate_url", args=(self.image.id + 1, "fill-800x600")
        )
    )

    # Check response
    self.assertEqual(response.status_code, 404)
    self.assertEqual(response["Content-Type"], "application/json")

    # Check JSON
    self.assertJSONEqual(
        response.content.decode(),
        json.dumps(
            {
                "error": "Cannot find image.",
            }
        ),
    )
d10f15e55806c6944827d801cd9c2d53f5da4186
15
test_admin_views.py
137
Reformat with black
16,373
0
225
79
30
75,173
36
wagtail
16
wagtail/images/tests/test_admin_views.py
Python
16
{ "docstring": "\n This tests that the view gives a 404 response if a user attempts to use it with an image which doesn't exist\n ", "language": "en", "n_whitespaces": 37, "n_words": 22, "vocab_size": 21 }
https://github.com/wagtail/wagtail.git
3
get
def get(self, url, cache=True, **kwargs):
    if not url.isValid():
        urlutils.invalid_url_error(url, "start download")
        return None

    req = QNetworkRequest(url)
    user_agent = websettings.user_agent(url)
    req.setHeader(QNetworkRequest.KnownHeaders.UserAgentHeader, user_agent)

    if not cache:
        req.setAttribute(QNetworkRequest.Attribute.CacheSaveControlAttribute, False)

    return self.get_request(req, **kwargs)
0877fb0d78635692e481c8bde224fac5ad0dd430
11
qtnetworkdownloads.py
136
Run scripts/dev/rewrite_enums.py
117,539
0
111
85
25
321,111
29
qutebrowser
19
qutebrowser/browser/qtnetworkdownloads.py
Python
10
{ "docstring": "Start a download with a link URL.\n\n Args:\n url: The URL to get, as QUrl\n cache: If set to False, don't cache the response.\n **kwargs: passed to get_request().\n\n Return:\n The created DownloadItem.\n ", "language": "en", "n_whitespaces": 97, "n_words": 32, "vocab_size": 28 }
https://github.com/qutebrowser/qutebrowser.git
23
getcomments
def getcomments(object):
    try:
        lines, lnum = findsource(object)
    except (OSError, TypeError):
        return None

    if ismodule(object):
        # Look for a comment block at the top of the file.
        start = 0
        if lines and lines[0][:2] == '#!': start = 1
        while start < len(lines) and lines[start].strip() in ('', '#'):
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(lines[end].expandtabs())
                end = end + 1
            return ''.join(comments)

    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and lines[end].lstrip()[:1] == '#' and \
            indentsize(lines[end]) == indent:
            comments = [lines[end].expandtabs().lstrip()]
            if end > 0:
                end = end - 1
                comment = lines[end].expandtabs().lstrip()
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0: break
                    comment = lines[end].expandtabs().lstrip()
            while comments and comments[0].strip() == '#':
                comments[:1] = []
            while comments and comments[-1].strip() == '#':
                comments[-1:] = []
            return ''.join(comments)
8198943edd73a363c266633e1aa5b2a9e9c9f526
20
inspect.py
571
add python 3.10.4 for windows
55,330
0
568
345
74
218,471
178
XX-Net
20
python3.10.4/Lib/inspect.py
Python
36
{ "docstring": "Get lines of comments immediately preceding an object's source code.\n\n Returns None when source can't be found.\n ", "language": "en", "n_whitespaces": 23, "n_words": 17, "vocab_size": 16 }
https://github.com/XX-net/XX-Net.git
7
convert_to_strings
def convert_to_strings(cls, obj):
    if isinstance(obj, dict):
        return {
            cls.convert_to_strings(key): cls.convert_to_strings(value)
            for key, value in obj.items()
        }
    elif isinstance(obj, list) or isinstance(obj, tuple):
        return [cls.convert_to_strings(element) for element in obj]
    elif isinstance(obj, bytes):
        return str(obj)[2:-1]

    return obj
8c2428c9d355ca5fbc3dd90e9820ceb1cc795837
11
jsondump.py
140
[autofix.ci] apply automated fixes
74,068
0
144
91
26
253,331
35
mitmproxy
13
examples/contrib/jsondump.py
Python
11
{ "docstring": "\n Recursively convert all list/dict elements of type `bytes` into strings.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/mitmproxy/mitmproxy.git
1
test_makemigrations_interactive_reject
def test_makemigrations_interactive_reject(self):
    # Monkeypatch interactive questioner to auto reject
    with mock.patch('builtins.input', mock.Mock(return_value='N')):
        with self.temporary_migration_module(module="migrations.test_migrations_conflict") as migration_dir:
            with captured_stdout():
                call_command(
                    'makemigrations',
                    'migrations',
                    name='merge',
                    merge=True,
                    interactive=True,
                    verbosity=0,
                )
            merge_file = os.path.join(migration_dir, '0003_merge.py')
            self.assertFalse(os.path.exists(merge_file))
0ab58c120939093fea90822f376e1866fc714d1f
15
test_commands.py
158
Refs #29026 -- Allowed customizing InteractiveMigrationQuestioner's prompt destination. Previously, the questioner did not obey the value of stdout provided to the command.
50,165
0
284
88
29
202,906
31
django
21
tests/migrations/test_commands.py
Python
14
{ "docstring": "\n makemigrations enters and exits interactive mode properly.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
https://github.com/django/django.git
16
customize_compiler
def customize_compiler(compiler):
    if compiler.compiler_type == "unix":
        if sys.platform == "darwin":
            # Perform first-time customization of compiler-related
            # config vars on OS X now that we know we need a compiler.
            # This is primarily to support Pythons from binary
            # installers.  The kind and paths to build tools on
            # the user system may vary significantly from the system
            # that Python itself was built on.  Also the user OS
            # version and build tools may not support the same set
            # of CPU architectures for universal builds.
            if not _config_vars.get('CUSTOMIZED_OSX_COMPILER'):
                import _osx_support
                _osx_support.customize_compiler(_config_vars)
                _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'

        (cc, cxx, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \
            get_config_vars('CC', 'CXX', 'CFLAGS', 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')

        if 'CC' in os.environ:
            newcc = os.environ['CC']
            if (sys.platform == 'darwin'
                    and 'LDSHARED' not in os.environ
                    and ldshared.startswith(cc)):
                # On OS X, if CC is overridden, use that as the default
                #       command for LDSHARED as well
                ldshared = newcc + ldshared[len(cc):]
            cc = newcc
        if 'CXX' in os.environ:
            cxx = os.environ['CXX']
        if 'LDSHARED' in os.environ:
            ldshared = os.environ['LDSHARED']
        if 'CPP' in os.environ:
            cpp = os.environ['CPP']
        else:
            cpp = cc + " -E"           # not always
        if 'LDFLAGS' in os.environ:
            ldshared = ldshared + ' ' + os.environ['LDFLAGS']
        if 'CFLAGS' in os.environ:
            cflags = cflags + ' ' + os.environ['CFLAGS']
            ldshared = ldshared + ' ' + os.environ['CFLAGS']
        if 'CPPFLAGS' in os.environ:
            cpp = cpp + ' ' + os.environ['CPPFLAGS']
            cflags = cflags + ' ' + os.environ['CPPFLAGS']
            ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
        if 'AR' in os.environ:
            ar = os.environ['AR']
        if 'ARFLAGS' in os.environ:
            archiver = ar + ' ' + os.environ['ARFLAGS']
        else:
            archiver = ar + ' ' + ar_flags

        cc_cmd = cc + ' ' + cflags
        compiler.set_executables(
            preprocessor=cpp,
            compiler=cc_cmd,
            compiler_so=cc_cmd + ' ' + ccshared,
            compiler_cxx=cxx,
            linker_so=ldshared,
            linker_exe=cc,
            archiver=archiver)

        compiler.shared_lib_extension = shlib_suffix
8198943edd73a363c266633e1aa5b2a9e9c9f526
17
sysconfig.py
658
add python 3.10.4 for windows
56,846
0
957
369
147
223,004
303
XX-Net
32
python3.10.4/Lib/distutils/sysconfig.py
Python
50
{ "docstring": "Do any platform-specific customization of a CCompiler instance.\n\n Mainly needed on Unix, so we can plug in the information that\n varies across Unices and is stored in Python's Makefile.\n ", "language": "en", "n_whitespaces": 38, "n_words": 29, "vocab_size": 28 }
https://github.com/XX-net/XX-Net.git
8
_check_list_filter_item
def _check_list_filter_item(self, obj, item, label):
    from django.contrib.admin import FieldListFilter, ListFilter

    if callable(item) and not isinstance(item, models.Field):
        # If item is option 3, it should be a ListFilter...
        if not _issubclass(item, ListFilter):
            return must_inherit_from(
                parent="ListFilter", option=label, obj=obj, id="admin.E113"
            )
        # ...  but not a FieldListFilter.
        elif issubclass(item, FieldListFilter):
            return [
                checks.Error(
                    "The value of '%s' must not inherit from 'FieldListFilter'." % label,
                    obj=obj.__class__,
                    id="admin.E114",
                )
            ]
        else:
            return []
    elif isinstance(item, (tuple, list)):
        # item is option #2
        field, list_filter_class = item
        if not _issubclass(list_filter_class, FieldListFilter):
            return must_inherit_from(
                parent="FieldListFilter",
                option="%s[1]" % label,
                obj=obj,
                id="admin.E115",
            )
        else:
            return []
    else:
        # item is option #1
        field = item

        # Validate the field string
        try:
            get_fields_from_path(obj.model, field)
        except (NotRelationField, FieldDoesNotExist):
            return [
                checks.Error(
                    "The value of '%s' refers to '%s', which does not refer to a Field."
                    % (label, field),
                    obj=obj.__class__,
                    id="admin.E116",
                )
            ]
        else:
            return []
9c19aff7c7561e3a82978a272ecdaad40dda5c00
16
checks.py
331
Refs #33476 -- Reformatted code with Black.
50,332
0
880
207
89
203,361
144
django
31
django/contrib/admin/checks.py
Python
44
{ "docstring": "\n Check one item of `list_filter`, i.e. check if it is one of three options:\n 1. 'field' -- a basic field filter, possibly w/ relationships (e.g.\n 'field__rel')\n 2. ('field', SomeFieldListFilter) - a field-based list filter class\n 3. SomeListFilter - a non-field list filter class\n ", "language": "en", "n_whitespaces": 89, "n_words": 43, "vocab_size": 35 }
https://github.com/django/django.git
1
get_redemption_details
def get_redemption_details(customer, loyalty_program, company):
    return frappe._dict(
        frappe.db.sql(
            ,
            (customer, loyalty_program, company),
        )
    )
494bd9ef78313436f0424b918f200dab8fc7c20b
10
loyalty_point_entry.py
47
style: format code with black
13,746
0
6
32
11
64,880
13
erpnext
8
erpnext/accounts/doctype/loyalty_point_entry/loyalty_point_entry.py
Python
12
{ "docstring": "\n\t\tselect redeem_against, sum(loyalty_points)\n\t\tfrom `tabLoyalty Point Entry`\n\t\twhere customer=%s and loyalty_program=%s and loyalty_points<0 and company=%s\n\t\tgroup by redeem_against\n\t", "language": "en", "n_whitespaces": 14, "n_words": 18, "vocab_size": 16 }
https://github.com/frappe/erpnext.git
4
get_all_tests
def get_all_tests():
    test_root_dir = os.path.join(PATH_TO_TRANFORMERS, "tests")

    # test folders/files directly under `tests` folder
    tests = os.listdir(test_root_dir)
    tests = sorted(
        list(filter(lambda x: os.path.isdir(x) or x.startswith("tests/test_"), [f"tests/{x}" for x in tests]))
    )

    # model specific test folders
    model_tests_folders = os.listdir(os.path.join(test_root_dir, "models"))
    model_test_folders = sorted(list(filter(os.path.isdir, [f"tests/models/{x}" for x in model_tests_folders])))

    tests.remove("tests/models")
    tests = model_test_folders + tests

    return tests
38043d8453b82a9c712f8d5c98323150fbee7503
17
tests_fetcher.py
205
Update self-push workflow (#17177) * update push ci * install git-python * update comment * update deepspeed jobs * fix report * skip 2 more tests that require fairscale * Fix changes in test_fetcher.py (to deal with `setup.py` is changed) * set RUN_PT_TF_CROSS_TESTS=1 and final clean-up * remove SIGOPT_API_TOKEN * remove echo "$matrix_folders" Co-authored-by: ydshieh <[email protected]>
6,971
0
98
118
40
38,418
55
transformers
17
utils/tests_fetcher.py
Python
11
{ "docstring": "\n Return a list of paths to all test folders and files under `tests`. All paths are rooted at `tests`.\n\n - folders under `tests`: `tokenization`, `pipelines`, etc. The folder `models` is excluded.\n - folders under `tests/models`: `bert`, `gpt2`, etc.\n - test files under `tests`: `test_modeling_common.py`, `test_tokenization_common.py`, etc.\n ", "language": "en", "n_whitespaces": 62, "n_words": 46, "vocab_size": 32 }
https://github.com/huggingface/transformers.git
4
promote_snaps
def promote_snaps(version):
    assert_logged_into_snapcraft()
    for snap in SNAPS:
        revisions = get_snap_revisions(snap, version)
        # The loop below is kind of slow, so let's print some output about what
        # it is doing.
        print('Releasing', snap, 'snaps to the stable channel')
        for revision in revisions:
            cmd = ['snapcraft', 'release', snap, revision, 'stable']
            try:
                subprocess.run(cmd, check=True, stdout=subprocess.PIPE, universal_newlines=True)
            except subprocess.CalledProcessError as e:
                print("The command", f"'{' '.join(cmd)}'", "failed.")
                print("The output printed to stdout was:")
                print(e.stdout)
                raise
39e8d14e1b221bf37526cc05ecc83beee30a3c57
19
finish_release.py
180
Set up 2.0 pre-releases (#9400) * update credential info * update release tooling to use candidate channel * split deploy jobs * pass parameter through * add 2.0 pipeline prerelease * add comments * quote file path
45,647
0
222
97
61
186,894
70
certbot
19
tools/finish_release.py
Python
14
{ "docstring": "Promotes all Certbot snaps from the candidate to stable channel.\n\n If the snaps have already been released to the stable channel, this\n function will try to release them again which has no effect.\n\n :param str version: the version number that should be found in the\n candidate channel, e.g. 1.7.0\n\n :raises SystemExit: if the command snapcraft is unavailable or it\n isn't logged into an account\n\n :raises subprocess.CalledProcessError: if a snapcraft command fails\n for another reason\n\n ", "language": "en", "n_whitespaces": 113, "n_words": 74, "vocab_size": 59 }
https://github.com/certbot/certbot.git
5
get_ss_earning_map
def get_ss_earning_map(salary_slips, currency, company_currency):
    ss_earnings = frappe.db.sql(
        % (", ".join(["%s"] * len(salary_slips))),
        tuple([d.name for d in salary_slips]),
        as_dict=1,
    )

    ss_earning_map = {}
    for d in ss_earnings:
        ss_earning_map.setdefault(d.parent, frappe._dict()).setdefault(d.salary_component, 0.0)
        if currency == company_currency:
            ss_earning_map[d.parent][d.salary_component] += flt(d.amount) * flt(
                d.exchange_rate if d.exchange_rate else 1
            )
        else:
            ss_earning_map[d.parent][d.salary_component] += flt(d.amount)

    return ss_earning_map
494bd9ef78313436f0424b918f200dab8fc7c20b
15
salary_register.py
221
style: format code with black
14,391
0
34
145
38
66,963
50
erpnext
22
erpnext/payroll/report/salary_register/salary_register.py
Python
18
{ "docstring": "select sd.parent, sd.salary_component, sd.amount, ss.exchange_rate, ss.name\n\t\tfrom `tabSalary Detail` sd, `tabSalary Slip` ss where sd.parent=ss.name and sd.parent in (%s)", "language": "en", "n_whitespaces": 17, "n_words": 19, "vocab_size": 18 }
https://github.com/frappe/erpnext.git
5
_get_save_kwargs
def _get_save_kwargs(self) -> Dict[str, Union[bool, int, str]]:
    filetype = self.config["format"]
    kwargs = {}
    if filetype in ("gif", "jpg", "png"):
        kwargs["optimize"] = self.config["optimize"]
    if filetype == "gif":
        kwargs["interlace"] = self.config["gif_interlace"]
    if filetype == "png":
        kwargs["compress_level"] = self.config["png_compress_level"]
    if filetype == "tif":
        kwargs["compression"] = self.config["tif_compression"]
    logger.debug(kwargs)
    return kwargs
049314429f71a21e6595e9d27e9e36f6a3479c42
10
pillow.py
195
Convert: Add option to output mask separately for draw-transparent
20,507
0
153
110
31
101,070
46
faceswap
12
plugins/convert/writer/pillow.py
Python
20
{ "docstring": " Return the save parameters for the file format\n\n Returns\n -------\n dict\n The specific keyword arguments for the selected file format\n ", "language": "en", "n_whitespaces": 60, "n_words": 20, "vocab_size": 15 }
https://github.com/deepfakes/faceswap.git
1
test_multi_conv
def test_multi_conv(self):
    n = coord_net_spec()
    # multi bottom/top
    n.conv_data, n.conv_aux = L.Convolution(
        n.data, n.aux, ntop=2,
        num_output=10, kernel_size=5, stride=2, pad=0)
    ax1, a1, b1 = coord_map_from_to(n.conv_data, n.data)
    ax2, a2, b2 = coord_map_from_to(n.conv_aux, n.aux)
    self.assertEquals(ax1, ax2)
    self.assertEquals(a1, a2)
    self.assertEquals(b1, b2)
cc4d0564756ca067516f71718a3d135996525909
9
test_coord_map.py
160
Balanced joint maximum mean discrepancy for deep transfer learning
12,080
0
122
107
34
60,302
37
transferlearning
23
code/deep/BJMMD/caffe/python/caffe/test/test_coord_map.py
Python
10
{ "docstring": "\n Multiple bottoms/tops of a layer are identically mapped.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
https://github.com/jindongwang/transferlearning.git
7
get_submatrix
def get_submatrix(self, matrix):
    r
    reduced, non_reduced = self.get_reduced_nonreduced()

    # if reduced == [], then det(matrix) should be 1
    if reduced == []:
        return diag([1])

    # reduced != []
    reduction_set = [v ** self.degrees[i] for i, v in enumerate(self.variables)]

    ais = [self.polynomials[i].coeff(reduction_set[i]) for i in range(self.n)]

    reduced_matrix = matrix[:, reduced]
    keep = []
    for row in range(reduced_matrix.rows):
        check = [ai in reduced_matrix[row, :] for ai in ais]
        if True not in check:
            keep.append(row)

    return matrix[keep, non_reduced]
9d58006fc0a23afcba38f641c9472917c436428a
12
multivariate_resultants.py
212
Code cleanup
48,966
0
237
138
54
198,503
75
sympy
25
sympy/polys/multivariate_resultants.py
Python
24
{ "docstring": "\n Returns\n =======\n\n macaulay_submatrix: Matrix\n The Macaulay denominator matrix. Columns that are non reduced are kept.\n The row which contains one of the a_{i}s is dropped. a_{i}s\n are the coefficients of x_i ^ {d_i}.\n ", "language": "en", "n_whitespaces": 95, "n_words": 33, "vocab_size": 27 }
https://github.com/sympy/sympy.git
3
check_components_alive
def check_components_alive(cluster, component_type, check_component_alive):
    worker_nodes = get_other_nodes(cluster)
    assert len(worker_nodes) > 0
    for node in worker_nodes:
        process = node.all_processes[component_type][0].process
        if check_component_alive:
            assert process.poll() is None
        else:
            print(
                "waiting for "
                + component_type
                + " with PID "
                + str(process.pid)
                + "to terminate"
            )
            process.wait()
            print(
                "done waiting for "
                + component_type
                + " with PID "
                + str(process.pid)
                + "to terminate"
            )
            assert not process.poll() is None

@pytest.mark.parametrize(
    "ray_start_cluster",
    [
        {
            "num_cpus": 8,
            "num_nodes": 4,
            "_system_config": {"num_heartbeats_timeout": 10},
        }
    ],
    indirect=True,
)
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
@pytest.mark.parametrize( "ray_start_cluster", [ { "num_cpus": 8, "num_nodes": 4, "_system_config": {"num_heartbeats_timeout": 10}, } ], indirect=True, )
17
test_component_failures_2.py
235
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,534
1
398
102
52
131,456
81
ray
19
python/ray/tests/test_component_failures_2.py
Python
24
{ "docstring": "Check that a given component type is alive on all worker nodes.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/ray-project/ray.git
3
compute_current_divisions
def compute_current_divisions(self, col=None):
    if col is None and self.known_divisions:
        return self.divisions

    from dask.dataframe.shuffle import compute_divisions

    return compute_divisions(self, col=col)
cccb9d8d8e33a891396b1275c2448c352ef40c27
8
core.py
62
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
36,561
0
57
40
17
156,105
18
dask
9
dask/dataframe/core.py
Python
5
{ "docstring": "Compute the current divisions of the DataFrame.\n\n This method triggers immediate computation. If you find yourself running this command\n repeatedly for the same dataframe, we recommend storing the result\n so you don't have to rerun it.\n\n If the column or index values overlap between partitions, raises ``ValueError``.\n To prevent this, make sure the data are sorted by the column or index.\n\n Parameters\n ----------\n col : string, optional\n Calculate the divisions for a non-index column by passing in the name of the column.\n If col is not specified, the index will be used to calculate divisions.\n In this case, if the divisions are already known, they will be returned\n immediately without computing.\n\n Examples\n --------\n >>> import dask\n >>> ddf = dask.datasets.timeseries(start=\"2021-01-01\", end=\"2021-01-07\", freq=\"1H\").clear_divisions()\n >>> divisions = ddf.compute_current_divisions()\n >>> print(divisions) # doctest: +NORMALIZE_WHITESPACE\n (Timestamp('2021-01-01 00:00:00'),\n Timestamp('2021-01-02 00:00:00'),\n Timestamp('2021-01-03 00:00:00'),\n Timestamp('2021-01-04 00:00:00'),\n Timestamp('2021-01-05 00:00:00'),\n Timestamp('2021-01-06 00:00:00'),\n Timestamp('2021-01-06 23:00:00'))\n\n >>> ddf.divisions = divisions\n >>> ddf.known_divisions\n True\n\n >>> ddf = ddf.reset_index().clear_divisions()\n >>> divisions = ddf.compute_current_divisions(\"timestamp\")\n >>> print(divisions) # doctest: +NORMALIZE_WHITESPACE\n (Timestamp('2021-01-01 00:00:00'),\n Timestamp('2021-01-02 00:00:00'),\n Timestamp('2021-01-03 00:00:00'),\n Timestamp('2021-01-04 00:00:00'),\n Timestamp('2021-01-05 00:00:00'),\n Timestamp('2021-01-06 00:00:00'),\n Timestamp('2021-01-06 23:00:00'))\n\n >>> ddf = ddf.set_index(\"timestamp\", divisions=divisions, sorted=True)\n ", "language": "en", "n_whitespaces": 495, "n_words": 185, "vocab_size": 113 }
https://github.com/dask/dask.git
2
parse_wheel
def parse_wheel(wheel_zip, name):
    # type: (ZipFile, str) -> Tuple[str, Message]
    try:
        info_dir = wheel_dist_info_dir(wheel_zip, name)
        metadata = wheel_metadata(wheel_zip, info_dir)
        version = wheel_version(metadata)
    except UnsupportedWheel as e:
        raise UnsupportedWheel("{} has an invalid wheel, {}".format(name, str(e)))

    check_compatibility(version, name)

    return info_dir, metadata
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
14
wheel.py
103
upd; format
12,522
0
85
62
35
61,340
39
transferlearning
14
.venv/lib/python3.8/site-packages/pip/_internal/utils/wheel.py
Python
9
{ "docstring": "Extract information from the provided wheel, ensuring it meets basic\n standards.\n\n Returns the name of the .dist-info directory and the parsed WHEEL metadata.\n ", "language": "en", "n_whitespaces": 32, "n_words": 23, "vocab_size": 20 }
https://github.com/jindongwang/transferlearning.git
1
require_torch_bf16_cpu
def require_torch_bf16_cpu(test_case):
    return unittest.skipUnless(
        is_torch_bf16_cpu_available(),
        "test requires torch>=1.10, using CPU",
    )(test_case)
a2d34b7c040723b92823310e3b8fd66874c9d667
10
testing_utils.py
38
deprecate is_torch_bf16_available (#17738) * deprecate is_torch_bf16_available * address suggestions
5,726
0
34
21
11
31,378
11
transformers
5
src/transformers/testing_utils.py
Python
5
{ "docstring": "Decorator marking a test that requires torch>=1.10, using CPU.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/huggingface/transformers.git
3
is_show
def is_show(self, value, header=""):
    # @TODO: possible optimisation: create a re.compile list
    return any(j for j in [re.match(i, value) for i in self.get_conf_value('show', header=header)])
4d50e8cb5756755cf019cef1f45cd569bef708fc
13
glances_plugin.py
69
Create a Show option in the configuration file to only show some stats #2052
15,210
0
45
43
22
69,969
24
glances
10
glances/plugins/glances_plugin.py
Python
2
{ "docstring": "Return True if the value is in the show configuration list.\n\n If the show value is empty, return True (show by default)\n\n The show configuration list is defined in the glances.conf file.\n It is a comma separated list of regexp.\n Example for diskio:\n show=sda.*\n ", "language": "en", "n_whitespaces": 86, "n_words": 44, "vocab_size": 31 }
https://github.com/nicolargo/glances.git
2
test_estimator_does_not_support_feature_names
def test_estimator_does_not_support_feature_names():
    pytest.importorskip("pandas")
    X, y = datasets.load_iris(as_frame=True, return_X_y=True)
    all_feature_names = set(X.columns)
9f85c9d44965b764f40169ef2917e5f7a798684f
9
test_from_model.py
61
TST Better info when checking for no warnings in tests (#22362)
75,364
0
23
114
10
258,673
11
scikit-learn
12
sklearn/feature_selection/tests/test_from_model.py
Python
14
{ "docstring": "SelectFromModel works with estimators that do not support feature_names_in_.\n\n Non-regression test for #21949.\n ", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 13 }
https://github.com/scikit-learn/scikit-learn.git
1
test_get_member_list_no_permission_former_member
def test_get_member_list_no_permission_former_member(self):
    # create a room, invite the user and the user joins
    room_id = self.helper.create_room_as("@alice:red")
    self.helper.invite(room_id, "@alice:red", self.user_id)
    self.helper.join(room_id, self.user_id)

    # check that the user can see the member list to start with
    channel = self.make_request("GET", "/rooms/%s/members" % room_id)
    self.assertEqual(200, channel.code, msg=channel.result["body"])

    # ban the user
    self.helper.change_membership(room_id, "@alice:red", self.user_id, "ban")

    # check the user can no longer see the member list
    channel = self.make_request("GET", "/rooms/%s/members" % room_id)
    self.assertEqual(403, channel.code, msg=channel.result["body"])
02d708568b476f2f7716000b35c0adfa4cbd31b3
10
test_rooms.py
203
Replace assertEquals and friends with non-deprecated versions. (#12092)
71,395
0
162
120
42
246,901
71
synapse
15
tests/rest/client/test_rooms.py
Python
9
{ "docstring": "\n Tests that a former member of the room can not get the member list.\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 12 }
https://github.com/matrix-org/synapse.git
6
get_dated_queryset
def get_dated_queryset(self, **lookup):
    qs = self.get_queryset().filter(**lookup)
    date_field = self.get_date_field()
    allow_future = self.get_allow_future()
    allow_empty = self.get_allow_empty()
    paginate_by = self.get_paginate_by(qs)

    if not allow_future:
        now = timezone.now() if self.uses_datetime_field else timezone_today()
        qs = qs.filter(**{"%s__lte" % date_field: now})

    if not allow_empty:
        # When pagination is enabled, it's better to do a cheap query
        # than to load the unpaginated queryset in memory.
        is_empty = not qs if paginate_by is None else not qs.exists()
        if is_empty:
            raise Http404(
                _("No %(verbose_name_plural)s available")
                % {
                    "verbose_name_plural": qs.model._meta.verbose_name_plural,
                }
            )

    return qs
9c19aff7c7561e3a82978a272ecdaad40dda5c00
17
dates.py
222
Refs #33476 -- Reformatted code with Black.
51,756
0
323
130
61
206,855
84
django
25
django/views/generic/dates.py
Python
19
{ "docstring": "\n Get a queryset properly filtered according to `allow_future` and any\n extra lookup kwargs.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 13 }
https://github.com/django/django.git
1
default_params
def default_params(self) -> dict:
    return {"order": "asc", "sort": self.sort_key, "limit": self.limit}
63af98e3b999d4b223237b51472a819915c5a558
8
streams.py
49
🎉 Recurly Schema Revamp (#9866) * Cleanup Recurly connector schemas * Add more Recurly schemas to the connector - `billing_infos` - `shipping_addresses` - `shipping_methods` - `subscription_changes` * Add Recurly `add-on` resouce * Add Recurly's account notes resource schema * Add unique coupons to Recurly source * Add credit payments to Recurly connector * Add Recurly resources to integration tests configurations * Bump Recurly source version to `0.4.0` * Add `line_items` Recurly resource * Add `line_items` to Recurly documentation * Add missing `line_items` JSON schema * Replace Subscription Change Recurly API call with Subscription `pending_changes` field * Replace Recurly unique coupon codes API call with coupons `unique_coupon` field To avoid the extra API call to import unique coupon calls * Revert "Replace Recurly unique coupon codes API call with coupons `unique_coupon` field" This reverts commit 1c4592d82da3c5e5e0026dda8eb2ed7a896ac5b8. * Add `end_time` parameter to Recurly connector * Order Recurly specs * Set the Recurly `begin_time` and `end_time` to be optional * Add `order` to Recurly `source_spec.yaml` * Add `maxLength` to Recurly source schemas * Set `maxLength` for Recurly Subscription and Transaction `uuid` * Fix Recurly `export_dates` acceptance tests
628
0
25
26
11
4,171
11
airbyte
5
airbyte-integrations/connectors/source-recurly/source_recurly/streams.py
Python
5
{ "docstring": "\n Returns the parameters to be sent together with the API call to Recurly\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 11 }
https://github.com/airbytehq/airbyte.git
3
filter_on_submodules
def filter_on_submodules(all_modules, submodule):
    filtered_modules = [
        mod for mod in all_modules if PACKAGE + submodule in mod.__name__
    ]
    return filtered_modules
a449efe29b092e658a29cd847e0494979a47d252
10
keras_doctest.py
43
Add a keras doctest modeled on tensorflow doctest PiperOrigin-RevId: 424672415
79,737
0
29
27
17
268,868
20
keras
7
keras/tests/keras_doctest.py
Python
5
{ "docstring": "Filters all the modules based on the module flag.\n\n The module flag has to be relative to the core package imported.\n For example, if `submodule=keras.layers` then, this function will return\n all the modules in the submodule.\n\n Args:\n all_modules: All the modules in the core package.\n submodule: Submodule to filter from all the modules.\n\n Returns:\n All the modules in the submodule.\n ", "language": "en", "n_whitespaces": 75, "n_words": 60, "vocab_size": 38 }
https://github.com/keras-team/keras.git
1
is_stale
def is_stale(self, resource, path):
    # Cache invalidation is a hard problem :-)
    return True
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
6
resources.py
21
upd; format
12,869
0
35
12
14
62,100
14
transferlearning
4
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/resources.py
Python
2
{ "docstring": "\n Is the cache stale for the given resource?\n\n :param resource: The :class:`Resource` being cached.\n :param path: The path of the resource in the cache.\n :return: True if the cache is stale.\n ", "language": "en", "n_whitespaces": 67, "n_words": 31, "vocab_size": 24 }
https://github.com/jindongwang/transferlearning.git
3
set_result
def set_result(self, result):
    with self._condition:
        if self._state in {CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED}:
            raise InvalidStateError('{}: {!r}'.format(self._state, self))
        self._result = result
        self._state = FINISHED
        for waiter in self._waiters:
            waiter.add_result(self)
        self._condition.notify_all()
    self._invoke_callbacks()
8198943edd73a363c266633e1aa5b2a9e9c9f526
14
_base.py
123
add python 3.10.4 for windows
56,431
0
134
75
25
221,566
28
XX-Net
16
python3.10.4/Lib/concurrent/futures/_base.py
Python
10
{ "docstring": "Sets the return value of work associated with the future.\n\n Should only be used by Executor implementations and unit tests.\n ", "language": "en", "n_whitespaces": 34, "n_words": 20, "vocab_size": 19 }
https://github.com/XX-net/XX-Net.git
9
get_data
def get_data(self, session_id, metric):
    if session_id is None:
        raw = self._data
    else:
        data = self._data.get(session_id)
        if not data:
            return None
        raw = {session_id: data}

    dtype = "float32" if metric == "loss" else "float64"

    retval = {}
    for idx, data in raw.items():
        val = {metric: np.frombuffer(zlib.decompress(data[metric]),
                                     dtype=dtype).reshape(data[f"{metric}_shape"])}
        if metric == "loss":
            val["labels"] = data["labels"]
        retval[idx] = val

    logger.debug("Obtained cached data: %s",
                 {session_id: {k: v.shape if isinstance(v, np.ndarray) else v
                               for k, v in data.items()}
                  for session_id, data in retval.items()})
    return retval
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
17
event_reader.py
284
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
19,804
0
358
173
52
100,307
80
faceswap
25
lib/gui/analysis/event_reader.py
Python
21
{ "docstring": " Retrieve the decompressed cached data from the cache for the given session id.\n\n Parameters\n ----------\n session_id: int or ``None``\n If session_id is provided, then the cached data for that session is returned. If\n session_id is ``None`` then the cached data for all sessions is returned\n metric: ['loss', 'timestamps']\n The metric to return the data for.\n\n Returns\n -------\n dict\n The `session_id` (s) as key, the values are a dictionary containing the requested\n metric information for each session returned\n ", "language": "en", "n_whitespaces": 189, "n_words": 77, "vocab_size": 50 }
https://github.com/deepfakes/faceswap.git
2
find_all_matches
def find_all_matches(self, sources=None, finder=None):
    # type: (Optional[List[Dict[S, Union[S, bool]]]], Optional[PackageFinder]) -> List[InstallationCandidate]
    from .dependencies import find_all_matches, get_finder

    if not finder:
        _, finder = get_finder(sources=sources)
    return find_all_matches(finder, self.as_ireq())
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
11
requirements.py
77
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
4,277
0
73
46
27
22,233
27
pipenv
8
pipenv/vendor/requirementslib/models/requirements.py
Python
5
{ "docstring": "Find all matching candidates for the current requirement.\n\n Consults a finder to find all matching candidates.\n\n :param sources: Pipfile-formatted sources, defaults to None\n :param sources: list[dict], optional\n :param PackageFinder finder: A **PackageFinder** instance from pip's repository implementation\n :return: A list of Installation Candidates\n :rtype: list[ :class:`~pipenv.patched.pip._internal.index.InstallationCandidate` ]\n ", "language": "en", "n_whitespaces": 96, "n_words": 47, "vocab_size": 40 }
https://github.com/pypa/pipenv.git
4
test_ddpg_compilation
def test_ddpg_compilation(self):
    config = (
        ddpg.DDPGConfig()
        .training(num_steps_sampled_before_learning_starts=0)
        .rollouts(num_rollout_workers=0, num_envs_per_worker=2)
    )
    explore = config.exploration_config.update({"random_timesteps": 100})
    config.exploration(exploration_config=explore)
    num_iterations = 1

    # Test against all frameworks.
    for _ in framework_iterator(config, with_eager_tracing=True):
        algo = config.build(env="Pendulum-v1")
        for i in range(num_iterations):
            results = algo.train()
            check_train_results(results)
            print(results)
        check_compute_single_action(algo)
        # Ensure apply_gradient_fn is being called and updating global_step
        pol = algo.get_policy()
        if config.framework_str == "tf":
            a = pol.get_session().run(pol.global_step)
        else:
            a = pol.global_step
        check(a, 500)
        algo.stop()
0dceddb912ed92286032b5563dd2e541a8a7031f
14
test_ddpg.py
257
[RLlib] Move learning_starts logic from buffers into `training_step()`. (#26032)
28,309
0
330
153
56
126,954
67
ray
37
rllib/algorithms/ddpg/tests/test_ddpg.py
Python
23
{ "docstring": "Test whether DDPG can be built with both frameworks.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/ray-project/ray.git
1
increment_iterations
def increment_iterations(self) -> None:
    self._iterations += 1
    self._sessions[self._session_id]["iterations"] += 1
ff6b0209dd5ad57b81b0aca570df7f39a7119bfb
9
model.py
45
Refactoring and TravisCI to Github Actions (#1239) * refactor training * travis to actions
20,292
0
31
26
8
100,841
10
faceswap
5
plugins/train/model/_base/model.py
Python
4
{ "docstring": " Increment :attr:`iterations` and session iterations by 1. ", "language": "en", "n_whitespaces": 8, "n_words": 7, "vocab_size": 7 }
https://github.com/deepfakes/faceswap.git
1
export_docker_compose
def export_docker_compose(args):
    Flow.load_config(args.flowpath).to_docker_compose_yaml(
        output_path=args.outpath, network_name=args.network_name
    )
16b16b07a66cd5a8fc7cca1d3f1c378a9c63d38c
10
exporter.py
48
refactor: rename cli to jina_cli (#4890) * chore: fix readme * chore: fix readme * chore: fix dockerignore * fix: #4845 * style: fix overload and cli autocomplete * fix: cicd export cli Co-authored-by: Jina Dev Bot <[email protected]>
2,301
0
22
29
6
12,474
6
jina
9
jina/exporter.py
Python
4
{ "docstring": "Export to Docker compose yaml files\n\n :param args: args from CLI\n ", "language": "en", "n_whitespaces": 17, "n_words": 11, "vocab_size": 11 }
https://github.com/jina-ai/jina.git
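A hedged usage sketch for the export_docker_compose record above: the function only reads three attributes off its args object, so any argparse-style namespace works. The attribute names come from the function body itself; the file and network names are illustrative assumptions.

from types import SimpleNamespace

# Illustrative values only; flowpath must point at an existing Flow YAML.
args = SimpleNamespace(
    flowpath="flow.yml",              # assumed input Flow definition
    outpath="docker-compose.yml",     # assumed output path
    network_name="jina-network",      # assumed Docker network name
)
export_docker_compose(args)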
7
test_search_based_stream_should_not_attempt_to_get_more_than_10k_records
def test_search_based_stream_should_not_attempt_to_get_more_than_10k_records(requests_mock, common_params, fake_properties_list):
    responses = [
        {
            "json": {
                "results": [{"id": f"{y}", "updatedAt": "2022-02-25T16:43:11Z"} for y in range(100)],
                "paging": {"next": {"after": f"{x*100}",}}
            },
            "status_code": 200,
        }
        for x in range(1, 101)
    ]
    # After reaching 10K records, it performs a new search query.
    responses.extend([
        {
            "json": {
                "results": [{"id": f"{y}", "updatedAt": "2022-03-01T00:00:00Z"} for y in range(100)],
                "paging": {"next": {"after": f"{x*100}",}}
            },
            "status_code": 200,
        }
        for x in range(1, 10)
    ])
    # Last page... it does not have paging->next->after
    responses.append({
        "json": {
            "results": [{"id": f"{y}", "updatedAt": "2022-03-01T00:00:00Z"} for y in range(100)],
            "paging": {}
        },
        "status_code": 200,
    })

    properties_response = [{
        "json": [
            {"name": property_name, "type": "string", "updatedAt": 1571085954360, "createdAt": 1565059306048}
            for property_name in fake_properties_list
        ],
        "status_code": 200
    }]

    # Create test_stream instance with some state
    test_stream = Companies(**common_params)
    test_stream.state = {"updatedAt": "2022-02-24T16:43:11Z"}

    # Mocking Request
    requests_mock.register_uri("POST", test_stream.url, responses)
    requests_mock.register_uri("GET", "/properties/v2/company/properties", properties_response)

    records = list(test_stream.read_records(sync_mode=SyncMode.incremental))

    # The stream should not attempt to get more than 10K records.
    # Instead, it should use the new state to start a new search query.
    assert len(records) == 11000
    assert test_stream.state['updatedAt'] == '2022-03-01T00:00:00+00:00'
710543a9abacc7578238cb5edaa47f43ed7c0431
19
test_source.py
514
🐛 Source Hubspot: Handled 10K+ search-endpoint queries (#10700) * Handled search queries that would output more than 10K records * Getting CRM search objects in ascending chronological ortder * Fixed stream * Fixed rebase * Fixed condition * Added unit test * Removed unused import * Started a new query when reached 10K records * Moved comment
652
0
524
277
107
4,306
178
airbyte
24
airbyte-integrations/connectors/source-hubspot/unit_tests/test_source.py
Python
42
{ "docstring": "\n If there are more than 10,000 records that would be returned by the Hubspot search endpoint,\n the CRMSearchStream instance should stop at the 10Kth record\n ", "language": "en", "n_whitespaces": 35, "n_words": 25, "vocab_size": 23 }
https://github.com/airbytehq/airbyte.git
4
get_mode_of_payments
def get_mode_of_payments(filters):
    mode_of_payments = {}
    invoice_list = get_invoices(filters)
    invoice_list_names = ",".join("'" + invoice["name"] + "'" for invoice in invoice_list)
    if invoice_list:
        inv_mop = frappe.db.sql(
            # (SQL query literal stripped out; see this record's docstring field)
            .format(
                invoice_list_names=invoice_list_names
            ),
            as_dict=1,
        )
        for d in inv_mop:
            mode_of_payments.setdefault(d["owner"] + cstr(d["posting_date"]), []).append(d.mode_of_payment)
    return mode_of_payments
74a782d81d8f8c4a4d9214a9c06377e5e6e464dd
18
sales_payment_summary.py
157
refactor: DB independent quoting and truthy/falsy values (#31358) * refactor: DB independent quoting and truthy/falsy values * style: reformat to black spec * fix: ifnull -> coalesce * fix: coalesce -> Coalesce * fix: revert pypika comparison * refactor: convert queries to QB * fix: incorrect value types for query `=` query makes no sense with list of values * fix: remove warehouse docstatus condition * fix: keep using base rate as rate Co-authored-by: Ankush Menat <[email protected]>
14,908
0
25
93
31
68,837
39
erpnext
19
erpnext/accounts/report/sales_payment_summary/sales_payment_summary.py
Python
34
{ "docstring": "select a.owner,a.posting_date, ifnull(b.mode_of_payment, '') as mode_of_payment\n\t\t\tfrom `tabSales Invoice` a, `tabSales Invoice Payment` b\n\t\t\twhere a.name = b.parent\n\t\t\tand a.docstatus = 1\n\t\t\tand a.name in ({invoice_list_names})\n\t\t\tunion\n\t\t\tselect a.owner,a.posting_date, ifnull(b.mode_of_payment, '') as mode_of_payment\n\t\t\tfrom `tabSales Invoice` a, `tabPayment Entry` b,`tabPayment Entry Reference` c\n\t\t\twhere a.name = c.reference_name\n\t\t\tand b.name = c.parent\n\t\t\tand b.docstatus = 1\n\t\t\tand a.name in ({invoice_list_names})\n\t\t\tunion\n\t\t\tselect a.owner, a.posting_date,\n\t\t\tifnull(a.voucher_type,'') as mode_of_payment\n\t\t\tfrom `tabJournal Entry` a, `tabJournal Entry Account` b\n\t\t\twhere a.name = b.parent\n\t\t\tand a.docstatus = 1\n\t\t\tand b.reference_type = 'Sales Invoice'\n\t\t\tand b.reference_name in ({invoice_list_names})\n\t\t\t", "language": "en", "n_whitespaces": 71, "n_words": 91, "vocab_size": 42 }
https://github.com/frappe/erpnext.git
1
test_alter_value
def test_alter_value(self):
    altered_raw_data = apply_changes_to_raw_data(
        raw_data=self.raw_data,
        block_path_str="char1",
        operation=AlterBlockValueOperation(new_value="foo"),
        streamfield=models.SampleModel.content,
    )

    self.assertEqual(altered_raw_data[0]["value"], "foo")
    self.assertEqual(altered_raw_data[1]["value"], self.raw_data[1]["value"])
    self.assertEqual(altered_raw_data[2]["value"], "foo")
    self.assertEqual(altered_raw_data[3]["value"], self.raw_data[3]["value"])
ad65741b94f36fbe793cf15f0ab002482070cdb6
13
test_simple_structures.py
181
Add tests for streamfield migration helpers Currently failing due to wagtail-factories being broken on Wagtail 4.1: https://github.com/wagtail/wagtail-factories/issues/65
17,024
0
111
110
17
80,146
18
wagtail
14
wagtail/tests/streamfield_migrations/test_simple_structures.py
Python
11
{ "docstring": "Change the value of each `char1` block to `foo`\n\n Check whether the value of each `char1` block has changed to `foo`.\n Check whether the values of other blocks are intact.\n ", "language": "en", "n_whitespaces": 51, "n_words": 30, "vocab_size": 19 }
https://github.com/wagtail/wagtail.git
2
get_popularity_based_topk
def get_popularity_based_topk(self, top_k=10, items=True):
    # TODO: get most frequent users
    if not items:
        raise ValueError("Not implemented")

    return self.item_frequencies.orderBy("frequency", ascending=False).limit(top_k)
99b46f65e08a67f3b11cbaf80edeffe6dd5d6b2e
10
SARPlus.py
70
Implemented get_popularity_based_topk
7,242
0
58
41
19
39,476
19
recommenders
9
contrib/sarplus/python/pysarplus/SARPlus.py
Python
4
{ "docstring": "Get top K most frequently occurring items across all users.\n\n Args:\n top_k (int): number of top items to recommend.\n items (bool): if false, return most frequent users instead.\n\n Returns:\n pyspark.sql.DataFrame: Spark dataframe with top k most popular items\n and their frequencies in descending order.\n ", "language": "en", "n_whitespaces": 109, "n_words": 44, "vocab_size": 37 }
https://github.com/microsoft/recommenders.git
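A hedged usage sketch for the get_popularity_based_topk record above: it assumes a SARPlus instance that has already been fitted on a Spark DataFrame, since item_frequencies is only available after fitting; the variable names are illustrative.

# model is assumed to be a fitted pysarplus.SARPlus instance.
popular_items = model.get_popularity_based_topk(top_k=5)   # Spark DataFrame
popular_items.show()                                       # items and frequencies, descending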
2
format_string_to_json
def format_string_to_json(balance_info):
    # e.g. balance_info: Working Account|KES|481000.00|481000.00|0.00|0.00
    balance_dict = frappe._dict()
    for account_info in balance_info.split("&"):
        account_info = account_info.split("|")
        balance_dict[account_info[0]] = dict(
            current_balance=fmt_money(account_info[2], currency="KES"),
            available_balance=fmt_money(account_info[3], currency="KES"),
            reserved_balance=fmt_money(account_info[4], currency="KES"),
            uncleared_balance=fmt_money(account_info[5], currency="KES"),
        )
    return dumps(balance_dict)
494bd9ef78313436f0424b918f200dab8fc7c20b
15
mpesa_settings.py
166
style: format code with black
14,080
0
16
103
22
65,988
28
erpnext
15
erpnext/erpnext_integrations/doctype/mpesa_settings/mpesa_settings.py
Python
11
{ "docstring": "\n\tFormat string to json.\n\n\te.g: \n\t=> {'Working Account': {'current_balance': '481000.00',\n\t 'available_balance': '481000.00',\n\t 'reserved_balance': '0.00',\n\t 'uncleared_balance': '0.00'}}\n\t", "language": "en", "n_whitespaces": 35, "n_words": 16, "vocab_size": 15 }
https://github.com/frappe/erpnext.git
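A hedged usage sketch for the format_string_to_json record above, reusing the pipe-delimited example from its docstring; running it requires a Frappe environment because of frappe._dict and fmt_money.

# Input format (from the docstring): AccountName|Currency|Current|Available|Reserved|Uncleared
balance_info = "Working Account|KES|481000.00|481000.00|0.00|0.00"
print(format_string_to_json(balance_info))
# Expected shape of the result (a JSON string):
# {"Working Account": {"current_balance": ..., "available_balance": ...,
#                      "reserved_balance": ..., "uncleared_balance": ...}}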
8
setimage
def setimage(self, im, extents=None):
    # following c code
    self.im = im

    if extents:
        (x0, y0, x1, y1) = extents
    else:
        (x0, y0, x1, y1) = (0, 0, 0, 0)

    if x0 == 0 and x1 == 0:
        self.state.xsize, self.state.ysize = self.im.size
    else:
        self.state.xoff = x0
        self.state.yoff = y0
        self.state.xsize = x1 - x0
        self.state.ysize = y1 - y0

    if self.state.xsize <= 0 or self.state.ysize <= 0:
        raise ValueError("Size cannot be negative")

    if (
        self.state.xsize + self.state.xoff > self.im.size[0]
        or self.state.ysize + self.state.yoff > self.im.size[1]
    ):
        raise ValueError("Tile cannot extend outside image")
a0e1fde1eddf45f26653e2ff6080d31e177adbec
11
ImageFile.py
282
Added PyEncoder
69,856
0
282
184
54
242,434
91
Pillow
15
src/PIL/ImageFile.py
Python
20
{ "docstring": "\n Called from ImageFile to set the core output image for the codec\n\n :param im: A core image object\n :param extents: a 4 tuple of (x0, y0, x1, y1) defining the rectangle\n for this tile\n :returns: None\n ", "language": "en", "n_whitespaces": 83, "n_words": 36, "vocab_size": 30 }
https://github.com/python-pillow/Pillow.git
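A standalone sketch of the extents bookkeeping that setimage performs, written in plain Python rather than Pillow's codec class, to make the offset/size arithmetic and the bounds checks concrete; the numbers are illustrative.

im_size = (640, 480)                 # full image size
x0, y0, x1, y1 = 10, 20, 200, 120    # tile extents

xoff, yoff = x0, y0
xsize, ysize = x1 - x0, y1 - y0      # -> 190, 100

if xsize <= 0 or ysize <= 0:
    raise ValueError("Size cannot be negative")
if xsize + xoff > im_size[0] or ysize + yoff > im_size[1]:
    raise ValueError("Tile cannot extend outside image")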
1
get_cache_attr_name
def get_cache_attr_name(cls):
    return "_{}.{}".format(cls._meta.app_label, cls._meta.model_name).lower()
d10f15e55806c6944827d801cd9c2d53f5da4186
11
models.py
47
Reformat with black
16,023
0
19
27
5
73,478
5
wagtail
7
wagtail/contrib/settings/models.py
Python
2
{ "docstring": "\n Returns the name of the attribute that should be used to store\n a reference to the fetched/created object on a request.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 17 }
https://github.com/wagtail/wagtail.git
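An illustrative sketch for the get_cache_attr_name record above: for a hypothetical Wagtail settings model named SiteSettings in an app labelled "base", the method yields the lowercased "_<app_label>.<model_name>" string used as the per-request cache key.

# SiteSettings is a hypothetical settings model in an app labelled "base".
attr_name = SiteSettings.get_cache_attr_name()
assert attr_name == "_base.sitesettings"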
5
serialize
def serialize(self):
    data = dict()
    for attr in self.fattributes:
        if attr not in ('block', 'rescue', 'always'):
            data[attr] = getattr(self, attr)

    data['dep_chain'] = self.get_dep_chain()

    if self._role is not None:
        data['role'] = self._role.serialize()

    if self._parent is not None:
        data['parent'] = self._parent.copy(exclude_tasks=True).serialize()
        data['parent_type'] = self._parent.__class__.__name__

    return data
43153c58310d02223f2cb0964f4255ba1ac4ed53
13
block.py
188
`FieldAttribute`s as descriptors (#73908)
78,962
0
152
112
30
267,581
44
ansible
14
lib/ansible/playbook/block.py
Python
12
{ "docstring": "\n Override of the default serialize method, since when we're serializing\n a task we don't want to include the attribute list of tasks.\n ", "language": "en", "n_whitespaces": 44, "n_words": 22, "vocab_size": 20 }
https://github.com/ansible/ansible.git
4
_apply_scores
def _apply_scores(self, scores, value, scores_mask=None, training=None):
    if scores_mask is not None:
        padding_mask = tf.logical_not(scores_mask)
        # Bias so padding positions do not contribute to attention
        # distribution. Note 65504. is the max float16 value.
        if scores.dtype is tf.float16:
            scores -= 65504.0 * tf.cast(padding_mask, dtype=scores.dtype)
        else:
            scores -= 1.0e9 * tf.cast(padding_mask, dtype=scores.dtype)
    if training is None:
        training = backend.learning_phase()
    weights = tf.nn.softmax(scores)
8401e08334d4b1f102a6ee9479738bacfee0600c
16
base_dense_attention.py
153
reduce layers line-too-long
82,075
0
185
133
44
277,507
60
keras
17
keras/layers/attention/base_dense_attention.py
Python
15
{ "docstring": "Applies attention scores to the given value tensor.\n\n To use this method in your attention layer, follow the steps:\n\n * Use `query` tensor of shape `[batch_size, Tq]` and `key` tensor of\n shape `[batch_size, Tv]` to calculate the attention `scores`.\n * Pass `scores` and `value` tensors to this method. The method applies\n `scores_mask`, calculates `attention_distribution = softmax(scores)`,\n then returns `matmul(attention_distribution, value).\n * Apply `query_mask` and return the result.\n\n Args:\n scores: Scores float tensor of shape `[batch_size, Tq, Tv]`.\n value: Value tensor of shape `[batch_size, Tv, dim]`.\n scores_mask: A boolean mask `Tensor` of shape `[batch_size, 1, Tv]` or\n `[batch_size, Tq, Tv]`. If given, scores at positions where\n `scores_mask==False` do not contribute to the result. It must\n contain at least one `True` value in each line along the last\n dimension.\n training: Python boolean indicating whether the layer should behave in\n training mode (adding dropout) or in inference mode (no dropout).\n\n Returns:\n Tensor of shape `[batch_size, Tq, dim]`.\n Attention scores after masking and softmax with shape\n `[batch_size, Tq, Tv]`.\n ", "language": "en", "n_whitespaces": 361, "n_words": 165, "vocab_size": 108 }
https://github.com/keras-team/keras.git
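A minimal sketch of the masked-softmax application that _apply_scores implements, using plain TensorFlow ops rather than the Keras layer itself; shapes follow the docstring ([batch_size, Tq, Tv] scores against [batch_size, Tv, dim] values), and the tensors are random placeholders.

import tensorflow as tf

scores = tf.random.normal([2, 3, 4])                          # [batch_size, Tq, Tv]
value = tf.random.normal([2, 4, 8])                           # [batch_size, Tv, dim]
scores_mask = tf.constant([[[True, True, True, False]]] * 2)  # mask last Tv position

padding_mask = tf.logical_not(scores_mask)
scores -= 1.0e9 * tf.cast(padding_mask, dtype=scores.dtype)   # bias out padded positions
weights = tf.nn.softmax(scores)                               # attention distribution
result = tf.matmul(weights, value)                            # [batch_size, Tq, dim]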
3
_build_sub_tabs
def _build_sub_tabs(self) -> None:
    for section, plugins in self.config_tools.plugins_dict.items():
        for plugin in plugins:
            config_key = ".".join((section, plugin))
            config_dict = self.config_tools.config_dicts[config_key]
            tab = ConfigFrame(self, config_key, config_dict)
            self._tabs[section][plugin] = tab
            text = plugin.replace("_", " ").title()
            cast(ttk.Notebook, self._tabs[section]["tab"]).add(tab, text=text)
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
16
preview.py
175
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
20,846
0
151
110
29
101,433
36
faceswap
22
tools/preview/preview.py
Python
10
{ "docstring": " Build the notebook sub tabs for each convert section's plugin. ", "language": "en", "n_whitespaces": 11, "n_words": 10, "vocab_size": 10 }
https://github.com/deepfakes/faceswap.git
1
test_realm_quota
def test_realm_quota(self) -> None:
    self.login("hamlet")

    d1 = StringIO("zulip!")
    d1.name = "dummy_1.txt"
    result = self.client_post("/json/user_uploads", {"file": d1})
    response_dict = self.assert_json_success(result)
    d1_path_id = re.sub("/user_uploads/", "", response_dict["uri"])
    d1_attachment = Attachment.objects.get(path_id=d1_path_id)

    realm = get_realm("zulip")
    realm.upload_quota_gb = 1
    realm.save(update_fields=["upload_quota_gb"])

    # The size of StringIO("zulip!") is 6 bytes. Setting the size of
    # d1_attachment to realm.upload_quota_bytes() - 11 should allow
    # us to upload only one more attachment.
    quota = realm.upload_quota_bytes()
    assert quota is not None
    d1_attachment.size = quota - 11
    d1_attachment.save(update_fields=["size"])

    d2 = StringIO("zulip!")
    d2.name = "dummy_2.txt"
    result = self.client_post("/json/user_uploads", {"file": d2})
    self.assert_json_success(result)

    d3 = StringIO("zulip!")
    d3.name = "dummy_3.txt"
    result = self.client_post("/json/user_uploads", {"file": d3})
    self.assert_json_error(result, "Upload would exceed your organization's upload quota.")

    realm.upload_quota_gb = None
    realm.save(update_fields=["upload_quota_gb"])
    result = self.client_post("/json/user_uploads", {"file": d3})
    self.assert_json_success(result)
a142fbff85302c5e3acb2e204eca2e9c75dbc74b
11
test_upload.py
402
tests: Refactor away result.json() calls with helpers. Signed-off-by: Zixuan James Li <[email protected]>
17,789
0
327
223
69
84,150
117
zulip
29
zerver/tests/test_upload.py
Python
30
{ "docstring": "\n Realm quota for uploading should not be exceeded.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
https://github.com/zulip/zulip.git
1
forward_dummy
def forward_dummy(self, img, img_metas):
    super(SingleStageDetector, self).forward_train(img, img_metas)
    x = self.extract_feat(img)
    outs = self.panoptic_head(x, img_metas)
    return outs
cac356380d505bf15587f07c0529218cc36b9652
9
maskformer.py
67
[Feature] Add Maskformer to mmdet (#7212) * first commit * add README * move model description from config to readme add description for binary_input add description for dice loss add a independent panoptic gt processing function add a independent panoptic gt processing function remove compatibility of pretrain in maskformer * update comments in maskformer_head * update docs format
70,214
0
51
43
13
244,047
16
mmdetection
11
mmdet/models/detectors/maskformer.py
Python
5
{ "docstring": "Used for computing network flops. See\n `mmdetection/tools/analysis_tools/get_flops.py`\n\n Args:\n img (Tensor): of shape (N, C, H, W) encoding input images.\n Typically these should be mean centered and std scaled.\n img_metas (list[Dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n ", "language": "en", "n_whitespaces": 179, "n_words": 61, "vocab_size": 55 }
https://github.com/open-mmlab/mmdetection.git