Dataset columns and value ranges:

| Column | Type | Values / lengths |
| --- | --- | --- |
| complexity | int64 | 1 - 139 |
| fun_name | string | 1 - 80 chars |
| code | string | 101 - 62.2k chars |
| commit_id | string | 40 chars |
| ast_errors | string | 0 - 3.11k chars |
| ast_levels | int64 | 6 - 36 |
| file_name | string | 5 - 79 chars |
| n_ast_nodes | int64 | 17 - 19.2k |
| commit_message | string | 3 - 15.3k chars |
| d_id | int64 | 12 - 121k |
| n_ast_errors | int64 | 0 - 9 |
| n_whitespaces | int64 | 4 - 10.8k |
| token_counts | int64 | 5 - 3.06k |
| vocab_size | int64 | 4 - 1.11k |
| id | int64 | 20 - 338k |
| n_words | int64 | 4 - 4.82k |
| repo | string | 3 - 22 chars |
| n_identifiers | int64 | 2 - 176 |
| path | string | 7 - 134 chars |
| language | string | 1 distinct value |
| nloc | int64 | 1 - 413 |
| documentation | dict | - |
| url | string | 31 - 59 chars |
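The column names and ranges above read as per-function static metrics over Python source: AST size and depth, identifier counts, token and word counts, with the docstring pulled out into the nested `documentation` dict. As a minimal sketch only, assuming definitions that merely seem consistent with the column names (not the dataset's actual extraction pipeline), comparable numbers can be derived from a code string with Python's standard `ast` module:

```python
import ast

def analyze(code: str) -> dict:
    """Compute rough analogues of a few columns for one snippet of source.

    The definitions here are illustrative assumptions; the dataset's real
    extraction code may count nodes, tokens, or identifiers differently.
    """
    tree = ast.parse(code)
    nodes = list(ast.walk(tree))

    def depth(node: ast.AST) -> int:
        # Longest root-to-leaf path, one plausible reading of "ast_levels".
        children = list(ast.iter_child_nodes(node))
        return 1 + max((depth(child) for child in children), default=0)

    identifiers = {n.id for n in nodes if isinstance(n, ast.Name)}
    words = code.split()
    return {
        "n_ast_nodes": len(nodes),
        "ast_levels": depth(tree),
        "n_identifiers": len(identifiers),
        "nloc": sum(1 for line in code.splitlines() if line.strip()),
        "n_words": len(words),
        "vocab_size": len(set(words)),
    }

print(analyze("def add(a, b):\n    return a + b\n"))
```

The unlabeled values that follow are sample rows. Fields appear in the same order as the table above; fields that are empty for a given row (e.g. `ast_errors`) are simply omitted from that row.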
1
test_update_realm_allow_message_editing
def test_update_realm_allow_message_editing(self) -> None: self.set_up_db("allow_message_editing", False) self.set_up_db("message_content_edit_limit_seconds", None) self.set_up_db("edit_topic_policy", Realm.POLICY_ADMINS_ONLY) realm = self.update_with_api("allow_message_editing", True) realm = self.update_with_api("message_content_edit_limit_seconds", 100) realm = self.update_with_api("edit_topic_policy", Realm.POLICY_EVERYONE) self.assertEqual(realm.allow_message_editing, True) self.assertEqual(realm.message_content_edit_limit_seconds, 100) self.assertEqual(realm.edit_topic_policy, Realm.POLICY_EVERYONE) realm = self.update_with_api("allow_message_editing", False) self.assertEqual(realm.allow_message_editing, False) self.assertEqual(realm.message_content_edit_limit_seconds, 100) self.assertEqual(realm.edit_topic_policy, Realm.POLICY_EVERYONE) realm = self.update_with_api("message_content_edit_limit_seconds", 200) self.assertEqual(realm.allow_message_editing, False) self.assertEqual(realm.message_content_edit_limit_seconds, 200) self.assertEqual(realm.edit_topic_policy, Realm.POLICY_EVERYONE) realm = self.update_with_api("edit_topic_policy", Realm.POLICY_ADMINS_ONLY) self.assertEqual(realm.allow_message_editing, False) self.assertEqual(realm.message_content_edit_limit_seconds, 200) self.assertEqual(realm.edit_topic_policy, Realm.POLICY_ADMINS_ONLY) realm = self.update_with_api("edit_topic_policy", Realm.POLICY_MODERATORS_ONLY) self.assertEqual(realm.allow_message_editing, False) self.assertEqual(realm.message_content_edit_limit_seconds, 200) self.assertEqual(realm.edit_topic_policy, Realm.POLICY_MODERATORS_ONLY) realm = self.update_with_api("edit_topic_policy", Realm.POLICY_FULL_MEMBERS_ONLY) self.assertEqual(realm.allow_message_editing, False) self.assertEqual(realm.message_content_edit_limit_seconds, 200) self.assertEqual(realm.edit_topic_policy, Realm.POLICY_FULL_MEMBERS_ONLY) realm = self.update_with_api("edit_topic_policy", Realm.POLICY_MEMBERS_ONLY) self.assertEqual(realm.allow_message_editing, False) self.assertEqual(realm.message_content_edit_limit_seconds, 200) self.assertEqual(realm.edit_topic_policy, Realm.POLICY_MEMBERS_ONLY) # Test an invalid value for edit_topic_policy invalid_edit_topic_policy_value = 10 req = {"edit_topic_policy": orjson.dumps(invalid_edit_topic_policy_value).decode()} result = self.client_patch("/json/realm", req) self.assert_json_error(result, "Invalid edit_topic_policy") # Test 0 is invalid value of message_content_edit_limit_seconds invalid_message_content_edit_limit_seconds_value = 0 req = { "message_content_edit_limit_seconds": orjson.dumps( invalid_message_content_edit_limit_seconds_value ).decode() } result = self.client_patch("/json/realm", req) self.assert_json_error(result, "Bad value for 'message_content_edit_limit_seconds': 0")
a1f40ccda5d9bbf91122d755f88e8f03f426fc9a
12
test_realm.py
695
message_edit: Make zero invalid value for message_content_edit_time_limit_seconds. This commit changes the code to consider zero as an invalid value for message_content_edit_time_limit_seconds. Now to represent the setting that user can edit the message anytime, the setting value will be "None" in database and "unlimited" will be passed to API from clients.
17,846
0
490
432
56
84,571
138
zulip
24
zerver/tests/test_realm.py
Python
47
{ "docstring": "Tests updating the realm property 'allow_message_editing'.", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
https://github.com/zulip/zulip.git
1
engine
def engine(self): raise NotImplementedError # TODO: make this cache_readonly after docstring inheritance is fixed.
b240370bf83c88589d293b76b4a2409294e06f90
6
parquet_dispatcher.py
17
FEAT-#4733: Support fastparquet as engine for `read_parquet` (#4807) Signed-off-by: Karthik Velayutham <[email protected]>
35,890
0
31
8
14
154,270
14
modin
3
modin/core/io/column_stores/parquet_dispatcher.py
Python
2
{ "docstring": "Return string representing what engine is being used.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/modin-project/modin.git
2
__truediv__
def __truediv__(self, other): if self._delegate_binop(other): return NotImplemented return np.true_divide(self, other)
6d77c591c59b5678f14ae5af2127eebb7d2415bc
7
core.py
44
ENH: Adding __array_ufunc__ capability to MaskedArrays. This enables any ufunc numpy operations that are called on a MaskedArray to use the masked version of that function automatically without needing to resort to np.ma.func() calls.
38,769
0
42
27
9
160,869
10
numpy
7
numpy/ma/core.py
Python
4
{ "docstring": "\n Divide other into self, and return a new masked array.\n\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/numpy/numpy.git
2
__call__
def __call__(self, y_true, y_pred, sample_weight=None): # If we are wrapping a lambda function strip '<>' from the name as it is not # accepted in scope name. graph_ctx = tf_utils.graph_context_for_symbolic_tensors( y_true, y_pred, sample_weight ) with backend.name_scope(self._name_scope), graph_ctx: if tf.executing_eagerly(): call_fn = self.call else: call_fn = tf.__internal__.autograph.tf_convert( self.call, tf.__internal__.autograph.control_status_ctx() ) losses = call_fn(y_true, y_pred) return losses_utils.compute_weighted_loss( losses, sample_weight, reduction=self._get_reduction() )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
17
losses.py
160
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,211
0
246
101
50
274,533
59
keras
24
keras/losses.py
Python
15
{ "docstring": "Invokes the `Loss` instance.\n\n Args:\n y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except\n sparse loss functions such as sparse categorical crossentropy where\n shape = `[batch_size, d0, .. dN-1]`\n y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`\n sample_weight: Optional `sample_weight` acts as a coefficient for the\n loss. If a scalar is provided, then the loss is simply scaled by the\n given value. If `sample_weight` is a tensor of size `[batch_size]`, then\n the total loss for each sample of the batch is rescaled by the\n corresponding element in the `sample_weight` vector. If the shape of\n `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted to\n this shape), then each loss element of `y_pred` is scaled\n by the corresponding value of `sample_weight`. (Note on`dN-1`: all loss\n functions reduce by 1 dimension, usually axis=-1.)\n\n Returns:\n Weighted loss float `Tensor`. If `reduction` is `NONE`, this has\n shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`\n because all loss functions reduce by 1 dimension, usually axis=-1.)\n\n Raises:\n ValueError: If the shape of `sample_weight` is invalid.\n ", "language": "en", "n_whitespaces": 384, "n_words": 177, "vocab_size": 94 }
https://github.com/keras-team/keras.git
1
test_block_render_result_is_safe
def test_block_render_result_is_safe(self): stream_block = blocks.StreamBlock( [("paragraph", blocks.CharBlock(template="tests/jinja2/paragraph.html"))] ) stream_value = stream_block.to_python( [ {"type": "paragraph", "value": "hello world"}, ] ) result = render_to_string( "tests/jinja2/stream.html", { "value": stream_value, }, ) self.assertIn("<p>hello world</p>", result)
d10f15e55806c6944827d801cd9c2d53f5da4186
14
test_jinja2.py
123
Reformat with black
16,230
0
183
68
26
74,199
31
wagtail
12
wagtail/core/tests/test_jinja2.py
Python
16
{ "docstring": "\n Ensure that any results of template rendering in block.render are marked safe\n so that they don't get double-escaped when inserted into a parent template (#2541)\n ", "language": "en", "n_whitespaces": 47, "n_words": 25, "vocab_size": 23 }
https://github.com/wagtail/wagtail.git
8
get_matching_frame_actions
def get_matching_frame_actions(self, frames, platform, exception_data=None, cache=None): if not self.matchers: return [] # 1 - Check if exception matchers match for m in self._exception_matchers: if not m.matches_frame(frames, None, platform, exception_data, cache): return [] rv = [] # 2 - Check if frame matchers match for idx, frame in enumerate(frames): if all( m.matches_frame(frames, idx, platform, exception_data, cache) for m in self._other_matchers ): for action in self.actions: rv.append((idx, action)) return rv
686675f81bf9402bc9b671e61ea0481b0c5c3468
14
__init__.py
166
fix(grouping): Exception matcher with no frames (#38994) We used to pass `-1` as a frame index for exception matchers, which worked by accident because `-1` is a valid list index in Python, except when the list of frames was empty. Replace `-1` by `None` and make sure we do not attempt to access the list of frames in the exception matcher, by giving it its own `matches_frame` override. Fixes SENTRY-VWW
18,062
0
247
112
40
85,913
68
sentry
19
src/sentry/grouping/enhancer/__init__.py
Python
15
{ "docstring": "Given a frame returns all the matching actions based on this rule.\n If the rule does not match `None` is returned.\n ", "language": "en", "n_whitespaces": 35, "n_words": 21, "vocab_size": 20 }
https://github.com/getsentry/sentry.git
3
update
def update(self): x, y = pygame.mouse.get_pos() if x >= 0 and x <= (self.__W - self._width): self._xLoc = x
f0af0c43340763724f139fa68aa1e5a9ffe458b4
10
brickout-game.py
69
refactor: clean code Signed-off-by: slowy07 <[email protected]>
4,369
0
51
39
16
22,589
19
Python
10
brickout-game/brickout-game.py
Python
4
{ "docstring": "\n moves the paddle at the screen via mouse\n \n This class represents a simple Brick class.\n For representing bricks onto screen.\n", "language": "en", "n_whitespaces": 41, "n_words": 20, "vocab_size": 19 }
https://github.com/geekcomputers/Python.git
3
_create_placement_group
def _create_placement_group(self, num_workers): pg = get_current_placement_group() if pg is None: bundle = {"CPU": self._num_cpus_per_worker, "GPU": int(self._use_gpu)} bundles = [bundle] * num_workers pg = ray.util.placement_group(bundles, strategy="SPREAD") logger.debug("Waiting for placement group to start.") ready, _ = ray.wait([pg.ready()], timeout=SGD_PLACEMENT_GROUP_TIMEOUT_S) if ready: logger.debug("Placement group has started.") else: raise TimeoutError( "Placement group creation timed out. Make sure " "your cluster either has enough resources or use " "an autoscaling cluster. Current resources " "available: {}, resources requested by the " "placement group: {}".format( ray.available_resources(), pg.bundle_specs ) ) self._worker_placement_group = pg
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
16
worker_group.py
212
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
30,006
0
376
121
67
133,393
85
ray
26
python/ray/util/sgd/torch/worker_group.py
Python
21
{ "docstring": "Creates a placement group for the workers.\n\n If this worker is already in a placement group then a new one will\n not be created. This is primarily for when Tune is the upstream and\n will allocate resources for SGD workers.\n\n If this worker is not in a placement group, a new one will be created\n and set. The placement group will have a single bundle for each worker\n and use the SPREAD strategy for an even distribution.\n ", "language": "en", "n_whitespaces": 126, "n_words": 77, "vocab_size": 43 }
https://github.com/ray-project/ray.git
3
print_help
def print_help(self): id_string = "" for s_id, sub_dict in self.current_series.items(): id_string += ( f" {s_id.upper()}{(self.long_id-len(s_id)) * ' '} :" f" [italic]{sub_dict['title']}[/italic]\n" ) if not id_string: id_string += " [bold][red]None[/red][/bold]\n" help_text = f console.print(text=help_text, menu="Economy - Federal Reserve Economic Data")
82747072c511beb1b2672846ae2ee4aec53eb562
17
fred_controller.py
243
Terminal Wide Rich (#1161) * My idea for how we handle Rich moving forward * remove independent consoles * FIxed pylint issues * add a few vars * Switched print to console * More transitions * Changed more prints * Replaced all prints * Fixing tabulate * Finished replace tabulate * Finished removing rich from Tabulate * add Panel around menu * add GST watermark under feature flag * Fixed 46 tests * Delete test_screener[False].yaml * Delete test_screener[True].yaml * Fixed the rest of the tests * add help and source color vars and use rgb * rich on stocks/options * update rich on disc, dps, sia * rich in gov, ins and scr menus * ba and ca menus with rich * Fixed import issue * Fixed some tests * removed termcolor * Removed prettytable * add rich to remaining stocks menus * FIxed linting issue * Added James' changes * Updated dependencies * Add rich to cryptocurrency menu * refactor economy and forex * refactor etf with rich * refactor mfunds * refactor rich rest * not specify style so default color works well on any background * Fixing mypy issues * Updated tests * More test fixes * James' test fixes * Updating tests : stocks/screener - fix cassettes using BR * Updating tests : crypto * Updating tests : disable DEBUG_MODE * Updating tests : stocks/fa/yfinance * minor fixes that escape * Improve the rich table function (that replaces tabulate :D ) * Fixed bad code * delete rogue file + dcf fix + NoConsole * sia mypy * fuck you linter * fuck you linter pt 2 * skip hehe * i hate the black linter * ubuntu mypy attempt * Update : rich_config + gtff * Updating tests : conftest * Updating tests : stocks * Update : rich_config * Updating : rich_config * make panel configurable for Theodore :b * colors update * Merged * Updating : rich_config + feature_flags * Updating : rich_config * Updating tests : stocks * Updating : feature_flags Co-authored-by: DidierRLopes <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: james <[email protected]> Co-authored-by: jose-donato <[email protected]>
83,791
0
150
53
34
281,474
39
OpenBBTerminal
16
gamestonk_terminal/economy/fred/fred_controller.py
Python
21
{ "docstring": "Print help[cmds]\n search search FRED series notes\n add add series ID to list\n rmv remove series ID from list[/cmds]\n\n[param]Current Series IDs:[/param]\n{id_string}{'[dim]'if not self.current_series else \"\"}[cmds]\n plot plot selected series [/cmds]{'[/dim]'if not self.current_series else \"\"}\n{'[dim]'if len(self.current_series.keys())!=1 else \"\"}[menu]\n> pred prediction techniques (single SeriesID)[/menu]{'[/dim]'if len(self.current_series.keys())!=1 else \"\"}\n ", "language": "en", "n_whitespaces": 111, "n_words": 49, "vocab_size": 35 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
8
deserialize
def deserialize(obj): if isinstance(obj, ObjectIDType): return ray.get(obj) elif isinstance(obj, (tuple, list)) and any( isinstance(o, ObjectIDType) for o in obj ): return ray.get(list(obj)) elif isinstance(obj, dict) and any( isinstance(val, ObjectIDType) for val in obj.values() ): return dict(zip(obj.keys(), ray.get(list(obj.values())))) else: return obj
b22b93df20ad25ae7a11f0c89d32fb2f234d4641
18
utils.py
177
FIX-#4464: Refactor Ray utils and quick fix groupby.count failing on virtual partitions (#4490) Co-authored-by: Devin Petersohn <[email protected]> Signed-off-by: jeffreykennethli <[email protected]>
35,654
0
103
113
27
153,841
40
modin
15
modin/core/execution/ray/common/utils.py
Python
13
{ "docstring": "\n Deserialize a Ray object.\n\n Parameters\n ----------\n obj : ObjectIDType, iterable of ObjectIDType, or mapping of keys to ObjectIDTypes\n Object(s) to deserialize.\n\n Returns\n -------\n obj\n The deserialized object.\n ", "language": "en", "n_whitespaces": 66, "n_words": 27, "vocab_size": 22 }
https://github.com/modin-project/modin.git
1
test_metadata
def test_metadata(self): checkpoint = self._prepare_fs_checkpoint() # Convert into dict checkpoint data_dict = checkpoint.to_dict() self.assertIsInstance(data_dict, dict) data_dict["my_marker"] = "marked" # Create from dict checkpoint = Checkpoint.from_dict(data_dict) self.assertTrue(checkpoint._data_dict) self._assert_fs_checkpoint(checkpoint) # Convert back to dict data_dict_2 = Checkpoint.from_directory(checkpoint.to_directory()).to_dict() assert data_dict_2["my_marker"] == "marked"
cc53a1e28bdb0dc7121f4378c651e6290b7bc84d
12
test_checkpoints.py
142
[air] update checkpoint.py to deal with metadata in conversion. (#25727) This is carved out from https://github.com/ray-project/ray/pull/25558. tlrd: checkpoint.py current doesn't support the following ``` a. from fs to dict checkpoint; b. drop some marker to dict checkpoint; c. convert back to fs checkpoint; d. convert back to dict checkpoint. Assert that the marker should still be there ```
32,525
0
130
79
27
141,909
39
ray
16
python/ray/air/tests/test_checkpoints.py
Python
10
{ "docstring": "Test conversion with metadata involved.\n\n a. from fs to dict checkpoint;\n b. drop some marker to dict checkpoint;\n c. convert back to fs checkpoint;\n d. convert back to dict checkpoint.\n\n Assert that the marker should still be there.", "language": "en", "n_whitespaces": 72, "n_words": 38, "vocab_size": 27 }
https://github.com/ray-project/ray.git
3
get
def get(self, request, *args, **kwargs): ( self.term, self.model_admin, self.source_field, to_field_name, ) = self.process_request(request) if not self.has_perm(request): raise PermissionDenied self.object_list = self.get_queryset() context = self.get_context_data() return JsonResponse( { "results": [ self.serialize_result(obj, to_field_name) for obj in context["object_list"] ], "pagination": {"more": context["page_obj"].has_next()}, } )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
15
autocomplete.py
169
Refs #33476 -- Reformatted code with Black.
50,440
0
257
105
38
203,544
41
django
20
django/contrib/admin/views/autocomplete.py
Python
20
{ "docstring": "\n Return a JsonResponse with search results as defined in\n serialize_result(), by default:\n {\n results: [{id: \"123\" text: \"foo\"}],\n pagination: {more: true}\n }\n ", "language": "en", "n_whitespaces": 80, "n_words": 22, "vocab_size": 22 }
https://github.com/django/django.git
4
get_users_from_dialog
def get_users_from_dialog(old_data, dialog, logger): try: user_blocks = dialog.find_elements(By.TAG_NAME, "a") loaded_users = [ extract_text_from_element(u) for u in user_blocks if extract_text_from_element(u) ] new_data = old_data + loaded_users new_data = remove_duplicates(new_data, True, None) return new_data except (NoSuchElementException, StaleElementReferenceException) as exc: # Catch stale elements if any logger.error( "Error occurred while retrieving data.\n\t{}".format( str(exc).encode("utf-8") ) ) return old_data
2a157d452611d37cf50ccb7d56ff1a06e9790ecb
16
util.py
147
PR - Fix `extract_text_from_element()`and `find_element*()` to `find_element()` (#6438) * Updated getUserData() and find_element* Signed-off-by: elulcao <[email protected]> Thanks @breuerfelix for reviewing, 🚀 People in this thread please let me know if something is not OK, IG changed a lot these days. 🤗 @her
846
0
203
88
42
5,824
54
InstaPy
20
instapy/util.py
Python
18
{ "docstring": "\n Prepared to work specially with the dynamic data load in the 'Likes'\n dialog box\n ", "language": "en", "n_whitespaces": 24, "n_words": 14, "vocab_size": 13 }
https://github.com/InstaPy/InstaPy.git
4
test_log_redirect_to_stderr
def test_log_redirect_to_stderr(shutdown_only, capfd): log_components = { ray_constants.PROCESS_TYPE_DASHBOARD: "Dashboard head grpc address", ray_constants.PROCESS_TYPE_DASHBOARD_AGENT: "Dashboard agent grpc address", ray_constants.PROCESS_TYPE_GCS_SERVER: "Loading job table data", # No log monitor output if all components are writing to stderr. ray_constants.PROCESS_TYPE_LOG_MONITOR: "", ray_constants.PROCESS_TYPE_MONITOR: "Starting monitor using ray installation", ray_constants.PROCESS_TYPE_PYTHON_CORE_WORKER: "worker server started", ray_constants.PROCESS_TYPE_PYTHON_CORE_WORKER_DRIVER: "driver server started", # TODO(Clark): Add coverage for Ray Client. # ray_constants.PROCESS_TYPE_RAY_CLIENT_SERVER: "Starting Ray Client server", ray_constants.PROCESS_TYPE_RAY_CLIENT_SERVER: "", ray_constants.PROCESS_TYPE_RAYLET: "Starting object store with directory", # No reaper process run (kernel fate-sharing). ray_constants.PROCESS_TYPE_REAPER: "", # No reporter process run. ray_constants.PROCESS_TYPE_REPORTER: "", # No web UI process run. ray_constants.PROCESS_TYPE_WEB_UI: "", # Unused. ray_constants.PROCESS_TYPE_WORKER: "", } script =
43aa2299e6623c8f8c7c4a1b80133459d0aa68b0
script = """ import os from pathlib import Path import ray os.environ["RAY_LOG_TO_STDERR"] = "1" ray.init() session_dir = ray._private.worker.global_worker.node.address_info["session_dir"] session_path = Path(session_dir) log_dir_path = session_path / "logs"@ray.remote
9
test_logging.py
167
[api] Annotate as public / move ray-core APIs to _private and add enforcement rule (#25695) Enable checking of the ray core module, excluding serve, workflows, and tune, in ./ci/lint/check_api_annotations.py. This required moving many files to ray._private and associated fixes.
32,740
2
250
150
73
142,603
101
ray
21
python/ray/tests/test_logging.py
Python
53
{ "docstring": "\nimport os\nfrom pathlib import Path\n\nimport ray\n\nos.environ[\"RAY_LOG_TO_STDERR\"] = \"1\"\nray.init()\n\nsession_dir = ray._private.worker.global_worker.node.address_info[\"session_dir\"]\nsession_path = Path(session_dir)\nlog_dir_path = session_path / \"logs\"\n\n# Run the basic workload.\[email protected]", "language": "en", "n_whitespaces": 19, "n_words": 29, "vocab_size": 23 }
https://github.com/ray-project/ray.git
1
_set_rounding
def _set_rounding(self, type): rounding = self.rounding self.rounding = type return rounding
8198943edd73a363c266633e1aa5b2a9e9c9f526
7
_pydecimal.py
34
add python 3.10.4 for windows
55,717
0
39
20
8
219,692
11
XX-Net
4
python3.10.4/Lib/_pydecimal.py
Python
4
{ "docstring": "Sets the rounding type.\n\n Sets the rounding type, and returns the current (previous)\n rounding type. Often used like:\n\n context = context.copy()\n # so you don't change the calling context\n # if an error occurs in the middle.\n rounding = context._set_rounding(ROUND_UP)\n val = self.__sub__(other, context=context)\n context._set_rounding(rounding)\n\n This will make it round up for that operation.\n ", "language": "en", "n_whitespaces": 125, "n_words": 54, "vocab_size": 41 }
https://github.com/XX-net/XX-Net.git
7
add_edge
def add_edge(self, u_for_edge, v_for_edge, key=None, **attr): u, v = u_for_edge, v_for_edge # add nodes if u not in self._adj: if u is None: raise ValueError("None cannot be a node") self._adj[u] = self.adjlist_inner_dict_factory() self._node[u] = self.node_attr_dict_factory() if v not in self._adj: if v is None: raise ValueError("None cannot be a node") self._adj[v] = self.adjlist_inner_dict_factory() self._node[v] = self.node_attr_dict_factory() if key is None: key = self.new_edge_key(u, v) if v in self._adj[u]: keydict = self._adj[u][v] datadict = keydict.get(key, self.edge_attr_dict_factory()) datadict.update(attr) keydict[key] = datadict else: # selfloops work this way without special treatment datadict = self.edge_attr_dict_factory() datadict.update(attr) keydict = self.edge_key_dict_factory() keydict[key] = datadict self._adj[u][v] = keydict self._adj[v][u] = keydict return key
8f4c99debc9440728c5e85f8bffa5d26b232eb6f
12
multigraph.py
352
Multigraph docs update (#5389) * Updated MultiDiGraph documentation to include more examples of actually using parallel edges, and fixed references to things like G[u, v] where G[u, v, k] is required for a MultiDigraph. Have not made parallel changes in MultiGraph which should maybe also be made? Docs tests pass on my end; no code outside of comments was changed. -Peter Mawhorter * Updated docs for MultiGraph to add more multigraph-specific examples and fix a few places where untested examples were wrong. -Peter Mawhorter * [DOC] fix typo * add the right amount of separators Co-authored-by: Mridul Seth <[email protected]>
41,876
0
397
222
55
176,411
106
networkx
20
networkx/classes/multigraph.py
Python
27
{ "docstring": "Add an edge between u and v.\n\n The nodes u and v will be automatically added if they are\n not already in the graph.\n\n Edge attributes can be specified with keywords or by directly\n accessing the edge's attribute dictionary. See examples below.\n\n Parameters\n ----------\n u_for_edge, v_for_edge : nodes\n Nodes can be, for example, strings or numbers.\n Nodes must be hashable (and not None) Python objects.\n key : hashable identifier, optional (default=lowest unused integer)\n Used to distinguish multiedges between a pair of nodes.\n attr : keyword arguments, optional\n Edge data (or labels or objects) can be assigned using\n keyword arguments.\n\n Returns\n -------\n The edge key assigned to the edge.\n\n See Also\n --------\n add_edges_from : add a collection of edges\n\n Notes\n -----\n To replace/update edge data, use the optional key argument\n to identify a unique edge. Otherwise a new edge will be created.\n\n NetworkX algorithms designed for weighted graphs cannot use\n multigraphs directly because it is not clear how to handle\n multiedge weights. Convert to Graph using edge attribute\n 'weight' to enable weighted graph algorithms.\n\n Default keys are generated using the method `new_edge_key()`.\n This method can be overridden by subclassing the base class and\n providing a custom `new_edge_key()` method.\n\n Examples\n --------\n The following each add an additional edge e=(1, 2) to graph G:\n\n >>> G = nx.MultiGraph()\n >>> e = (1, 2)\n >>> ekey = G.add_edge(1, 2) # explicit two-node form\n >>> G.add_edge(*e) # single edge as tuple of two nodes\n 1\n >>> G.add_edges_from([(1, 2)]) # add edges from iterable container\n [2]\n\n Associate data to edges using keywords:\n\n >>> ekey = G.add_edge(1, 2, weight=3)\n >>> ekey = G.add_edge(1, 2, key=0, weight=4) # update data for key=0\n >>> ekey = G.add_edge(1, 3, weight=7, capacity=15, length=342.7)\n\n For non-string attribute keys, use subscript notation.\n\n >>> ekey = G.add_edge(1, 2)\n >>> G[1][2][0].update({0: 5})\n >>> G.edges[1, 2, 0].update({0: 5})\n ", "language": "en", "n_whitespaces": 678, "n_words": 302, "vocab_size": 187 }
https://github.com/networkx/networkx.git
1
set_logdir
def set_logdir(self, logdir): self.logfile = tempfile.NamedTemporaryFile( prefix="log_sync_out", dir=logdir, suffix=".log", delete=False ) self._closed = False
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
10
sync_client.py
61
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,741
0
53
37
13
132,332
14
ray
11
python/ray/tune/sync_client.py
Python
5
{ "docstring": "Sets the directory to log sync execution output in.\n\n Args:\n logdir (str): Log directory.\n ", "language": "en", "n_whitespaces": 39, "n_words": 14, "vocab_size": 14 }
https://github.com/ray-project/ray.git
1
setUp
def setUp(self): data_prefix = osp.join(osp.dirname(__file__), '../../data') img_path = osp.join(data_prefix, 'color.jpg') rng = np.random.RandomState(0) self.results1 = { 'img_id': 1, 'img_path': img_path, 'ori_height': 300, 'ori_width': 400, 'height': 600, 'width': 800, 'scale_factor': 2.0, 'flip': False, 'img': rng.rand(300, 400), 'gt_seg_map': rng.rand(300, 400), 'gt_masks': BitmapMasks(rng.rand(3, 300, 400), height=300, width=400), 'gt_bboxes_labels': rng.rand(3, ), 'gt_ignore_flags': np.array([0, 0, 1], dtype=np.bool) } self.results2 = { 'img_id': 1, 'img_path': img_path, 'ori_height': 300, 'ori_width': 400, 'height': 600, 'width': 800, 'scale_factor': 2.0, 'flip': False, 'img': rng.rand(300, 400), 'gt_seg_map': rng.rand(300, 400), 'gt_masks': BitmapMasks(rng.rand(3, 300, 400), height=300, width=400), 'gt_bboxes_labels': rng.rand(3, ) } self.meta_keys = ('img_id', 'img_path', 'ori_shape', 'img_shape', 'scale_factor', 'flip')
2cc631f7656258dec0d12bcce459f5fe3f781b68
12
test_formatting.py
432
Add Transforms
70,384
0
481
269
55
244,450
96
mmdetection
22
tests/test_datasets/test_pipelines/test_formatting.py
Python
37
{ "docstring": "Setup the model and optimizer which are used in every test method.\n\n TestCase calls functions in this order: setUp() -> testMethod() ->\n tearDown() -> cleanUp()\n ", "language": "en", "n_whitespaces": 46, "n_words": 25, "vocab_size": 22 }
https://github.com/open-mmlab/mmdetection.git
5
_no_global_under_venv
def _no_global_under_venv() -> bool: cfg_lines = _get_pyvenv_cfg_lines() if cfg_lines is None: # We're not in a "sane" venv, so assume there is no system # site-packages access (since that's PEP 405's default state). logger.warning( "Could not access 'pyvenv.cfg' despite a virtual environment " "being active. Assuming global site-packages is not accessible " "in this environment." ) return True for line in cfg_lines: match = _INCLUDE_SYSTEM_SITE_PACKAGES_REGEX.match(line) if match is not None and match.group("value") == "false": return True return False
f3166e673fe8d40277b804d35d77dcdb760fc3b3
12
virtualenv.py
109
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,177
0
186
59
59
20,009
78
pipenv
10
pipenv/patched/notpip/_internal/utils/virtualenv.py
Python
24
{ "docstring": "Check `{sys.prefix}/pyvenv.cfg` for system site-packages inclusion\n\n PEP 405 specifies that when system site-packages are not supposed to be\n visible from a virtual environment, `pyvenv.cfg` must contain the following\n line:\n\n include-system-site-packages = false\n\n Additionally, log a warning if accessing the file fails.\n ", "language": "en", "n_whitespaces": 63, "n_words": 41, "vocab_size": 37 }
https://github.com/pypa/pipenv.git
2
close
def close(self) -> None: if self.handles is not None: self.handles.close()
734db4f1fde2566a02b3c7ff661a479b0a71633c
10
_json.py
40
TYP: Return annotations for io/{formats,json} (#47516) * TYP: Return annotations for io/{formats,json} * flake8 * explicitly check whether width is None
40,007
0
35
23
9
167,423
10
pandas
3
pandas/io/json/_json.py
Python
9
{ "docstring": "\n If we opened a stream earlier, in _get_data_from_filepath, we should\n close it.\n\n If an open stream or file was passed, we leave it open.\n ", "language": "en", "n_whitespaces": 53, "n_words": 24, "vocab_size": 20 }
https://github.com/pandas-dev/pandas.git
4
_laplace_deep_collect
def _laplace_deep_collect(f, t): func = f.func args = list(f.args) if len(f.args) == 0: return f else: args = [_laplace_deep_collect(arg, t) for arg in args] if func.is_Add: return func(*args).collect(t) else: return func(*args)
7d773eb18daaef3c54f34d1ac6cbc5b83a5bb16c
15
transforms.py
119
Cleanup loops and ranges
48,899
0
96
73
24
198,387
31
sympy
10
sympy/integrals/transforms.py
Python
11
{ "docstring": "\n This is an internal helper function that traverses through the epression\n tree of `f(t)` and collects arguments. The purpose of it is that\n anything like `f(w*t-1*t-c)` will be written as `f((w-1)*t-c)` such that\n it can match `f(a*t+b)`.\n ", "language": "en", "n_whitespaces": 53, "n_words": 37, "vocab_size": 32 }
https://github.com/sympy/sympy.git
1
test_from_yaml_string
def test_from_yaml_string(self): valid_yaml_string = _dedent( ) assert DatasetMetadata.from_yaml_string(valid_yaml_string) duplicate_yaml_keys = _dedent( ) with self.assertRaises(TypeError): DatasetMetadata.from_yaml_string(duplicate_yaml_keys) valid_yaml_with_optional_keys = _dedent( ) assert DatasetMetadata.from_yaml_string(valid_yaml_with_optional_keys)
67e65c90e9490810b89ee140da11fdd13c356c9c
9
test_metadata_util.py
95
Dataset infos in yaml (#4926) * wip * fix Features yaml * splits to yaml * add _to_yaml_list * style * example: conll2000 * example: crime_and_punish * add pyyaml dependency * remove unused imports * remove validation tests * style * allow dataset_infos to be struct or list in YAML * fix test * style * update "datasets-cli test" + remove "version" * remove config definitions in conll2000 and crime_and_punish * remove versions for conll2000 and crime_and_punish * move conll2000 and cap dummy data * fix test * add tests * comments and tests * more test * don't mention the dataset_infos.json file in docs * nit in docs * docs * dataset_infos -> dataset_info * again * use id2label in class_label * update conll2000 * fix utf-8 yaml dump * --save_infos -> --save_info * Apply suggestions from code review Co-authored-by: Polina Kazakova <[email protected]> * style * fix reloading a single dataset_info * push info to README.md in push_to_hub * update test Co-authored-by: Polina Kazakova <[email protected]>
22,202
0
130
51
14
105,744
21
datasets
10
tests/test_metadata_util.py
Python
99
{ "docstring": "\\\n annotations_creators:\n - found\n language_creators:\n - found\n language:\n - en\n license:\n - unknown\n multilinguality:\n - monolingual\n pretty_name: Test Dataset\n size_categories:\n - 10K<n<100K\n source_datasets:\n - extended|other-yahoo-webscope-l6\n task_categories:\n - question-answering\n task_ids:\n - open-domain-qa\n \\\n annotations_creators:\n - found\n language:\n - en\n license:\n - unknown\n multilinguality:\n - monolingual\n pretty_name: Test Dataset\n size_categories:\n - 10K<n<100K\n source_datasets:\n - extended|other-yahoo-webscope-l6\n task_categories:\n - question-answering\n task_ids:\n - open-domain-qa\n task_ids:\n - open-domain-qa\n \\\n annotations_creators:\n - found\n language_creators:\n - found\n language:\n - en\n license:\n - unknown\n multilinguality:\n - monolingual\n pretty_name: Test Dataset\n size_categories:\n - 10K<n<100K\n source_datasets:\n - extended|other-yahoo-webscope-l6\n task_categories:\n - text-classification\n task_ids:\n - multi-class-classification\n paperswithcode_id:\n - squad\n configs:\n - en\n train-eval-index:\n - config: en\n task: text-classification\n task_id: multi_class_classification\n splits:\n train_split: train\n eval_split: test\n col_mapping:\n text: text\n label: target\n metrics:\n - type: accuracy\n name: Accuracy\n extra_gated_prompt: |\n By clicking on “Access repository” below, you also agree to ImageNet Terms of Access:\n [RESEARCHER_FULLNAME] (the \"Researcher\") has requested permission to use the ImageNet database (the \"Database\") at Princeton University and Stanford University. In exchange for such permission, Researcher hereby agrees to the following terms and conditions:\n 1. Researcher shall use the Database only for non-commercial research and educational purposes.\n extra_gated_fields:\n Company: text\n Country: text\n I agree to use this model for non-commerical use ONLY: checkbox\n ", "language": "en", "n_whitespaces": 1184, "n_words": 201, "vocab_size": 107 }
https://github.com/huggingface/datasets.git
1
test_playlist_from_string
def test_playlist_from_string(): playlist = Playlist.from_search_term("playlist:this is gorillaz") assert playlist.name == "This Is Gorillaz" assert playlist.url == "http://open.spotify.com/playlist/37i9dQZF1DZ06evO25rXbO" assert len(playlist.urls) > 1 @pytest.mark.vcr()
57ce5c09ee1ac101f79962e59bd44a0396dfb76c
@pytest.mark.vcr()
9
test_playlist.py
77
Search album by string enhancement (#1663)
5,605
1
36
34
19
30,466
22
spotify-downloader
11
tests/types/test_playlist.py
Python
5
{ "docstring": "\n Test if Playlist class can be initialized from string.\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
https://github.com/spotDL/spotify-downloader.git
16
untar_file
def untar_file(filename, location): # type: (str, str) -> None ensure_dir(location) if filename.lower().endswith(".gz") or filename.lower().endswith(".tgz"): mode = "r:gz" elif filename.lower().endswith(BZ2_EXTENSIONS): mode = "r:bz2" elif filename.lower().endswith(XZ_EXTENSIONS): mode = "r:xz" elif filename.lower().endswith(".tar"): mode = "r" else: logger.warning( "Cannot determine compression type for file %s", filename, ) mode = "r:*" tar = tarfile.open(filename, mode) try: leading = has_leading_dir([member.name for member in tar.getmembers()]) for member in tar.getmembers(): fn = member.name if leading: fn = split_leading_dir(fn)[1] path = os.path.join(location, fn) if not is_within_directory(location, path): message = ( "The tar file ({}) has a file ({}) trying to install " "outside target directory ({})" ) raise InstallationError(message.format(filename, path, location)) if member.isdir(): ensure_dir(path) elif member.issym(): try: # https://github.com/python/typeshed/issues/2673 tar._extract_member(member, path) # type: ignore except Exception as exc: # Some corrupt tar files seem to produce this # (specifically bad symlinks) logger.warning( "In the tar file %s the member %s is invalid: %s", filename, member.name, exc, ) continue else: try: fp = tar.extractfile(member) except (KeyError, AttributeError) as exc: # Some corrupt tar files seem to produce this # (specifically bad symlinks) logger.warning( "In the tar file %s the member %s is invalid: %s", filename, member.name, exc, ) continue ensure_dir(os.path.dirname(path)) assert fp is not None with open(path, "wb") as destfp: shutil.copyfileobj(fp, destfp) fp.close() # Update the timestamp (useful for cython compiled files) tar.utime(member, path) # member have any execute permissions for user/group/world? if member.mode & 0o111: set_extracted_file_to_default_mode_plus_executable(path) finally: tar.close()
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
18
unpacking.py
591
upd; format
12,505
0
1,194
345
133
61,319
229
transferlearning
44
.venv/lib/python3.8/site-packages/pip/_internal/utils/unpacking.py
Python
64
{ "docstring": "\n Untar the file (with path `filename`) to the destination `location`.\n All files are written based on system defaults and umask (i.e. permissions\n are not preserved), except that regular file members with any execute\n permissions (user, group, or world) have \"chmod +x\" applied after being\n written. Note that for windows, any execute changes using os.chmod are\n no-ops per the python docs.\n ", "language": "en", "n_whitespaces": 83, "n_words": 60, "vocab_size": 51 }
https://github.com/jindongwang/transferlearning.git
2
parent
def parent(self) -> DOMNode: if self._parent is None: raise NoParent(f"{self} has no parent") assert isinstance(self._parent, DOMNode) return self._parent
2635f58e7c3d10b161ee69a15ebfe6499ac26daa
11
dom.py
60
docstrings and tidy
43,685
0
57
34
17
181,946
18
textual
6
src/textual/dom.py
Python
13
{ "docstring": "Get the parent node.\n\n Raises:\n NoParent: If this is the root node.\n\n Returns:\n DOMNode: The node which is the direct parent of this node.\n ", "language": "en", "n_whitespaces": 67, "n_words": 24, "vocab_size": 17 }
https://github.com/Textualize/textual.git
2
merge_subcluster
def merge_subcluster(self, nominee_cluster, threshold): new_ss = self.squared_sum_ + nominee_cluster.squared_sum_ new_ls = self.linear_sum_ + nominee_cluster.linear_sum_ new_n = self.n_samples_ + nominee_cluster.n_samples_ new_centroid = (1 / new_n) * new_ls new_sq_norm = np.dot(new_centroid, new_centroid) # The squared radius of the cluster is defined: # r^2 = sum_i ||x_i - c||^2 / n # with x_i the n points assigned to the cluster and c its centroid: # c = sum_i x_i / n # This can be expanded to: # r^2 = sum_i ||x_i||^2 / n - 2 < sum_i x_i / n, c> + n ||c||^2 / n # and therefore simplifies to: # r^2 = sum_i ||x_i||^2 / n - ||c||^2 sq_radius = new_ss / new_n - new_sq_norm if sq_radius <= threshold**2: ( self.n_samples_, self.linear_sum_, self.squared_sum_, self.centroid_, self.sq_norm_, ) = (new_n, new_ls, new_ss, new_centroid, new_sq_norm) return True return False
1fc86b6aacd89da44a3b4e8abf7c3e2ba4336ffe
10
_birch.py
167
MNT Update black to stable version (#22474)
75,440
0
373
108
79
258,813
137
scikit-learn
17
sklearn/cluster/_birch.py
Python
17
{ "docstring": "Check if a cluster is worthy enough to be merged. If\n yes then merge.\n ", "language": "en", "n_whitespaces": 28, "n_words": 14, "vocab_size": 14 }
https://github.com/scikit-learn/scikit-learn.git
1
test_lda_numerical_consistency
def test_lda_numerical_consistency(learning_method, global_random_seed): rng = np.random.RandomState(global_random_seed) X64 = rng.uniform(size=(20, 10)) X32 = X64.astype(np.float32) lda_64 = LatentDirichletAllocation( n_components=5, random_state=global_random_seed, learning_method=learning_method ).fit(X64) lda_32 = LatentDirichletAllocation( n_components=5, random_state=global_random_seed, learning_method=learning_method ).fit(X32) assert_allclose(lda_32.components_, lda_64.components_) assert_allclose(lda_32.transform(X32), lda_64.transform(X64))
703bee65e2122ad273cf0b42460c5c28ca638af8
11
test_online_lda.py
170
ENH Preserving dtype for np.float32 in LatentDirichletAllocation (#24528) Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]> Co-authored-by: takoika <> Co-authored-by: Takeshi Oura <[email protected]>
76,684
0
75
110
23
261,187
31
scikit-learn
22
sklearn/decomposition/tests/test_online_lda.py
Python
12
{ "docstring": "Check numerical consistency between np.float32 and np.float64.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/scikit-learn/scikit-learn.git
1
current_columns
def current_columns(self): return copy.deepcopy(self.custcols) #deepcopy to prevent users from changing it
7b9bb6e62424e4b3c960e9e25c45a6946988959c
8
create_custom_column.py
28
Yet another version of CreateNewCustomColumn. My apologies for the multiple commits. I have been working with @davidfor and we cycled a few times. I hope this is the last, barring bugs.
45,933
0
25
15
11
188,795
11
calibre
5
src/calibre/gui2/preferences/create_custom_column.py
Python
2
{ "docstring": "\n Return the currently defined custom columns\n\n Return the currently defined custom columns including the ones that haven't\n yet been created. It is a dict of dicts defined as follows:\n custcols[lookup_name] = {\n 'label': lookup_name,\n 'name': column_heading,\n 'datatype': datatype,\n 'display': display,\n 'normalized': None,\n 'colnum': an integer used internally,\n 'is_multiple': is_multiple,\n }\n Columns that already exist will have additional attributes that this class\n doesn't use. See calibre.library.field_metadata.add_custom_field() for the\n complete list.\n ", "language": "en", "n_whitespaces": 278, "n_words": 69, "vocab_size": 58 }
https://github.com/kovidgoyal/calibre.git
1
get_tables
def get_tables(self) -> Response: q = 'SHOW TABLES;' return self.native_query(q)
257dfe6bac18d28088c7bfc79ca22cde682f9cd6
7
tdengine_handler.py
34
Added TDENgine Handler
25,874
0
40
18
10
116,966
10
mindsdb
5
mindsdb/integrations/handlers/tdengine_handler/tdengine_handler.py
Python
6
{ "docstring": "\n Get a list with all of the tabels in TDEngine\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/mindsdb/mindsdb.git
2
set_immutable
def set_immutable(self, immutable): for task in self.tasks: task.set_immutable(immutable)
577eee60d51bdd75d3658699effdf6f78a3e604d
9
canvas.py
35
Canvas.py doc enhancement (#7907) * Enhanced doc for canvas._chord.set_immutable() * Enhanced doc for canvas._chord.link() * Enhanced doc for canvas._chord.link_error() * Enhanced doc for canvas._chord.__length_hint__() * Enhanced doc for canvas._chord._descend() * Enhanced doc for canvas._chord.from_dict() * Enhanced doc for canvas._chord.run()
52,289
0
33
21
8
208,353
8
celery
5
celery/canvas.py
Python
3
{ "docstring": "Sets the immutable flag on the chord header only.\n\n Note:\n Does not affect the chord body.\n\n Arguments:\n immutable (bool): The new mutability value for chord header.\n ", "language": "en", "n_whitespaces": 69, "n_words": 26, "vocab_size": 21 }
https://github.com/celery/celery.git
1
test_model_admin_no_delete_permission_externalsubscriber
def test_model_admin_no_delete_permission_externalsubscriber(self): permission = Permission.objects.get(codename="delete_subscriber") self.user.user_permissions.add(permission) delete_confirmation_data = { ACTION_CHECKBOX_NAME: [self.s1.pk, self.s2.pk], "action": "delete_selected", "post": "yes", } response = self.client.post( reverse("admin:admin_views_subscriber_changelist"), delete_confirmation_data ) self.assertEqual(response.status_code, 403)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
test_actions.py
137
Refs #33476 -- Reformatted code with Black.
51,987
0
125
81
22
207,497
25
django
21
tests/admin_views/test_actions.py
Python
12
{ "docstring": "\n Permission is denied if the user doesn't have delete permission for a\n related model (ExternalSubscriber).\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 15 }
https://github.com/django/django.git
1
test_download_info_vcs
def test_download_info_vcs(self) -> None: finder = make_test_finder() with self._basic_resolver(finder) as resolver: ireq = get_processed_req_from_line( "pip-test-package @ git+https://github.com/pypa/pip-test-package" ) reqset = resolver.resolve([ireq], True) assert len(reqset.all_requirements) == 1 req = reqset.all_requirements[0] assert req.download_info assert isinstance(req.download_info.info, VcsInfo) assert req.download_info.url == "https://github.com/pypa/pip-test-package" assert req.download_info.info.vcs == "git"
c6baa7514ab87426780a004e34386c3b104a7f56
11
test_req.py
153
Add download_info to InstallRequirement
41,327
0
177
91
33
174,533
42
pip
19
tests/unit/test_req.py
Python
14
{ "docstring": "Test that download_info is set for requirements from git.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/pypa/pip.git
2
get_formatted_field_choices
def get_formatted_field_choices(self, field): if "\n" in field.choices: choices = map( lambda x: ( x.strip().rstrip(",").strip(), x.strip().rstrip(",").strip(), ), field.choices.split("\r\n"), ) else: choices = map(lambda x: (x.strip(), x.strip()), field.choices.split(",")) return choices
134bd19bef529f0c205a48cedb8574ee0c52d436
18
forms.py
172
add ability for form builder to split choices by newline - fixes #3001 - keep support for comma separated lists if supplied
16,608
0
172
99
23
76,983
28
wagtail
9
wagtail/contrib/forms/forms.py
Python
12
{ "docstring": "\n Returns a list of choices [(string, string),] for the field.\n Split the provided choices into a list, separated by new lines.\n If no new lines in the provided choices, split by commas.\n ", "language": "en", "n_whitespaces": 61, "n_words": 32, "vocab_size": 25 }
https://github.com/wagtail/wagtail.git
1
test_400_missing_param_without_id_access_token
def test_400_missing_param_without_id_access_token(self) -> None: channel = self.make_request( method="POST", path="/rooms/" + self.room_id + "/invite", content={ "id_server": "example.com", "medium": "email", "address": "[email protected]", }, access_token=self.tok, ) self.assertEqual(channel.code, 400) self.assertEqual(channel.json_body["errcode"], "M_MISSING_PARAM")
84ddcd7bbfe4100101741a408a91f283a8f742c7
12
test_rooms.py
133
Drop support for calling `/_matrix/client/v3/rooms/{roomId}/invite` without an `id_access_token` (#13241) Fixes #13206 Signed-off-by: Jacek Kusnierz [email protected]
72,915
0
162
75
26
249,435
27
synapse
13
tests/rest/client/test_rooms.py
Python
17
{ "docstring": "\n Test that a 3pid invite request returns 400 M_MISSING_PARAM\n if we do not include id_access_token.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 15 }
https://github.com/matrix-org/synapse.git
1
test_multiple_server_connections
def test_multiple_server_connections(tctx): server1 = Placeholder(Server) server2 = Placeholder(Server) playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False)
b3587b52b25077f68116b9852b041d33e7fc6601
11
test_http.py
61
make it black!
73,835
0
25
219
10
251,831
13
mitmproxy
13
test/mitmproxy/proxy/layers/http/test_http.py
Python
35
{ "docstring": "Test multiple requests being rewritten to different targets.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/mitmproxy/mitmproxy.git
2
parse
def parse(version): # type: (str) -> Union[LegacyVersion, Version] try: return Version(version) except InvalidVersion: return LegacyVersion(version)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
10
version.py
39
upd; format
13,074
0
41
21
14
62,933
15
transferlearning
5
.venv/lib/python3.8/site-packages/pip/_vendor/packaging/version.py
Python
5
{ "docstring": "\n Parse the given version string and return either a :class:`Version` object\n or a :class:`LegacyVersion` object depending on if the given version is\n a valid PEP 440 version or a legacy version.\n ", "language": "en", "n_whitespaces": 44, "n_words": 31, "vocab_size": 22 }
https://github.com/jindongwang/transferlearning.git
8
test_promote_types_metadata
def test_promote_types_metadata(self, dtype1, dtype2): metadata1 = {1: 1} metadata2 = {2: 2} dtype1 = np.dtype(dtype1, metadata=metadata1) dtype2 = np.dtype(dtype2, metadata=metadata2) try: res = np.promote_types(dtype1, dtype2) except TypeError: # Promotion failed, this test only checks metadata return if res.char not in "USV" or res.names is not None or res.shape != (): # All except string dtypes (and unstructured void) lose metadata # on promotion (unless both dtypes are identical). # At some point structured ones did not, but were restrictive. assert res.metadata is None elif res == dtype1: # If one result is the result, it is usually returned unchanged: assert res is dtype1 elif res == dtype2: # dtype1 may have been cast to the same type/kind as dtype2. # If the resulting dtype is identical we currently pick the cast # version of dtype1, which lost the metadata: if np.promote_types(dtype1, dtype2.kind) == dtype2: res.metadata is None else: res.metadata == metadata2 else: assert res.metadata is None # Try again for byteswapped version dtype1 = dtype1.newbyteorder() assert dtype1.metadata == metadata1 res_bs = np.promote_types(dtype1, dtype2) assert res_bs == res assert res_bs.metadata == res.metadata
1b9a98af861441899b882e2ceb40770b4a96b584
13
test_numeric.py
284
TST: Fixup slow metadata promotion tests to match new behaviour
38,594
0
495
175
112
160,301
181
numpy
18
numpy/core/tests/test_numeric.py
Python
25
{ "docstring": "Metadata handling in promotion does not appear formalized\n right now in NumPy. This test should thus be considered to\n document behaviour, rather than test the correct definition of it.\n\n This test is very ugly, it was useful for rewriting part of the\n promotion, but probably should eventually be replaced/deleted\n (i.e. when metadata handling in promotion is better defined).\n ", "language": "en", "n_whitespaces": 100, "n_words": 58, "vocab_size": 46 }
https://github.com/numpy/numpy.git
7
equals
def equals(self, other): if not isinstance(other, PermutationGroup): return False set_self_gens = set(self.generators) set_other_gens = set(other.generators) # before reaching the general case there are also certain # optimisation and obvious cases requiring less or no actual # computation. if set_self_gens == set_other_gens: return True # in the most general case it will check that each generator of # one group belongs to the other PermutationGroup and vice-versa for gen1 in set_self_gens: if not other.contains(gen1): return False for gen2 in set_other_gens: if not self.contains(gen2): return False return True
3e167a67bde4b4817666de48bf98d247bed86e2d
10
perm_groups.py
127
Update sympy/combinatorics/perm_groups.py
47,843
0
251
76
59
196,343
86
sympy
12
sympy/combinatorics/perm_groups.py
Python
14
{ "docstring": "Return ``True`` if PermutationGroup generated by elements in the\n group are same i.e they represent the same PermutationGroup.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation, PermutationGroup\n >>> p = Permutation(0, 1, 2, 3, 4, 5)\n >>> G = PermutationGroup([p, p**2])\n >>> H = PermutationGroup([p**2, p])\n >>> G.generators == H.generators\n False\n >>> G.equals(H)\n True\n\n ", "language": "en", "n_whitespaces": 137, "n_words": 53, "vocab_size": 43 }
https://github.com/sympy/sympy.git
2
check_libraries
def check_libraries(): modules = { 'jinja2': _missing_str("jinja2"), 'yaml': _missing_str("PyYAML"), 'PyQt5.QtQml': _missing_str("PyQt5.QtQml"), 'PyQt5.QtSql': _missing_str("PyQt5.QtSql"), 'PyQt5.QtOpenGL': _missing_str("PyQt5.QtOpenGL"), 'PyQt5.QtDBus': _missing_str("PyQt5.QtDBus"), } if sys.version_info < (3, 9): # Backport required modules['importlib_resources'] = _missing_str("importlib_resources") _check_modules(modules)
cd4429db25746acbec3ef434935959d0f5f66224
11
earlyinit.py
145
Remove 3.6 pins from requirements files. Then regenerate the relevant files. Also drop dataclasses from requirements files. TODO: should we drop the dataclasses-types requirement for mypy too?
117,285
0
101
82
29
320,692
30
qutebrowser
6
qutebrowser/misc/earlyinit.py
Python
13
{ "docstring": "Check if all needed Python libraries are installed.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/qutebrowser/qutebrowser.git
9
alpha_composite
def alpha_composite(self, im, dest=(0, 0), source=(0, 0)): if not isinstance(source, (list, tuple)): msg = "Source must be a tuple" raise ValueError(msg) if not isinstance(dest, (list, tuple)): msg = "Destination must be a tuple" raise ValueError(msg) if not len(source) in (2, 4): msg = "Source must be a 2 or 4-tuple" raise ValueError(msg) if not len(dest) == 2: msg = "Destination must be a 2-tuple" raise ValueError(msg) if min(source) < 0: msg = "Source must be non-negative" raise ValueError(msg) if len(source) == 2: source = source + im.size # over image, crop if it's not the whole thing. if source == (0, 0) + im.size: overlay = im else: overlay = im.crop(source) # target for the paste box = dest + (dest[0] + overlay.width, dest[1] + overlay.height) # destination image. don't copy if we're using the whole image. if box == (0, 0) + self.size: background = self else: background = self.crop(box) result = alpha_composite(background, overlay) self.paste(result, box)
2ae55ccbdad9c842929fb238ea1eb81d1f999024
11
Image.py
362
Improve exception traceback readability
70,092
0
441
226
80
243,723
157
Pillow
21
src/PIL/Image.py
Python
41
{ "docstring": "'In-place' analog of Image.alpha_composite. Composites an image\n onto this image.\n\n :param im: image to composite over this one\n :param dest: Optional 2 tuple (left, top) specifying the upper\n left corner in this (destination) image.\n :param source: Optional 2 (left, top) tuple for the upper left\n corner in the overlay source image, or 4 tuple (left, top, right,\n bottom) for the bounds of the source rectangle\n\n Performance Note: Not currently implemented in-place in the core layer.\n ", "language": "en", "n_whitespaces": 144, "n_words": 75, "vocab_size": 49 }
https://github.com/python-pillow/Pillow.git
7
override_recursive
def override_recursive(a, b): for key in b: if isinstance(b[key], dict) is False: a[key] = b[key] elif key not in a or isinstance(a[key], dict) is False: a[key] = b[key] # make config section empty by demand elif isinstance(b[key], dict) is True and b[key] == {}: a[key] = b[key] else: override_recursive(a[key], b[key]) @pytest.fixture(scope="module")
ae4fa77a2c0a9fa57cc9c8bc7e8961dd01e4067e
@pytest.fixture(scope="module")
13
conftest.py
176
It mysql api test pytest (#3694) * migration to pytest * Tests start passing * Fully working tests * Increase timeout for mindsdb start * reduce amount of logs * show logs only for failed tests
25,896
1
135
106
35
117,084
51
mindsdb
9
tests/integration_tests/flows/conftest.py
Python
10
{ "docstring": "Overrides some elements in json 'a' by elements in json 'b'", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 8 }
https://github.com/mindsdb/mindsdb.git
1
get_all_applicant
def get_all_applicant(self) -> List[NoSQLUserApplication]: return self.user_application_manager.all()
58da94aae4e66576a7f40b55f2de2d69693bfe20
8
user_manager.py
33
added user application manager, reverted default signup on user creation
286
0
20
19
6
2,395
6
PySyft
6
packages/syft/src/syft/core/node/common/node_manager/user_manager.py
Python
7
{ "docstring": "Returns the application data of all the applicants in the database.\n\n Returns:\n List[NoSQLUserApplication]: All user applications.\n ", "language": "en", "n_whitespaces": 41, "n_words": 16, "vocab_size": 14 }
https://github.com/OpenMined/PySyft.git
3
test_gumbel_softmax
def test_gumbel_softmax(self):
    for fw, sess in framework_iterator(frameworks=("tf2", "tf"), session=True):
        batch_size = 1000
        num_categories = 5
        input_space = Box(-1.0, 1.0, shape=(batch_size, num_categories))
        input_space.seed(42)

        # Batch of size=n and deterministic.
        inputs = input_space.sample()
        gumbel_softmax = GumbelSoftmax(inputs, {}, temperature=1.0)

        expected = softmax(inputs)
        # Sample n times, expect always mean value (deterministic draw).
        out = gumbel_softmax.deterministic_sample()
        check(out, expected)

        # Batch of size=n and non-deterministic -> expect roughly that
        # the max-likelihood (argmax) ints are output (most of the time).
        inputs = input_space.sample()
        gumbel_softmax = GumbelSoftmax(inputs, {}, temperature=1.0)
        expected_mean = np.mean(np.argmax(inputs, -1)).astype(np.float32)
        outs = gumbel_softmax.sample()
        if sess:
            outs = sess.run(outs)
        check(np.mean(np.argmax(outs, -1)), expected_mean, rtol=0.08)
432f023642731bf53aac9b6c778f9dd7b1d82a57
15
test_distributions.py
286
[RLlib] Deprecate `AlgorithmConfig.framework("tfe")`: Use `tf2` instead. (#29755)
30,547
0
337
188
71
135,117
99
ray
32
rllib/models/tests/test_distributions.py
Python
18
{ "docstring": "Tests the GumbelSoftmax ActionDistribution (tf + eager only).", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/ray-project/ray.git
7
_render_cmd
def _render_cmd(cmd, cwd, template, saltenv=None, pillarenv=None, pillar_override=None):
    if saltenv is None:
        saltenv = __opts__.get("saltenv", "base")
    if not template:
        return (cmd, cwd)

    # render the path as a template using path_template_engine as the engine
    if template not in salt.utils.templates.TEMPLATE_REGISTRY:
        raise CommandExecutionError(
            "Attempted to render file paths with unavailable engine {}".format(template)
        )

    kwargs = {}
    kwargs["salt"] = __salt__
    if pillarenv is not None or pillar_override is not None:
        pillarenv = pillarenv or __opts__["pillarenv"]
        kwargs["pillar"] = _gather_pillar(pillarenv, pillar_override)
    else:
        kwargs["pillar"] = __pillar__
    kwargs["grains"] = __grains__
    kwargs["opts"] = __opts__
    kwargs["saltenv"] = saltenv
21d3f4bc9eb7b9fb1118c59073595a9e9ee836bd
12
cmdmod.py
225
fixes salt bug 61507
54,474
0
184
155
60
216,231
88
salt
20
salt/modules/cmdmod.py
Python
23
{ "docstring": "\n If template is a valid template engine, process the cmd and cwd through\n that engine.\n ", "language": "en", "n_whitespaces": 25, "n_words": 15, "vocab_size": 14 }
https://github.com/saltstack/salt.git
2
_compile_output
def _compile_output(self) -> Union[List[str], List[Tuple[str, int]]]:
    action = self._job.replace("-", "_")
    processor = getattr(self, f"_get_{action}")
    logger.debug("Processor: %s", processor)
    return [item for item in processor()]  # pylint:disable=unnecessary-comprehension
e2a77e7c6e84e81f642cb22f528e25e3f2d2dbc1
10
jobs.py
106
Alignments Tool - Typing, Documentation + Re-org
21,118
0
61
63
24
101,714
25
faceswap
15
tools/alignments/jobs.py
Python
12
{ "docstring": " Compile list of frames that meet criteria\n\n Returns\n -------\n list\n List of filenames or filenames and face indices for the selected criteria\n ", "language": "en", "n_whitespaces": 62, "n_words": 22, "vocab_size": 18 }
https://github.com/deepfakes/faceswap.git
1
test_layout_change_warning
def test_layout_change_warning(layout):
    fig, ax = plt.subplots(layout=layout)
    with pytest.warns(UserWarning, match='The figure layout has changed to'):
        plt.tight_layout()


@check_figures_equal(extensions=["png", "pdf"])
94397f4949511881dfc35fbab76cb237f6911424
@check_figures_equal(extensions=["png", "pdf"])
11
test_figure.py
84
BUG: Warn when an existing layout manager changes to tight layout
24,198
1
32
35
17
110,522
17
matplotlib
13
lib/matplotlib/tests/test_figure.py
Python
4
{ "docstring": "\n Raise a warning when a previously assigned layout changes to tight using\n plt.tight_layout().\n ", "language": "en", "n_whitespaces": 23, "n_words": 13, "vocab_size": 12 }
https://github.com/matplotlib/matplotlib.git
2
move_cursor_forward
def move_cursor_forward(self) -> None:
    row, col = self.cursor_position
    if col == self.screen_size.col - 1:
        row += 1
        col = 0
    else:
        col += 1
    SetConsoleCursorPosition(
        self._handle, coords=WindowsCoordinates(row=row, col=col)
    )
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
11
_win32_console.py
91
Vendor in pip 22.1.2
3,923
0
115
56
23
21,553
29
pipenv
10
pipenv/patched/notpip/_vendor/rich/_win32_console.py
Python
11
{ "docstring": "Move the cursor forward a single cell. Wrap to the next line if required.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
https://github.com/pypa/pipenv.git
2
get_item_info
def get_item_info(filters):
    from erpnext.stock.report.stock_ledger.stock_ledger import get_item_group_condition

    conditions = [get_item_group_condition(filters.get("item_group"))]
    if filters.get("brand"):
        conditions.append("item.brand=%(brand)s")
    conditions.append("is_stock_item = 1")

    return frappe.db.sql(
        .format(
            " and ".join(conditions)
        ),
        filters,
        as_dict=1,
    )
494bd9ef78313436f0424b918f200dab8fc7c20b
12
itemwise_recommended_reorder_level.py
128
style: format code with black
14,656
0
12
76
24
67,893
25
erpnext
16
erpnext/stock/report/itemwise_recommended_reorder_level/itemwise_recommended_reorder_level.py
Python
14
{ "docstring": "select name, item_name, description, brand, item_group,\n\t\tsafety_stock, lead_time_days from `tabItem` item where {}", "language": "en", "n_whitespaces": 11, "n_words": 13, "vocab_size": 13 }
https://github.com/frappe/erpnext.git
5
parse_content_disposition
def parse_content_disposition(reply):
    is_inline = True
    filename = None
    content_disposition_header = b'Content-Disposition'
    # First check if the Content-Disposition header has a filename
    # attribute.
    if reply.hasRawHeader(content_disposition_header):
        # We use the unsafe variant of the filename as we sanitize it via
        # os.path.basename later.
        try:
            value = bytes(reply.rawHeader(content_disposition_header))
            log.network.debug("Parsing Content-Disposition: {value!r}")
            content_disposition = ContentDisposition.parse(value)
            filename = content_disposition.filename()
        except ContentDispositionError as e:
            log.network.error(f"Error while parsing filename: {e}")
        else:
            is_inline = content_disposition.is_inline()
    # Then try to get filename from url
    if not filename:
        filename = reply.url().path().rstrip('/')
    # If that fails as well, use a fallback
    if not filename:
        filename = 'qutebrowser-download'
    return is_inline, os.path.basename(filename)
bd8c940320b7d8476b422edd9c417703db64f603
14
http.py
222
Simplify some syntax Found via pyupgrade
117,279
0
251
121
68
320,685
100
qutebrowser
23
qutebrowser/browser/webkit/http.py
Python
19
{ "docstring": "Parse a content_disposition header.\n\n Args:\n reply: The QNetworkReply to get a filename for.\n\n Return:\n A (is_inline, filename) tuple.\n ", "language": "en", "n_whitespaces": 41, "n_words": 18, "vocab_size": 17 }
https://github.com/qutebrowser/qutebrowser.git
1
get_protobuf_schema
def get_protobuf_schema() -> GeneratedProtocolMessageType:
    return GetAllRequestsMessage_PB


@serializable()
05edf746cf5742b562996cf1a319b404152960e5
@serializable()
6
object_request_messages.py
26
MOVE GetAllRequestsMessage and GetAllRequestsResponseMessage to the proper message file
12
1
20
9
7
20
7
PySyft
4
packages/syft/src/syft/core/node/common/node_service/object_request/object_request_messages.py
Python
17
{ "docstring": "Return the type of protobuf object which stores a class of this type\n\n As a part of serialization and deserialization, we need the ability to\n lookup the protobuf object type directly from the object type. This\n static method allows us to do this.\n\n Importantly, this method is also used to create the reverse lookup ability within\n the metaclass of Serializable. In the metaclass, it calls this method and then\n it takes whatever type is returned from this method and adds an attribute to it\n with the type of this class attached to it. See the MetaSerializable class for details.\n\n :return: the type of protobuf object which corresponds to this class.\n :rtype: GeneratedProtocolMessageType\n\n ", "language": "en", "n_whitespaces": 182, "n_words": 112, "vocab_size": 63 }
https://github.com/OpenMined/PySyft.git
1
_get_server
def _get_server(self):
    live_run = self._get_live_run()
    return live_run.architect.server
c946fb395d52e0f7ad67646a1c01b8327c232027
8
tests.py
34
Updating to Mephisto 1.0 (#4426) * Version bumps * Correct version * Correct beta release * Tests should _run_, but fail * Temporarily force run all crowdsourcing tests * Forcerun crowdsourcing checks * Typo * Update config.yml * Minor * Update acute_eval_runner.py * Simple fix, more tests run * Force cleanup of the server. * Correct AcuteEvalRunner.run_unit * using v1.0.0 from Mephisto github * Rolling back temp solution * Fix model chat test? * chat demo fix * QA fix * model image chat * Only add sleep time to model image chat * Remove agent retry loop * Remove 2 lines * Lengthen time * Fixing model chat onboarding and task frontend * fix acute eval test * Remove TODOs Co-authored-by: Eric Smith <[email protected]> Co-authored-by: Stephen Roller <[email protected]> Co-authored-by: EricMichaelSmith <[email protected]> Co-authored-by: Dexter Ju <[email protected]>
47,139
0
28
19
7
194,984
7
ParlAI
6
parlai/crowdsourcing/utils/tests.py
Python
3
{ "docstring": "\n Return the MockArchitect's server associated with this run\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
https://github.com/facebookresearch/ParlAI.git
1
_installed_pip
def _installed_pip(self):
    with Popen(f"{sys.executable} -m pip freeze", shell=True, stdout=PIPE) as pip:
        installed = pip.communicate()[0].decode(self._encoding, errors="replace").splitlines()
    return "\n".join(installed)
48c886b3dce3d3117ad16edaf35c8abd28dc51f5
15
sysinfo.py
103
Allow decoding errors
21,435
0
49
55
17
102,070
17
faceswap
16
lib/sysinfo.py
Python
4
{ "docstring": " str: The list of installed pip packages within Faceswap's scope. ", "language": "en", "n_whitespaces": 11, "n_words": 10, "vocab_size": 10 }
https://github.com/deepfakes/faceswap.git
2
copy2
def copy2(src, dst):
    if os.path.isdir(dst):
        dst = os.path.join(dst, os.path.basename(src))
    copyfile(src, dst)
    copystat(src, dst)
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
13
shutil.py
78
Vendor in pip 22.1.2
3,771
0
32
49
12
21,340
13
pipenv
10
pipenv/patched/notpip/_vendor/distlib/_backport/shutil.py
Python
5
{ "docstring": "Copy data and all stat info (\"cp -p src dst\").\n\n The destination may be a directory.\n\n ", "language": "en", "n_whitespaces": 22, "n_words": 16, "vocab_size": 16 }
https://github.com/pypa/pipenv.git
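A small sketch of the behaviour described above, using the standard-library shutil.copy2 (which the vendored backport mirrors); the temporary file names are made up for the example.

import shutil
import tempfile
from pathlib import Path

tmp = Path(tempfile.mkdtemp())
src = tmp / "source.txt"
src.write_text("hello")
(tmp / "target_dir").mkdir()

# Copying onto a directory places the file inside it, preserving stat info.
copied = shutil.copy2(src, tmp / "target_dir")
print(copied)  # .../target_dir/source.txt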
1
_merge_batch_beams
def _merge_batch_beams(self, x):
    r
    check_type(x, 'x', (Variable), 'BeamSearchDecoder._merge_batch_beams')
    # TODO: avoid fake shape in compile-time like tile_beam_merge_with_batch
    return nn.reshape(x, shape=[-1] + list(x.shape[2:]))
ffcde21305c61d950a9f93e57e6180c9a9665b87
13
beam.py
74
add disco_diffusion_ernievil_base
10,041
0
49
46
22
50,183
22
PaddleHub
9
modules/image/text_to_image/disco_diffusion_ernievil_base/vit_b_16x/ernievil2/transformers/beam.py
Python
15
{ "docstring": "\n Reshape a tensor with shape `[batch_size, beam_size, ...]` to a new\n tensor with shape `[batch_size * beam_size, ...]`.\n\n Parameters:\n x(Variable): A tensor with shape `[batch_size, beam_size, ...]`. The\n data type should be float32, float64, int32, int64 or bool.\n\n Returns:\n Variable: A tensor with shape `[batch_size * beam_size, ...]`, whose \\\n data type is same as `x`.\n ", "language": "en", "n_whitespaces": 144, "n_words": 56, "vocab_size": 36 }
https://github.com/PaddlePaddle/PaddleHub.git
1
_median_bias
def _median_bias(n):
    ii_2 = jnp.arange(2., n, 2)
    return 1 + jnp.sum(1. / (ii_2 + 1) - 1. / ii_2)
e085370ec4137cf0f73c5163cb664bc4e1c46082
12
signal_helper.py
59
Add some functions for spectral analysis. This commit adds "stft", "csd", and "welch" functions in scipy.signal.
26,584
0
22
40
17
119,313
19
jax
6
jax/_src/third_party/scipy/signal_helper.py
Python
3
{ "docstring": "\n Returns the bias of the median of a set of periodograms relative to\n the mean.\n See Appendix B from [1]_ for details.\n Parameters\n ----------\n n : int\n Numbers of periodograms being averaged.\n Returns\n -------\n bias : float\n Calculated bias.\n References\n ----------\n .. [1] B. Allen, W.G. Anderson, P.R. Brady, D.A. Brown, J.D.E. Creighton.\n \"FINDCHIRP: an algorithm for detection of gravitational waves from\n inspiraling compact binaries\", Physical Review D 85, 2012,\n :arxiv:`gr-qc/0509116`\n ", "language": "en", "n_whitespaces": 122, "n_words": 71, "vocab_size": 58 }
https://github.com/google/jax.git
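Reading the two-line body above, the returned bias can be written in closed form; this is just a transcription of the code, with n the number of averaged periodograms:

\operatorname{bias}(n) \;=\; 1 \;+\; \sum_{\substack{k = 2, 4, 6, \dots \\ k < n}} \left( \frac{1}{k+1} - \frac{1}{k} \right)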
2
parseaddr
def parseaddr(addr):
    addrs = _AddressList(addr).addresslist
    if not addrs:
        return '', ''
    return addrs[0]


# rfc822.unquote() doesn't properly de-backslash-ify in Python pre-2.3.
8198943edd73a363c266633e1aa5b2a9e9c9f526
9
utils.py
51
add python 3.10.4 for windows
57,132
0
39
27
20
223,882
21
XX-Net
5
python3.10.4/Lib/email/utils.py
Python
5
{ "docstring": "\n Parse addr into its constituent realname and email address parts.\n\n Return a tuple of realname and email address, unless the parse fails, in\n which case return a 2-tuple of ('', '').\n ", "language": "en", "n_whitespaces": 44, "n_words": 31, "vocab_size": 26 }
https://github.com/XX-net/XX-Net.git
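A quick illustration of the parse/fallback behaviour described above, via the public email.utils wrapper around _AddressList:

from email.utils import parseaddr

print(parseaddr("Alice Example <alice@example.com>"))  # ('Alice Example', 'alice@example.com')
print(parseaddr("alice@example.com"))                  # ('', 'alice@example.com')
print(parseaddr(""))                                   # ('', '')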
2
handle_defect
def handle_defect(self, obj, defect):
    if self.raise_on_defect:
        raise defect
    self.register_defect(obj, defect)
8198943edd73a363c266633e1aa5b2a9e9c9f526
7
_policybase.py
40
add python 3.10.4 for windows
57,022
0
42
25
10
223,636
10
XX-Net
6
python3.10.4/Lib/email/_policybase.py
Python
4
{ "docstring": "Based on policy, either raise defect or call register_defect.\n\n handle_defect(obj, defect)\n\n defect should be a Defect subclass, but in any case must be an\n Exception subclass. obj is the object on which the defect should be\n registered if it is not raised. If the raise_on_defect is True, the\n defect is raised as an error, otherwise the object and the defect are\n passed to register_defect.\n\n This method is intended to be called by parsers that discover defects.\n The email package parsers always call it with Defect instances.\n\n ", "language": "en", "n_whitespaces": 155, "n_words": 86, "vocab_size": 60 }
https://github.com/XX-net/XX-Net.git
10
output_difference
def output_difference(self, example, got, optionflags):
    want = example.want
    # If <BLANKLINE>s are being used, then replace blank lines
    # with <BLANKLINE> in the actual output string.
    if not (optionflags & DONT_ACCEPT_BLANKLINE):
        got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)

    # Check if we should use diff.
    if self._do_a_fancy_diff(want, got, optionflags):
        # Split want & got into lines.
        want_lines = want.splitlines(keepends=True)
        got_lines = got.splitlines(keepends=True)
        # Use difflib to find their differences.
        if optionflags & REPORT_UDIFF:
            diff = difflib.unified_diff(want_lines, got_lines, n=2)
            diff = list(diff)[2:]  # strip the diff header
            kind = 'unified diff with -expected +actual'
        elif optionflags & REPORT_CDIFF:
            diff = difflib.context_diff(want_lines, got_lines, n=2)
            diff = list(diff)[2:]  # strip the diff header
            kind = 'context diff with expected followed by actual'
        elif optionflags & REPORT_NDIFF:
            engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
            diff = list(engine.compare(want_lines, got_lines))
            kind = 'ndiff with -expected +actual'
        else:
            assert 0, 'Bad diff option'
        return 'Differences (%s):\n' % kind + _indent(''.join(diff))

    # If we're not using diff, then simply list the expected
    # output followed by the actual output.
    if want and got:
        return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
    elif want:
        return 'Expected:\n%sGot nothing\n' % _indent(want)
    elif got:
        return 'Expected nothing\nGot:\n%s' % _indent(got)
    else:
        return 'Expected nothing\nGot nothing\n'
8198943edd73a363c266633e1aa5b2a9e9c9f526
15
doctest.py
396
add python 3.10.4 for windows
56,917
0
589
229
111
223,462
194
XX-Net
32
python3.10.4/Lib/doctest.py
Python
30
{ "docstring": "\n Return a string describing the differences between the\n expected output for a given example (`example`) and the actual\n output (`got`). `optionflags` is the set of option flags used\n to compare `want` and `got`.\n ", "language": "en", "n_whitespaces": 70, "n_words": 33, "vocab_size": 27 }
https://github.com/XX-net/XX-Net.git
1
subclass_exception
def subclass_exception(name, bases, module, attached_to):
    return type(
        name,
        bases,
        {
            "__module__": module,
            "__qualname__": "%s.%s" % (attached_to.__qualname__, name),
        },
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
base.py
61
Refs #33476 -- Reformatted code with Black.
51,117
0
78
39
17
205,408
19
django
7
django/db/models/base.py
Python
9
{ "docstring": "\n Create exception subclass. Used by ModelBase below.\n\n The exception is created in a way that allows it to be pickled, assuming\n that the returned exception class will be added as an attribute to the\n 'attached_to' class.\n ", "language": "en", "n_whitespaces": 52, "n_words": 36, "vocab_size": 30 }
https://github.com/django/django.git
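A standalone sketch of the same technique described by the docstring above: building an exception subclass with type() and pinning __module__/__qualname__ so pickle can find it again. The Article class and DoesNotExist name are illustrative stand-ins, not Django's real models.

import pickle


class Article:
    pass


def subclass_exception(name, bases, module, attached_to):
    # Same shape as the helper above: module and qualname make the class importable.
    return type(
        name,
        bases,
        {
            "__module__": module,
            "__qualname__": "%s.%s" % (attached_to.__qualname__, name),
        },
    )


Article.DoesNotExist = subclass_exception(
    "DoesNotExist", (Exception,), __name__, Article
)

err = Article.DoesNotExist("no such article")
# Round-trips because pickle can resolve <module>.Article.DoesNotExist by qualname.
assert pickle.loads(pickle.dumps(err)).args == ("no such article",)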
12
_make_zipfile
def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None):
    zip_filename = base_name + ".zip"
    archive_dir = os.path.dirname(base_name)

    if not os.path.exists(archive_dir):
        if logger is not None:
            logger.info("creating %s", archive_dir)
        if not dry_run:
            os.makedirs(archive_dir)

    # If zipfile module is not available, try spawning an external 'zip'
    # command.
    try:
        import zipfile
    except ImportError:
        zipfile = None

    if zipfile is None:
        _call_external_zip(base_dir, zip_filename, verbose, dry_run)
    else:
        if logger is not None:
            logger.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)

        if not dry_run:
            zip = zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED)

            for dirpath, dirnames, filenames in os.walk(base_dir):
                for name in filenames:
                    path = os.path.normpath(os.path.join(dirpath, name))
                    if os.path.isfile(path):
                        zip.write(path, path)
                        if logger is not None:
                            logger.info("adding '%s'", path)
            zip.close()

    return zip_filename


_ARCHIVE_FORMATS = {
    'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
    'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
    'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"),
    'zip': (_make_zipfile, [], "ZIP file"),
}

if _BZ2_SUPPORTED:
    _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file")
c69d55f7c82d5ae2cce542bcfb98d043ca4836a0
21
shutil.py
479
Vendor in pip 22.1.2
3,780
0
529
210
97
21,352
148
pipenv
34
pipenv/patched/notpip/_vendor/distlib/_backport/shutil.py
Python
30
{ "docstring": "Create a zip file from all the files under 'base_dir'.\n\n The output zip file will be named 'base_name' + \".zip\". Uses either the\n \"zipfile\" Python module (if available) or the InfoZIP \"zip\" utility\n (if installed and found on the default search path). If neither tool is\n available, raises ExecError. Returns the name of the output zip\n file.\n ", "language": "en", "n_whitespaces": 78, "n_words": 57, "vocab_size": 47 }
https://github.com/pypa/pipenv.git
3
needs_document_end_workaround
def needs_document_end_workaround(self):
    if objects.backend == usertypes.Backend.QtWebKit:
        return False

    assert objects.backend == usertypes.Backend.QtWebEngine, objects.backend

    broken_scripts = [
        ('http://userstyles.org', None),
        ('https://github.com/ParticleCore', 'Iridium'),
    ]
    return any(self._matches_id(namespace=namespace, name=name)
               for namespace, name in broken_scripts)
c5a51eb0bcbab0b68cdfbf3eba2e681cff2adf7a
10
greasemonkey.py
112
Drop Qt < 5.15 Fixes #7091 TODO: Add changelog
117,818
0
122
71
25
321,601
29
qutebrowser
13
qutebrowser/browser/greasemonkey.py
Python
10
{ "docstring": "Check whether to force @run-at document-end.\n\n This needs to be done on QtWebEngine for known-broken scripts.\n\n On Qt 5.12, accessing the DOM isn't possible with \"@run-at\n document-start\". It was documented to be impossible before, but seems\n to work fine.\n\n However, some scripts do DOM access with \"@run-at document-start\". Fix\n those by forcing them to use document-end instead.\n ", "language": "en", "n_whitespaces": 106, "n_words": 57, "vocab_size": 48 }
https://github.com/qutebrowser/qutebrowser.git
1
test_explorer_list_private
def test_explorer_list_private(self):
    response = self.client.get(
        reverse("wagtailadmin_explore", args=(self.private_page.id,))
    )

    # Check the response
    self.assertEqual(response.status_code, 200)

    # Must have one privacy icon (next to the private child page)
    self.assertContains(
        response,
        '<span class="indicator privacy-indicator icon icon-no-view"',
        count=1,
    )
d10f15e55806c6944827d801cd9c2d53f5da4186
14
test_privacy.py
88
Reformat with black
15,840
0
135
53
30
72,114
35
wagtail
13
wagtail/admin/tests/test_privacy.py
Python
10
{ "docstring": "\n This tests that there is a padlock displayed\n next to the private child page in the private pages explorer listing\n ", "language": "en", "n_whitespaces": 42, "n_words": 20, "vocab_size": 18 }
https://github.com/wagtail/wagtail.git
1
test_order_query_customer
def test_order_query_customer(api_client):
    query =

    response = api_client.post_graphql(query)

    assert_no_permission(response)


@pytest.mark.parametrize(
    "total_authorized, total_charged, expected_status",
    [
        (Decimal("98.40"), Decimal("0"), OrderAuthorizeStatusEnum.FULL.name),
        (Decimal("0"), Decimal("98.40"), OrderAuthorizeStatusEnum.FULL.name),
        (Decimal("10"), Decimal("88.40"), OrderAuthorizeStatusEnum.FULL.name),
        (Decimal("0"), Decimal("0"), OrderAuthorizeStatusEnum.NONE.name),
        (Decimal("11"), Decimal("0"), OrderAuthorizeStatusEnum.PARTIAL.name),
        (Decimal("0"), Decimal("50.00"), OrderAuthorizeStatusEnum.PARTIAL.name),
        (Decimal("10"), Decimal("40.40"), OrderAuthorizeStatusEnum.PARTIAL.name),
    ],
)
9effd5aec81acbdd2a1076c1d72bbee1afcc65a1
@pytest.mark.parametrize( "total_authorized, total_charged, expected_status", [ (Decimal("98.40"), Decimal("0"), OrderAuthorizeStatusEnum.FULL.name), (Decimal("0"), Decimal("98.40"), OrderAuthorizeStatusEnum.FULL.name), (Decimal("10"), Decimal("88.40"), OrderAuthorizeStatusEnum.FULL.name), (Decimal("0"), Decimal("0"), OrderAuthorizeStatusEnum.NONE.name), (Decimal("11"), Decimal("0"), OrderAuthorizeStatusEnum.PARTIAL.name), (Decimal("0"), Decimal("50.00"), OrderAuthorizeStatusEnum.PARTIAL.name), (Decimal("10"), Decimal("40.40"), OrderAuthorizeStatusEnum.PARTIAL.name), ], )
11
test_order.py
261
restructure order app tests (#11226)
5,251
1
101
20
26
29,667
36
saleor
15
saleor/graphql/order/tests/queries/test_order.py
Python
14
{ "docstring": "\n query OrdersQuery {\n orders(first: 1) {\n edges {\n node {\n id\n }\n }\n }\n }\n ", "language": "en", "n_whitespaces": 146, "n_words": 15, "vocab_size": 9 }
https://github.com/saleor/saleor.git
1
test_numeric_repl
def test_numeric_repl(file, multiline_file):
    file.replace(multiline_file, r"Etiam", 123)
    assert "123" in multiline_file.read_text()
a35b29b2651bf33c5d5b45e64bc7765ffde4aff4
8
test_replace.py
46
Add some funtional tests Add functional tests for the following: - file.readlink - file.replace - file.symlink Remove unit tests for file.replace as they are duplicated in the added functional test
54,182
0
19
27
10
215,808
10
salt
5
tests/pytests/functional/modules/file/test_replace.py
Python
3
{ "docstring": "\n This test covers cases where the replacement string is numeric. The CLI\n parser yaml-fies it into a numeric type. If not converted back to a string\n type in file.replace, a TypeError occurs when the replace is attempted. See\n https://github.com/saltstack/salt/issues/9097 for more information.\n ", "language": "en", "n_whitespaces": 58, "n_words": 42, "vocab_size": 37 }
https://github.com/saltstack/salt.git
2
PreStem
def PreStem(name=None):
    if name is None:
        name = "prestem" + str(backend.get_uid("prestem"))
2d1086447a25d281f9428832d046c473d80ad761
14
convnext.py
49
Corrected preprocess_input docstring in regnet.py and convnext.py
80,000
0
24
30
10
269,281
11
keras
5
keras/applications/convnext.py
Python
5
{ "docstring": "Normalizes inputs with ImageNet-1k mean and std.\n\n Args:\n name (str): Name prefix.\n\n Returns:\n A presemt function.\n ", "language": "en", "n_whitespaces": 25, "n_words": 16, "vocab_size": 16 }
https://github.com/keras-team/keras.git
23
model_fn
def model_fn(features, labels, mode, params, config): del config hparams = params length = features.length spec = features.spec is_training = mode == tf_estimator.ModeKeys.TRAIN if is_training: onset_labels = labels.onsets offset_labels = labels.offsets velocity_labels = labels.velocities frame_labels = labels.labels frame_label_weights = labels.label_weights if hparams.stop_activation_gradient and not hparams.activation_loss: raise ValueError( 'If stop_activation_gradient is true, activation_loss must be true.') losses = {} with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training): with tf.variable_scope('onsets'): onset_outputs = acoustic_model( spec, hparams, lstm_units=hparams.onset_lstm_units, lengths=length) onset_probs = slim.fully_connected( onset_outputs, constants.MIDI_PITCHES, activation_fn=tf.sigmoid, scope='onset_probs') # onset_probs_flat is used during inference. onset_probs_flat = flatten_maybe_padded_sequences(onset_probs, length) if is_training: onset_labels_flat = flatten_maybe_padded_sequences(onset_labels, length) onset_losses = tf_utils.log_loss(onset_labels_flat, onset_probs_flat) tf.losses.add_loss(tf.reduce_mean(onset_losses)) losses['onset'] = onset_losses with tf.variable_scope('offsets'): offset_outputs = acoustic_model( spec, hparams, lstm_units=hparams.offset_lstm_units, lengths=length) offset_probs = slim.fully_connected( offset_outputs, constants.MIDI_PITCHES, activation_fn=tf.sigmoid, scope='offset_probs') # offset_probs_flat is used during inference. offset_probs_flat = flatten_maybe_padded_sequences(offset_probs, length) if is_training: offset_labels_flat = flatten_maybe_padded_sequences( offset_labels, length) offset_losses = tf_utils.log_loss(offset_labels_flat, offset_probs_flat) tf.losses.add_loss(tf.reduce_mean(offset_losses)) losses['offset'] = offset_losses with tf.variable_scope('velocity'): velocity_outputs = acoustic_model( spec, hparams, lstm_units=hparams.velocity_lstm_units, lengths=length) velocity_values = slim.fully_connected( velocity_outputs, constants.MIDI_PITCHES, activation_fn=None, scope='onset_velocities') velocity_values_flat = flatten_maybe_padded_sequences( velocity_values, length) if is_training: velocity_labels_flat = flatten_maybe_padded_sequences( velocity_labels, length) velocity_loss = tf.reduce_sum( onset_labels_flat * tf.square(velocity_labels_flat - velocity_values_flat), axis=1) tf.losses.add_loss(tf.reduce_mean(velocity_loss)) losses['velocity'] = velocity_loss with tf.variable_scope('frame'): if not hparams.share_conv_features: # TODO(eriche): this is broken when hparams.frame_lstm_units > 0 activation_outputs = acoustic_model( spec, hparams, lstm_units=hparams.frame_lstm_units, lengths=length) activation_probs = slim.fully_connected( activation_outputs, constants.MIDI_PITCHES, activation_fn=tf.sigmoid, scope='activation_probs') else: activation_probs = slim.fully_connected( onset_outputs, constants.MIDI_PITCHES, activation_fn=tf.sigmoid, scope='activation_probs') probs = [] if hparams.stop_onset_gradient: probs.append(tf.stop_gradient(onset_probs)) else: probs.append(onset_probs) if hparams.stop_activation_gradient: probs.append(tf.stop_gradient(activation_probs)) else: probs.append(activation_probs) if hparams.stop_offset_gradient: probs.append(tf.stop_gradient(offset_probs)) else: probs.append(offset_probs) combined_probs = tf.concat(probs, 2) if hparams.combined_lstm_units > 0: outputs = lstm_layer( combined_probs, hparams.combined_lstm_units, lengths=length if hparams.use_lengths else None, 
stack_size=hparams.combined_rnn_stack_size, use_cudnn=hparams.use_cudnn, bidirectional=hparams.bidirectional) else: outputs = combined_probs frame_probs = slim.fully_connected( outputs, constants.MIDI_PITCHES, activation_fn=tf.sigmoid, scope='frame_probs') frame_probs_flat = flatten_maybe_padded_sequences(frame_probs, length) if is_training: frame_labels_flat = flatten_maybe_padded_sequences(frame_labels, length) frame_label_weights_flat = flatten_maybe_padded_sequences( frame_label_weights, length) if hparams.weight_frame_and_activation_loss: frame_loss_weights = frame_label_weights_flat else: frame_loss_weights = None frame_losses = tf_utils.log_loss( frame_labels_flat, frame_probs_flat, weights=frame_loss_weights) tf.losses.add_loss(tf.reduce_mean(frame_losses)) losses['frame'] = frame_losses if hparams.activation_loss: if hparams.weight_frame_and_activation_loss: activation_loss_weights = frame_label_weights else: activation_loss_weights = None activation_losses = tf_utils.log_loss( frame_labels_flat, flatten_maybe_padded_sequences(activation_probs, length), weights=activation_loss_weights) tf.losses.add_loss(tf.reduce_mean(activation_losses)) losses['activation'] = activation_losses frame_predictions = frame_probs_flat > hparams.predict_frame_threshold onset_predictions = onset_probs_flat > hparams.predict_onset_threshold offset_predictions = offset_probs_flat > hparams.predict_offset_threshold frame_predictions = tf.expand_dims(frame_predictions, axis=0) onset_predictions = tf.expand_dims(onset_predictions, axis=0) offset_predictions = tf.expand_dims(offset_predictions, axis=0) velocity_values = tf.expand_dims(velocity_values_flat, axis=0) metrics_values = metrics.define_metrics( frame_probs=frame_probs, onset_probs=onset_probs, frame_predictions=frame_predictions, onset_predictions=onset_predictions, offset_predictions=offset_predictions, velocity_values=velocity_values, length=features.length, sequence_label=labels.note_sequence, frame_labels=labels.labels, sequence_id=features.sequence_id, hparams=hparams) for label, loss_collection in losses.items(): loss_label = 'losses/' + label metrics_values[loss_label] = loss_collection def predict_sequence():
f73ff0c91f0159a925fb6547612199bb7c915248
18
model.py
1,442
Explicitly import estimator from tensorflow as a separate import instead of accessing it via tf.estimator and depend on the tensorflow estimator target. PiperOrigin-RevId: 436568278
40,851
0
1,488
1,401
217
173,538
385
magenta
114
magenta/models/onsets_frames_transcription/model.py
Python
228
{ "docstring": "Builds the acoustic model.Convert frame predictions into a sequence (TF).", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/magenta/magenta.git
1
test_title_present
def test_title_present(self):
    response = self.get(4)
    self.assertContains(response, "Christmas", 3)
d10f15e55806c6944827d801cd9c2d53f5da4186
8
test_page_modeladmin.py
42
Reformat with black
15,993
0
29
24
8
73,217
8
wagtail
5
wagtail/contrib/modeladmin/tests/test_page_modeladmin.py
Python
3
{ "docstring": "\n The page title should appear three times. Once in the header, and two times\n in the field listing (as the actual title and as the draft title)\n ", "language": "en", "n_whitespaces": 49, "n_words": 27, "vocab_size": 21 }
https://github.com/wagtail/wagtail.git
2
all_triads
def all_triads(G):
    triplets = combinations(G.nodes(), 3)
    for triplet in triplets:
        yield G.subgraph(triplet).copy()


@not_implemented_for("undirected")
db35812af218482b0ddf9ca47e4792e47e4d4666
@not_implemented_for("undirected")
12
triads.py
69
Add docstring examples for triads functions (#5522) Adds docstring examples to the functions in the triads module as well as some additional explanatory text + links to other examples. Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Mridul Seth <[email protected]>
42,209
1
28
34
13
176,981
13
networkx
9
networkx/algorithms/triads.py
Python
4
{ "docstring": "A generator of all possible triads in G.\n\n Parameters\n ----------\n G : digraph\n A NetworkX DiGraph\n\n Returns\n -------\n all_triads : generator of DiGraphs\n Generator of triads (order-3 DiGraphs)\n\n Examples\n --------\n >>> G = nx.DiGraph([(1, 2), (2, 3), (3, 1), (3, 4), (4, 1), (4, 2)])\n >>> for triad in nx.all_triads(G):\n ... print(triad.edges)\n [(1, 2), (2, 3), (3, 1)]\n [(1, 2), (4, 1), (4, 2)]\n [(3, 1), (3, 4), (4, 1)]\n [(2, 3), (3, 4), (4, 2)]\n\n ", "language": "en", "n_whitespaces": 140, "n_words": 76, "vocab_size": 45 }
https://github.com/networkx/networkx.git
10
totientrange
def totientrange(self, a, b):
    a = max(1, _as_int_ceiling(a))
    b = _as_int_ceiling(b)
    n = len(self._tlist)
    if a >= b:
        return
    elif b <= n:
        for i in range(a, b):
            yield self._tlist[i]
    else:
        self._tlist += _arange(n, b)
        for i in range(1, n):
            ti = self._tlist[i]
            startindex = (n + i - 1) // i * i
            for j in range(startindex, b, i):
                self._tlist[j] -= ti
            if i >= a:
                yield ti

        for i in range(n, b):
            ti = self._tlist[i]
            for j in range(2 * i, b, i):
                self._tlist[j] -= ti
            if i >= a:
                yield ti
e0dc14eca132f37c5f49369eb4051eae37c9b119
16
generate.py
274
Refactored import ordering in functions
48,298
0
394
177
50
197,041
94
sympy
15
sympy/ntheory/generate.py
Python
24
{ "docstring": "Generate all totient numbers for the range [a, b).\n\n Examples\n ========\n\n >>> from sympy import sieve\n >>> print([i for i in sieve.totientrange(7, 18)])\n [6, 4, 6, 4, 10, 4, 12, 6, 8, 8, 16]\n ", "language": "en", "n_whitespaces": 76, "n_words": 34, "vocab_size": 28 }
https://github.com/sympy/sympy.git
2
get_deps
def get_deps(self, candidate):
    key = format_requirement(candidate)
    if key not in self.dep_dict:
        from .requirements import Requirement

        req = Requirement.from_line(key)
        req = req.merge_markers(self.markers)
        self.dep_dict[key] = req.get_abstract_dependencies()
    return self.dep_dict[key]
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
11
dependencies.py
105
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
4,272
0
98
64
20
22,228
26
pipenv
13
pipenv/vendor/requirementslib/models/dependencies.py
Python
8
{ "docstring": "Get the dependencies of the supplied candidate.\n\n :param candidate: An installrequirement\n :type candidate: :class:`~pipenv.patched.pip._internal.req.req_install.InstallRequirement`\n :return: A list of abstract dependencies\n :rtype: list[:class:`~requirementslib.models.dependency.AbstractDependency`]\n ", "language": "en", "n_whitespaces": 57, "n_words": 22, "vocab_size": 18 }
https://github.com/pypa/pipenv.git
1
test_all
def test_all(self):
    self.write_settings("settings_to_diff.py", sdict={"STATIC_URL": "None"})
    args = ["diffsettings", "--settings=settings_to_diff", "--all"]
    out, err = self.run_manage(args)
    self.assertNoOutput(err)
    self.assertOutput(out, "### STATIC_URL = None")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
tests.py
98
Refs #33476 -- Reformatted code with Black.
51,934
0
62
53
18
207,354
20
django
10
tests/admin_scripts/tests.py
Python
6
{ "docstring": "The all option also shows settings with the default value.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/django/django.git
15
_validate
def _validate(self) -> None:
    if (self._args.writer == "ffmpeg" and
            not self._images.is_video and
            self._args.reference_video is None):
        raise FaceswapError("Output as video selected, but using frames as input. You must "
                            "provide a reference video ('-ref', '--reference-video').")

    if (self._args.on_the_fly and
            self._args.mask_type not in ("none", "extended", "components")):
        logger.warning("You have selected an incompatible mask type ('%s') for On-The-Fly "
                       "conversion. Switching to 'extended'", self._args.mask_type)
        self._args.mask_type = "extended"

    if (not self._args.on_the_fly and
            self._args.mask_type not in ("none", "predicted") and
            not self._alignments.mask_is_valid(self._args.mask_type)):
        msg = (f"You have selected the Mask Type `{self._args.mask_type}` but at least one "
               "face does not have this mask stored in the Alignments File.\nYou should "
               "generate the required masks with the Mask Tool or set the Mask Type option to "
               "an existing Mask Type.\nA summary of existing masks is as follows:\nTotal "
               f"faces: {self._alignments.faces_count}, "
               f"Masks: {self._alignments.mask_summary}")
        raise FaceswapError(msg)

    if self._args.mask_type == "predicted" and not self._predictor.has_predicted_mask:
        available_masks = [k for k, v in self._alignments.mask_summary.items()
                           if k != "none" and v == self._alignments.faces_count]
        if not available_masks:
            msg = ("Predicted Mask selected, but the model was not trained with a mask and no "
                   "masks are stored in the Alignments File.\nYou should generate the "
                   "required masks with the Mask Tool or set the Mask Type to `none`.")
            raise FaceswapError(msg)
        mask_type = available_masks[0]
        logger.warning("Predicted Mask selected, but the model was not trained with a "
                       "mask. Selecting first available mask: '%s'", mask_type)
        self._args.mask_type = mask_type
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
14
convert.py
423
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
20,791
0
727
224
125
101,376
230
faceswap
23
scripts/convert.py
Python
49
{ "docstring": " Validate the Command Line Options.\n\n Ensure that certain cli selections are valid and won't result in an error. Checks:\n * If frames have been passed in with video output, ensure user supplies reference\n video.\n * If \"on-the-fly\" and a Neural Network mask is selected, warn and switch to 'extended'\n * If a mask-type is selected, ensure it exists in the alignments file.\n * If a predicted mask-type is selected, ensure model has been trained with a mask\n otherwise attempt to select first available masks, otherwise raise error.\n\n Raises\n ------\n FaceswapError\n If an invalid selection has been found.\n\n ", "language": "en", "n_whitespaces": 210, "n_words": 97, "vocab_size": 66 }
https://github.com/deepfakes/faceswap.git
3
test_cas_redirect_confirm
def test_cas_redirect_confirm(self) -> None:
    base_url = "/_matrix/client/r0/login/cas/ticket?redirectUrl"
    redirect_url = "https://dodgy-site.com/"

    url_parts = list(urllib.parse.urlparse(base_url))
    query = dict(urllib.parse.parse_qsl(url_parts[4]))
    query.update({"redirectUrl": redirect_url})
    query.update({"ticket": "ticket"})
    url_parts[4] = urllib.parse.urlencode(query)
    cas_ticket_url = urllib.parse.urlunparse(url_parts)

    # Get Synapse to call the fake CAS and serve the template.
    channel = self.make_request("GET", cas_ticket_url)

    # Test that the response is HTML.
    self.assertEqual(channel.code, 200, channel.result)
    content_type_header_value = ""
    for header in channel.result.get("headers", []):
        if header[0] == b"Content-Type":
            content_type_header_value = header[1].decode("utf8")

    self.assertTrue(content_type_header_value.startswith("text/html"))

    # Test that the body isn't empty.
    self.assertTrue(len(channel.result["body"]) > 0)

    # And that it contains our redirect link
    self.assertIn(redirect_url, channel.result["body"].decode("UTF-8"))
64c73c6ac88a740ee480a0ad1f9afc8596bccfa4
13
test_login.py
333
Add type hints to `tests/rest/client` (#12066)
71,284
0
254
193
70
246,592
88
synapse
29
tests/rest/client/test_login.py
Python
21
{ "docstring": "Tests that the SSO login flow serves a confirmation page before redirecting a\n user to the redirect URL.\n ", "language": "en", "n_whitespaces": 32, "n_words": 18, "vocab_size": 16 }
https://github.com/matrix-org/synapse.git
1
test_state_default_level
def test_state_default_level(self):
    creator = "@creator:example.com"
    pleb = "@joiner:example.com"
    king = "@joiner2:example.com"

    auth_events = [
        _create_event(RoomVersions.V1, creator),
        _join_event(RoomVersions.V1, creator),
        _power_levels_event(
            RoomVersions.V1,
            creator,
            {"state_default": "30", "users": {pleb: "29", king: "30"}},
        ),
        _join_event(RoomVersions.V1, pleb),
        _join_event(RoomVersions.V1, king),
    ]

    # pleb should not be able to send state
    self.assertRaises(
        AuthError,
        event_auth.check_auth_rules_for_event,
        RoomVersions.V1,
        _random_state_event(RoomVersions.V1, pleb),
        auth_events,
    ),

    # king should be able to send state
    event_auth.check_auth_rules_for_event(
        RoomVersions.V1,
        _random_state_event(RoomVersions.V1, king),
        auth_events,
    )
2959184a42398277ff916206235b844a8f7be5d7
14
test_event_auth.py
202
EventAuthTestCase: build events for the right room version In practice, when we run the auth rules, all of the events have the right room version. Let's stop building Room V1 events for these tests and use the right version.
72,341
0
348
130
43
248,549
65
synapse
16
tests/test_event_auth.py
Python
27
{ "docstring": "\n Check that users above the state_default level can send state and\n those below cannot\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 14 }
https://github.com/matrix-org/synapse.git
5
P
def P(self):
    if self.data.minute == 0 and self.data.hour == 0:
        return _("midnight")
    if self.data.minute == 0 and self.data.hour == 12:
        return _("noon")
    return "%s %s" % (self.f(), self.a())
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
dateformat.py
112
Refs #33476 -- Reformatted code with Black.
51,583
0
78
66
18
206,599
28
django
8
django/utils/dateformat.py
Python
6
{ "docstring": "\n Time, in 12-hour hours, minutes and 'a.m.'/'p.m.', with minutes left off\n if they're zero and the strings 'midnight' and 'noon' if appropriate.\n Examples: '1 a.m.', '1:30 p.m.', 'midnight', 'noon', '12:30 p.m.'\n Proprietary extension.\n ", "language": "en", "n_whitespaces": 69, "n_words": 33, "vocab_size": 29 }
https://github.com/django/django.git
17
sample
def sample(self) -> SampleBatchType:
    if self.fake_sampler and self.last_batch is not None:
        return self.last_batch
    elif self.input_reader is None:
        raise ValueError(
            "RolloutWorker has no `input_reader` object! "
            "Cannot call `sample()`. You can try setting "
            "`create_env_on_driver` to True."
        )

    if log_once("sample_start"):
        logger.info(
            "Generating sample batch of size {}".format(
                self.rollout_fragment_length
            )
        )

    batches = [self.input_reader.next()]
    steps_so_far = (
        batches[0].count
        if self.count_steps_by == "env_steps"
        else batches[0].agent_steps()
    )

    # In truncate_episodes mode, never pull more than 1 batch per env.
    # This avoids over-running the target batch size.
    if self.batch_mode == "truncate_episodes":
        max_batches = self.num_envs
    else:
        max_batches = float("inf")

    while steps_so_far < self.rollout_fragment_length and (
        len(batches) < max_batches or self.policy_config.get("offline_sampling")
    ):
        batch = self.input_reader.next()
        steps_so_far += (
            batch.count
            if self.count_steps_by == "env_steps"
            else batch.agent_steps()
        )
        batches.append(batch)

    batch = batches[0].concat_samples(batches) if len(batches) > 1 else batches[0]

    self.callbacks.on_sample_end(worker=self, samples=batch)

    # Always do writes prior to compression for consistency and to allow
    # for better compression inside the writer.
    self.output_writer.write(batch)

    # Do off-policy estimation, if needed.
    if self.reward_estimators:
        for estimator in self.reward_estimators:
            estimator.process(batch)

    if log_once("sample_end"):
        logger.info("Completed sample batch:\n\n{}\n".format(summarize(batch)))

    if self.compress_observations:
        batch.compress(bulk=self.compress_observations == "bulk")

    if self.fake_sampler:
        self.last_batch = batch

    return batch
1243ed62bf4121c83881c3ddc095bc6a873a09f3
13
rollout_worker.py
483
[RLlib] Make Dataset reader default reader and enable CRR to use dataset (#26304) Co-authored-by: avnish <[email protected]>
27,618
0
713
284
119
124,505
182
ray
41
rllib/evaluation/rollout_worker.py
Python
65
{ "docstring": "Returns a batch of experience sampled from this worker.\n\n This method must be implemented by subclasses.\n\n Returns:\n A columnar batch of experiences (e.g., tensors).\n\n Examples:\n >>> import gym\n >>> from ray.rllib.evaluation.rollout_worker import RolloutWorker\n >>> from ray.rllib.algorithms.pg.pg_tf_policy import PGStaticGraphTFPolicy\n >>> worker = RolloutWorker( # doctest: +SKIP\n ... env_creator=lambda _: gym.make(\"CartPole-v0\"), # doctest: +SKIP\n ... policy_spec=PGStaticGraphTFPolicy) # doctest: +SKIP\n >>> print(worker.sample()) # doctest: +SKIP\n SampleBatch({\"obs\": [...], \"action\": [...], ...})\n ", "language": "en", "n_whitespaces": 198, "n_words": 67, "vocab_size": 46 }
https://github.com/ray-project/ray.git
3
to_dict
def to_dict(self) -> dict[str, str | float | bool | None]:
    # Needed because dataclasses asdict() can't serialize Templates and ignores Properties.
    dic = {
        CONF_PLATFORM: self.platform,
        CONF_ENTITY_ID: self.entity_id,
        CONF_VALUE_TEMPLATE: self.template,
        CONF_TO_STATE: self.to_state,
        CONF_ABOVE: self.above,
        CONF_BELOW: self.below,
        CONF_P_GIVEN_T: self.prob_given_true,
        CONF_P_GIVEN_F: self.prob_given_false,
        "observed": self.observed,
    }
    for key, value in dic.copy().items():
        if value is None:
            del dic[key]
    return dic
dd1463da287f591652e47b00eee0c5b77f5f5b7c
10
helpers.py
155
Refactor bayesian observations using dataclass (#79590) * refactor * remove some changes * remove typehint * improve codestyle * move docstring to comment * < 88 chars * avoid short var names * more readable * fix rename * Update homeassistant/components/bayesian/helpers.py Co-authored-by: epenet <[email protected]> * Update homeassistant/components/bayesian/binary_sensor.py Co-authored-by: epenet <[email protected]> * Update homeassistant/components/bayesian/binary_sensor.py Co-authored-by: epenet <[email protected]> * no intermediate * comment why set before list Co-authored-by: epenet <[email protected]>
87,643
0
224
104
53
288,486
57
core
28
homeassistant/components/bayesian/helpers.py
Python
17
{ "docstring": "Represent Class as a Dict for easier serialization.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
1
test_slicing
def test_slicing(self): s1 = SampleBatch( { "a": np.array([1, 2, 3, 2, 3, 4]), "b": {"c": np.array([4, 5, 6, 5, 6, 7])}, } ) check( s1[:3], { "a": [1, 2, 3], "b": {"c": [4, 5, 6]}, }, ) check( s1[0:3], { "a": [1, 2, 3], "b": {"c": [4, 5, 6]}, }, ) check( s1[1:4], { "a": [2, 3, 2], "b": {"c": [5, 6, 5]}, }, ) check( s1[1:], { "a": [2, 3, 2, 3, 4], "b": {"c": [5, 6, 5, 6, 7]}, }, ) check( s1[3:4], { "a": [2], "b": {"c": [5]}, }, ) # When we change the slice, the original SampleBatch should also # change (shared underlying data). s1[:3]["a"][0] = 100 s1[1:2]["a"][0] = 200 check(s1["a"][0], 100) check(s1["a"][1], 200) # Seq-len batches should be auto-sliced along sequences, # no matter what. s2 = SampleBatch( { "a": np.array([1, 2, 3, 2, 3, 4]), "b": {"c": np.array([4, 5, 6, 5, 6, 7])}, SampleBatch.SEQ_LENS: [2, 3, 1], "state_in_0": [1.0, 3.0, 4.0], } ) # We would expect a=[1, 2, 3] now, but due to the sequence # boundary, we stop earlier. check( s2[:3], { "a": [1, 2], "b": {"c": [4, 5]}, SampleBatch.SEQ_LENS: [2], "state_in_0": [1.0], }, ) # Split exactly at a seq-len boundary. check( s2[:5], { "a": [1, 2, 3, 2, 3], "b": {"c": [4, 5, 6, 5, 6]}, SampleBatch.SEQ_LENS: [2, 3], "state_in_0": [1.0, 3.0], }, ) # Split above seq-len boundary. check( s2[:50], { "a": [1, 2, 3, 2, 3, 4], "b": {"c": [4, 5, 6, 5, 6, 7]}, SampleBatch.SEQ_LENS: [2, 3, 1], "state_in_0": [1.0, 3.0, 4.0], }, ) check( s2[:], { "a": [1, 2, 3, 2, 3, 4], "b": {"c": [4, 5, 6, 5, 6, 7]}, SampleBatch.SEQ_LENS: [2, 3, 1], "state_in_0": [1.0, 3.0, 4.0], }, )
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
15
test_sample_batch.py
903
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
33,076
0
1,352
639
101
143,840
286
ray
9
rllib/policy/tests/test_sample_batch.py
Python
90
{ "docstring": "Tests, whether slicing can be done on SampleBatches.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/ray-project/ray.git
1
test_list_statistic_ids
async def test_list_statistic_ids(recorder_mock, hass, hass_ws_client, caplog):
    await async_setup_component(hass, "history", {})
    client = await hass_ws_client()

    # Test the WS API works and issues a warning
    await client.send_json({"id": 1, "type": "history/list_statistic_ids"})
    response = await client.receive_json()
    assert response["success"]
    assert response["result"] == []

    assert (
        "WS API 'history/list_statistic_ids' is deprecated and will be removed in "
        "Home Assistant Core 2022.12. Use 'recorder/list_statistic_ids' instead"
    ) in caplog.text

    with patch(
        "homeassistant.components.history.recorder_ws.ws_handle_list_statistic_ids",
        wraps=ws_handle_list_statistic_ids,
    ) as ws_mock:
        await client.send_json({"id": 2, "type": "history/list_statistic_ids"})
        await client.receive_json()
        ws_mock.assert_called_once()
31a787558fd312331b55e5c2c4b33341fc3601fc
13
test_init.py
206
Ensure recorder test fixture is setup before hass fixture (#80528) * Ensure recorder test fixture is setup before hass fixture * Adjust more tests
88,515
0
162
112
61
289,373
77
core
16
tests/components/history/test_init.py
Python
18
{ "docstring": "Test history/list_statistic_ids forwards to recorder.", "language": "en", "n_whitespaces": 4, "n_words": 5, "vocab_size": 5 }
https://github.com/home-assistant/core.git
24
copy_left_only
def copy_left_only(src, dest, module):
    changed = False
    owner = module.params['owner']
    group = module.params['group']
    local_follow = module.params['local_follow']
    left_only = filecmp.dircmp(src, dest).left_only
    if len(left_only):
        changed = True
    if not module.check_mode:
        for item in left_only:
            src_item_path = os.path.join(src, item)
            dest_item_path = os.path.join(dest, item)
            b_src_item_path = to_bytes(src_item_path, errors='surrogate_or_strict')
            b_dest_item_path = to_bytes(dest_item_path, errors='surrogate_or_strict')

            if os.path.islink(b_src_item_path) and os.path.isdir(b_src_item_path) and local_follow is True:
                shutil.copytree(b_src_item_path, b_dest_item_path, symlinks=not local_follow)
                chown_recursive(b_dest_item_path, module)

            if os.path.islink(b_src_item_path) and os.path.isdir(b_src_item_path) and local_follow is False:
                linkto = os.readlink(b_src_item_path)
                os.symlink(linkto, b_dest_item_path)

            if os.path.islink(b_src_item_path) and os.path.isfile(b_src_item_path) and local_follow is True:
                shutil.copyfile(b_src_item_path, b_dest_item_path)
                if owner is not None:
                    module.set_owner_if_different(b_dest_item_path, owner, False)
                if group is not None:
                    module.set_group_if_different(b_dest_item_path, group, False)

            if os.path.islink(b_src_item_path) and os.path.isfile(b_src_item_path) and local_follow is False:
                linkto = os.readlink(b_src_item_path)
                os.symlink(linkto, b_dest_item_path)

            if not os.path.islink(b_src_item_path) and os.path.isfile(b_src_item_path):
                shutil.copyfile(b_src_item_path, b_dest_item_path)
                shutil.copymode(b_src_item_path, b_dest_item_path)
                if owner is not None:
                    module.set_owner_if_different(b_dest_item_path, owner, False)
                if group is not None:
                    module.set_group_if_different(b_dest_item_path, group, False)

            if not os.path.islink(b_src_item_path) and os.path.isdir(b_src_item_path):
                shutil.copytree(b_src_item_path, b_dest_item_path, symlinks=not local_follow)
                chown_recursive(b_dest_item_path, module)

            changed = True
    return changed
650befed37eadcaea735673638d5475fa957ca7e
15
copy.py
634
Add missing space after keywords. (#78480) Also remove unnecessary parenthesis.
79,439
0
618
408
63
268,182
155
ansible
38
lib/ansible/modules/copy.py
Python
41
{ "docstring": "Copy files that exist in `src` directory only to the `dest` directory.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/ansible/ansible.git
2
__del__
def __del__(self):
    if self.frame_id is not None:
        self._server.dropTable(self.frame_id)
1c0935c1bc0856d43f69c1e32498636ee24ebc85
10
partition.py
41
FEAT-#4913: Enabling pyhdk (#4900) Co-authored-by: ienkovich <[email protected]> Signed-off-by: izamyati <[email protected]>
35,961
0
33
24
8
154,404
8
modin
5
modin/experimental/core/execution/native/implementations/omnisci_on_native/partitioning/partition.py
Python
3
{ "docstring": "Deallocate OmniSci resources related to the partition.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/modin-project/modin.git
1
PowerFunction
def PowerFunction(name, alpha, a, b):
    r
    return rv(name, PowerFunctionDistribution, (alpha, a, b))

#-------------------------------------------------------------------------------
# QuadraticU distribution ------------------------------------------------------
9ad8ab9fe58051cf11626ba6654852fcfec60147
8
crv_types.py
40
Documentation cleanup 5
48,091
0
20
28
16
196,673
17
sympy
7
sympy/stats/crv_types.py
Python
63
{ "docstring": "\n Creates a continuous random variable with a Power Function Distribution.\n\n Explanation\n ===========\n\n The density of PowerFunction distribution is given by\n\n .. math::\n f(x) := \\frac{{\\alpha}(x - a)^{\\alpha - 1}}{(b - a)^{\\alpha}}\n\n with :math:`x \\in [a,b]`.\n\n Parameters\n ==========\n\n alpha: Positive number, `0 < alpha`, the shape paramater\n a : Real number, :math:`-\\infty < a`, the left boundary\n b : Real number, :math:`a < b < \\infty`, the right boundary\n\n Returns\n =======\n\n RandomSymbol\n\n Examples\n ========\n\n >>> from sympy.stats import PowerFunction, density, cdf, E, variance\n >>> from sympy import Symbol\n >>> alpha = Symbol(\"alpha\", positive=True)\n >>> a = Symbol(\"a\", real=True)\n >>> b = Symbol(\"b\", real=True)\n >>> z = Symbol(\"z\")\n\n >>> X = PowerFunction(\"X\", 2, a, b)\n\n >>> density(X)(z)\n (-2*a + 2*z)/(-a + b)**2\n\n >>> cdf(X)(z)\n Piecewise((a**2/(a**2 - 2*a*b + b**2) - 2*a*z/(a**2 - 2*a*b + b**2) +\n z**2/(a**2 - 2*a*b + b**2), a <= z), (0, True))\n\n >>> alpha = 2\n >>> a = 0\n >>> b = 1\n >>> Y = PowerFunction(\"Y\", alpha, a, b)\n\n >>> E(Y)\n 2/3\n\n >>> variance(Y)\n 1/18\n\n References\n ==========\n\n .. [1] http://www.mathwave.com/help/easyfit/html/analyses/distributions/power_func.html\n\n ", "language": "en", "n_whitespaces": 302, "n_words": 174, "vocab_size": 111 }
https://github.com/sympy/sympy.git
1
get_employees
def get_employees(filters):
    conditions = get_conditions(filters)
    return frappe.db.sql(
        % conditions,
        as_list=1,
    )
494bd9ef78313436f0424b918f200dab8fc7c20b
9
employee_analytics.py
44
style: format code with black
14,144
0
5
27
11
66,234
11
erpnext
8
erpnext/hr/report/employee_analytics/employee_analytics.py
Python
9
{ "docstring": "select name, employee_name, date_of_birth,\n\tbranch, department, designation,\n\tgender, company from `tabEmployee` where status = 'Active' %s", "language": "en", "n_whitespaces": 13, "n_words": 16, "vocab_size": 16 }
https://github.com/frappe/erpnext.git
11
build_dependency
def build_dependency(self):
    self.groups = {}
    for node in self.graph.nodes_py.nodes_op:
        if node.op_type in ['Conv2d', 'ConvTranspose2d', "GroupNorm"]:
            if node.op_type in ['Conv2d', 'ConvTranspose2d']:
                group = self._get_conv_groups(node)
            elif node.op_type == "GroupNorm":
                group = self._get_group_norm_condition(node)
            if node.name in self.groups:
                # the conv layer whose group is larger than 1 will require that
                # it's number of output channel to be divisible by the number of group.
                self.groups[node.name].append(group)
            else:
                self.groups[node.name] = [group]
            if group > 1:
                # for the conv layer whose group is larger than 1, it will require the number
                # of output channels of their parent conv layer to be divisible by group.
                parent_convs = self._get_parent_convs(node)
                for parent in parent_convs:
                    if parent in self.groups:
                        self.groups[parent].append(group)
                    else:
                        self.groups[parent] = [group]

    for name in self.groups:
        self.dependency[name] = lcm_list(self.groups[name])
        if min(self.groups[name]) == gcd_list(self.groups[name]):
            self.min_groups[name] = min(self.groups[name])
        else:
            self.min_groups[name] = 1

    return self.dependency
4cf680090f8a07fdffaebca2e0c2a4f41a9e4315
19
shape_dependency.py
369
Add Group Norm support for Pruning model (#5069)
24,906
0
610
228
69
113,425
136
nni
21
nni/compression/pytorch/utils/shape_dependency.py
Python
26
{ "docstring": "\n Build the channel dependency for the conv layers\n in the model. This function return the group number\n of each conv layers. Note that, here, the group count\n of conv layers may be larger than their originl groups.\n This is because that the input channel will also be grouped\n for the group conv layers. To make this clear, assume we\n have two group conv layers: conv1(group=2), conv2(group=4).\n conv2 takes the output features of conv1 as input.\n Then we have to the filters of conv1 can still be\n divided into 4 groups after filter pruning, because\n the input channels of conv2 should be divided into\n 4 groups.\n\n Returns\n -------\n self.dependency : dict\n key: the name of conv layers, value: the minimum value that the number of\n filters should be divisible to.\n ", "language": "en", "n_whitespaces": 264, "n_words": 129, "vocab_size": 80 }
https://github.com/microsoft/nni.git
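As a side note to the record above: the docstring's divisibility requirement boils down to taking the least common multiple of all group counts attached to a layer. The sketch below re-implements the lcm_list/gcd_list helpers for illustration only; it is not NNI code:

# Minimal standalone sketch of the lcm/gcd bookkeeping performed by build_dependency.
from math import gcd
from functools import reduce

def lcm(a, b):
    return a * b // gcd(a, b)

def lcm_list(values):
    return reduce(lcm, values, 1)

def gcd_list(values):
    return reduce(gcd, values)

# e.g. a conv layer whose own group count is 2 and whose child conv uses group 4
groups = {"conv1": [2, 4], "conv2": [4]}
dependency = {name: lcm_list(gs) for name, gs in groups.items()}
min_groups = {name: (min(gs) if min(gs) == gcd_list(gs) else 1) for name, gs in groups.items()}
print(dependency)  # {'conv1': 4, 'conv2': 4} -> channel counts must stay divisible by 4
print(min_groups)  # {'conv1': 2, 'conv2': 4}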
15
try_cast_to_pandas
def try_cast_to_pandas(obj, squeeze=False):
    if hasattr(obj, "_to_pandas"):
        result = obj._to_pandas()
        if squeeze:
            result = result.squeeze(axis=1)
        return result
    if hasattr(obj, "to_pandas"):
        result = obj.to_pandas()
        if squeeze:
            result = result.squeeze(axis=1)
        # Query compiler case, it doesn't have logic about conversion to Series
        if (
            isinstance(getattr(result, "name", None), str)
            and result.name == MODIN_UNNAMED_SERIES_LABEL
        ):
            result.name = None
        return result
    if isinstance(obj, (list, tuple)):
        return type(obj)([try_cast_to_pandas(o, squeeze=squeeze) for o in obj])
    if isinstance(obj, dict):
        return {k: try_cast_to_pandas(v, squeeze=squeeze) for k, v in obj.items()}
    if callable(obj):
        module_hierarchy = getattr(obj, "__module__", "").split(".")
        fn_name = getattr(obj, "__name__", None)
        if fn_name and module_hierarchy[0] == "modin":
            return (
                getattr(pandas.DataFrame, fn_name, obj)
                if module_hierarchy[-1] == "dataframe"
                else getattr(pandas.Series, fn_name, obj)
            )
    return obj
3f985ed6864cc1b5b587094d75ca5b2695e4139f
14
utils.py
376
REFACTOR-#4796: Introduce constant for __reduced__ column name (#4799) Co-authored-by: Mahesh Vashishtha <[email protected]> Co-authored-by: Alexey Prutskov <[email protected]> Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Jonathan Shi <[email protected]>
35,865
0
353
237
71
154,218
112
modin
28
modin/utils.py
Python
30
{ "docstring": "\n Convert `obj` and all nested objects from Modin to pandas if it is possible.\n\n If no convertion possible return `obj`.\n\n Parameters\n ----------\n obj : object\n Object to convert from Modin to pandas.\n squeeze : bool, default: False\n Squeeze the converted object(s) before returning them.\n\n Returns\n -------\n object\n Converted object.\n ", "language": "en", "n_whitespaces": 101, "n_words": 49, "vocab_size": 43 }
https://github.com/modin-project/modin.git
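A simplified sketch of the recursion pattern used by try_cast_to_pandas above: objects exposing a to-pandas hook are converted, containers are walked element by element, and everything else passes through. The helper name below is illustrative and not part of Modin's API:

# Duck-typed, recursive conversion sketch: anything with a _to_pandas hook is
# converted, lists/tuples/dicts are walked, plain objects are returned unchanged.
def to_pandas_like(obj):
    if hasattr(obj, "_to_pandas"):
        return obj._to_pandas()
    if isinstance(obj, (list, tuple)):
        return type(obj)(to_pandas_like(o) for o in obj)
    if isinstance(obj, dict):
        return {k: to_pandas_like(v) for k, v in obj.items()}
    return obj

print(to_pandas_like({"a": [1, 2, 3], "b": (4, 5)}))  # plain objects pass straight through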
1
test_backfill_floating_outlier_membership_auth
def test_backfill_floating_outlier_membership_auth(self):
    OTHER_SERVER = "otherserver"
    OTHER_USER = "@otheruser:" + OTHER_SERVER

    # create the room
    user_id = self.register_user("kermit", "test")
    tok = self.login("kermit", "test")
    room_id = self.helper.create_room_as(
        room_creator=user_id,
        is_public=True,
        tok=tok,
        extra_content={
            "preset": "public_chat",
        },
    )
    room_version = self.get_success(self.store.get_room_version(room_id))

    prev_event_ids = self.get_success(self.store.get_prev_events_for_room(room_id))
    (
        most_recent_prev_event_id,
        most_recent_prev_event_depth,
    ) = self.get_success(self.store.get_max_depth_of(prev_event_ids))

    # mapping from (type, state_key) -> state_event_id
    prev_state_map = self.get_success(
        self.state_store.get_state_ids_for_event(most_recent_prev_event_id)
    )
    # List of state event ID's
    prev_state_ids = list(prev_state_map.values())
    auth_event_ids = prev_state_ids
    auth_events = list(
        self.get_success(self.store.get_events(auth_event_ids)).values()
    )

    # build a floating outlier member state event
    fake_prev_event_id = "$" + random_string(43)
    member_event_dict = {
        "type": EventTypes.Member,
        "content": {
            "membership": "join",
        },
        "state_key": OTHER_USER,
        "room_id": room_id,
        "sender": OTHER_USER,
        "depth": most_recent_prev_event_depth,
        "prev_events": [fake_prev_event_id],
        "origin_server_ts": self.clock.time_msec(),
        "signatures": {OTHER_SERVER: {"ed25519:key_version": "SomeSignatureHere"}},
    }
    builder = self.hs.get_event_builder_factory().for_room_version(
        room_version, member_event_dict
    )
    member_event = self.get_success(
        builder.build(
            prev_event_ids=member_event_dict["prev_events"],
            auth_event_ids=self._event_auth_handler.compute_auth_events(
                builder,
                prev_state_map,
                for_verification=False,
            ),
            depth=member_event_dict["depth"],
        )
    )
    # Override the signature added from "test" homeserver that we created the event with
    member_event.signatures = member_event_dict["signatures"]

    # Add the new member_event to the StateMap
    prev_state_map[
        (member_event.type, member_event.state_key)
    ] = member_event.event_id
    auth_events.append(member_event)

    # build and send an event authed based on the member event
    message_event_dict = {
        "type": EventTypes.Message,
        "content": {},
        "room_id": room_id,
        "sender": OTHER_USER,
        "depth": most_recent_prev_event_depth,
        "prev_events": prev_event_ids.copy(),
        "origin_server_ts": self.clock.time_msec(),
        "signatures": {OTHER_SERVER: {"ed25519:key_version": "SomeSignatureHere"}},
    }
    builder = self.hs.get_event_builder_factory().for_room_version(
        room_version, message_event_dict
    )
    message_event = self.get_success(
        builder.build(
            prev_event_ids=message_event_dict["prev_events"],
            auth_event_ids=self._event_auth_handler.compute_auth_events(
                builder,
                prev_state_map,
                for_verification=False,
            ),
            depth=message_event_dict["depth"],
        )
    )
    # Override the signature added from "test" homeserver that we created the event with
    message_event.signatures = message_event_dict["signatures"]

    # Stub the /event_auth response from the OTHER_SERVER
0fb3dd0830e476c0e0b89c3bf6c7855a4129ff11
15
test_federation.py
755
Refactor the way we set `outlier` (#11634) * `_auth_and_persist_outliers`: mark persisted events as outliers Mark any events that get persisted via `_auth_and_persist_outliers` as, well, outliers. Currently this will be a no-op as everything will already be flagged as an outlier, but I'm going to change that. * `process_remote_join`: stop flagging as outlier The events are now flagged as outliers later on, by `_auth_and_persist_outliers`. * `send_join`: remove `outlier=True` The events created here are returned in the result of `send_join` to `FederationHandler.do_invite_join`. From there they are passed into `FederationEventHandler.process_remote_join`, which passes them to `_auth_and_persist_outliers`... which sets the `outlier` flag. * `get_event_auth`: remove `outlier=True` stop flagging the events returned by `get_event_auth` as outliers. This method is only called by `_get_remote_auth_chain_for_event`, which passes the results into `_auth_and_persist_outliers`, which will flag them as outliers. * `_get_remote_auth_chain_for_event`: remove `outlier=True` we pass all the events into `_auth_and_persist_outliers`, which will now flag the events as outliers. * `_check_sigs_and_hash_and_fetch`: remove unused `outlier` parameter This param is now never set to True, so we can remove it. * `_check_sigs_and_hash_and_fetch_one`: remove unused `outlier` param This is no longer set anywhere, so we can remove it. * `get_pdu`: remove unused `outlier` parameter ... and chase it down into `get_pdu_from_destination_raw`. * `event_from_pdu_json`: remove redundant `outlier` param This is never set to `True`, so can be removed. * changelog * update docstring
70,950
0
1,189
523
136
246,021
243
synapse
58
tests/handlers/test_federation.py
Python
98
{ "docstring": "\n As the local homeserver, check that we can properly process a federated\n event from the OTHER_SERVER with auth_events that include a floating\n membership event from the OTHER_SERVER.\n\n Regression test, see #10439.\n ", "language": "en", "n_whitespaces": 67, "n_words": 31, "vocab_size": 25 }
https://github.com/matrix-org/synapse.git
7
check_against_chunks
def check_against_chunks(self, chunks):
    # type: (Iterator[bytes]) -> None
    gots = {}
    for hash_name in self._allowed.keys():
        try:
            gots[hash_name] = hashlib.new(hash_name)
        except (ValueError, TypeError):
            raise InstallationError(f"Unknown hash name: {hash_name}")

    for chunk in chunks:
        for hash in gots.values():
            hash.update(chunk)

    for hash_name, got in gots.items():
        if got.hexdigest() in self._allowed[hash_name]:
            return
    self._raise(gots)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
14
hashes.py
167
upd; format
12,436
0
200
101
38
61,197
47
transferlearning
20
.venv/lib/python3.8/site-packages/pip/_internal/utils/hashes.py
Python
14
{ "docstring": "Check good hashes against ones built from iterable of chunks of\n data.\n\n Raise HashMismatch if none match.\n\n ", "language": "en", "n_whitespaces": 38, "n_words": 17, "vocab_size": 16 }
https://github.com/jindongwang/transferlearning.git
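The chunk-hashing idea in check_against_chunks can be shown standalone with nothing but hashlib; this sketch assumes a hypothetical allowed-hashes mapping rather than pip's Hashes object:

# Stream a file in chunks, update one hasher per allowed algorithm, and accept
# if any resulting digest is in the allowed set for that algorithm.
import hashlib

def matches_allowed_hashes(path, allowed):
    # allowed: {"sha256": {"<hex digest>", ...}, ...}
    hashers = {name: hashlib.new(name) for name in allowed}
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            for h in hashers.values():
                h.update(chunk)
    return any(h.hexdigest() in allowed[name] for name, h in hashers.items())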
2
world_size
def world_size(self) -> int:
    world_size = os.environ.get("JSM_NAMESPACE_SIZE")
    if world_size is None:
        raise ValueError(
            "Cannot determine world size. Environment variable `JSM_NAMESPACE_SIZE` not found."
            "Make sure you run your executable with `jsrun`."
        )
    return int(world_size)
dbf1acd5a553ffc1546734be164cc89cef2b741d
11
lsf_environment.py
64
Modify LSFEnvironment to use more reliable environment variable (#10825) Co-authored-by: thomas chaton <[email protected]> Co-authored-by: Carlos Mocholí <[email protected]> Co-authored-by: Adrian Wälchli <[email protected]> Co-authored-by: Jirka Borovec <[email protected]>
69,627
0
113
34
32
241,623
33
lightning
7
pytorch_lightning/plugins/environments/lsf_environment.py
Python
9
{ "docstring": "The world size is read from the environment variable ``JSM_NAMESPACE_SIZE``.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/Lightning-AI/lightning.git
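The pattern in world_size above, reading a scheduler-provided environment variable and failing loudly when it is absent, can be sketched outside of Lightning as follows; the fallback value set here exists only to make the snippet runnable:

# Generic sketch: JSM_NAMESPACE_SIZE is the variable that LSF's jsrun sets.
import os

def read_world_size(var="JSM_NAMESPACE_SIZE"):
    value = os.environ.get(var)
    if value is None:
        raise ValueError(f"Cannot determine world size: {var} is not set.")
    return int(value)

os.environ.setdefault("JSM_NAMESPACE_SIZE", "4")  # simulate a jsrun launch for the demo
print(read_world_size())  # 4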
4
write_to_directory
def write_to_directory(self, dataset_info_dir, pretty_print=False, fs=None):
    fs = fs or LocalFileSystem()
    is_local = not is_remote_filesystem(fs)
    path_join = os.path.join if is_local else posixpath.join

    with fs.open(path_join(dataset_info_dir, config.DATASET_INFO_FILENAME), "wb") as f:
        self._dump_info(f, pretty_print=pretty_print)

    if self.license:
        with fs.open(path_join(dataset_info_dir, config.LICENSE_FILENAME), "wb") as f:
            self._dump_license(f)
139d210b05f0ca9e9bbe29fdd628a3339d1fa813
15
info.py
167
Download and prepare as Parquet for cloud storage (#4724) * use fsspec for caching * add parquet writer * add file_format argument * style * use "gs" instead of "gcs" for apache beam + use is_remote_filesystem * typo * fix test * test ArrowWriter with filesystem * test parquet writer * more tests * more tests * fix nullcontext on 3.6 * parquet_writer.write_batch is not available in pyarrow 6 * remove reference to open file * fix test * docs * docs: dask from parquet files * Apply suggestions from code review Co-authored-by: Mario Šaško <[email protected]> * use contextlib.nullcontext * fix missing import * Use unstrip_protocol to merge protocol and path * remove bad "raise" and add TODOs * add output_dir arg to download_and_prepare * update tests * update docs * fix tests * fix tests * fix output parent dir creattion * Apply suggestions from code review Co-authored-by: Albert Villanova del Moral <[email protected]> * revert changes for remote cache_dir * fix wording in the docs: load -> download and prepare * style * fix * simplify incomplete_dir * fix tests * albert's comments Co-authored-by: Mario Šaško <[email protected]> Co-authored-by: Albert Villanova del Moral <[email protected]>
22,165
0
117
102
28
105,576
38
datasets
21
src/datasets/info.py
Python
9
{ "docstring": "Write `DatasetInfo` and license (if present) as JSON files to `dataset_info_dir`.\n\n Args:\n dataset_info_dir (str): Destination directory.\n pretty_print (bool, default ``False``): If True, the JSON will be pretty-printed with the indent level of 4.\n fs (``fsspec.spec.AbstractFileSystem``, optional, defaults ``None``):\n Instance of the remote filesystem used to download the files from.\n\n <Added version=\"2.5.0\"/>\n\n Example:\n\n ```py\n >>> from datasets import load_dataset\n >>> ds = load_dataset(\"rotten_tomatoes\", split=\"validation\")\n >>> ds.info.write_to_directory(\"/path/to/directory/\")\n ```\n ", "language": "en", "n_whitespaces": 185, "n_words": 66, "vocab_size": 57 }
https://github.com/huggingface/datasets.git
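A rough sketch of the local-versus-remote split that write_to_directory relies on, using plain fsspec and a made-up file name; this illustrates the idea and is not the datasets library's actual writer:

# Pick os.path.join for local filesystems and posixpath.join for remote ones,
# then write JSON through fs.open so the same code works for both.
import json
import os
import posixpath

import fsspec

def write_info(info: dict, target_dir: str, fs=None):
    fs = fs or fsspec.filesystem("file")
    # LocalFileSystem advertises the "file" protocol; anything else is treated as remote.
    protocol = fs.protocol if isinstance(fs.protocol, str) else fs.protocol[0]
    join = os.path.join if protocol == "file" else posixpath.join
    with fs.open(join(target_dir, "dataset_info.json"), "w") as f:
        json.dump(info, f, indent=4)

write_info({"description": "demo"}, ".")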
1
test_span_selector_animated_artists_callback
def test_span_selector_animated_artists_callback():
    x = np.linspace(0, 2 * np.pi, 100)
    values = np.sin(x)

    fig, ax = plt.subplots()
    (ln,) = ax.plot(x, values, animated=True)
    (ln2, ) = ax.plot([], animated=True)

    plt.pause(0.1)
    ax.draw_artist(ln)
    fig.canvas.blit(fig.bbox)
e4dadb672ac01525f7f41ddabd844cb99e871502
10
test_widgets.py
147
Fix getting tuple of animated artists; add comments and test
22,578
0
56
352
24
107,064
29
matplotlib
20
lib/matplotlib/tests/test_widgets.py
Python
36
{ "docstring": "Check that the animated artists changed in callbacks are updated.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/matplotlib/matplotlib.git
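For context on what the test above exercises, here is a compressed sketch of the general matplotlib blitting workflow for animated artists (headless backend chosen so it runs anywhere; this is the standard pattern, not the test code itself):

# Animated artists are skipped by the normal draw, so the caller saves the
# background, draws them explicitly, and blits the combined result.
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(0, 2 * np.pi, 100)
fig, ax = plt.subplots()
(ln,) = ax.plot(x, np.sin(x), animated=True)

fig.canvas.draw()                            # renders everything except animated artists
background = fig.canvas.copy_from_bbox(fig.bbox)
ln.set_ydata(np.cos(x))                      # mutate the animated artist
fig.canvas.restore_region(background)
ax.draw_artist(ln)
fig.canvas.blit(fig.bbox)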
2
_visibility_unit
def _visibility_unit(self) -> str:
    if (
        weather_option_visibility_unit := self._weather_option_visibility_unit
    ) is not None:
        return weather_option_visibility_unit

    return self._default_visibility_unit
90e1fb6ce2faadb9a35fdbe1774fce7b4456364f
9
__init__.py
43
Weather unit conversion (#73441) Co-authored-by: Erik <[email protected]>
112,843
0
67
26
15
314,235
17
core
6
homeassistant/components/weather/__init__.py
Python
10
{ "docstring": "Return the converted unit of measurement for visibility.\n\n Should not be set by integrations.\n ", "language": "en", "n_whitespaces": 28, "n_words": 14, "vocab_size": 14 }
https://github.com/home-assistant/core.git
1
test_consent
def test_consent(self) -> None:
    # Have the admin user accept the terms.
    self.get_success(self.store.user_set_consent_version(self.admin_user, "1.0"))

    # First, cheekily accept the terms and create a room
    self.get_success(self.store.user_set_consent_version(self.other_user, "1.0"))
    room_id = self.helper.create_room_as(self.other_user, tok=self.other_user_tok)
    self.helper.send_event(room_id, "com.example.test", tok=self.other_user_tok)

    # Now unaccept it and check that we can't send an event
    self.get_success(self.store.user_set_consent_version(self.other_user, "0.0"))
    self.helper.send_event(
        room_id,
        "com.example.test",
        tok=self.other_user_tok,
        expect_code=HTTPStatus.FORBIDDEN,
    )

    # Login in as the user
    puppet_token = self._get_token()

    # Sending an event on their behalf should work fine
    self.helper.send_event(room_id, "com.example.test", tok=puppet_token)
901b264c0c88f39cbfb8b2229e0dc57968882658
10
test_user.py
225
Add type hints to `tests/rest/admin` (#11851)
71,079
0
224
137
56
246,185
75
synapse
18
tests/rest/admin/test_user.py
Python
15
{ "docstring": "Test that sending a message is not subject to the privacy policies.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/matrix-org/synapse.git
1
perform_create
def perform_create(self, serializer):
    project_id = self.request.data.get('project')
    project = generics.get_object_or_404(Project, pk=project_id)
    instance = serializer.save(project=project)
    emit_webhooks_for_instance(self.request.user.active_organization, project, WebhookAction.TASKS_CREATED, [instance])


@method_decorator(name='get', decorator=swagger_auto_schema(
    tags=['Tasks'],
    operation_summary='Get task',
    operation_description=,
    manual_parameters=[
        openapi.Parameter(
            name='id',
            type=openapi.TYPE_STRING,
            in_=openapi.IN_PATH,
            description='Task ID'
        ),
    ]))
@method_decorator(name='patch', decorator=swagger_auto_schema(
    tags=['Tasks'],
    operation_summary='Update task',
    operation_description='Update the attributes of an existing labeling task.',
    manual_parameters=[
        openapi.Parameter(
            name='id',
            type=openapi.TYPE_STRING,
            in_=openapi.IN_PATH,
            description='Task ID'
        ),
    ],
    request_body=TaskSimpleSerializer))
@method_decorator(name='delete', decorator=swagger_auto_schema(
    tags=['Tasks'],
    operation_summary='Delete task',
    operation_description='Delete a task in Label Studio. This action cannot be undone!',
    manual_parameters=[
        openapi.Parameter(
            name='id',
            type=openapi.TYPE_STRING,
            in_=openapi.IN_PATH,
            description='Task ID'
        ),
    ],
))
71a9ada93224ed6433fb5b45bfcd60d5fe3edd4c
@method_decorator(name='get', decorator=swagger_auto_schema( tags=['Tasks'], operation_summary='Get task', operation_description=""" Get task data, metadata, annotations and other attributes for a specific labeling task by task ID. """, manual_parameters=[ openapi.Parameter( name='id', type=openapi.TYPE_STRING, in_=openapi.IN_PATH, description='Task ID' ), ])) @method_decorator(name='patch', decorator=swagger_auto_schema( tags=['Tasks'], operation_summary='Update task', operation_description='Update the attributes of an existing labeling task.', manual_parameters=[ openapi.Parameter( name='id', type=openapi.TYPE_STRING, in_=openapi.IN_PATH, description='Task ID' ), ], request_body=TaskSimpleSerializer)) @method_decorator(name='delete', decorator=swagger_auto_schema( tags=['Tasks'], operation_summary='Delete task', operation_description='Delete a task in Label Studio. This action cannot be undone!', manual_parameters=[ openapi.Parameter( name='id', type=openapi.TYPE_STRING, in_=openapi.IN_PATH, description='Task ID' ), ], ))
15
api.py
371
feat: DEV-2896: Comment List API (#2704) * feat: DEV-2896: Comment List API * Fix * Fix tests * Fix more tests * Fixes * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/dm2 * Fix feature flags * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/label-studio-frontend * Add fixes * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/dm2 * [submodules] Build static heartexlabs/label-studio-frontend * Add user id to base.html * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/dm2 * [submodules] Build static heartexlabs/dm2 * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/dm2 * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/dm2 * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/dm2 * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/dm2 * [submodules] Build static heartexlabs/label-studio-frontend * [submodules] Build static heartexlabs/label-studio-frontend Co-authored-by: Brandon Martel <[email protected]>
42,564
1
470
71
56
177,995
81
label-studio
36
label_studio/tasks/api.py
Python
6
{ "docstring": "\n Get task data, metadata, annotations and other attributes for a specific labeling task by task ID.\n ", "language": "en", "n_whitespaces": 31, "n_words": 16, "vocab_size": 14 }
https://github.com/heartexlabs/label-studio.git
3
prepare_test_settings
def prepare_test_settings(self, alias):
    try:
        conn = self.databases[alias]
    except KeyError:
        raise self.exception_class(f"The connection '{alias}' doesn't exist.")

    test_settings = conn.setdefault("TEST", {})
    default_test_settings = [
        ("CHARSET", None),
        ("COLLATION", None),
        ("MIGRATE", True),
        ("MIRROR", None),
        ("NAME", None),
    ]
    for key, value in default_test_settings:
        test_settings.setdefault(key, value)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
utils.py
146
Refs #33476 -- Reformatted code with Black.
51,278
0
177
89
35
205,912
40
django
12
django/db/utils.py
Python
15
{ "docstring": "\n Make sure the test settings are available in the 'TEST' sub-dictionary.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 10 }
https://github.com/django/django.git
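The defaulting behaviour of prepare_test_settings is easy to see in isolation; the sketch below uses a made-up databases dict rather than Django's ConnectionHandler:

# setdefault only fills keys the user has not provided, so explicit TEST
# settings always win over the built-in defaults.
databases = {"default": {"NAME": "app", "TEST": {"NAME": "app_test"}}}

test_settings = databases["default"].setdefault("TEST", {})
for key, value in [("CHARSET", None), ("COLLATION", None), ("MIGRATE", True),
                   ("MIRROR", None), ("NAME", None)]:
    test_settings.setdefault(key, value)

print(test_settings)
# {'NAME': 'app_test', 'CHARSET': None, 'COLLATION': None, 'MIGRATE': True, 'MIRROR': None}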
2
execute
def execute():
    accounting_dimensions = frappe.db.sql(
        ,
        as_dict=1,
    )

    doclist = get_doctypes_with_dimensions()

    for dimension in accounting_dimensions:
        frappe.db.sql(
            % ("%s", ", ".join(["%s"] * len(doclist))),  # nosec
            tuple([dimension.fieldname] + doclist),
        )
494bd9ef78313436f0424b918f200dab8fc7c20b
16
update_owner_fields_in_acc_dimension_custom_fields.py
114
style: format code with black
14,300
0
18
67
25
66,700
28
erpnext
13
erpnext/patches/v12_0/update_owner_fields_in_acc_dimension_custom_fields.py
Python
17
{ "docstring": "select fieldname from\n\t\t`tabAccounting Dimension`\n\t\t\tUPDATE `tabCustom Field`\n\t\t\tSET owner = 'Administrator'\n\t\t\tWHERE fieldname = %s\n\t\t\tAND dt IN (%s)", "language": "en", "n_whitespaces": 14, "n_words": 20, "vocab_size": 18 }
https://github.com/frappe/erpnext.git
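The only subtle part of the patch above is how it builds one %s placeholder per doctype for the IN clause. A standalone sketch with illustrative values follows; the query text mirrors the record's documentation field, while the doctype list and fieldname are made up for the example:

# Build one %s per doctype so values are passed as query parameters rather than
# spliced directly into the SQL string.
doclist = ["Sales Invoice", "Purchase Invoice", "Journal Entry"]
fieldname = "cost_center"

query = (
    "UPDATE `tabCustom Field` SET owner = 'Administrator' "
    "WHERE fieldname = %s AND dt IN ({})".format(", ".join(["%s"] * len(doclist)))
)
params = tuple([fieldname] + doclist)
print(query)   # ... AND dt IN (%s, %s, %s)
print(params)  # ('cost_center', 'Sales Invoice', 'Purchase Invoice', 'Journal Entry')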
1
async_setup_entity
async def async_setup_entity(hass, entity_id):
    return (await async_setup_entities(hass, [entity_id]))[0]
923fa473e171fcdf396556ea200612e378f9b0a5
11
conftest.py
38
Blebox add thermoBox to climate (#81090) Co-authored-by: Martin Hjelmare <[email protected]>
96,116
0
14
23
8
297,147
8
core
4
tests/components/blebox/conftest.py
Python
2
{ "docstring": "Return a configured entry with the given entity_id.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
1
outputs
def outputs(self) -> Mapping[str, Mapping[int, str]]:
    common_outputs = self._tasks_to_common_outputs[self.task]
    return copy.deepcopy(common_outputs)
50dd314d939a86f3a81e19af01459f449fbaeeca
8
config.py
54
Add ONNX export for ViT (#15658) * Add ONNX support for ViT * Refactor to use generic preprocessor * Add vision dep to tests * Extend ONNX slow tests to ViT * Add dummy image generator * Use model_type to determine modality * Add deprecation warnings for tokenizer argument * Add warning when overwriting the preprocessor * Add optional args to docstrings * Add minimum PyTorch version to OnnxConfig * Refactor OnnxConfig class variables from CONSTANT_NAME to snake_case * Add reasonable value for default atol Co-authored-by: Sylvain Gugger <[email protected]>
6,563
0
32
35
11
36,021
11
transformers
10
src/transformers/onnx/config.py
Python
9
{ "docstring": "\n Mapping containing the axis definition of the output tensors to provide to the model\n\n Returns:\n For each output: its name associated to the axes symbolic name and the axis position within the tensor\n ", "language": "en", "n_whitespaces": 66, "n_words": 33, "vocab_size": 24 }
https://github.com/huggingface/transformers.git
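A small illustration of why the outputs property returns copy.deepcopy of the shared mapping; the axis names below are typical of ONNX export configs but are chosen here only for the example:

# Callers may tweak the axis mapping per export, and the shared class-level
# dict must not change underneath them, hence the deep copy.
import copy

_tasks_to_common_outputs = {
    "default": {"last_hidden_state": {0: "batch", 1: "sequence"}},
}

def outputs(task="default"):
    return copy.deepcopy(_tasks_to_common_outputs[task])

o = outputs()
o["last_hidden_state"][1] = "decoder_sequence"   # caller-side tweak
print(_tasks_to_common_outputs["default"])       # original mapping is untouched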
3
test_valid_search_order
def test_valid_search_order(self) -> None:
    # fetch the most recent first, largest timestamp
    channel = self.make_request(
        "GET",
        self.url + "?dir=b",
        access_token=self.admin_user_tok,
    )

    self.assertEqual(200, channel.code, msg=channel.json_body)
    self.assertEqual(channel.json_body["total"], 20)
    self.assertEqual(len(channel.json_body["event_reports"]), 20)
    report = 1
    while report < len(channel.json_body["event_reports"]):
        self.assertGreaterEqual(
            channel.json_body["event_reports"][report - 1]["received_ts"],
            channel.json_body["event_reports"][report]["received_ts"],
        )
        report += 1

    # fetch the oldest first, smallest timestamp
    channel = self.make_request(
        "GET",
        self.url + "?dir=f",
        access_token=self.admin_user_tok,
    )

    self.assertEqual(200, channel.code, msg=channel.json_body)
    self.assertEqual(channel.json_body["total"], 20)
    self.assertEqual(len(channel.json_body["event_reports"]), 20)
    report = 1
    while report < len(channel.json_body["event_reports"]):
        self.assertLessEqual(
            channel.json_body["event_reports"][report - 1]["received_ts"],
            channel.json_body["event_reports"][report]["received_ts"],
        )
        report += 1
c97042f7eef3748e17c90e48a4122389a89c4735
13
test_event_reports.py
398
Use literals in place of `HTTPStatus` constants in tests (#13469)
72,595
0
394
244
42
249,088
83
synapse
15
tests/rest/admin/test_event_reports.py
Python
34
{ "docstring": "\n Testing search order. Order by timestamps.\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
https://github.com/matrix-org/synapse.git
1
report_failure
def report_failure(self, out, test, example, got):
    out(self._failure_header(test, example) +
        self._checker.output_difference(example, got, self.optionflags))
8198943edd73a363c266633e1aa5b2a9e9c9f526
11
doctest.py
59
add python 3.10.4 for windows
56,908
0
37
40
12
223,450
12
XX-Net
10
python3.10.4/Lib/doctest.py
Python
3
{ "docstring": "\n Report that the given example failed.\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
https://github.com/XX-net/XX-Net.git
5
tmpfile
def tmpfile(extension="", dir=None): extension = extension.lstrip(".") if extension: extension = "." + extension handle, filename = tempfile.mkstemp(extension, dir=dir) os.close(handle) os.remove(filename) try: yield filename finally: if os.path.exists(filename): with suppress(OSError): # sometimes we can't remove a generated temp file if os.path.isdir(filename): shutil.rmtree(filename) else: os.remove(filename) @contextmanager
bf66221722cce8f09a9b09895bdb4596f14a5430
@contextmanager
17
utils.py
179
`tmpfile` does not end files with period on empty extension (#9429)
36,805
1
167
100
35
156,915
43
dask
19
dask/utils.py
Python
16
{ "docstring": "\n Function to create and return a unique temporary file with the given extension, if provided.\n\n Parameters\n ----------\n extension : str\n The extension of the temporary file to be created\n dir : str\n If ``dir`` is not None, the file will be created in that directory; otherwise,\n Python's default temporary directory is used.\n\n Returns\n -------\n out : str\n Path to the temporary file\n\n See Also\n --------\n NamedTemporaryFile : Built-in alternative for creating temporary files\n tmp_path : pytest fixture for creating a temporary directory unique to the test invocation\n\n Notes\n -----\n This context manager is particularly useful on Windows for opening temporary files multiple times.\n ", "language": "en", "n_whitespaces": 180, "n_words": 103, "vocab_size": 69 }
https://github.com/dask/dask.git
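Typical use of the tmpfile context manager shown above, assuming it is imported from dask.utils as the record's path indicates; the written CSV content is arbitrary:

# The yielded path does not exist until the caller creates it; on exit the file
# (or directory) is removed even if the block raises.
from dask.utils import tmpfile

with tmpfile(extension="csv") as fn:
    with open(fn, "w") as f:
        f.write("a,b\n1,2\n")
    print(fn.endswith(".csv"))  # True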