Dataset schema (one row per column: dtype, then the observed minimum and maximum, which are value ranges for int64 columns and length ranges for string columns):

    column           dtype           min      max
    complexity       int64           1        56
    n_identifiers    int64           1        114
    code             stringlengths   19       12.7k
    path             stringlengths   8        134
    n_ast_nodes      int64           12       2.35k
    ast_errors       stringlengths   0        4.01k
    repo             stringlengths   3        28
    documentation    dict            -        -
    n_words          int64           2        866
    language         stringclasses   1 value
    vocab_size       int64           2        323
    commit_id        stringlengths   40       40
    file_name        stringlengths   5        79
    id               int64           243      338k
    nloc             int64           1        228
    token_counts     int64           5        1.4k
    fun_name         stringlengths   1        77
    url              stringlengths   31       60
    commit_message   stringlengths   3        15.3k
    n_whitespaces    int64           1        3.23k
    n_ast_errors     int64           0        20
    d_id             int64           74       121k
    ast_levels       int64           4        29

Each sample below lists its field values in this column order, one field per line; the ast_errors field is omitted from a sample when it is empty.
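For orientation, here is a minimal sketch of iterating records that follow this schema with the Hugging Face datasets library. The dataset identifier used below is a hypothetical placeholder, since this dump does not name the dataset; the field names come from the schema above.

    from datasets import load_dataset

    # "org/code-with-docstrings" is a hypothetical placeholder, not a real dataset id.
    ds = load_dataset("org/code-with-docstrings", split="train")

    for sample in ds.select(range(3)):
        # Each record pairs a source snippet with its extracted docstring and commit metadata.
        print(sample["repo"], sample["fun_name"], sample["commit_id"][:8])
        print(sample["documentation"]["docstring"][:80])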
3
7
def get_patches(self): r return silent_list('Patch', [h for h in self.legend_handles if isinstance(h, Patch)])
lib/matplotlib/legend.py
46
matplotlib
{ "docstring": "Return the list of `~.patches.Patch`\\s in the legend.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 7 }
13
Python
13
2a1a1a6e47e41b8992d462c48491d2ce347694cd
legend.py
110,667
5
29
get_patches
https://github.com/matplotlib/matplotlib.git
API/DOC: Document legend_handles and legend_handlers - deprecate legendHandles
79
0
24,247
11
2
14
def pop(): conn = sqlite3.connect(DB_FILE) c = conn.cursor() c.execute("BEGIN EXCLUSIVE") c.execute( ) result = c.fetchone() if result is None: conn.commit() return None queue_index = result[0] c.execute( , (queue_index,), ) conn.commit() return result[0], result[1], json.loads(result[2]), result[3]
gradio/queueing.py
160
gradio
{ "docstring": "\n SELECT queue_index, hash, input_data, action FROM queue\n WHERE popped = 0 ORDER BY queue_index ASC LIMIT 1;\n \n UPDATE queue SET popped = 1, input_data = '' WHERE queue_index = ?;\n ", "language": "en", "n_whitespaces": 59, "n_words": 30, "vocab_size": 23 }
35
Python
27
cc0cff893f9d7d472788adc2510c123967b384fe
queueing.py
179,286
23
98
pop
https://github.com/gradio-app/gradio.git
Format The Codebase - black formatting - isort formatting
106
0
42,936
9
1
4
def __call__(self, name, value): return self[name](name, value)
python3.10.4/Lib/email/headerregistry.py
31
XX-Net
{ "docstring": "Create a header instance for header 'name' from 'value'.\n\n Creates a header instance by creating a specialized class for parsing\n and representing the specified header by combining the factory\n base_class with a specialized class from the registry or the\n default_class, and passing the name and value to the constructed\n class's constructor.\n\n ", "language": "en", "n_whitespaces": 93, "n_words": 51, "vocab_size": 32 }
7
Python
7
8198943edd73a363c266633e1aa5b2a9e9c9f526
headerregistry.py
223,754
2
20
__call__
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
21
0
57,050
7
2
5
def disconnect(self): if self.is_connected is False: return self.connection.close() self.is_connected = False return self.is_connected
mindsdb/integrations/handlers/sqlite_handler/sqlite_handler.py
52
mindsdb
{ "docstring": "\r\n Close any existing connections.\r\n ", "language": "en", "n_whitespaces": 19, "n_words": 4, "vocab_size": 4 }
13
Python
10
fc9776d9b342f873cbb3f36fd39955b9e1ea6f76
sqlite_handler.py
115,431
6
30
disconnect
https://github.com/mindsdb/mindsdb.git
added connection_args and connection_args_example dicts
59
0
25,459
8
3
38
def call_mkt(self, other_args): parser = argparse.ArgumentParser( prog="mkt", add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=, ) parser.add_argument( "--vs", help="Quoted currency. Default USD", dest="vs", default="USD", type=str, choices=coinpaprika_view.CURRENCIES, ) parser.add_argument( "-l", "--limit", default=20, dest="limit", help="Limit of records", type=check_positive, ) parser.add_argument( "-s", "--sort", dest="sortby", type=str, help="Sort by given column. Default: pct_volume_share", default="pct_volume_share", choices=coinpaprika_view.MARKET_FILTERS, ) parser.add_argument( "-r", "--reverse", action="store_true", dest="reverse", default=False, help=( "Data is sorted in descending order by default. " "Reverse flag will sort it in an ascending way. " "Only works when raw data is displayed." ), ) parser.add_argument( "-u", "--urls", dest="urls", action="store_true", help=, default=False, ) ns_parser = self.parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: if self.symbol: coinpaprika_view.display_markets( from_symbol=self.symbol, to_symbol=ns_parser.vs, limit=ns_parser.limit, sortby=ns_parser.sortby, ascend=ns_parser.reverse, links=ns_parser.urls, export=ns_parser.export, )
openbb_terminal/cryptocurrency/due_diligence/dd_controller.py
384
OpenBBTerminal
{ "docstring": "Process mkt commandGet all markets found for given coin.\n You can display only N number of markets with --limt parameter.\n You can sort data by pct_volume_share, exchange, pair, trust_score, volume, price --sort parameter\n and also with --reverse flag to sort ascending.\n You can use additional flag --urls to see urls for each market\n Displays:\n exchange, pair, trust_score, volume, price, pct_volume_share,Flag to show urls. If you will use that flag you will see only:\n exchange, pair, trust_score, market_url columns", "language": "en", "n_whitespaces": 186, "n_words": 78, "vocab_size": 55 }
109
Python
89
0ae89d6cc20be84bf49c31e437fda38a845ebc68
dd_controller.py
286,516
73
239
call_mkt
https://github.com/OpenBB-finance/OpenBBTerminal.git
Style fixing: removing --ascend/--descend (#3395) * stocks candle to use reverse * qa raw to use reverse * etf candle to use reverse * oss rossix to use reverse * crypto/defi to use reverse * crypto/disc to use reverse * added test * crypto/dd to use reverse * crypto/onchain to use reverse * crypto/ov to use revert * forex candle to use revert * conibase controller to use revert * tests to use reverse * covid to use reverse * removing ascend * removing ascend from econ * more removing ascend * more removing ascend * more removing ascend * fixing stuff on .md files * fixed economy controller tests * fixed screener tests * fa controller to use comma separated when multiple inputs
847
0
85,839
13
6
11
def nseries(self, x=None, x0=0, n=6, dir='+', logx=None, cdir=0): if x and x not in self.free_symbols: return self if x is None or x0 or dir != '+': # {see XPOS above} or (x.is_positive == x.is_negative == None): return self.series(x, x0, n, dir, cdir=cdir) else: return self._eval_nseries(x, n=n, logx=logx, cdir=cdir)
sympy/core/expr.py
135
sympy
{ "docstring": "\n Wrapper to _eval_nseries if assumptions allow, else to series.\n\n If x is given, x0 is 0, dir='+', and self has x, then _eval_nseries is\n called. This calculates \"n\" terms in the innermost expressions and\n then builds up the final series just by \"cross-multiplying\" everything\n out.\n\n The optional ``logx`` parameter can be used to replace any log(x) in the\n returned series with a symbolic value to avoid evaluating log(x) at 0. A\n symbol to use in place of log(x) should be provided.\n\n Advantage -- it's fast, because we do not have to determine how many\n terms we need to calculate in advance.\n\n Disadvantage -- you may end up with less terms than you may have\n expected, but the O(x**n) term appended will always be correct and\n so the result, though perhaps shorter, will also be correct.\n\n If any of those assumptions is not met, this is treated like a\n wrapper to series which will try harder to return the correct\n number of terms.\n\n See also lseries().\n\n Examples\n ========\n\n >>> from sympy import sin, log, Symbol\n >>> from sympy.abc import x, y\n >>> sin(x).nseries(x, 0, 6)\n x - x**3/6 + x**5/120 + O(x**6)\n >>> log(x+1).nseries(x, 0, 5)\n x - x**2/2 + x**3/3 - x**4/4 + O(x**5)\n\n Handling of the ``logx`` parameter --- in the following example the\n expansion fails since ``sin`` does not have an asymptotic expansion\n at -oo (the limit of log(x) as x approaches 0):\n\n >>> e = sin(log(x))\n >>> e.nseries(x, 0, 6)\n Traceback (most recent call last):\n ...\n PoleError: ...\n ...\n >>> logx = Symbol('logx')\n >>> e.nseries(x, 0, 6, logx=logx)\n sin(logx)\n\n In the following example, the expansion works but only returns self\n unless the ``logx`` parameter is used:\n\n >>> e = x**y\n >>> e.nseries(x, 0, 2)\n x**y\n >>> e.nseries(x, 0, 2, logx=logx)\n exp(logx*y)\n\n ", "language": "en", "n_whitespaces": 610, "n_words": 294, "vocab_size": 182 }
49
Python
40
46ba104ee0f9cb35b54c2f5f5591cfabb26d0301
expr.py
195,878
7
91
nseries
https://github.com/sympy/sympy.git
Fixed failing doctest
111
0
47,462
11
4
13
def _process_triggers(self) -> None: if self._triggers is None: # Don't need triggers for GUI return logger.debug("Processing triggers") root = self._canvas.winfo_toplevel() for key in self._keymaps: bindkey = "Return" if key == "enter" else key logger.debug("Adding trigger for key: '%s'", bindkey) root.bind(f"<{bindkey}>", self._on_keypress) logger.debug("Processed triggers")
lib/training/preview_tk.py
131
faceswap
{ "docstring": " Process the standard faceswap key press triggers:\n\n m = toggle_mask\n r = refresh\n s = save\n enter = quit\n ", "language": "en", "n_whitespaces": 55, "n_words": 19, "vocab_size": 16 }
43
Python
35
7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5
preview_tk.py
101,555
17
72
_process_triggers
https://github.com/deepfakes/faceswap.git
Training - Use custom preview pop-out
130
0
20,965
11
3
15
def to_arrow_refs(self) -> List[ObjectRef["pyarrow.Table"]]: blocks: List[ObjectRef[Block]] = self.get_internal_block_refs() if self.dataset_format() == BlockFormat.ARROW: # Zero-copy path. return blocks block_to_arrow = cached_remote_fn(_block_to_arrow) return [block_to_arrow.remote(block) for block in blocks]
python/ray/data/dataset.py
100
ray
{ "docstring": "Convert this dataset into a distributed set of Arrow tables.\n\n This is only supported for datasets convertible to Arrow records.\n This function is zero-copy if the existing data is already in Arrow\n format. Otherwise, the data will be converted to Arrow format.\n\n Time complexity: O(1) unless conversion is required.\n\n Returns:\n A list of remote Arrow tables created from this dataset.\n ", "language": "en", "n_whitespaces": 113, "n_words": 60, "vocab_size": 46 }
26
Python
24
326d84f1149319809191e7887155df7f04f6f46a
dataset.py
136,382
17
61
to_arrow_refs
https://github.com/ray-project/ray.git
[AIR][Predictor] Enable numpy based predictor (#28917) Co-authored-by: Clark Zinzow <[email protected]> Co-authored-by: Amog Kamsetty <[email protected]>
83
0
30,902
9
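A hedged usage sketch of the Dataset.to_arrow_refs method shown in the record above, assuming the Ray Data API of the version referenced there; the toy dataset is illustrative only.

    import ray
    import ray.data  # explicit submodule import, in case ray.data is not auto-loaded

    ds = ray.data.range(1000)      # small in-memory dataset for illustration
    refs = ds.to_arrow_refs()      # list of ObjectRef[pyarrow.Table]; zero-copy if blocks are already Arrow
    tables = ray.get(refs)         # materialize the pyarrow.Table blocks locally
    print(len(tables), tables[0].num_rows)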
1
16
def _get_kernel(self) -> tf.Tensor: coords = np.arange(self._filter_size, dtype="float32") coords -= (self._filter_size - 1) / 2. kernel = np.square(coords) kernel *= -0.5 / np.square(self._filter_sigma) kernel = np.reshape(kernel, (1, -1)) + np.reshape(kernel, (-1, 1)) kernel = K.constant(np.reshape(kernel, (1, -1))) kernel = K.softmax(kernel) kernel = K.reshape(kernel, (self._filter_size, self._filter_size, 1, 1)) return kernel
lib/model/losses_tf.py
211
faceswap
{ "docstring": " Obtain the base kernel for performing depthwise convolution.\n\n Returns\n -------\n :class:`tf.Tensor`\n The gaussian kernel based on selected size and sigma\n ", "language": "en", "n_whitespaces": 60, "n_words": 20, "vocab_size": 19 }
49
Python
33
04337e0c5efd442c1ce3e2da193dd8749f1e30d8
losses_tf.py
100,878
17
141
_get_kernel
https://github.com/deepfakes/faceswap.git
SSIM Updates - Standardize DSSIM Function - Implement MSSIM function for AMD
119
0
20,328
12
1
4
def test_validate_subscription_query_invalid(): result = validate_subscription_query("invalid_query") assert result is False TEST_VALID_SUBSCRIPTION_QUERY_WITH_FRAGMENT =
saleor/plugins/webhook/tests/subscription_webhooks/test_create_deliveries_for_subscription.py
33
saleor
{ "docstring": "\nfragment productFragment on Product{\n name\n}\nsubscription{\n event{\n ...on ProductUpdated{\n product{\n id\n ...productFragment\n }\n }\n }\n}\n", "language": "en", "n_whitespaces": 46, "n_words": 17, "vocab_size": 13 }
11
Python
9
aca6418d6c36956bc1ab530e6ef7e146ec9df90c
test_create_deliveries_for_subscription.py
26,493
3
14
test_validate_subscription_query_invalid
https://github.com/saleor/saleor.git
Add Webhook payload via graphql subscriptions (#9394) * Add PoC of webhook subscriptions * add async webhooks subscription payloads feature * remove unneeded file * add translations subscription handling, fixes after review * remove todo * add descriptions * add descriptions, move subsrciption_payloads.py * refactor * fix imports, add changelog * check_document_is_single_subscription refactor Co-authored-by: Maciej Korycinski <[email protected]> Co-authored-by: Marcin Gębala <[email protected]>
16
0
5,022
9
7
18
def find_module(module, path=None, imp=None): if imp is None: imp = import_module with cwd_in_path(): try: return imp(module) except ImportError: # Raise a more specific error if the problem is that one of the # dot-separated segments of the module name is not a package. if '.' in module: parts = module.split('.') for i, part in enumerate(parts[:-1]): package = '.'.join(parts[:i + 1]) try: mpart = imp(package) except ImportError: # Break out and re-raise the original ImportError # instead. break try: mpart.__path__ except AttributeError: raise NotAPackage(package) raise
celery/utils/imports.py
185
celery
{ "docstring": "Version of :func:`imp.find_module` supporting dots.", "language": "en", "n_whitespaces": 4, "n_words": 5, "vocab_size": 5 }
84
Python
61
59263b0409e3f02dc16ca8a3bd1e42b5a3eba36d
imports.py
208,027
20
105
find_module
https://github.com/celery/celery.git
Minor refactors, found by static analysis (#7587) * Remove deprecated methods in `celery.local.Proxy` * Collapse conditionals for readability * Remove unused parameter `uuid` * Remove unused import `ClusterOptions` * Remove dangerous mutable default argument Continues work from #5478 * Remove always `None` and unused global variable * Remove unreachable `elif` block * Consolidate import statements * Add missing parameter to `os._exit()` * Add missing assert statement * Remove unused global `WindowsError` * Use `mkstemp` instead of deprecated `mktemp` * No need for `for..else` constructs in loops that don't break In these cases where the loop returns or raises instead of breaking, it is simpler to just put the code that runs after the loop completes right after the loop instead. * Use the previously unused parameter `compat_modules` Previously this parameter was always overwritten by the value of `COMPAT_MODULES.get(name, ())`, which was very likely unintentional. * Remove unused local variable `tz` * Make `assert_received` actually check for `is_received` Previously, it called `is_accepted`, which was likely a copy-paste mistake from the `assert_accepted` method. * Use previously unused `args` and `kwargs` params Unlike other backends' `__reduce__` methods, the one from `RedisBackend` simply overwrites `args` and `kwargs` instead of adding to them. This change makes it more in line with other backends. * Update celery/backends/filesystem.py Co-authored-by: Gabriel Soldani <[email protected]> Co-authored-by: Asif Saif Uddin <[email protected]>
432
0
52,179
20
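A hedged sketch, based on the record's own code, of how find_module resolves dotted module names and reports a non-package segment; the dotted names below are illustrative.

    from celery.utils.imports import NotAPackage, find_module

    mod = find_module("celery.utils.text")   # imports and returns the module object
    print(mod.__name__)

    try:
        # celery.utils.text is a module, not a package, so the dotted lookup below
        # should raise NotAPackage rather than a bare ImportError.
        find_module("celery.utils.text.missing")
    except (NotAPackage, ImportError) as exc:
        print(type(exc).__name__, exc)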
1
13
def test_from_is_negative(self) -> None: channel = self.make_request( "GET", self.url + "?from=-5", access_token=self.admin_user_tok, ) self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"])
tests/rest/admin/test_event_reports.py
97
synapse
{ "docstring": "\n Testing that a negative from parameter returns a 400\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 8 }
18
Python
18
2281427175e4c93a30c39607fb4ac23c2a1f399f
test_event_reports.py
249,309
11
60
test_from_is_negative
https://github.com/matrix-org/synapse.git
Use literals in place of `HTTPStatus` constants in tests (#13488) * Use literals in place of `HTTPStatus` constants in tests * newsfile * code style * code style
86
0
72,812
10
1
5
def test__compress_ids_not_dict(): data = ["malformed"] actual_output = highstate._compress_ids(data) assert actual_output == data
tests/pytests/unit/output/test_highstate.py
41
salt
{ "docstring": "\n Simple test for returning original malformed data\n to let the outputter figure it out.\n ", "language": "en", "n_whitespaces": 24, "n_words": 14, "vocab_size": 14 }
12
Python
9
7e1c2baa659ee2a975cbe4ed0f6d85e34ec91e50
test_highstate.py
216,122
4
22
test__compress_ids_not_dict
https://github.com/saltstack/salt.git
fixes saltstack/salt#61549 allow roll-up of duplicate IDs with different names
24
0
54,413
8
9
31
def _populate_directed_relation_graph(self): related_objects_graph = defaultdict(list) all_models = self.apps.get_models(include_auto_created=True) for model in all_models: opts = model._meta # Abstract model's fields are copied to child models, hence we will # see the fields from the child models. if opts.abstract: continue fields_with_relations = ( f for f in opts._get_fields(reverse=False, include_parents=False) if f.is_relation and f.related_model is not None ) for f in fields_with_relations: if not isinstance(f.remote_field.model, str): remote_label = f.remote_field.model._meta.concrete_model._meta.label related_objects_graph[remote_label].append(f) for model in all_models: # Set the relation_tree using the internal __dict__. In this way # we avoid calling the cached property. In attribute lookup, # __dict__ takes precedence over a data descriptor (such as # @cached_property). This means that the _meta._relation_tree is # only called if related_objects is not in __dict__. related_objects = related_objects_graph[ model._meta.concrete_model._meta.label ] model._meta.__dict__["_relation_tree"] = related_objects # It seems it is possible that self is not in all_models, so guard # against that with default for get(). return self.__dict__.get("_relation_tree", EMPTY_RELATION_TREE)
django/db/models/options.py
248
django
{ "docstring": "\n This method is used by each model to find its reverse objects. As this\n method is very expensive and is accessed frequently (it looks up every\n field in a model, in every app), it is computed on first access and then\n is set as a property on every model.\n ", "language": "en", "n_whitespaces": 85, "n_words": 49, "vocab_size": 38 }
151
Python
100
9c19aff7c7561e3a82978a272ecdaad40dda5c00
options.py
205,736
22
153
_populate_directed_relation_graph
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
500
0
51,182
18
2
15
def downgrade(): conn = op.get_bind() if conn.dialect.name == "mysql": op.alter_column( table_name=TABLE_NAME, column_name=COLUMN_NAME, type_=mysql.TIMESTAMP(), nullable=False )
airflow/migrations/versions/a66efa278eea_add_precision_to_execution_date_in_mysql.py
75
airflow
{ "docstring": "Unapply Add Precision to ``execution_date`` in ``RenderedTaskInstanceFields`` table", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
15
Python
15
69f6f9e01b6df76c3c8fa266d460324163957887
a66efa278eea_add_precision_to_execution_date_in_mysql.py
45,487
6
45
downgrade
https://github.com/apache/airflow.git
Autogenerate migration reference doc (#21601) * document airflow version in each alembic migration module and use this to autogen the doc * update each migration module to have the same description used in migration ref (so it can be used in autogen)
49
0
8,614
12
1
2
def colors(self): return self["colors"]
packages/python/plotly/plotly/graph_objs/funnelarea/_marker.py
22
plotly.py
{ "docstring": "\n Sets the color of each sector. If not specified, the default\n trace color set is used to pick the sector colors.\n\n The 'colors' property is an array that may be specified as a tuple,\n list, numpy array, or pandas Series\n\n Returns\n -------\n numpy.ndarray\n ", "language": "en", "n_whitespaces": 100, "n_words": 43, "vocab_size": 39 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_marker.py
229,885
2
11
colors
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
61,558
7
2
11
def post_fork(self, payload_handler, io_loop): if not io_loop: raise ValueError("io_loop must be set") self.payload_handler = payload_handler self.io_loop = io_loop self._rmq_nonblocking_connection_wrapper = RMQNonBlockingConnectionWrapper( self.opts, io_loop=io_loop ) self._rmq_nonblocking_connection_wrapper.register_message_callback( self.handle_message ) self._rmq_nonblocking_connection_wrapper.connect()
salt/transport/rabbitmq.py
99
salt
{ "docstring": "\n After forking we need to set up handlers to listen to the\n router\n\n :param func payload_handler: A function to called to handle incoming payloads as\n they are picked up off the wire\n :param IOLoop io_loop: An instance of a Tornado IOLoop, to handle event scheduling\n ", "language": "en", "n_whitespaces": 117, "n_words": 45, "vocab_size": 36 }
28
Python
25
ab4803984bce4a4de7cc10910e7310c4babf557e
rabbitmq.py
215,409
12
60
post_fork
https://github.com/saltstack/salt.git
Start to add base class defs
124
0
53,954
10
3
7
def to(self, device=None, dtype=None) -> None: r # .to() on the tensors handles None correctly self.shadow_params = [ p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device) for p in self.shadow_params ]
examples/text_to_image/train_text_to_image.py
85
diffusers
{ "docstring": "Move internal buffers of the ExponentialMovingAverage to `device`.\n\n Args:\n device: like `device` argument to `torch.Tensor.to`\n ", "language": "en", "n_whitespaces": 40, "n_words": 15, "vocab_size": 14 }
29
Python
28
008b608f1551dbcf521284ed0e7a6722cd02ef07
train_text_to_image.py
337,105
10
56
to
https://github.com/huggingface/diffusers.git
[train_text2image] Fix EMA and make it compatible with deepspeed. (#813) * fix ema * style * add comment about copy * style * quality
78
0
120,959
11
5
15
def _try_state_query_expect_rate_limit(api_func, res_q, start_q=None): try: # Indicate start of the process if start_q is not None: start_q.put(1) api_func() except RayStateApiException as e: # Other exceptions will be thrown if "Max number of in-progress requests" in str(e): res_q.put(1) else: res_q.put(e) except Exception as e: res_q.put(e) else: res_q.put(0) @pytest.mark.skipif( sys.platform == "win32", reason="Lambda test functions could not be pickled on Windows", )
python/ray/tests/test_state_api.py
163
@pytest.mark.skipif( sys.platform == "win32", reason="Lambda test functions could not be pickled on Windows", )
ray
{ "docstring": "Utility functions for rate limit related e2e tests below", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
60
Python
50
365ffe21e592589880e3116302705b5e08a5b81f
test_state_api.py
124,713
14
75
_try_state_query_expect_rate_limit
https://github.com/ray-project/ray.git
[Core | State Observability] Implement API Server (Dashboard) HTTP Requests Throttling (#26257) This is to limit the max number of HTTP requests the dashboard (API server) will accept before rejecting more requests. This will make sure the observability requests do not overload the downstream systems (raylet/gcs) when delegating too many concurrent state observability requests to the cluster.
168
1
27,666
13
1
13
async def handle_webhook(hass, webhook_id, request): data = dict(await request.post()) data["webhook_id"] = webhook_id hass.bus.async_fire(RECEIVED_DATA, dict(data)) return web.Response(text="")
homeassistant/components/twilio/__init__.py
84
core
{ "docstring": "Handle incoming webhook from Twilio for inbound messages and calls.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
16
Python
15
44befe5f11390365e2ff0a7ce03133c1edd838a9
__init__.py
292,141
5
49
handle_webhook
https://github.com/home-assistant/core.git
Fix Twilio webhook content type (#66561)
31
0
91,243
11
6
26
def manhattan_distances(X, Y=None, *, sum_over_features="deprecated"): # TODO(1.4): remove sum_over_features if sum_over_features != "deprecated": warnings.warn( "`sum_over_features` is deprecated in version 1.2 and will be" " removed in version 1.4.", FutureWarning, ) else: sum_over_features = True X, Y = check_pairwise_arrays(X, Y) if issparse(X) or issparse(Y): if not sum_over_features: raise TypeError( "sum_over_features=%r not supported for sparse matrices" % sum_over_features ) X = csr_matrix(X, copy=False) Y = csr_matrix(Y, copy=False) X.sum_duplicates() # this also sorts indices in-place Y.sum_duplicates() D = np.zeros((X.shape[0], Y.shape[0])) _sparse_manhattan(X.data, X.indices, X.indptr, Y.data, Y.indices, Y.indptr, D) return D if sum_over_features: return distance.cdist(X, Y, "cityblock") D = X[:, np.newaxis, :] - Y[np.newaxis, :, :] D = np.abs(D, D) return D.reshape((-1, X.shape[1]))
sklearn/metrics/pairwise.py
339
scikit-learn
{ "docstring": "Compute the L1 distances between the vectors in X and Y.\n\n With sum_over_features equal to False it returns the componentwise\n distances.\n\n Read more in the :ref:`User Guide <metrics>`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples_X, n_features)\n An array where each row is a sample and each column is a feature.\n\n Y : array-like of shape (n_samples_Y, n_features), default=None\n An array where each row is a sample and each column is a feature.\n If `None`, method uses `Y=X`.\n\n sum_over_features : bool, default=True\n If True the function returns the pairwise distance matrix\n else it returns the componentwise L1 pairwise-distances.\n Not supported for sparse matrix inputs.\n\n .. deprecated:: 1.2\n ``sum_over_features`` was deprecated in version 1.2 and will be removed in\n 1.4.\n\n Returns\n -------\n D : ndarray of shape (n_samples_X * n_samples_Y, n_features) or \\\n (n_samples_X, n_samples_Y)\n If sum_over_features is False shape is\n (n_samples_X * n_samples_Y, n_features) and D contains the\n componentwise L1 pairwise-distances (ie. absolute difference),\n else shape is (n_samples_X, n_samples_Y) and D contains\n the pairwise L1 distances.\n\n Notes\n -----\n When X and/or Y are CSR sparse matrices and they are not already\n in canonical format, this function modifies them in-place to\n make them canonical.\n\n Examples\n --------\n >>> from sklearn.metrics.pairwise import manhattan_distances\n >>> manhattan_distances([[3]], [[3]])\n array([[0.]])\n >>> manhattan_distances([[3]], [[2]])\n array([[1.]])\n >>> manhattan_distances([[2]], [[3]])\n array([[1.]])\n >>> manhattan_distances([[1, 2], [3, 4]],\\\n [[1, 2], [0, 3]])\n array([[0., 2.],\n [4., 4.]])\n ", "language": "en", "n_whitespaces": 444, "n_words": 225, "vocab_size": 133 }
108
Python
81
7cf938c78ff0e38a231a7cb3a2a7fa412bb47966
pairwise.py
261,366
28
214
manhattan_distances
https://github.com/scikit-learn/scikit-learn.git
API Remove `sklearn.metrics.manhattan_distances` option `sum_over_features` (#24630)
308
0
76,778
13
1
6
def set_axis_direction(self, label_direction): self.set_default_alignment(label_direction) self.set_default_angle(label_direction) self._axis_direction = label_direction
lib/mpl_toolkits/axisartist/axis_artist.py
43
matplotlib
{ "docstring": "\n Adjust the text angle and text alignment of ticklabels\n according to the Matplotlib convention.\n\n The *label_direction* must be one of [left, right, bottom, top].\n\n ===================== ========== ========= ========== ==========\n Property left bottom right top\n ===================== ========== ========= ========== ==========\n ticklabel angle 90 0 -90 180\n ticklabel va center baseline center baseline\n ticklabel ha right center right center\n ===================== ========== ========= ========== ==========\n\n Note that the text angles are actually relative to (90 + angle\n of the direction to the ticklabel), which gives 0 for bottom\n axis.\n\n Parameters\n ----------\n label_direction : {\"left\", \"bottom\", \"right\", \"top\"}\n\n ", "language": "en", "n_whitespaces": 331, "n_words": 94, "vocab_size": 60 }
8
Python
8
df6f95703b60348e01603f98a439b133da2938a0
axis_artist.py
109,904
4
25
set_axis_direction
https://github.com/matplotlib/matplotlib.git
Improve mpl_toolkit documentation
36
0
23,812
7
1
4
def prep_related_object_data(self, parent, data): return data
netbox/netbox/views/generic/bulk_views.py
20
netbox
{ "docstring": "\n Hook to modify the data for related objects before it's passed to the related object form (for example, to\n assign a parent object).\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 19 }
6
Python
6
93e7457e0d84ad24cba22cc5c0811777ddebf94e
bulk_views.py
266,053
2
12
prep_related_object_data
https://github.com/netbox-community/netbox.git
4347 Add JSON/YAML import support for all objects (#10367) * 4347 initial code for json import * 4347 initial code for json import * Clean up form processing logic * Consolidate import forms * Consolidate object import/update logic * Clean up bulk import view Co-authored-by: jeremystretch <[email protected]>
20
0
78,283
6
4
15
def euler_poly(n, x=None, polys=False): if n < 0: raise ValueError("Cannot generate Euler polynomial of degree %s" % n) poly = DMP(dup_euler(int(n), QQ), QQ) if x is not None: poly = Poly.new(poly, x) else: poly = PurePoly.new(poly, Dummy('x')) return poly if polys else poly.as_expr()
sympy/polys/appellseqs.py
133
sympy
{ "docstring": "Generates the Euler polynomial `\\operatorname{E}_n(x)`.\n\n Parameters\n ==========\n\n n : int\n Degree of the polynomial.\n x : optional\n polys : bool, optional\n If True, return a Poly, otherwise (default) return an expression.\n ", "language": "en", "n_whitespaces": 63, "n_words": 31, "vocab_size": 26 }
43
Python
36
93e4d381d35cd4c21a3a8d713c157f8fb21f725b
appellseqs.py
199,650
9
83
euler_poly
https://github.com/sympy/sympy.git
Custom Appell sequence functions and a doctest
82
0
49,316
14
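A short usage sketch of the euler_poly function in the record above; the import path is taken from the record's module path and may vary with the SymPy version.

    from sympy.abc import x
    from sympy.polys.appellseqs import euler_poly  # module path from the record

    print(euler_poly(3, x))              # expected: x**3 - 3*x**2/2 + 1/4
    print(euler_poly(3, x, polys=True))  # same polynomial wrapped as a Poly over QQ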
6
10
def iterencode(self, o, _one_shot=False): if self.check_circular: markers = {} else: markers = None if self.ensure_ascii: _encoder = encode_basestring_ascii else: _encoder = encode_basestring
python3.10.4/Lib/json/encoder.py
66
XX-Net
{ "docstring": "Encode the given object and yield each string\n representation as available.\n\n For example::\n\n for chunk in JSONEncoder().iterencode(bigobject):\n mysocket.write(chunk)\n\n ", "language": "en", "n_whitespaces": 65, "n_words": 18, "vocab_size": 18 }
22
Python
15
8198943edd73a363c266633e1aa5b2a9e9c9f526
encoder.py
218,574
22
138
iterencode
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
101
0
55,396
9
1
10
def clear(self) -> None: self.row_count = 0 self._clear_caches() self._y_offsets.clear() self.data.clear() self.rows.clear() self._line_no = 0 self._require_update_dimensions = True self.refresh()
src/textual/widgets/_data_table.py
94
textual
{ "docstring": "Clear the table.\n\n Args:\n columns (bool, optional): Also clear the columns. Defaults to False.\n ", "language": "en", "n_whitespaces": 39, "n_words": 14, "vocab_size": 13 }
18
Python
15
b524fa08eecadc83b0b694278db1c79d90feb9d8
_data_table.py
185,757
14
54
clear
https://github.com/Textualize/textual.git
ffixed table refresh on add row
81
0
45,161
8
1
16
def test_string_target(pyplot): iris = load_iris() X = iris.data[:, [0, 1]] # Use strings as target y = iris.target_names[iris.target] log_reg = LogisticRegression().fit(X, y) # Does not raise DecisionBoundaryDisplay.from_estimator( log_reg, X, grid_resolution=5, response_method="predict", )
sklearn/inspection/_plot/tests/test_boundary_decision_display.py
103
scikit-learn
{ "docstring": "Check that decision boundary works with classifiers trained on string labels.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
32
Python
28
d400723a2112f15c5d5b4d40dfac2ed8a19cca5c
test_boundary_decision_display.py
259,484
11
64
test_string_target
https://github.com/scikit-learn/scikit-learn.git
FEA Add DecisionBoundaryDisplay (#16061) Co-authored-by: Guillaume Lemaitre <[email protected]> Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Loïc Estève <[email protected]>
87
0
75,797
10
3
13
def setdefault(cls, key, default, description=None, deserialize_json=False): obj = Variable.get(key, default_var=None, deserialize_json=deserialize_json) if obj is None: if default is not None: Variable.set(key, default, description=description, serialize_json=deserialize_json) return default else: raise ValueError('Default Value must be set') else: return obj
airflow/models/variable.py
113
airflow
{ "docstring": "\n Like a Python builtin dict object, setdefault returns the current value\n for a key, and if it isn't there, stores the default value and returns it.\n\n :param key: Dict key for this Variable\n :param default: Default value to set and return if the variable\n isn't already in the DB\n :param deserialize_json: Store this as a JSON encoded value in the DB\n and un-encode it when retrieving a value\n :return: Mixed\n ", "language": "en", "n_whitespaces": 142, "n_words": 70, "vocab_size": 46 }
36
Python
27
602abe8394fafe7de54df7e73af56de848cdf617
variable.py
44,105
10
74
setdefault
https://github.com/apache/airflow.git
Remove `:type` lines now sphinx-autoapi supports typehints (#20951) * Remove `:type` lines now sphinx-autoapi supports typehints Since we have no updated sphinx-autoapi to a more recent version it supports showing type hints in the documentation, so we don't need to have the type hints _and_ the `:type` lines -- which is good, as the ones in the doc strings are easy to get out of date! The following settings have been set: `autodoc_typehints = 'description'` -- show types in description (where previous `:type` used to show up) `autodoc_typehints_description_target = 'documented'` -- only link to types that are documented. (Without this we have some missing return types that aren't documented, and aren't linked to in our current python API docs, so this caused a build failure) `autodoc_typehints_format = 'short'` -- Shorten type hints where possible, i.e. `StringIO` instead of `io.StringIO` * Add argument type names to local spelling dictionary Now that we are using the type hints in the docs, sphinxcontrib-spelling picks them up as words to be checked, so we have to ignore them. I've chosen to add the provider specific ones to local dictionary files rather than the global, as for example, `mgmt` is an error in most places, but not in some of the Azure provider.
142
0
8,155
13
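A hedged sketch of Variable.setdefault as defined in the record above (the description and deserialize_json parameters come from that signature); it assumes a configured Airflow metadata database, and the key and default are illustrative.

    from airflow.models import Variable

    # Returns the stored value if "feature_flags" already exists; otherwise stores the
    # default (JSON-serialized here) and returns it.
    flags = Variable.setdefault(
        "feature_flags",
        {"use_new_scheduler": False},
        description="Illustrative rollout switches",
        deserialize_json=True,
    )
    print(flags["use_new_scheduler"])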
3
10
def _update_trackables(self): for trackable_obj in self._self_tracked_trackables: if isinstance( trackable_obj, tf.__internal__.tracking.TrackableDataStructure ): self._track_variables(trackable_obj)
keras/engine/base_layer.py
54
keras
{ "docstring": "Track variables added to lists/dicts after creation", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
12
Python
12
00524152437b957ca4e850a5db014e223d3c6826
base_layer.py
279,734
6
33
_update_trackables
https://github.com/keras-team/keras.git
isort, black and flake8 checked
78
0
83,115
12
2
14
def assertCountSeleniumElements(self, selector, count, root_element=None): from selenium.webdriver.common.by import By root_element = root_element or self.selenium self.assertEqual( len(root_element.find_elements(By.CSS_SELECTOR, selector)), count )
django/contrib/admin/tests.py
76
django
{ "docstring": "\n Assert number of matches for a CSS selector.\n\n `root_element` allow restriction to a pre-selected node.\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 14 }
19
Python
18
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
203,511
6
51
assertCountSeleniumElements
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
65
0
50,416
12
1
3
async def action_pop_screen(self) -> None: self.pop_screen()
src/textual/app.py
26
textual
{ "docstring": "Removes the topmost screen and makes the new topmost screen active.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 8 }
6
Python
6
cf14b812ed47982463062e5b51bce506ad6ede1f
app.py
185,316
3
13
action_pop_screen
https://github.com/Textualize/textual.git
words
20
0
44,967
7
1
6
def test_empty_string_topic(self) -> None: self.login("hamlet") result = self.client_post( "/json/messages", { "type": "stream", "to": "Verona", "client": "test suite", "content": "Test message", "topic": "", }, ) self.assert_json_error(result, "Topic can't be empty!")
zerver/tests/test_message_send.py
107
zulip
{ "docstring": "\n Sending a message that has empty string topic should fail\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
29
Python
29
4f482c234c3ab72d264e7bff7835dad5207b9d07
test_message_send.py
83,031
16
54
test_empty_string_topic
https://github.com/zulip/zulip.git
string_validation: Standardize missing topic with missing stream name. Co-authored-by: Shlok Patel <[email protected]>
172
0
17,583
11
1
5
def generate_metric_ids(self) -> Set[Any]: raise NotImplementedError
src/sentry/snuba/metrics/fields/base.py
23
sentry
{ "docstring": "\n Method that generates all the metric ids required to query an instance of\n MetricsFieldBase\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 14 }
6
Python
6
3e8115c4a681e9c4adeafb1f15eb669a9342b93c
base.py
96,875
6
13
generate_metric_ids
https://github.com/getsentry/sentry.git
feat(metrics): Add initial framework for derived metrics [INGEST-924] (#32451) * feat(metrics): Add initial framework for derived metrics Adds support for derived metrics composed of constituent metrics that span one entity * Adds logic/test for when metric does not exist * Fix failing test + incorporate PR feedback * Rename snql functions to their snuba name
20
0
19,347
6
1
2
def angleref(self): return self["angleref"]
packages/python/plotly/plotly/graph_objs/scatter/_marker.py
22
plotly.py
{ "docstring": "\n Sets the reference for marker angle. With \"previous\", angle 0\n points along the line from the previous point to this one. With\n \"up\", angle 0 points toward the top of the screen.\n\n The 'angleref' property is an enumeration that may be specified as:\n - One of the following enumeration values:\n ['previous', 'up']\n\n Returns\n -------\n Any\n ", "language": "en", "n_whitespaces": 136, "n_words": 55, "vocab_size": 44 }
4
Python
4
d5a345d01507f8b6792c51507d1d8f35d7386d29
_marker.py
231,197
2
11
angleref
https://github.com/plotly/plotly.py.git
update to plotly.js 2.16.1
18
0
62,773
7
9
36
def ask_question(self, question, blocking): log.prompt.debug("Asking question {}, blocking {}, loops {}, queue " "{}".format(question, blocking, self._loops, self._queue)) if self._shutting_down: # If we're currently shutting down we have to ignore this question # to avoid segfaults - see # https://github.com/qutebrowser/qutebrowser/issues/95 log.prompt.debug("Ignoring question because we're shutting down.") question.abort() return None if self._question is not None and not blocking: # We got an async question, but we're already busy with one, so we # just queue it up for later. log.prompt.debug("Adding {} to queue.".format(question)) self._queue.append(question) return None if blocking: # If we're blocking we save the old question on the stack, so we # can restore it after exec, if exec gets called multiple times. log.prompt.debug("New question is blocking, saving {}".format( self._question)) old_question = self._question if old_question is not None: old_question.interrupted = True self._question = question self.show_prompts.emit(question) if blocking: loop = qtutils.EventLoop() self._loops.append(loop) loop.destroyed.connect(lambda: self._loops.remove(loop)) question.completed.connect(loop.quit) question.completed.connect(loop.deleteLater) log.prompt.debug("Starting loop.exec() for {}".format(question)) flags = cast(QEventLoop.ProcessEventsFlags, QEventLoop.ProcessEventsFlag.ExcludeSocketNotifiers) loop.exec(flags) log.prompt.debug("Ending loop.exec() for {}".format(question)) log.prompt.debug("Restoring old question {}".format(old_question)) self._question = old_question self.show_prompts.emit(old_question) if old_question is None: # Nothing left to restore, so we can go back to popping async # questions. if self._queue: self._pop_later() return question.answer else: question.completed.connect(self._pop_later) return None
qutebrowser/mainwindow/prompt.py
497
qutebrowser
{ "docstring": "Display a prompt for a given question.\n\n Args:\n question: The Question object to ask.\n blocking: If True, this function blocks and returns the result.\n\n Return:\n The answer of the user when blocking=True.\n None if blocking=False.\n ", "language": "en", "n_whitespaces": 100, "n_words": 35, "vocab_size": 32 }
193
Python
117
0877fb0d78635692e481c8bde224fac5ad0dd430
prompt.py
321,259
41
295
ask_question
https://github.com/qutebrowser/qutebrowser.git
Run scripts/dev/rewrite_enums.py
786
0
117,614
13
10
19
def prettify_exc(error): errors = [] for exc in KNOWN_EXCEPTIONS: search_string = exc.match_string if exc.match_string else exc.exception_name split_string = ( exc.show_from_string if exc.show_from_string else exc.exception_name ) if search_string in error: # for known exceptions with no display rules and no prefix # we should simply show nothing if not exc.show_from_string and not exc.prefix: errors.append("") continue elif exc.prefix and exc.prefix in error: _, error, info = error.rpartition(exc.prefix) else: _, error, info = error.rpartition(split_string) errors.append(f"{error} {info}") if not errors: return f"{vistir.misc.decode_for_output(error)}" return "\n".join(errors)
pipenv/exceptions.py
231
pipenv
{ "docstring": "Catch known errors and prettify them instead of showing the\n entire traceback, for better UX", "language": "en", "n_whitespaces": 17, "n_words": 15, "vocab_size": 15 }
80
Python
51
9a3b3ce70621af6f9adaa9eeac9cf83fa149319c
exceptions.py
19,700
19
126
prettify_exc
https://github.com/pypa/pipenv.git
Issue 4993 Add standard pre commit hooks and apply linting. (#4994) * Add .pre-commit-config.yaml to the project and exclude tests (for now). This does not include the MyPy linting that pip does but does include everything else.
267
0
3,069
16
1
5
def get_filename(self): return getattr(self.model_admin, "export_filename", super().get_filename())
wagtail/contrib/modeladmin/views.py
41
wagtail
{ "docstring": "Get filename for exported spreadsheet, without extension", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
6
Python
6
d10f15e55806c6944827d801cd9c2d53f5da4186
views.py
73,308
2
23
get_filename
https://github.com/wagtail/wagtail.git
Reformat with black
20
0
16,012
11
1
4
def ensure_future(coro_or_future, *, loop=None): return _ensure_future(coro_or_future, loop=loop)
python3.10.4/Lib/asyncio/tasks.py
34
XX-Net
{ "docstring": "Wrap a coroutine or an awaitable in a future.\n\n If the argument is a Future, it is returned directly.\n ", "language": "en", "n_whitespaces": 25, "n_words": 19, "vocab_size": 16 }
7
Python
7
8198943edd73a363c266633e1aa5b2a9e9c9f526
tasks.py
220,829
2
21
ensure_future
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
13
0
56,137
8
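A brief usage sketch of asyncio.ensure_future as described in the record above; the coroutine is illustrative, the behaviour is the standard-library one.

    import asyncio

    async def fetch_value():
        await asyncio.sleep(0.1)
        return 42

    async def main():
        # Wrapping a coroutine schedules it as a Task; passing an existing Future/Task
        # returns it unchanged.
        fut = asyncio.ensure_future(fetch_value())
        print(await fut)  # 42

    asyncio.run(main())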
7
21
def _get_permissions(self, user_obj, obj, from_name): if not user_obj.is_active or user_obj.is_anonymous or obj is not None: return set() perm_cache_name = "_%s_perm_cache" % from_name if not hasattr(user_obj, perm_cache_name): if user_obj.is_superuser: perms = Permission.objects.all() else: perms = getattr(self, "_get_%s_permissions" % from_name)(user_obj) perms = perms.values_list("content_type__app_label", "codename").order_by() setattr( user_obj, perm_cache_name, {"%s.%s" % (ct, name) for ct, name in perms} ) return getattr(user_obj, perm_cache_name)
django/contrib/auth/backends.py
190
django
{ "docstring": "\n Return the permissions of `user_obj` from `from_name`. `from_name` can\n be either \"group\" or \"user\" to return permissions from\n `_get_group_permissions` or `_get_user_permissions` respectively.\n ", "language": "en", "n_whitespaces": 51, "n_words": 22, "vocab_size": 19 }
58
Python
44
9c19aff7c7561e3a82978a272ecdaad40dda5c00
backends.py
203,604
14
117
_get_permissions
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
204
0
50,471
16
1
2
def __hash__(self): # type: () -> int
.venv/lib/python3.8/site-packages/pip/_vendor/packaging/specifiers.py
14
transferlearning
{ "docstring": "\n Returns a hash value for this Specifier like object.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
7
Python
7
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
specifiers.py
62,876
1
6
__hash__
https://github.com/jindongwang/transferlearning.git
upd; format
21
0
13,057
6
1
4
def get_cost_of_delayed_shipments(scorecard): return get_total_cost_of_shipments(scorecard) - get_cost_of_on_time_shipments(scorecard)
erpnext/buying/doctype/supplier_scorecard_variable/supplier_scorecard_variable.py
29
erpnext
{ "docstring": "Gets the total cost of all delayed shipments in the period (based on Purchase Receipts - POs)", "language": "en", "n_whitespaces": 16, "n_words": 17, "vocab_size": 16 }
6
Python
6
494bd9ef78313436f0424b918f200dab8fc7c20b
supplier_scorecard_variable.py
65,547
2
16
get_cost_of_delayed_shipments
https://github.com/frappe/erpnext.git
style: format code with black
4
0
13,924
8
3
12
def check(self, pattern): if self.eos: raise EndOfText() if pattern not in self._re_cache: self._re_cache[pattern] = re.compile(pattern, self.flags) return self._re_cache[pattern].match(self.data, self.pos)
pipenv/patched/notpip/_vendor/pygments/scanner.py
93
pipenv
{ "docstring": "\n Apply `pattern` on the current position and return\n the match object. (Doesn't touch pos). Use this for\n lookahead.\n ", "language": "en", "n_whitespaces": 47, "n_words": 18, "vocab_size": 17 }
19
Python
18
f3166e673fe8d40277b804d35d77dcdb760fc3b3
scanner.py
20,474
6
60
check
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
69
0
3,391
11
1
4
def test_clear_not_launched_queued_tasks_mapped_task(self, dag_maker, session):
tests/executors/test_kubernetes_executor.py
17
airflow
{ "docstring": "One mapped task has a launched pod - other does not.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
4
Python
4
98d52af7074e9a82457515588bdf9cdd6de70f35
test_kubernetes_executor.py
47,898
41
238
test_clear_not_launched_queued_tasks_mapped_task
https://github.com/apache/airflow.git
Use map_index when clearing not launched tasks in k8s (#23224)
11
0
9,291
6
1
2
def vertexcolorsrc(self): return self["vertexcolorsrc"]
packages/python/plotly/plotly/graph_objs/_mesh3d.py
22
plotly.py
{ "docstring": "\n Sets the source reference on Chart Studio Cloud for\n `vertexcolor`.\n\n The 'vertexcolorsrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "language": "en", "n_whitespaces": 84, "n_words": 27, "vocab_size": 25 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_mesh3d.py
227,437
2
11
vertexcolorsrc
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
59,110
7
1
2
def widthsrc(self): return self["widthsrc"]
packages/python/plotly/plotly/graph_objs/_bar.py
22
plotly.py
{ "docstring": "\n Sets the source reference on Chart Studio Cloud for `width`.\n\n The 'widthsrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "language": "en", "n_whitespaces": 77, "n_words": 27, "vocab_size": 25 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_bar.py
226,181
2
11
widthsrc
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
57,854
7
1
7
def _dotprodsimp(expr, withsimp=False): from sympy.simplify.simplify import dotprodsimp as dps return dps(expr, withsimp=withsimp)
sympy/matrices/utilities.py
45
sympy
{ "docstring": "Wrapper for simplify.dotprodsimp to avoid circular imports.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
12
Python
12
f757f3daae6e11ea0cfb7dadc133274d8d74315f
utilities.py
196,808
3
29
_dotprodsimp
https://github.com/sympy/sympy.git
Reordered imports 2
21
0
48,190
8
2
5
def libc_ver() -> Tuple[str, str]: glibc_version = glibc_version_string() if glibc_version is None: return ("", "") else: return ("glibc", glibc_version)
pipenv/patched/notpip/_internal/utils/glibc.py
64
pipenv
{ "docstring": "Try to determine the glibc version\n\n Returns a tuple of strings (lib, version) which default to empty strings\n in case the lookup fails.\n ", "language": "en", "n_whitespaces": 32, "n_words": 23, "vocab_size": 20 }
19
Python
17
f3166e673fe8d40277b804d35d77dcdb760fc3b3
glibc.py
19,980
11
36
libc_ver
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
45
0
3,162
10
1
8
def select_query(self, targets, from_stmt, where_stmt) -> pd.DataFrame: # noqa raise NotImplementedError()
mindsdb/integrations/libs/base_handler.py
33
mindsdb
{ "docstring": "\n Select data from some entity in the handler and return in dataframe format.\n \n This method assumes a raw query has been parsed beforehand with mindsdb_sql using some dialect compatible with the handler, and only targets, from, and where clauses are fed into it.\n ", "language": "en", "n_whitespaces": 73, "n_words": 43, "vocab_size": 37 }
11
Python
11
0fd3b436c38f38bcae6fed9e14dc4d2a12e90793
base_handler.py
114,370
7
20
select_query
https://github.com/mindsdb/mindsdb.git
fix tests and reformat
26
0
25,169
7
1
2
def test_bad_return_in_train_loop(ray_start_4_cpus): # Simulates what happens with eg. torch models
python/ray/train/tests/test_data_parallel_trainer.py
14
ray
{ "docstring": "Test to check if returns from train loop are discarded.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
10
Python
10
8bb67427c18887f43721cf9726d6836c3b40cafb
test_data_parallel_trainer.py
124,658
8
30
test_bad_return_in_train_loop
https://github.com/ray-project/ray.git
[AIR] Discard returns of train loops in Trainers (#26448) Discards returns of user defined train loop functions to prevent deser issues with eg. torch models. Those returns are not used anywhere in AIR, so there is no loss of functionality.
16
0
27,647
6
1
2
def cone(self): return self["cone"]
packages/python/plotly/plotly/graph_objs/layout/template/_data.py
22
plotly.py
{ "docstring": "\n The 'cone' property is a tuple of instances of\n Cone that may be specified as:\n - A list or tuple of instances of plotly.graph_objs.layout.template.data.Cone\n - A list or tuple of dicts of string/value properties that\n will be passed to the Cone constructor\n\n Supported dict properties:\n\n Returns\n -------\n tuple[plotly.graph_objs.layout.template.data.Cone]\n ", "language": "en", "n_whitespaces": 131, "n_words": 48, "vocab_size": 33 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_data.py
232,549
2
11
cone
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
63,993
7
2
11
def get_purchased_items_cost(): pr_items = frappe.db.sql( , as_dict=1, ) pr_item_map = {} for item in pr_items: pr_item_map.setdefault(item.project, item.amount) return pr_item_map
erpnext/projects/report/project_wise_stock_tracking/project_wise_stock_tracking.py
67
erpnext
{ "docstring": "select project, sum(base_net_amount) as amount\n\t\tfrom `tabPurchase Receipt Item` where ifnull(project, '') != ''\n\t\tand docstatus = 1 group by project", "language": "en", "n_whitespaces": 18, "n_words": 21, "vocab_size": 21 }
19
Python
17
494bd9ef78313436f0424b918f200dab8fc7c20b
project_wise_stock_tracking.py
67,037
11
42
get_purchased_items_cost
https://github.com/frappe/erpnext.git
style: format code with black
10
0
14,414
10
1
8
def test_identity_weighted_graph_matrix(self): A = nx.to_scipy_sparse_array(self.G3) self.identity_conversion(self.G3, A, nx.Graph())
networkx/tests/test_convert_scipy.py
53
networkx
{ "docstring": "Conversion from weighted graph to sparse matrix to weighted graph.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 8 }
8
Python
8
5dfd57af2a141a013ae3753e160180b82bec9469
test_convert_scipy.py
176,206
3
32
test_identity_weighted_graph_matrix
https://github.com/networkx/networkx.git
Use scipy.sparse array datastructure (#5139) * Step 1: use sparse arrays in nx.to_scipy_sparse_matrix. Seems like a reasonable place to start. nx.to_scipy_sparse_matrix is one of the primary interfaces to scipy.sparse from within NetworkX. * 1: Use np.outer instead of mult col/row vectors Fix two instances in modularitymatrix where a new 2D array was being created via an outer product of two \"vectors\". In the matrix case, this was a row vector \* a column vector. In the array case this can be disambiguated by being explicit with np.outer. * Update _transition_matrix in laplacianmatrix module - A few instances of matrix multiplication operator - Add np.newaxis + transpose to get shape right for broadcasting - Explicitly convert e.g. sp.sparse.spdiags to a csr_array. * Update directed_combinitorial_laplacian w/ sparse array. - Wrap spdiags in csr_array and update matmul operators. * Rm matrix-specific code from lgc and hmn modules - Replace .A call with appropriate array semantics - wrap sparse.diags in csr_array. * Change hits to use sparse array semantics. - Replace * with @ - Remove superfluous calls to flatten. * Update sparse matrix usage in layout module. - Simplify lil.getrowview call - Wrap spdiags in csr_array. * lil_matrix -> lil_array in graphmatrix.py. * WIP: Start working on algebraic connectivity module. * Incorporate auth mat varname feedback. * Revert 1D slice and comment for 1D sparse future. * Add TODOs: rm csr_array wrapper around spdiags etc. * WIP: cleanup algebraicconn: tracemin_fiedler. * Typo. * Finish reviewing algebraicconnectivity. * Convert bethe_hessian matrix to use sparse arrays. * WIP: update laplacian. Update undirected laplacian functions. * WIP: laplacian - add comment about _transition_matrix return types. * Finish laplacianmatrix review. * Update attrmatrix. * Switch to official laplacian function. * Update pagerank to use sparse array. * Switch bipartite matrix to sparse arrays. * Check from_scipy_sparse_matrix works with arrays. Modifies test suite. * Apply changes from review. * Fix failing docstring tests. * Fix missing axis for in-place multiplication. * Use scipy==1.8rc2 * Use matrix multiplication * Fix PyPy CI * [MRG] Create plot_subgraphs.py example (#5165) * Create plot_subgraphs.py https://github.com/networkx/networkx/issues/4220 * Update plot_subgraphs.py black * Update plot_subgraphs.py lint plus font_size * Update plot_subgraphs.py added more plots * Update plot_subgraphs.py removed plots from the unit test and added comments * Update plot_subgraphs.py lint * Update plot_subgraphs.py typos fixed * Update plot_subgraphs.py added nodes to the plot of the edges removed that was commented out for whatever reason * Update plot_subgraphs.py revert the latest commit - the line was commented out for a reason - it's broken * Update plot_subgraphs.py fixed node color issue * Update plot_subgraphs.py format fix * Update plot_subgraphs.py forgot to draw the nodes... now fixed * Fix sphinx warnings about heading length. * Update examples/algorithms/plot_subgraphs.py * Update examples/algorithms/plot_subgraphs.py Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Dan Schult <[email protected]> * Add traveling salesman problem to example gallery (#4874) Adds an example of the using Christofides to solve the TSP problem to the example galery. 
Co-authored-by: Ross Barnowski <[email protected]> * Fixed inconsistent documentation for nbunch parameter in DiGraph.edges() (#5037) * Fixed inconsistent documentation for nbunch parameter in DiGraph.edges() * Resolved Requested Changes * Revert changes to degree docstrings. * Update comments in example. * Apply wording to edges method in all graph classes. Co-authored-by: Ross Barnowski <[email protected]> * Compatibility updates from testing with numpy/scipy/pytest rc's (#5226) * Rm deprecated scipy subpkg access. * Use recwarn fixture in place of deprecated pytest pattern. * Rm unnecessary try/except from tests. * Replace internal `close` fn with `math.isclose`. (#5224) * Replace internal close fn with math.isclose. * Fix lines in docstring examples. * Fix Python 3.10 deprecation warning w/ int div. (#5231) * Touchups and suggestions for subgraph gallery example (#5225) * Simplify construction of G with edges rm'd * Rm unused graph attribute. * Shorten categorization by node type. * Simplify node coloring. * Simplify isomorphism check. * Rm unit test. * Rm redundant plotting of each subgraph. * Use new package name (#5234) * Allowing None edges in weight function of bidirectional Dijkstra (#5232) * added following feature also to bidirectional dijkstra: The weight function can be used to hide edges by returning None. * changed syntax for better readability and code duplicate avoidance Co-authored-by: Hohmann, Nikolas <[email protected]> * Add an FAQ about assigning issues. (#5182) * Add FAQ about assigning issues. * Add note about linking issues from new PRs. * Update dev deps (#5243) * Update minor doc issues with tex notation (#5244) * Add FutureWarnings to fns that return sparse matrices - biadjacency_matrix. - bethe_hessian_matrix. - incidence_matrix. - laplacian functions. - modularity_matrix functions. - adjacency_matrix. * Add to_scipy_sparse_array and use it everywhere. Add a new conversion function to preserve array semantics internally while not altering behavior for users. Also adds FutureWarning to to_scipy_sparse_matrix. * Add from_scipy_sparse_array. Supercedes from_scipy_sparse_matrix. * Handle deprecations in separate PR. * Fix docstring examples. Co-authored-by: Mridul Seth <[email protected]> Co-authored-by: Jarrod Millman <[email protected]> Co-authored-by: Andrew Knyazev <[email protected]> Co-authored-by: Dan Schult <[email protected]> Co-authored-by: eskountis <[email protected]> Co-authored-by: Anutosh Bhat <[email protected]> Co-authored-by: NikHoh <[email protected]> Co-authored-by: Hohmann, Nikolas <[email protected]> Co-authored-by: Sultan Orazbayev <[email protected]> Co-authored-by: Mridul Seth <[email protected]>
29
0
41,766
9
1
9
def _merge_dataframes(transactions_df, articles_df, customers_df): # Merge the transactions and articles dataframes transactions_df = pd.merge( transactions_df, articles_df, how="left", left_on="article_id", right_on="article_id", ) # Merge the transactions and customers dataframes transactions_df = pd.merge( transactions_df, customers_df, how="left", left_on="customer_id", right_on="customer_id", ) return transactions_df
ludwig/datasets/loaders/hm_fashion_recommendations.py
96
ludwig
{ "docstring": "Merge the transactions, articles, and customers dataframes into a single dataframe.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
38
Python
24
abfdc05018cc4dec5a2fed20ad09e94f1749fca9
hm_fashion_recommendations.py
8,572
16
58
_merge_dataframes
https://github.com/ludwig-ai/ludwig.git
Add H&M fashion recommendation dataset (#2708) * allow individual file downloads from kaggle * pipe download_filenames to kaggle download fn * add dataset config for H&M Fashion Recommendations * add custom loader * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * use local backend instead of mock * add docstring for sample * fix titanic test * move negative_sample to ludwig.data * do not negative sample in loader Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
132
0
1,463
10
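A minimal sketch of the two-step left merge performed by `_merge_dataframes` above, using toy frames; the column values here are invented for illustration only.

```python
import pandas as pd

# Toy inputs mirroring the three H&M tables (values are made up).
transactions_df = pd.DataFrame({"customer_id": [1, 1, 2], "article_id": [10, 11, 10]})
articles_df = pd.DataFrame({"article_id": [10, 11], "product_type": ["shirt", "dress"]})
customers_df = pd.DataFrame({"customer_id": [1, 2], "age": [34, 27]})

# Left-join article metadata, then customer metadata, onto each transaction.
merged = transactions_df.merge(articles_df, how="left", on="article_id")
merged = merged.merge(customers_df, how="left", on="customer_id")
print(merged)  # one row per transaction, enriched with article and customer columns
```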
2
8
def _get_trainable_state(self): trainable_state = weakref.WeakKeyDictionary() for layer in self._flatten_layers(): trainable_state[layer] = layer.trainable return trainable_state
keras/engine/base_layer.py
54
keras
{ "docstring": "Get the `trainable` state of each sublayer.\n\n Returns:\n A dict mapping all sublayers to their `trainable` value.\n ", "language": "en", "n_whitespaces": 40, "n_words": 17, "vocab_size": 16 }
14
Python
12
84afc5193d38057e2e2badf9c889ea87d80d8fbf
base_layer.py
270,669
5
32
_get_trainable_state
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
53
0
80,516
9
11
17
def get_field_type(self, connection, table_name, row): field_params = {} field_notes = [] try: field_type = connection.introspection.get_field_type(row.type_code, row) except KeyError: field_type = "TextField" field_notes.append("This field type is a guess.") # Add max_length for all CharFields. if field_type == "CharField" and row.internal_size: field_params["max_length"] = int(row.internal_size) if field_type in {"CharField", "TextField"} and row.collation: field_params["db_collation"] = row.collation if field_type == "DecimalField": if row.precision is None or row.scale is None: field_notes.append( "max_digits and decimal_places have been guessed, as this " "database handles decimal fields as float" ) field_params["max_digits"] = ( row.precision if row.precision is not None else 10 ) field_params["decimal_places"] = ( row.scale if row.scale is not None else 5 ) else: field_params["max_digits"] = row.precision field_params["decimal_places"] = row.scale return field_type, field_params, field_notes
django/core/management/commands/inspectdb.py
299
django
{ "docstring": "\n Given the database connection, the table name, and the cursor row\n description, this routine will return the given field type name, as\n well as any additional keyword parameters and notes for the field.\n ", "language": "en", "n_whitespaces": 62, "n_words": 33, "vocab_size": 26 }
116
Python
74
9c19aff7c7561e3a82978a272ecdaad40dda5c00
inspectdb.py
204,637
28
176
get_field_type
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
459
0
50,819
14
18
53
def solve_undetermined_coeffs(equ, coeffs, *syms, **flags): r if not (coeffs and all(i.is_Symbol for i in coeffs)): raise ValueError('must provide symbols for coeffs') if isinstance(equ, Eq): eq = equ.lhs - equ.rhs else: eq = equ ceq = cancel(eq) xeq = _mexpand(ceq.as_numer_denom()[0], recursive=True) free = xeq.free_symbols coeffs = free & set(coeffs) if not coeffs: return ([], {}) if flags.get('set', None) else [] # solve(0, x) -> [] if not syms: # e.g. A*exp(x) + B - (exp(x) + y) separated into parts that # don't/do depend on coeffs gives # -(exp(x) + y), A*exp(x) + B # then see what symbols are common to both # {x} = {x, A, B} - {x, y} ind, dep = xeq.as_independent(*coeffs, as_Add=True) dfree = dep.free_symbols syms = dfree & ind.free_symbols if not syms: # but if the system looks like (a + b)*x + b - c # then {} = {a, b, x} - c # so calculate {x} = {a, b, x} - {a, b} syms = dfree - set(coeffs) if not syms: syms = [Dummy()] else: if len(syms) == 1 and iterable(syms[0]): syms = syms[0] e, s, _ = recast_to_symbols([xeq], syms) xeq = e[0] syms = s # find the functional forms in which symbols appear gens = set(xeq.as_coefficients_dict(*syms).keys()) - {1} cset = set(coeffs) if any(g.has_xfree(cset) for g in gens): return # a generator contained a coefficient symbol # make sure we are working with symbols for generators e, gens, _ = recast_to_symbols([xeq], list(gens)) xeq = e[0] # collect coefficients in front of generators system = list(collect(xeq, gens, evaluate=False).values()) # get a solution soln = solve(system, coeffs, **flags) # unpack unless told otherwise if length is 1 settings = flags.get('dict', None) or flags.get('set', None) if type(soln) is dict or settings or len(soln) != 1: return soln return soln[0]
sympy/solvers/solvers.py
580
sympy
{ "docstring": "\n Solve a system of equations in $k$ parameters that is formed by\n matching coefficients in variables ``coeffs`` that are on\n factors dependent on the remaining variables (or those given\n explicitly by ``syms``.\n\n Explanation\n ===========\n\n The result of this function is a dictionary with symbolic values of those\n parameters with respect to coefficients in $q$ -- empty if there\n is no solution or coefficients do not appear in the equation -- else\n None (if the system was not recognized). If there is more than one\n solution, the solutions are passed as a list. The output can be modified using\n the same semantics as for `solve` since the flags that are passed are sent\n directly to `solve` so, for example the flag ``dict=True`` will always return a list\n of solutions as dictionaries.\n\n This function accepts both Equality and Expr class instances.\n The solving process is most efficient when symbols are specified\n in addition to parameters to be determined, but an attempt to\n determine them (if absent) will be made. If an expected solution is not\n obtained (and symbols were not specified) try specifying them.\n\n Examples\n ========\n\n >>> from sympy import Eq, solve_undetermined_coeffs\n >>> from sympy.abc import a, b, c, h, p, k, x, y\n\n >>> solve_undetermined_coeffs(Eq(a*x + a + b, x/2), [a, b], x)\n {a: 1/2, b: -1/2}\n >>> solve_undetermined_coeffs(a - 2, [a])\n {a: 2}\n\n The equation can be nonlinear in the symbols:\n\n >>> X, Y, Z = y, x**y, y*x**y\n >>> eq = a*X + b*Y + c*Z - X - 2*Y - 3*Z\n >>> coeffs = a, b, c\n >>> syms = x, y\n >>> solve_undetermined_coeffs(eq, coeffs, syms)\n {a: 1, b: 2, c: 3}\n\n And the system can be nonlinear in coefficients, too, but if\n there is only a single solution, it will be returned as a\n dictionary:\n\n >>> eq = a*x**2 + b*x + c - ((x - h)**2 + 4*p*k)/4/p\n >>> solve_undetermined_coeffs(eq, (h, p, k), x)\n {h: -b/(2*a), k: (4*a*c - b**2)/(4*a), p: 1/(4*a)}\n\n Multiple solutions are always returned in a list:\n\n >>> solve_undetermined_coeffs(a**2*x + b - x, [a, b], x)\n [{a: -1, b: 0}, {a: 1, b: 0}]\n\n Using flag ``dict=True`` (in keeping with semantics in :func:`~.solve`)\n will force the result to always be a list with any solutions\n as elements in that list.\n\n >>> solve_undetermined_coeffs(a*x - 2*x, [a], dict=True)\n [{a: 2}]\n ", "language": "en", "n_whitespaces": 534, "n_words": 385, "vocab_size": 218 }
295
Python
168
2163f938f26e75e10f2d25b92321511988eff502
solvers.py
199,434
103
358
solve_undetermined_coeffs
https://github.com/sympy/sympy.git
mv solve_undetermined_coeffs and legacy behavior
580
0
49,266
14
7
53
def populate_any_indicators(self, pair, df, tf, informative=None,coin=''): if informative is None: informative = self.dp.get_pair_dataframe(pair, tf) informative[coin+'rsi'] = ta.RSI(informative, timeperiod=14) informative[coin+'mfi'] = ta.MFI(informative, timeperiod=25) informative[coin+'adx'] = ta.ADX(informative, window=20) informative[coin+'20sma'] = ta.SMA(informative,timeperiod=20) informative[coin+'21ema'] = ta.EMA(informative,timeperiod=21) informative[coin+'bmsb'] = np.where(informative[coin+'20sma'].lt(informative[coin+'21ema']),1,0) informative[coin+'close_over_20sma'] = informative['close']/informative[coin+'20sma'] informative[coin+'mfi'] = ta.MFI(informative, timeperiod=25) informative[coin+'ema21'] = ta.EMA(informative, timeperiod=21) informative[coin+'sma20'] = ta.SMA(informative, timeperiod=20) stoch = ta.STOCHRSI(informative, 15, 20, 2, 2) informative[coin+'srsi-fk'] = stoch['fastk'] informative[coin+'srsi-fd'] = stoch['fastd'] bollinger = qtpylib.bollinger_bands(qtpylib.typical_price(informative), window=14, stds=2.2) informative[coin+'bb_lowerband'] = bollinger['lower'] informative[coin+'bb_middleband'] = bollinger['mid'] informative[coin+'bb_upperband'] = bollinger['upper'] informative[coin+'bb_width'] = ((informative[coin+"bb_upperband"] - informative[coin+"bb_lowerband"]) / informative[coin+"bb_middleband"]) informative[coin+'close-bb_lower'] = informative['close'] / informative[coin+'bb_lowerband'] informative[coin+'roc'] = ta.ROC(informative, timeperiod=3) informative[coin+'adx'] = ta.ADX(informative, window=14) macd = ta.MACD(informative) informative[coin+'macd'] = macd['macd'] informative[coin+'pct-change'] = informative['close'].pct_change() informative[coin+'relative_volume'] = informative['volume'] / informative['volume'].rolling(10).mean() informative[coin+'pct-change'] = informative['close'].pct_change() indicators = [col for col in informative if col.startswith(coin)] for n in range(self.freqai_info['feature_parameters']['shift']+1): if n==0: continue informative_shift = informative[indicators].shift(n) informative_shift = informative_shift.add_suffix('_shift-'+str(n)) informative = pd.concat((informative,informative_shift),axis=1) df = merge_informative_pair(df, informative, self.config['timeframe'], tf, ffill=True) skip_columns = [(s + '_'+tf) for s in ['date', 'open', 'high', 'low', 'close', 'volume']] df = df.drop(columns=skip_columns) return df
freqtrade/templates/FreqaiExampleStrategy.py
1,004
freqtrade
{ "docstring": "\n Function designed to automatically generate, name and merge features\n from user indicated timeframes in the configuration file. User can add\n additional features here, but must follow the naming convention.\n :params:\n :pair: pair to be used as informative\n :df: strategy dataframe which will receive merges from informatives\n :tf: timeframe of the dataframe which will modify the feature names\n :informative: the dataframe associated with the informative pair\n :coin: the name of the coin which will modify the feature names.\n ", "language": "en", "n_whitespaces": 148, "n_words": 77, "vocab_size": 54 }
165
Python
109
fc837c4daa27a18ff0e86128f4d52089b88fa5fb
FreqaiExampleStrategy.py
149,765
40
614
populate_any_indicators
https://github.com/freqtrade/freqtrade.git
add freqao backend machinery, user interface, documentation
481
0
34,522
13
1
6
def shuffle(*arrays, random_state=None, n_samples=None): return resample( *arrays, replace=False, n_samples=n_samples, random_state=random_state )
sklearn/utils/__init__.py
50
scikit-learn
{ "docstring": "Shuffle arrays or sparse matrices in a consistent way.\n\n This is a convenience alias to ``resample(*arrays, replace=False)`` to do\n random permutations of the collections.\n\n Parameters\n ----------\n *arrays : sequence of indexable data-structures\n Indexable data-structures can be arrays, lists, dataframes or scipy\n sparse matrices with consistent first dimension.\n\n random_state : int, RandomState instance or None, default=None\n Determines random number generation for shuffling\n the data.\n Pass an int for reproducible results across multiple function calls.\n See :term:`Glossary <random_state>`.\n\n n_samples : int, default=None\n Number of samples to generate. If left to None this is\n automatically set to the first dimension of the arrays. It should\n not be larger than the length of arrays.\n\n Returns\n -------\n shuffled_arrays : sequence of indexable data-structures\n Sequence of shuffled copies of the collections. The original arrays\n are not impacted.\n\n See Also\n --------\n resample : Resample arrays or sparse matrices in a consistent way.\n\n Examples\n --------\n It is possible to mix sparse and dense arrays in the same run::\n\n >>> import numpy as np\n >>> X = np.array([[1., 0.], [2., 1.], [0., 0.]])\n >>> y = np.array([0, 1, 2])\n\n >>> from scipy.sparse import coo_matrix\n >>> X_sparse = coo_matrix(X)\n\n >>> from sklearn.utils import shuffle\n >>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)\n >>> X\n array([[0., 0.],\n [2., 1.],\n [1., 0.]])\n\n >>> X_sparse\n <3x2 sparse matrix of type '<... 'numpy.float64'>'\n with 3 stored elements in Compressed Sparse Row format>\n\n >>> X_sparse.toarray()\n array([[0., 0.],\n [2., 1.],\n [1., 0.]])\n\n >>> y\n array([2, 1, 0])\n\n >>> shuffle(y, n_samples=2, random_state=0)\n array([0, 1])\n ", "language": "en", "n_whitespaces": 519, "n_words": 248, "vocab_size": 152 }
11
Python
11
49279c3267c0c54cdba80a571820c46f25fbe883
__init__.py
260,817
4
33
shuffle
https://github.com/scikit-learn/scikit-learn.git
DOC ensures sklearn.utils.shuffle passes numpydoc validation (#24367) Co-authored-by: Guillaume Lemaitre <[email protected]>
27
0
76,516
8
1
8
def _eigh(*args, **kwargs): eigvals = kwargs.pop("subset_by_index", None) return scipy.linalg.eigh(*args, eigvals=eigvals, **kwargs) # remove when https://github.com/joblib/joblib/issues/1071 is fixed
sklearn/utils/fixes.py
62
scikit-learn
{ "docstring": "Wrapper for `scipy.linalg.eigh` that handles the deprecation of `eigvals`.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
17
Python
17
b0bf2315a771ed10b10d1f6a24a48ebdba34cf16
fixes.py
261,771
3
37
_eigh
https://github.com/scikit-learn/scikit-learn.git
MAINT fix deprecation raised in scipy-dev build (#25175) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Loïc Estève <[email protected]>
37
0
76,983
9
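For context on the `_eigh` wrapper above: `subset_by_index` is the keyword that replaced the deprecated `eigvals` argument in newer SciPy releases, and the snippet below is a small, self-contained example of the call being wrapped.

```python
import numpy as np
from scipy.linalg import eigh

# Request only the two smallest eigenpairs of a symmetric matrix.
a = np.array([[2.0, 1.0, 0.0],
              [1.0, 2.0, 1.0],
              [0.0, 1.0, 2.0]])
vals, vecs = eigh(a, subset_by_index=[0, 1])
print(vals)        # the two smallest eigenvalues
print(vecs.shape)  # (3, 2): one column per requested eigenvector
```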
4
11
def compute_dict_delta(old_dict, new_dict) -> Tuple[dict, dict, dict]: added_keys, removed_keys, updated_keys = compute_iterable_delta( old_dict.keys(), new_dict.keys() ) return ( {k: new_dict[k] for k in added_keys}, {k: old_dict[k] for k in removed_keys}, {k: new_dict[k] for k in updated_keys}, )
python/ray/serve/utils.py
113
ray
{ "docstring": "Given two dicts, return the entries that's (added, removed, updated).\n\n Usage:\n >>> old = {\"a\": 1, \"b\": 2}\n >>> new = {\"a\": 3, \"d\": 4}\n >>> compute_dict_delta(old, new)\n ({\"d\": 4}, {\"b\": 2}, {\"a\": 3})\n ", "language": "en", "n_whitespaces": 68, "n_words": 34, "vocab_size": 29 }
36
Python
26
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
utils.py
131,053
17
79
compute_dict_delta
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
79
0
29,455
10
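A self-contained sketch of the delta computation shown in the `compute_dict_delta` docstring above, using plain set arithmetic; the real helper delegates the key classification to `compute_iterable_delta`.

```python
old = {"a": 1, "b": 2}
new = {"a": 3, "d": 4}

added_keys = new.keys() - old.keys()
removed_keys = old.keys() - new.keys()
updated_keys = {k for k in old.keys() & new.keys() if old[k] != new[k]}

added = {k: new[k] for k in added_keys}      # {'d': 4}
removed = {k: old[k] for k in removed_keys}  # {'b': 2}
updated = {k: new[k] for k in updated_keys}  # {'a': 3}
print(added, removed, updated)
```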
1
3
def convert_shapes(input_shape, to_tuples=True):
keras/utils/tf_utils.py
18
keras
{ "docstring": "Converts nested shape representations to desired format.\n\n Performs:\n\n TensorShapes -> tuples if `to_tuples=True`.\n tuples of int or None -> TensorShapes if `to_tuples=False`.\n\n Valid objects to be converted are:\n - TensorShapes\n - tuples with elements of type int or None.\n - ints\n - None\n\n Args:\n input_shape: A nested structure of objects to be converted to TensorShapes.\n to_tuples: If `True`, converts all TensorShape to tuples. Otherwise converts\n all tuples representing shapes to TensorShapes.\n\n Returns:\n Nested structure of shapes in desired format.\n\n Raises:\n ValueError: when the input tensor shape can't be converted to tuples, eg\n unknown tensor shape.\n ", "language": "en", "n_whitespaces": 165, "n_words": 95, "vocab_size": 58 }
3
Python
3
84afc5193d38057e2e2badf9c889ea87d80d8fbf
tf_utils.py
277,095
7
25
convert_shapes
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
6
0
81,867
6
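Since the code field above only captures the signature of `convert_shapes`, here is a small, hedged illustration of the two representations its docstring says it converts between (a `TensorShape` versus a plain tuple of ints/None); this is not the Keras implementation itself.

```python
import tensorflow as tf

shape = tf.TensorShape([None, 32])
print(tuple(shape.as_list()))       # (None, 32): the "to_tuples=True" direction
print(tf.TensorShape((None, 32)))   # rebuilding a TensorShape from a tuple
```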
3
16
def get_performance_issue_description_data(self, event): spans, matched_problem = get_span_and_problem(event) if not matched_problem: return "" parent_span, repeating_spans = get_parent_and_repeating_spans(spans, matched_problem) transaction_name = get_span_evidence_value_problem(matched_problem) parent_span = get_span_evidence_value(parent_span) repeating_spans = get_span_evidence_value(repeating_spans) num_repeating_spans = ( str(len(matched_problem.offender_span_ids)) if matched_problem.offender_span_ids else "" ) return (transaction_name, parent_span, num_repeating_spans, repeating_spans)
src/sentry/integrations/mixins/issues.py
128
sentry
{ "docstring": "Generate the span evidence data from a performance issue to populate\n an integration's ticket description. Each integration will need to take\n this data and format it appropriately.\n ", "language": "en", "n_whitespaces": 48, "n_words": 27, "vocab_size": 25 }
40
Python
30
0711b240a4efe79f06629914d5836cd6acbfcf78
issues.py
88,271
12
79
get_performance_issue_description_data
https://github.com/getsentry/sentry.git
feat(github): Add span evidence to performance issues (#41041) Add span evidence to the description of a GitHub issue created from a performance issue. Currently the GitHub issue is fairly empty as for an error issue it shows the stacktrace, but for a performance issue it's just a link back to the Sentry issue. <img width="1019" alt="Screen Shot 2022-11-04 at 2 03 29 PM" src="https://user-images.githubusercontent.com/29959063/200081055-f5119f8a-3467-490f-b5a4-f30f179a9620.png">
132
0
18,365
13
2
17
def install_src(collection, b_collection_path, b_collection_output_path, artifacts_manager): r collection_meta = artifacts_manager.get_direct_collection_meta(collection) if 'build_ignore' not in collection_meta: # installed collection, not src # FIXME: optimize this? use a different process? copy instead of build? collection_meta['build_ignore'] = [] collection_manifest = _build_manifest(**collection_meta) file_manifest = _build_files_manifest( b_collection_path, collection_meta['namespace'], collection_meta['name'], collection_meta['build_ignore'], ) collection_output_path = _build_collection_dir( b_collection_path, b_collection_output_path, collection_manifest, file_manifest, ) display.display( 'Created collection for {coll!s} at {path!s}'. format(coll=collection, path=collection_output_path) )
lib/ansible/galaxy/collection/__init__.py
150
ansible
{ "docstring": "Install the collection from source control into given dir.\n\n Generates the Ansible collection artifact data from a galaxy.yml and\n installs the artifact to a directory.\n This should follow the same pattern as build_collection, but instead\n of creating an artifact, install it.\n\n :param collection: Collection to be installed.\n :param b_collection_path: Collection dirs layout path.\n :param b_collection_output_path: The installation directory for the \\\n collection artifact.\n :param artifacts_manager: Artifacts manager.\n\n :raises AnsibleError: If no collection metadata found.\n ", "language": "en", "n_whitespaces": 140, "n_words": 74, "vocab_size": 59 }
63
Python
52
b439e41a915ccec0ccbabecc966919ea406db74e
__init__.py
267,136
33
93
install_src
https://github.com/ansible/ansible.git
expand ansible-doc coverage (#74963) * Expand ansible-doc to tests/filters and fix existing issues enable filter/test docs if in single file or companion yaml add docs for several filters/tests plugins allow .yml companion for docs for other plugins, must be colocated verify plugins are valid (not modules, cannot) fix 'per collection' filtering limit old style deprecation (_ prefix) to builtin/legacy start move to pathlib for saner path handling moved some funcitons, kept backwards compat shims with deprecation notice Co-authored-by: Abhijeet Kasurde <[email protected]> Co-authored-by: Felix Fontein <[email protected]> Co-authored-by: Sandra McCann <[email protected]>
156
0
78,754
10
23
58
def in1d(ar1, ar2, assume_unique=False, invert=False, method='auto'): # Ravel both arrays, behavior for the first array could be different ar1 = np.asarray(ar1).ravel() ar2 = np.asarray(ar2).ravel() # Ensure that iteration through object arrays yields size-1 arrays if ar2.dtype == object: ar2 = ar2.reshape(-1, 1) # Convert booleans to uint8 so we can use the fast integer algorithm if ar1.dtype == bool: ar1 = ar1.view(np.uint8) if ar2.dtype == bool: ar2 = ar2.view(np.uint8) # Check if we can use a fast integer algorithm: integer_arrays = (np.issubdtype(ar1.dtype, np.integer) and np.issubdtype(ar2.dtype, np.integer)) if method not in {'auto', 'sort', 'dictionary'}: raise ValueError( "Invalid method: {0}. ".format(method) + "Please use 'auto', 'sort' or 'dictionary'.") if integer_arrays and method in {'auto', 'dictionary'}: ar2_min = np.min(ar2) ar2_max = np.max(ar2) ar2_size = ar2.size # Check for integer overflow with np.errstate(over='raise'): try: ar2_range = ar2_max - ar2_min # Optimal performance is for approximately # log10(size) > (log10(range) - 2.27) / 0.927. # See discussion on # https://github.com/numpy/numpy/pull/12065 optimal_parameters = ( np.log10(ar2_size) > ((np.log10(ar2_range + 1.0) - 2.27) / 0.927) ) except FloatingPointError: optimal_parameters = False # Use the fast integer algorithm if optimal_parameters or method == 'dictionary': if invert: outgoing_array = np.ones_like(ar1, dtype=bool) else: outgoing_array = np.zeros_like(ar1, dtype=bool) # Make elements 1 where the integer exists in ar2 if invert: isin_helper_ar = np.ones(ar2_range + 1, dtype=bool) isin_helper_ar[ar2 - ar2_min] = 0 else: isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool) isin_helper_ar[ar2 - ar2_min] = 1 # Mask out elements we know won't work basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min) outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] - ar2_min] return outgoing_array elif method == 'dictionary': raise ValueError( "'dictionary' method is only " "supported for boolean or integer arrays. " "Please select 'sort' or 'auto' for the method." ) # Check if one of the arrays may contain arbitrary objects contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject # This code is run when # a) the first condition is true, making the code significantly faster # b) the second condition is true (i.e. `ar1` or `ar2` may contain # arbitrary objects), since then sorting is not guaranteed to work if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object: if invert: mask = np.ones(len(ar1), dtype=bool) for a in ar2: mask &= (ar1 != a) else: mask = np.zeros(len(ar1), dtype=bool) for a in ar2: mask |= (ar1 == a) return mask # Otherwise use sorting if not assume_unique: ar1, rev_idx = np.unique(ar1, return_inverse=True) ar2 = np.unique(ar2) ar = np.concatenate((ar1, ar2)) # We need this to be a stable sort, so always use 'mergesort' # here. The values from the first array should always come before # the values from the second array. order = ar.argsort(kind='mergesort') sar = ar[order] if invert: bool_ar = (sar[1:] != sar[:-1]) else: bool_ar = (sar[1:] == sar[:-1]) flag = np.concatenate((bool_ar, [invert])) ret = np.empty(ar.shape, dtype=bool) ret[order] = flag if assume_unique: return ret[:len(ar1)] else: return ret[rev_idx]
numpy/lib/arraysetops.py
971
numpy
{ "docstring": "\n Test whether each element of a 1-D array is also present in a second array.\n\n Returns a boolean array the same length as `ar1` that is True\n where an element of `ar1` is in `ar2` and False otherwise.\n\n We recommend using :func:`isin` instead of `in1d` for new code.\n\n Parameters\n ----------\n ar1 : (M,) array_like\n Input array.\n ar2 : array_like\n The values against which to test each value of `ar1`.\n assume_unique : bool, optional\n If True, the input arrays are both assumed to be unique, which\n can speed up the calculation. Default is False.\n invert : bool, optional\n If True, the values in the returned array are inverted (that is,\n False where an element of `ar1` is in `ar2` and True otherwise).\n Default is False. ``np.in1d(a, b, invert=True)`` is equivalent\n to (but is faster than) ``np.invert(in1d(a, b))``.\n method : {'auto', 'sort', 'dictionary'}, optional\n The algorithm to use. This will not affect the final result,\n but will affect the speed. Default is 'auto'.\n\n - If 'sort', will use a sort-based approach.\n - If 'dictionary', will use a key-dictionary approach similar\n to a counting sort. This is only available for boolean and\n integer arrays.\n - If 'auto', will automatically choose the method which is\n expected to perform the fastest, which depends\n on the size and range of `ar2`. For larger sizes,\n 'dictionary' is chosen. For larger range or smaller\n sizes, 'sort' is chosen.\n\n .. versionadded:: 1.8.0\n\n Returns\n -------\n in1d : (M,) ndarray, bool\n The values `ar1[in1d]` are in `ar2`.\n\n See Also\n --------\n isin : Version of this function that preserves the\n shape of ar1.\n numpy.lib.arraysetops : Module with a number of other functions for\n performing set operations on arrays.\n\n Notes\n -----\n `in1d` can be considered as an element-wise function version of the\n python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly\n equivalent to ``np.array([item in b for item in a])``.\n However, this idea fails if `ar2` is a set, or similar (non-sequence)\n container: As ``ar2`` is converted to an array, in those cases\n ``asarray(ar2)`` is an object array rather than the expected array of\n contained values.\n\n .. versionadded:: 1.4.0\n\n Examples\n --------\n >>> test = np.array([0, 1, 2, 5, 0])\n >>> states = [0, 2]\n >>> mask = np.in1d(test, states)\n >>> mask\n array([ True, False, True, False, True])\n >>> test[mask]\n array([0, 2, 0])\n >>> mask = np.in1d(test, states, invert=True)\n >>> mask\n array([False, True, False, True, False])\n >>> test[mask]\n array([1, 5])\n ", "language": "en", "n_whitespaces": 763, "n_words": 397, "vocab_size": 223 }
469
Python
245
d7e2582cd33b22a767286e8a3d95b336dfe51a34
arraysetops.py
160,657
77
600
in1d
https://github.com/numpy/numpy.git
MAINT: bool instead of np.bool_ dtype
1,320
0
38,687
21
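A minimal sketch of the integer "dictionary" path described above: build a boolean lookup table spanning `ar2`'s value range, then index it with `ar1`. This reproduces the docstring example but omits the overflow checks and the sort fallback.

```python
import numpy as np

ar1 = np.array([0, 1, 2, 5, 0])
ar2 = np.array([0, 2])

lo, hi = ar2.min(), ar2.max()
table = np.zeros(hi - lo + 1, dtype=bool)
table[ar2 - lo] = True                      # mark every value present in ar2

in_range = (ar1 >= lo) & (ar1 <= hi)        # values outside [lo, hi] cannot match
mask = np.zeros(ar1.shape, dtype=bool)
mask[in_range] = table[ar1[in_range] - lo]
print(mask)  # [ True False  True False  True]
```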
6
18
def world_size(self): if is_torch_tpu_available(): return xm.xrt_world_size() elif is_sagemaker_mp_enabled(): return smp.dp_size() if not smp.state.cfg.prescaled_batch else smp.rdp_size() elif is_sagemaker_dp_enabled(): return sm_dist.get_world_size() elif self.local_rank != -1: return torch.distributed.get_world_size() return 1
src/transformers/training_args.py
122
transformers
{ "docstring": "\n The number of processes used in parallel.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
27
Python
20
2eb7bb15e771f13192968cd4657c78f76b0799fe
training_args.py
35,787
10
72
world_size
https://github.com/huggingface/transformers.git
Updates in Trainer to support new features in SM Model Parallel library (#15877) * Create optimizer after model creation for SMP * update dp_rank to rdp_rank for opt_state_dict * update world_size and process_index for smp * Address comments * Lint fix Co-authored-by: Cavdar <[email protected]>
113
0
6,535
13
6
17
def _is_packed(dtype): align = dtype.isalignedstruct max_alignment = 1 total_offset = 0 for name in dtype.names: fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name]) if align: total_offset = _aligned_offset(total_offset, fld_dtype.alignment) max_alignment = max(max_alignment, fld_dtype.alignment) if fld_offset != total_offset: return False total_offset += fld_dtype.itemsize if align: total_offset = _aligned_offset(total_offset, max_alignment) if total_offset != dtype.itemsize: return False return True
numpy/core/_dtype.py
153
numpy
{ "docstring": "\n Checks whether the structured data type in 'dtype'\n has a simple layout, where all the fields are in order,\n and follow each other with no alignment padding.\n\n When this returns true, the dtype can be reconstructed\n from a list of the field names and dtypes with no additional\n dtype parameters.\n\n Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.\n ", "language": "en", "n_whitespaces": 80, "n_words": 55, "vocab_size": 45 }
53
Python
32
a0c2e826738daa0cbd83aba85852405b73878f5b
_dtype.py
160,281
17
97
_is_packed
https://github.com/numpy/numpy.git
API: Fix structured dtype cast-safety, promotion, and comparison This PR replaces the old gh-15509 implementing proper type promotion for structured voids. It further fixes the casting safety to consider casts with equivalent field number and matching order as "safe" and if the names, titles, and offsets match as "equiv". The change perculates into the void comparison, and since it fixes the order, it removes the current FutureWarning there as well. This addresses https://github.com/liberfa/pyerfa/issues/77 and replaces gh-15509 (the implementation has changed too much). Fixes gh-15494 (and probably a few more) Co-authored-by: Allan Haldane <[email protected]>
152
0
38,590
13
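To illustrate the two layouts `_is_packed` distinguishes, the same pair of fields yields different itemsizes depending on whether C-style alignment padding is inserted:

```python
import numpy as np

packed = np.dtype([("a", np.int32), ("b", np.uint8)])
aligned = np.dtype([("a", np.int32), ("b", np.uint8)], align=True)

print(packed.itemsize, aligned.itemsize)                # 5 8 (padding added when aligned)
print(packed.isalignedstruct, aligned.isalignedstruct)  # False True
```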
2
4
async def _async_stop(self) -> None: if self._cancel_watchdog: self._cancel_watchdog() self._cancel_watchdog = None await self._async_stop_scanner()
homeassistant/components/bluetooth/scanner.py
53
core
{ "docstring": "Cancel watchdog and bluetooth discovery under the lock.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
13
Python
13
ced8278e3222501dde7d769ea4b57aae75f62438
scanner.py
304,515
6
29
_async_stop
https://github.com/home-assistant/core.git
Auto recover when the Bluetooth adapter stops responding (#77043)
56
0
103,322
9
1
1
def netdev():
salt/modules/status.py
12
salt
{ "docstring": "\n .. versionchanged:: 2016.3.2\n Return the network device stats for this minion\n\n .. versionchanged:: 2016.11.4\n Added support for AIX\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' status.netdev\n ", "language": "en", "n_whitespaces": 63, "n_words": 26, "vocab_size": 22 }
2
Python
2
fe48a85e8204f3840264f16235ea3bde3e664c65
status.py
215,934
14
56
netdev
https://github.com/saltstack/salt.git
Allow for Python 3 using view objects for a dictionary keys() function
5
0
54,260
6
2
7
def remove_module_load(state_dict): new_state_dict = OrderedDict() for k, v in state_dict.items(): new_state_dict[k[7:]] = v return new_state_dict
fastai/torch_core.py
57
DeOldify
{ "docstring": "create new OrderedDict that does not contain `module.`", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
15
Python
12
4fc3616712edb19179b17dd270ad6cf63abf99c2
torch_core.py
190,450
4
34
remove_module_load
https://github.com/jantic/DeOldify.git
Upgrading to support latest Pytorch version
27
0
46,351
11
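A toy illustration of `remove_module_load` above: keys saved from a model wrapped in `nn.DataParallel` carry a `module.` prefix, and slicing with `k[7:]` strips it (`len("module.") == 7`). The key names below are invented.

```python
from collections import OrderedDict

state_dict = OrderedDict([("module.conv1.weight", "w1"), ("module.fc.bias", "b1")])
new_state_dict = OrderedDict((k[7:], v) for k, v in state_dict.items())
print(list(new_state_dict))  # ['conv1.weight', 'fc.bias']
```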
5
18
def in_place_subclassed_model_state_restoration(model): assert not model._is_graph_network # Restore layers and build attributes if ( hasattr(model, "_original_attributes_cache") and model._original_attributes_cache is not None ): # Models have sticky attribute assignment, so we want to be careful to # add back the previous attributes and track Layers by their original # names without adding dependencies on "utility" attributes which Models # exempt when they're constructed. setattr_tracking = model._setattr_tracking model._setattr_tracking = False model._self_tracked_trackables = [] for name, value in model._original_attributes_cache.items(): setattr(model, name, value) if isinstance(value, Layer): model._self_tracked_trackables.append(value) model._original_attributes_cache = None model._setattr_tracking = setattr_tracking else: # Restore to the state of a never-called model. _reset_build_compile_trackers(model) @keras_export("keras.__internal__.models.clone_and_build_model", v1=[])
keras/models/cloning.py
181
@keras_export("keras.__internal__.models.clone_and_build_model", v1=[])
keras
{ "docstring": "Restores the original state of a model after it was \"reset\".\n\n This undoes this action of `_in_place_subclassed_model_reset`, which is\n called in `clone_and_build_model` if `in_place_reset` is set to True.\n\n Args:\n model: Instance of a Keras model created via subclassing, on which\n `_in_place_subclassed_model_reset` was previously called.\n ", "language": "en", "n_whitespaces": 68, "n_words": 44, "vocab_size": 37 }
101
Python
75
f0fc6f798937a7a5fdab469c0f16bdde7cfc4ccd
cloning.py
278,255
17
97
in_place_subclassed_model_state_restoration
https://github.com/keras-team/keras.git
resolve line-too-long in models
253
1
82,432
14
5
23
def _make_inc_temp(self, suffix="", prefix="", directory_name=None): if directory_name is None: directory_name = ray._private.utils.get_ray_temp_dir() directory_name = os.path.expanduser(directory_name) index = self._incremental_dict[suffix, prefix, directory_name] # `tempfile.TMP_MAX` could be extremely large, # so using `range` in Python2.x should be avoided. while index < tempfile.TMP_MAX: if index == 0: filename = os.path.join(directory_name, prefix + suffix) else: filename = os.path.join( directory_name, prefix + "." + str(index) + suffix ) index += 1 if not os.path.exists(filename): # Save the index. self._incremental_dict[suffix, prefix, directory_name] = index return filename raise FileExistsError(errno.EEXIST, "No usable temporary filename found")
python/ray/node.py
228
ray
{ "docstring": "Return a incremental temporary file name. The file is not created.\n\n Args:\n suffix (str): The suffix of the temp file.\n prefix (str): The prefix of the temp file.\n directory_name (str) : The base directory of the temp file.\n\n Returns:\n A string of file name. If there existing a file having\n the same name, the returned name will look like\n \"{directory_name}/{prefix}.{unique_index}{suffix}\"\n ", "language": "en", "n_whitespaces": 155, "n_words": 60, "vocab_size": 38 }
86
Python
60
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
node.py
130,791
17
142
_make_inc_temp
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
306
0
29,365
17
1
4
def captured_stdout() -> ContextManager[StreamWrapper]: return captured_output("stdout")
pipenv/patched/notpip/_internal/utils/misc.py
30
pipenv
{ "docstring": "Capture the output of sys.stdout:\n\n with captured_stdout() as stdout:\n print('hello')\n self.assertEqual(stdout.getvalue(), 'hello\\n')\n\n Taken from Lib/support/__init__.py in the CPython repo.\n ", "language": "en", "n_whitespaces": 47, "n_words": 19, "vocab_size": 18 }
6
Python
6
f3166e673fe8d40277b804d35d77dcdb760fc3b3
misc.py
19,989
10
15
captured_stdout
https://github.com/pypa/pipenv.git
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
12
0
3,166
8
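The standard-library pattern that `captured_stdout` builds on, shown here with `contextlib.redirect_stdout` and an in-memory buffer; the pip-internal helper additionally wraps the stream, but the capture idea is the same.

```python
import io
from contextlib import redirect_stdout

buf = io.StringIO()
with redirect_stdout(buf):
    print("hello")
assert buf.getvalue() == "hello\n"
```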
4
23
def test_a3c_compilation(self): config = a3c.DEFAULT_CONFIG.copy() config["num_workers"] = 2 config["num_envs_per_worker"] = 2 num_iterations = 1 # Test against all frameworks. for _ in framework_iterator(config, with_eager_tracing=True): for env in ["CartPole-v1", "Pendulum-v1", "PongDeterministic-v0"]: print("env={}".format(env)) config["model"]["use_lstm"] = env == "CartPole-v1" trainer = a3c.A3CTrainer(config=config, env=env) for i in range(num_iterations): results = trainer.train() check_train_results(results) print(results) check_compute_single_action( trainer, include_state=config["model"]["use_lstm"] ) trainer.stop()
rllib/agents/a3c/tests/test_a3c.py
224
ray
{ "docstring": "Test whether an A3CTrainer can be built with both frameworks.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
54
Python
42
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
test_a3c.py
133,631
18
129
test_a3c_compilation
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
295
0
30,064
15
1
5
def test_author_name_present(self): response = self.get_for_author(1) self.assertContains(response, "J. R. R. Tolkien", 2)
wagtail/contrib/modeladmin/tests/test_simple_modeladmin.py
42
wagtail
{ "docstring": "\n The author name should appear twice. Once in the header, and once\n more in the field listing\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 15 }
11
Python
10
d10f15e55806c6944827d801cd9c2d53f5da4186
test_simple_modeladmin.py
73,270
3
24
test_author_name_present
https://github.com/wagtail/wagtail.git
Reformat with black
32
0
16,001
8
19
43
def _update_title_position(self, renderer): if self._autotitlepos is not None and not self._autotitlepos: _log.debug('title position was updated manually, not adjusting') return titles = (self.title, self._left_title, self._right_title) for title in titles: x, _ = title.get_position() # need to start again in case of window resizing title.set_position((x, 1.0)) # need to check all our twins too... axs = self._twinned_axes.get_siblings(self) # and all the children for ax in self.child_axes: if ax is not None: locator = ax.get_axes_locator() if locator: pos = locator(self, renderer) ax.apply_aspect(pos) else: ax.apply_aspect() axs = axs + [ax] top = -np.Inf for ax in axs: bb = None if (ax.xaxis.get_ticks_position() in ['top', 'unknown'] or ax.xaxis.get_label_position() == 'top'): bb = ax.xaxis.get_tightbbox(renderer) if bb is None: bb = ax.get_window_extent(renderer) top = max(top, bb.ymax) if title.get_text(): ax.yaxis.get_tightbbox(renderer) # update offsetText if ax.yaxis.offsetText.get_text(): bb = ax.yaxis.offsetText.get_tightbbox(renderer) if bb.intersection(title.get_tightbbox(renderer), bb): top = bb.ymax if top < 0: # the top of Axes is not even on the figure, so don't try and # automatically place it. _log.debug('top of Axes not in the figure, so title not moved') return if title.get_window_extent(renderer).ymin < top: _, y = self.transAxes.inverted().transform((0, top)) title.set_position((x, y)) # empirically, this doesn't always get the min to top, # so we need to adjust again. if title.get_window_extent(renderer).ymin < top: _, y = self.transAxes.inverted().transform( (0., 2 * top - title.get_window_extent(renderer).ymin)) title.set_position((x, y)) ymax = max(title.get_position()[1] for title in titles) for title in titles: # now line up all the titles at the highest baseline. x, _ = title.get_position() title.set_position((x, ymax)) # Drawing
lib/matplotlib/axes/_base.py
662
matplotlib
{ "docstring": "\n Update the title position based on the bounding box enclosing\n all the ticklabels and x-axis spine and xlabel...\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 15 }
245
Python
135
cfabe79945743dd375db4fe8bcdbaab00330dfe8
_base.py
106,974
47
411
_update_title_position
https://github.com/matplotlib/matplotlib.git
FIX: Autoposition title when yaxis has offset Move any title above the y axis offset text it would overlap with the offset. If multiple titles are present, they are vertically aligned to the highest one.
1,070
0
22,531
19
1
3
def geturl(self): return self.url
python3.10.4/Lib/http/client.py
19
XX-Net
{ "docstring": "Return the real URL of the page.\n\n In some cases, the HTTP server redirects a client to another\n URL. The urlopen() function handles this transparently, but in\n some cases the caller needs to know which URL the client was\n redirected to. The geturl() method can be used to get at this\n redirected URL.\n\n ", "language": "en", "n_whitespaces": 95, "n_words": 53, "vocab_size": 40 }
4
Python
4
8198943edd73a363c266633e1aa5b2a9e9c9f526
client.py
217,715
2
10
geturl
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
18
0
54,897
6
5
22
def get_updated_history(self, current_stream_state, latest_record_datetime, latest_record, current_parsed_datetime, state_date): history = current_stream_state.get("history", {}) file_modification_date = latest_record_datetime.strftime("%Y-%m-%d") # add record to history if record modified date in range delta start from state if latest_record_datetime.date() + timedelta(days=self.buffer_days) >= state_date: history_item = set(history.setdefault(file_modification_date, set())) history_item.add(latest_record[self.ab_file_name_col]) history[file_modification_date] = history_item # reset history to new date state if current_parsed_datetime.date() != state_date: history = { date: history[date] for date in history if datetime.strptime(date, "%Y-%m-%d").date() + timedelta(days=self.buffer_days) >= state_date } return history
airbyte-integrations/connectors/source-s3/source_s3/source_files_abstract/stream.py
215
airbyte
{ "docstring": "\n History is dict which basically groups files by their modified_at date.\n After reading each record we add its file to the history set if it wasn't already there.\n Then we drop from the history set any entries whose key is less than now - buffer_days\n ", "language": "en", "n_whitespaces": 74, "n_words": 45, "vocab_size": 40 }
73
Python
49
f9348b22517556e1af5d1831db7187b912ee0126
stream.py
5,513
14
134
get_updated_history
https://github.com/airbytehq/airbyte.git
🐛 Source Amazon S3: solve possible case of files being missed during incremental syncs (#12568) * Added history to state * Deleted unused import * Rollback abnormal state file * Rollback abnormal state file * Fixed type error issue * Fix state issue * Updated after review * Bumped version
229
0
784
17
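A plain-Python sketch of the history bookkeeping described above: file names are bucketed by modification date, and buckets older than `state_date - buffer_days` are dropped. Dates and file names below are invented.

```python
from datetime import date, timedelta

buffer_days = 3
state_date = date(2022, 5, 10)
history = {
    "2022-05-05": {"old.csv"},      # outside the buffer window, will be dropped
    "2022-05-09": {"recent.csv"},
}

# Record a newly seen file if its modification date is within the window.
mod_date = date(2022, 5, 10)
if mod_date + timedelta(days=buffer_days) >= state_date:
    history.setdefault(mod_date.isoformat(), set()).add("new.csv")

# Prune buckets that fell out of the window.
history = {
    d: files
    for d, files in history.items()
    if date.fromisoformat(d) + timedelta(days=buffer_days) >= state_date
}
print(history)  # {'2022-05-09': {'recent.csv'}, '2022-05-10': {'new.csv'}}
```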
3
12
def test_inheritance(self): should_contain = [ '<li>Villain: <a href="%s">Bob</a>' % reverse("admin:admin_views_villain_change", args=(self.sv1.pk,)), '<li>Super villain: <a href="%s">Bob</a>' % reverse("admin:admin_views_supervillain_change", args=(self.sv1.pk,)), "<li>Secret hideout: floating castle", "<li>Super secret hideout: super floating castle!", ] response = self.client.get( reverse("admin:admin_views_villain_delete", args=(self.sv1.pk,)) ) for should in should_contain: self.assertContains(response, should, 1) response = self.client.get( reverse("admin:admin_views_supervillain_delete", args=(self.sv1.pk,)) ) for should in should_contain: self.assertContains(response, should, 1)
tests/admin_views/tests.py
204
django
{ "docstring": "\n In the case of an inherited model, if either the child or\n parent-model instance is deleted, both instances are listed\n for deletion, as well as any relationships they have.\n ", "language": "en", "n_whitespaces": 58, "n_words": 29, "vocab_size": 27 }
55
Python
36
9c19aff7c7561e3a82978a272ecdaad40dda5c00
tests.py
207,745
19
128
test_inheritance
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
228
0
52,081
14
3
14
def store_or_execute(self, block, name): if name: # If storing it for further editing self.shell.user_ns[name] = SList(block.splitlines()) print("Block assigned to '%s'" % name) else: b = self.preclean_input(block) self.shell.user_ns['pasted_block'] = b self.shell.using_paste_magics = True try: self.shell.run_cell(b, store_history=True) finally: self.shell.using_paste_magics = False
IPython/terminal/magics.py
144
ipython
{ "docstring": " Execute a block, or store it in a variable, per the user's request.\n ", "language": "en", "n_whitespaces": 21, "n_words": 13, "vocab_size": 12 }
39
Python
33
75b3d1cc6d5e1e629705d8a7233a374f1e4235e7
magics.py
208,683
12
86
store_or_execute
https://github.com/ipython/ipython.git
Get history from sql. Fixes #13585 By getting history from sql we can get the transformed history. This also skip storing history if `%paste` is used and `%paste` itself will insert the pasted value in history which is more conveninent.
178
0
52,454
14
2
5
def _count_righthand_zero_bits(number, bits): if number == 0: return bits return min(bits, (~number & (number-1)).bit_length())
python3.10.4/Lib/ipaddress.py
57
XX-Net
{ "docstring": "Count the number of zero bits on the right hand side.\n\n Args:\n number: an integer.\n bits: maximum number of bits to count.\n\n Returns:\n The number of zero bits on the right hand side of the number.\n\n ", "language": "en", "n_whitespaces": 66, "n_words": 36, "vocab_size": 22 }
14
Python
13
8198943edd73a363c266633e1aa5b2a9e9c9f526
ipaddress.py
218,498
4
35
_count_righthand_zero_bits
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
30
0
55,351
13
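A worked example of the bit trick used above: `~n & (n - 1)` keeps exactly the trailing-zero positions of `n`, so its `bit_length()` is the count of righthand zero bits.

```python
n = 0b10100            # 20, which has two trailing zero bits
mask = ~n & (n - 1)    # 0b00011: only the trailing-zero positions survive
print(mask.bit_length())           # 2
print(min(32, mask.bit_length()))  # the helper additionally caps the count at `bits`
```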
2
6
def _deserialize_metric(metric_config): from keras import ( metrics as metrics_module, ) # pylint:disable=g-import-not-at-top if metric_config in ["accuracy", "acc", "crossentropy", "ce"]: # Do not deserialize accuracy and cross-entropy strings as we have special # case handling for these in compile, based on model output shape. return metric_config return metrics_module.deserialize(metric_config)
keras/saving/saving_utils.py
68
keras
{ "docstring": "Deserialize metrics, leaving special strings untouched.", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
47
Python
41
84afc5193d38057e2e2badf9c889ea87d80d8fbf
saving_utils.py
276,242
7
37
_deserialize_metric
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
91
0
81,600
8
1
3
def __call__(self, w): return w
keras/constraints.py
18
keras
{ "docstring": "Applies the constraint to the input weight variable.\n\n By default, the inputs weight variable is not modified.\n Users should override this method to implement their own projection\n function.\n\n Args:\n w: Input weight variable.\n\n Returns:\n Projected variable (by default, returns unmodified inputs).\n ", "language": "en", "n_whitespaces": 101, "n_words": 41, "vocab_size": 33 }
5
Python
5
84afc5193d38057e2e2badf9c889ea87d80d8fbf
constraints.py
270,120
2
10
__call__
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
19
0
80,394
6
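A hypothetical override of the kind the docstring above describes: a subclass whose `__call__` projects weights onto the non-negative orthant. The class name and the clipping rule are illustrative choices, not part of the Keras source.

```python
import tensorflow as tf

class NonNegative(tf.keras.constraints.Constraint):
    def __call__(self, w):
        # Project the incoming weight variable instead of returning it unchanged.
        return tf.maximum(w, 0.0)

layer = tf.keras.layers.Dense(4, kernel_constraint=NonNegative())
```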
1
3
def on_click(self) -> None: self.cycle_variant()
src/textual/widgets/_placeholder.py
25
textual
{ "docstring": "Clicking on the placeholder cycles through the placeholder variants.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 7 }
5
Python
5
4b5fd43423a327e4cd6d477a66bebc9588fd1488
_placeholder.py
185,863
3
13
on_click
https://github.com/Textualize/textual.git
Add scaffolding for the Placeholder widget.
19
0
45,212
7
3
12
def set_tunnel(self, host, port=None, headers=None): if self.sock: raise RuntimeError("Can't set up tunnel for established connection") self._tunnel_host, self._tunnel_port = self._get_hostport(host, port) if headers: self._tunnel_headers = headers else: self._tunnel_headers.clear()
python3.10.4/Lib/http/client.py
96
XX-Net
{ "docstring": "Set up host and port for HTTP CONNECT tunnelling.\n\n In a connection that uses HTTP CONNECT tunneling, the host passed to the\n constructor is used as a proxy server that relays all communication to\n the endpoint passed to `set_tunnel`. This done by sending an HTTP\n CONNECT request to the proxy server when the connection is established.\n\n This method must be called before the HTTP connection has been\n established.\n\n The headers argument should be a mapping of extra HTTP headers to send\n with the CONNECT request.\n ", "language": "en", "n_whitespaces": 148, "n_words": 85, "vocab_size": 54 }
27
Python
25
8198943edd73a363c266633e1aa5b2a9e9c9f526
client.py
217,711
8
59
set_tunnel
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
95
0
54,894
11
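A usage sketch of the CONNECT-tunnelling flow the docstring describes: connect to the proxy, declare the real endpoint with `set_tunnel`, then issue requests as usual. The proxy and target hosts below are placeholders, so running this verbatim requires a reachable proxy.

```python
import http.client

conn = http.client.HTTPSConnection("proxy.example.com", 8080)   # placeholder proxy
conn.set_tunnel("www.example.com", 443, headers={"User-Agent": "demo"})
conn.request("GET", "/")
resp = conn.getresponse()
print(resp.status)
```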
4
33
def get_ann_info(self, idx): img_id = self.data_infos[idx]['img_id'] bboxes = [] labels = [] bboxes_ignore = [] labels_ignore = [] is_occludeds = [] is_truncateds = [] is_group_ofs = [] is_depictions = [] is_insides = [] for obj in self.ann_infos[img_id]: label = int(obj['label']) bbox = [ float(obj['bbox'][0]), float(obj['bbox'][1]), float(obj['bbox'][2]), float(obj['bbox'][3]) ] bboxes.append(bbox) labels.append(label) # Other parameters is_occludeds.append(obj['is_occluded']) is_truncateds.append(obj['is_truncated']) is_group_ofs.append(obj['is_group_of']) is_depictions.append(obj['is_depiction']) is_insides.append(obj['is_inside']) if not bboxes: bboxes = np.zeros((0, 4)) labels = np.zeros((0, )) else: bboxes = np.array(bboxes) labels = np.array(labels) if not bboxes_ignore: bboxes_ignore = np.zeros((0, 4)) labels_ignore = np.zeros((0, )) else: bboxes_ignore = np.array(bboxes_ignore) labels_ignore = np.array(labels_ignore) assert len(is_group_ofs) == len(labels) == len(bboxes) gt_is_group_ofs = np.array(is_group_ofs, dtype=np.bool) # These parameters is not used yet. is_occludeds = np.array(is_occludeds, dtype=np.bool) is_truncateds = np.array(is_truncateds, dtype=np.bool) is_depictions = np.array(is_depictions, dtype=np.bool) is_insides = np.array(is_insides, dtype=np.bool) ann = dict( bboxes=bboxes.astype(np.float32), labels=labels.astype(np.int64), bboxes_ignore=bboxes_ignore.astype(np.float32), labels_ignore=labels_ignore.astype(np.int64), gt_is_group_ofs=gt_is_group_ofs, is_occludeds=is_occludeds, is_truncateds=is_truncateds, is_depictions=is_depictions, is_insides=is_insides) return ann
mmdet/datasets/openimages.py
674
mmdetection
{ "docstring": "Get OpenImages annotation by index.\n\n Args:\n idx (int): Index of data.\n\n Returns:\n dict: Annotation info of specified index.\n ", "language": "en", "n_whitespaces": 61, "n_words": 18, "vocab_size": 16 }
141
Python
79
1516986a616fee8bb741d0ab2be40683045efccd
openimages.py
243,999
55
423
get_ann_info
https://github.com/open-mmlab/mmdetection.git
[Feature] Support OpenImages Dataset (#6331) * [Feature] support openimage group of eval * [Feature] support openimage group of eval * support openimage dataset * support openimage challenge dataset * fully support OpenImages-V6 and OpenImages Challenge 2019 * Fix some logic error * update config file * fix get data_infos error * fully support OpenImages evaluation * update OpenImages config files * [Feature] support OpenImages datasets * fix bug * support load image metas from pipeline * fix bug * fix get classes logic error * update code * support get image metas * support openimags * support collect image metas * support Open Images * fix openimages logic * minor fix * add a new function to compute openimages tpfp * minor fix * fix ci error * minor fix * fix indication * minor fix * fix returns * fix returns * fix returns * fix returns * fix returns * minor fix * update readme * support loading image level labels and fix some logic * minor fix * minor fix * add class names * minor fix * minor fix * minor fix * add openimages test unit * minor fix * minor fix * fix test unit * minor fix * fix logic error * minor fix * fully support openimages * minor fix * fix docstring * fix docstrings in readthedocs * update get image metas script * label_description_file -> label_file * update openimages readme * fix test unit * fix test unit * minor fix * update readme file * Update get_image_metas.py
684
0
70,189
14
1
17
def test_chord_clone_kwargs(self, subtests): with subtests.test(msg='Verify chord cloning clones kwargs correctly'): c = chord([signature('g'), signature('h')], signature('i'), kwargs={'U': 6}) c2 = c.clone() assert c2.kwargs == c.kwargs with subtests.test(msg='Cloning the chord with overridden kwargs'): override_kw = {'X': 2} c3 = c.clone(args=(1,), kwargs=override_kw) with subtests.test(msg='Verify the overridden kwargs were cloned correctly'): new_kw = c.kwargs.copy() new_kw.update(override_kw) assert c3.kwargs == new_kw
t/unit/tasks/test_canvas.py
222
celery
{ "docstring": " Test that chord clone ensures the kwargs are the same ", "language": "en", "n_whitespaces": 11, "n_words": 10, "vocab_size": 9 }
55
Python
39
c3c6594b4cdea898abba218f576a669700dba98d
test_canvas.py
208,148
12
127
test_chord_clone_kwargs
https://github.com/celery/celery.git
BLM-2: Adding unit tests to chord clone (#7668) * Added .python-version and .vscode to .gitignore * Added test_chord_clone_kwargs() to verify chord cloning treats kwargs correctly * Happify linter
171
0
52,217
14
2
18
def save(self, filename, data): logger.debug("filename: %s, data type: %s", filename, type(data)) filename = self._check_extension(filename) try: with open(filename, self._write_option) as s_file: s_file.write(self.marshal(data)) except IOError as err: msg = f"Error writing to '{filename}': {err.strerror}" raise FaceswapError(msg) from err
lib/serializer.py
131
faceswap
{ "docstring": " Serialize data and save to a file\n\n Parameters\n ----------\n filename: str\n The path to where the serialized file should be saved\n data: varies\n The data that is to be serialized to file\n\n Example\n ------\n >>> serializer = get_serializer('json')\n >>> data ['foo', 'bar']\n >>> json_file = '/path/to/json/file.json'\n >>> serializer.save(json_file, data)\n ", "language": "en", "n_whitespaces": 149, "n_words": 49, "vocab_size": 35 }
36
Python
33
bad5025aea1adb9126580e14e064e6c99089243d
serializer.py
100,940
9
72
save
https://github.com/deepfakes/faceswap.git
Core updates - Change loss loading mechanism - Autosize tooltips based on content size - Random linting + code modernisation
119
0
20,387
13
6
12
def _media_status(self): media_status = self.media_status media_status_received = self.media_status_received if ( media_status is None or media_status.player_state == MEDIA_PLAYER_STATE_UNKNOWN ): groups = self.mz_media_status for k, val in groups.items(): if val and val.player_state != MEDIA_PLAYER_STATE_UNKNOWN: media_status = val media_status_received = self.mz_media_status_received[k] break return (media_status, media_status_received)
homeassistant/components/cast/media_player.py
115
core
{ "docstring": "\n Return media status.\n\n First try from our own cast, then groups which our cast is a member in.\n ", "language": "en", "n_whitespaces": 40, "n_words": 18, "vocab_size": 17 }
42
Python
32
66551e6fcbd063e53c13adc8a6462b8e00ce1450
media_player.py
299,284
14
72
_media_status
https://github.com/home-assistant/core.git
Add state buffering to media_player and use it in cast (#70802)
200
0
98,218
14
2
42
def no_manual_dependency_tracking_scope(obj):
keras/feature_column/dense_features_v2.py
92
"""A context that disables manual dependency tracking for the given `obj`. Sometimes library methods might track objects on their own and we might want to disable that and do the tracking on our own. One can then use this context manager to disable the tracking the library method does and do your own tracking. For example:
keras
{ "docstring": "A context that disables manual dependency tracking for the given `obj`.\n\n Sometimes library methods might track objects on their own and we might want\n to disable that and do the tracking on our own. One can then use this context\n manager to disable the tracking the library method does and do your own\n tracking.\n\n For example:\n\n class TestLayer(tf.keras.Layer):", "language": "en", "n_whitespaces": 63, "n_words": 58, "vocab_size": 42 }
2
Python
2
0c959a0670a2bcb12dc7a1717ce7416ff1f7cc27
dense_features_v2.py
268,958
7
31
no_manual_dependency_tracking_scope
https://github.com/keras-team/keras.git
Remove deprecated TF1 Layer APIs `apply()`, `get_updates_for()`, `get_losses_for()`, and remove the `inputs` argument in the `add_loss()` method. PiperOrigin-RevId: 428134172
3
2
79,789
8
3
13
def load(self) -> Generator[Tuple[str, np.ndarray], None, None]: iterator = self._load_video_frames if self._is_video else self._load_disk_frames for filename, image in iterator(): yield filename, image
scripts/fsmedia.py
73
faceswap
{ "docstring": " Generator to load frames from a folder of images or from a video file.\n\n Yields\n ------\n filename: str\n The filename of the current frame\n image: :class:`numpy.ndarray`\n A single frame\n ", "language": "en", "n_whitespaces": 87, "n_words": 29, "vocab_size": 25 }
22
Python
20
1022651eb8a7741014f5d2ec7cbfe882120dfa5f
fsmedia.py
101,397
13
48
load
https://github.com/deepfakes/faceswap.git
Bugfix: convert - Gif Writer - Fix non-launch error on Gif Writer - convert plugins - linting - convert/fs_media/preview/queue_manager - typing - Change convert items from dict to Dataclass
54
0
20,812
9
11
30
def _execute_task(self, context, task_copy):
    # If the task has been deferred and is being executed due to a trigger,
    # then we need to pick the right method to come back to, otherwise
    # we go for the default execute
    execute_callable = task_copy.execute
    if self.next_method:
        # __fail__ is a special signal value for next_method that indicates
        # this task was scheduled specifically to fail.
        if self.next_method == "__fail__":
            next_kwargs = self.next_kwargs or {}
            raise TaskDeferralError(next_kwargs.get("error", "Unknown"))
        # Grab the callable off the Operator/Task and add in any kwargs
        execute_callable = getattr(task_copy, self.next_method)
        if self.next_kwargs:
            execute_callable = partial(execute_callable, **self.next_kwargs)
    # If a timeout is specified for the task, make it fail
    # if it goes beyond
    if task_copy.execution_timeout:
        # If we are coming in with a next_method (i.e. from a deferral),
        # calculate the timeout from our start_date.
        if self.next_method:
            timeout_seconds = (
                task_copy.execution_timeout - (timezone.utcnow() - self.start_date)
            ).total_seconds()
        else:
            timeout_seconds = task_copy.execution_timeout.total_seconds()
        try:
            # It's possible we're already timed out, so fast-fail if true
            if timeout_seconds <= 0:
                raise AirflowTaskTimeout()
            # Run task in timeout wrapper
            with timeout(timeout_seconds):
                result = execute_callable(context=context)
        except AirflowTaskTimeout:
            task_copy.on_kill()
            raise
    else:
        result = execute_callable(context=context)
    # If the task returns a result, push an XCom containing it
    if task_copy.do_xcom_push and result is not None:
        with create_session() as session:
            self.xcom_push(key=XCOM_RETURN_KEY, value=result, session=session)
            self._record_task_map_for_downstreams(result, session=session)
    return result
airflow/models/taskinstance.py
354
airflow
{ "docstring": "Executes Task (optionally with a Timeout) and pushes Xcom results", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
219
Python
138
d48a3a357fd89ec805d086d5b6c1f1d4daf77b9a
taskinstance.py
43,918
31
206
_execute_task
https://github.com/apache/airflow.git
Add TaskMap and TaskInstance.map_id (#20286) Co-authored-by: Ash Berlin-Taylor <[email protected]>
731
0
8,094
18
6
32
def update_pdfjs(target_version=None, legacy=False, gh_token=None):
    if target_version is None:
        version, url = get_latest_pdfjs_url(gh_token, legacy=legacy)
    else:
        # We need target_version as x.y.z, without the 'v' prefix, though the
        # user might give it on the command line
        if target_version.startswith('v'):
            target_version = target_version[1:]
        # version should have the prefix to be consistent with the return value
        # of get_latest_pdfjs_url()
        version = 'v' + target_version
        suffix = "-legacy" if legacy else ""
        url = ('https://github.com/mozilla/pdf.js/releases/download/'
               f'{version}/pdfjs-{target_version}{suffix}-dist.zip')

    os.chdir(os.path.join(os.path.dirname(os.path.abspath(__file__)),
                          '..', '..'))
    target_path = os.path.join('qutebrowser', '3rdparty', 'pdfjs')
    print(f"=> Downloading pdf.js {version}{' (legacy)' if legacy else ''}")
    try:
        (archive_path, _headers) = urllib.request.urlretrieve(url)
    except urllib.error.HTTPError as error:
        print("Could not retrieve pdfjs {}: {}".format(version, error))
        return
    if os.path.isdir(target_path):
        print("Removing old version in {}".format(target_path))
        shutil.rmtree(target_path)
    os.makedirs(target_path)
    print("Extracting new version")
    shutil.unpack_archive(archive_path, target_path, 'zip')
    urllib.request.urlcleanup()
scripts/dev/update_3rdparty.py
390
qutebrowser
{ "docstring": "Download and extract the latest pdf.js version.\n\n If target_version is not None, download the given version instead.\n\n Args:\n target_version: None or version string ('x.y.z')\n legacy: Whether to download the legacy build for 83-based.\n gh_token: GitHub token to use for the API. Optional except on CI.\n ", "language": "en", "n_whitespaces": 75, "n_words": 45, "vocab_size": 38 }
122
Python
94
f6a365172afe127a4ba770e14569f2d3cd7569b4
update_3rdparty.py
320,711
26
208
update_pdfjs
https://github.com/qutebrowser/qutebrowser.git
Use legacy PDF.js build for macOS/Windows releases Fixes #7108
309
0
117,302
14
1
9
def test_callback_error(self) -> None:
    request = Mock(args={})
    request.args[b"error"] = [b"invalid_client"]
    self.get_success(self.handler.handle_oidc_callback(request))
    self.assertRenderedError("invalid_client", "")

    request.args[b"error_description"] = [b"some description"]
    self.get_success(self.handler.handle_oidc_callback(request))
    self.assertRenderedError("invalid_client", "some description")
tests/handlers/test_oidc.py
143
synapse
{ "docstring": "Errors from the provider returned in the callback are displayed.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
21
Python
17
5dd949bee6158a8b651db9f2ae417a62c8184bfd
test_oidc.py
247,637
9
83
test_callback_error
https://github.com/matrix-org/synapse.git
Add type hints to some tests/handlers files. (#12224)
77
0
71,801
10
1
3
def _Filters():
  return _cpplint_state.filters
code/deep/BJMMD/caffe/scripts/cpp_lint.py
18
transferlearning
{ "docstring": "Returns the module's list of output filters, as a list.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
4
Python
4
cc4d0564756ca067516f71718a3d135996525909
cpp_lint.py
60,414
2
9
_Filters
https://github.com/jindongwang/transferlearning.git
Balanced joint maximum mean discrepancy for deep transfer learning
6
0
12,142
6
1
2
def notchspansrc(self):
    return self["notchspansrc"]
packages/python/plotly/plotly/graph_objs/_box.py
22
plotly.py
{ "docstring": "\n Sets the source reference on Chart Studio Cloud for\n `notchspan`.\n\n The 'notchspansrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "language": "en", "n_whitespaces": 84, "n_words": 27, "vocab_size": 25 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_box.py
226,282
2
11
notchspansrc
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
57,955
7
14
34
def get_random_transform(self, img_shape, seed=None):
    img_row_axis = self.row_axis - 1
    img_col_axis = self.col_axis - 1

    if seed is not None:
        np.random.seed(seed)

    if self.rotation_range:
        theta = np.random.uniform(-self.rotation_range, self.rotation_range)
    else:
        theta = 0

    if self.height_shift_range:
        try:  # 1-D array-like or int
            tx = np.random.choice(self.height_shift_range)
            tx *= np.random.choice([-1, 1])
        except ValueError:  # floating point
            tx = np.random.uniform(
                -self.height_shift_range, self.height_shift_range
            )
        if np.max(self.height_shift_range) < 1:
            tx *= img_shape[img_row_axis]
    else:
        tx = 0

    if self.width_shift_range:
        try:  # 1-D array-like or int
            ty = np.random.choice(self.width_shift_range)
            ty *= np.random.choice([-1, 1])
        except ValueError:  # floating point
            ty = np.random.uniform(
                -self.width_shift_range, self.width_shift_range
            )
        if np.max(self.width_shift_range) < 1:
            ty *= img_shape[img_col_axis]
    else:
        ty = 0

    if self.shear_range:
        shear = np.random.uniform(-self.shear_range, self.shear_range)
    else:
        shear = 0

    if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
        zx, zy = 1, 1
    else:
        zx, zy = np.random.uniform(
            self.zoom_range[0], self.zoom_range[1], 2
        )

    flip_horizontal = (np.random.random() < 0.5) * self.horizontal_flip
    flip_vertical = (np.random.random() < 0.5) * self.vertical_flip

    channel_shift_intensity = None
    if self.channel_shift_range != 0:
        channel_shift_intensity = np.random.uniform(
            -self.channel_shift_range, self.channel_shift_range
        )

    brightness = None
    if self.brightness_range is not None:
        brightness = np.random.uniform(
            self.brightness_range[0], self.brightness_range[1]
        )

    transform_parameters = {
        "theta": theta,
        "tx": tx,
        "ty": ty,
        "shear": shear,
        "zx": zx,
        "zy": zy,
        "flip_horizontal": flip_horizontal,
        "flip_vertical": flip_vertical,
        "channel_shift_intensity": channel_shift_intensity,
        "brightness": brightness,
    }

    return transform_parameters
keras/preprocessing/image.py
703
keras
{ "docstring": "Generates random parameters for a transformation.\n\n Args:\n img_shape: Tuple of integers.\n Shape of the image that is transformed.\n seed: Random seed.\n\n Returns:\n A dictionary containing randomly chosen parameters describing the\n transformation.\n ", "language": "en", "n_whitespaces": 111, "n_words": 31, "vocab_size": 27 }
203
Python
108
84afc5193d38057e2e2badf9c889ea87d80d8fbf
image.py
275,708
68
450
get_random_transform
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
931
0
81,446
15
4
2
def test_dynamic_event_by_http(workflow_start_regular_shared_serve):
python/ray/workflow/tests/test_http_events.py
13
ray
{ "docstring": "If a workflow has dynamically generated event arguments, it should\n return the event as if the event was declared statically.\n ", "language": "en", "n_whitespaces": 26, "n_words": 20, "vocab_size": 17 }
2
Python
2
659d25a3a9c4794db9dbe8f428ec587470b261b0
test_http_events.py
126,132
20
91
test_dynamic_event_by_http
https://github.com/ray-project/ray.git
[workflow] http_event_provider and accompanied listener (#26010) ### Why are these changes needed? This PR enhances workflow functionality to receive external events from a Serve based HTTP endpoint. A workflow can then consume events asynchronously as they arrive. ### Design Logic A `workflow.wait_for_event` node subscribes to the endpoint instantiated by a Ray Serve deployment of class `http_event_provider.HTTPEventProvider`. The subscription is made through a helper class `http_event_provider.HTTPListener`. `HTTPListener` implements the methods of `EventListener` to poll from and confirm event checkpointing to `HTTPEventProvider`, before `HTTPEventProvider`acknowledges success or error to the event submitter. ### Architecture Improvement The logic of this enhancement conforms with existing workflow runtime design.
5
0
28,063
6
2
13
def select_query(self, targets, from_stmt, where_stmt):
    query = f"SELECT {','.join([t.__str__() for t in targets])} FROM {from_stmt.parts[-1]}"
    if where_stmt:
        query += f" WHERE {str(where_stmt)}"
    result = self.run_native_query(query)
    return result

# TODO: JOIN, SELECT INTO
mindsdb/integrations/postgres_handler/postgres_handler.py
106
mindsdb
{ "docstring": "\n Retrieve the data from the SQL statement with eliminated rows that dont satisfy the WHERE condition\n ", "language": "en", "n_whitespaces": 31, "n_words": 16, "vocab_size": 14 }
31
Python
28
32edb0b1468a705d89af89ed2b3dca2a459dc23f
postgres_handler.py
114,589
6
33
select_query
https://github.com/mindsdb/mindsdb.git
Select query
80
0
25,224
13
1
5
def patch(url, data=None, **kwargs):
    r
    return request("patch", url, data=data, **kwargs)
pipenv/patched/pip/_vendor/requests/api.py
43
pipenv
{ "docstring": "Sends a PATCH request.\n\n :param url: URL for the new :class:`Request` object.\n :param data: (optional) Dictionary, list of tuples, bytes, or file-like\n object to send in the body of the :class:`Request`.\n :param json: (optional) json data to send in the body of the :class:`Request`.\n :param \\*\\*kwargs: Optional arguments that ``request`` takes.\n :return: :class:`Response <Response>` object\n :rtype: requests.Response\n ", "language": "en", "n_whitespaces": 85, "n_words": 57, "vocab_size": 41 }
10
Python
10
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
api.py
22,051
12
28
patch
https://github.com/pypa/pipenv.git
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
15
0
4,138
8
4
29
def test_install_venv_project_directory(PipenvInstance):
    with PipenvInstance(chdir=True) as p:
        with temp_environ(), TemporaryDirectory(
            prefix="pipenv-", suffix="temp_workon_home"
        ) as workon_home:
            os.environ["WORKON_HOME"] = workon_home

            c = p.pipenv("install six")
            assert c.returncode == 0

            venv_loc = None
            for line in c.stderr.splitlines():
                if line.startswith("Virtualenv location:"):
                    venv_loc = Path(line.split(":", 1)[-1].strip())
            assert venv_loc is not None
            assert venv_loc.joinpath(".project").exists()


@pytest.mark.cli
@pytest.mark.deploy
@pytest.mark.system
tests/integration/test_install_basic.py
232
@pytest.mark.cli
@pytest.mark.deploy
@pytest.mark.system
pipenv
{ "docstring": "Test the project functionality during virtualenv creation.\n ", "language": "en", "n_whitespaces": 10, "n_words": 7, "vocab_size": 7 }
49
Python
39
949ee95d6748e8777bed589f0d990aa4792b28f8
test_install_basic.py
19,833
16
129
test_install_venv_project_directory
https://github.com/pypa/pipenv.git
More granular control over PIPENV_VENV_IN_PROJECT variable. (#5026) * Allow PIPENV_VENV_IN_PROJECT to be read in as None, and ensure if it is set to False that it does not use .venv directory. * refactor based on PR feedback and add news fragment. * Review unit test coverage and add new tests. Remove unneccesary bits from other tests.
188
1
3,106
22