Dataset schema (column name, dtype, and the min/max shown in the original dump; for string columns the two numbers are value lengths):

n_ast_errors: int64, 0 to 28
n_whitespaces: int64, 3 to 14k
commit_id: string, lengths 40 to 40
url: string, lengths 31 to 59
random_cut: string, lengths 16 to 15.8k
token_counts: int64, 6 to 2.13k
vocab_size: int64, 4 to 1.11k
repo: string, lengths 3 to 28
file_name: string, lengths 5 to 79
path: string, lengths 8 to 134
ast_levels: int64, 6 to 31
ast_errors: string, lengths 0 to 3.2k
d_id: int64, 44 to 121k
code: string, lengths 101 to 62.2k
nloc: int64, 1 to 548
fun_name: string, lengths 1 to 84
id: int64, 70 to 338k
n_identifiers: int64, 1 to 131
n_ast_nodes: int64, 15 to 19.2k
commit_message: string, lengths 2 to 15.3k
documentation: dict
complexity: int64, 1 to 66
language: string, 1 class (Python in every sample row)
n_words: int64, 4 to 4.82k

Each sample row below gives its values in this same column order. Rows with an empty ast_errors string omit that field, and the long text fields (random_cut, code, commit_message, documentation) remain as single unlabeled lines between the labeled scalar fields.
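Before the sample rows, a usage note: the snippet below is a minimal sketch of how a dump with this schema could be loaded and inspected with the Hugging Face datasets library. The dataset identifier in the example is a hypothetical placeholder, since the real repository name is not given in this excerpt.

    # Minimal sketch: load a dataset with the schema above and inspect one row.
    # "example-org/commit-function-corpus" is a hypothetical placeholder ID.
    from datasets import load_dataset

    ds = load_dataset("example-org/commit-function-corpus", split="train")

    # Column names should match the schema listed above.
    print(ds.column_names)

    # One sample row: repository metadata, the flattened function source,
    # and the docstring stored in the documentation dict.
    row = ds[0]
    print(row["repo"], row["path"], row["fun_name"])
    print(row["code"][:200])
    print(row["documentation"]["docstring"])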
n_ast_errors: 0 | n_whitespaces: 101
commit_id: f65417656ba8c59438d832b6e2a431f78d40c21c
url: https://github.com/pandas-dev/pandas.git
def rolling(self, *args, **kwargs) -> RollingGroupby: from pandas.core.window import RollingGroupby
token_counts: 48 | vocab_size: 17
repo: pandas | file_name: groupby.py
path: pandas/core/groupby/groupby.py
ast_levels: 9 | d_id: 40,113
def rolling(self, *args, **kwargs) -> RollingGroupby: from pandas.core.window import RollingGroupby return RollingGroupby( self._selected_obj, *args, _grouper=self.grouper, _as_index=self.as_index, **kwargs, )
nloc: 12 | fun_name: rolling | id: 167,770
n_identifiers: 13 | n_ast_nodes: 71
TYP: more return annotations in core/ (#47618) * TYP: more return annotations in core/ * from __future__ import annotations * more __future__
{ "docstring": "\n Return a rolling grouper, providing rolling functionality per group.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 8 }
complexity: 1 | language: Python | n_words: 18
n_ast_errors: 0 | n_whitespaces: 417
commit_id: 2a05ccdb07cff88e56661dee8a9271859354027f
url: https://github.com/networkx/networkx.git
def expected_degree_graph(w, seed=None, selfloops=True): r n = len(w) G = nx.empty_graph(n) # If there are no nodes are no edges in the graph, return the empty graph. if n == 0 or max(w) == 0: return G rho = 1 / sum(w) # Sort the weights in decreasing order. The original order of the # weights dictates the order of the (integer) node labels, so we # need to remember the permutation applied in the sorting. order = sorted(enumerate(w), key=itemgetter(1), reverse=True) mapping = {c: u for c, (u, v) in enumerate(order)} seq = [v for u, v in order] last = n if not selfloops: last -= 1 for u in range(last): v = u if not selfloops: v += 1 factor = seq[u] * rho p = min(seq[v] * factor, 1) while v < n and p > 0: if p != 1: r = seed.random() v += math.floor(math.log(r, 1 - p)) if v < n: q = min(seq[v] * factor, 1) if seed.random() < q / p: G.add_edge(mapping[u
token_counts: 240 | vocab_size: 97
repo: networkx | file_name: degree_seq.py
path: networkx/generators/degree_seq.py
ast_levels: 17 | d_id: 42,064
def expected_degree_graph(w, seed=None, selfloops=True): r n = len(w) G = nx.empty_graph(n) # If there are no nodes are no edges in the graph, return the empty graph. if n == 0 or max(w) == 0: return G rho = 1 / sum(w) # Sort the weights in decreasing order. The original order of the # weights dictates the order of the (integer) node labels, so we # need to remember the permutation applied in the sorting. order = sorted(enumerate(w), key=itemgetter(1), reverse=True) mapping = {c: u for c, (u, v) in enumerate(order)} seq = [v for u, v in order] last = n if not selfloops: last -= 1 for u in range(last): v = u if not selfloops: v += 1 factor = seq[u] * rho p = min(seq[v] * factor, 1) while v < n and p > 0: if p != 1: r = seed.random() v += math.floor(math.log(r, 1 - p)) if v < n: q = min(seq[v] * factor, 1) if seed.random() < q / p: G.add_edge(mapping[u], mapping[v]) v += 1 p = q return G
nloc: 100 | fun_name: expected_degree_graph | id: 176,730
n_identifiers: 35 | n_ast_nodes: 375
Remove redundant py2 numeric conversions (#5661) * Remove redundant float conversion * Remove redundant int conversion * Use integer division Co-authored-by: Miroslav Šedivý <[email protected]>
{ "docstring": "Returns a random graph with given expected degrees.\n\n Given a sequence of expected degrees $W=(w_0,w_1,\\ldots,w_{n-1})$\n of length $n$ this algorithm assigns an edge between node $u$ and\n node $v$ with probability\n\n .. math::\n\n p_{uv} = \\frac{w_u w_v}{\\sum_k w_k} .\n\n Parameters\n ----------\n w : list\n The list of expected degrees.\n selfloops: bool (default=True)\n Set to False to remove the possibility of self-loop edges.\n seed : integer, random_state, or None (default)\n Indicator of random number generation state.\n See :ref:`Randomness<randomness>`.\n\n Returns\n -------\n Graph\n\n Examples\n --------\n >>> z = [10 for i in range(100)]\n >>> G = nx.expected_degree_graph(z)\n\n Notes\n -----\n The nodes have integer labels corresponding to index of expected degrees\n input sequence.\n\n The complexity of this algorithm is $\\mathcal{O}(n+m)$ where $n$ is the\n number of nodes and $m$ is the expected number of edges.\n\n The model in [1]_ includes the possibility of self-loop edges.\n Set selfloops=False to produce a graph without self loops.\n\n For finite graphs this model doesn't produce exactly the given\n expected degree sequence. Instead the expected degrees are as\n follows.\n\n For the case without self loops (selfloops=False),\n\n .. math::\n\n E[deg(u)] = \\sum_{v \\ne u} p_{uv}\n = w_u \\left( 1 - \\frac{w_u}{\\sum_k w_k} \\right) .\n\n\n NetworkX uses the standard convention that a self-loop edge counts 2\n in the degree of a node, so with self loops (selfloops=True),\n\n .. math::\n\n E[deg(u)] = \\sum_{v \\ne u} p_{uv} + 2 p_{uu}\n = w_u \\left( 1 + \\frac{w_u}{\\sum_k w_k} \\right) .\n\n References\n ----------\n .. [1] Fan Chung and L. Lu, Connected components in random graphs with\n given expected degree sequences, Ann. Combinatorics, 6,\n pp. 125-145, 2002.\n .. [2] Joel Miller and Aric Hagberg,\n Efficient generation of networks with given expected degrees,\n in Algorithms and Models for the Web-Graph (WAW 2011),\n Alan Frieze, Paul Horn, and Paweł Prałat (Eds), LNCS 6732,\n pp. 115-126, 2011.\n ", "language": "en", "n_whitespaces": 524, "n_words": 298, "vocab_size": 173 }
complexity: 13 | language: Python | n_words: 179
n_ast_errors: 0 | n_whitespaces: 208
commit_id: 4c58179509e6f6047789efb0a95c2b0e20cb6c8f
url: https://github.com/mlflow/mlflow.git
def save(self, path): os.makedirs(path,
token_counts: 153 | vocab_size: 36
repo: mlflow | file_name: base.py
path: mlflow/models/evaluation/base.py
ast_levels: 13 | d_id: 2,897
def save(self, path): os.makedirs(path, exist_ok=True) with open(os.path.join(path, "metrics.json"), "w") as fp: json.dump(self.metrics, fp) artifacts_metadata = { artifact_name: { "uri": artifact.uri, "class_name": _get_fully_qualified_class_name(artifact), } for artifact_name, artifact in self.artifacts.items() } with open(os.path.join(path, "artifacts_metadata.json"), "w") as fp: json.dump(artifacts_metadata, fp) artifacts_dir = os.path.join(path, "artifacts") os.mkdir(artifacts_dir) for artifact_name, artifact in self.artifacts.items(): artifact._save(os.path.join(artifacts_dir, artifact_name))
nloc: 17 | fun_name: save | id: 19,151
n_identifiers: 22 | n_ast_nodes: 253
Improve evaluation api (#5256) * init Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update doc Signed-off-by: Weichen Xu <[email protected]> * update doc Signed-off-by: Weichen Xu <[email protected]> * address comments Signed-off-by: Weichen Xu <[email protected]> * update doc Signed-off-by: Weichen Xu <[email protected]> * add shap limitation on value type Signed-off-by: Weichen Xu <[email protected]> * fix format Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]> * update Signed-off-by: Weichen Xu <[email protected]>
{ "docstring": "Write the evaluation results to the specified local filesystem path", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
complexity: 3 | language: Python | n_words: 49
n_ast_errors: 0 | n_whitespaces: 154
commit_id: 3255fa4ebb9fbc1df6bb063c0eb77a0298ca8f72
url: https://github.com/getsentry/sentry.git
def test_build_group_generic_issue_attachment(self): event = self.store_event( data={"message": "Hello world", "level": "error"}, project_id=self.project.id ) event = event.for_group(event.groups[0]) occurrence = self.build_occurrence(level="info") occurrence.save(project_id=self.project.id) event.occurrence = occurrence event.group.type = GroupType.PROFILE_BLOCKED_THREAD attachments = SlackIssuesMessageBuilder(group=event.group, event=event).build() assert attachments["title"] == occurrence.issue_title assert attachments["text"] == occurrence.evidence_display[0].value assert attachments["fallback"] == f"[{self.project.slug}] {occurrence.issue_title}" assert attachments["color"] =
token_counts: 137 | vocab_size: 38
repo: sentry | file_name: test_message_builder.py
path: tests/sentry/integrations/slack/test_message_builder.py
ast_levels: 12 | d_id: 18,592
def test_build_group_generic_issue_attachment(self): event = self.store_event( data={"message": "Hello world", "level": "error"}, project_id=self.project.id ) event = event.for_group(event.groups[0]) occurrence = self.build_occurrence(level="info") occurrence.save(project_id=self.project.id) event.occurrence = occurrence event.group.type = GroupType.PROFILE_BLOCKED_THREAD attachments = SlackIssuesMessageBuilder(group=event.group, event=event).build() assert attachments["title"] == occurrence.issue_title assert attachments["text"] == occurrence.evidence_display[0].value assert attachments["fallback"] == f"[{self.project.slug}] {occurrence.issue_title}" assert attachments["color"] == "#2788CE" # blue for info level
nloc: 14 | fun_name: test_build_group_generic_issue_attachment | id: 89,933
n_identifiers: 25 | n_ast_nodes: 249
feat(integrations): Support generic issue type alerts (#42110) Add support for issue alerting integrations that use the message builder (Slack and MSTeams) for generic issue types. Preview text for Slack alert: <img width="350" alt="Screen Shot 2022-12-08 at 4 07 16 PM" src="https://user-images.githubusercontent.com/29959063/206593405-7a206d88-a31a-4e85-8c15-1f7534733ca7.png"> Slack generic issue alert shows the `occurrence.issue_title` and the "important" evidence value <img width="395" alt="Screen Shot 2022-12-08 at 4 11 20 PM" src="https://user-images.githubusercontent.com/29959063/206593408-6942d74d-4238-4df9-bfee-601ce2bc1098.png"> MSTeams generic issue alert shows the `occurrence.issue_title` and the "important" evidence value <img width="654" alt="Screen Shot 2022-12-08 at 4 13 45 PM" src="https://user-images.githubusercontent.com/29959063/206593410-2773746a-16b3-4652-ba2c-a7d5fdc76992.png"> Fixes #42047
{ "docstring": "Test that a generic issue type's Slack alert contains the expected values", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
complexity: 1 | language: Python | n_words: 51
n_ast_errors: 0 | n_whitespaces: 127
commit_id: b3bc4e734528d3b186c3a38a6e73e106c3555cc7
url: https://github.com/iperov/DeepFaceLive.git
def apply(self, func, mask=None) -> 'ImageProcessor': img = orig_img = self._img img = func(img).astype(orig_img.dtype) if img.ndim != 4: raise Exception('func used in ImageProcessor.apply changed format of image') if mask is not None:
token_counts: 82 | vocab_size: 34
repo: DeepFaceLive | file_name: ImageProcessor.py
path: xlib/image/ImageProcessor.py
ast_levels: 13 | d_id: 42,906
def apply(self, func, mask=None) -> 'ImageProcessor': img = orig_img = self._img img = func(img).astype(orig_img.dtype) if img.ndim != 4: raise Exception('func used in ImageProcessor.apply changed format of image') if mask is not None: mask = self._check_normalize_mask(mask) img = ne.evaluate('orig_img*(1-mask) + img*mask').astype(orig_img.dtype) self._img = img return self
nloc: 21 | fun_name: apply | id: 179,114
n_identifiers: 14 | n_ast_nodes: 137
ImageProcessor.py refactoring
{ "docstring": "\n apply your own function on internal image\n\n image has NHWC format. Do not change format, but dims can be changed.\n\n func callable (img) -> img\n\n example:\n\n .apply( lambda img: img-[102,127,63] )\n ", "language": "en", "n_whitespaces": 79, "n_words": 31, "vocab_size": 30 }
complexity: 3 | language: Python | n_words: 45
n_ast_errors: 0 | n_whitespaces: 110
commit_id: 843dba903757d592f7703a83ebd75eb3ffb46f6f
url: https://github.com/microsoft/recommenders.git
def predict(self, x): # start the timer self.timer.start() v_, _ = self
token_counts: 65 | vocab_size: 30
repo: recommenders | file_name: rbm.py
path: recommenders/models/rbm/rbm.py
ast_levels: 12 | d_id: 7,073
def predict(self, x): # start the timer self.timer.start() v_, _ = self.eval_out() # evaluate the ratings and the associated probabilities vp = self.sess.run(v_, feed_dict={self.vu: x}) # stop the timer self.timer.stop() log.info("Done inference, time %f2" % self.timer.interval) return vp
nloc: 7 | fun_name: predict | id: 39,007
n_identifiers: 17 | n_ast_nodes: 111
removed time from returning args
{ "docstring": "Returns the inferred ratings. This method is similar to recommend_k_items() with the\n exceptions that it returns all the inferred ratings\n\n Basic mechanics:\n\n The method samples new ratings from the learned joint distribution, together with\n their probabilities. The input x must have the same number of columns as the one used\n for training the model, i.e. the same number of items, but it can have an arbitrary number\n of rows (users).\n\n Args:\n x (numpy.ndarray, int32): Input user/affinity matrix. Note that this can be a single vector, i.e.\n the ratings of a single user.\n\n Returns:\n numpy.ndarray, float:\n - A matrix with the inferred ratings.\n - The elapsed time for predediction.\n ", "language": "en", "n_whitespaces": 226, "n_words": 108, "vocab_size": 73 }
complexity: 1 | language: Python | n_words: 38
n_ast_errors: 0 | n_whitespaces: 74
commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526
url: https://github.com/XX-net/XX-Net.git
def raw_decode(self, s, idx=0): try: obj, end = self.scan_once(s, idx) except StopIteration as err: raise JSONDecodeError("Expecting value", s, err.val
token_counts: 48 | vocab_size: 21
repo: XX-Net | file_name: decoder.py
path: python3.10.4/Lib/json/decoder.py
ast_levels: 11 | d_id: 55,394
def raw_decode(self, s, idx=0): try: obj, end = self.scan_once(s, idx) except StopIteration as err: raise JSONDecodeError("Expecting value", s, err.value) from None return obj, end
nloc: 6 | fun_name: raw_decode | id: 218,569
n_identifiers: 11 | n_ast_nodes: 76
add python 3.10.4 for windows
{ "docstring": "Decode a JSON document from ``s`` (a ``str`` beginning with\n a JSON document) and return a 2-tuple of the Python\n representation and the index in ``s`` where the document ended.\n\n This can be used to decode a JSON document from a string that may\n have extraneous data at the end.\n\n ", "language": "en", "n_whitespaces": 85, "n_words": 50, "vocab_size": 36 }
complexity: 2 | language: Python | n_words: 24
n_ast_errors: 1 | n_whitespaces: 45
commit_id: aa1f40a93a882db304e9a06c2a11d93b2532d80a
url: https://github.com/networkx/networkx.git
def has_bridges(G, root=None): try: next(bridges
token_counts: 28 | vocab_size: 13
repo: networkx | file_name: bridges.py
path: networkx/algorithms/bridges.py
ast_levels: 11 | ast_errors: @not_implemented_for("multigraph") @not_implemented_for("directed")
d_id: 41,965
def has_bridges(G, root=None): try: next(bridges(G)) except StopIteration: return False else: return True @not_implemented_for("multigraph") @not_implemented_for("directed")
nloc: 7 | fun_name: has_bridges | id: 176,561
n_identifiers: 7 | n_ast_nodes: 70
Improve bridges documentation (#5519) * Fix bridges documentation * Revert source code modification * Revert raise errors for multigraphs
{ "docstring": "Decide whether a graph has any bridges.\n\n A *bridge* in a graph is an edge whose removal causes the number of\n connected components of the graph to increase.\n\n Parameters\n ----------\n G : undirected graph\n\n root : node (optional)\n A node in the graph `G`. If specified, only the bridges in the\n connected component containing this node will be considered.\n\n Returns\n -------\n bool\n Whether the graph (or the connected component containing `root`)\n has any bridges.\n\n Raises\n ------\n NodeNotFound\n If `root` is not in the graph `G`.\n\n NetworkXNotImplemented\n If `G` is a directed graph.\n\n Examples\n --------\n The barbell graph with parameter zero has a single bridge::\n\n >>> G = nx.barbell_graph(10, 0)\n >>> nx.has_bridges(G)\n True\n\n On the other hand, the cycle graph has no bridges::\n\n >>> G = nx.cycle_graph(5)\n >>> nx.has_bridges(G)\n False\n\n Notes\n -----\n This implementation uses the :func:`networkx.bridges` function, so\n it shares its worst-case time complexity, $O(m + n)$, ignoring\n polylogarithmic factors, where $n$ is the number of nodes in the\n graph and $m$ is the number of edges.\n\n ", "language": "en", "n_whitespaces": 318, "n_words": 167, "vocab_size": 106 }
complexity: 2 | language: Python | n_words: 14
n_ast_errors: 0 | n_whitespaces: 112
commit_id: f638f5d0e6c8ebed0e69a6584bc7f003ec646580
url: https://github.com/jindongwang/transferlearning.git
def wheel_metadata(source, dist_info_dir): # type: (ZipFile, str) -> Message path = f"{dist_info_dir}/WHEEL" # Zip file path separators must be / wheel_contents = read_wheel_metadata_file(source, path) try: wheel_text = wheel_contents.decode() except UnicodeDecodeError as e: raise UnsupportedWheel(f"error decoding {path!r}: {e!r}") # FeedParser (used by Parser) does not raise any exceptions. The returned # message may have .defects populated, but for backwards-compatibility
token_counts: 49 | vocab_size: 57
repo: transferlearning | file_name: wheel.py
path: .venv/lib/python3.8/site-packages/pip/_internal/utils/wheel.py
ast_levels: 12 | d_id: 12,520
def wheel_metadata(source, dist_info_dir): # type: (ZipFile, str) -> Message path = f"{dist_info_dir}/WHEEL" # Zip file path separators must be / wheel_contents = read_wheel_metadata_file(source, path) try: wheel_text = wheel_contents.decode() except UnicodeDecodeError as e: raise UnsupportedWheel(f"error decoding {path!r}: {e!r}") # FeedParser (used by Parser) does not raise any exceptions. The returned # message may have .defects populated, but for backwards-compatibility we # currently ignore them. return Parser().parsestr(wheel_text)
nloc: 8 | fun_name: wheel_metadata | id: 61,338
n_identifiers: 13 | n_ast_nodes: 103
upd; format
{ "docstring": "Return the WHEEL metadata of an extracted wheel, if possible.\n Otherwise, raise UnsupportedWheel.\n ", "language": "en", "n_whitespaces": 19, "n_words": 13, "vocab_size": 13 }
complexity: 2 | language: Python | n_words: 65
n_ast_errors: 0 | n_whitespaces: 172
commit_id: e35be138148333078284b942ccc9ed7b1d826f97
url: https://github.com/huggingface/datasets.git
def remove_column(self, i, *args, **kwargs): table = self.table.remove_column(i, *args, **kwargs) name = self.table.column_names[i] blocks = [] for tables in self.blocks: blocks.append( [ t.remove_colu
token_counts: 96 | vocab_size: 29
repo: datasets | file_name: table.py
path: src/datasets/table.py
ast_levels: 16 | d_id: 21,852
def remove_column(self, i, *args, **kwargs): table = self.table.remove_column(i, *args, **kwargs) name = self.table.column_names[i] blocks = [] for tables in self.blocks: blocks.append( [ t.remove_column(t.column_names.index(name), *args, **kwargs) if name in t.column_names else t for t in tables ] ) return ConcatenationTable(table, blocks)
nloc: 12 | fun_name: remove_column | id: 104,416
n_identifiers: 14 | n_ast_nodes: 145
Update docs to new frontend/UI (#3690) * WIP: update docs to new UI * make style * Rm unused * inject_arrow_table_documentation __annotations__ * hasattr(arrow_table_method, "__annotations__") * Update task_template.rst * Codeblock PT-TF-SPLIT * Convert loading scripts * Convert docs to mdx * Fix mdx * Add <Tip> * Convert mdx tables * Fix codeblock * Rm unneded hashlinks * Update index.mdx * Redo dev change * Rm circle ci `build_doc` & `deploy_doc` * Rm unneeded files * Update docs reamde * Standardize to `Example::` * mdx logging levels doc * Table properties inject_arrow_table_documentation * ``` to ```py mdx * Add Tips mdx * important,None -> <Tip warning={true}> * More misc * Center imgs * Update instllation page * `setup.py` docs section * Rm imgs since they are in hf.co * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * Update index mdx * Update docs/source/access.mdx Co-authored-by: Steven Liu <[email protected]> * just `Dataset` obj * Addedversion just italics * Update ReadInstruction doc example syntax * Change docstring for `prepare_for_task` * Chore * Remove `code` syntax from headings * Rm `code` syntax from headings * Hashlink backward compatability * S3FileSystem doc * S3FileSystem doc updates * index.mdx updates * Add darkmode gifs * Index logo img css classes * Index mdx dataset logo img size * Docs for DownloadMode class * Doc DownloadMode table * format docstrings * style * Add doc builder scripts (#3790) * add doc builder scripts * fix docker image * Docs new UI actions no self hosted (#3793) * No self hosted * replace doc injection by actual docstrings * Docstring formatted Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> Co-authored-by: Lysandre Debut <[email protected]> Co-authored-by: Mishig Davaadorj <[email protected]> * Rm notebooks from docs actions since they dont exi * Update tsting branch * More docstring * Chore * bump up node version * bump up node * ``` -> ```py for audio_process.mdx * Update .github/workflows/build_documentation.yml Co-authored-by: Quentin Lhoest <[email protected]> * Uodate dev doc build * remove run on PR * fix action * Fix gh doc workflow * forgot this change when merging master * Update build doc Co-authored-by: Steven Liu <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: Lysandre Debut <[email protected]>
{ "docstring": "\n Create new Table with the indicated column removed.\n\n Args:\n i (:obj:`int`):\n Index of column to remove.\n\n Returns:\n :class:`datasets.table.Table`:\n New table without the column.\n ", "language": "en", "n_whitespaces": 104, "n_words": 23, "vocab_size": 21 }
complexity: 4 | language: Python | n_words: 40
n_ast_errors: 0 | n_whitespaces: 53
commit_id: 3a461d02793e6f9d41c2b1a92647e691de1abaac
url: https://github.com/netbox-community/netbox.git
def test_cable_cannot_terminate_to_a_wireless_interface(self): wireless_interface = Interface(device=self.device1, name="W1", type=InterfaceTypeChoices.TYPE_80211A) cable = Cable(a_terminations=[self.interface2], b_terminations=[wireless_interface]) with self.assertRaises(ValidationError): cable.clean()
token_counts: 57 | vocab_size: 13
repo: netbox | file_name: test_models.py
path: netbox/dcim/tests/test_models.py
ast_levels: 11 | d_id: 77,897
def test_cable_cannot_terminate_to_a_wireless_interface(self): wireless_interface = Interface(device=self.device1, name="W1", type=InterfaceTypeChoices.TYPE_80211A) cable = Cable(a_terminations=[self.interface2], b_terminations=[wireless_interface]) with self.assertRaises(ValidationError): cable.clean()
nloc: 5 | fun_name: test_cable_cannot_terminate_to_a_wireless_interface | id: 264,886
n_identifiers: 18 | n_ast_nodes: 95
Update Cable instantiations to match new signature
{ "docstring": "\n A cable cannot terminate to a wireless interface\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
complexity: 1 | language: Python | n_words: 14
n_ast_errors: 0 | n_whitespaces: 114
commit_id: 9c19aff7c7561e3a82978a272ecdaad40dda5c00
url: https://github.com/django/django.git
def get_test_db_clone_settings(self, suffix): # When this function is called, the test database has been created # already and its name has been copied to
token_counts: 35 | vocab_size: 38
repo: django | file_name: creation.py
path: django/db/backends/base/creation.py
ast_levels: 11 | d_id: 50,917
def get_test_db_clone_settings(self, suffix): # When this function is called, the test database has been created # already and its name has been copied to settings_dict['NAME'] so # we don't need to call _get_test_db_name. orig_settings_dict = self.connection.settings_dict return { **orig_settings_dict, "NAME": "{}_{}".format(orig_settings_dict["NAME"], suffix), }
nloc: 6 | fun_name: get_test_db_clone_settings | id: 204,838
n_identifiers: 7 | n_ast_nodes: 63
Refs #33476 -- Reformatted code with Black.
{ "docstring": "\n Return a modified connection settings dict for the n-th clone of a DB.\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 12 }
complexity: 1 | language: Python | n_words: 43
n_ast_errors: 0 | n_whitespaces: 52
commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526
url: https://github.com/XX-net/XX-Net.git
def open(self, host='', port=IMAP4_PORT, timeout=None): self.host = host self.port = port self.sock = self._create_socket(timeout) self.file = self.sock.makefile('rb')
token_counts: 50 | vocab_size: 14
repo: XX-Net | file_name: imaplib.py
path: python3.10.4/Lib/imaplib.py
ast_levels: 9 | d_id: 55,005
def open(self, host='', port=IMAP4_PORT, timeout=None): self.host = host self.port = port self.sock = self._create_socket(timeout) self.file = self.sock.makefile('rb')
nloc: 5 | fun_name: open | id: 217,907
n_identifiers: 10 | n_ast_nodes: 83
add python 3.10.4 for windows
{ "docstring": "Setup connection to remote server on \"host:port\"\n (default: localhost:standard IMAP4 port).\n This connection will be used by the routines:\n read, readline, send, shutdown.\n ", "language": "en", "n_whitespaces": 59, "n_words": 23, "vocab_size": 22 }
complexity: 1 | language: Python | n_words: 17
n_ast_errors: 0 | n_whitespaces: 42
commit_id: 7f27e70440c177b2a047b7f74a78ed5cd5b4b596
url: https://github.com/Textualize/textual.git
def synchronized_output_end_sequence(self) -> str: if self.synchronised_output: return
token_counts: 25 | vocab_size: 9
repo: textual | file_name: _terminal_features.py
path: src/textual/_terminal_features.py
ast_levels: 10 | d_id: 44,257
def synchronized_output_end_sequence(self) -> str: if self.synchronised_output: return TERMINAL_MODES_ANSI_SEQUENCES[Mode.SynchronizedOutput]["end_sync"] return ""
nloc: 13 | fun_name: synchronized_output_end_sequence | id: 183,574
n_identifiers: 7 | n_ast_nodes: 45
[terminal buffering] Address PR feedback
{ "docstring": "\n Returns the ANSI sequence that we should send to the terminal to tell it that\n it should stop buffering the content we're about to send.\n If the terminal doesn't seem to support synchronised updates the string will be empty.\n\n Returns:\n str: the \"synchronised output stop\" ANSI sequence. It will be ab empty string\n if the terminal emulator doesn't seem to support the \"synchronised updates\" mode.\n ", "language": "en", "n_whitespaces": 127, "n_words": 65, "vocab_size": 41 }
complexity: 2 | language: Python | n_words: 10
n_ast_errors: 0 | n_whitespaces: 98
commit_id: f6021faf2a8e62f88a8d6979ce812dcb71133a8f
url: https://github.com/jaakkopasanen/AutoEq.git
def _band_penalty_coefficients(self, fc, q, gain, filter_frs): ref_frs = biquad.digital_coeffs(self.frequenc
token_counts: 121 | vocab_size: 34
repo: AutoEq | file_name: frequency_response.py
path: frequency_response.py
ast_levels: 12 | d_id: 39,253
def _band_penalty_coefficients(self, fc, q, gain, filter_frs): ref_frs = biquad.digital_coeffs(self.frequency, 192e3, *biquad.peaking(fc, q, gain, fs=192e3)) est_sums = np.sum(filter_frs, axis=1) ref_sums = np.sum(ref_frs, axis=1) penalties = np.zeros((len(fc),)) mask = np.squeeze(ref_sums) != 0.0 penalties[mask] = est_sums[mask] / ref_sums[mask] return 10 * (1 - np.expand_dims(penalties, 1))
nloc: 8 | fun_name: _band_penalty_coefficients | id: 162,681
n_identifiers: 23 | n_ast_nodes: 176
Improved quality regularization to a point where it works well. 10 kHz to 20 kHz is RMSE is calculated from the average levels. Split neo PEQ notebook by band and Q.
{ "docstring": "Calculates penalty coefficients for filters if their transition bands extend beyond Nyquist frequency\n\n The calculation is based on ratio of frequency response integrals between 44.1 kHz and 192 kHz\n\n Args:\n fc: Filter center frequencies, 1-D array\n q: Filter qualities, 1-D array\n gain: Filter gains, 1-D array\n filter_frs: Filter frequency responses, 2-D array, one fr per row\n\n Returns:\n Column array of penalty coefficients, one per filter\n ", "language": "en", "n_whitespaces": 148, "n_words": 65, "vocab_size": 50 }
complexity: 1 | language: Python | n_words: 42
n_ast_errors: 0 | n_whitespaces: 201
commit_id: 02b04cb3ecfc5fce1f627281c312753f3b4b8494
url: https://github.com/scikit-learn/scikit-learn.git
def test_predict_on_toy_problem(global_random_seed): clf1 = LogisticRegression(random_state=global_random_seed) clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed) clf3 = GaussianNB() X = np.array( [[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2], [2.1, 1.4], [3.1, 2.3]] ) y = np.array([1, 1, 1, 2, 2, 2]) assert_array_equal(clf1.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) assert_array_equal(clf2.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) assert_array_equal(clf3.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) eclf = VotingClassifier( estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="hard", weights=[1, 1, 1], ) assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) eclf = VotingClassifier( estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="soft", weights=[1, 1, 1], ) assert_array
token_counts: 357 | vocab_size: 48
repo: scikit-learn | file_name: test_voting.py
path: sklearn/ensemble/tests/test_voting.py
ast_levels: 12 | d_id: 76,664
def test_predict_on_toy_problem(global_random_seed): clf1 = LogisticRegression(random_state=global_random_seed) clf2 = RandomForestClassifier(n_estimators=10, random_state=global_random_seed) clf3 = GaussianNB() X = np.array( [[-1.1, -1.5], [-1.2, -1.4], [-3.4, -2.2], [1.1, 1.2], [2.1, 1.4], [3.1, 2.3]] ) y = np.array([1, 1, 1, 2, 2, 2]) assert_array_equal(clf1.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) assert_array_equal(clf2.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) assert_array_equal(clf3.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) eclf = VotingClassifier( estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="hard", weights=[1, 1, 1], ) assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2]) eclf = VotingClassifier( estimators=[("lr", clf1), ("rf", clf2), ("gnb", clf3)], voting="soft", weights=[1, 1, 1], ) assert_array_equal(eclf.fit(X, y).predict(X), [1, 1, 1, 2, 2, 2])
nloc: 23 | fun_name: test_predict_on_toy_problem | id: 261,153
n_identifiers: 22 | n_ast_nodes: 469
TST use global_random_seed in sklearn/ensemble/tests/test_voting.py (#24282) Co-authored-by: Jérémie du Boisberranger <[email protected]>
{ "docstring": "Manually check predicted class labels for toy dataset.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
complexity: 1 | language: Python | n_words: 104
n_ast_errors: 0 | n_whitespaces: 29
commit_id: 5a850eb044ca07f1f3bcb1b284116d6f2d37df1b
url: https://github.com/scikit-learn/scikit-learn.git
def fit_transform(self, X, y=None): self._validate_params() return self._tran
token_counts: 28 | vocab_size: 8
repo: scikit-learn | file_name: _dict_vectorizer.py
path: sklearn/feature_extraction/_dict_vectorizer.py
ast_levels: 8 | d_id: 76,257
def fit_transform(self, X, y=None): self._validate_params() return self._transform(X, fitting=True)
nloc: 3 | fun_name: fit_transform | id: 260,448
n_identifiers: 7 | n_ast_nodes: 45
MAINT Param validation for Dictvectorizer (#23820)
{ "docstring": "Learn a list of feature name -> indices mappings and transform X.\n\n Like fit(X) followed by transform(X), but does not require\n materializing X in memory.\n\n Parameters\n ----------\n X : Mapping or iterable over Mappings\n Dict(s) or Mapping(s) from feature names (arbitrary Python\n objects) to feature values (strings or convertible to dtype).\n\n .. versionchanged:: 0.24\n Accepts multiple string values for one categorical feature.\n\n y : (ignored)\n Ignored parameter.\n\n Returns\n -------\n Xa : {array, sparse matrix}\n Feature vectors; always 2-d.\n ", "language": "en", "n_whitespaces": 217, "n_words": 78, "vocab_size": 69 }
complexity: 1 | language: Python | n_words: 8
n_ast_errors: 0 | n_whitespaces: 761
commit_id: 0877fb0d78635692e481c8bde224fac5ad0dd430
url: https://github.com/qutebrowser/qutebrowser.git
def _on_feature_permission_requested(self, url, feature): page = self._widget.page() grant_permission = functools.partial( page.setFeaturePermission, url, feature, QWebEnginePage.PermissionPolicy.PermissionGrantedByUser) deny_permission = functools.partial( page.setFeaturePermission, url, feature, QWebEnginePage.PermissionPolicy.PermissionDeniedByUser) permission_str = debug.qenum_key(QWebEnginePage, feature) if not url.isValid(): # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-85116 is_qtbug = (qtutils.version_check('5.15.0', compiled=False, exact=True) and self._tab.is_private and feature == QWebEnginePage.Feature.Notifications) logger = log.webview.debug if is_qtbug else log.webview.warning logger("Ignoring feature permission {} for invalid URL {}".format( permission_str, url)) deny_permission() return if feature not in self._options: log.webview.error("Unhandled feature permission {}".format( permission_str)) deny_permission() return if ( feature in [QWebEnginePage.Feature.DesktopVideoCapture, QWebEnginePage.Feature.DesktopAudioVideoCapture] and qtutils.version_check('5.13', compiled=
token_counts: 301 | vocab_size: 84
repo: qutebrowser | file_name: webenginetab.py
path: qutebrowser/browser/webengine/webenginetab.py
ast_levels: 14 | d_id: 117,565
def _on_feature_permission_requested(self, url, feature): page = self._widget.page() grant_permission = functools.partial( page.setFeaturePermission, url, feature, QWebEnginePage.PermissionPolicy.PermissionGrantedByUser) deny_permission = functools.partial( page.setFeaturePermission, url, feature, QWebEnginePage.PermissionPolicy.PermissionDeniedByUser) permission_str = debug.qenum_key(QWebEnginePage, feature) if not url.isValid(): # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-85116 is_qtbug = (qtutils.version_check('5.15.0', compiled=False, exact=True) and self._tab.is_private and feature == QWebEnginePage.Feature.Notifications) logger = log.webview.debug if is_qtbug else log.webview.warning logger("Ignoring feature permission {} for invalid URL {}".format( permission_str, url)) deny_permission() return if feature not in self._options: log.webview.error("Unhandled feature permission {}".format( permission_str)) deny_permission() return if ( feature in [QWebEnginePage.Feature.DesktopVideoCapture, QWebEnginePage.Feature.DesktopAudioVideoCapture] and qtutils.version_check('5.13', compiled=False) and not qtutils.version_check('5.13.2', compiled=False) ): # WORKAROUND for https://bugreports.qt.io/browse/QTBUG-78016 log.webview.warning("Ignoring desktop sharing request due to " "crashes in Qt < 5.13.2") deny_permission() return question = shared.feature_permission( url=url.adjusted(QUrl.UrlFormattingOption.RemovePath), option=self._options[feature], msg=self._messages[feature], yes_action=grant_permission, no_action=deny_permission, abort_on=[self._tab.abort_questions]) if question is not None: page.featurePermissionRequestCanceled.connect( functools.partial(self._on_feature_permission_cancelled, question, url, feature))
nloc: 44 | fun_name: _on_feature_permission_requested | id: 321,150
n_identifiers: 54 | n_ast_nodes: 470
Run scripts/dev/rewrite_enums.py
{ "docstring": "Ask the user for approval for geolocation/media/etc..", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 6 }
complexity: 10 | language: Python | n_words: 125
n_ast_errors: 0 | n_whitespaces: 784
commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526
url: https://github.com/XX-net/XX-Net.git
def add_find_python(self): start = 402 for ver in self.versions: install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % ver machine_reg = "python.machine." + ver user_reg = "python.user." + ver machine_prop = "PYTHON.MACHINE." + ver user_prop = "PYTHON.USER." + ver machine_action = "Pyth
token_counts: 304 | vocab_size: 86
repo: XX-Net | file_name: bdist_msi.py
path: python3.10.4/Lib/distutils/command/bdist_msi.py
ast_levels: 14 | d_id: 56,684
def add_find_python(self): start = 402 for ver in self.versions: install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % ver machine_reg = "python.machine." + ver user_reg = "python.user." + ver machine_prop = "PYTHON.MACHINE." + ver user_prop = "PYTHON.USER." + ver machine_action = "PythonFromMachine" + ver user_action = "PythonFromUser" + ver exe_action = "PythonExe" + ver target_dir_prop = "TARGETDIR" + ver exe_prop = "PYTHON" + ver if msilib.Win64: # type: msidbLocatorTypeRawValue + msidbLocatorType64bit Type = 2+16 else: Type = 2 add_data(self.db, "RegLocator", [(machine_reg, 2, install_path, None, Type), (user_reg, 1, install_path, None, Type)]) add_data(self.db, "AppSearch", [(machine_prop, machine_reg), (user_prop, user_reg)]) add_data(self.db, "CustomAction", [(machine_action, 51+256, target_dir_prop, "[" + machine_prop + "]"), (user_action, 51+256, target_dir_prop, "[" + user_prop + "]"), (exe_action, 51+256, exe_prop, "[" + target_dir_prop + "]\\python.exe"), ]) add_data(self.db, "InstallExecuteSequence", [(machine_action, machine_prop, start), (user_action, user_prop, start + 1), (exe_action, None, start + 2), ]) add_data(self.db, "InstallUISequence", [(machine_action, machine_prop, start), (user_action, user_prop, start + 1), (exe_action, None, start + 2), ]) add_data(self.db, "Condition", [("Python" + ver, 0, "NOT TARGETDIR" + ver)]) start += 4 assert start < 500
nloc: 42 | fun_name: add_find_python | id: 222,643
n_identifiers: 20 | n_ast_nodes: 469
add python 3.10.4 for windows
{ "docstring": "Adds code to the installer to compute the location of Python.\n\n Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the\n registry for each version of Python.\n\n Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined,\n else from PYTHON.MACHINE.X.Y.\n\n Properties PYTHONX.Y will be set to TARGETDIRX.Y\\\\python.exe", "language": "en", "n_whitespaces": 79, "n_words": 45, "vocab_size": 28 }
complexity: 3 | language: Python | n_words: 167
n_ast_errors: 0 | n_whitespaces: 45
commit_id: f638f5d0e6c8ebed0e69a6584bc7f003ec646580
url: https://github.com/jindongwang/transferlearning.git
def write_exports(self, exports): rf = self
token_counts: 32 | vocab_size: 13
repo: transferlearning | file_name: database.py
path: .venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py
ast_levels: 11 | d_id: 12,781
def write_exports(self, exports): rf = self.get_distinfo_file(EXPORTS_FILENAME) with open(rf, 'w') as f: write_exports(exports, f)
nloc: 4 | fun_name: write_exports | id: 61,961
n_identifiers: 8 | n_ast_nodes: 57
upd; format
{ "docstring": "\n Write a dictionary of exports to a file in .ini format.\n :param exports: A dictionary of exports, mapping an export category to\n a list of :class:`ExportEntry` instances describing the\n individual export entries.\n ", "language": "en", "n_whitespaces": 100, "n_words": 32, "vocab_size": 25 }
complexity: 1 | language: Python | n_words: 13
n_ast_errors: 0 | n_whitespaces: 685
commit_id: 621e782ed0c119d2c84124d006fdf253c082449a
url: https://github.com/ansible/ansible.git
def _get_action_handler_with_module_context(self, connection, templar): module_collection, separator, module_name = self._task.action.rpartition(".") module_prefix = module_name.split('_')[0] if module_collection: # For network modules, which look for one action plugin per platform, look for the # action plugin in the same collection as the module by prefixing the action plugin # with the same collecti
token_counts: 264 | vocab_size: 117
repo: ansible | file_name: task_executor.py
path: lib/ansible/executor/task_executor.py
ast_levels: 15 | d_id: 78,856
def _get_action_handler_with_module_context(self, connection, templar): module_collection, separator, module_name = self._task.action.rpartition(".") module_prefix = module_name.split('_')[0] if module_collection: # For network modules, which look for one action plugin per platform, look for the # action plugin in the same collection as the module by prefixing the action plugin # with the same collection. network_action = "{0}.{1}".format(module_collection, module_prefix) else: network_action = module_prefix collections = self._task.collections # Check if the module has specified an action handler module = self._shared_loader_obj.module_loader.find_plugin_with_context( self._task.action, collection_list=collections ) if not module.resolved or not module.action_plugin: module = None if module is not None: handler_name = module.action_plugin # let action plugin override module, fallback to 'normal' action plugin otherwise elif self._shared_loader_obj.action_loader.has_plugin(self._task.action, collection_list=collections): handler_name = self._task.action elif all((module_prefix in C.NETWORK_GROUP_MODULES, self._shared_loader_obj.action_loader.has_plugin(network_action, collection_list=collections))): handler_name = network_action display.vvvv("Using network group action {handler} for {action}".format(handler=handler_name, action=self._task.action), host=self._play_context.remote_addr) else: # use ansible.legacy.normal to allow (historic) local action_plugins/ override without collections search handler_name = 'ansible.legacy.normal' collections = None # until then, we don't want the task's collection list to be consulted; use the builtin handler = self._shared_loader_obj.action_loader.get( handler_name, task=self._task, connection=connection, play_context=self._play_context, loader=self._loader, templar=templar, shared_loader_obj=self._shared_loader_obj, collection_list=collections ) if not handler: raise AnsibleError("the handler '%s' was not found" % handler_name) return handler, module
nloc: 38 | fun_name: _get_action_handler_with_module_context | id: 267,337
n_identifiers: 41 | n_ast_nodes: 420
Add toggle to fix module_defaults with module-as-redirected-action on a per-module basis (#77265) * If there is a platform specific handler, prefer the resolved module over the resolved action when loading module_defaults Add a toggle for action plugins to prefer the resolved module when loading module_defaults Allow moving away from modules intercepted as actions pattern Fixes #77059
{ "docstring": "\n Returns the correct action plugin to handle the requestion task action and the module context\n ", "language": "en", "n_whitespaces": 30, "n_words": 15, "vocab_size": 12 }
complexity: 8 | language: Python | n_words: 191
n_ast_errors: 0 | n_whitespaces: 161
commit_id: c17ff17a18f21be60c6916714ac8afd87d4441df
url: https://github.com/coqui-ai/TTS.git
def forward(self, y_hat, y, length): mask = sequence_mask(sequence_length=length, max_len=y.size(1)).unsqueeze(2) y_norm = sample_wise_min_max(y, mask) y_hat_norm = sample_wise_min_max(y_hat, mask) ssim_loss = self.loss_func((y_norm * mask).unsqueeze(1), (y_hat_norm * mask).unsqueeze(1)) if ssim_loss.item() > 1.0: print(f" > SSIM loss is out-of-range {ssim_loss.item()}, setting it 1.0") ssim_loss == 1.0 if ssim_loss.item() < 0.0: print(f" > SSIM loss is out-of-range {ssim_loss.item()}, setting it 0.0")
token_counts: 122 | vocab_size: 40
repo: TTS | file_name: losses.py
path: TTS/tts/layers/losses.py
ast_levels: 13 | d_id: 77,241
def forward(self, y_hat, y, length): mask = sequence_mask(sequence_length=length, max_len=y.size(1)).unsqueeze(2) y_norm = sample_wise_min_max(y, mask) y_hat_norm = sample_wise_min_max(y_hat, mask) ssim_loss = self.loss_func((y_norm * mask).unsqueeze(1), (y_hat_norm * mask).unsqueeze(1)) if ssim_loss.item() > 1.0: print(f" > SSIM loss is out-of-range {ssim_loss.item()}, setting it 1.0") ssim_loss == 1.0 if ssim_loss.item() < 0.0: print(f" > SSIM loss is out-of-range {ssim_loss.item()}, setting it 0.0") ssim_loss == 0.0 return ssim_loss
nloc: 12 | fun_name: forward | id: 262,500
n_identifiers: 18 | n_ast_nodes: 203
Fix SSIM loss
{ "docstring": "\n Args:\n y_hat (tensor): model prediction values.\n y (tensor): target values.\n length (tensor): length of each sample in a batch for masking.\n\n Shapes:\n y_hat: B x T X D\n y: B x T x D\n length: B\n\n Returns:\n loss: An average loss value in range [0, 1] masked by the length.\n ", "language": "en", "n_whitespaces": 157, "n_words": 50, "vocab_size": 39 }
complexity: 3 | language: Python | n_words: 61
n_ast_errors: 0 | n_whitespaces: 67
commit_id: 7346c288e307e1821e3ceb757d686c9bd879389c
url: https://github.com/django/django.git
def get_commands(): commands = {name: 'django.core' for name in find_commands(__path__[0])} if not settings.configured: return commands for app_config in reversed(apps.get_app_configs()): path = os.path.join(app_config.path, 'management') commands.update({n
token_counts: 77 | vocab_size: 22
repo: django | file_name: __init__.py
path: django/core/management/__init__.py
ast_levels: 13 | d_id: 50,200
def get_commands(): commands = {name: 'django.core' for name in find_commands(__path__[0])} if not settings.configured: return commands for app_config in reversed(apps.get_app_configs()): path = os.path.join(app_config.path, 'management') commands.update({name: app_config.name for name in find_commands(path)}) return commands
nloc: 8 | fun_name: get_commands | id: 202,989
n_identifiers: 15 | n_ast_nodes: 126
Refs #32355 -- Removed unnecessary list() calls before reversed() on dictviews. Dict and dictviews are iterable in reversed insertion order using reversed() in Python 3.8+.
{ "docstring": "\n Return a dictionary mapping command names to their callback applications.\n\n Look for a management.commands package in django.core, and in each\n installed application -- if a commands package exists, register all\n commands in that package.\n\n Core commands are always included. If a settings module has been\n specified, also include user-defined commands.\n\n The dictionary is in the format {command_name: app_name}. Key-value\n pairs from this dictionary can then be used in calls to\n load_command_class(app_name, command_name)\n\n If a specific version of a command must be loaded (e.g., with the\n startapp command), the instantiated module can be placed in the\n dictionary in place of the application name.\n\n The dictionary is cached on the first call and reused on subsequent\n calls.\n ", "language": "en", "n_whitespaces": 161, "n_words": 115, "vocab_size": 79 }
complexity: 5 | language: Python | n_words: 31
n_ast_errors: 0 | n_whitespaces: 193
commit_id: 8198943edd73a363c266633e1aa5b2a9e9c9f526
url: https://github.com/XX-net/XX-Net.git
def getphraselist(self): plist = [] while self.pos < len(self.field): if self.field[self.pos] in self.FWS: self.pos += 1 elif self.field[self.pos] == '"': plist.append(self.getquote()) elif self.field[self.pos] == '(': s
token_counts: 119 | vocab_size: 26
repo: XX-Net | file_name: _parseaddr.py
path: python3.10.4/Lib/email/_parseaddr.py
ast_levels: 15 | d_id: 57,004
def getphraselist(self): plist = [] while self.pos < len(self.field): if self.field[self.pos] in self.FWS: self.pos += 1 elif self.field[self.pos] == '"': plist.append(self.getquote()) elif self.field[self.pos] == '(': self.commentlist.append(self.getcomment()) elif self.field[self.pos] in self.phraseends: break else: plist.append(self.getatom(self.phraseends)) return plist
nloc: 14 | fun_name: getphraselist | id: 223,611
n_identifiers: 13 | n_ast_nodes: 196
add python 3.10.4 for windows
{ "docstring": "Parse a sequence of RFC 2822 phrases.\n\n A phrase is a sequence of words, which are in turn either RFC 2822\n atoms or quoted-strings. Phrases are canonicalized by squeezing all\n runs of continuous whitespace into one space.\n ", "language": "en", "n_whitespaces": 66, "n_words": 37, "vocab_size": 30 }
complexity: 6 | language: Python | n_words: 35
n_ast_errors: 0 | n_whitespaces: 363
commit_id: 8387676bc049d7b3e071846730c632e6ced137ed
url: https://github.com/matplotlib/matplotlib.git
def set_location(self, location): # This puts the rectangle
token_counts: 130 | vocab_size: 97
repo: matplotlib | file_name: _secondary_axes.py
path: lib/matplotlib/axes/_secondary_axes.py
ast_levels: 15 | d_id: 23,720
def set_location(self, location): # This puts the rectangle into figure-relative coordinates. if isinstance(location, str): _api.check_in_list(self._locstrings, location=location) self._pos = 1. if location in ('top', 'right') else 0. elif isinstance(location, numbers.Real): self._pos = location else: raise ValueError( f"location must be {self._locstrings[0]!r}, " f"{self._locstrings[1]!r}, or a float, not {location!r}") self._loc = location if self._orientation == 'x': # An x-secondary axes is like an inset axes from x = 0 to x = 1 and # from y = pos to y = pos + eps, in the parent's transAxes coords. bounds = [0, self._pos, 1., 1e-10] else: # 'y' bounds = [self._pos, 0, 1e-10, 1] # this locator lets the axes move in the parent axes coordinates. # so it never needs to know where the parent is explicitly in # figure coordinates. # it gets called in ax.apply_aspect() (of all places) self.set_axes_locator( _TransformedBoundsLocator(bounds, self._parent.transAxes))
nloc: 17 | fun_name: set_location | id: 109,724
n_identifiers: 19 | n_ast_nodes: 230
Clean up code in SecondaryAxis
{ "docstring": "\n Set the vertical or horizontal location of the axes in\n parent-normalized coordinates.\n\n Parameters\n ----------\n location : {'top', 'bottom', 'left', 'right'} or float\n The position to put the secondary axis. Strings can be 'top' or\n 'bottom' for orientation='x' and 'right' or 'left' for\n orientation='y'. A float indicates the relative position on the\n parent axes to put the new axes, 0.0 being the bottom (or left)\n and 1.0 being the top (or right).\n ", "language": "en", "n_whitespaces": 170, "n_words": 71, "vocab_size": 51 }
complexity: 5 | language: Python | n_words: 142
n_ast_errors: 0 | n_whitespaces: 149
commit_id: e7cb2e82f8b9c7a68f82abdd3b6011d661230b7e
url: https://github.com/modin-project/modin.git
def length(self): if self._length_cache is None: if len(self.call_queue): self.drain_call_queue() else: self._length_cache, self._width_cache = _get_index_and_columns.remote( self.oid
token_counts: 70 | vocab_size: 19
repo: modin | file_name: partition.py
path: modin/core/execution/ray/implementations/pandas_on_ray/partitioning/partition.py
ast_levels: 14 | d_id: 35,383
def length(self): if self._length_cache is None: if len(self.call_queue): self.drain_call_queue() else: self._length_cache, self._width_cache = _get_index_and_columns.remote( self.oid ) if isinstance(self._length_cache, ObjectIDType): self._length_cache = ray.get(self._length_cache) return self._length_cache
nloc: 11 | fun_name: length | id: 153,347
n_identifiers: 14 | n_ast_nodes: 115
REFACTOR-#4251: define public interfaces in `modin.core.execution.ray` module (#3868) Signed-off-by: Anatoly Myachev <[email protected]>
{ "docstring": "\n Get the length of the object wrapped by this partition.\n\n Returns\n -------\n int\n The length of the object.\n ", "language": "en", "n_whitespaces": 65, "n_words": 18, "vocab_size": 14 }
complexity: 4 | language: Python | n_words: 24
n_ast_errors: 0 | n_whitespaces: 44
commit_id: 0f6dde45a1c75b02c208323574bdb09b8536e3e4
url: https://github.com/sympy/sympy.git
def dmp_l2_norm_squared(f, u, K): if not u: return dup_l2_norm_squared(f, K) v = u - 1 return s
token_counts: 44 | vocab_size: 23
repo: sympy | file_name: densearith.py
path: sympy/polys/densearith.py
ast_levels: 10 | d_id: 47,480
def dmp_l2_norm_squared(f, u, K): if not u: return dup_l2_norm_squared(f, K) v = u - 1 return sum([ dmp_l2_norm_squared(c, v, K) for c in f ])
nloc: 5 | fun_name: dmp_l2_norm_squared | id: 195,939
n_identifiers: 8 | n_ast_nodes: 67
Add `l2_norm_squared` methods.
{ "docstring": "\n Returns squared l2 norm of a polynomial in ``K[X]``.\n\n Examples\n ========\n\n >>> from sympy.polys import ring, ZZ\n >>> R, x,y = ring(\"x,y\", ZZ)\n\n >>> R.dmp_l2_norm_squared(2*x*y - x - 3)\n 14\n\n ", "language": "en", "n_whitespaces": 55, "n_words": 30, "vocab_size": 27 }
complexity: 3 | language: Python | n_words: 25
n_ast_errors: 0 | n_whitespaces: 72
commit_id: a06fa496d3f837cca3c437ab6e9858525633d147
url: https://github.com/ansible/ansible.git
def cloud_filter(args, targets): # type: (IntegrationConfig, t.Tuple[IntegrationTarget, ...]) -> t.List[str] if args.metadata.cloud_config is not None: return [] # cloud filter already performed prior to delegation exclude = [] # type: t.List[str] for provider in get_cloud_providers(
token_counts: 45 | vocab_size: 32
repo: ansible | file_name: __init__.py
path: test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py
ast_levels: 9 | d_id: 78,551
def cloud_filter(args, targets): # type: (IntegrationConfig, t.Tuple[IntegrationTarget, ...]) -> t.List[str] if args.metadata.cloud_config is not None: return [] # cloud filter already performed prior to delegation exclude = [] # type: t.List[str] for provider in get_cloud_providers(args, targets): provider.filter(targets, exclude) return exclude
nloc: 7 | fun_name: cloud_filter | id: 266,740
n_identifiers: 9 | n_ast_nodes: 74
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
{ "docstring": "Return a list of target names to exclude based on the given targets.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
complexity: 3 | language: Python | n_words: 40
n_ast_errors: 0 | n_whitespaces: 252
commit_id: f1c37893caf90738288e789c3233ab934630254f
url: https://github.com/saltstack/salt.git
def test_upgrade_available_none(): chk_upgrade_out = ( "Last metadata ex
token_counts: 124 | vocab_size: 56
repo: salt | file_name: test_aixpkg.py
path: tests/pytests/unit/modules/test_aixpkg.py
ast_levels: 16 | d_id: 53,805
def test_upgrade_available_none(): chk_upgrade_out = ( "Last metadata expiration check: 22:5:48 ago on Mon Dec 6 19:26:36 EST 2021." ) dnf_call = MagicMock(return_value={"retcode": 100, "stdout": chk_upgrade_out}) version_mock = MagicMock(return_value="6.6-2") with patch("pathlib.Path.is_file", return_value=True): with patch.dict( aixpkg.__salt__, {"cmd.run_all": dnf_call, "config.get": MagicMock(return_value=False)}, ), patch.object(aixpkg, "version", version_mock): result = aixpkg.upgrade_available("info") assert dnf_call.call_count == 1 libpath_env = {"LIBPATH": "/opt/freeware/lib:/usr/lib"} dnf_call.assert_any_call( "/opt/freeware/bin/dnf check-update info", env=libpath_env, ignore_retcode=True, python_shell=False, ) assert not result
nloc: 21 | fun_name: test_upgrade_available_none | id: 215,087
n_identifiers: 19 | n_ast_nodes: 217
Working tests for install
{ "docstring": "\n test upgrade available where a valid upgrade is not available\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 8 }
complexity: 1 | language: Python | n_words: 64
n_ast_errors: 0 | n_whitespaces: 373
commit_id: 361b7f444a53cc34cad8ddc378d125b7027d96df
url: https://github.com/getsentry/sentry.git
def test_too_many_boosted_releases_do_not_boost_anymore(self): release_2 = Release.get_or_create(
token_counts: 185 | vocab_size: 46
repo: sentry | file_name: test_event_manager.py
path: tests/sentry/event_manager/test_event_manager.py
ast_levels: 14 | d_id: 18,273
def test_too_many_boosted_releases_do_not_boost_anymore(self): release_2 = Release.get_or_create(self.project, "2.0") release_3 = Release.get_or_create(self.project, "3.0") for release_id in (self.release.id, release_2.id): self.redis_client.set(f"ds::p:{self.project.id}:r:{release_id}", 1, 60 * 60 * 24) self.redis_client.hset( f"ds::p:{self.project.id}:boosted_releases", release_id, time(), ) with self.options( { "dynamic-sampling:boost-latest-release": True, } ): self.make_release_transaction( release_version=release_3.version, environment_name=self.environment1.name, project_id=self.project.id, checksum="b" * 32, timestamp=self.timestamp, ) assert self.redis_client.hgetall(f"ds::p:{self.project.id}:boosted_releases") == { str(self.release.id): str(time()), str(release_2.id): str(time()), } assert self.redis_client.get(f"ds::p:{self.project.id}:r:{release_3.id}") is None
nloc: 27 | fun_name: test_too_many_boosted_releases_do_not_boost_anymore | id: 87,293
n_identifiers: 27 | n_ast_nodes: 342
feat(ds): Limit the amount of boosted releases to 10 (#40501) Limits amount of boosted releases to 10 releases otherwise do not add any more releases to hash set of listed releases
{ "docstring": "\n This test tests the case when we have already too many boosted releases, in this case we want to skip the\n boosting of anymore releases\n ", "language": "en", "n_whitespaces": 47, "n_words": 25, "vocab_size": 22 }
complexity: 2 | language: Python | n_words: 56
n_ast_errors: 0 | n_whitespaces: 175
commit_id: 5dfd57af2a141a013ae3753e160180b82bec9469
url: https://github.com/networkx/networkx.git
def hits(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True): import numpy as np import scipy as sp imp
token_counts: 226 | vocab_size: 56
repo: networkx | file_name: hits_alg.py
path: networkx/algorithms/link_analysis/hits_alg.py
ast_levels: 15 | d_id: 41,745
def hits(G, max_iter=100, tol=1.0e-8, nstart=None, normalized=True): import numpy as np import scipy as sp import scipy.sparse.linalg # call as sp.sparse.linalg if len(G) == 0: return {}, {} A = nx.adjacency_matrix(G, nodelist=list(G), dtype=float) if nstart is None: u, s, vt = sp.sparse.linalg.svds(A, k=1, maxiter=max_iter, tol=tol) else: nstart = np.array(list(nstart.values())) u, s, vt = sp.sparse.linalg.svds(A, k=1, v0=nstart, maxiter=max_iter, tol=tol) a = vt.flatten().real h = A @ a if normalized: h = h / h.sum() a = a / a.sum() hubs = dict(zip(G, map(float, h))) authorities = dict(zip(G, map(float, a))) return hubs, authorities
nloc: 20 | fun_name: hits | id: 176,175
n_identifiers: 39 | n_ast_nodes: 339
Use scipy.sparse array datastructure (#5139) * Step 1: use sparse arrays in nx.to_scipy_sparse_matrix. Seems like a reasonable place to start. nx.to_scipy_sparse_matrix is one of the primary interfaces to scipy.sparse from within NetworkX. * 1: Use np.outer instead of mult col/row vectors Fix two instances in modularitymatrix where a new 2D array was being created via an outer product of two \"vectors\". In the matrix case, this was a row vector \* a column vector. In the array case this can be disambiguated by being explicit with np.outer. * Update _transition_matrix in laplacianmatrix module - A few instances of matrix multiplication operator - Add np.newaxis + transpose to get shape right for broadcasting - Explicitly convert e.g. sp.sparse.spdiags to a csr_array. * Update directed_combinitorial_laplacian w/ sparse array. - Wrap spdiags in csr_array and update matmul operators. * Rm matrix-specific code from lgc and hmn modules - Replace .A call with appropriate array semantics - wrap sparse.diags in csr_array. * Change hits to use sparse array semantics. - Replace * with @ - Remove superfluous calls to flatten. * Update sparse matrix usage in layout module. - Simplify lil.getrowview call - Wrap spdiags in csr_array. * lil_matrix -> lil_array in graphmatrix.py. * WIP: Start working on algebraic connectivity module. * Incorporate auth mat varname feedback. * Revert 1D slice and comment for 1D sparse future. * Add TODOs: rm csr_array wrapper around spdiags etc. * WIP: cleanup algebraicconn: tracemin_fiedler. * Typo. * Finish reviewing algebraicconnectivity. * Convert bethe_hessian matrix to use sparse arrays. * WIP: update laplacian. Update undirected laplacian functions. * WIP: laplacian - add comment about _transition_matrix return types. * Finish laplacianmatrix review. * Update attrmatrix. * Switch to official laplacian function. * Update pagerank to use sparse array. * Switch bipartite matrix to sparse arrays. * Check from_scipy_sparse_matrix works with arrays. Modifies test suite. * Apply changes from review. * Fix failing docstring tests. * Fix missing axis for in-place multiplication. * Use scipy==1.8rc2 * Use matrix multiplication * Fix PyPy CI * [MRG] Create plot_subgraphs.py example (#5165) * Create plot_subgraphs.py https://github.com/networkx/networkx/issues/4220 * Update plot_subgraphs.py black * Update plot_subgraphs.py lint plus font_size * Update plot_subgraphs.py added more plots * Update plot_subgraphs.py removed plots from the unit test and added comments * Update plot_subgraphs.py lint * Update plot_subgraphs.py typos fixed * Update plot_subgraphs.py added nodes to the plot of the edges removed that was commented out for whatever reason * Update plot_subgraphs.py revert the latest commit - the line was commented out for a reason - it's broken * Update plot_subgraphs.py fixed node color issue * Update plot_subgraphs.py format fix * Update plot_subgraphs.py forgot to draw the nodes... now fixed * Fix sphinx warnings about heading length. * Update examples/algorithms/plot_subgraphs.py * Update examples/algorithms/plot_subgraphs.py Co-authored-by: Ross Barnowski <[email protected]> Co-authored-by: Dan Schult <[email protected]> * Add traveling salesman problem to example gallery (#4874) Adds an example of the using Christofides to solve the TSP problem to the example galery. 
Co-authored-by: Ross Barnowski <[email protected]> * Fixed inconsistent documentation for nbunch parameter in DiGraph.edges() (#5037) * Fixed inconsistent documentation for nbunch parameter in DiGraph.edges() * Resolved Requested Changes * Revert changes to degree docstrings. * Update comments in example. * Apply wording to edges method in all graph classes. Co-authored-by: Ross Barnowski <[email protected]> * Compatibility updates from testing with numpy/scipy/pytest rc's (#5226) * Rm deprecated scipy subpkg access. * Use recwarn fixture in place of deprecated pytest pattern. * Rm unnecessary try/except from tests. * Replace internal `close` fn with `math.isclose`. (#5224) * Replace internal close fn with math.isclose. * Fix lines in docstring examples. * Fix Python 3.10 deprecation warning w/ int div. (#5231) * Touchups and suggestions for subgraph gallery example (#5225) * Simplify construction of G with edges rm'd * Rm unused graph attribute. * Shorten categorization by node type. * Simplify node coloring. * Simplify isomorphism check. * Rm unit test. * Rm redundant plotting of each subgraph. * Use new package name (#5234) * Allowing None edges in weight function of bidirectional Dijkstra (#5232) * added following feature also to bidirectional dijkstra: The weight function can be used to hide edges by returning None. * changed syntax for better readability and code duplicate avoidance Co-authored-by: Hohmann, Nikolas <[email protected]> * Add an FAQ about assigning issues. (#5182) * Add FAQ about assigning issues. * Add note about linking issues from new PRs. * Update dev deps (#5243) * Update minor doc issues with tex notation (#5244) * Add FutureWarnings to fns that return sparse matrices - biadjacency_matrix. - bethe_hessian_matrix. - incidence_matrix. - laplacian functions. - modularity_matrix functions. - adjacency_matrix. * Add to_scipy_sparse_array and use it everywhere. Add a new conversion function to preserve array semantics internally while not altering behavior for users. Also adds FutureWarning to to_scipy_sparse_matrix. * Add from_scipy_sparse_array. Supercedes from_scipy_sparse_matrix. * Handle deprecations in separate PR. * Fix docstring examples. Co-authored-by: Mridul Seth <[email protected]> Co-authored-by: Jarrod Millman <[email protected]> Co-authored-by: Andrew Knyazev <[email protected]> Co-authored-by: Dan Schult <[email protected]> Co-authored-by: eskountis <[email protected]> Co-authored-by: Anutosh Bhat <[email protected]> Co-authored-by: NikHoh <[email protected]> Co-authored-by: Hohmann, Nikolas <[email protected]> Co-authored-by: Sultan Orazbayev <[email protected]> Co-authored-by: Mridul Seth <[email protected]>
{ "docstring": "Returns HITS hubs and authorities values for nodes.\n\n The HITS algorithm computes two numbers for a node.\n Authorities estimates the node value based on the incoming links.\n Hubs estimates the node value based on outgoing links.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph\n\n max_iter : integer, optional\n Maximum number of iterations in power method.\n\n tol : float, optional\n Error tolerance used to check convergence in power method iteration.\n\n nstart : dictionary, optional\n Starting value of each node for power method iteration.\n\n normalized : bool (default=True)\n Normalize results by the sum of all of the values.\n\n Returns\n -------\n (hubs,authorities) : two-tuple of dictionaries\n Two dictionaries keyed by node containing the hub and authority\n values.\n\n Raises\n ------\n PowerIterationFailedConvergence\n If the algorithm fails to converge to the specified tolerance\n within the specified number of iterations of the power iteration\n method.\n\n Examples\n --------\n >>> G = nx.path_graph(4)\n >>> h, a = nx.hits(G)\n\n Notes\n -----\n The eigenvector calculation is done by the power iteration method\n and has no guarantee of convergence. The iteration will stop\n after max_iter iterations or an error tolerance of\n number_of_nodes(G)*tol has been reached.\n\n The HITS algorithm was designed for directed graphs but this\n algorithm does not check if the input graph is directed and will\n execute on undirected graphs.\n\n References\n ----------\n .. [1] A. Langville and C. Meyer,\n \"A survey of eigenvector methods of web information retrieval.\"\n http://citeseer.ist.psu.edu/713792.html\n .. [2] Jon Kleinberg,\n Authoritative sources in a hyperlinked environment\n Journal of the ACM 46 (5): 604-32, 1999.\n doi:10.1145/324133.324140.\n http://www.cs.cornell.edu/home/kleinber/auth.pdf.\n ", "language": "en", "n_whitespaces": 446, "n_words": 248, "vocab_size": 152 }
4
Python
90
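Illustrative sketch (not part of the record above): the power-iteration update that the hits docstring describes, written out with numpy on a small hand-made adjacency matrix. The matrix values and the fixed iteration count are assumptions for demonstration; nx.hits(G) is the actual library call.

import numpy as np

# Tiny directed graph as an adjacency matrix (assumed example data).
A = np.array([[0, 1, 1],
              [0, 0, 1],
              [1, 0, 0]], dtype=float)
h = np.ones(A.shape[0])      # hub scores
for _ in range(100):         # fixed iteration count instead of a tolerance check
    a = A.T @ h              # authorities: pointed to by good hubs
    h = A @ a                # hubs: point to good authorities
    a /= a.sum()             # normalize, as normalized=True does in the docstring
    h /= h.sum()
print(h, a)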
0
87
26e8d6d7664bbaae717438bdb41766550ff57e4f
https://github.com/apache/airflow.git
def test_connection(self) -> Tuple[bool, str]: try: conn = se
41
21
airflow
ftp.py
airflow/providers/ftp/hooks/ftp.py
11
8,731
def test_connection(self) -> Tuple[bool, str]:
    try:
        conn = self.get_conn()
        conn.pwd
        return True, "Connection successfully tested"
    except Exception as e:
        return False, str(e)
8
test_connection
45,823
10
71
Updates FTPHook provider to have test_connection (#21997) * Updates FTP provider to have test_connection Co-authored-by: eladkal <[email protected]>
{ "docstring": "Test the FTP connection by calling path with directory", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
2
Python
22
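A hedged usage sketch of the method in the record above. The connection id "ftp_default" is assumed to be configured in Airflow; the (bool, str) return shape comes from the record's own code.

from airflow.providers.ftp.hooks.ftp import FTPHook

hook = FTPHook(ftp_conn_id="ftp_default")   # connection id is an assumption
ok, message = hook.test_connection()        # (True, "Connection successfully tested") on success
print(ok, message)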
0
352
1661ddd44044c637526e9a1e812e7c1863be35fc
https://github.com/OpenBB-finance/OpenBBTerminal.git
def call_price(self, other_args): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="price", description=, ) parser.add_argument( "-s", "--symbol", required="-h" not in other_args, type=str, dest="symbol", help="Symbol of coin to load data for, ~100 symbols are availa
131
64
OpenBBTerminal
crypto_controller.py
openbb_terminal/cryptocurrency/crypto_controller.py
13
85,397
def call_price(self, other_args): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="price", description=, ) parser.add_argument( "-s", "--symbol", required="-h" not in other_args, type=str, dest="symbol", help="Symbol of coin to load data for, ~100 symbols are available", ) if other_args and "-" not in other_args[0][0]: other_args.insert(0, "-s") ns_parser = self.parse_known_args_and_warn(parser, other_args) if ns_parser: if ns_parser.symbol in pyth_model.ASSETS.keys(): console.print( "[param]If it takes too long, you can use 'Ctrl + C' to cancel.\n[/param]" ) pyth_view.display_price(ns_parser.symbol) else: console.print("[red]The symbol selected does not exist.[/red]\n")
26
call_price
285,727
28
221
Integrate live feeds from Pyth (#2178) * added dependency * added pyth models * dependencies * docs * some improvements to this pyth command (#2433) * some improvements to this pyth command * minor improv * dependencies * tests Co-authored-by: DidierRLopes <[email protected]>; COlin
{ "docstring": "Process price commandDisplay price and interval of confidence in real-time. [Source: Pyth]", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
5
Python
74
0
316
6ed6ac9448311930557810383d2cfd4fe6aae269
https://github.com/huggingface/datasets.git
def _single_map_nested(args): function, data_struct, types, rank, disable_tqdm, desc = args # Singleton first to spare some computation if not isinstance(data_struct, dict) and not isinstance(data_struct, types): return function(data_struct) # Reduce logging to keep things readable in multiprocessing with tqdm if rank is not None and logging.get_verbosity() < logging.WARNING: logging.set_verbosity_warning() # Print at least one thing to fix tqdm in notebooks in multiprocessing # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308 if rank is not None and not disable_tqdm and any("notebook" in tqdm_cls.__name__ for tqdm_cls in tqdm.__mro__): print(" ", end="", flush=True) # Loop over single examples or batches and write to buffer/file if examples are to be updated pbar_iterable = data_struct.items() if isinstance(data_struct, dict) else data_struct pbar_desc = (desc + " " if desc is not None else "") + "#" + str(rank) if rank is not None else desc pbar = utils.tqdm(pbar_iterable, dis
259
107
datasets
py_utils.py
src/datasets/utils/py_utils.py
13
21,793
def _single_map_nested(args): function, data_struct, types, rank, disable_tqdm, desc = args # Singleton first to spare some computation if not isinstance(data_struct, dict) and not isinstance(data_struct, types): return function(data_struct) # Reduce logging to keep things readable in multiprocessing with tqdm if rank is not None and logging.get_verbosity() < logging.WARNING: logging.set_verbosity_warning() # Print at least one thing to fix tqdm in notebooks in multiprocessing # see https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308 if rank is not None and not disable_tqdm and any("notebook" in tqdm_cls.__name__ for tqdm_cls in tqdm.__mro__): print(" ", end="", flush=True) # Loop over single examples or batches and write to buffer/file if examples are to be updated pbar_iterable = data_struct.items() if isinstance(data_struct, dict) else data_struct pbar_desc = (desc + " " if desc is not None else "") + "#" + str(rank) if rank is not None else desc pbar = utils.tqdm(pbar_iterable, disable=disable_tqdm, position=rank, unit="obj", desc=pbar_desc) if isinstance(data_struct, dict): return {k: _single_map_nested((function, v, types, None, True, None)) for k, v in pbar} else: mapped = [_single_map_nested((function, v, types, None, True, None)) for v in pbar] if isinstance(data_struct, list): return mapped elif isinstance(data_struct, tuple): return tuple(mapped) else: return np.array(mapped)
21
_single_map_nested
104,238
38
398
Better TQDM output (#3654) * Show progress bar when generating examples * Consistent utils.is_progress_bar_enabled calls * Fix tqdm in notebook * Add missing params to DatasetDict.map * Specify total in tqdm progress bar in map * Fix total computation * Small fix * Add desc to map_nested * Add more precise descriptions to download * Address comments * Fix docstring * Final changes * Minor change
{ "docstring": "Apply a function recursively to each element of a nested data struct.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 11 }
17
Python
182
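A stripped-down sketch of the recursion pattern behind _single_map_nested, with the tqdm and multiprocessing plumbing removed. The function name and defaults here are made up for illustration and are not the library's API.

def map_nested_simple(function, data, types=(list, tuple)):
    # Apply `function` to every leaf of a nested dict/list/tuple structure.
    if isinstance(data, dict):
        return {k: map_nested_simple(function, v, types) for k, v in data.items()}
    if isinstance(data, types):
        mapped = [map_nested_simple(function, v, types) for v in data]
        return tuple(mapped) if isinstance(data, tuple) else mapped
    return function(data)

print(map_nested_simple(lambda x: x * 2, {"a": [1, 2], "b": (3, {"c": 4})}))
# {'a': [2, 4], 'b': (6, {'c': 8})}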
0
99
9c19aff7c7561e3a82978a272ecdaad40dda5c00
https://github.com/django/django.git
def test_unified(self):
77
26
django
tests.py
tests/admin_scripts/tests.py
11
51,930
def test_unified(self):
    self.write_settings("settings_to_diff.py", sdict={"FOO": '"bar"'})
    args = ["diffsettings", "--settings=settings_to_diff", "--output=unified"]
    out, err = self.run_manage(args)
    self.assertNoOutput(err)
    self.assertOutput(out, "+ FOO = 'bar'")
    self.assertOutput(out, "- SECRET_KEY = ''")
    self.assertOutput(out, "+ SECRET_KEY = 'django_tests_secret_key'")
    self.assertNotInOutput(out, " APPEND_SLASH = True")
9
test_unified
207,334
11
140
Refs #33476 -- Reformatted code with Black.
{ "docstring": "--output=unified emits settings diff in unified mode.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
1
Python
35
0
18
eb2692cb32bb1747e312d5b20e976d7a879c9588
https://github.com/ray-project/ray.git
def runtime_env(self):
17
4
ray
runtime_context.py
python/ray/runtime_context.py
9
31,793
def runtime_env(self): return RuntimeEnv.deserialize(self._get_runtime_env_string())
2
runtime_env
139,848
5
31
[runtime env] runtime env inheritance refactor (#24538) * [runtime env] runtime env inheritance refactor (#22244) Runtime Environments is already GA in Ray 1.6.0. The latest doc is [here](https://docs.ray.io/en/master/ray-core/handling-dependencies.html#runtime-environments). And now, we already supported a [inheritance](https://docs.ray.io/en/master/ray-core/handling-dependencies.html#inheritance) behavior as follows (copied from the doc): - The runtime_env["env_vars"] field will be merged with the runtime_env["env_vars"] field of the parent. This allows for environment variables set in the parent’s runtime environment to be automatically propagated to the child, even if new environment variables are set in the child’s runtime environment. - Every other field in the runtime_env will be overridden by the child, not merged. For example, if runtime_env["py_modules"] is specified, it will replace the runtime_env["py_modules"] field of the parent. We think this runtime env merging logic is so complex and confusing to users because users can't know the final runtime env before the jobs are run. Current PR tries to do a refactor and change the behavior of Runtime Environments inheritance. Here is the new behavior: - **If there is no runtime env option when we create actor, inherit the parent runtime env.** - **Otherwise, use the optional runtime env directly and don't do the merging.** Add a new API named `ray.runtime_env.get_current_runtime_env()` to get the parent runtime env and modify this dict by yourself. Like: ```Actor.options(runtime_env=ray.runtime_env.get_current_runtime_env().update({"X": "Y"}))``` This new API also can be used in ray client.
{ "docstring": "Get the runtime env of the current job/worker.\n\n If this API is called in driver or ray client, returns the job level runtime\n env.\n If this API is called in workers/actors, returns the worker level runtime env.\n\n Returns:\n A new ray.runtime_env.RuntimeEnv instance.\n\n To merge from the current runtime env in some specific cases, you can get the\n current runtime env by this API and modify it by yourself.\n\n Example:\n >>> # Inherit current runtime env, except `env_vars`\n >>> Actor.options( # doctest: +SKIP\n ... runtime_env=ray.get_runtime_context().runtime_env.update(\n ... {\"env_vars\": {\"A\": \"a\", \"B\": \"b\"}})\n ... ) # doctest: +SKIP\n ", "language": "en", "n_whitespaces": 205, "n_words": 95, "vocab_size": 60 }
1
Python
4
0
65
d1a8d1597d4fe9f129a72fe94c1508304b7eae0f
https://github.com/streamlink/streamlink.git
def sleeper(self, duration): s = time() yield time_to_sleep = duration - (time() - s) if time_to_sleep > 0: s
36
16
streamlink
dash.py
src/streamlink/stream/dash.py
11
45,770
def sleeper(self, duration):
    s = time()
    yield
    time_to_sleep = duration - (time() - s)
    if time_to_sleep > 0:
        self.wait(time_to_sleep)
6
sleeper
187,407
7
63
stream.dash: update DASHStreamWorker.iter_segments - Refactor DASHStreamWorker.iter_segments() - Replace dash_manifest.sleeper() with SegmentedStreamWorker.wait(), and make the worker thread shut down immediately on close(). - Prevent unnecessary wait times for static manifest types by calling close() after all segments were put into the writer's queue.
{ "docstring": "\n Do something and then wait for a given duration minus the time it took doing something\n ", "language": "en", "n_whitespaces": 31, "n_words": 16, "vocab_size": 15 }
2
Python
19
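The same elapsed-time pattern as a standalone context manager, hedged: time.sleep stands in for the worker's self.wait() call, and the 1.0 second budget is an arbitrary example value.

import time
from contextlib import contextmanager

@contextmanager
def sleeper(duration):
    start = time.time()
    yield
    remaining = duration - (time.time() - start)
    if remaining > 0:
        time.sleep(remaining)   # stand-in for the worker's wait()

with sleeper(1.0):
    pass  # work that may use part of the time budget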
0
1,100
a17f4f3bd63e3ca3754f96d7db4ce5197720589b
https://github.com/matplotlib/matplotlib.git
def test_BoundaryNorm(): boundaries = [0, 1.1, 2.2] vals = [-1, 0, 1, 2, 2.2, 4] # Without interpolation expected = [-1, 0, 0, 1, 2, 2] ncolors = len(boundaries) - 1 bn = mcolors.BoundaryNorm(boundaries, ncolors) assert_array_equal(bn(vals), expected) # ncolors != len(boundaries) - 1 triggers interpolation expected = [-1, 0, 0, 2, 3, 3] ncolors = len(boundaries) bn = mcolors.BoundaryNorm(boundaries, ncolors) assert_array_equal(bn(vals), expected) # with a single region and interpolation expected = [-1, 1, 1, 1, 3, 3] bn = mcolors.BoundaryNorm([0, 2.2], ncolors) assert_array_equal(bn(vals), expected) # more boundaries for a third color boundaries = [0, 1, 2, 3] vals = [-1, 0.1, 1.1, 2.2, 4] ncolors = 5 expected = [-1, 0, 2, 4, 5] bn = mcolors.BoundaryNorm(boundaries, ncolors) assert_array_equal(bn(vals), expected) # a scalar as input should not trigger an error and should return a scalar boundaries = [0, 1, 2] vals = [-1, 0.1, 1.1, 2.2] bn = mcolors.BoundaryNorm(boundaries, 2) expected = [-1, 0, 1, 2] for v, ex in zip(vals, expected): ret = bn(v) assert isinstance(ret, int) assert_array_equal(ret, ex) assert_array_equal(bn([v]), ex) # same with interp bn = mcolors.BoundaryNorm(boundaries, 3) expected = [-1, 0, 2, 3] for v, ex in zip(vals, expected): ret = bn(v) assert isinstance(ret, int) assert_array_equal(ret, ex) assert_array_equal(bn([v]), ex) # Clipping bn = mcolors.BoundaryNorm(boundaries, 3, clip=True) expected = [0, 0, 2, 2] for v, ex in zip(vals, expected): ret = bn(v) assert isinstance(ret, int) assert_array_equal(ret, ex) assert_array_equal(bn([v]), ex) # Masked arrays boundaries = [0, 1.1, 2.2] vals = np.ma.masked_invalid([-1., np.NaN, 0, 1.4, 9]) # Without interpolation ncolors = len(boundaries) - 1 bn = mcolors.BoundaryNorm(boundaries, ncolors) expected = np.ma.masked_array([-1, -99, 0, 1, 2], mask=[0, 1, 0, 0, 0]) assert_array_equal(bn(vals), expected) # With interpolation bn = mcolors.BoundaryNorm(boundaries, len(boundaries)) expected = np.ma.masked_array([-1, -99, 0, 2, 3], mask=[0, 1, 0, 0, 0]) assert_array_equal(bn(vals), expected) # Non-trivial masked arrays vals = np.ma.masked_invalid([np.Inf, np.NaN]) assert np.all(bn(vals).mask) vals = np.ma.masked_invalid([np.Inf]) assert np.all(bn(vals).mask) # Incompatible extend and clip with pytest.raises(ValueError, match="not compatible"): mcolors.BoundaryNorm(np.arange(4), 5, extend='both', clip=True) # Too small ncolors argument with pytest.raises(ValueError, match="ncolors must equal or exceed"): mcolors.BoundaryNorm(np.arange(4), 2) with pytest.raises(ValueError, match="ncolors must equal or exceed"): mcolors.BoundaryNorm(np.arange(4), 3, extend='min') with pytest.raises(ValueError, match="ncolors must equal or exceed"): mcolors.BoundaryNorm(np.arange(4), 4, extend='both') # Testing extend keyword, with interpolation (large cmap) bounds = [1, 2, 3] cmap = mpl.colormaps['viridis'] mynorm = mcolors.BoundaryNorm(bounds, cmap.N, extend='both') refnorm = mcolors.BoundaryNorm([0] + bounds + [4], cmap.N) x = np.random.randn(100) * 10 + 2 ref = refnorm(x) ref[ref == 0] = -1 ref[ref == cmap.N - 1] = cmap.N assert_array_equal(mynorm(x), ref) # Without interpolation cmref = mcolors.ListedColormap(['blue', 'red']) cmref.set_over('black') cmref.set_under('white') cmshould = mcolors.ListedColormap(['white', 'blue', 'red', 'black']) assert mcolors.same_color(cmref.get_over(), 'black') assert mcolors.same_color(cmref.get_under(), 'white') refnorm = mcolors.BoundaryNorm(bounds, cmref.N) mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, 
extend='both') assert mynorm.vmin == refnorm.vmin assert mynorm.vmax == refnorm.vmax assert mynorm(bounds[0] - 0.1) == -1 # under assert mynorm(bounds[0] + 0.1) == 1 # first bin -> second color assert mynorm(bounds[-1] - 0.1) == cmshould.N - 2 # next-to-last color assert mynorm(bounds[-1] + 0.1) == cmshould.N # over x = [-1, 1.2, 2.3, 9.6] assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2, 3])) x = np.random.randn(100) * 10 + 2 assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x))) # Just min cmref = mcolors.ListedColormap(['blue', 'red']) cmref.set_under('white') cmshould = mcolors.ListedColormap(['white', 'blue', 'red']) assert mcolors.same_color(cmref.get_under(), 'white') assert cmref.N == 2 assert cmshould.N == 3 refnorm = mcolors.BoundaryNorm(bounds, cmref.N) mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='min') assert mynorm.vmin == refnorm.vmin assert mynorm.vmax == refnorm.vmax x = [-1, 1.2, 2.3] assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2])) x = np.random.randn(100) * 10 + 2 assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x))) # Just max cmref = mcolors.Lis
1,470
192
matplotlib
test_colors.py
lib/matplotlib/tests/test_colors.py
12
23,566
def test_BoundaryNorm(): boundaries = [0, 1.1, 2.2] vals = [-1, 0, 1, 2, 2.2, 4] # Without interpolation expected = [-1, 0, 0, 1, 2, 2] ncolors = len(boundaries) - 1 bn = mcolors.BoundaryNorm(boundaries, ncolors) assert_array_equal(bn(vals), expected) # ncolors != len(boundaries) - 1 triggers interpolation expected = [-1, 0, 0, 2, 3, 3] ncolors = len(boundaries) bn = mcolors.BoundaryNorm(boundaries, ncolors) assert_array_equal(bn(vals), expected) # with a single region and interpolation expected = [-1, 1, 1, 1, 3, 3] bn = mcolors.BoundaryNorm([0, 2.2], ncolors) assert_array_equal(bn(vals), expected) # more boundaries for a third color boundaries = [0, 1, 2, 3] vals = [-1, 0.1, 1.1, 2.2, 4] ncolors = 5 expected = [-1, 0, 2, 4, 5] bn = mcolors.BoundaryNorm(boundaries, ncolors) assert_array_equal(bn(vals), expected) # a scalar as input should not trigger an error and should return a scalar boundaries = [0, 1, 2] vals = [-1, 0.1, 1.1, 2.2] bn = mcolors.BoundaryNorm(boundaries, 2) expected = [-1, 0, 1, 2] for v, ex in zip(vals, expected): ret = bn(v) assert isinstance(ret, int) assert_array_equal(ret, ex) assert_array_equal(bn([v]), ex) # same with interp bn = mcolors.BoundaryNorm(boundaries, 3) expected = [-1, 0, 2, 3] for v, ex in zip(vals, expected): ret = bn(v) assert isinstance(ret, int) assert_array_equal(ret, ex) assert_array_equal(bn([v]), ex) # Clipping bn = mcolors.BoundaryNorm(boundaries, 3, clip=True) expected = [0, 0, 2, 2] for v, ex in zip(vals, expected): ret = bn(v) assert isinstance(ret, int) assert_array_equal(ret, ex) assert_array_equal(bn([v]), ex) # Masked arrays boundaries = [0, 1.1, 2.2] vals = np.ma.masked_invalid([-1., np.NaN, 0, 1.4, 9]) # Without interpolation ncolors = len(boundaries) - 1 bn = mcolors.BoundaryNorm(boundaries, ncolors) expected = np.ma.masked_array([-1, -99, 0, 1, 2], mask=[0, 1, 0, 0, 0]) assert_array_equal(bn(vals), expected) # With interpolation bn = mcolors.BoundaryNorm(boundaries, len(boundaries)) expected = np.ma.masked_array([-1, -99, 0, 2, 3], mask=[0, 1, 0, 0, 0]) assert_array_equal(bn(vals), expected) # Non-trivial masked arrays vals = np.ma.masked_invalid([np.Inf, np.NaN]) assert np.all(bn(vals).mask) vals = np.ma.masked_invalid([np.Inf]) assert np.all(bn(vals).mask) # Incompatible extend and clip with pytest.raises(ValueError, match="not compatible"): mcolors.BoundaryNorm(np.arange(4), 5, extend='both', clip=True) # Too small ncolors argument with pytest.raises(ValueError, match="ncolors must equal or exceed"): mcolors.BoundaryNorm(np.arange(4), 2) with pytest.raises(ValueError, match="ncolors must equal or exceed"): mcolors.BoundaryNorm(np.arange(4), 3, extend='min') with pytest.raises(ValueError, match="ncolors must equal or exceed"): mcolors.BoundaryNorm(np.arange(4), 4, extend='both') # Testing extend keyword, with interpolation (large cmap) bounds = [1, 2, 3] cmap = mpl.colormaps['viridis'] mynorm = mcolors.BoundaryNorm(bounds, cmap.N, extend='both') refnorm = mcolors.BoundaryNorm([0] + bounds + [4], cmap.N) x = np.random.randn(100) * 10 + 2 ref = refnorm(x) ref[ref == 0] = -1 ref[ref == cmap.N - 1] = cmap.N assert_array_equal(mynorm(x), ref) # Without interpolation cmref = mcolors.ListedColormap(['blue', 'red']) cmref.set_over('black') cmref.set_under('white') cmshould = mcolors.ListedColormap(['white', 'blue', 'red', 'black']) assert mcolors.same_color(cmref.get_over(), 'black') assert mcolors.same_color(cmref.get_under(), 'white') refnorm = mcolors.BoundaryNorm(bounds, cmref.N) mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, 
extend='both') assert mynorm.vmin == refnorm.vmin assert mynorm.vmax == refnorm.vmax assert mynorm(bounds[0] - 0.1) == -1 # under assert mynorm(bounds[0] + 0.1) == 1 # first bin -> second color assert mynorm(bounds[-1] - 0.1) == cmshould.N - 2 # next-to-last color assert mynorm(bounds[-1] + 0.1) == cmshould.N # over x = [-1, 1.2, 2.3, 9.6] assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2, 3])) x = np.random.randn(100) * 10 + 2 assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x))) # Just min cmref = mcolors.ListedColormap(['blue', 'red']) cmref.set_under('white') cmshould = mcolors.ListedColormap(['white', 'blue', 'red']) assert mcolors.same_color(cmref.get_under(), 'white') assert cmref.N == 2 assert cmshould.N == 3 refnorm = mcolors.BoundaryNorm(bounds, cmref.N) mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='min') assert mynorm.vmin == refnorm.vmin assert mynorm.vmax == refnorm.vmax x = [-1, 1.2, 2.3] assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2])) x = np.random.randn(100) * 10 + 2 assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x))) # Just max cmref = mcolors.ListedColormap(['blue', 'red']) cmref.set_over('black') cmshould = mcolors.ListedColormap(['blue', 'red', 'black']) assert mcolors.same_color(cmref.get_over(), 'black') assert cmref.N == 2 assert cmshould.N == 3 refnorm = mcolors.BoundaryNorm(bounds, cmref.N) mynorm = mcolors.BoundaryNorm(bounds, cmshould.N, extend='max') assert mynorm.vmin == refnorm.vmin assert mynorm.vmax == refnorm.vmax x = [1.2, 2.3, 4] assert_array_equal(cmshould(mynorm(x)), cmshould([0, 1, 2])) x = np.random.randn(100) * 10 + 2 assert_array_equal(cmshould(mynorm(x)), cmref(refnorm(x)))
119
test_BoundaryNorm
109,399
52
2,192
MNT: convert tests and internal usage way from using mpl.cm.get_cmap
{ "docstring": "\n GitHub issue #1258: interpolation was failing with numpy\n 1.7 pre-release.\n ", "language": "en", "n_whitespaces": 20, "n_words": 10, "vocab_size": 10 }
4
Python
623
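A minimal, self-contained illustration of what the test above exercises: BoundaryNorm maps each value to the index of the bin defined by the boundaries. The boundary and sample values chosen here are assumptions for demonstration.

import numpy as np
import matplotlib.colors as mcolors

norm = mcolors.BoundaryNorm(boundaries=[0, 1, 2, 3], ncolors=3)
print(norm(np.array([0.5, 1.5, 2.5])))   # each value lands in its bin: [0 1 2]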
0
912
e5b1888cd932909e49194d58035da34b210b91c4
https://github.com/modin-project/modin.git
def _join_by_index(self, other_modin_frames, how, sort, ignore_index): if how == "outer": raise NotImplementedError("outer join is not supported in HDK engine") lhs = self._maybe_materialize_rowid() reset_index_names = False for rhs in other_modin_frames: rhs = rhs._maybe_materialize_rowid() if len(lhs._index_cols) != len(rhs._index_cols): raise NotImplementedError( "join by indexes with different sizes is not supported" ) reset_index_names = reset_index_names or lhs._index_cols != rhs._index_cols condition = lhs._build_equi_join_condition( rhs, lhs._index_cols, rhs._index_cols ) exprs = lhs._index_exprs() new_columns = lhs.columns.to_list() for col in lhs.columns: e
315
113
modin
dataframe.py
modin/experimental/core/execution/native/implementations/hdk_on_native/dataframe/dataframe.py
16
36,066
def _join_by_index(self, other_modin_frames, how, sort, ignore_index): if how == "outer": raise NotImplementedError("outer join is not supported in HDK engine") lhs = self._maybe_materialize_rowid() reset_index_names = False for rhs in other_modin_frames: rhs = rhs._maybe_materialize_rowid() if len(lhs._index_cols) != len(rhs._index_cols): raise NotImplementedError( "join by indexes with different sizes is not supported" ) reset_index_names = reset_index_names or lhs._index_cols != rhs._index_cols condition = lhs._build_equi_join_condition( rhs, lhs._index_cols, rhs._index_cols ) exprs = lhs._index_exprs() new_columns = lhs.columns.to_list() for col in lhs.columns: exprs[col] = lhs.ref(col) for col in rhs.columns: # Handle duplicating column names here. When user specifies # suffixes to make a join, actual renaming is done in front-end. new_col_name = col rename_idx = 0 while new_col_name in exprs: new_col_name = f"{col}{rename_idx}" rename_idx += 1 exprs[new_col_name] = rhs.ref(col) new_columns.append(new_col_name) op = JoinNode( lhs, rhs, how=how, exprs=exprs, condition=condition, ) new_columns = Index.__new__( Index, data=new_columns, dtype=self.columns.dtype ) lhs = lhs.__constructor__( dtypes=lhs._dtypes_for_exprs(exprs), columns=new_columns, index_cols=lhs._index_cols, op=op, force_execution_mode=self._force_execution_mode, ) if sort: lhs = lhs.sort_rows( lhs._index_cols, ascending=True, ignore_index=False, na_position="last", ) if reset_index_names: lhs = lhs._reset_index_names() if ignore_index: new_columns = Index.__new__(RangeIndex, data=range(len(lhs.columns))) lhs = lhs._set_columns(new_columns) return lhs
57
_join_by_index
154,556
44
498
FEAT-#4946: Replace OmniSci with HDK (#4947) Co-authored-by: Iaroslav Igoshev <[email protected]> Signed-off-by: Andrey Pavlenko <[email protected]>
{ "docstring": "\n Perform equi-join operation for multiple frames by index columns.\n\n Parameters\n ----------\n other_modin_frames : list of HdkOnNativeDataframe\n Frames to join with.\n how : str\n A type of join.\n sort : bool\n Sort the result by join keys.\n ignore_index : bool\n If True then reset column index for the resulting frame.\n\n Returns\n -------\n HdkOnNativeDataframe\n The new frame.\n ", "language": "en", "n_whitespaces": 188, "n_words": 55, "vocab_size": 43 }
11
Python
171
0
112
e272ed2fa4c58e0a89e273a3e85da7d13a85e04c
https://github.com/OpenMined/PySyft.git
def _object2proto(self) -> RunFunctionOrConstructorAction_PB: return RunFunctionOrConstructorAction_PB( path=self.path, args=[serialize(x, to_bytes=True) for x in self.args], kwargs={k: serialize(v, to_bytes=True) for k, v in self.kwargs.items()}, id_at_location=serialize(self.id_at_location), address=serialize(self.address), msg_id=serialize(s
91
22
PySyft
function_or_constructor_action.py
packages/syft/src/syft/core/node/common/action/function_or_constructor_action.py
13
342
def _object2proto(self) -> RunFunctionOrConstructorAction_PB: return RunFunctionOrConstructorAction_PB( path=self.path, args=[serialize(x, to_bytes=True) for x in self.args], kwargs={k: serialize(v, to_bytes=True) for k, v in self.kwargs.items()}, id_at_location=serialize(self.id_at_location), address=serialize(self.address), msg_id=serialize(self.id), )
23
_object2proto
2,710
16
135
[syft.core.node.common.action] Change syft import absolute -> relative
{ "docstring": "Returns a protobuf serialization of self.\n\n As a requirement of all objects which inherit from Serializable,\n this method transforms the current object into the corresponding\n Protobuf object so that it can be further serialized.\n\n :return: returns a protobuf object\n :rtype: RunFunctionOrConstructorAction_PB\n\n .. note::\n This method is purely an internal method. Please use serialize(object) or one of\n the other public serialization methods if you wish to serialize an\n object.\n ", "language": "en", "n_whitespaces": 150, "n_words": 68, "vocab_size": 56 }
3
Python
25
0
370
dec723f072eb997a497a159dbe8674cd39999ee9
https://github.com/networkx/networkx.git
def truncated_cube_graph(create_using=None): description = [ "adjacencylist", "Truncated Cube Graph", 24, [ [2, 3, 5], [12, 15], [4, 5], [7, 9], [6], [17, 19], [8, 9], [11, 13], [10], [18, 21], [12, 13], [15], [14], [22, 23], [16], [20, 24], [18, 19], [21], [20], [24], [22], [23], [24], [], ],
152
46
networkx
small.py
networkx/generators/small.py
9
41,741
def truncated_cube_graph(create_using=None): description = [ "adjacencylist", "Truncated Cube Graph", 24, [ [2, 3, 5], [12, 15], [4, 5], [7, 9], [6], [17, 19], [8, 9], [11, 13], [10], [18, 21], [12, 13], [15], [14], [22, 23], [16], [20, 24], [18, 19], [21], [20], [24], [22], [23], [24], [], ], ] G = make_small_undirected_graph(description, create_using) return G
34
truncated_cube_graph
176,171
5
197
Docstrings for the small.py module (#5240) * added description for the first 5 small graphs * modified descriptions based on comment and added description for two more functions * added doctrings to all the functions * Minor touchups. Co-authored-by: Ross Barnowski <[email protected]>
{ "docstring": "\n Returns the skeleton of the truncated cube.\n\n The truncated cube is an Archimedean solid with 14 regular\n faces (6 octagonal and 8 triangular), 36 edges and 24 nodes [1]_.\n The truncated cube is created by truncating (cutting off) the tips\n of the cube one third of the way into each edge [2]_.\n\n Parameters\n ----------\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Returns\n -------\n G : networkx Graph\n Skeleton of the truncated cube\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Truncated_cube\n .. [2] https://www.coolmath.com/reference/polyhedra-truncated-cube\n\n ", "language": "en", "n_whitespaces": 153, "n_words": 91, "vocab_size": 68 }
1
Python
56
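A quick usage check of the generator described above, confirming the counts quoted in its docstring (24 nodes, 36 edges).

import networkx as nx

G = nx.truncated_cube_graph()
print(G.number_of_nodes(), G.number_of_edges())   # expected 24 and 36 per the docstring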
0
53
de3fcba9e95818e9634ab7de6bfcb1f4221f2775
https://github.com/wagtail/wagtail.git
def get_admin_urls_for_registration(self): urls = () for instance in self.modeladmin_instances: urls += instance.get_admin_urls_for_registration() return urls
26
12
wagtail
options.py
wagtail/contrib/modeladmin/options.py
10
15,593
def get_admin_urls_for_registration(self):
    urls = ()
    for instance in self.modeladmin_instances:
        urls += instance.get_admin_urls_for_registration()
    return urls
5
get_admin_urls_for_registration
70,994
5
45
Fix warnings from flake8-comprehensions.
{ "docstring": "\n Utilised by Wagtail's 'register_admin_urls' hook to register urls for\n used by any associated ModelAdmin instances\n ", "language": "en", "n_whitespaces": 37, "n_words": 15, "vocab_size": 14 }
2
Python
14
0
63
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
https://github.com/jindongwang/transferlearning.git
def setName(self, name): self.name = name self.errmsg = "Expected " + self.name if __diag__.enable_debug_on_named_expressions: self.setDebug() return self
34
15
transferlearning
pyparsing.py
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
9
13,241
def setName(self, name):
    self.name = name
    self.errmsg = "Expected " + self.name
    if __diag__.enable_debug_on_named_expressions:
        self.setDebug()
    return self
6
setName
63,304
7
59
upd; format
{ "docstring": "\n Define name for this expression, makes debugging and exception messages clearer.\n\n Example::\n\n Word(nums).parseString(\"ABC\") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)\n Word(nums).setName(\"integer\").parseString(\"ABC\") # -> Exception: Expected integer (at char 0), (line:1, col:1)\n ", "language": "en", "n_whitespaces": 80, "n_words": 34, "vocab_size": 25 }
2
Python
17
0
82
1e65a4afd191cf61ba05b80545d23f9b88962f41
https://github.com/modin-project/modin.git
def get_func(cls, key, **kwargs): if "agg_func" in kwargs: return cls.inplace_applyier_builder(key, kwargs["agg_func"]) elif "func_dict" in kwargs: return cls.inplace_applyier_builder(key, kwargs["func_dict"]) else: return cls.inplace_applyier_builder(key)
54
16
modin
groupby.py
modin/core/dataframe/algebra/default2pandas/groupby.py
12
35,257
def get_func(cls, key, **kwargs):
    if "agg_func" in kwargs:
        return cls.inplace_applyier_builder(key, kwargs["agg_func"])
    elif "func_dict" in kwargs:
        return cls.inplace_applyier_builder(key, kwargs["func_dict"])
    else:
        return cls.inplace_applyier_builder(key)
7
get_func
153,097
5
92
FIX-#3197: do not pass lambdas to the backend in GroupBy (#3373) Signed-off-by: Dmitry Chigarev <[email protected]>
{ "docstring": "\n Extract aggregation function from groupby arguments.\n\n Parameters\n ----------\n key : callable or str\n Default aggregation function. If aggregation function is not specified\n via groupby arguments, then `key` function is used.\n **kwargs : dict\n GroupBy arguments that may contain aggregation function.\n\n Returns\n -------\n callable\n Aggregation function.\n\n Notes\n -----\n There are two ways of how groupby aggregation can be invoked:\n 1. Explicitly with query compiler method: `qc.groupby_sum()`.\n 2. By passing aggregation function as an argument: `qc.groupby_agg(\"sum\")`.\n Both are going to produce the same result, however in the first case actual aggregation\n function can be extracted from the method name, while for the second only from the method arguments.\n ", "language": "en", "n_whitespaces": 271, "n_words": 106, "vocab_size": 78 }
3
Python
21
0
44
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
https://github.com/ray-project/ray.git
def update_scheduler(self, metric): self.worker_group.apply_all_operators( lambda op: [sched.step(m
32
12
ray
torch_trainer.py
python/ray/util/sgd/torch/torch_trainer.py
11
29,983
def update_scheduler(self, metric): self.worker_group.apply_all_operators( lambda op: [sched.step(metric) for sched in op._schedulers] )
4
update_scheduler
133,351
9
52
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
{ "docstring": "Calls ``scheduler.step(metric)`` on all registered schedulers.\n\n This is useful for lr_schedulers such as ``ReduceLROnPlateau``.\n ", "language": "en", "n_whitespaces": 28, "n_words": 14, "vocab_size": 14 }
2
Python
12
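Standalone sketch of why schedulers need step(metric), which is what update_scheduler forwards to each worker: ReduceLROnPlateau lowers the learning rate when the supplied validation metric stops improving. The model, optimizer, and constant metric here are placeholders.

import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2)
for epoch in range(5):
    val_loss = 1.0              # placeholder; normally computed on a validation set
    scheduler.step(val_loss)    # the metric drives the LR schedule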
0
56
a5b70b3132467b5e3616178d9ecca6cb7316c400
https://github.com/scikit-learn/scikit-learn.git
def paired_cosine_distances(X, Y): X, Y = c
39
27
scikit-learn
pairwise.py
sklearn/metrics/pairwise.py
11
75,273
def paired_cosine_distances(X, Y):
    X, Y = check_paired_arrays(X, Y)
    return 0.5 * row_norms(normalize(X) - normalize(Y), squared=True)


PAIRED_DISTANCES = {
    "cosine": paired_cosine_distances,
    "euclidean": paired_euclidean_distances,
    "l2": paired_euclidean_distances,
    "l1": paired_manhattan_distances,
    "manhattan": paired_manhattan_distances,
    "cityblock": paired_manhattan_distances,
}
3
paired_cosine_distances
258,521
10
108
DOC Ensures that sklearn.metrics.pairwise.paired_cosine_distances passes numpydoc validation (#22141) Co-authored-by: Thomas J. Fan <[email protected]>
{ "docstring": "\n Compute the paired cosine distances between X and Y.\n\n Read more in the :ref:`User Guide <metrics>`.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n An array where each row is a sample and each column is a feature.\n\n Y : array-like of shape (n_samples, n_features)\n An array where each row is a sample and each column is a feature.\n\n Returns\n -------\n distances : ndarray of shape (n_samples,)\n Returns the distances between the row vectors of `X`\n and the row vectors of `Y`, where `distances[i]` is the\n distance between `X[i]` and `Y[i]`.\n\n Notes\n -----\n The cosine distance is equivalent to the half the squared\n euclidean distance if each sample is normalized to unit norm.\n ", "language": "en", "n_whitespaces": 192, "n_words": 114, "vocab_size": 57 }
1
Python
31
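A small sanity check of the identity stated in the docstring's Notes section: the cosine distance equals half the squared euclidean distance of the unit-normalized rows. The sample arrays are arbitrary.

import numpy as np
from sklearn.metrics.pairwise import paired_cosine_distances
from sklearn.preprocessing import normalize

X = np.array([[1.0, 2.0, 3.0], [0.0, 1.0, 0.0]])
Y = np.array([[3.0, 2.0, 1.0], [1.0, 1.0, 1.0]])
d = paired_cosine_distances(X, Y)
Xn, Yn = normalize(X), normalize(Y)
print(np.allclose(d, 0.5 * np.sum((Xn - Yn) ** 2, axis=1)))  # True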
0
131
897a8dd89f40817201bc4aebe532a096405bdeb1
https://github.com/huggingface/transformers.git
def torchdynamo_smart_context_manager(self): ctx_manager = contextlib.nullcontext() if is_torchdynamo_available(): import torchdynamo from torchdy
64
20
transformers
trainer.py
src/transformers/trainer.py
13
5,648
def torchdynamo_smart_context_manager(self):
    ctx_manager = contextlib.nullcontext()
    if is_torchdynamo_available():
        import torchdynamo
        from torchdynamo.optimizations.training import aot_autograd_speedup_strategy

        if self.args.torchdynamo == "eager":
            ctx_manager = torchdynamo.optimize("eager")
        elif self.args.torchdynamo == "nvfuser":
            ctx_manager = torchdynamo.optimize(aot_autograd_speedup_strategy)
    return ctx_manager
10
torchdynamo_smart_context_manager
30,695
12
112
Support compilation via Torchdynamo, AOT Autograd, NVFuser (#17308) * Support compilation via Torchdynamo, AOT Autograd, NVFuser * Address comments * Lint * Stas comments - missing quality test * Lintere * Quality test * Doc lint * Reset CUDA peak mem * Add CustomTrainer * require a single gpu Co-authored-by: Stas Bekman <[email protected]>
{ "docstring": "\n A helper wrapper that creates an appropriate context manager for `torchdynamo`.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
4
Python
29
0
110
7d9e9a49005de7961e84d2a7c608db57dbab3046
https://github.com/certbot/certbot.git
def check_aug_version(self) -> bool: self.aug.set("/test/path/testing/arg", "aRgUMeNT") try: matches = self.aug.match( "/test//*[self::arg=~regexp('argument', 'i')]") except RuntimeError: self.aug.remove("/test/path") return False self.aug.remove("/test/path") return matches
53
17
certbot
parser.py
certbot-apache/certbot_apache/_internal/parser.py
11
45,584
def check_aug_version(self) -> bool:
    self.aug.set("/test/path/testing/arg", "aRgUMeNT")
    try:
        matches = self.aug.match(
            "/test//*[self::arg=~regexp('argument', 'i')]")
    except RuntimeError:
        self.aug.remove("/test/path")
        return False
    self.aug.remove("/test/path")
    return matches
13
check_aug_version
186,677
9
98
Add typing to certbot.apache (#9071) * Add typing to certbot.apache Co-authored-by: Adrien Ferrand <[email protected]>
{ "docstring": " Checks that we have recent enough version of libaugeas.\n If augeas version is recent enough, it will support case insensitive\n regexp matching", "language": "en", "n_whitespaces": 36, "n_words": 22, "vocab_size": 20 }
2
Python
20
0
42
ca86da3a30c4e080d4db8c25fca73de843663cb4
https://github.com/Stability-AI/stablediffusion.git
def resize_depth(depth, width, height): depth = torch.squeeze(depth[0, :, :, :]).to("cpu") depth_resized = cv2.resize( depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC ) return depth_resized
58
17
stablediffusion
utils.py
ldm/modules/midas/utils.py
12
37,003
def resize_depth(depth, width, height):
    depth = torch.squeeze(depth[0, :, :, :]).to("cpu")
    depth_resized = cv2.resize(
        depth.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
    )
    return depth_resized
6
resize_depth
157,635
13
91
release more models
{ "docstring": "Resize depth map and bring to CPU (numpy).\n\n Args:\n depth (tensor): depth\n width (int): image width\n height (int): image height\n\n Returns:\n array: processed depth\n ", "language": "en", "n_whitespaces": 61, "n_words": 24, "vocab_size": 17 }
1
Python
20
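A hedged usage sketch of the helper above on a dummy depth tensor; the (1, 1, H, W) layout and the import path are inferred from the record's path field and docstring, not verified against the repository.

import torch
from ldm.modules.midas.utils import resize_depth   # import path taken from the record's `path` field

depth = torch.rand(1, 1, 96, 128)                   # assumed (batch, channel, H, W) layout
out = resize_depth(depth, width=256, height=192)
print(out.shape)                                    # (192, 256): cv2.resize returns rows x cols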
0
729
cda8dfe6f45dc5ed394c2f5cda706cd6c729f713
https://github.com/sympy/sympy.git
def comp(z1, z2, tol=None): r if type(z2) is str: if not
381
107
sympy
numbers.py
sympy/core/numbers.py
24
47,440
def comp(z1, z2, tol=None): r if type(z2) is str: if not pure_complex(z1, or_real=True): raise ValueError('when z2 is a str z1 must be a Number') return str(z1) == z2 if not z1: z1, z2 = z2, z1 if not z1: return True if not tol: a, b = z1, z2 if tol == '': return str(a) == str(b) if tol is None: a, b = sympify(a), sympify(b) if not all(i.is_number for i in (a, b)): raise ValueError('expecting 2 numbers') fa = a.atoms(Float) fb = b.atoms(Float) if not fa and not fb: # no floats -- compare exactly return a == b # get a to be pure_complex for _ in range(2): ca = pure_complex(a, or_real=True) if not ca: if fa: a = a.n(prec_to_dps(min([i._prec for i in fa]))) ca = pure_complex(a, or_real=True) break else: fa, fb = fb, fa a, b = b, a cb = pure_complex(b) if not cb and fb: b = b.n(prec_to_dps(min([i._prec for i in fb]))) cb = pure_complex(b, or_real=True) if ca and cb and (ca[1] or cb[1]): return all(comp(i, j) for i, j in zip(ca, cb)) tol = 10**prec_to_dps(min(a._prec, getattr(b, '_prec', a._prec))) return int(abs(a - b)*tol) <= 5 diff = abs(z1 - z2) az1 = abs(z1) if z2 and az1 > 1: return diff/az1 <= tol else: return diff <= tol
105
comp
195,853
34
605
Improved documentation formatting
{ "docstring": "Return a bool indicating whether the error between z1 and z2\n is $\\le$ ``tol``.\n\n Examples\n ========\n\n If ``tol`` is ``None`` then ``True`` will be returned if\n :math:`|z1 - z2|\\times 10^p \\le 5` where $p$ is minimum value of the\n decimal precision of each value.\n\n >>> from sympy import comp, pi\n >>> pi4 = pi.n(4); pi4\n 3.142\n >>> comp(_, 3.142)\n True\n >>> comp(pi4, 3.141)\n False\n >>> comp(pi4, 3.143)\n False\n\n A comparison of strings will be made\n if ``z1`` is a Number and ``z2`` is a string or ``tol`` is ''.\n\n >>> comp(pi4, 3.1415)\n True\n >>> comp(pi4, 3.1415, '')\n False\n\n When ``tol`` is provided and $z2$ is non-zero and\n :math:`|z1| > 1` the error is normalized by :math:`|z1|`:\n\n >>> abs(pi4 - 3.14)/pi4\n 0.000509791731426756\n >>> comp(pi4, 3.14, .001) # difference less than 0.1%\n True\n >>> comp(pi4, 3.14, .0005) # difference less than 0.1%\n False\n\n When :math:`|z1| \\le 1` the absolute error is used:\n\n >>> 1/pi4\n 0.3183\n >>> abs(1/pi4 - 0.3183)/(1/pi4)\n 3.07371499106316e-5\n >>> abs(1/pi4 - 0.3183)\n 9.78393554684764e-6\n >>> comp(1/pi4, 0.3183, 1e-5)\n True\n\n To see if the absolute error between ``z1`` and ``z2`` is less\n than or equal to ``tol``, call this as ``comp(z1 - z2, 0, tol)``\n or ``comp(z1 - z2, tol=tol)``:\n\n >>> abs(pi4 - 3.14)\n 0.00160156249999988\n >>> comp(pi4 - 3.14, 0, .002)\n True\n >>> comp(pi4 - 3.14, 0, .001)\n False\n ", "language": "en", "n_whitespaces": 363, "n_words": 217, "vocab_size": 120 }
26
Python
213
0
491
36c1f477b273cb2fb0dea3c921ec267db877c039
https://github.com/open-mmlab/mmdetection.git
def _parse_img_level_ann(self, image_level_ann_file): item_lists = defaultdict(list) with self.file_client.get_local_path( image_level_ann_file) as local_path: with open(local_path, 'r') as f: reader = csv.reader(f) i = -1 for line
122
45
mmdetection
openimages.py
mmdet/datasets/openimages.py
19
70,677
def _parse_img_level_ann(self, image_level_ann_file): item_lists = defaultdict(list) with self.file_client.get_local_path( image_level_ann_file) as local_path: with open(local_path, 'r') as f: reader = csv.reader(f) i = -1 for line in reader: i += 1 if i == 0: continue else: img_id = line[0] label_id = line[1] assert label_id in self.label_id_mapping image_level_label = int( self.label_id_mapping[label_id]) confidence = float(line[2]) item_lists[img_id].append( dict( image_level_label=image_level_label, confidence=confidence)) return item_lists
23
_parse_img_level_ann
245,152
24
201
Refactor OpenImages.
{ "docstring": "Parse image level annotations from csv style ann_file.\n\n Args:\n image_level_ann_file (str): CSV style image level annotation\n file path.\n\n Returns:\n defaultdict[list[dict]]: Annotations where item of the defaultdict\n indicates an image, each of which has (n) dicts.\n Keys of dicts are:\n\n - `image_level_label` (int): of shape 1.\n - `confidence` (float): of shape 1.\n ", "language": "en", "n_whitespaces": 161, "n_words": 51, "vocab_size": 41 }
3
Python
58
0
32
8198943edd73a363c266633e1aa5b2a9e9c9f526
https://github.com/XX-net/XX-Net.git
def logical_and(self, a, b): a = _convert
31
11
XX-Net
_pydecimal.py
python3.10.4/Lib/_pydecimal.py
9
55,789
def logical_and(self, a, b):
    a = _convert_other(a, raiseit=True)
    return a.logical_and(b, context=self)
3
logical_and
219,771
7
48
add python 3.10.4 for windows
{ "docstring": "Applies the logical operation 'and' between each operand's digits.\n\n The operands must be both logical numbers.\n\n >>> ExtendedContext.logical_and(Decimal('0'), Decimal('0'))\n Decimal('0')\n >>> ExtendedContext.logical_and(Decimal('0'), Decimal('1'))\n Decimal('0')\n >>> ExtendedContext.logical_and(Decimal('1'), Decimal('0'))\n Decimal('0')\n >>> ExtendedContext.logical_and(Decimal('1'), Decimal('1'))\n Decimal('1')\n >>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010'))\n Decimal('1000')\n >>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10'))\n Decimal('10')\n >>> ExtendedContext.logical_and(110, 1101)\n Decimal('100')\n >>> ExtendedContext.logical_and(Decimal(110), 1101)\n Decimal('100')\n >>> ExtendedContext.logical_and(110, Decimal(1101))\n Decimal('100')\n ", "language": "en", "n_whitespaces": 192, "n_words": 52, "vocab_size": 33 }
1
Python
11
0
587
65be461082dda54c8748922f9c29a19af1279fe1
https://github.com/sympy/sympy.git
def decrement_part_small(self, part, ub): if self.lpart >= ub - 1: self.p1 += 1 # increment to keep track of usefulness of tests return False plen = len(part) for j in range(plen - 1, -1, -1): # Knuth's mod, (answer to problem 7.2.1.5.69) if j == 0 and (part[0].v - 1)*(ub - self.lpart) < part[0].u: self.k1 += 1 return False if j == 0 and part[j].v > 1 or
214
114
sympy
enumerative.py
sympy/utilities/enumerative.py
18
48,514
def decrement_part_small(self, part, ub): if self.lpart >= ub - 1: self.p1 += 1 # increment to keep track of usefulness of tests return False plen = len(part) for j in range(plen - 1, -1, -1): # Knuth's mod, (answer to problem 7.2.1.5.69) if j == 0 and (part[0].v - 1)*(ub - self.lpart) < part[0].u: self.k1 += 1 return False if j == 0 and part[j].v > 1 or j > 0 and part[j].v > 0: # found val to decrement part[j].v -= 1 # Reset trailing parts back to maximum for k in range(j + 1, plen): part[k].v = part[k].u # Have now decremented part, but are we doomed to # failure when it is expanded? Check one oddball case # that turns out to be surprisingly common - exactly # enough room to expand the leading component, but no # room for the second component, which has v=0. if (plen > 1 and part[1].v == 0 and (part[0].u - part[0].v) == ((ub - self.lpart - 1) * part[0].v)): self.k2 += 1 self.db_trace("Decrement fails test 3") return False return True return False
21
decrement_part_small
197,371
16
333
Remove abbreviations in documentation
{ "docstring": "Decrements part (a subrange of pstack), if possible, returning\n True iff the part was successfully decremented.\n\n Parameters\n ==========\n\n part\n part to be decremented (topmost part on the stack)\n\n ub\n the maximum number of parts allowed in a partition\n returned by the calling traversal.\n\n Notes\n =====\n\n The goal of this modification of the ordinary decrement method\n is to fail (meaning that the subtree rooted at this part is to\n be skipped) when it can be proved that this part can only have\n child partitions which are larger than allowed by ``ub``. If a\n decision is made to fail, it must be accurate, otherwise the\n enumeration will miss some partitions. But, it is OK not to\n capture all the possible failures -- if a part is passed that\n should not be, the resulting too-large partitions are filtered\n by the enumeration one level up. However, as is usual in\n constrained enumerations, failing early is advantageous.\n\n The tests used by this method catch the most common cases,\n although this implementation is by no means the last word on\n this problem. The tests include:\n\n 1) ``lpart`` must be less than ``ub`` by at least 2. This is because\n once a part has been decremented, the partition\n will gain at least one child in the spread step.\n\n 2) If the leading component of the part is about to be\n decremented, check for how many parts will be added in\n order to use up the unallocated multiplicity in that\n leading component, and fail if this number is greater than\n allowed by ``ub``. (See code for the exact expression.) This\n test is given in the answer to Knuth's problem 7.2.1.5.69.\n\n 3) If there is *exactly* enough room to expand the leading\n component by the above test, check the next component (if\n it exists) once decrementing has finished. If this has\n ``v == 0``, this next component will push the expansion over the\n limit by 1, so fail.\n ", "language": "en", "n_whitespaces": 637, "n_words": 319, "vocab_size": 181 }
13
Python
182
0
40
90cea203befa8f2e86e9c1c18bb3972296358e7b
https://github.com/ray-project/ray.git
def get_node_id(self) -> str: node_id = self.worker.current_node_id assert not node_id.is_nil() return node_i
28
12
ray
runtime_context.py
python/ray/runtime_context.py
8
27,935
def get_node_id(self) -> str:
    node_id = self.worker.current_node_id
    assert not node_id.is_nil()
    return node_id.hex()
12
get_node_id
125,638
8
49
Ray 2.0 API deprecation (#26116) Ray 2.0 API deprecation for: ray.remote(): placement_group ray.remote(): placement_group_bundle_index ray.remote(): placement_group_capture_child_tasks ray.get_dashboard_url() ray.get_resource_ids() ray.disconnect() ray.connect() ray.util.ActorGroup ray.util.ActorPool Add get_xx_id() to return hex (rather than object), and then deprecate the xx_id() (which returns Cython object): the xx here can be node, task etc. ray start: --plasma-store-socket-name ray start: --raylet-socket-name
{ "docstring": "Get current node ID for this worker or driver.\n\n Node ID is the id of a node that your driver, task, or actor runs.\n The ID will be in hex format.\n\n Returns:\n A node id in hex format for this worker or driver.\n ", "language": "en", "n_whitespaces": 82, "n_words": 43, "vocab_size": 30 }
1
Python
12
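Hedged driver-side usage of the accessor above; calling ray.init() with no arguments to start a local cluster is an assumption about the environment.

import ray

ray.init()
node_id = ray.get_runtime_context().get_node_id()   # hex string, per the docstring
print(node_id)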
0
57
6c4e2810285af0698538aed9d46a99de085eb310
https://github.com/qutebrowser/qutebrowser.git
def list_option(*, info): return _option( info, "List options", lambda opt: (isinstance(info.config.get_obj(op
41
16
qutebrowser
configmodel.py
qutebrowser/completion/models/configmodel.py
15
117,392
def list_option(*, info): return _option( info, "List options", lambda opt: (isinstance(info.config.get_obj(opt.name), list) and not opt.no_autoconfig) )
7
list_option
320,849
10
67
pylint: Fix new unnecessary-lambda-assignment
{ "docstring": "A CompletionModel filled with settings whose values are lists.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
2
Python
16
1
201
2e7ee756eb1d55080d707cef63454788a7abb6be
https://github.com/airbytehq/airbyte.git
def get_instance_from_config_with_end_date(config, query): start_date = "2021-03-04" end_date = "2021-04-04" conversion_window_days = 14 google_api = GoogleAds(credentials=config["credentials"], customer_id=config["customer_id"]) instance = CustomQuery( api=google_api, conversion_window_days=conversion_window_days, start_date=start_date, end_date=end_date, time_zone="local", custom_query_config={"query": query, "table_name": "whatever_table"}, ) return instance @pytest.mark.parametrize( "query,
73
44
airbyte
test_source.py
airbyte-integrations/connectors/source-google-ads/unit_tests/test_source.py
12
@pytest.mark.parametrize( "query, fields", [ ( """ SELecT campaign.id, campaign.name, campaign.status, metrics.impressions FROM campaign wheRe campaign.status = 'PAUSED' AND metrics.impressions > 100 order by campaign.status """, ["campaign.id", "campaign.name", "campaign.status", "metrics.impressions"], ), ( """ SELECT campaign.accessible_bidding_strategy, segments.ad_destination_type, campaign.start_date, campaign.end_date FROM campaign """, ["campaign.accessible_bidding_strategy", "segments.ad_destination_type", "campaign.start_date", "campaign.end_date"], ), ("""selet aasdasd from aaa""", []), ], )
480
def get_instance_from_config_with_end_date(config, query): start_date = "2021-03-04" end_date = "2021-04-04" conversion_window_days = 14 google_api = GoogleAds(credentials=config["credentials"], customer_id=config["customer_id"]) instance = CustomQuery( api=google_api, conversion_window_days=conversion_window_days, start_date=start_date, end_date=end_date, time_zone="local", custom_query_config={"query": query, "table_name": "whatever_table"}, ) return instance @pytest.mark.parametrize( "query, fields", [ ( , ["campaign.id", "campaign.name", "campaign.status", "metrics.impressions"], ), ( , ["campaign.accessible_bidding_strategy", "segments.ad_destination_type", "campaign.start_date", "campaign.end_date"], ), (, []), ], )
14
get_instance_from_config_with_end_date
3,546
18
208
Source GoogleAds: add end_date to config (#8669) * GoogleAds add end_date to config * Update script following review comments * Add unit test * Solve conflicts * Solve conflicts in MR * Update test_google_ads.py Instanciate IncrementalGoogleAdsStream in tests + add missing line between functions * Update test_source.py remove extra hashtag * Update tests with missing params * Add missing time_zone param * merge user code * run format * revert unit test stream count * remove error log file * bump connector version * run seed file Co-authored-by: Marcos Marx <[email protected]>
{ "docstring": "\n SELecT\n campaign.id,\n campaign.name,\n campaign.status,\n metrics.impressions FROM campaign\nwheRe campaign.status = 'PAUSED'\nAND metrics.impressions > 100\norder by campaign.status\n \n SELECT\n campaign.accessible_bidding_strategy,\n segments.ad_destination_type,\n campaign.start_date,\n campaign.end_date\n FROM campaign\n selet aasdasd from aaa", "language": "en", "n_whitespaces": 98, "n_words": 29, "vocab_size": 25 }
1
Python
53
0
144
34d9d630bb02426d297d3e20fedb7da8c3ced03a
https://github.com/networkx/networkx.git
def node_degree_xy(G, x="out", y="in", weight=None, nodes=None): nodes = set(G) if nodes is None else set(nodes) if G.is_directed(): direction = {"out": G.out_degree, "in": G.in_degree} xdeg = direction[x] ydeg = direction[y] else: xdeg = ydeg = G.degree for u, degu in xdeg(nodes, weight=weight): # use G.edges to treat multigraphs correctly neighbors = (nbr for _, nbr in G.edges(u) if nbr in nodes) fo
132
49
networkx
pairs.py
networkx/algorithms/assortativity/pairs.py
12
41,838
def node_degree_xy(G, x="out", y="in", weight=None, nodes=None):
    nodes = set(G) if nodes is None else set(nodes)
    if G.is_directed():
        direction = {"out": G.out_degree, "in": G.in_degree}
        xdeg = direction[x]
        ydeg = direction[y]
    else:
        xdeg = ydeg = G.degree

    for u, degu in xdeg(nodes, weight=weight):
        # use G.edges to treat multigraphs correctly
        neighbors = (nbr for _, nbr in G.edges(u) if nbr in nodes)
        for _, degv in ydeg(neighbors, weight=weight):
            yield degu, degv
12
node_degree_xy
176,324
21
209
MAINT: Cleanup assortativity module, remove unused variables (#5301) Remove unused variables, sort imports, raise errors instead of accepting invalid arguments silently Co-authored-by: Dan Schult <[email protected]>
{ "docstring": "Generate node degree-degree pairs for edges in G.\n\n Parameters\n ----------\n G: NetworkX graph\n\n x: string ('in','out')\n The degree type for source node (directed graphs only).\n\n y: string ('in','out')\n The degree type for target node (directed graphs only).\n\n weight: string or None, optional (default=None)\n The edge attribute that holds the numerical value used\n as a weight. If None, then each edge has weight 1.\n The degree is the sum of the edge weights adjacent to the node.\n\n nodes: list or iterable (optional)\n Use only edges that are adjacency to specified nodes.\n The default is all nodes.\n\n Returns\n -------\n (x, y): 2-tuple\n Generates 2-tuple of (degree, degree) values.\n\n\n Examples\n --------\n >>> G = nx.DiGraph()\n >>> G.add_edge(1, 2)\n >>> list(nx.node_degree_xy(G, x=\"out\", y=\"in\"))\n [(1, 1)]\n >>> list(nx.node_degree_xy(G, x=\"in\", y=\"out\"))\n [(0, 0)]\n\n Notes\n -----\n For undirected graphs each edge is produced twice, once for each edge\n representation (u, v) and (v, u), with the exception of self-loop edges\n which only appear once.\n ", "language": "en", "n_whitespaces": 281, "n_words": 157, "vocab_size": 111 }
7
Python
69
0
88
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
https://github.com/ray-project/ray.git
def validate(self, num_steps=None, profile=False, reduce_results=True, info=None): worker_stats = self.worker_group.validate(
56
18
ray
torch_trainer.py
python/ray/util/sgd/torch/torch_trainer.py
9
29,985
def validate(self, num_steps=None, profile=False, reduce_results=True, info=None):
    worker_stats = self.worker_group.validate(
        num_steps=num_steps, profile=profile, info=info
    )
    if reduce_results:
        return self._process_stats(worker_stats)
    else:
        return worker_stats
8
validate
133,353
9
85
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
{ "docstring": "Evaluates the model on the validation data set.\n\n Args:\n num_steps (int): Number of batches to compute update steps on\n per worker. This corresponds also to the number of times\n ``TrainingOperator.validate_batch`` is called per worker.\n profile (bool): Returns time stats for the evaluation procedure.\n reduce_results (bool): Whether to average all metrics across\n all workers into one dict. If a metric is a non-numerical\n value (or nested dictionaries), one value will be randomly\n selected among the workers. If False, returns a list of dicts.\n info (dict): Optional dictionary passed to the training\n operator for `validate` and `validate_batch`.\n\n Returns:\n A dictionary of metrics for validation.\n You can provide custom metrics by passing in a custom\n ``training_operator_cls``.\n ", "language": "en", "n_whitespaces": 309, "n_words": 113, "vocab_size": 84 }
2
Python
20
0
29
cc4d0564756ca067516f71718a3d135996525909
https://github.com/jindongwang/transferlearning.git
def set_raw_scale(self, in_, scale): self.__check_input(in_) self.raw_scale[in_] = scale
24
8
transferlearning
io.py
code/deep/BJMMD/caffe/python/caffe/io.py
8
12,047
def set_raw_scale(self, in_, scale): self.__check_input(in_) self.raw_scale[in_] = scale
3
set_raw_scale
60,255
6
39
Balanced joint maximum mean discrepancy for deep transfer learning
{ "docstring": "\n Set the scale of raw features s.t. the input blob = input * scale.\n While Python represents images in [0, 1], certain Caffe models\n like CaffeNet and AlexNet represent images in [0, 255] so the raw_scale\n of these models must be 255.\n\n Parameters\n ----------\n in_ : which input to assign this scale factor\n scale : scale coefficient\n ", "language": "en", "n_whitespaces": 121, "n_words": 57, "vocab_size": 44 }
1
Python
8
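A hedged sketch of how set_raw_scale is typically driven through caffe.io.Transformer; it assumes a working pycaffe build and a CaffeNet-style 227x227 input shape, neither of which the record itself specifies.

import caffe  # assumes pycaffe is importable

# Preprocessing for a model trained on [0, 255] images (e.g. CaffeNet/AlexNet).
transformer = caffe.io.Transformer({"data": (1, 3, 227, 227)})
transformer.set_transpose("data", (2, 0, 1))   # HxWxC -> CxHxW
transformer.set_raw_scale("data", 255)         # rescale [0, 1] inputs to [0, 255]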
0
238
d1aa5608979891e3dd859c07fa919fa01cfead5f
https://github.com/ray-project/ray.git
def test_add_rule_to_best_shard(): # If we start with an empty list, then add to first shard shards: List[List[bazel_sharding.BazelRule]] = [list() for _ in range(4)] optimum = 600 rule = bazel_sharding.BazelRule("mock", "medium") bazel_sharding.add_rule_to_best_shard(rule, shards, optimum) assert shards[0][0] == rule assert all(not shard for shard in shards[1:]) # Add to first shard below optimum old_rule = bazel_sharding.BazelRule("mock", "medium") shards: List[List[bazel_sharding.BazelRule]] = [[old_rule] for _ in range(4)] shards[3] = [] optimum = old_rule.actual_timeout_s rule = bazel_sharding.BazelRule("mock", "small") bazel_sharding.add_rule_to_best_shard(rule, shards, optimum) assert shards[3][0] == rule assert all(shard[-1] == old_rule for shard in shards[0:3]) # If all shards are above or equal optimum, add to the one with the smallest # difference old_rule = bazel_sharding.BazelRule("mock", "large") shards: List[List[bazel_sharding.BazelRule]] = [[old_rule] for _ in range(4)] optimum = old_rule.actual_timeout_s old_rule_medium = bazel_sharding.BazelRule("mock", "medium") shards[3][0] = old_rule_medium rule = bazel_sharding.BazelRule("mock", "small") bazel_sharding.add_rule_to_best_shard(rule, shards, optimum) assert shards[3][0] == old_rule_medium assert shards[3][-1] == rule assert all(shard[-1] == old_rule for shard in shards[0:3])
291
61
ray
test_bazel_sharding.py
ci/run/bazel_sharding/tests/test_bazel_sharding.py
10
30,178
def test_add_rule_to_best_shard(): # If we start with an empty list, then add to first shard shards: List[List[bazel_sharding.BazelRule]] = [list() for _ in range(4)] optimum = 600 rule = bazel_sharding.BazelRule("mock", "medium") bazel_sharding.add_rule_to_best_shard(rule, shards, optimum) assert shards[0][0] == rule assert all(not shard for shard in shards[1:]) # Add to first shard below optimum old_rule = bazel_sharding.BazelRule("mock", "medium") shards: List[List[bazel_sharding.BazelRule]] = [[old_rule] for _ in range(4)] shards[3] = [] optimum = old_rule.actual_timeout_s rule = bazel_sharding.BazelRule("mock", "small") bazel_sharding.add_rule_to_best_shard(rule, shards, optimum) assert shards[3][0] == rule assert all(shard[-1] == old_rule for shard in shards[0:3]) # If all shards are above or equal optimum, add to the one with the smallest # difference old_rule = bazel_sharding.BazelRule("mock", "large") shards: List[List[bazel_sharding.BazelRule]] = [[old_rule] for _ in range(4)] optimum = old_rule.actual_timeout_s old_rule_medium = bazel_sharding.BazelRule("mock", "medium") shards[3][0] = old_rule_medium rule = bazel_sharding.BazelRule("mock", "small") bazel_sharding.add_rule_to_best_shard(rule, shards, optimum) assert shards[3][0] == old_rule_medium assert shards[3][-1] == rule assert all(shard[-1] == old_rule for shard in shards[0:3])
25
test_add_rule_to_best_shard
134,046
16
460
[CI] Make bazel sharding for parallel buildkite more intelligent (#29221) This PR implements two changes to our `bazel-sharding.py` script, used for determining which bazel tests to run on each instance when buildkite parallelism is used: * An ability to filter tests before they are sharded, using the same logic as `bazel test`. This is done by specifying the `--tag_filters` argument, eg. `--tag_filters=air,-gpu`. If we filter tests with `bazel test` *after* they are sharded, we can end up with imbalanced shards as eg. all tests we want to filter out are assigned to one shard. This feature is enabled for Serve tests and it will be required for the changes I want to make to AIR CI. * A new algorithm to balance the shards, finally implementing what that comment was asking for all this time. Instead of trying to assign the same number of tests (which have variable timeouts) to each shard, the new algorithm tries to make sure each shard will finish in more or less the same time. This is achieved through a simple but good enough heuristic. The old algorithm can still be accessed through the `--sharding_strategy` argument. Those two changes do cause the complexity of the script to increase, necessitating proper testing. In order to facilitate that, this PR also adds a basic buildkite test harness for CI tools/scripts. After this PR is merged, the next step will be to move most of our manually parallelized jobs to use buildkite parallelism with the new logic here. Signed-off-by: Antoni Baum <[email protected]>
{ "docstring": "Test that the best shard in optimal strategy is chosen correctly.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
7
Python
151
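The commit message above only describes the time-balancing heuristic in prose; as a generic illustration (not the actual bazel_sharding code), a longest-processing-time greedy sketch looks like this:

from typing import List, Tuple

def balance_by_time(items: List[Tuple[str, float]], num_shards: int) -> List[List[str]]:
    # Assign the slowest remaining item to the currently least-loaded shard.
    shards: List[List[str]] = [[] for _ in range(num_shards)]
    loads = [0.0] * num_shards
    for name, seconds in sorted(items, key=lambda it: it[1], reverse=True):
        best = min(range(num_shards), key=loads.__getitem__)
        shards[best].append(name)
        loads[best] += seconds
    return shards

print(balance_by_time([("big", 600.0), ("mid", 300.0), ("mid2", 300.0), ("small", 60.0)], 2))
# -> [['big', 'small'], ['mid', 'mid2']]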
0
37
6c38a6b5697bcf4587e00101771001bf596974f9
https://github.com/home-assistant/core.git
def async_heartbeat(self) -> None: self._computed_state = False self._restart_timer() self.async_write_ha_stat
23
9
core
binary_sensor.py
homeassistant/components/isy994/binary_sensor.py
7
110,798
def async_heartbeat(self) -> None: self._computed_state = False self._restart_timer() self.async_write_ha_state()
11
async_heartbeat
312,146
5
42
Enable strict typing for isy994 (#65439) Co-authored-by: Martin Hjelmare <[email protected]>
{ "docstring": "Mark the device as online, and restart the 25 hour timer.\n\n This gets called when the heartbeat node beats, but also when the\n parent sensor sends any events, as we can trust that to mean the device\n is online. This mitigates the risk of false positives due to a single\n missed heartbeat event.\n ", "language": "en", "n_whitespaces": 88, "n_words": 53, "vocab_size": 42 }
1
Python
9
0
49
7e23a37e1c5bda81234801a6584563e2880769eb
https://github.com/pandas-dev/pandas.git
def test_assert_series_equal_interval_dtype_mismatch(): # https://github.com/pandas-dev/pandas/issues/32747 left = Series([pd.Interval(0, 1, "right")], dtype="interval") right = left.astype(object) msg = tm.assert_series_equal(left, right, check_dtype=False) with pytest.raises(AssertionError, match=msg): tm.assert_series_equal(left, right, check_dtype=True)
72
20
pandas
test_assert_series_equal.py
pandas/tests/util/test_assert_series_equal.py
12
39,861
def test_assert_series_equal_interval_dtype_mismatch(): # https://github.com/pandas-dev/pandas/issues/32747 left = Series([pd.Interval(0, 1, "right")], dtype="interval") right = left.astype(object) msg = tm.assert_series_equal(left, right, check_dtype=False) with pytest.raises(AssertionError, match=msg): tm.assert_series_equal(left, right, check_dtype=True)
11
test_assert_series_equal_interval_dtype_mismatch
166,848
17
123
ENH: consistency of input args for boundaries - Interval (#46522)
{ "docstring": "Attributes of Series are different\n\nAttribute \"dtype\" are different\n\\\\[left\\\\]: interval\\\\[int64, right\\\\]\n\\\\[right\\\\]: object", "language": "en", "n_whitespaces": 11, "n_words": 14, "vocab_size": 12 }
1
Python
24
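A minimal reproduction of the behaviour the pandas test above asserts, sketched against the public pandas.testing API instead of the internal pandas._testing module used in the record:

import pandas as pd
from pandas.testing import assert_series_equal

left = pd.Series([pd.Interval(0, 1, "right")], dtype="interval")
right = left.astype(object)

assert_series_equal(left, right, check_dtype=False)   # passes: dtypes are ignored

try:
    assert_series_equal(left, right, check_dtype=True)
except AssertionError as err:
    print(err)   # reports that attribute "dtype" differs (interval[int64, right] vs. object)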
0
85
7c6c5f6215b40a27cfefb7bf21246299fd9b3a1e
https://github.com/matplotlib/matplotlib.git
def rc_file_defaults(): #
41
32
matplotlib
__init__.py
lib/matplotlib/__init__.py
12
23,106
def rc_file_defaults(): # Deprecation warnings were already handled when creating rcParamsOrig, no # need to reemit them here. with _api.suppress_matplotlib_deprecation_warning(): from .style.core import STYLE_BLACKLIST rcParams.update({k: rcParamsOrig[k] for k in rcParamsOrig if k not in STYLE_BLACKLIST})
5
rc_file_defaults
108,225
10
72
Fix removed cross-references
{ "docstring": "\n Restore the `.rcParams` from the original rc file loaded by Matplotlib.\n\n Style-blacklisted `.rcParams` (defined in\n ``matplotlib.style.core.STYLE_BLACKLIST``) are not updated.\n ", "language": "en", "n_whitespaces": 32, "n_words": 19, "vocab_size": 17 }
3
Python
35
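A quick sketch of the public API documented above; it assumes only a stock Matplotlib install and its bundled matplotlibrc defaults.

import matplotlib as mpl

mpl.rcParams["lines.linewidth"] = 5.0    # temporary override
print(mpl.rcParams["lines.linewidth"])   # 5.0

mpl.rc_file_defaults()                   # restore values from the originally loaded rc file
print(mpl.rcParams["lines.linewidth"])   # back to the rc-file default (1.5 with the bundled matplotlibrc)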
0
153
f3166e673fe8d40277b804d35d77dcdb760fc3b3
https://github.com/pypa/pipenv.git
def lexer(self) -> Optional[Lexer]: if isinstance(self._lexer, Lexer): return self._lexer try: return get_lexer_by_name( self._lexer, stripnl=False, ensurenl=True, tabsize=self.tab_size, ) except ClassNotFound:
54
19
pipenv
syntax.py
pipenv/patched/notpip/_vendor/rich/syntax.py
11
3,587
def lexer(self) -> Optional[Lexer]: if isinstance(self._lexer, Lexer): return self._lexer try: return get_lexer_by_name( self._lexer, stripnl=False, ensurenl=True, tabsize=self.tab_size, ) except ClassNotFound: return None
16
lexer
20,845
12
83
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
{ "docstring": "The lexer for this syntax, or None if no lexer was found.\n\n Tries to find the lexer by name if a string was passed to the constructor.\n ", "language": "en", "n_whitespaces": 41, "n_words": 27, "vocab_size": 21 }
3
Python
21
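The property above ultimately defers to Pygments; a standalone sketch of the same lookup, assuming only that Pygments is installed (the helper name find_lexer is illustrative):

from typing import Optional

from pygments.lexer import Lexer
from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound

def find_lexer(name: str, tab_size: int = 4) -> Optional[Lexer]:
    # Return None when Pygments knows no lexer under that name.
    try:
        return get_lexer_by_name(name, stripnl=False, ensurenl=True, tabsize=tab_size)
    except ClassNotFound:
        return None

print(find_lexer("python"))         # a PythonLexer instance
print(find_lexer("no-such-lang"))   # None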
0
19
a35b29b2651bf33c5d5b45e64bc7765ffde4aff4
https://github.com/saltstack/salt.git
def test_numeric_repl(file, multiline_file): file.replace(multiline_fi
27
10
salt
test_replace.py
tests/pytests/functional/modules/file/test_replace.py
8
54,182
def test_numeric_repl(file, multiline_file): file.replace(multiline_file, r"Etiam", 123) assert "123" in multiline_file.read_text()
3
test_numeric_repl
215,808
5
46
Add some funtional tests Add functional tests for the following: - file.readlink - file.replace - file.symlink Remove unit tests for file.replace as they are duplicated in the added functional test
{ "docstring": "\n This test covers cases where the replacement string is numeric. The CLI\n parser yaml-fies it into a numeric type. If not converted back to a string\n type in file.replace, a TypeError occurs when the replace is attempted. See\n https://github.com/saltstack/salt/issues/9097 for more information.\n ", "language": "en", "n_whitespaces": 58, "n_words": 42, "vocab_size": 37 }
1
Python
10
0
29
7fa8e45b6782d545fa0ead112d92d13bdad7417c
https://github.com/gradio-app/gradio.git
def set_interpret_parameters(self, segments=16): self.interpretation_segments = segments retu
17
8
gradio
components.py
gradio/components.py
7
43,005
def set_interpret_parameters(self, segments=16): self.interpretation_segments = segments return self
3
set_interpret_parameters
179,715
4
29
Blocks-Components - fixes - format
{ "docstring": "\n Calculates interpretation score of image subsections by splitting the image into subsections, then using a \"leave one out\" method to calculate the score of each subsection by whiting out the subsection and measuring the delta of the output value.\n Parameters:\n segments (int): Number of interpretation segments to split image into.\n ", "language": "en", "n_whitespaces": 79, "n_words": 50, "vocab_size": 35 }
1
Python
8
0
24
1fe202a1a3343fad77da270ffe0923a46f1944dd
https://github.com/matrix-org/synapse.git
def can_native_upsert(self) -> bool: return sqlite3.sqlite_version_info >= (3, 2
20
10
synapse
sqlite.py
synapse/storage/engines/sqlite.py
7
72,207
def can_native_upsert(self) -> bool: return sqlite3.sqlite_version_info >= (3, 24, 0)
6
can_native_upsert
248,309
5
32
Tidy up and type-hint the database engine modules (#12734) Co-authored-by: Sean Quah <[email protected]>
{ "docstring": "\n Do we support native UPSERTs? This requires SQLite3 3.24+, plus some\n more work we haven't done yet to tell what was inserted vs updated.\n ", "language": "en", "n_whitespaces": 46, "n_words": 24, "vocab_size": 23 }
1
Python
10
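The same capability check can be reproduced with nothing but the Python standard library; a short sketch:

import sqlite3

# Native "INSERT ... ON CONFLICT DO UPDATE" upserts require SQLite 3.24.0 or newer.
can_native_upsert = sqlite3.sqlite_version_info >= (3, 24, 0)
print(sqlite3.sqlite_version, can_native_upsert)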
0
97
30ab5458a7e4ba2351d5e1beef8c8797b5946493
https://github.com/ray-project/ray.git
async def get_actors(self) -> dict: reply = await self._client.get_all_actor_info(timeout=DEFAULT_RPC_TIMEOUT) result = {} for message in rep
67
22
ray
state_aggregator.py
dashboard/state_aggregator.py
13
31,405
async def get_actors(self) -> dict: reply = await self._client.get_all_actor_info(timeout=DEFAULT_RPC_TIMEOUT) result = {} for message in reply.actor_table_data: data = self._message_to_dict(message=message, fields_to_decode=["actor_id"]) data = filter_fields(data, ActorState) result[data["actor_id"]] = data return result
14
get_actors
138,397
16
111
[State Observability] Tasks and Objects API (#23912) This PR implements ray list tasks and ray list objects APIs. NOTE: You can ignore the merge conflict for now. It is because the first PR was reverted. There's a fix PR open now.
{ "docstring": "List all actor information from the cluster.\n\n Returns:\n {actor_id -> actor_data_in_dict}\n actor_data_in_dict's schema is in ActorState\n ", "language": "en", "n_whitespaces": 52, "n_words": 16, "vocab_size": 16 }
2
Python
29
0
833
551205a18ac2ac19626f4e4ffb2ed88fcad705b9
https://github.com/mindsdb/mindsdb.git
def insert_predictor_answer(self, insert): model_interface = self.session.model_interface data_store = self.session.data_store select_data_query = insert.get('select_data_query') if isinstance(select_data_query, str) is False or len(select_data_query) == 0: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg="'select_data_query' should not be empty" ).send() return models = model_interface.get_models() if insert['name'] in [x['name'] for x in models]: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg=f"predictor with name '{insert['name']}'' already exists" ).send() return kwargs = {} if isinstance(insert.get('training_options'), str) \ and len(insert['training_options']) > 0: try: kwargs = json.loads(insert['training_options']) except Exception: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg='training_options should be in valid JSON string' ).send() return integration = self.session.integration if isinstance(integration, str) is False or len(integration) == 0: self.packet( ErrPacket, err_code=E
445
109
mindsdb
mysql_proxy.py
mindsdb/api/mysql/mysql_proxy/mysql_proxy.py
16
25,051
def insert_predictor_answer(self, insert): model_interface = self.session.model_interface data_store = self.session.data_store select_data_query = insert.get('select_data_query') if isinstance(select_data_query, str) is False or len(select_data_query) == 0: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg="'select_data_query' should not be empty" ).send() return models = model_interface.get_models() if insert['name'] in [x['name'] for x in models]: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg=f"predictor with name '{insert['name']}'' already exists" ).send() return kwargs = {} if isinstance(insert.get('training_options'), str) \ and len(insert['training_options']) > 0: try: kwargs = json.loads(insert['training_options']) except Exception: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg='training_options should be in valid JSON string' ).send() return integration = self.session.integration if isinstance(integration, str) is False or len(integration) == 0: self.packet( ErrPacket, err_code=ERR.ER_WRONG_ARGUMENTS, msg='select_data_query can be used only in query from database' ).send() return insert['select_data_query'] = insert['select_data_query'].replace(r"\'", "'") ds_name = data_store.get_vacant_name(insert['name']) ds = data_store.save_datasource(ds_name, integration, {'query': insert['select_data_query']}) insert['predict'] = [x.strip() for x in insert['predict'].split(',')] ds_data = data_store.get_datasource(ds_name) if ds_data is None: raise Exception(f"DataSource '{ds_name}' does not exists") ds_columns = [x['name'] for x in ds_data['columns']] for col in insert['predict']: if col not in ds_columns: data_store.delete_datasource(ds_name) raise Exception(f"Column '{col}' not exists") try: insert['predict'] = self._check_predict_columns(insert['predict'], ds_columns) except Exception: data_store.delete_datasource(ds_name) raise model_interface.learn( insert['name'], ds, insert['predict'], ds_data['id'], kwargs=kwargs, delete_ds_on_fail=True ) self.packet(OkPacket).send()
63
insert_predictor_answer
113,876
42
713
fix
{ "docstring": " Start learn new predictor.\n Parameters:\n - insert - dict with keys as columns of mindsb.predictors table.\n ", "language": "en", "n_whitespaces": 47, "n_words": 16, "vocab_size": 15 }
18
Python
181
0
469
2c3e10a128fa0ce4e937d8d50dc0cd6d7cd11485
https://github.com/OpenBB-finance/OpenBBTerminal.git
def populate_historical_trade_data(self): trade_data = self.__orderbook.pivot( index="Date", columns="Ticker", values=[ "Type", "Sector", "Industry", "Country", "Price", "Quantity",
164
65
OpenBBTerminal
portfolio_model.py
openbb_terminal/portfolio/portfolio_model.py
12
85,115
def populate_historical_trade_data(self): trade_data = self.__orderbook.pivot( index="Date", columns="Ticker", values=[ "Type", "Sector", "Industry", "Country", "Price", "Quantity", "Fees", "Premium", "Investment", "Side", "Currency", ], ) # Make historical prices columns a multi-index. This helps the merging. self.portfolio_historical_prices.columns = pd.MultiIndex.from_product( [["Close"], self.portfolio_historical_prices.columns] ) # Merge with historical close prices (and fillna) trade_data = pd.merge( trade_data, self.portfolio_historical_prices, how="right", left_index=True, right_index=True, ).fillna(0) # Accumulate quantity held by trade date trade_data["Quantity"] = trade_data["Quantity"].cumsum() trade_data["Investment"] = trade_data["Investment"].cumsum() trade_data.loc[:, ("Investment", "Total")] = trade_data["Investment"][ self.tickers_list ].sum(axis=1) self.historical_trade_data = trade_data
34
populate_historical_trade_data
285,032
23
282
Overhaul Portfolio class (#2021) * adds pythonic portfolio class * start calculate trades refactoring * adds comments to portfolio model - delete afterwards * finish calculate trades refactoring * restore original portfolio_model.py * implement calculate_allocations * adapt and test controller load, show, bench, alloc and perf * add old code that was ok * adapt controller * adapt portfolio_view * run black on pythonic_portfolio.py * fix crypto bug * change column name in example datasets * substitute portfolio_model.py * update show command * push cumulative returns calculation to model * fix last change in cumulative returns * add comments on possibly unused code * run black on changes * bring metrics from helper to model * push rolling metrics from view to model * Details and linting * Fix tests * remove empty attribute and rename class * fix view and controller rf * change returns calculation method * remove CASH from code * remove cash from tickers_list * run black on changes * change function name * adapt to PortfolioModel * fix tests * fix tests on help * fix linting * call metrics from PortfolioModel * call drawdown from model * fix some mypy issues * fix remaining mypy issues * fix test * Fix linting * Remove unused function * Small fixes * Remove old code and adjust summary to simply work * Update the Excel since CASH is no longer a thing * Fix tests * Update the csvs * Updates to usage of full_shares and more details * Fix -t flag for perf Co-authored-by: Jeroen Bouma <[email protected]>
{ "docstring": "Create a new dataframe to store historical prices by ticker", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
1
Python
78
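The record above pivots an orderbook by ticker, merges it with close prices under a ("Close", ticker) column multi-index, and accumulates holdings; a self-contained sketch of that pattern with made-up tickers and values (all names and numbers here are illustrative, not OpenBB's own data):

import pandas as pd

orderbook = pd.DataFrame(
    {
        "Date": pd.to_datetime(["2021-01-04", "2021-01-05"]),
        "Ticker": ["AAPL", "MSFT"],
        "Quantity": [10.0, 5.0],
        "Investment": [1000.0, 1100.0],
    }
)

# Wide table indexed by date, with (field, ticker) multi-index columns.
trade_data = orderbook.pivot(index="Date", columns="Ticker",
                             values=["Quantity", "Investment"])

prices = pd.DataFrame(
    {"AAPL": [101.0, 102.0], "MSFT": [220.0, 221.0]},
    index=pd.to_datetime(["2021-01-04", "2021-01-05"]),
)
prices.columns = pd.MultiIndex.from_product([["Close"], prices.columns])

merged = pd.merge(trade_data, prices, how="right",
                  left_index=True, right_index=True).fillna(0)

# Quantity and investment held as of each trade date.
merged["Quantity"] = merged["Quantity"].cumsum()
merged["Investment"] = merged["Investment"].cumsum()
print(merged)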
1
129
a47d569e670fd4102af37c3165c9b1ddf6fd3005
https://github.com/scikit-learn/scikit-learn.git
def test_fetch_openml_equivalence_array_dataframe(monkeypatch, parser): pytest.importorskip("pandas") data_id = 61 _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True) bunch_as_frame_true = fetch_openml( data_id=data_id, as_frame=True, cache=False, parser=parser, ) bunch_as_frame_false = fetch_openml( data_id=data_id, as_frame=False, cache=False, parser=parser, ) assert_allclose(bunch_as_frame_false.data, bunch_as_frame_true.data) assert_array_equal(bunch_as_frame_false.target, bunch_as_frame_true.target) # Known failure of PyPy for OpenML. See the following issue: # https://github.com/scikit-learn/scikit-learn/issues/18906 @fails_if_pypy @pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
89
39
scikit-learn
test_openml.py
sklearn/datasets/tests/test_openml.py
9
@fails_if_pypy @pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
75,979
def test_fetch_openml_equivalence_array_dataframe(monkeypatch, parser): pytest.importorskip("pandas") data_id = 61 _monkey_patch_webbased_functions(monkeypatch, data_id, gzip_response=True) bunch_as_frame_true = fetch_openml( data_id=data_id, as_frame=True, cache=False, parser=parser, ) bunch_as_frame_false = fetch_openml( data_id=data_id, as_frame=False, cache=False, parser=parser, ) assert_allclose(bunch_as_frame_false.data, bunch_as_frame_true.data) assert_array_equal(bunch_as_frame_false.target, bunch_as_frame_true.target) # Known failure of PyPy for OpenML. See the following issue: # https://github.com/scikit-learn/scikit-learn/issues/18906 @fails_if_pypy @pytest.mark.parametrize("parser", ["liac-arff", "pandas"])
18
test_fetch_openml_equivalence_array_dataframe
259,898
20
167
ENH improve ARFF parser using pandas (#21938) Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Adrin Jalali <[email protected]>
{ "docstring": "Check the equivalence of the dataset when using `as_frame=False` and\n `as_frame=True`.\n ", "language": "en", "n_whitespaces": 17, "n_words": 11, "vocab_size": 10 }
1
Python
47
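The test above asserts that as_frame=True and as_frame=False return numerically identical data; the same check can be run directly against OpenML, assuming network access and a scikit-learn release new enough to accept the parser keyword (the version this PR targets):

import numpy as np
from sklearn.datasets import fetch_openml

# data_id=61 is the classic "iris" dataset on OpenML.
as_frame = fetch_openml(data_id=61, as_frame=True, parser="liac-arff")
as_array = fetch_openml(data_id=61, as_frame=False, parser="liac-arff")

np.testing.assert_allclose(as_array.data, as_frame.data.to_numpy())
np.testing.assert_array_equal(as_array.target, as_frame.target.to_numpy())
print("array and dataframe outputs match")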
0
76
002f919dda5f01d067c2e786426c68751551d15c
https://github.com/mitmproxy/mitmproxy.git
def wire_type(self): if hasattr(self, '_m_wire_type'): return self._m_wire_type self._m_wire_type = Kaita
51
15
mitmproxy
google_protobuf.py
mitmproxy/contrib/kaitaistruct/google_protobuf.py
12
73,944
def wire_type(self): if hasattr(self, '_m_wire_type'): return self._m_wire_type self._m_wire_type = KaitaiStream.resolve_enum(GoogleProtobuf.Pair.WireTypes, (self.key.value & 7)) return getattr(self, '_m_wire_type', None)
5
wire_type
252,396
12
83
update kaitai definitions
{ "docstring": "\"Wire type\" is a part of the \"key\" that carries enough\n information to parse value from the wire, i.e. read correct\n amount of bytes, but there's not enough informaton to\n interprete in unambiguously. For example, one can't clearly\n distinguish 64-bit fixed-sized integers from 64-bit floats,\n signed zigzag-encoded varints from regular unsigned varints,\n arbitrary bytes from UTF-8 encoded strings, etc.\n ", "language": "en", "n_whitespaces": 136, "n_words": 59, "vocab_size": 51 }
2
Python
17
0
80
b3587b52b25077f68116b9852b041d33e7fc6601
https://github.com/mitmproxy/mitmproxy.git
def address(self): # pragma: no cover warnings.warn( "Client.address is deprecated, use Client.peername instead.", D
23
18
mitmproxy
connection.py
mitmproxy/connection.py
8
73,687
def address(self): # pragma: no cover warnings.warn( "Client.address is deprecated, use Client.peername instead.", DeprecationWarning, stacklevel=2, ) return self.peername
7
address
251,333
7
40
make it black!
{ "docstring": "*Deprecated:* An outdated alias for Client.peername.", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
1
Python
18
3
80
a4fdabab38def4bf6b4007f8cd67d6944740b303
https://github.com/sympy/sympy.git
def jordan_block(kls, size=None, eigenvalue=None, *, band='upper', **kwargs): if 'r
239
19
sympy
common.py
sympy/matrices/common.py
12
f"""\ To get a square Jordan block matrix use a more banded matrix
48,242
def jordan_block(kls, size=None, eigenvalue=None, *, band='upper', **kwargs): if 'rows' in kwargs or 'cols' in kwargs: msg = if 'rows' in kwargs and 'cols' in kwargs: msg += f
45
jordan_block
196,907
21
109
Update the Matrix.jordan_block() rows and cols kwargs deprecation
{ "docstring": "Returns a Jordan block\n\n Parameters\n ==========\n\n size : Integer, optional\n Specifies the shape of the Jordan block matrix.\n\n eigenvalue : Number or Symbol\n Specifies the value for the main diagonal of the matrix.\n\n .. note::\n The keyword ``eigenval`` is also specified as an alias\n of this keyword, but it is not recommended to use.\n\n We may deprecate the alias in later release.\n\n band : 'upper' or 'lower', optional\n Specifies the position of the off-diagonal to put `1` s on.\n\n cls : Matrix, optional\n Specifies the matrix class of the output form.\n\n If it is not specified, the class type where the method is\n being executed on will be returned.\n\n rows, cols : Integer, optional\n Specifies the shape of the Jordan block matrix. See Notes\n section for the details of how these key works.\n\n .. deprecated:: 1.4\n The rows and cols parameters are deprecated and will be\n removed in a future version.\n\n\n Returns\n =======\n\n Matrix\n A Jordan block matrix.\n\n Raises\n ======\n\n ValueError\n If insufficient arguments are given for matrix size\n specification, or no eigenvalue is given.\n\n Examples\n ========\n\n Creating a default Jordan block:\n\n >>> from sympy import Matrix\n >>> from sympy.abc import x\n >>> Matrix.jordan_block(4, x)\n Matrix([\n [x, 1, 0, 0],\n [0, x, 1, 0],\n [0, 0, x, 1],\n [0, 0, 0, x]])\n\n Creating an alternative Jordan block matrix where `1` is on\n lower off-diagonal:\n\n >>> Matrix.jordan_block(4, x, band='lower')\n Matrix([\n [x, 0, 0, 0],\n [1, x, 0, 0],\n [0, 1, x, 0],\n [0, 0, 1, x]])\n\n Creating a Jordan block with keyword arguments\n\n >>> Matrix.jordan_block(size=4, eigenvalue=x)\n Matrix([\n [x, 1, 0, 0],\n [0, x, 1, 0],\n [0, 0, x, 1],\n [0, 0, 0, x]])\n\n Notes\n =====\n\n .. deprecated:: 1.4\n This feature is deprecated and will be removed in a future\n version.\n\n The keyword arguments ``size``, ``rows``, ``cols`` relates to\n the Jordan block size specifications.\n\n If you want to create a square Jordan block, specify either\n one of the three arguments.\n\n If you want to create a rectangular Jordan block, specify\n ``rows`` and ``cols`` individually.\n\n +--------------------------------+---------------------+\n | Arguments Given | Matrix Shape |\n +----------+----------+----------+----------+----------+\n | size | rows | cols | rows | cols |\n +==========+==========+==========+==========+==========+\n | size | Any | size | size |\n +----------+----------+----------+----------+----------+\n | | None | ValueError |\n | +----------+----------+----------+----------+\n | None | rows | None | rows | rows |\n | +----------+----------+----------+----------+\n | | None | cols | cols | cols |\n + +----------+----------+----------+----------+\n | | rows | cols | rows | cols |\n +----------+----------+----------+----------+----------+\n\n References\n ==========\n\n .. [1] https://en.wikipedia.org/wiki/Jordan_matrix\n \n The 'rows' and 'cols' keywords to Matrix.jordan_block() are\n deprecated. Use the 'size' parameter instead.\n \\\n To get a non-square Jordan block matrix use a more generic\n banded matrix constructor, like\n", "language": "en", "n_whitespaces": 1426, "n_words": 442, "vocab_size": 190 }
16
Python
28
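A short sketch exercising the documented call forms of Matrix.jordan_block; only SymPy is assumed.

from sympy import Matrix, symbols

x = symbols("x")

# Square block with 1s on the upper off-diagonal (the default band).
print(Matrix.jordan_block(3, x))

# Same size and eigenvalue, banded below the diagonal instead.
print(Matrix.jordan_block(size=3, eigenvalue=x, band="lower"))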