Dataset schema (23 columns). For "stringlengths" columns the min/max values are string lengths; for "int64" columns they are the observed value range. The records that follow list one field per line, in this column order.

column           dtype           min      max
complexity       int64           1        139
fun_name         stringlengths   1        80
code             stringlengths   101      62.2k
commit_id        stringlengths   40       40
ast_errors       stringlengths   0        3.11k
ast_levels       int64           6        36
file_name        stringlengths   5        79
n_ast_nodes      int64           17       19.2k
commit_message   stringlengths   3        15.3k
d_id             int64           12       121k
n_ast_errors     int64           0        9
n_whitespaces    int64           4        10.8k
token_counts     int64           5        3.06k
vocab_size       int64           4        1.11k
id               int64           20       338k
n_words          int64           4        4.82k
repo             stringlengths   3        22
n_identifiers    int64           2        176
path             stringlengths   7        134
language         stringclasses   1 distinct value
nloc             int64           1        413
documentation    dict
url              stringlengths   31       59
1
test_cyclic
def test_cyclic(self):
    one = '<li>Cyclic one: <a href="%s">I am recursive</a>' % (
        reverse("admin:admin_views_cyclicone_change", args=(self.cy1.pk,)),
    )
    two = '<li>Cyclic two: <a href="%s">I am recursive too</a>' % (
        reverse("admin:admin_views_cyclictwo_change", args=(self.cy2.pk,)),
    )
    response = self.client.get(
        reverse("admin:admin_views_cyclicone_delete", args=(self.cy1.pk,))
    )
    self.assertContains(response, one, 1)
    self.assertContains(response, two, 1)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
14
tests.py
151
Refs #33476 -- Reformatted code with Black.
52,132
0
137
94
29
207,849
41
django
13
tests/admin_views/tests.py
Python
12
{ "docstring": "\n Cyclic relationships should still cause each object to only be\n listed once.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 12 }
https://github.com/django/django.git
1
test_center_head_loss
def test_center_head_loss(self):
    s = 256
    img_metas = [{'batch_input_shape': (s, s, 3)}]
    test_cfg = dict(topK=100, max_per_img=100)
    centernet_head = CenterNetHead(
        num_classes=4, in_channels=1, feat_channels=4, test_cfg=test_cfg)
    feat = [torch.rand(1, 1, s, s)]
    center_out, wh_out, offset_out = centernet_head.forward(feat)
    # Test that empty ground truth encourages the network to
    # predict background
    gt_instances = InstanceData()
    gt_instances.bboxes = torch.empty((0, 4))
    gt_instances.labels = torch.LongTensor([])
    empty_gt_losses = centernet_head.loss(center_out, wh_out, offset_out,
                                          [gt_instances], img_metas)
    loss_center = empty_gt_losses['loss_center_heatmap']
    loss_wh = empty_gt_losses['loss_wh']
    loss_offset = empty_gt_losses['loss_offset']
    assert loss_center.item() > 0, 'loss_center should be non-zero'
    assert loss_wh.item() == 0, (
        'there should be no loss_wh when there are no true boxes')
    assert loss_offset.item() == 0, (
        'there should be no loss_offset when there are no true boxes')

    # When truth is non-empty then both cls and box loss
    # should be nonzero for random inputs
    gt_instances = InstanceData()
    gt_instances.bboxes = torch.Tensor(
        [[23.6667, 23.8757, 238.6326, 151.8874]])
    gt_instances.labels = torch.LongTensor([2])
    one_gt_losses = centernet_head.loss(center_out, wh_out, offset_out,
                                        [gt_instances], img_metas)
    loss_center = one_gt_losses['loss_center_heatmap']
    loss_wh = one_gt_losses['loss_wh']
    loss_offset = one_gt_losses['loss_offset']
    assert loss_center.item() > 0, 'loss_center should be non-zero'
    assert loss_wh.item() > 0, 'loss_wh should be non-zero'
    assert loss_offset.item() > 0, 'loss_offset should be non-zero'
96aa909c19dbe753852ac6dba13bbbc35329b99f
10
test_centernet_head.py
457
[Refactor] CenterNet
70,506
0
532
295
101
244,739
183
mmdetection
34
tests/test_models/test_dense_heads/test_centernet_head.py
Python
33
{ "docstring": "Tests center head loss when truth is empty and non-empty.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/open-mmlab/mmdetection.git
1
parsebytes
def parsebytes(self, text, headersonly=False):
    text = text.decode('ASCII', errors='surrogateescape')
    return self.parser.parsestr(text, headersonly)
8198943edd73a363c266633e1aa5b2a9e9c9f526
10
parser.py
58
add python 3.10.4 for windows
57,101
0
32
35
11
223,843
11
XX-Net
8
python3.10.4/Lib/email/parser.py
Python
3
{ "docstring": "Create a message structure from a byte string.\n\n Returns the root of the message structure. Optional headersonly is a\n flag specifying whether to stop parsing after reading the headers or\n not. The default is False, meaning it parses the entire contents of\n the file.\n ", "language": "en", "n_whitespaces": 81, "n_words": 44, "vocab_size": 35 }
https://github.com/XX-net/XX-Net.git
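
A minimal usage sketch for the method above, using only the standard-library email.parser.BytesParser; the raw message bytes are illustrative.

import_example = None  # placeholder name used nowhere else; the sketch follows
from email.parser import BytesParser

raw = b"Subject: hello\r\nFrom: [email protected]\r\n\r\nbody text\r\n"

msg = BytesParser().parsebytes(raw)                        # full parse
headers = BytesParser().parsebytes(raw, headersonly=True)  # stop after the headers
print(msg["Subject"], headers["From"], msg.get_payload())
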
3
shuffle
def shuffle(self) -> "SampleBatch":
    # Shuffling the data when we have `seq_lens` defined is probably
    # a bad idea!
    if self.get(SampleBatch.SEQ_LENS) is not None:
        raise ValueError(
            "SampleBatch.shuffle not possible when your data has "
            "`seq_lens` defined!"
        )

    # Get a permutation over the single items once and use the same
    # permutation for all the data (otherwise, data would become
    # meaningless).
    permutation = np.random.permutation(self.count)

    self_as_dict = {k: v for k, v in self.items()}
    shuffled = tree.map_structure(lambda v: v[permutation], self_as_dict)
    self.update(shuffled)
    # Flush cache such that intercepted values are recalculated after the
    # shuffling.
    self.intercepted_values = {}

    return self
de9e143938d2e551e973496e167297dcd166b3c8
11
sample_batch.py
151
[RLlib] Issue 23907: SampleBatch.shuffle does not flush intercepted_values dict (which it should). (#24005)
34,200
0
256
86
75
148,185
99
ray
19
rllib/policy/sample_batch.py
Python
26
{ "docstring": "Shuffles the rows of this batch in-place.\n\n Returns:\n This very (now shuffled) SampleBatch.\n\n Raises:\n ValueError: If self[SampleBatch.SEQ_LENS] is defined.\n\n Examples:\n >>> from ray.rllib.policy.sample_batch import SampleBatch\n >>> batch = SampleBatch({\"a\": [1, 2, 3, 4]}) # doctest: +SKIP\n >>> print(batch.shuffle()) # doctest: +SKIP\n {\"a\": [4, 1, 3, 2]}\n ", "language": "en", "n_whitespaces": 141, "n_words": 46, "vocab_size": 39 }
https://github.com/ray-project/ray.git
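
A hedged usage sketch of the method above, assuming ray[rllib] is installed; the column names are illustrative and mirror the doctest in the docstring.

import numpy as np
from ray.rllib.policy.sample_batch import SampleBatch

batch = SampleBatch({"obs": np.arange(4), "rewards": np.arange(4.0)})
batch.shuffle()                        # permutes rows in place, flushes intercepted_values
print(batch["obs"], batch["rewards"])  # every column gets the same permutation
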
1
frucht_graph
def frucht_graph(create_using=None):
    G = cycle_graph(7, create_using)
    G.add_edges_from(
        [
            [0, 7],
            [1, 7],
            [2, 8],
            [3, 9],
            [4, 9],
            [5, 10],
            [6, 10],
            [7, 11],
            [8, 11],
            [8, 9],
            [10, 11],
        ]
    )
    G.name = "Frucht Graph"
    return G
dec723f072eb997a497a159dbe8674cd39999ee9
9
small.py
130
Docstrings for the small.py module (#5240) * added description for the first 5 small graphs * modified descriptions based on comment and added description for two more functions * added doctrings to all the functions * Minor touchups. Co-authored-by: Ross Barnowski <[email protected]>
41,743
0
191
96
29
176,173
38
networkx
6
networkx/generators/small.py
Python
19
{ "docstring": "\n Returns the Frucht Graph.\n\n The Frucht Graph is the smallest cubical graph whose\n automorphism group consists only of the identity element [1]_.\n It has 12 nodes and 18 edges and no nontrivial symmetries.\n It is planar and Hamiltonian [2]_.\n\n Parameters\n ----------\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Returns\n -------\n G : networkx Graph\n Frucht Graph with 12 nodes and 18 edges\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Frucht_graph\n .. [2] https://mathworld.wolfram.com/FruchtGraph.html\n\n ", "language": "en", "n_whitespaces": 143, "n_words": 81, "vocab_size": 59 }
https://github.com/networkx/networkx.git
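
A quick sanity check through the public NetworkX API (the generator above is exposed as nx.frucht_graph), verifying the properties stated in the docstring.

import networkx as nx

G = nx.frucht_graph()
assert G.number_of_nodes() == 12 and G.number_of_edges() == 18
assert all(deg == 3 for _, deg in G.degree())  # cubic, as documented
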
1
managed
def managed(self):  # type: () -> bool
    return t.cast(bool, self._get_cloud_config(self._MANAGED))
a06fa496d3f837cca3c437ab6e9858525633d147
10
__init__.py
38
ansible-test - Code cleanup and refactoring. (#77169) * Remove unnecessary PyCharm ignores. * Ignore intentional undefined attribute usage. * Add missing type hints. Fix existing type hints. * Fix docstrings and comments. * Use function to register completion handler. * Pass strings to display functions. * Fix CompositeAction handling of dest argument. * Use consistent types in expressions/assignments. * Use custom function to keep linters happy. * Add missing raise for custom exception. * Clean up key/value type handling in cloud plugins. * Use dataclass instead of dict for results. * Add custom type_guard function to check lists. * Ignore return type that can't be checked (yet). * Avoid changing types on local variables.
78,553
0
25
22
10
266,742
10
ansible
7
test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py
Python
2
{ "docstring": "True if resources are managed by ansible-test, otherwise False.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/ansible/ansible.git
1
_combine_individual_stats
def _combine_individual_stats(self, operator_count, cv_score, individual_stats):
    stats = deepcopy(
        individual_stats
    )  # Deepcopy, since the string reference to predecessor should be cloned
    stats["operator_count"] = operator_count
    stats["internal_cv_score"] = cv_score
    return stats
388616b6247ca4ea8de4e2f340d6206aee523541
8
base.py
55
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
43,600
0
83
32
26
181,816
29
tpot
7
tpot/base.py
Python
7
{ "docstring": "Combine the stats with operator count and cv score and preprare to be written to _evaluated_individuals\n\n Parameters\n ----------\n operator_count: int\n number of components in the pipeline\n cv_score: float\n internal cross validation score\n individual_stats: dictionary\n dict containing statistics about the individual. currently:\n 'generation': generation in which the individual was evaluated\n 'mutation_count': number of mutation operations applied to the individual and its predecessor cumulatively\n 'crossover_count': number of crossover operations applied to the individual and its predecessor cumulatively\n 'predecessor': string representation of the individual\n\n Returns\n -------\n stats: dictionary\n dict containing the combined statistics:\n 'operator_count': number of operators in the pipeline\n 'internal_cv_score': internal cross validation score\n and all the statistics contained in the 'individual_stats' parameter\n ", "language": "en", "n_whitespaces": 295, "n_words": 111, "vocab_size": 66 }
https://github.com/EpistasisLab/tpot.git
7
__arrow_array__
def __arrow_array__(self, type=None):
    import pyarrow

    from pandas.core.arrays.arrow._arrow_utils import ArrowIntervalType

    try:
        subtype = pyarrow.from_numpy_dtype(self.dtype.subtype)
    except TypeError as err:
        raise TypeError(
            f"Conversion to arrow with subtype '{self.dtype.subtype}' "
            "is not supported"
        ) from err
    interval_type = ArrowIntervalType(subtype, self.closed)
    storage_array = pyarrow.StructArray.from_arrays(
        [
            pyarrow.array(self._left, type=subtype, from_pandas=True),
            pyarrow.array(self._right, type=subtype, from_pandas=True),
        ],
        names=["left", "right"],
    )
    mask = self.isna()
    if mask.any():
        # if there are missing values, set validity bitmap also on the array level
        null_bitmap = pyarrow.array(~mask).buffers()[1]
        storage_array = pyarrow.StructArray.from_buffers(
            storage_array.type,
            len(storage_array),
            [null_bitmap],
            children=[storage_array.field(0), storage_array.field(1)],
        )

    if type is not None:
        if type.equals(interval_type.storage_type):
            return storage_array
        elif isinstance(type, ArrowIntervalType):
            # ensure we have the same subtype and closed attributes
            if not type.equals(interval_type):
                raise TypeError(
                    "Not supported to convert IntervalArray to type with "
                    f"different 'subtype' ({self.dtype.subtype} vs {type.subtype}) "
                    f"and 'closed' ({self.closed} vs {type.closed}) attributes"
                )
        else:
            raise TypeError(
                f"Not supported to convert IntervalArray to '{type}' type"
            )

    return pyarrow.ExtensionArray.from_storage(interval_type, storage_array)


_interval_shared_docs[
    "to_tuples"
] =
41e423fd09df5817dcfa11f3aea111a5b1a2f98a
19
interval.py
450
REF: Create pandas/core/arrays/arrow (#46591)
39,734
0
711
249
106
165,894
149
pandas
40
pandas/core/arrays/interval.py
Python
42
{ "docstring": "\n Convert myself into a pyarrow Array.\n \n Return an %(return_type)s of tuples of the form (left, right).\n\n Parameters\n ----------\n na_tuple : bool, default True\n Returns NA as a tuple if True, ``(nan, nan)``, or just as the NA\n value itself if False, ``nan``.\n\n Returns\n -------\n tuples: %(return_type)s\n %(examples)s\\\n ", "language": "en", "n_whitespaces": 148, "n_words": 47, "vocab_size": 39 }
https://github.com/pandas-dev/pandas.git
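
A hedged round-trip sketch: pyarrow.array consults the __arrow_array__ protocol on pandas extension arrays, so the conversion above can be exercised as below (assumes a recent pandas and a pyarrow install).

import pandas as pd
import pyarrow as pa

intervals = pd.arrays.IntervalArray.from_breaks([0, 1, 2, 3], closed="right")
arrow_arr = pa.array(intervals)   # dispatches to IntervalArray.__arrow_array__
print(arrow_arr.type)             # struct-backed interval extension type with left/right fields
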
14
getopt
def getopt(self, args=None, object=None):
    if args is None:
        args = sys.argv[1:]
    if object is None:
        object = OptionDummy()
        created_object = True
    else:
        created_object = False

    self._grok_option_table()

    short_opts = ' '.join(self.short_opts)
    try:
        opts, args = getopt.getopt(args, short_opts, self.long_opts)
    except getopt.error as msg:
        raise DistutilsArgError(msg)

    for opt, val in opts:
        if len(opt) == 2 and opt[0] == '-':  # it's a short option
            opt = self.short2long[opt[1]]
        else:
            assert len(opt) > 2 and opt[:2] == '--'
            opt = opt[2:]

        alias = self.alias.get(opt)
        if alias:
            opt = alias

        if not self.takes_arg[opt]:  # boolean option?
            assert val == '', "boolean option can't have value"
            alias = self.negative_alias.get(opt)
            if alias:
                opt = alias
                val = 0
            else:
                val = 1

        attr = self.attr_name[opt]
        # The only repeating option at the moment is 'verbose'.
        # It has a negative option -q quiet, which should set verbose = 0.
        if val and self.repeat.get(attr) is not None:
            val = getattr(object, attr, 0) + 1
        setattr(object, attr, val)
        self.option_order.append((opt, val))

    # for opts
    if created_object:
        return args, object
    else:
        return args
8198943edd73a363c266633e1aa5b2a9e9c9f526
14
fancy_getopt.py
437
add python 3.10.4 for windows
56,808
0
660
269
101
222,919
171
XX-Net
31
python3.10.4/Lib/distutils/fancy_getopt.py
Python
40
{ "docstring": "Parse command-line options in args. Store as attributes on object.\n\n If 'args' is None or not supplied, uses 'sys.argv[1:]'. If\n 'object' is None or not supplied, creates a new OptionDummy\n object, stores option values there, and returns a tuple (args,\n object). If 'object' is supplied, it is modified in place and\n 'getopt()' just returns 'args'; in both cases, the returned\n 'args' is a modified copy of the passed-in 'args' list, which\n is left untouched.\n ", "language": "en", "n_whitespaces": 132, "n_words": 74, "vocab_size": 51 }
https://github.com/XX-net/XX-Net.git
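
A small hedged example of driving this parser directly; the option table and argument list are made up for illustration, and distutils itself is deprecated in newer Python versions.

from distutils.fancy_getopt import FancyGetopt

# Option table entries are (long_option, short_option, help_string); no trailing
# '=' on the long option means it is a boolean flag.
parser = FancyGetopt([("verbose", "v", "run verbosely")])
args, opts = parser.getopt(["-v", "build"])  # object=None, so an OptionDummy is returned
print(args, opts.verbose)                    # ['build'] 1
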
14
split_code_in_indented_blocks
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    # Let's split the code into lines and move to start_index.
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
4261c3aadfc23ee5b123b80ab7d8680a013acb66
17
custom_init_isort.py
478
Make style
120,784
0
445
284
80
335,484
153
diffusers
15
utils/custom_init_isort.py
Python
32
{ "docstring": "\n Split `code` into its indented blocks, starting at `indent_level`. If provided, begins splitting after\n `start_prompt` and stops at `end_prompt` (but returns what's before `start_prompt` as a first block and what's\n after `end_prompt` as a last block, so `code` is always the same as joining the result of this function).\n ", "language": "en", "n_whitespaces": 62, "n_words": 49, "vocab_size": 38 }
https://github.com/huggingface/diffusers.git
2
test_find_next_time_expression_tenth_second_pattern_does_not_drift_entering_dst
def test_find_next_time_expression_tenth_second_pattern_does_not_drift_entering_dst():
    tz = dt_util.get_time_zone("America/Chicago")
    dt_util.set_default_time_zone(tz)
    tenth_second_pattern = (None, None, "10")
    # Entering DST, clocks go forward
    test_time = datetime(2021, 3, 15, 2, 30, 0, tzinfo=tz, fold=0)
    matching_hours, matching_minutes, matching_seconds = _get_matches(
        *tenth_second_pattern
    )
    next_time = dt_util.find_next_time_expression_time(
        test_time, matching_seconds, matching_minutes, matching_hours
    )
    assert next_time == datetime(2021, 3, 15, 2, 30, 10, tzinfo=tz)

    prev_target = next_time
    for i in range(1000):
        next_target = dt_util.find_next_time_expression_time(
            prev_target.replace(microsecond=999999) + timedelta(seconds=1),
            matching_seconds,
            matching_minutes,
            matching_hours,
        )
        assert (next_target - prev_target).total_seconds() == 60
        assert next_target.second == 10
        prev_target = next_target
4e9bc9eaffd464f192d187a01771a86699b2f932
14
test_dt.py
236
Small cleanups to find_next_time_expression and addition of tests (#71845)
99,552
0
214
159
55
300,692
82
core
26
tests/util/test_dt.py
Python
23
{ "docstring": "Test finding next time expression tenth second pattern does not drift entering dst.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
https://github.com/home-assistant/core.git
1
groundtruths
def groundtruths(self) -> 'DocumentArray':
    return DocumentArray(self._content.groundtruths)
933415bfa1f9eb89f935037014dfed816eb9815d
9
data.py
36
feat: star routing (#3900) * feat(proto): adjust proto for star routing (#3844) * feat(proto): adjust proto for star routing * feat(proto): generate proto files * feat(grpc): refactor grpclet interface (#3846) * feat: refactor connection pool for star routing (#3872) * feat(k8s): add more labels to k8s deployments * feat(network): refactor connection pool * feat(network): refactor k8s pool * feat: star routing graph gateway (#3877) * feat: star routing - refactor grpc data runtime (#3887) * feat(runtimes): refactor grpc dataruntime * fix(tests): adapt worker runtime tests * fix(import): fix import * feat(proto): enable sending multiple lists (#3891) * feat: star routing gateway (#3893) * feat: star routing gateway all protocols (#3897) * test: add streaming and prefetch tests (#3901) * feat(head): new head runtime for star routing (#3899) * feat(head): new head runtime * feat(head): new head runtime * style: fix overload and cli autocomplete * feat(network): improve proto comments Co-authored-by: Jina Dev Bot <[email protected]> * feat(worker): merge docs in worker runtime (#3905) * feat(worker): merge docs in worker runtime * feat(tests): assert after clean up * feat(tests): star routing runtime integration tests (#3908) * fix(tests): fix integration tests * test: test runtimes fast slow request (#3910) * feat(zmq): purge zmq, zed, routing_table (#3915) * feat(zmq): purge zmq, zed, routing_table * style: fix overload and cli autocomplete * feat(zmq): adapt comment in dependency list * style: fix overload and cli autocomplete * fix(tests): fix type tests Co-authored-by: Jina Dev Bot <[email protected]> * test: add test gateway to worker connection (#3921) * feat(pea): adapt peas for star routing (#3918) * feat(pea): adapt peas for star routing * style: fix overload and cli autocomplete * feat(pea): add tests * feat(tests): add failing head pea test Co-authored-by: Jina Dev Bot <[email protected]> * feat(tests): integration tests for peas (#3923) * feat(tests): integration tests for peas * feat(pea): remove _inner_pea function * feat: star routing container pea (#3922) * test: rescue tests (#3942) * fix: fix streaming tests (#3945) * refactor: move docker run to run (#3948) * feat: star routing pods (#3940) * feat(pod): adapt pods for star routing * feat(pods): adapt basepod to star routing * feat(pod): merge pod and compound pod * feat(tests): fix tests * style: fix overload and cli autocomplete * feat(test): add container pea int test * feat(ci): remove more unnecessary tests * fix(tests): remove jinad runtime * feat(ci): remove latency tracking * fix(ci): fix ci def * fix(runtime): enable runtime to be exited * fix(tests): wrap runtime test in process * fix(runtimes): remove unused runtimes * feat(runtimes): improve cancel wait * fix(ci): build test pip again in ci * fix(tests): fix a test * fix(test): run async in its own process * feat(pod): include shard in activate msg * fix(pea): dont join * feat(pod): more debug out * feat(grpc): manage channels properly * feat(pods): remove exitfifo * feat(network): add simple send retry mechanism * fix(network): await pool close * fix(test): always close grpc server in worker * fix(tests): remove container pea from tests * fix(tests): reorder tests * fix(ci): split tests * fix(ci): allow alias setting * fix(test): skip a test * feat(pods): address comments Co-authored-by: Jina Dev Bot <[email protected]> * test: unblock skipped test (#3957) * feat: jinad pea (#3949) * feat: jinad pea * feat: jinad pea * test: remote peas * test: toplogy tests 
with jinad * ci: parallel jobs * feat(tests): add pod integration tests (#3958) * feat(tests): add pod integration tests * fix(tests): make tests less flaky * fix(test): fix test * test(pea): remote pea topologies (#3961) * test(pea): remote pea simple topology * test: remote pea topologies * refactor: refactor streamer result handling (#3960) * feat(k8s): adapt K8s Pod for StarRouting (#3964) * test: optimize k8s test * test: increase timeout and use different namespace * test: optimize k8s test * test: build and load image when needed * test: refactor k8s test * test: fix image name error * test: fix k8s image load * test: fix typoe port expose * test: update tests in connection pool and handling * test: remove unused fixture * test: parameterize docker images * test: parameterize docker images * test: parameterize docker images * feat(k8s): adapt k8s pod for star routing * fix(k8s): dont overwrite add/remove function in pool * fix(k8s): some fixes * fix(k8s): some more fixes * fix(k8s): linting * fix(tests): fix tests * fix(tests): fix k8s unit tests * feat(k8s): complete k8s integration test * feat(k8s): finish k8s tests * feat(k8s): fix test * fix(tests): fix test with no name * feat(k8s): unify create/replace interface * feat(k8s): extract k8s port constants * fix(tests): fix tests * fix(tests): wait for runtime being ready in tests * feat(k8s): address comments Co-authored-by: bwanglzu <[email protected]> * feat(flow): adapt Flow for StarRouting (#3986) * feat(flow): add routes * feat(flow): adapt flow to star routing * style: fix overload and cli autocomplete * feat(flow): handle empty topologies * feat(k8s): allow k8s pool disabling * style: fix overload and cli autocomplete * fix(test): fix test with mock * fix(tests): fix more tests * feat(flow): clean up tests * style: fix overload and cli autocomplete * fix(tests): fix more tests * feat: add plot function (#3994) * fix(tests): avoid hanging tests * feat(flow): add type hinting * fix(test): fix duplicate exec name in test * fix(tests): fix more tests * fix(tests): enable jinad test again * fix(tests): random port fixture * fix(style): replace quotes Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * feat(ci): bring back ci (#3997) * feat(ci): enable ci again * style: fix overload and cli autocomplete * feat(ci): add latency tracking * feat(ci): bring back some tests * fix(tests): remove invalid port test * feat(ci): disable daemon and distributed tests * fix(tests): fix entrypoint in hub test * fix(tests): wait for gateway to be ready * fix(test): fix more tests * feat(flow): do rolling update and scale sequentially * fix(tests): fix more tests * style: fix overload and cli autocomplete * feat: star routing hanging pods (#4011) * fix: try to handle hanging pods better * test: hanging pods test work * fix: fix topology graph problem * test: add unit test to graph * fix(tests): fix k8s tests * fix(test): fix k8s test * fix(test): fix k8s pool test * fix(test): fix k8s test * fix(test): fix k8s connection pool setting * fix(tests): make runtime test more reliable * fix(test): fix routes test * fix(tests): make rolling update test less flaky * feat(network): gurantee unique ports * feat(network): do round robin for shards * fix(ci): increase pytest timeout to 10 min Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Joan Fontanals <[email protected]> * fix(ci): fix ci file * feat(daemon): jinad pod for star routing * Revert "feat(daemon): jinad pod for star routing" 
This reverts commit ed9b37ac862af2e2e8d52df1ee51c0c331d76f92. * feat(daemon): remote jinad pod support (#4042) * feat(daemon): add pod tests for star routing * feat(daemon): add remote pod test * test(daemon): add remote pod arguments test * test(daemon): add async scale test * test(daemon): add rolling update test * test(daemon): fix host * feat(proto): remove message proto (#4051) * feat(proto): remove message proto * fix(tests): fix tests * fix(tests): fix some more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * fix(tests): fix more tests * feat(proto): put docs back in data * fix(proto): clean up * feat(proto): clean up * fix(tests): skip latency tracking * fix(test): fix hub test * fix(tests): fix k8s test * fix(test): some test clean up * fix(style): clean up style issues * feat(proto): adjust for rebase * fix(tests): bring back latency tracking * fix(tests): fix merge accident * feat(proto): skip request serialization (#4074) * feat: add reduce to star routing (#4070) * feat: add reduce on shards to head runtime * test: add reduce integration tests with fixed order * feat: add reduce on needs * chore: get_docs_matrix_from_request becomes public * style: fix overload and cli autocomplete * docs: remove undeterministic results warning * fix: fix uses_after * test: assert correct num docs after reducing in test_external_pod * test: correct asserts after reduce in test_rolling_update * fix: no reduce if uses_after_address is set * fix: get_docs_from_request only if needed * fix: fix tests after merge * refactor: move reduce from data_request_handler to head * style: fix overload and cli autocomplete * chore: apply suggestions * fix: fix asserts * chore: minor test fix * chore: apply suggestions * test: remove flow tests with external executor (pea) * fix: fix test_expected_messages_routing * fix: fix test_func_joiner * test: adapt k8s test Co-authored-by: Jina Dev Bot <[email protected]> * fix(k8s): fix static pool config * fix: use custom protoc doc generator image (#4088) * fix: use custom protoc doc generator image * fix(docs): minor doc improvement * fix(docs): use custom image * fix(docs): copy docarray * fix: doc building local only * fix: timeout doc building * fix: use updated args when building ContainerPea * test: add container PeaFactory test * fix: force pea close on windows (#4098) * fix: dont reduce if uses exist (#4099) * fix: dont use reduce if uses exist * fix: adjust reduce tests * fix: adjust more reduce tests * fix: fix more tests * fix: adjust more tests * fix: ignore non jina resources (#4101) * feat(executor): enable async executors (#4102) * feat(daemon): daemon flow on star routing (#4096) * test(daemon): add remote flow test * feat(daemon): call scale in daemon * feat(daemon): remove tail args and identity * test(daemon): rename scalable executor * test(daemon): add a small delay in async test * feat(daemon): scale partial flow only * feat(daemon): call scale directly in partial flow store * test(daemon): use asyncio sleep * feat(daemon): enable flow level distributed tests * test(daemon): fix jinad env workspace config * test(daemon): fix pod test use new port rolling update * feat(daemon): enable distribuetd tests * test(daemon): remove duplicate tests and zed runtime test * test(daemon): fix stores unit test * feat(daemon): enable part of distributed tests * feat(daemon): enable part of distributed tests * test: correct test paths * test(daemon): add client test for remote flows * test(daemon): send a request 
with jina client * test(daemon): assert async generator * test(daemon): small interval between tests * test(daemon): add flow test for container runtime * test(daemon): add flow test for container runtime * test(daemon): fix executor name * test(daemon): fix executor name * test(daemon): use async client fetch result * test(daemon): finish container flow test * test(daemon): enable distributed in ci * test(daemon): enable distributed in ci * test(daemon): decare flows and pods * test(daemon): debug ci if else * test(daemon): debug ci if else * test(daemon): decare flows and pods * test(daemon): correct test paths * test(daemon): add small delay for async tests * fix: star routing fixes (#4100) * docs: update docs * fix: fix Request.__repr__ * docs: update flow remarks * docs: fix typo * test: add non_empty_fields test * chore: remove non_empty_fields test * feat: polling per endpoint (#4111) * feat(polling): polling per endpoint configurable * fix: adjust tests * feat(polling): extend documentation * style: fix overload and cli autocomplete * fix: clean up * fix: adjust more tests * fix: remove repeat from flaky test * fix: k8s test * feat(polling): address pr feedback * feat: improve docs Co-authored-by: Jina Dev Bot <[email protected]> * feat(grpc): support connect grpc server via ssl tunnel (#4092) * feat(grpc): support ssl grpc connect if port is 443 * fix(grpc): use https option instead of detect port automatically * chore: fix typo * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * fix: update jina/peapods/networking.py Co-authored-by: Joan Fontanals <[email protected]> * test(networking): add test for peapods networking * fix: address comments Co-authored-by: Joan Fontanals <[email protected]> * feat(polling): unify polling args (#4113) * fix: several issues for jinad pods (#4119) * fix: activate for jinad pods * fix: dont expose worker pod in partial daemon * fix: workspace setting * fix: containerized flows * fix: hub test * feat(daemon): remote peas on star routing (#4112) * test(daemon): fix request in peas * test(daemon): fix request in peas * test(daemon): fix sync async client test * test(daemon): enable remote peas test * test(daemon): replace send message to send request * test(daemon): declare pea tests in ci * test(daemon): use pea args fixture * test(daemon): head pea use default host * test(daemon): fix peas topologies * test(daemon): fix pseudo naming * test(daemon): use default host as host * test(daemon): fix executor path * test(daemon): add remote worker back * test(daemon): skip local remote remote topology * fix: jinad pea test setup * fix: jinad pea tests * fix: remove invalid assertion Co-authored-by: jacobowitz <[email protected]> * feat: enable daemon tests again (#4132) * feat: enable daemon tests again * fix: remove bogy empty script file * fix: more jinad test fixes * style: fix overload and cli autocomplete * fix: scale and ru in jinad * fix: fix more jinad tests Co-authored-by: Jina Dev Bot <[email protected]> * fix: fix flow test * fix: improve pea tests reliability (#4136) Co-authored-by: Joan Fontanals <[email protected]> Co-authored-by: Jina Dev Bot <[email protected]> Co-authored-by: Deepankar Mahapatro <[email protected]> Co-authored-by: bwanglzu <[email protected]> Co-authored-by: AlaeddineAbdessalem <[email protected]> Co-authored-by: Zhaofeng Miao <[email protected]>
1,803
0
32
17
6
9,954
6
jina
4
jina/types/request/data.py
Python
5
{ "docstring": "Get the :class: `DocumentArray` with sequence `data.docs` as content.\n\n .. # noqa: DAR201\n :class:`DataRequest` is one of the **primitive data type** in Jina.\n\n It offers a Pythonic interface to allow users access and manipulate\n :class:`jina.jina_pb2.DataRequestProto` object without working with Protobuf itself.\n\n A container for serialized :class:`jina_pb2.DataRequestProto` that only triggers deserialization\n and decompression when receives the first read access to its member.\n\n It overrides :meth:`__getattr__` to provide the same get/set interface as an\n :class:`jina_pb2.DataRequestProto` object.\n\n :param request: The request.\n ", "language": "en", "n_whitespaces": 116, "n_words": 78, "vocab_size": 66 }
https://github.com/jina-ai/jina.git
3
sharey
def sharey(self, other):
    _api.check_isinstance(_AxesBase, other=other)
    if self._sharey is not None and other is not self._sharey:
        raise ValueError("y-axis is already shared")
    self._shared_axes["y"].join(self, other)
    self._sharey = other
    self.yaxis.major = other.yaxis.major  # Ticker instances holding
    self.yaxis.minor = other.yaxis.minor  # locator and formatter.
    y0, y1 = other.get_ylim()
    self.set_ylim(y0, y1, emit=False, auto=other.get_autoscaley_on())
    self.yaxis._scale = other.yaxis._scale
f156db08eee54d285ab0fb4e031e48d078ba6aa3
10
_base.py
188
DOC: More cleanup axes -> Axes
22,766
0
133
118
39
107,477
50
matplotlib
21
lib/matplotlib/axes/_base.py
Python
11
{ "docstring": "\n Share the y-axis with *other*.\n\n This is equivalent to passing ``sharey=other`` when constructing the\n Axes, and cannot be used if the y-axis is already being shared with\n another Axes.\n ", "language": "en", "n_whitespaces": 65, "n_words": 29, "vocab_size": 24 }
https://github.com/matplotlib/matplotlib.git
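
A minimal sketch of the public call path for the method above: sharing a y-axis after both Axes already exist.

import matplotlib.pyplot as plt

fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.plot([0, 1, 2], [10, 20, 15])
ax2.sharey(ax1)                    # ax2 now follows ax1's y-limits, tickers and scale
ax2.plot([0, 1, 2], [12, 18, 25])
plt.show()
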
11
clean_text
def clean_text(start_token, end_token, doc_tokens, doc_bytes, ignore_final_whitespace=True):
    text = ""
    for index in range(start_token, end_token):
        token = doc_tokens[index]
        if token["html_token"]:
            continue
        text += token["token"]
        # Add a single space between two tokens iff there is at least one
        # whitespace character between them (outside of an HTML tag). For example:
        #
        #   token1 token2                           ==> Add space.
        #   token1</B> <B>token2                    ==> Add space.
        #   token1</A>token2                        ==> No space.
        #   token1<A href="..." title="...">token2  ==> No space.
        #   token1<SUP>2</SUP>token2                ==> No space.
        next_token = token
        last_index = end_token if ignore_final_whitespace else end_token + 1
        for next_token in doc_tokens[index + 1 : last_index]:
            if not next_token["html_token"]:
                break
        chars = doc_bytes[token["end_byte"] : next_token["start_byte"]].decode("utf-8")
        # Since some HTML tags are missing from the token list, we count '<' and
        # '>' to detect if we're inside a tag.
        unclosed_brackets = 0
        for char in chars:
            if char == "<":
                unclosed_brackets += 1
            elif char == ">":
                unclosed_brackets -= 1
            elif unclosed_brackets == 0 and re.match(r"\s", char):
                # Add a single space after this token.
                text += " "
                break
    return text
a59bca366174d9c692fa19750c24d65f47660ef7
14
nq_to_squad.py
250
Apply black formatting (#2115) * Testing black on ui/ * Applying black on docstores * Add latest docstring and tutorial changes * Create a single GH action for Black and docs to reduce commit noise to the minimum, slightly refactor the OpenAPI action too * Remove comments * Relax constraints on pydoc-markdown * Split temporary black from the docs. Pydoc-markdown was obsolete and needs a separate PR to upgrade * Fix a couple of bugs * Add a type: ignore that was missing somehow * Give path to black * Apply Black * Apply Black * Relocate a couple of type: ignore * Update documentation * Make Linux CI run after applying Black * Triggering Black * Apply Black * Remove dependency, does not work well * Remove manually double trailing commas * Update documentation Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
74,866
0
559
144
107
256,314
175
haystack
18
test/benchmarks/nq_to_squad.py
Python
23
{ "docstring": "Remove HTML tags from a text span and reconstruct proper spacing.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/deepset-ai/haystack.git
3
test_simple_q_compilation
def test_simple_q_compilation(self):
    # Run locally and with compression
    config = (
        simple_q.SimpleQConfig()
        .rollouts(num_rollout_workers=0, compress_observations=True)
        .training(num_steps_sampled_before_learning_starts=0)
    )

    num_iterations = 2

    for _ in framework_iterator(config, with_eager_tracing=True):
        trainer = config.build(env="CartPole-v0")
        rw = trainer.workers.local_worker()
        for i in range(num_iterations):
            sb = rw.sample()
            assert sb.count == config.rollout_fragment_length
            results = trainer.train()
            check_train_results(results)
            print(results)
        check_compute_single_action(trainer)
0dceddb912ed92286032b5563dd2e541a8a7031f
13
test_simple_q.py
180
[RLlib] Move learning_starts logic from buffers into `training_step()`. (#26032)
28,323
0
241
109
40
126,972
47
ray
31
rllib/algorithms/simple_q/tests/test_simple_q.py
Python
17
{ "docstring": "Test whether SimpleQ can be built on all frameworks.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/ray-project/ray.git
1
load_config
def load_config(file_path):
    _, ext = os.path.splitext(file_path)
    assert ext in ['.yml', '.yaml'], "only support yaml files for now"
    config = yaml.load(open(file_path, 'rb'), Loader=yaml.Loader)
    return config
a323fce66dd68a881cf599526185b52ab5df356b
11
program.py
84
vqa code integrated into ppocr training system
4,491
0
39
49
21
22,954
24
PaddleOCR
12
tools/program.py
Python
5
{ "docstring": "\n Load config from yml/yaml file.\n Args:\n file_path (str): Path of the config file to be loaded.\n Returns: global config\n ", "language": "en", "n_whitespaces": 39, "n_words": 19, "vocab_size": 17 }
https://github.com/PaddlePaddle/PaddleOCR.git
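
A hedged usage sketch: write a tiny YAML file and load it the same way the helper above does (binary handle plus yaml.Loader); the file name and keys are illustrative only.

import yaml

with open("demo_config.yml", "w") as f:
    f.write("Global:\n  epoch_num: 10\n  use_gpu: false\n")

config = yaml.load(open("demo_config.yml", "rb"), Loader=yaml.Loader)
print(config["Global"]["epoch_num"])  # -> 10
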
4
_get_spaces_from_remote_worker
def _get_spaces_from_remote_worker(self):
    # Get ID of the first remote worker.
    worker_id = next(iter(self.__worker_manager.actors().keys()))

    # Try to figure out spaces from the first remote worker.
    remote_spaces = self.foreach_worker(
        lambda worker: worker.foreach_policy(
            lambda p, pid: (pid, p.observation_space, p.action_space)
        ),
        remote_worker_indices=[worker_id],
        local_worker=False,
    )
    if not remote_spaces:
        raise ValueError(
            "Could not get observation and action spaces from remote "
            "worker. Maybe specify them manually in the config?"
        )
    spaces = {
        e[0]: (getattr(e[1], "original_space", e[1]), e[2])
        for e in remote_spaces[0]
    }

    # Try to add the actual env's obs/action spaces.
    env_spaces = self.foreach_worker(
        lambda worker: worker.foreach_env(
            lambda env: (env.observation_space, env.action_space)
        ),
        remote_worker_indices=[worker_id],
        local_worker=False,
    )
    if env_spaces:
        # env_spaces group spaces by environment then worker.
        # So need to unpack thing twice.
        spaces["__env__"] = env_spaces[0][0]

    logger.info(
        "Inferred observation/action spaces from remote "
        f"worker (local worker has no env): {spaces}"
    )

    return spaces
e707ce4fb3717e3c05118c57f503dfbd03552ca9
15
worker_set.py
286
[RLlib] Refactor `WorkerSet` on top of `FaultTolerantActorManager`. (#29938) Signed-off-by: Jun Gong <[email protected]>
30,791
0
496
177
94
136,000
137
ray
27
rllib/evaluation/worker_set.py
Python
32
{ "docstring": "Infer observation and action spaces from a remote worker.\n\n Returns:\n A dict mapping from policy ids to spaces.\n ", "language": "en", "n_whitespaces": 43, "n_words": 18, "vocab_size": 17 }
https://github.com/ray-project/ray.git
1
test_performance_frozen_clock
def test_performance_frozen_clock(self) -> None:
    past_stats = [
        (self.hs.get_clock().time(), resource.getrusage(resource.RUSAGE_SELF))
    ]
    stats: JsonDict = {}
    self.get_success(phone_stats_home(self.hs, stats, past_stats))
    self.assertEqual(stats["cpu_average"], 0)
d666fc02fab7421efb39a33800a83791f88bf9b2
13
test_phone_home.py
107
Add type hints to some tests files (#12371)
71,999
0
72
66
18
247,931
19
synapse
14
tests/test_phone_home.py
Python
10
{ "docstring": "\n If time doesn't move, don't error out.\n ", "language": "en", "n_whitespaces": 22, "n_words": 7, "vocab_size": 7 }
https://github.com/matrix-org/synapse.git
4
get_trades
def get_trades(trade_filter=None) -> Query:
    if not Trade.use_db:
        raise NotImplementedError('`Trade.get_trades()` not supported in backtesting mode.')
    if trade_filter is not None:
        if not isinstance(trade_filter, list):
            trade_filter = [trade_filter]
        return Trade.query.filter(*trade_filter)
    else:
        return Trade.query
b58e811b1486ae62e835cbea3e40cf88128243a0
11
trade_model.py
96
Move trade/order Models to their own class
34,484
0
118
57
24
149,690
31
freqtrade
10
freqtrade/persistence/trade_model.py
Python
18
{ "docstring": "\n Helper function to query Trades using filters.\n NOTE: Not supported in Backtesting.\n :param trade_filter: Optional filter to apply to trades\n Can be either a Filter object, or a List of filters\n e.g. `(trade_filter=[Trade.id == trade_id, Trade.is_open.is_(True),])`\n e.g. `(trade_filter=Trade.id == trade_id)`\n :return: unsorted query object\n ", "language": "en", "n_whitespaces": 164, "n_words": 44, "vocab_size": 38 }
https://github.com/freqtrade/freqtrade.git
1
load_data
def load_data(path="boston_housing.npz", test_split=0.2, seed=113):
    assert 0 <= test_split < 1
    origin_folder = (
        "https://storage.googleapis.com/tensorflow/tf-keras-datasets/"
    )
    path = get_file(
        path,
        origin=origin_folder + "boston_housing.npz",
        file_hash=(  # noqa: E501
            "f553886a1f8d56431e820c5b82552d9d95cfcb96d1e678153f8839538947dff5"
        ),
    )
    with np.load(path, allow_pickle=True) as f:
        x = f["x"]
        y = f["y"]

    rng = np.random.RandomState(seed)
    indices = np.arange(len(x))
    rng.shuffle(indices)
    x = x[indices]
    y = y[indices]

    x_train = np.array(x[: int(len(x) * (1 - test_split))])
    y_train = np.array(y[: int(len(x) * (1 - test_split))])
    x_test = np.array(x[int(len(x) * (1 - test_split)) :])
    y_test = np.array(y[int(len(x) * (1 - test_split)) :])
    return (x_train, y_train), (x_test, y_test)
be73ac1a1e25d9abd4d793cba9707098d7adf231
15
boston_housing.py
354
Add f-string format and lint with flynt on the whole codebase
82,968
0
202
221
63
279,394
90
keras
27
keras/datasets/boston_housing.py
Python
25
{ "docstring": "Loads the Boston Housing dataset.\n\n This is a dataset taken from the StatLib library which is maintained at\n Carnegie Mellon University.\n\n **WARNING:** This dataset has an ethical problem: the authors of this\n dataset included a variable, \"B\", that may appear to assume that racial\n self-segregation influences house prices. As such, we strongly discourage\n the use of this dataset, unless in the context of illustrating ethical\n issues in data science and machine learning.\n\n Samples contain 13 attributes of houses at different locations around the\n Boston suburbs in the late 1970s. Targets are the median values of\n the houses at a location (in k$).\n\n The attributes themselves are defined in the\n [StatLib website](http://lib.stat.cmu.edu/datasets/boston).\n\n Args:\n path: path where to cache the dataset locally\n (relative to `~/.keras/datasets`).\n test_split: fraction of the data to reserve as test set.\n seed: Random seed for shuffling the data\n before computing the test split.\n\n Returns:\n Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.\n\n **x_train, x_test**: numpy arrays with shape `(num_samples, 13)`\n containing either the training samples (for x_train),\n or test samples (for y_train).\n\n **y_train, y_test**: numpy arrays of shape `(num_samples,)` containing the\n target scalars. The targets are float scalars typically between 10 and\n 50 that represent the home prices in k$.\n ", "language": "en", "n_whitespaces": 311, "n_words": 202, "vocab_size": 142 }
https://github.com/keras-team/keras.git
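
The standard call path for the loader above (downloads the .npz on first use; shapes assume the default 0.2 split of the 506 samples).

from keras.datasets import boston_housing

(x_train, y_train), (x_test, y_test) = boston_housing.load_data(test_split=0.2, seed=113)
print(x_train.shape, y_train.shape)  # (404, 13) (404,)
print(x_test.shape, y_test.shape)    # (102, 13) (102,)
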
1
test_androidtv_select_source_overridden_app_name
async def test_androidtv_select_source_overridden_app_name(hass):
    # Evidence that the default YouTube app ID will be overridden
    conf_apps = {
        "com.youtube.test": "YouTube",
    }
    assert "YouTube" in ANDROIDTV_APPS.values()
    assert "com.youtube.test" not in ANDROIDTV_APPS

    await _test_select_source(
        hass,
        CONFIG_ANDROIDTV_PYTHON_ADB,
        conf_apps,
        "YouTube",
        "com.youtube.test",
        patchers.PATCH_LAUNCH_APP,
    )


@pytest.mark.parametrize(
    ["source", "expected_arg", "method_patch"],
    [
        ("com.app.test1", "com.app.test1", patchers.PATCH_LAUNCH_APP),
        ("TEST 1", "com.app.test1", patchers.PATCH_LAUNCH_APP),
        ("com.app.test2", "com.app.test2", patchers.PATCH_LAUNCH_APP),
        ("com.app.test3", "com.app.test3", patchers.PATCH_LAUNCH_APP),
        ("!com.app.test1", "com.app.test1", patchers.PATCH_STOP_APP),
        ("!TEST 1", "com.app.test1", patchers.PATCH_STOP_APP),
        ("!com.app.test2", "com.app.test2", patchers.PATCH_STOP_APP),
        ("!com.app.test3", "com.app.test3", patchers.PATCH_STOP_APP),
    ],
)
ea456893f94c7dc88b0cc28f92dadf240fbb1fe7
@pytest.mark.parametrize( ["source", "expected_arg", "method_patch"], [ ("com.app.test1", "com.app.test1", patchers.PATCH_LAUNCH_APP), ("TEST 1", "com.app.test1", patchers.PATCH_LAUNCH_APP), ("com.app.test2", "com.app.test2", patchers.PATCH_LAUNCH_APP), ("com.app.test3", "com.app.test3", patchers.PATCH_LAUNCH_APP), ("!com.app.test1", "com.app.test1", patchers.PATCH_STOP_APP), ("!TEST 1", "com.app.test1", patchers.PATCH_STOP_APP), ("!com.app.test2", "com.app.test2", patchers.PATCH_STOP_APP), ("!com.app.test3", "com.app.test3", patchers.PATCH_STOP_APP), ], )
9
test_media_player.py
243
Review AndroidTV tests for media player entity (#71168)
98,649
1
207
45
55
299,745
71
core
13
tests/components/androidtv/test_media_player.py
Python
14
{ "docstring": "Test that when an app name is overridden via the `apps` configuration parameter, the app is launched correctly.", "language": "en", "n_whitespaces": 17, "n_words": 18, "vocab_size": 15 }
https://github.com/home-assistant/core.git
2
to_row_dict
def to_row_dict(self, item):
    row_dict = OrderedDict(
        (field, item.get_data().get(field)) for field in self.list_export
    )
    return row_dict
d10f15e55806c6944827d801cd9c2d53f5da4186
13
views.py
56
Reformat with black
15,939
0
54
35
14
73,068
15
wagtail
9
wagtail/contrib/forms/views.py
Python
5
{ "docstring": "Orders the submission dictionary for spreadsheet writing", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
https://github.com/wagtail/wagtail.git
1
_get_multi_faces
def _get_multi_faces(self) -> Union[Generator[str, None, None],
                                    Generator[Tuple[str, int], None, None]]:
    process_type = getattr(self, f"_get_multi_faces_{self._type}")
    for item in process_type():
        yield item
e2a77e7c6e84e81f642cb22f528e25e3f2d2dbc1
11
jobs.py
80
Alignments Tool - Typing, Documentation + Re-org
21,125
0
91
31
18
101,721
20
faceswap
11
tools/alignments/jobs.py
Python
2
{ "docstring": " yield each frame or face that has multiple faces matched in alignments file\n\n Yields\n ------\n str or tuple\n The frame name of any frames which have multiple faces and potentially the face id\n ", "language": "en", "n_whitespaces": 73, "n_words": 33, "vocab_size": 28 }
https://github.com/deepfakes/faceswap.git
1
group_members
def group_members(self) -> list[str] | None:
    return self.speaker.sonos_group_entities
6151306e3d8f0bc774145280276a1801613f8aa7
7
media_player.py
32
Use standard attribute for Sonos group members (#70924)
98,271
0
22
19
8
299,337
8
core
6
homeassistant/components/sonos/media_player.py
Python
3
{ "docstring": "List of entity_ids which are currently grouped together.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
1
losses
def losses(self, vv):
    with tf.compat.v1.variable_scope("losses"):
        obj = self.free_energy(vv) - self.free_energy(self.v_k)
    return obj
dbf7ed08d36b4c77e5455c2c3ba165d6308aeaaf
12
rbm.py
68
update loss function documentation
7,088
0
44
39
11
39,033
12
recommenders
10
recommenders/models/rbm/rbm.py
Python
4
{ "docstring": "Calculate contrastive divergence, which is the difference between\n the free energy clamped on the data (v) and the model Free energy (v_k).\n\n Args:\n vv (tf.Tensor, float32): empirical input\n\n Returns:\n obj: contrastive divergence\n ", "language": "en", "n_whitespaces": 82, "n_words": 32, "vocab_size": 27 }
https://github.com/microsoft/recommenders.git
4
update_positions
def update_positions(self, renderer):
    x, y = self.xybox
    if isinstance(self.boxcoords, tuple):
        xcoord, ycoord = self.boxcoords
        x1, y1 = self._get_xy(renderer, x, y, xcoord)
        x2, y2 = self._get_xy(renderer, x, y, ycoord)
        ox0, oy0 = x1, y2
    else:
        ox0, oy0 = self._get_xy(renderer, x, y, self.boxcoords)

    w, h, xd, yd = self.offsetbox.get_extent(renderer)
    fw, fh = self._box_alignment
    self.offsetbox.set_offset((ox0 - fw * w + xd, oy0 - fh * h + yd))

    bbox = self.offsetbox.get_window_extent(renderer)
    self.patch.set_bounds(bbox.bounds)

    mutation_scale = renderer.points_to_pixels(self.get_fontsize())
    self.patch.set_mutation_scale(mutation_scale)

    if self.arrowprops:
        # Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.

        # Adjust the starting point of the arrow relative to the textbox.
        # TODO: Rotation needs to be accounted.
        arrow_begin = bbox.p0 + bbox.size * self._arrow_relpos
        arrow_end = self._get_position_xy(renderer)
        # The arrow (from arrow_begin to arrow_end) will be first clipped
        # by patchA and patchB, then shrunk by shrinkA and shrinkB (in
        # points). If patch A is not set, self.bbox_patch is used.
        self.arrow_patch.set_positions(arrow_begin, arrow_end)

        if "mutation_scale" in self.arrowprops:
            mutation_scale = renderer.points_to_pixels(
                self.arrowprops["mutation_scale"])
            # Else, use fontsize-based mutation_scale defined above.
        self.arrow_patch.set_mutation_scale(mutation_scale)

        patchA = self.arrowprops.get("patchA", self.patch)
        self.arrow_patch.set_patchA(patchA)
924d7c7f9900d8839e66616791121237101e7b57
14
offsetbox.py
418
Cleanup AnnotationBbox. Inline _update_position_xybox into update_positions. Avoid unpacking x,y pairs where unnecessary. Don't bother copying arrowprops, as we don't actually modify it. Reuse mutation scale for both patch and arrow. Clarify the doc for frameon. Various small extra cleanups.
22,838
0
501
264
113
107,623
169
matplotlib
49
lib/matplotlib/offsetbox.py
Python
26
{ "docstring": "\n Update pixel positions for the annotated point, the text and the arrow.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 10 }
https://github.com/matplotlib/matplotlib.git
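
A small hedged example that exercises this code path indirectly: placing an AnnotationBbox with an arrow makes update_positions run at draw time.

import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnnotationBbox, TextArea

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
ab = AnnotationBbox(TextArea("peak"), (2, 4), xybox=(1.0, 3.0),
                    arrowprops=dict(arrowstyle="->"))
ax.add_artist(ab)
plt.show()
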
1
test_events_service_call
async def test_events_service_call(hass):
    mock_session = MockAioSession()
    with async_patch(
        "homeassistant.components.aws.AioSession", return_value=mock_session
    ):
        await async_setup_component(
            hass,
            "aws",
            {
                "aws": {
                    "notify": [
                        {
                            "service": "events",
                            "name": "Events Test",
                            "region_name": "us-east-1",
                        }
                    ]
                }
            },
        )
        await hass.async_block_till_done()

    assert hass.services.has_service("notify", "events_test") is True

    mock_session.put_events.return_value = {
        "Entries": [{"EventId": "", "ErrorCode": 0, "ErrorMessage": "test-error"}]
    }

    await hass.services.async_call(
        "notify",
        "events_test",
        {
            "message": "test",
            "target": "ARN",
            "data": {},
        },
        blocking=True,
    )

    mock_session.put_events.assert_called_once_with(
        Entries=[
            {
                "EventBusName": "ARN",
                "Detail": json.dumps({"message": "test"}),
                "DetailType": "",
                "Source": "homeassistant",
                "Resources": [],
            }
        ]
    )
dbfca8def81b6aac9b4bd0a494c96a0b15f1a82c
18
test_init.py
328
Add support for EventBridge to aws integration (#77573) * Added EventBridge support to aws integration * Added type hints for all aws notification services + Added unit tests for EventBridge AWS integration * Increase line coverage for unit tests for aws integration.
88,945
0
568
173
65
289,810
82
core
17
tests/components/aws/test_init.py
Python
46
{ "docstring": "Test events service (EventBridge) call works as expected.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
5
_consume_incoming
async def _consume_incoming(self) -> None:
    while True:
        message_json = await self.incoming_queue.get()
        if message_json is None:
            self.incoming_queue.task_done()
            break

        type = message_json["type"]
        if type == "client_log":
            path = message_json["payload"]["path"]
            line_number = message_json["payload"]["line_number"]
            timestamp = message_json["payload"]["timestamp"]
            encoded_segments = message_json["payload"]["encoded_segments"]
            decoded_segments = base64.b64decode(encoded_segments)
            segments = pickle.loads(decoded_segments)
            self.service.console.print(
                DevtoolsLogMessage(
                    segments=segments,
                    path=path,
                    line_number=line_number,
                    unix_timestamp=timestamp,
                )
            )
        elif type == "client_spillover":
            spillover = int(message_json["payload"]["spillover"])
            info_renderable = DevtoolsInternalMessage(
                f"Discarded {spillover} messages", level="warning"
            )
            self.service.console.print(info_renderable)

        self.incoming_queue.task_done()
a72e347ed99333a090377ee438eaf63477cbf98b
16
service.py
299
Seperate server and client handling logic into classes for devtools
44,000
0
506
170
49
182,900
67
textual
27
src/textual/devtools/service.py
Python
32
{ "docstring": "Consume messages from the incoming (client -> server) Queue, and print\n the corresponding renderables to the console for each message.\n ", "language": "en", "n_whitespaces": 34, "n_words": 20, "vocab_size": 18 }
https://github.com/Textualize/textual.git
6
resolve
def resolve(self, root_reqs, check_supported_wheels):
    # type: (List[InstallRequirement], bool) -> RequirementSet
    requirement_set = RequirementSet(check_supported_wheels=check_supported_wheels)
    for req in root_reqs:
        if req.constraint:
            check_invalid_constraint_type(req)
        requirement_set.add_requirement(req)

    # Actually prepare the files, and collect any exceptions.  Most hash
    # exceptions cannot be checked ahead of time, because
    # _populate_link() needs to be called before we can make decisions
    # based on link type.
    discovered_reqs = []  # type: List[InstallRequirement]
    hash_errors = HashErrors()
    for req in chain(requirement_set.all_requirements, discovered_reqs):
        try:
            discovered_reqs.extend(self._resolve_one(requirement_set, req))
        except HashError as exc:
            exc.req = req
            hash_errors.append(exc)

    if hash_errors:
        raise hash_errors

    return requirement_set
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
13
resolver.py
163
upd; format
12,406
0
295
97
71
61,064
88
transferlearning
20
.venv/lib/python3.8/site-packages/pip/_internal/resolution/legacy/resolver.py
Python
17
{ "docstring": "Resolve what operations need to be done\n\n As a side-effect of this method, the packages (and their dependencies)\n are downloaded, unpacked and prepared for installation. This\n preparation is done by ``pip.operations.prepare``.\n\n Once PyPI has static dependency metadata available, it would be\n possible to move the preparation to become a step separated from\n dependency resolution.\n ", "language": "en", "n_whitespaces": 103, "n_words": 54, "vocab_size": 46 }
https://github.com/jindongwang/transferlearning.git
1
test_convert_outgoing_payload
async def test_convert_outgoing_payload(hass):
    command_template = mqtt.MqttCommandTemplate(None, hass=hass)
    assert command_template.async_render(b"\xde\xad\xbe\xef") == b"\xde\xad\xbe\xef"

    assert (
        command_template.async_render("b'\\xde\\xad\\xbe\\xef'")
        == "b'\\xde\\xad\\xbe\\xef'"
    )

    assert command_template.async_render(1234) == 1234

    assert command_template.async_render(1234.56) == 1234.56

    assert command_template.async_render(None) is None
457ce195dd9758cc7916d0cf1786948a10ef8135
10
test_init.py
129
Add mqtt entity attributes command templates (#61937) * Add entity variables to MqttCommandTemplate * missing command template update * make hass and entity conditional parameters * Add encoding support for publishing * Revert "Add encoding support for publishing" This reverts commit b69b9c60ececdab1d35621bdd7ffe5b8b9d59c44.
107,239
0
67
71
22
308,485
29
core
6
tests/components/mqtt/test_init.py
Python
10
{ "docstring": "Test the converting of outgoing MQTT payloads without template.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
2
test_sessions_metrics_equal_num_keys
def test_sessions_metrics_equal_num_keys(self):
    interval_days_int = 1
    interval_days = f"{interval_days_int}d"
    groupbyes = _session_groupby_powerset()

    for groupby in groupbyes:
        with patch(
            "sentry.api.endpoints.organization_sessions.release_health",
            SessionsReleaseHealthBackend(),
        ):
            sessions_data = result_sorted(self.get_sessions_data(groupby, interval_days))

        with patch(
            "sentry.api.endpoints.organization_sessions.release_health",
            MetricsReleaseHealthBackend(),
        ):
            metrics_data = result_sorted(self.get_sessions_data(groupby, interval_days))

        errors = compare_results(
            sessions=sessions_data,
            metrics=metrics_data,
            rollup=interval_days_int * 24 * 60 * 60,  # days to seconds
        )
        assert len(errors) == 0
4d9743df1cadba2b3dd8055f05c76e6e5fae3c4d
14
test_metrics_sessions_v2.py
172
fix(metrics): Fix flaky test [TET-387] (#39742) This PR aims at fixing the flakiness reproduced [here](https://sentry.io/organizations/sentry/issues/3070580114/?project=2423079&query=is:unresolved+test_sessions_metrics_equal_num_keys&statsPeriod=14d).
18,144
0
302
101
41
86,663
54
sentry
20
tests/sentry/release_health/test_metrics_sessions_v2.py
Python
21
{ "docstring": "\n Tests whether the number of keys in the metrics implementation of\n sessions data is the same as in the sessions implementation.\n\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 15 }
https://github.com/getsentry/sentry.git
2
stop_gradient
def stop_gradient(variables): if isinstance(variables, (list, tuple)): return map(tf.stop_gradient, variables) return tf.stop_gradient(variables) # CONTROL FLOW @keras_export("keras.backend.rnn") @tf.__internal__.dispatch.add_dispatch_support
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@keras_export("keras.backend.rnn") @tf.__internal__.dispatch.add_dispatch_support
10
backend.py
78
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,112
1
29
34
15
269,479
16
keras
11
keras/backend.py
Python
4
{ "docstring": "Returns `variables` but with zero gradient w.r.t. every other variable.\n\n Args:\n variables: Tensor or list of tensors to consider constant with respect\n to any other variable.\n\n\n Returns:\n A single tensor or a list of tensors (depending on the passed argument)\n that has no gradient with respect to any other variable.\n ", "language": "en", "n_whitespaces": 89, "n_words": 50, "vocab_size": 35 }
https://github.com/keras-team/keras.git
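Usage sketch for the `stop_gradient` record above (an illustrative addition, not a dataset field; assumes TensorFlow 2.x with Keras is installed, and the variable names are made up):

# keras.backend.stop_gradient blocks gradient flow through the wrapped tensor.
import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.Variable(3.0)
with tf.GradientTape() as tape:
    y = K.stop_gradient(x) * x  # the stopped factor is treated as a constant
grad = tape.gradient(y, x)
print(float(grad))  # 3.0: only the second, un-stopped factor contributes a gradient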
1
test_max_pos
def test_max_pos(self) -> None: cache = StreamChangeCache("#test", 1) cache.entity_has_changed("[email protected]", 2) cache.entity_has_changed("[email protected]", 3) cache.entity_has_changed("[email protected]", 4) # Known entities will return the point where they were changed. self.assertEqual(cache.get_max_pos_of_last_change("[email protected]"), 2) self.assertEqual(cache.get_max_pos_of_last_change("[email protected]"), 3) self.assertEqual(cache.get_max_pos_of_last_change("[email protected]"), 4) # Unknown entities will return the stream start position. self.assertEqual(cache.get_max_pos_of_last_change("[email protected]"), 1)
acea4d7a2ff61b5beda420b54a8451088060a8cd
10
test_stream_change_cache.py
162
Add missing types to tests.util. (#14597) Removes files under tests.util from the ignored by list, then fully types all tests/util/*.py files.
73,230
0
119
92
33
250,016
42
synapse
7
tests/util/test_stream_change_cache.py
Python
14
{ "docstring": "\n StreamChangeCache.get_max_pos_of_last_change will return the most\n recent point where the entity could have changed. If the entity is not\n known, the stream start is provided instead.\n ", "language": "en", "n_whitespaces": 55, "n_words": 25, "vocab_size": 20 }
https://github.com/matrix-org/synapse.git
1
test_simple_animation_reverse
def test_simple_animation_reverse(): # Thing that may be animated animatable = AnimateTest() # Fake wall-clock time time = 100.0 # Object that does the animation animation = SimpleAnimation( animatable, "foo", time, 3.0, start_value=50.0, end_value=20.0, final_value=20.0, easing=lambda x: x, ) assert animation(time) is False assert animatable.foo == 50.0 assert animation(time + 1.0) is False assert animatable.foo == 40.0 assert animation(time + 2.0) is False assert animatable.foo == 30.0 assert animation(time + 3.0) is True assert animatable.foo == 20.0
8be6ea91f6e8a8d24d385975f1a5a7714cf27894
11
test_animator.py
162
fix and test for animator
43,809
0
180
127
47
182,371
76
textual
12
tests/test_animator.py
Python
21
{ "docstring": "Test an animation from one float to another, where the end value is less than the start.", "language": "en", "n_whitespaces": 16, "n_words": 17, "vocab_size": 16 }
https://github.com/Textualize/textual.git
6
before_task_publish_handler
def before_task_publish_handler(sender=None, headers=None, body=None, **kwargs): if "task" not in headers or headers["task"] != "documents.tasks.consume_file": # Assumption: this is only ever a v2 message return try: task_args = body[0] task_kwargs = body[1] task_file_name = "" if "override_filename" in task_kwargs: task_file_name = task_kwargs["override_filename"] # Nothing was found, report the task first argument if not len(task_file_name): # There are always some arguments to the consume, first is always filename filepath = Path(task_args[0]) task_file_name = filepath.name PaperlessTask.objects.create( task_id=headers["id"], status=states.PENDING, task_file_name=task_file_name, task_name=headers["task"], result=None, date_created=timezone.now(), date_started=None, date_done=None, ) except Exception as e: # pragma: no cover # Don't let an exception in the signal handlers prevent # a document from being consumed. logger.error(f"Creating PaperlessTask failed: {e}", exc_info=True) @task_prerun.connect
97d6503fefc5737028637c39a2c1f33dd1e12904
@task_prerun.connect
13
handlers.py
258
Switches task serialization over to pickle format
117,208
1
347
149
89
320,534
112
paperless-ngx
33
src/documents/signals/handlers.py
Python
24
{ "docstring": "\n Creates the PaperlessTask object in a pending state. This is sent before\n the task reaches the broker, but before it begins executing on a worker.\n\n https://docs.celeryq.dev/en/stable/userguide/signals.html#before-task-publish\n\n https://docs.celeryq.dev/en/stable/internals/protocol.html#version-2\n\n ", "language": "en", "n_whitespaces": 44, "n_words": 27, "vocab_size": 23 }
https://github.com/paperless-ngx/paperless-ngx.git
1
test_dataframe_format_with_index
def test_dataframe_format_with_index(): pytest.importorskip("jinja2") df = pd.DataFrame( { "A": [1, 2, 3, 4, 5, 6, 7, 8], "B": list("ABCDEFGH"), "C": pd.Categorical(list("AAABBBCC")), }, index=list("ABCDEFGH"), ) ddf = dd.from_pandas(df, 3) exp = ( "Dask DataFrame Structure:\n" " A B C\n" "npartitions=3 \n" "A int64 object category[known]\n" "D ... ... ...\n" "G ... ... ...\n" "H ... ... ...\n" "Dask Name: from_pandas, 1 graph layer" ) assert repr(ddf) == exp assert str(ddf) == exp exp_table = exp = .format( exp_table=exp_table ) assert ddf.to_html() == exp # table is boxed with div and has style exp = .format( style=style, exp_table=exp_table ) assert ddf._repr_html_() == exp
ddcb841903f8f180aa359bd8db0054aa3b5964e3
15
test_format.py
259
Change repr methods to avoid Layer materialization (#9289) * change task count to layer count in DataFrame and Array reprs * add test * address doctest failure * simplify test * support pluralization * use 'graph layers' instead of 'layers' to be more explicit
36,771
0
453
145
70
156,780
100
dask
20
dask/dataframe/tests/test_format.py
Python
79
{ "docstring": "<table border=\"1\" class=\"dataframe\">\n <thead>\n <tr style=\"text-align: right;\">\n <th></th>\n <th>A</th>\n <th>B</th>\n <th>C</th>\n </tr>\n <tr>\n <th>npartitions=3</th>\n <th></th>\n <th></th>\n <th></th>\n </tr>\n </thead>\n <tbody>\n <tr>\n <th>A</th>\n <td>int64</td>\n <td>object</td>\n <td>category[known]</td>\n </tr>\n <tr>\n <th>D</th>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n </tr>\n <tr>\n <th>G</th>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n </tr>\n <tr>\n <th>H</th>\n <td>...</td>\n <td>...</td>\n <td>...</td>\n </tr>\n </tbody>\n</table><div><strong>Dask DataFrame Structure:</strong></div>\n{exp_table}\n<div>Dask Name: from_pandas, 1 graph layer</div><div><strong>Dask DataFrame Structure:</strong></div>\n<div>\n{style}{exp_table}\n</div>\n<div>Dask Name: from_pandas, 1 graph layer</div>", "language": "en", "n_whitespaces": 218, "n_words": 66, "vocab_size": 38 }
https://github.com/dask/dask.git
4
is_path
def is_path(G, path): for node, nbr in nx.utils.pairwise(path): if (node not in G) or (nbr not in G[node]): return False return True
d8b07498dfcd9fb2ce1c83ccf190ab24209234e8
11
function.py
68
Minor docstring touchups and test refactor for `is_path` (#5967) * Touch up docstring. * Condense conditional. * Minor refactor of ispath test - parametrize and rm redundant. * Add release note. * Update networkx/classes/function.py Co-authored-by: Dan Schult <[email protected]> Co-authored-by: Dan Schult <[email protected]>
42,315
0
49
44
18
177,249
22
networkx
8
networkx/classes/function.py
Python
5
{ "docstring": "Returns whether or not the specified path exists.\n\n For it to return True, every node on the path must exist and\n each consecutive pair must be connected via one or more edges.\n\n Parameters\n ----------\n G : graph\n A NetworkX graph.\n\n path : list\n A list of nodes which defines the path to traverse\n\n Returns\n -------\n bool\n True if `path` is a valid path in `G`\n\n ", "language": "en", "n_whitespaces": 116, "n_words": 65, "vocab_size": 52 }
https://github.com/networkx/networkx.git
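Usage sketch for the `is_path` record above (an illustrative addition, not a dataset field; assumes a recent networkx release that exposes `nx.is_path`, and the example graph is made up):

import networkx as nx

G = nx.path_graph(4)             # nodes 0-1-2-3 joined in a line
print(nx.is_path(G, [0, 1, 2]))  # True: every consecutive pair is connected by an edge
print(nx.is_path(G, [0, 2]))     # False: 0 and 2 are not adjacent in G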
7
evaluate
def evaluate(self, eval_features, return_realism=False, return_neighbors=False): num_eval_images = eval_features.shape[0] num_ref_images = self.D.shape[0] distance_batch = np.zeros([self.row_batch_size, num_ref_images], dtype=np.float16) batch_predictions = np.zeros([num_eval_images, self.num_nhoods], dtype=np.int32) #max_realism_score = np.zeros([num_eval_images,], dtype=np.float32) realism_score = np.zeros([num_eval_images,], dtype=np.float32) nearest_indices = np.zeros([num_eval_images,], dtype=np.int32) for begin1 in range(0, num_eval_images, self.row_batch_size): end1 = min(begin1 + self.row_batch_size, num_eval_images) feature_batch = eval_features[begin1:end1] for begin2 in range(0, num_ref_images, self.col_batch_size): end2 = min(begin2 + self.col_batch_size, num_ref_images) ref_batch = self._ref_features[begin2:end2] distance_batch[0:end1-begin1, begin2:end2] = self._distance_block.pairwise_distances(feature_batch, ref_batch) # From the minibatch of new feature vectors, determine if they are in the estimated manifold. # If a feature vector is inside a hypersphere of some reference sample, then the new sample lies on the estimated manifold. # The radii of the hyperspheres are determined from distances of neighborhood size k. samples_in_manifold = distance_batch[0:end1-begin1, :, None] <= self.D batch_predictions[begin1:end1] = np.any(samples_in_manifold, axis=1).astype(np.int32) #max_realism_score[begin1:end1] = np.max(self.D[:, 0] / (distance_batch[0:end1-begin1, :] + 1e-18), axis=1) #nearest_indices[begin1:end1] = np.argmax(self.D[:, 0] / (distance_batch[0:end1-begin1, :] + 1e-18), axis=1) nearest_indices[begin1:end1] = np.argmin(distance_batch[0:end1-begin1, :], axis=1) realism_score[begin1:end1] = self.D[nearest_indices[begin1:end1], 0] / np.min(distance_batch[0:end1-begin1, :], axis=1) if return_realism and return_neighbors: return batch_predictions, realism_score, nearest_indices elif return_realism: return batch_predictions, realism_score elif return_neighbors: return batch_predictions, nearest_indices return batch_predictions #----------------------------------------------------------------------------
7375ee364e0df2a417f92593e09557f1b2a3575a
14
precision_recall.py
499
initialize ostec
1,623
0
485
336
118
9,466
185
insightface
38
reconstruction/ostec/external/stylegan2/metrics/precision_recall.py
Python
25
{ "docstring": "Evaluate if new feature vectors are in the estimated manifold.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/deepinsight/insightface.git
1
test_processors
def test_processors(self): from djangocms_text_ckeditor.cms_plugins import TextPlugin from cms.plugin_pool import plugin_pool instance = CMSPlugin.objects.all()[0].get_plugin_instance()[0] load_from_string = self.load_template_from_string
a3110e1ff24085373898c7d2a85f628abeb8518d
13
test_rendering.py
69
Enabled isort workflow (#7200) * Ran isort * Enabled isort workflow Co-authored-by: Vinit Kumar <[email protected]>
17,345
0
51
169
13
82,298
16
django-cms
14
cms/tests/test_rendering.py
Python
27
{ "docstring": "\n Tests that plugin processors and plugin context processors can be defined\n in settings and are working and that extra plugin context processors can be\n passed to PluginContext.\n ", "language": "en", "n_whitespaces": 56, "n_words": 27, "vocab_size": 17 }
https://github.com/django-cms/django-cms.git
5
__aenter__
async def __aenter__(self): if self._closed: # httpx.AsyncClient does not allow reuse so we will not either. raise RuntimeError( "The client cannot be started again after closing. " "Retrieve a new client with `get_client()` instead." ) if self._started: # httpx.AsyncClient does not allow reentrancy so we will not either. raise RuntimeError("The client cannot be started more than once.") await self._exit_stack.__aenter__() # Enter a lifespan context if using an ephemeral application. # See https://github.com/encode/httpx/issues/350 if self._ephemeral_app and self.manage_lifespan: self._ephemeral_lifespan = await self._exit_stack.enter_async_context( app_lifespan_context(self._ephemeral_app) ) # Enter the httpx client's context await self._exit_stack.enter_async_context(self._client) self._started = True return self
05b92d7c7f6cf21c5d6033df7242c331fc66b92e
14
client.py
145
Disable lifespan management during logging
11,152
0
294
80
65
54,803
95
prefect
12
src/prefect/client.py
Python
16
{ "docstring": "\n Start the client.\n\n If the client is already started, this will raise an exception.\n\n If the client is already closed, this will raise an exception. Use a new client\n instance instead.\n ", "language": "en", "n_whitespaces": 67, "n_words": 31, "vocab_size": 19 }
https://github.com/PrefectHQ/prefect.git
3
_start_profiler
def _start_profiler(self, logdir): if self._profiler_started: return try: tf.profiler.experimental.start(logdir=logdir) self._profiler_started = True except tf.errors.AlreadyExistsError as e: # Profiler errors should not be fatal. logging.error("Failed to start profiler: %s", e.message)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
11
callbacks.py
89
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,363
0
111
52
28
269,967
28
keras
14
keras/callbacks.py
Python
8
{ "docstring": "Starts the profiler if currently inactive.\n\n Args:\n logdir: Directory where profiler results will be saved.\n ", "language": "en", "n_whitespaces": 38, "n_words": 15, "vocab_size": 14 }
https://github.com/keras-team/keras.git
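Usage sketch for the `_start_profiler` record above (an illustrative addition, not a dataset field). It shows the underlying `tf.profiler` calls that the Keras callback helper wraps; the log directory "./tb_logs" is hypothetical:

import tensorflow as tf

tf.profiler.experimental.start(logdir="./tb_logs")  # what _start_profiler calls internally
_ = tf.random.normal([256, 256]) @ tf.random.normal([256, 256])  # some work to trace
tf.profiler.experimental.stop()  # traces land under ./tb_logs for TensorBoard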
1
test_export_pipeline_3
def test_export_pipeline_3(): pipeline_string = ( 'DecisionTreeClassifier(SelectPercentile(input_matrix, SelectPercentile__percentile=20),' 'DecisionTreeClassifier__criterion=gini, DecisionTreeClassifier__max_depth=8,' 'DecisionTreeClassifier__min_samples_leaf=5, DecisionTreeClassifier__min_samples_split=5)' ) pipeline = creator.Individual.from_string(pipeline_string, tpot_obj._pset) expected_code = assert expected_code == export_pipeline(pipeline, tpot_obj.operators, tpot_obj._pset)
388616b6247ca4ea8de4e2f340d6206aee523541
9
export_tests.py
79
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
43,405
0
64
44
20
181,617
24
tpot
11
tests/export_tests.py
Python
29
{ "docstring": "Assert that exported_pipeline() generated a compile source file as expected given a fixed simple pipeline with a preprocessor.import numpy as np\nimport pandas as pd\nfrom sklearn.feature_selection import SelectPercentile, f_classif\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.tree import DecisionTreeClassifier\n\n# NOTE: Make sure that the outcome column is labeled 'target' in the data file\ntpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)\nfeatures = tpot_data.drop('target', axis=1)\ntraining_features, testing_features, training_target, testing_target = \\\\\n train_test_split(features, tpot_data['target'], random_state=None)\n\nexported_pipeline = make_pipeline(\n SelectPercentile(score_func=f_classif, percentile=20),\n DecisionTreeClassifier(criterion=\"gini\", max_depth=8, min_samples_leaf=5, min_samples_split=5)\n)\n\nexported_pipeline.fit(training_features, training_target)\nresults = exported_pipeline.predict(testing_features)\n", "language": "en", "n_whitespaces": 93, "n_words": 90, "vocab_size": 72 }
https://github.com/EpistasisLab/tpot.git
3
_experimental_indices
def _experimental_indices(self) -> List[int]: retval = [idx for idx, device in enumerate(self._all_devices) if device not in self._supported_indices] return retval
bdbbad4d310fb606b6f412aa81e9f57ccd994e97
11
amd.py
57
Refactor lib.gpu_stats (#1218) * inital gpu_stats refactor * Add dummy CPU Backend * Update Sphinx documentation
20,011
0
57
36
16
100,547
19
faceswap
10
lib/gpu_stats/amd.py
Python
6
{ "docstring": " list: The indices corresponding to :attr:`_ids` of GPU devices marked as\n \"experimental\". ", "language": "en", "n_whitespaces": 20, "n_words": 12, "vocab_size": 12 }
https://github.com/deepfakes/faceswap.git
7
loglog
def loglog(self, *args, **kwargs): dx = {k: v for k, v in kwargs.items() if k in ['base', 'subs', 'nonpositive', 'basex', 'subsx', 'nonposx']} self.set_xscale('log', **dx) dy = {k: v for k, v in kwargs.items() if k in ['base', 'subs', 'nonpositive', 'basey', 'subsy', 'nonposy']} self.set_yscale('log', **dy) return self.plot( *args, **{k: v for k, v in kwargs.items() if k not in {*dx, *dy}}) # @_preprocess_data() # let 'plot' do the unpacking..
383de519505964ed879c40b23ef36e90c17ebe0d
13
_axes.py
222
[Doc] fix more spelling and grammar
24,058
0
194
132
41
110,319
68
matplotlib
12
lib/matplotlib/axes/_axes.py
Python
11
{ "docstring": "\n Make a plot with log scaling on both the x- and y-axis.\n\n Call signatures::\n\n loglog([x], y, [fmt], data=None, **kwargs)\n loglog([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)\n\n This is just a thin wrapper around `.plot` which additionally changes\n both the x-axis and the y-axis to log scaling. All the concepts and\n parameters of plot can be used here as well.\n\n The additional parameters *base*, *subs* and *nonpositive* control the\n x/y-axis properties. They are just forwarded to `.Axes.set_xscale` and\n `.Axes.set_yscale`. To use different properties on the x-axis and the\n y-axis, use e.g.\n ``ax.set_xscale(\"log\", base=10); ax.set_yscale(\"log\", base=2)``.\n\n Parameters\n ----------\n base : float, default: 10\n Base of the logarithm.\n\n subs : sequence, optional\n The location of the minor ticks. If *None*, reasonable locations\n are automatically chosen depending on the number of decades in the\n plot. See `.Axes.set_xscale`/`.Axes.set_yscale` for details.\n\n nonpositive : {'mask', 'clip'}, default: 'mask'\n Non-positive values can be masked as invalid, or clipped to a very\n small positive number.\n\n **kwargs\n All parameters supported by `.plot`.\n\n Returns\n -------\n list of `.Line2D`\n Objects representing the plotted data.\n ", "language": "en", "n_whitespaces": 424, "n_words": 173, "vocab_size": 126 }
https://github.com/matplotlib/matplotlib.git
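Usage sketch for the `loglog` record above (an illustrative addition, not a dataset field; assumes matplotlib >= 3.3 for the `base` keyword, and the data below is made up):

import numpy as np
import matplotlib.pyplot as plt

x = np.logspace(0, 3, 50)          # 1 ... 1000
y = x ** 2
fig, ax = plt.subplots()
ax.loglog(x, y, base=10)           # plot() plus log scaling on both axes
fig.savefig("loglog_example.png")  # hypothetical output path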
1
moebius_kantor_graph
def moebius_kantor_graph(create_using=None): G = LCF_graph(16, [5, -5], 8, create_using) G.name = "Moebius-Kantor Graph" return G
dec723f072eb997a497a159dbe8674cd39999ee9
10
small.py
52
Docstrings for the small.py module (#5240) * added description for the first 5 small graphs * modified descriptions based on comment and added description for two more functions * added doctrings to all the functions * Minor touchups. Co-authored-by: Ross Barnowski <[email protected]>
41,735
0
27
32
13
176,165
15
networkx
5
networkx/generators/small.py
Python
4
{ "docstring": "\n Returns the Moebius-Kantor graph.\n\n The Möbius-Kantor graph is the cubic symmetric graph on 16 nodes.\n Its LCF notation is [5,-5]^8, and it is isomorphic to the generalized\n Petersen graph [1]_.\n\n Parameters\n ----------\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Returns\n -------\n G : networkx Graph\n Moebius-Kantor graph\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/M%C3%B6bius%E2%80%93Kantor_graph\n\n ", "language": "en", "n_whitespaces": 119, "n_words": 63, "vocab_size": 48 }
https://github.com/networkx/networkx.git
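Usage sketch for the `moebius_kantor_graph` record above (an illustrative addition, not a dataset field; assumes networkx is installed):

import networkx as nx

G = nx.moebius_kantor_graph()
print(G.number_of_nodes(), G.number_of_edges())  # 16 24 -- the cubic symmetric graph on 16 nodes
print(all(d == 3 for _, d in G.degree()))        # True: every node has degree 3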
1
test_real_query
def test_real_query(user_api_client, product, channel_USD): product_attr = product.product_type.product_attributes.first() category = product.category attr_value = product_attr.values.first() query = variables = { "categoryId": graphene.Node.to_global_id("Category", category.id), "sortBy": {"field": "NAME", "direction": "ASC"}, "first": 1, "attributesFilter": [ {"slug": f"{product_attr.slug}", "values": [f"{attr_value.slug}"]} ], "channel": channel_USD.slug, } response = user_api_client.post_graphql(query, variables) get_graphql_content(response)
6e1f00a685069da8d0704216ceca1fc2c2177840
15
test_graphql.py
204
Make GraphQL list items non-nullable (#9391) * Add NonNullList type * Fix types.common imports * Adjust tests * Explicit types.common imports * Fix tests * Use NonNullList in filter inputs
4,970
0
120
110
38
26,365
43
saleor
21
saleor/graphql/core/tests/test_graphql.py
Python
146
{ "docstring": "\n query Root($categoryId: ID!, $sortBy: ProductOrder, $first: Int,\n $attributesFilter: [AttributeInput!], $channel: String) {\n\n category(id: $categoryId) {\n ...CategoryPageFragmentQuery\n __typename\n }\n products(first: $first, sortBy: $sortBy, filter: {categories: [$categoryId],\n attributes: $attributesFilter}, channel: $channel) {\n\n ...ProductListFragmentQuery\n __typename\n }\n attributes(first: 20, filter: {inCategory: $categoryId}, channel: $channel) {\n edges {\n node {\n ...ProductFiltersFragmentQuery\n __typename\n }\n }\n }\n }\n\n fragment CategoryPageFragmentQuery on Category {\n id\n name\n ancestors(first: 20) {\n edges {\n node {\n name\n id\n __typename\n }\n }\n }\n children(first: 20) {\n edges {\n node {\n name\n id\n slug\n __typename\n }\n }\n }\n __typename\n }\n\n fragment ProductListFragmentQuery on ProductCountableConnection {\n edges {\n node {\n ...ProductFragmentQuery\n __typename\n }\n __typename\n }\n pageInfo {\n hasNextPage\n __typename\n }\n __typename\n }\n\n fragment ProductFragmentQuery on Product {\n id\n isAvailable\n name\n pricing {\n ...ProductPriceFragmentQuery\n __typename\n }\n thumbnailUrl1x: thumbnail(size: 255){\n url\n }\n thumbnailUrl2x: thumbnail(size: 510){\n url\n }\n __typename\n }\n\n fragment ProductPriceFragmentQuery on ProductPricingInfo {\n discount {\n gross {\n amount\n currency\n __typename\n }\n __typename\n }\n priceRange {\n stop {\n gross {\n amount\n currency\n __typename\n }\n currency\n __typename\n }\n start {\n gross {\n amount\n currency\n __typename\n }\n currency\n __typename\n }\n __typename\n }\n __typename\n }\n\n fragment ProductFiltersFragmentQuery on Attribute {\n id\n name\n slug\n choices(first: 10) {\n edges {\n node {\n id\n name\n slug\n __typename\n }\n }\n }\n __typename\n }\n ", "language": "en", "n_whitespaces": 1534, "n_words": 200, "vocab_size": 75 }
https://github.com/saleor/saleor.git
2
format
def format(self, tokensource, outfile): if self.encoding: # wrap the outfile in a StreamWriter outfile = codecs.lookup(self.encoding)[3](outfile) return self.format_unencoded(tokensource, outfile)
f3166e673fe8d40277b804d35d77dcdb760fc3b3
13
formatter.py
64
check point progress on only bringing in pip==22.0.4 (#4966) * vendor in pip==22.0.4 * updating vendor packaging version * update pipdeptree to fix pipenv graph with new version of pip. * Vendoring of pip-shims 0.7.0 * Vendoring of requirementslib 1.6.3 * Update pip index safety restrictions patch for pip==22.0.4 * Update patches * exclude pyptoject.toml from black to see if that helps. * Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4
3,310
0
62
40
18
20,286
19
pipenv
8
pipenv/patched/notpip/_vendor/pygments/formatter.py
Python
4
{ "docstring": "\n Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``\n tuples and write it into ``outfile``.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 13 }
https://github.com/pypa/pipenv.git
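Usage sketch for the `Formatter.format` record above (an illustrative addition, not a dataset field). The record shows pip's vendored copy of pygments; the sketch drives the same method through the standalone pygments package's public `highlight()` helper, which is an assumption about the reader's environment:

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

# highlight() lexes the source and hands the (tokentype, tokenstring) stream to Formatter.format.
html = highlight("print('hello')", PythonLexer(), HtmlFormatter())
print(html[:40])  # HTML markup produced by the formatter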
6
_get_layer_inputs
def _get_layer_inputs(self, layer): if ( isinstance(layer.call, tf.__internal__.function.Function) and layer.call.input_signature is not None ): return layer.call.input_signature, {} elif isinstance(layer, training_lib.Model): return saving_utils.model_call_inputs(layer) elif ( layer.input_spec is not None and layer._use_input_spec_as_call_signature ): # pylint: disable=protected-access
84afc5193d38057e2e2badf9c889ea87d80d8fbf
12
save_impl.py
117
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,569
0
142
104
25
276,121
33
keras
16
keras/saving/saved_model/save_impl.py
Python
19
{ "docstring": "Inspects layer object and returns the inferred input signature.\n\n Args:\n layer: Layer object.\n\n Returns:\n List of possibly nested TensorSpecs of the layer call function inputs in\n the form of `(args, kwargs)`\n ", "language": "en", "n_whitespaces": 79, "n_words": 31, "vocab_size": 26 }
https://github.com/keras-team/keras.git
1
test_lookup_cache
def test_lookup_cache(self): # At this point, a lookup for a ContentType should hit the DB with self.assertNumQueries(1): ContentType.objects.get_for_model(ContentType) # A second hit, though, won't hit the DB, nor will a lookup by ID # or natural key with self.assertNumQueries(0): ct = ContentType.objects.get_for_model(ContentType) with self.assertNumQueries(0): ContentType.objects.get_for_id(ct.id) with self.assertNumQueries(0): ContentType.objects.get_by_natural_key("contenttypes", "contenttype") # Once we clear the cache, another lookup will again hit the DB ContentType.objects.clear_cache() with self.assertNumQueries(1): ContentType.objects.get_for_model(ContentType) # The same should happen with a lookup by natural key ContentType.objects.clear_cache() with self.assertNumQueries(1): ContentType.objects.get_by_natural_key("contenttypes", "contenttype") # And a second hit shouldn't hit the DB with self.assertNumQueries(0): ContentType.objects.get_by_natural_key("contenttypes", "contenttype")
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
test_models.py
263
Refs #33476 -- Reformatted code with Black.
50,069
0
284
142
48
202,324
95
django
11
tests/contenttypes_tests/test_models.py
Python
17
{ "docstring": "\n The content type cache (see ContentTypeManager) works correctly.\n Lookups for a particular content type -- by model, ID, or natural key\n -- should hit the database only on the first lookup.\n ", "language": "en", "n_whitespaces": 60, "n_words": 31, "vocab_size": 27 }
https://github.com/django/django.git
12
prepare
def prepare(self): super().prepare() try: if self.config["verify_env"]: confd = self.config.get("default_include") if confd: # If 'default_include' is specified in config, then use it if "*" in confd: # Value is of the form "minion.d/*.conf" confd = os.path.dirname(confd) if not os.path.isabs(confd): # If configured 'default_include' is not an absolute # path, consider it relative to folder of 'conf_file' # (/etc/salt by default) confd = os.path.join( os.path.dirname(self.config["conf_file"]), confd ) else: confd = os.path.join( os.path.dirname(self.config["conf_file"]), "minion.d" ) v_dirs = [ self.config["pki_dir"], self.config["cachedir"], self.config["sock_dir"], self.config["extension_modules"], confd, ] verify_env( v_dirs, self.config["user"], permissive=self.config["permissive_pki_access"], root_dir=self.config["root_dir"], pki_dir=self.config["pki_dir"], ) except OSError as error: self.environment_failure(error) self.setup_logfile_logger() verify_log(self.config) log.info('Setting up the Salt Minion "%s"', self.config["id"]) migrations.migrate_paths(self.config) # Bail out if we find a process running and it matches out pidfile if (HAS_PSUTIL and not self.claim_process_responsibility()) or ( not HAS_PSUTIL and self.check_running() ): self.action_log_info("An instance is already running. Exiting") self.shutdown(1) transport = self.config.get("transport").lower() try: # Late import so logging works correctly import salt.minion # If the minion key has not been accepted, then Salt enters a loop # waiting for it, if we daemonize later then the minion could halt # the boot process waiting for a key to be accepted on the master. # This is the latest safe place to daemonize self.daemonize_if_required() self.set_pidfile() if self.config.get("master_type") == "func": salt.minion.eval_master_func(self.config) self.minion = salt.minion.MinionManager(self.config) except Exception: # pylint: disable=broad-except log.error( "An error occured while setting up the minion manager", exc_info=True ) self.shutdown(1)
25c2ae356bcf684cbe20f776e1ffcab0f8aeb80c
20
daemons.py
599
Address docs and hard coded strings
53,991
0
1,155
348
148
215,494
227
salt
40
salt/cli/daemons.py
Python
54
{ "docstring": "\n Run the preparation sequence required to start a salt minion.\n\n If sub-classed, don't **ever** forget to run:\n\n super(YourSubClass, self).prepare()\n ", "language": "en", "n_whitespaces": 52, "n_words": 19, "vocab_size": 18 }
https://github.com/saltstack/salt.git
1
test_cleanup_device_mqtt
async def test_cleanup_device_mqtt(hass, device_reg, entity_reg, mqtt_mock): data = ( '{ "device":{"identifiers":["0AFFD2"]},' ' "state_topic": "foobar/sensor",' ' "unique_id": "unique" }' ) async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", data) await hass.async_block_till_done() # Verify device and registry entries are created device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}) assert device_entry is not None entity_entry = entity_reg.async_get("sensor.mqtt_sensor") assert entity_entry is not None state = hass.states.get("sensor.mqtt_sensor") assert state is not None async_fire_mqtt_message(hass, "homeassistant/sensor/bla/config", "") await hass.async_block_till_done() await hass.async_block_till_done() # Verify device and registry entries are cleared device_entry = device_reg.async_get_device({("mqtt", "0AFFD2")}) assert device_entry is None entity_entry = entity_reg.async_get("sensor.mqtt_sensor") assert entity_entry is None # Verify state is removed state = hass.states.get("sensor.mqtt_sensor") assert state is None await hass.async_block_till_done() # Verify retained discovery topics have not been cleared again mqtt_mock.async_publish.assert_not_called()
ba6d1976dff8df2aa32726ff2acbf0ba61e5c550
11
test_discovery.py
280
Improve MQTT device removal (#66766) * Improve MQTT device removal * Update homeassistant/components/mqtt/mixins.py Co-authored-by: Martin Hjelmare <[email protected]> * Adjust tests * Improve test coverage Co-authored-by: Martin Hjelmare <[email protected]>
91,310
0
213
157
52
292,210
112
core
17
tests/components/mqtt/test_discovery.py
Python
25
{ "docstring": "Test discvered device is cleaned up when removed through MQTT.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/home-assistant/core.git
1
set_params
def set_params(self, **kwargs): self._set_params("transformer_list", **kwargs) return self
b849ce8d17fce0ff7569a9d49b7264d0f54d37d3
8
pipeline.py
36
DOC fix typo inside Pipeline docstring (#24730)
76,788
0
28
20
7
261,378
7
scikit-learn
4
sklearn/pipeline.py
Python
3
{ "docstring": "Set the parameters of this estimator.\n\n Valid parameter keys can be listed with ``get_params()``. Note that\n you can directly set the parameters of the estimators contained in\n `transformer_list`.\n\n Parameters\n ----------\n **kwargs : dict\n Parameters of this estimator or parameters of estimators contained\n in `transform_list`. Parameters of the transformers may be set\n using its name and the parameter name separated by a '__'.\n\n Returns\n -------\n self : object\n FeatureUnion class instance.\n ", "language": "en", "n_whitespaces": 184, "n_words": 70, "vocab_size": 48 }
https://github.com/scikit-learn/scikit-learn.git
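Usage sketch for the `FeatureUnion.set_params` record above (an illustrative addition, not a dataset field; assumes scikit-learn is installed, and the transformer names "pca" / "svd" are chosen only for illustration):

from sklearn.pipeline import FeatureUnion
from sklearn.decomposition import PCA, TruncatedSVD

union = FeatureUnion([("pca", PCA()), ("svd", TruncatedSVD())])
union.set_params(svd__n_components=3)           # '<name>__<param>' reaches the nested estimator
print(union.get_params()["svd__n_components"])  # 3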
2
_convert_to_boolean
def _convert_to_boolean(self, value): if value.lower() not in self.BOOLEAN_STATES: raise ValueError('Not a boolean: %s' % value) return self.BOOLEAN_STATES[value.lower()]
8198943edd73a363c266633e1aa5b2a9e9c9f526
11
configparser.py
64
add python 3.10.4 for windows
56,464
0
49
38
17
221,663
17
XX-Net
6
python3.10.4/Lib/configparser.py
Python
4
{ "docstring": "Return a boolean value translating from other types if necessary.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
https://github.com/XX-net/XX-Net.git
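Usage sketch for the `_convert_to_boolean` record above (an illustrative addition, not a dataset field). The helper is private, so the sketch exercises it through the public `ConfigParser.getboolean` API; the section and option names are made up:

import configparser

parser = configparser.ConfigParser()
parser.read_string("[demo]\nenabled = yes\n")
print(parser.getboolean("demo", "enabled"))  # True: 'yes' is a key in BOOLEAN_STATES
print(parser.BOOLEAN_STATES["off"])          # False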
1
test_unignore_create_entry
async def test_unignore_create_entry(hass, manager): async_setup_entry = AsyncMock(return_value=True) mock_integration(hass, MockModule("comp", async_setup_entry=async_setup_entry)) mock_entity_platform(hass, "config_flow.comp", None)
7cd68381f1d4f58930ffd631dfbfc7159d459832
10
test_config_entries.py
63
Search/replace RESULT_TYPE_* by FlowResultType enum (#74642)
115,027
0
25
244
13
316,449
13
core
9
tests/test_config_entries.py
Python
29
{ "docstring": "Test that we can ignore flows that are in progress and have a unique ID, then rediscover them.", "language": "en", "n_whitespaces": 17, "n_words": 18, "vocab_size": 17 }
https://github.com/home-assistant/core.git
1
test_rst_then_close
def test_rst_then_close(tctx): playbook, cff = start_h3_client(tctx) flow = tutils.Placeholder(HTTPFlow) server = tutils.Placeholder(connection.Server) err = tutils.Placeholder(str) assert ( playbook # request client >> cff.receive_headers(example_request_headers, end_stream=True) << (request := http.HttpRequestHeadersHook(flow)) << cff.send_decoder() # for receive_headers >> tutils.reply(to=request) << http.HttpRequestHook(flow) >> tutils.reply() # request server << (open := commands.OpenConnection(server)) >> cff.receive_data(b"unexpected data frame") << quic.CloseQuicConnection( tctx.client, error_code=quic.QuicErrorCode.PROTOCOL_VIOLATION, frame_type=None, reason_phrase=err, ) >> quic.QuicConnectionClosed( tctx.client, error_code=quic.QuicErrorCode.PROTOCOL_VIOLATION, frame_type=None, reason_phrase=err, ) >> tutils.reply("connection cancelled", to=open) << http.HttpErrorHook(flow) >> tutils.reply() ) assert flow().error.msg == "connection cancelled"
f23a1887bb76501f6dbb57573847767e7c1538f1
22
test_http3.py
314
[quic] fix h3 double-close issue
74,056
0
317
212
51
253,303
78
mitmproxy
40
test/mitmproxy/proxy/layers/http/test_http3.py
Python
32
{ "docstring": "\n Test that we properly handle the case of a client that first causes protocol errors and then disconnects.\n\n This is slightly different to H2, as QUIC will close the connection immediately.\n ", "language": "en", "n_whitespaces": 41, "n_words": 31, "vocab_size": 29 }
https://github.com/mitmproxy/mitmproxy.git
2
test_simple
def test_simple(self): code_owner_1 = self.create_codeowners( self.project_1, self.code_mapping_1, raw=self.data_1["raw"] ) code_owner_2 = self.create_codeowners( self.project_2, self.code_mapping_2, raw=self.data_2["raw"] ) response = self.get_success_response(self.organization.slug, status=status.HTTP_200_OK) for code_owner in [code_owner_1, code_owner_2]: assert code_owner.project.slug in response.data.keys() associations, errors = ProjectCodeOwners.validate_codeowners_associations( code_owner.raw, code_owner.project ) assert "associations" in response.data[code_owner.project.slug].keys() assert response.data[code_owner.project.slug]["associations"] == associations assert "errors" in response.data[code_owner.project.slug].keys() assert response.data[code_owner.project.slug]["errors"] == errors
5efa5eeb57ae6ddf740256e08ce3b9ff4ec98eaa
13
test_organization_codeowners_associations.py
274
feat(codeowners): Add endpoint to view code owner associations per organization (#31030) See API-2186 So the earlier version of this PR just had the endpoint return the entire serialized ProjectCodeOwners for an organization. While that works, the intention behind this feature is to read and use the associations, so sending the raw codeowners file, and timestamps are unnecessary and increase the latency with such large payloads, especially for larger orgs. @NisanthanNanthakumar suggested limiting what the endpoint returns to just what the feature will need on the frontend, and making the endpoint name a bit more specific. OrganizationCodeOwners -> OrganizationCodeOwnersAssocations. Along with this refactor, tests have been updated.
19,207
0
215
175
36
95,412
52
sentry
26
tests/sentry/api/endpoints/test_organization_codeowners_associations.py
Python
17
{ "docstring": "\n Tests that all the ProjectCodeOwners are serialized in the response\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 9 }
https://github.com/getsentry/sentry.git
1
test_memory_2
def test_memory_2(): cachedir = mkdtemp() tpot_obj = TPOTClassifier( random_state=42, population_size=1, offspring_size=2, generations=1, config_dict='TPOT light', memory=cachedir, verbosity=0 ) tpot_obj._setup_memory() rmtree(cachedir) assert tpot_obj._cachedir == cachedir assert isinstance(tpot_obj._memory, Memory)
388616b6247ca4ea8de4e2f340d6206aee523541
10
tpot_tests.py
104
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
43,503
0
99
66
23
181,716
26
tpot
18
tests/tpot_tests.py
Python
15
{ "docstring": "Assert that the TPOT _setup_memory function runs normally with a valid path.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/EpistasisLab/tpot.git
1
get_environment_client
def get_environment_client(self) -> EnvironmentsClient: return EnvironmentsClient( credentials=self._get_credentials(), client_info=self.client_info, client_options=self.client_options, )
05a883b35e34853ec9326bd579551a8e161d6cdc
10
cloud_composer.py
51
Google Cloud Composer opearators (#21251)
8,337
0
64
32
10
44,661
10
airflow
7
airflow/providers/google/cloud/hooks/cloud_composer.py
Python
7
{ "docstring": "Retrieves client library object that allow access Environments service.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/apache/airflow.git
2
update_metrics
def update_metrics(self, targets, predictions): for of_name, of_obj in self.output_features.items(): of_obj.update_metrics(targets[of_name], predictions[of_name]) eval_loss, additional_losses = self.eval_loss(targets, predictions) self.eval_loss_metric.update(eval_loss) self.eval_additional_losses_metrics.update(additional_losses)
aa0c63bf2ed825eb3ca8eff8a002d5ccbe395173
10
base.py
101
feat: Added model type GBM (LightGBM tree learner), as an alternative to ECD (#2027)
1,143
0
64
65
18
7,165
18
ludwig
13
ludwig/models/base.py
Python
6
{ "docstring": "Updates the model's metrics given targets and predictions.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/ludwig-ai/ludwig.git
4
get_arc_center
def get_arc_center(self, warning=True): # First two anchors and handles a1, h1, h2, a2 = self.points[:4] if np.all(a1 == a2): # For a1 and a2 to lie at the same point arc radius # must be zero. Thus arc_center will also lie at # that point. return a1 # Tangent vectors t1 = h1 - a1 t2 = h2 - a2 # Normals n1 = rotate_vector(t1, TAU / 4) n2 = rotate_vector(t2, TAU / 4) try: return line_intersection(line1=(a1, a1 + n1), line2=(a2, a2 + n2)) except Exception: if warning: warnings.warn("Can't find Arc center, using ORIGIN instead") self._failed_to_get_center = True return np.array(ORIGIN)
e040bcacd38378386749db18aeba575b93f4ebca
13
arc.py
187
Improved structure of the :mod:`.mobject` module (#2476) * group graphing and update its references * group text and update its references * group opengl and update its references * group three_d and update its references * group geometry and update (most) references * move some chaning.py + updater files into animation * refactor arc.py * refactor line.py * refactor polygram.py * refactor tips.py * black + isort * import new files in __init__.py * refactor places where geometry was used * black + isort again * remove unused imports * update reference.rst * add descriptions to files * fix circular imports * forgot ArrowTip * fix tests * fix doctests * satisfy mypy? * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix ALL merge conflicts * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * one VMobject import slipped through * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * re-add imports to `manim/opengl/__init__.py` * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix reference manual * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ignore unknown directive type * fix arrow tip imports in docstrings Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Benjamin Hackl <[email protected]>
46,157
0
287
116
73
189,657
100
manim
25
manim/mobject/geometry/arc.py
Python
15
{ "docstring": "\n Looks at the normals to the first two\n anchors, and finds their intersection points\n ", "language": "en", "n_whitespaces": 36, "n_words": 14, "vocab_size": 13 }
https://github.com/ManimCommunity/manim.git
1
forward
def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output @add_start_docstrings( , YOLOS_START_DOCSTRING, )
1ac698744c4dbdf1495d303246d08ffacdf4f5b8
@add_start_docstrings( """ YOLOS Model (consisting of a ViT encoder) with object detection heads on top, for tasks such as COCO detection. """, YOLOS_START_DOCSTRING, )
8
modeling_yolos.py
68
Add YOLOS (#16848) * First draft * Add YolosForObjectDetection * Make forward pass work * Add mid position embeddings * Add interpolation of position encodings * Add expected values * Add YOLOS to tests * Add integration test * Support tiny model as well * Support all models in conversion script * Remove mid_pe_size attribute * Make more tests pass * Add model to README and fix config * Add copied from statements * Rename base_model_prefix to vit * Add missing YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP * Apply suggestions from code review * Apply more suggestions from code review * Convert remaining checkpoints * Improve docstrings * Add YolosFeatureExtractor * Add feature extractor to docs * Add corresponding tests * Fix style * Fix docs * Apply suggestion from code review * Fix bad rebase * Fix some more bad rebase * Fix missing character * Improve docs and variable names Co-authored-by: Niels Rogge <[email protected]>
6,846
1
81
33
29
37,643
36
transformers
9
src/transformers/models/yolos/modeling_yolos.py
Python
5
{ "docstring": "\n YOLOS Model (consisting of a ViT encoder) with object detection heads on top, for tasks such as COCO detection.\n ", "language": "en", "n_whitespaces": 26, "n_words": 19, "vocab_size": 19 }
https://github.com/huggingface/transformers.git
1
test_enabled_requires_valid_sub
def test_enabled_requires_valid_sub(hass, mock_expired_cloud_login, cloud_prefs): assert cloud_prefs.alexa_enabled assert hass.data["cloud"].is_logged_in assert hass.data["cloud"].subscription_expired config = alexa_config.CloudAlexaConfig( hass, ALEXA_SCHEMA({}), "mock-user-id", cloud_prefs, hass.data["cloud"] ) assert not config.enabled
537dfbca18830a90a7f89d291f519948a3d977e8
11
test_alexa_config.py
100
Rename cloud's AlexaConfig to CloudAlexaConfig (#64065) * Rename cloud's AlexaConfig to CloudAlexaConfig * Tweak
108,191
0
50
62
19
309,493
22
core
13
tests/components/cloud/test_alexa_config.py
Python
8
{ "docstring": "Test that alexa config enabled requires a valid Cloud sub.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/home-assistant/core.git
3
predict_snli
def predict_snli(net, vocab, premise, hypothesis): premise = np.array(vocab[premise], ctx=d2l.try_gpu()) hypothesis = np.array(vocab[hypothesis], ctx=d2l.try_gpu()) label = np.argmax(net([premise.reshape((1, -1)), hypothesis.reshape((1, -1))]), axis=1) return 'entailment' if label == 0 else 'contradiction' if label == 1 \ else 'neutral' d2l.DATA_HUB['ml-100k'] = ( 'https://files.grouplens.org/datasets/movielens/ml-100k.zip', 'cd4dcac4241c8a4ad7badc7ca635da8a69dddb83')
b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2
15
mxnet.py
183
[PaddlePaddle] Merge master into Paddle branch (#1186) * change 15.2 title in chinese version (#1109) change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘ * 修改部分语义表述 (#1105) * Update r0.17.5 (#1120) * Bump versions in installation * 94行typo: (“bert.mall”)->(“bert.small”) (#1129) * line 313: "bert.mall" -> "bert.small" (#1130) * fix: update language as native reader (#1114) * Fix the translation of "stride" (#1115) * Update index.md (#1118) 修改部分语义表述 * Update self-attention-and-positional-encoding.md (#1133) 依照本书的翻译习惯,将pooling翻译成汇聚 * maybe a comment false (#1149) * maybe a little false * maybe a little false * A minor bug in the rcnn section (Chinese edition) (#1148) * Update bert.md (#1137) 一个笔误 # 假设batch_size=2,num_pred_positions=3 # 那么batch_idx应该是np.repeat( [0,1], 3 ) = [0,0,0,1,1,1] * Update calculus.md (#1135) * fix typo in git documentation (#1106) * fix: Update the Chinese translation in lr-scheduler.md (#1136) * Update lr-scheduler.md * Update chapter_optimization/lr-scheduler.md Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * fix translation for kaggle-house-price.md (#1107) * fix translation for kaggle-house-price.md * fix translation for kaggle-house-price.md Signed-off-by: sunhaizhou <[email protected]> * Update weight-decay.md (#1150) * Update weight-decay.md 关于“k多选d”这一部分,中文读者使用排列组合的方式可能更容易理解 关于“给定k个变量,阶数的个数为...”这句话是有歧义的,不是很像中国话,应该是说“阶数为d的项的个数为...”。 并增加了一句对“因此即使是阶数上的微小变化,比如从$2$到$3$,也会显著增加我们模型的复杂性。”的解释 解释为何会增加复杂性以及为何需要细粒度工具。 * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/weight-decay.md yep Co-authored-by: goldmermaid <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Fix a spelling error (#1161) * Update gru.md (#1152) The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state. 翻译错误 * Unify the function naming (#1113) Unify naming of the function 'init_xavier()'. * Update mlp-concise.md (#1166) * Update mlp-concise.md 语句不通顺 * Update environment.md 语序异常 * Update config.ini * fix the imprecise description (#1168) Co-authored-by: yuande <yuande> * fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175) * Fix some typos. (#1163) * Update batch-norm.md (#1170) fixing typos u->x in article * Update linear-regression.md (#1090) We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that 原译文把who也直接翻译出来了。 * Update mlp.md (#1117) * Update mlp.md 修改部分语义表述 * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: goldmermaid <[email protected]> * Update chapter_multilayer-perceptrons/mlp.md Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: goldmermaid <[email protected]> * Correct a translation error. (#1091) * Correct a translation error. 
* Update chapter_computer-vision/image-augmentation.md Co-authored-by: Aston Zhang <[email protected]> * Update aws.md (#1121) * Update aws.md * Update chapter_appendix-tools-for-deep-learning/aws.md Co-authored-by: Aston Zhang <[email protected]> * Update image-augmentation.md (#1093) * Update anchor.md (#1088) fix a minor issue in code * Update anchor.md * Update image-augmentation.md * fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087) * Avoid `torch.meshgrid` user warning (#1174) Avoids the following user warning: ```python ~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.) return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined] ``` * bump to 2.0.0-beta1 * Update sequence.md * bump beta1 on readme * Add latex code block background to config * BLD: Bump python support version 3.9 (#1183) * BLD: Bump python support version 3.9 * Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4 * BLD: Bump torch and tensorflow * Update Jenkinsfile * Update chapter_installation/index.md * Update chapter_installation/index.md Co-authored-by: Aston Zhang <[email protected]> * Update config.ini * Update INFO.md * Update INFO.md * Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187) * resolve the conflicts * revise from publisher (#1089) * revise from publisher * d2l api * post_latex * revise from publisher * revise ch11 * Delete d2l-Copy1.bib * clear cache * rm d2lbook clear * debug anchor * keep original d2l doc Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: Aston Zhang <[email protected]> * 重复语句 (#1188) Co-authored-by: Aston Zhang <[email protected]> * Improve expression for chapter_preliminaries/pandas.md (#1184) * Update pandas.md * Improve expression * Improve expression * Update chapter_preliminaries/pandas.md Co-authored-by: Aston Zhang <[email protected]> * Improce expression for chapter_preliminaries/linear-algebra.md (#1185) * Improce expression * Improve code comments * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md * Update chapter_preliminaries/linear-algebra.md Co-authored-by: Aston Zhang <[email protected]> * Fix multibox_detection bugs * Update d2l to 0.17.5 version * restore older version * Upgrade pandas * change to python3.8 * Test warning log * relocate warning log * test logs filtering * Update gru.md * Add DeprecationWarning filter * Test warning log * Update attention mechanisms & computational performance * Update multilayer perceptron& linear & convolution networks & computer vision * Update recurrent&optimition&nlp pretraining & nlp applications * ignore warnings * Update index.md * Update linear networks * Update multilayer perceptrons&deep learning computation * Update preliminaries * Check and Add warning filter * Update kaggle-cifar10.md * Update object-detection-dataset.md * Update ssd.md fcn.md * Update hybridize.md * Update hybridize.md Signed-off-by: sunhaizhou <[email protected]> Co-authored-by: zhou201505013 <[email protected]> Co-authored-by: Xinwei Liu <[email protected]> Co-authored-by: Anirudh Dagar <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: hugo_han <[email protected]> 
Co-authored-by: gyro永不抽风 <[email protected]> Co-authored-by: CanChengZheng <[email protected]> Co-authored-by: linlin <[email protected]> Co-authored-by: iuk <[email protected]> Co-authored-by: yoos <[email protected]> Co-authored-by: Mr. Justice Lawrence John Wargrave <[email protected]> Co-authored-by: Chiyuan Fu <[email protected]> Co-authored-by: Sunhuashan <[email protected]> Co-authored-by: Haiker Sun <[email protected]> Co-authored-by: Ming Liu <[email protected]> Co-authored-by: goldmermaid <[email protected]> Co-authored-by: silenceZheng66 <[email protected]> Co-authored-by: Wenchao Yan <[email protected]> Co-authored-by: Kiki2049 <[email protected]> Co-authored-by: Krahets <[email protected]> Co-authored-by: friedmainfunction <[email protected]> Co-authored-by: Jameson <[email protected]> Co-authored-by: P. Yao <[email protected]> Co-authored-by: Yulv-git <[email protected]> Co-authored-by: Liu,Xiao <[email protected]> Co-authored-by: YIN, Gang <[email protected]> Co-authored-by: Joe-HZ <[email protected]> Co-authored-by: lybloveyou <[email protected]> Co-authored-by: VigourJiang <[email protected]> Co-authored-by: zxhd863943427 <[email protected]> Co-authored-by: LYF <[email protected]> Co-authored-by: Aston Zhang <[email protected]> Co-authored-by: xiaotinghe <[email protected]> Co-authored-by: Ubuntu <[email protected]> Co-authored-by: Holly-Max <[email protected]> Co-authored-by: HinGwenWoong <[email protected]> Co-authored-by: Shuai Zhang <[email protected]>
37,391
0
97
104
31
158,223
40
d2l-zh
15
d2l/mxnet.py
Python
7
{ "docstring": "Predict the logical relationship between the premise and hypothesis.\n\n Defined in :numref:`sec_natural-language-inference-attention`", "language": "en", "n_whitespaces": 14, "n_words": 12, "vocab_size": 11 }
https://github.com/d2l-ai/d2l-zh.git
1
fit
def fit(self, X, y=None): self._validate_params() self._fit_transform(X) return self
ceeda362402bfc978bcc93d02481fe28e21a07ad
7
_locally_linear.py
42
MAINT Use _validate_params in LocallyLinearEmbedding (#23938) Co-authored-by: jeremiedbb <[email protected]>
76,332
0
36
25
8
260,545
8
scikit-learn
6
sklearn/manifold/_locally_linear.py
Python
4
{ "docstring": "Compute the embedding vectors for data X.\n\n Parameters\n ----------\n X : array-like of shape (n_samples, n_features)\n Training set.\n\n y : Ignored\n Not used, present here for API consistency by convention.\n\n Returns\n -------\n self : object\n Fitted `LocallyLinearEmbedding` class instance.\n ", "language": "en", "n_whitespaces": 128, "n_words": 39, "vocab_size": 36 }
https://github.com/scikit-learn/scikit-learn.git
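Usage sketch for the `LocallyLinearEmbedding.fit` record above (an illustrative addition, not a dataset field; assumes scikit-learn is installed, and the random matrix stands in for a real training set):

import numpy as np
from sklearn.manifold import LocallyLinearEmbedding

X = np.random.RandomState(0).rand(100, 5)
lle = LocallyLinearEmbedding(n_components=2, n_neighbors=10)
lle.fit(X)                   # computes the embedding vectors for X
print(lle.embedding_.shape)  # (100, 2)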
4
_customize_site
def _customize_site(self) -> None: if not LEGACY_VIRTUALENV or self._venv_type == "venv": # Enable user site (before system). contents = textwrap.dedent( f ).strip() else: contents = "" if self._sitecustomize is not None: contents += "\n" + self._sitecustomize sitecustomize = self.site / "sitecustomize.py" sitecustomize.write_text(contents) # Make sure bytecode is up-to-date too. assert compileall.compile_file(str(sitecustomize), quiet=1, force=True)
83c85e94b70aa5b3211ef3cc592d8cd8619beb15
15
venv.py
154
Fix legacy virtualenv setup in tests
41,545
0
171
81
43
175,018
53
pip
18
tests/lib/venv.py
Python
32
{ "docstring": "\n import os, site, sys\n if not os.environ.get('PYTHONNOUSERSITE', False):\n site.ENABLE_USER_SITE = {self._user_site_packages}\n # First, drop system-sites related paths.\n original_sys_path = sys.path[:]\n known_paths = set()\n for path in site.getsitepackages():\n site.addsitedir(path, known_paths=known_paths)\n system_paths = sys.path[len(original_sys_path):]\n for path in system_paths:\n if path in original_sys_path:\n original_sys_path.remove(path)\n sys.path = original_sys_path\n # Second, add user-site.\n if {self._user_site_packages}:\n site.addsitedir(site.getusersitepackages())\n # Third, add back system-sites related paths.\n for path in site.getsitepackages():\n site.addsitedir(path)\n ", "language": "en", "n_whitespaces": 456, "n_words": 63, "vocab_size": 41 }
https://github.com/pypa/pip.git
1
test_bitbucket2_on_push_commits_multiple_committers_with_others
def test_bitbucket2_on_push_commits_multiple_committers_with_others(self) -> None: commit_info = "* first commit ([84b96adc644](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))\n" expected_message = f self.check_webhook( "push_multiple_committers_with_others", TOPIC_BRANCH_EVENTS, expected_message )
4e4689949438735622bdf669f05d218c671e7e01
9
tests.py
52
webhooks: Pick a more reasonable length for short sha. 7 characters are not enough for large projects, so we change it to reasonably longer. As an example, The Linux kernel needs at least 11 characters of sha in its shortened form to identify a revision. We pick 11 so it should work for most of the projects. Signed-off-by: Zixuan James Li <[email protected]>
17,927
0
56
24
16
85,091
18
zulip
6
zerver/webhooks/bitbucket2/tests.py
Python
6
{ "docstring": "Tomasz [pushed](https://bitbucket.org/kolaszek/repository-name/branch/master) 10 commits to branch master. Commits by Tomasz (4), James (3), Brendon (2) and others (1).\\n\\n{commit_info*9}* first commit ([84b96adc644](https://bitbucket.org/kolaszek/repository-name/commits/84b96adc644a30fd6465b3d196369d880762afed))", "language": "en", "n_whitespaces": 20, "n_words": 21, "vocab_size": 20 }
https://github.com/zulip/zulip.git
1
line_collection_2d_to_3d
def line_collection_2d_to_3d(col, zs=0, zdir='z'): segments3d = _paths_to_3d_segments(col.get_paths(), zs, zdir) col.__class__ = Line3DCollection col.set_segments(segments3d)
df6f95703b60348e01603f98a439b133da2938a0
10
art3d.py
64
Improve mpl_toolkit documentation
23,823
0
25
39
12
109,916
13
matplotlib
10
lib/mpl_toolkits/mplot3d/art3d.py
Python
4
{ "docstring": "Convert a `.LineCollection` to a `.Line3DCollection` object.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 6 }
https://github.com/matplotlib/matplotlib.git
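A minimal sketch of what `line_collection_2d_to_3d` does to a collection; in practice it is usually reached through `Axes3D.add_collection3d`, and the `zs`/`zdir` values here are arbitrary:

    from matplotlib.collections import LineCollection
    from mpl_toolkits.mplot3d import art3d

    segments = [[(0, 0), (1, 1)], [(1, 0), (0, 1)]]
    col = LineCollection(segments)
    art3d.line_collection_2d_to_3d(col, zs=0.5, zdir="y")  # mutates col in place
    print(type(col).__name__)  # -> Line3DCollection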
5
register_serializer
def register_serializer(format, serializer_module, serializers=None): if serializers is None and not _serializers: _load_serializers() try: module = importlib.import_module(serializer_module) except ImportError as exc: bad_serializer = BadSerializer(exc) module = type( "BadSerializerModule", (), { "Deserializer": bad_serializer, "Serializer": bad_serializer, }, ) if serializers is None: _serializers[format] = module else: serializers[format] = module
9c19aff7c7561e3a82978a272ecdaad40dda5c00
14
__init__.py
140
Refs #33476 -- Reformatted code with Black.
50,863
0
187
84
35
204,735
46
django
14
django/core/serializers/__init__.py
Python
19
{ "docstring": "Register a new serializer.\n\n ``serializer_module`` should be the fully qualified module name\n for the serializer.\n\n If ``serializers`` is provided, the registration will be added\n to the provided dictionary.\n\n If ``serializers`` is not provided, the registration will be made\n directly into the global register of serializers. Adding serializers\n directly is not a thread-safe operation.\n ", "language": "en", "n_whitespaces": 77, "n_words": 53, "vocab_size": 35 }
https://github.com/django/django.git
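A minimal usage sketch for `register_serializer`, assuming a hypothetical `myapp.csv_serializer` module that exposes `Serializer`/`Deserializer` classes and a placeholder `MyModel` model:

    from django.core import serializers

    # "myapp.csv_serializer" is assumed to exist; if its import fails, the lazy
    # BadSerializer fallback above re-raises the ImportError on first use.
    serializers.register_serializer("csv", "myapp.csv_serializer")
    rows = serializers.serialize("csv", MyModel.objects.all())  # MyModel is a placeholder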
4
close
def close(self) -> None: if self._buf is not None: self._buf.release() self._buf = None if self._mmap is not None: self._mmap.close() if self._fd >= 0: os.close(self._fd) self._fd = -1
fd0262413e84c02f93d15eca1f87e1ba74cbe34e
10
shm.py
106
Access to POSIX shared memory from Python
21,617
0
110
64
18
103,211
27
kitty
7
kitty/shm.py
Python
11
{ "docstring": "Closes access to the shared memory from this instance but does\n not destroy the shared memory block.", "language": "en", "n_whitespaces": 23, "n_words": 17, "vocab_size": 14 }
https://github.com/kovidgoyal/kitty.git
6
require_libsndfile_with_opus
def require_libsndfile_with_opus(test_case): if (sys.platform != "linux" and find_spec("soundfile")) or (sys.platform == "linux" and find_library("sndfile")): import soundfile # soundfile library is needed to be installed to check libsndfile version if version.parse(soundfile.__libsndfile_version__) < version.parse("1.0.30"): test_case = unittest.skip( "test requires libsndfile>=1.0.30: `conda install -c conda-forge libsndfile>=1.0.30`" )(test_case) else: test_case = require_sndfile(test_case) return test_case
b55c590809381d7822ad76c2bebbb06e895393fa
14
utils.py
134
process .opus files (for Multilingual Spoken Words) (#3666) * check version of libsndfile explicitly (instead of catching an error) * fix version of libsndfile * add test for opus decoding * add decorator to pass tests that require libsndfile>=1.0.30
21,816
0
127
73
41
104,318
50
datasets
13
tests/utils.py
Python
10
{ "docstring": "\n Decorator marking a test that requires libsndfile>=1.0.30 (version that is required for opus decoding).\n\n These tests are skipped when libsndfile is <1.0.30.\n\n ", "language": "en", "n_whitespaces": 32, "n_words": 22, "vocab_size": 20 }
https://github.com/huggingface/datasets.git
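A hedged sketch of how the decorator would be applied; the `tests.utils` import path follows the record's file path and the test body is a placeholder:

    import unittest
    from tests.utils import require_libsndfile_with_opus  # path assumed from tests/utils.py

    @require_libsndfile_with_opus
    class OpusDecodingTest(unittest.TestCase):
        def test_decode_opus(self):
            ...  # would decode an .opus fixture via the Audio feature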
6
_replace_child_layer_functions
def _replace_child_layer_functions(layer, serialization_cache): # pylint: disable=protected-access original_fns = {}
84afc5193d38057e2e2badf9c889ea87d80d8fbf
7
save_impl.py
23
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,571
0
18
106
9
276,127
9
keras
4
keras/saving/saved_model/save_impl.py
Python
22
{ "docstring": "Replaces functions in the children layers with wrapped tf.functions.\n\n This step allows functions from parent layers to reference the wrapped\n functions from their children layers instead of retracing the ops.\n\n This function also resets all losses stored in the layer. These are stored in\n the returned dictionary. Use `_restore_child_layer_functions` to restore\n the original attributes.\n\n Args:\n layer: Keras Layer object.\n serialization_cache: Dictionary shared between all objects during\n serialization.\n\n Returns:\n Dictionary mapping layer objects -> original functions and losses:\n { Child layer 1: {\n 'losses': Original losses,\n 'call': Original call function\n '_activity_regularizer': Original activity regularizer},\n Child layer 2: ...\n }\n ", "language": "en", "n_whitespaces": 200, "n_words": 98, "vocab_size": 69 }
https://github.com/keras-team/keras.git
1
_parallel_scaling
def _parallel_scaling(self) -> Dict[int, float]: retval = {0: 1.0, 1: 1.0, 2: 0.7, 3: 0.55, 4: 0.5, 5: 0.4} logger.trace(retval) # type: ignore return retval
13cfb3f39e72e9ca181f173b7b3db2a048db0d08
8
pipeline.py
72
extract: Add batch processing mode
20,871
0
139
60
23
101,458
25
faceswap
8
plugins/extract/pipeline.py
Python
22
{ "docstring": " dict: key is number of parallel plugins being loaded, value is the scaling factor that\n the total base vram for those plugins should be scaled by\n\n Notes\n -----\n VRAM for parallel plugins does not stack in a linear manner. Calculating the precise\n scaling for any given plugin combination is non trivial, however the following are\n calculations based on running 2-5 plugins in parallel using s3fd, fan, unet, vgg-clear\n and vgg-obstructed. The worst ratio is selected for each combination, plus a little extra\n to ensure that vram is not used up.\n\n If OOM errors are being reported, then these ratios should be relaxed some more\n ", "language": "en", "n_whitespaces": 175, "n_words": 104, "vocab_size": 79 }
https://github.com/deepfakes/faceswap.git
3
hass
def hass(hass_fixture_setup, loop, load_registries, hass_storage, request): hass_fixture_setup.append(True) orig_tz = dt_util.DEFAULT_TIME_ZONE
31a787558fd312331b55e5c2c4b33341fc3601fc
7
conftest.py
39
Ensure recorder test fixture is setup before hass fixture (#80528) * Ensure recorder test fixture is setup before hass fixture * Adjust more tests
88,632
0
19
111
10
289,490
10
core
10
tests/conftest.py
Python
19
{ "docstring": "Fixture to provide a test instance of Home Assistant.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
12
change_aliases
def change_aliases(self, change_map): # If keys and values of change_map were to intersect, an alias might be # updated twice (e.g. T4 -> T5, T5 -> T6, so also T4 -> T6) depending # on their order in change_map. assert set(change_map).isdisjoint(change_map.values()) # 1. Update references in "select" (normal columns plus aliases), # "group by" and "where". self.where.relabel_aliases(change_map) if isinstance(self.group_by, tuple): self.group_by = tuple( [col.relabeled_clone(change_map) for col in self.group_by] ) self.select = tuple([col.relabeled_clone(change_map) for col in self.select]) self.annotations = self.annotations and { key: col.relabeled_clone(change_map) for key, col in self.annotations.items() } # 2. Rename the alias in the internal table/alias datastructures. for old_alias, new_alias in change_map.items(): if old_alias not in self.alias_map: continue alias_data = self.alias_map[old_alias].relabeled_clone(change_map) self.alias_map[new_alias] = alias_data self.alias_refcount[new_alias] = self.alias_refcount[old_alias] del self.alias_refcount[old_alias] del self.alias_map[old_alias] table_aliases = self.table_map[alias_data.table_name] for pos, alias in enumerate(table_aliases): if alias == old_alias: table_aliases[pos] = new_alias break self.external_aliases = { # Table is aliased or it's being changed and thus is aliased. change_map.get(alias, alias): (aliased or alias in change_map) for alias, aliased in self.external_aliases.items() }
9c19aff7c7561e3a82978a272ecdaad40dda5c00
13
query.py
385
Refs #33476 -- Reformatted code with Black.
51,257
0
527
246
113
205,875
167
django
31
django/db/models/sql/query.py
Python
29
{ "docstring": "\n Change the aliases in change_map (which maps old-alias -> new-alias),\n relabelling any references to them in select columns and the where\n clause.\n ", "language": "en", "n_whitespaces": 51, "n_words": 22, "vocab_size": 20 }
https://github.com/django/django.git
1
test_update_from_select
def test_update_from_select(self, mock_handler): self.set_handler(mock_handler, name='pg', tables={'tasks': self.df}) # --- use predictor --- self.set_predictor(self.task_predictor) sql = ret = self.command_executor.execute_command( parse_sql(sql, dialect='mindsdb')) assert ret.error_code is None # 1 select and 2 updates assert mock_handler().query.call_count == 3 # second is update assert mock_handler().query.call_args_list[1][0][0].to_string() == "update table2 set a1=1, c1='ccc' where (a1 = 1) AND (b1 = 'ccc')"
8f9cbb01af1f68c55227cebf4e749b86a2c525ca
15
test_executor.py
157
unit tests for: - create table - insert into
25,858
0
135
94
44
116,874
54
mindsdb
20
tests/unit/test_executor.py
Python
26
{ "docstring": "\n update \n pg.table2 \n set\n a1 = df.a,\n c1 = df.c\n from \n (\n SELECT model.a as a, model.b as b, model.p as c\n FROM pg.tasks as t\n JOIN mindsdb.task_model as model\n WHERE t.a=1 \n )\n as df\n where \n table2.a1 = df.a \n and table2.b1 = df.b \n ", "language": "en", "n_whitespaces": 351, "n_words": 42, "vocab_size": 34 }
https://github.com/mindsdb/mindsdb.git
2
_compile_weights_loss_and_weighted_metrics
def _compile_weights_loss_and_weighted_metrics(self, sample_weights=None): with backend.get_graph().as_default(): if sample_weights is not None: self._update_sample_weight_modes(sample_weights) self._prepare_sample_weights(sample_weights) masks = self._prepare_output_masks() # Compute weighted metrics. self._handle_metrics( self.outputs, targets=self._targets, skip_target_masks=self._prepare_skip_target_masks(), sample_weights=self.sample_weights, masks=masks, return_weighted_metrics=True, ) # Compute total loss. # Used to keep track of the total loss value (stateless). # eg., total_loss = loss_weight_1 * output_1_loss_fn(...) + # loss_weight_2 * output_2_loss_fn(...) + # layer losses. self.total_loss = self._prepare_total_loss(masks)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
12
training_v1.py
154
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,923
0
348
93
50
271,987
61
keras
19
keras/engine/training_v1.py
Python
15
{ "docstring": "Compiles the model loss and weighted metric sub-graphs.\n\n This may be used to set graph tensors as sample weights (instead of creating\n placeholders). This functionality is necessary for\n `tf.keras.estimator.model_to_estimator`, which calls Keras models in a v1\n graph, and creates iterator tensors for inputs, targets, and sample weights.\n\n Args:\n sample_weights: List of tensors to use as the sample weights. Must be the\n same length as the number of outputs. If left as `None`, placeholders\n are used instead.\n ", "language": "en", "n_whitespaces": 149, "n_words": 76, "vocab_size": 56 }
https://github.com/keras-team/keras.git
1
get_mop_query
def get_mop_query(doctype, txt, searchfield, start, page_len, filters): return frappe.db.sql( , {"parent": filters.get("parent"), "start": start, "page_len": page_len, "txt": "%%%s%%" % txt}, ) @frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
494bd9ef78313436f0424b918f200dab8fc7c20b
@frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
12
payment_order.py
99
style: format code with black
13,751
1
16
50
21
64,915
23
erpnext
13
erpnext/accounts/doctype/payment_order/payment_order.py
Python
7
{ "docstring": " select mode_of_payment from `tabPayment Order Reference`\n\t\twhere parent = %(parent)s and mode_of_payment like %(txt)s\n\t\tlimit %(start)s, %(page_len)s", "language": "en", "n_whitespaces": 15, "n_words": 17, "vocab_size": 16 }
https://github.com/frappe/erpnext.git
4
reset_page_edit_handler_cache
def reset_page_edit_handler_cache(**kwargs): if kwargs["setting"] == "WAGTAILADMIN_COMMENTS_ENABLED": set_default_page_edit_handlers(Page) for model in apps.get_models(): if issubclass(model, Page): model.get_edit_handler.cache_clear()
d10f15e55806c6944827d801cd9c2d53f5da4186
14
edit_handlers.py
76
Reformat with black
15,624
0
61
43
14
71,107
15
wagtail
10
wagtail/admin/edit_handlers.py
Python
6
{ "docstring": "\n Clear page edit handler cache when global WAGTAILADMIN_COMMENTS_ENABLED settings are changed\n ", "language": "en", "n_whitespaces": 18, "n_words": 11, "vocab_size": 11 }
https://github.com/wagtail/wagtail.git
1
test_comment
def test_comment(self) -> None: html = b tree = decode_body(html, "http://example.com/test.html") og = parse_html_to_open_graph(tree, "http://example.com/test.html") self.assertEqual(og, {"og:title": "Foo", "og:description": "Some text."})
7e91107be1a4287873266e588a3c5b415279f4c8
10
test_html_preview.py
78
Add type hints to `tests/rest` (#12146) * Add type hints to `tests/rest` * newsfile * change import from `SigningKey`
71,639
0
48
43
18
247,381
21
synapse
8
tests/rest/media/v1/test_html_preview.py
Python
13
{ "docstring": "\n <html>\n <head><title>Foo</title></head>\n <body>\n <!-- HTML comment -->\n Some text.\n </body>\n </html>\n ", "language": "en", "n_whitespaces": 68, "n_words": 11, "vocab_size": 11 }
https://github.com/matrix-org/synapse.git
18
_process_packet
def _process_packet(self, pkt): # type: (Packet) -> Optional[Packet] if self.app: # Special mode: Application layer. Use on top of TCP pay_class = pkt.__class__ if not hasattr(pay_class, "tcp_reassemble"): # Being on top of TCP, we have no way of knowing # when a packet ends. return pkt self.data += bytes(pkt) pkt = pay_class.tcp_reassemble(self.data, self.metadata, self.session) if pkt: self.data = b"" self.metadata = {} return pkt return None from scapy.layers.inet import IP, TCP if not pkt or TCP not in pkt: return pkt pay = pkt[TCP].payload if isinstance(pay, (NoPayload, conf.padding_layer)): return pkt new_data = pay.original # Match packets by a unique TCP identifier seq = pkt[TCP].seq ident = self._get_ident(pkt) data, metadata = self.tcp_frags[ident] tcp_session = self.tcp_sessions[self._get_ident(pkt, True)] # Let's guess which class is going to be used if "pay_class" not in metadata: pay_class = pay.__class__ if hasattr(pay_class, "tcp_reassemble"): tcp_reassemble = pay_class.tcp_reassemble else: # We can't know for sure when a packet ends. # Ignore. return pkt metadata["pay_class"] = pay_class metadata["tcp_reassemble"] = tcp_reassemble else: tcp_reassemble = metadata["tcp_reassemble"] if "seq" not in metadata: metadata["seq"] = seq # Get a relative sequence number for a storage purpose relative_seq = metadata.get("relative_seq", None) if relative_seq is None: relative_seq = metadata["relative_seq"] = seq - 1 seq = seq - relative_seq # Add the data to the buffer # Note that this take care of retransmission packets. data.append(new_data, seq) # Check TCP FIN or TCP RESET if pkt[TCP].flags.F or pkt[TCP].flags.R: metadata["tcp_end"] = True # In case any app layer protocol requires it, # allow the parser to inspect TCP PSH flag if pkt[TCP].flags.P: metadata["tcp_psh"] = True # XXX TODO: check that no empty space is missing in the buffer. # XXX Currently, if a TCP fragment was missing, we won't notice it. packet = None # type: Optional[Packet] if data.full(): # Reassemble using all previous packets packet = tcp_reassemble(bytes(data), metadata, tcp_session) # Stack the result on top of the previous frames if packet: if "seq" in metadata: pkt[TCP].seq = metadata["seq"] # Clear buffer data.clear() # Clear TCP reassembly metadata metadata.clear() del self.tcp_frags[ident] # Rebuild resulting packet pay.underlayer.remove_payload() if IP in pkt: pkt[IP].len = None pkt[IP].chksum = None pkt = pkt / packet pkt.wirelen = None return pkt return None
ca10c5cf00425d0178998ec0b006cbb65ddbfb54
12
sessions.py
675
[MS-RPCE] and [MS-SMB] major update (#3683) * Various fixes regarding DCE/RPC build * DCE/RPC sessions * Cleanup unused code * Add missing GSS_WRAP algo names * Add find_dcerpc_interface * Split SMB client and server * Missing StrFixedLenFieldUtf16 * Remove unfinished smbserver feature * Friendlier getter for SMB2 * DceRpcNak * Improve NDR parsing (a lot) * Minor SMB2 improvements * BIG NDR refactor + Dissect pointer deferal * Build with pointer deferral * Small build bugs * SMB2 logoff, fix rawToken in SMB standalone * Add security providers from MS-RPCE to DCERPC * Cleanup ptr_pack of NDRPacketListField * Clearer exception in find_dcerpc_interface * Add minor_version attribute * Fix computation of auth_pad in sec_trailer * Fix a WTF bug * Compute length for NDR arrays * Pass enum to EnumField * Match union attributes from response with request * Improve SMB server * Small bug in pointer deferal dissection * Add user-friendly utils * Add a few NDR tests * More user-friendly improvements * Bug: parent not copied in clone_with * Build: propagate NDR64 and bug fix * Default close response parameters * Fix Python 2.7 * Fix SMB2_Create_Context offset * Fix SMB2 create context * SMB2: support chain, improvements * Fix ioctl error * SMB: check computeNTProofStr * Fix UTCField default * Improve FileId capabilities * SMB2: contexts * Typos * Minor NDRUnion fixes * Py2 fixes
52,769
0
1,171
405
189
209,763
360
scapy
46
scapy/sessions.py
Python
61
{ "docstring": "Process each packet: matches the TCP seq/ack numbers\n to follow the TCP streams, and orders the fragments.\n ", "language": "en", "n_whitespaces": 31, "n_words": 17, "vocab_size": 14 }
https://github.com/secdev/scapy.git
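The reassembly logic above is normally driven by passing the session class to `sniff`; a minimal sketch, with "trace.pcap" as a placeholder capture file:

    from scapy.all import sniff
    from scapy.sessions import TCPSession

    pkts = sniff(offline="trace.pcap", session=TCPSession)  # reorders fragments, rebuilds app-layer payloads
    pkts.summary()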
1
test_sequential_bad_outputs
def test_sequential_bad_outputs() -> None: chain_1 = FakeChain(input_variables=["foo"], output_variables=["bar"]) chain_2 = FakeChain(input_variables=["bar"], output_variables=["baz"]) with pytest.raises(ValueError): # "test" is not present as an output variable. SequentialChain( chains=[chain_1, chain_2], input_variables=["foo"], output_variables=["test"], )
4a4dfbfbed5ca271fc74f61a0b3387314dda8703
12
test_sequential.py
123
Harrison/sequential chains (#168) add support for basic sequential chains
46,657
0
95
70
28
191,532
29
langchain
11
tests/unit_tests/chains/test_sequential.py
Python
10
{ "docstring": "Test error is raised when bad outputs are specified.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/hwchase17/langchain.git
1
test_run_cleanup_skip_archive
def test_run_cleanup_skip_archive(self, cleanup_table_mock, kwargs, should_skip): run_cleanup( clean_before_timestamp=None, table_names=['log'], dry_run=None, verbose=None, confirm=False, **kwargs, ) assert cleanup_table_mock.call_args[1]['skip_archive'] is should_skip
95bd6b71cc9f5da377e272707f7b68000d980939
10
test_db_cleanup.py
78
Don't rely on current ORM structure for db clean command (#23574) For command DB clean, by not relying on the ORM models, we will be able to use the command even when the metadatabase is not yet upgraded to the version of Airflow you have installed. Additionally we archive all rows before deletion.
7,871
0
111
52
17
43,210
17
airflow
12
tests/utils/test_db_cleanup.py
Python
10
{ "docstring": "test that delete confirmation input is called when appropriate", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/apache/airflow.git
2
for_each_shape
def for_each_shape(self, fn, selector=None, row=None, col=None, secondary_y=None): for obj in self._select_annotations_like( prop="shapes", selector=selector, row=row, col=col, secondary_y=secondary_y, ): fn(obj) return self
43e3a4011080911901176aab919c0ecf5046ddd3
9
_figure.py
86
switch to black .22
58,414
0
114
59
20
226,741
20
plotly.py
10
packages/python/plotly/plotly/graph_objs/_figure.py
Python
10
{ "docstring": "\n Apply a function to all shapes that satisfy the specified selection\n criteria\n\n Parameters\n ----------\n fn:\n Function that inputs a single shape object.\n selector: dict, function, int, str or None (default None)\n Dict to use as selection criteria.\n Traces will be selected if they contain properties corresponding\n to all of the dictionary's keys, with values that exactly match\n the supplied values. If None (the default), all shapes are\n selected. If a function, it must be a function accepting a single\n argument and returning a boolean. The function will be called on\n each shape and those for which the function returned True\n will be in the selection. If an int N, the Nth shape matching row\n and col will be selected (N can be negative). If a string S, the selector\n is equivalent to dict(type=S).\n row, col: int or None (default None)\n Subplot row and column index of shapes to select.\n To select shapes by row and column, the Figure must have been\n created using plotly.subplots.make_subplots. To select only those\n shapes that are in paper coordinates, set row and col to the\n string 'paper'. If None (the default), all shapes are selected.\n secondary_y: boolean or None (default None)\n * If True, only select shapes associated with the secondary\n y-axis of the subplot.\n * If False, only select shapes associated with the primary\n y-axis of the subplot.\n * If None (the default), do not filter shapes based on secondary\n y-axis.\n\n To select shapes by secondary y-axis, the Figure must have been\n created using plotly.subplots.make_subplots. See the docstring\n for the specs argument to make_subplots for more info on\n creating subplots with secondary y-axes.\n Returns\n -------\n self\n Returns the Figure object that the method was called on\n ", "language": "en", "n_whitespaces": 672, "n_words": 282, "vocab_size": 142 }
https://github.com/plotly/plotly.py.git
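A minimal sketch of the selector semantics described in the docstring: recolor only the rectangle shapes on a figure built with placeholder coordinates:

    import plotly.graph_objects as go

    fig = go.Figure()
    fig.add_shape(type="rect", x0=0, y0=0, x1=1, y1=1)
    fig.add_shape(type="line", x0=0, y0=1, x1=1, y1=0)
    # Only the shape with type="rect" matches the selector and gets updated.
    fig.for_each_shape(lambda s: s.update(line_color="crimson"), selector=dict(type="rect"))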
3
load_drawer_from_disk
def load_drawer_from_disk(self): exists = self.pair_dictionary_path.is_file() if exists: with open(self.pair_dictionary_path, "r") as fp: self.pair_dict = json.load(fp) elif not self.follow_mode: logger.info("Could not find existing datadrawer, starting from scratch") else: logger.warning( f"Follower could not find pair_dictionary at {self.full_path} " "sending null values back to strategy" )
76b33359a939e8db89b55f268e9d650f78c51bf3
14
data_drawer.py
121
add an optional metric tracker to collect train timings, inference timings, and cpu load data
35,018
0
167
62
39
151,471
43
freqtrade
15
freqtrade/freqai/data_drawer.py
Python
12
{ "docstring": "\n Locate and load a previously saved data drawer full of all pair model metadata in\n present model folder.\n Load any existing metric tracker that may be present.\n ", "language": "en", "n_whitespaces": 56, "n_words": 27, "vocab_size": 26 }
https://github.com/freqtrade/freqtrade.git
3
_var_key
def _var_key(var): # pylint: disable=protected-access # Get the distributed variable if it exists. if hasattr(var, "_distributed_container"): var = var._distributed_container() if getattr(var, "_in_graph_mode", False): return var._shared_name return var._unique_id
75d70a610dffe927d89ceb400d79bb7f9027b26e
10
optimizer_v2.py
69
Support checkpointing ShardedVariables in optimizer slot variables. PiperOrigin-RevId: 429577423
79,831
0
39
39
23
269,013
27
keras
7
keras/optimizers/optimizer_v2/optimizer_v2.py
Python
6
{ "docstring": "Key for representing a primary variable, for looking up slots.\n\n In graph mode the name is derived from the var shared name.\n In eager mode the name is derived from the var unique id.\n If distribution strategy exists, get the primary variable first.\n\n Args:\n var: the variable.\n\n Returns:\n the unique name of the variable.\n ", "language": "en", "n_whitespaces": 66, "n_words": 54, "vocab_size": 35 }
https://github.com/keras-team/keras.git
3
process_dataframe
def process_dataframe(self) -> dict[int | str, dict[str, Any]]: df = self.frame if self.index: df = df.reset_index() if self.na_rep is not None: df = df.fillna(self.na_rep) return df.to_dict(orient="index")
9dfb454dbe5f183b1508a9571ee97938c9cb0db9
11
xml.py
103
REF: Deduplicate to_xml code (#45132)
39,359
0
83
64
21
163,017
26
pandas
14
pandas/io/formats/xml.py
Python
13
{ "docstring": "\n Adjust Data Frame to fit xml output.\n\n This method will adjust underlying data frame for xml output,\n including optionally replacing missing values and including indexes.\n ", "language": "en", "n_whitespaces": 54, "n_words": 25, "vocab_size": 23 }
https://github.com/pandas-dev/pandas.git
2
xresnet101
def xresnet101(pretrained=False, **kwargs): model = XResNet(Bottleneck, [3, 4, 23, 3], **kwargs) if pretrained: model.load_state_dict(model_zoo.load_url(model_urls['xresnet101'])) return model
4fc3616712edb19179b17dd270ad6cf63abf99c2
13
xresnet2.py
77
Upgrading to support latest Pytorch version
46,356
0
28
49
15
190,597
16
DeOldify
10
fastai/vision/models/xresnet2.py
Python
4
{ "docstring": "Constructs a XResNet-101 model.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n ", "language": "en", "n_whitespaces": 28, "n_words": 15, "vocab_size": 14 }
https://github.com/jantic/DeOldify.git
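A minimal construction sketch; the import path is assumed from the record's file path, and `pretrained=True` would additionally require the `xresnet101` entry in `model_urls` to resolve:

    from fastai.vision.models.xresnet2 import xresnet101  # import path assumed from the file path above

    model = xresnet101(pretrained=False)
    n_params = sum(p.numel() for p in model.parameters())  # plain torch module underneath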
1
test_check_complete_II
def test_check_complete_II(value, expected): cc = ipt2.TransformerManager().check_complete assert cc(value) == expected @pytest.mark.parametrize( "value, expected", [ (")", ("invalid", None)), ("]", ("invalid", None)), ("}", ("invalid", None)), (")(", ("invalid", None)), ("][", ("invalid", None)), ("}{", ("invalid", None)), ("]()(", ("invalid", None)), ("())(", ("invalid", None)), (")[](", ("invalid", None)), ("()](", ("invalid", None)), ], )
cb6563dcd85783ea0a687fbe227c4782a8a7cadf
@pytest.mark.parametrize( "value, expected", [ (")", ("invalid", None)), ("]", ("invalid", None)), ("}", ("invalid", None)), (")(", ("invalid", None)), ("][", ("invalid", None)), ("}{", ("invalid", None)), ("]()(", ("invalid", None)), ("())(", ("invalid", None)), (")[](", ("invalid", None)), ("()](", ("invalid", None)), ], )
10
test_inputtransformer2.py
223
Fix and test for "async with does not allow new lines". Use the opportunity to add a test, and parametrise a few other, plus set the correct stacklevel. Closes #12975
52,326
1
132
24
28
208,444
46
ipython
10
IPython/core/tests/test_inputtransformer2.py
Python
3
{ "docstring": "\n Test that multiple line strings are properly handled.\n\n Separate test function for convenience\n\n ", "language": "en", "n_whitespaces": 23, "n_words": 13, "vocab_size": 13 }
https://github.com/ipython/ipython.git
6
async_inference_detector
async def async_inference_detector(model, imgs): if not isinstance(imgs, (list, tuple)): imgs = [imgs] cfg = model.cfg if isinstance(imgs[0], np.ndarray): cfg = cfg.copy() # set loading pipeline type cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) test_pipeline = Compose(cfg.data.test.pipeline) datas = [] for img in imgs: # prepare data if isinstance(img, np.ndarray): # directly add img data = dict(img=img) else: # add information into dict data = dict(img_info=dict(filename=img), img_prefix=None) # build the data pipeline data = test_pipeline(data) datas.append(data) for m in model.modules(): assert not isinstance( m, RoIPool), 'CPU inference with RoIPool is not supported currently.' # We don't restore `torch.is_grad_enabled()` value during concurrent # inference since execution can overlap torch.set_grad_enabled(False) results = await model.aforward_test(data, rescale=True) return results
2631e2879acf0bd20a64dfdd7039f37a8e6afbf6
17
inference.py
314
Support Datasampler
70,325
0
298
193
80
244,324
113
mmdetection
32
mmdet/apis/inference.py
Python
24
{ "docstring": "Async inference image(s) with the detector.\n\n Args:\n model (nn.Module): The loaded detector.\n img (str | ndarray): Either image files or loaded images.\n\n Returns:\n Awaitable detection results.\n ", "language": "en", "n_whitespaces": 56, "n_words": 26, "vocab_size": 24 }
https://github.com/open-mmlab/mmdetection.git
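A hedged sketch of driving the async API from an event loop; the config and checkpoint paths are placeholders:

    import asyncio
    from mmdet.apis import init_detector, async_inference_detector

    async def main():
        model = init_detector("configs/some_detector.py", "checkpoint.pth", device="cuda:0")  # placeholder paths
        return await async_inference_detector(model, ["demo.jpg"])

    results = asyncio.run(main())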
5
upgrade
def upgrade(): conn = op.get_bind() update_flow_run_state_name_in_batches = result = conn.execute(sa.text(update_flow_run_state_name_in_batches)) while True: if result.rowcount <= 0: break update_task_run_state_name_in_batches = result = conn.execute(sa.text(update_task_run_state_name_in_batches)) while True: if result.rowcount <= 0: break
fc9f253912945e088e48cc723af383e6a9f46faf
10
2022_04_21_095519_14dc68cc5853_backfill_state_name.py
116
Add run.state_name columns
11,142
0
91
66
17
54,788
29
prefect
11
src/prefect/orion/database/migrations/versions/postgresql/2022_04_21_095519_14dc68cc5853_backfill_state_name.py
Python
26
{ "docstring": "\n Backfills state_name column for task_run and flow_run tables.\n\n This is a data only migration that can be run as many\n times as desired.\n \n WITH null_flow_run_state_name_cte as (SELECT id from flow_run where state_name is null limit 500)\n UPDATE flow_run\n SET state_name = flow_run_state.name\n FROM flow_run_state, null_flow_run_state_name_cte\n WHERE flow_run.state_id = flow_run_state.id\n AND flow_run.id = null_flow_run_state_name_cte.id;\n \n WITH null_task_run_state_name_cte as (SELECT id from task_run where state_name is null limit 500)\n UPDATE task_run\n SET state_name = task_run_state.name\n FROM task_run_state, null_task_run_state_name_cte\n WHERE task_run.state_id = task_run_state.id\n AND task_run.id = null_task_run_state_name_cte.id;\n ", "language": "en", "n_whitespaces": 188, "n_words": 83, "vocab_size": 50 }
https://github.com/PrefectHQ/prefect.git
1
get_default_rl_module_class
def get_default_rl_module_class(self) -> Union[Type["RLModule"], str]: raise NotImplementedError
f9ec2d1ae2e14e1f1ed38d315dfd643f600dc397
7
algorithm_config.py
31
[RLlib] Make RLModule initialization easy (#31069) 1. Moved the `_enable_rl_module_api` signature into `rl_module()` api of the algorithmConfig. 2. Added the ability for the user to override the entire RLModule from algorithmConfig by simply changing the class. 3. updated marl_module: we now have only one MARLModule base-class that can be used stand-alone, users can override it completely if they want. 4. Removed test_torch_marl_module (Will add it back in a framework agnostic way) 5. Updated TorchMARL and RL modules to use the new constructor format. 6. Core tests now works independent of failures of PPORLModule. 7. Core tests is now based on factory methods of RLModule. 8. created a new isolated unittest for marl_module 9. update ppo torch RL module to adhere to the new API changes. 10. get_rl_module_class is now a instance method instead of classmethod 11. made enabling the api more explicit from algo_config() Signed-off-by: Kourosh Hakhamaneshi <[email protected]>
31,158
0
21
18
7
137,403
7
ray
6
rllib/algorithms/algorithm_config.py
Python
11
{ "docstring": "Returns the RLModule class to use for this algorithm.\n\n Override this method in the sub-class to return the RLModule class type given\n the input framework.\n\n Returns:\n The RLModule class to use for this algorithm either as a class type or as\n a string (e.g. x.y.z).\n ", "language": "en", "n_whitespaces": 95, "n_words": 45, "vocab_size": 28 }
https://github.com/ray-project/ray.git
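A sketch of the override pattern the docstring describes; the dotted module path returned here is hypothetical:

    from ray.rllib.algorithms.algorithm_config import AlgorithmConfig

    class MyAlgoConfig(AlgorithmConfig):
        def get_default_rl_module_class(self):
            # A class object or a dotted-path string are both acceptable return types.
            return "my_project.rl_modules.MyTorchRLModule"  # hypothetical path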
1
is_spotify_media_type
def is_spotify_media_type(media_content_type): return media_content_type.startswith(MEDIA_PLAYER_PREFIX)
a371f8f7882bd1c7de5e344aa23f7d6b1967ce66
7
__init__.py
24
Allow browsing the Spotify media player in Sonos (#64921)
109,737
0
10
13
4
311,067
4
core
4
homeassistant/components/spotify/__init__.py
Python
2
{ "docstring": "Return whether the media_content_type is a valid Spotify media_id.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
3
_window_closed
def _window_closed(self) -> bool: retval = any(cv2.getWindowProperty(win, cv2.WND_PROP_VISIBLE) < 1 for win in self._windows) if retval: logger.debug("Window closed detected") return retval
7da2cc3dd266aabebf41a31384cc2e0e7e5af6e5
12
preview_cv.py
70
Training - Use custom preview pop-out
20,955
0
60
42
20
101,545
21
faceswap
12
lib/training/preview_cv.py
Python
6
{ "docstring": " bool: ``True`` if any window has been closed otherwise ``False`` ", "language": "en", "n_whitespaces": 11, "n_words": 10, "vocab_size": 10 }
https://github.com/deepfakes/faceswap.git
8
eval
def eval(cls, *_args): if not _args: return Undefined if len(_args) == 1 and _args[0][-1] == True: return _args[0][0] newargs = _piecewise_collapse_arguments(_args) # some conditions may have been redundant missing = len(newargs) != len(_args) # some conditions may have changed same = all(a == b for a, b in zip(newargs, _args)) # if either change happened we return the expr with the # updated args if not newargs: raise ValueError(filldedent()) if missing or not same: return cls(*newargs)
fc7b460a21e340d4104e67c86d973765c9b4071b
12
piecewise.py
169
Refactor Piecewise simplifcation/evaluation
49,272
0
204
102
52
199,448
76
sympy
15
sympy/functions/elementary/piecewise.py
Python
15
{ "docstring": "Either return a modified version of the args or, if no\n modifications were made, return None.\n\n Modifications that are made here:\n\n 1. relationals are made canonical\n 2. any False conditions are dropped\n 3. any repeat of a previous condition is ignored\n 4. any args past one with a true condition are dropped\n\n If there are no args left, nan will be returned.\n If there is a single arg with a True condition, its\n corresponding expression will be returned.\n\n EXAMPLES\n ========\n\n >>> from sympy import Piecewise\n >>> from sympy.abc import x\n >>> cond = -x < -1\n >>> args = [(1, cond), (4, cond), (3, False), (2, True), (5, x < 1)]\n >>> Piecewise(*args, evaluate=False)\n Piecewise((1, -x < -1), (4, -x < -1), (2, True))\n >>> Piecewise(*args)\n Piecewise((1, x > 1), (2, True))\n \n There are no conditions (or none that\n are not trivially false) to define an\n expression.", "language": "en", "n_whitespaces": 332, "n_words": 147, "vocab_size": 94 }
https://github.com/sympy/sympy.git
5
_populate_vhost_names_v2
def _populate_vhost_names_v2(self, vhost): servername_match = vhost.node.find_directives("ServerName", exclude=False) serveralias_match = vhost.node.find_directives("ServerAlias", exclude=False) servername = None if servername_match: servername = servername_match[-1].parameters[-1] if not vhost.modmacro: for alias in serveralias_match: for serveralias in alias.parameters: vhost.aliases.add(serveralias) vhost.name = servername
eeca208c8f57304590ac1af80b496e61021aaa45
14
configurator.py
139
Various clean-ups in certbot-apache. Use f-strings. (#9132) * Various clean-ups in certbot-apache. Use f-strings. * Smaller tweaks
45,474
0
143
86
24
186,378
34
certbot
16
certbot-apache/certbot_apache/_internal/configurator.py
Python
11
{ "docstring": "Helper function that populates the VirtualHost names.\n :param host: In progress vhost whose names will be added\n :type host: :class:`~certbot_apache.obj.VirtualHost`\n ", "language": "en", "n_whitespaces": 41, "n_words": 20, "vocab_size": 19 }
https://github.com/certbot/certbot.git
3
get_resource_path
def get_resource_path(self, relative_path): r = self.get_distinfo_resource('RESOURCES') with contextlib.closing(r.as_stream()) as stream: with CSVReader(stream=stream) as resources_reader: for relative, destination in resources_reader: if relative == relative_path: return destination raise KeyError('no resource file with relative path %r ' 'is installed' % relative_path)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
13
database.py
113
upd; format
12,790
0
156
62
32
61,975
38
transferlearning
14
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/database.py
Python
9
{ "docstring": "\n NOTE: This API may change in the future.\n\n Return the absolute path to a resource file with the given relative\n path.\n\n :param relative_path: The path, relative to .dist-info, of the resource\n of interest.\n :return: The absolute path where the resource is to be found.\n ", "language": "en", "n_whitespaces": 116, "n_words": 44, "vocab_size": 31 }
https://github.com/jindongwang/transferlearning.git
4
get_training_or_validation_split
def get_training_or_validation_split(samples, labels, validation_split, subset): if not validation_split: return samples, labels num_val_samples = int(validation_split * len(samples)) if subset == "training": print(f"Using {len(samples) - num_val_samples} files for training.") samples = samples[:-num_val_samples] labels = labels[:-num_val_samples] elif subset == "validation": print(f"Using {num_val_samples} files for validation.") samples = samples[-num_val_samples:] labels = labels[-num_val_samples:] else: raise ValueError( '`subset` must be either "training" ' f'or "validation", received: {subset}' ) return samples, labels
84afc5193d38057e2e2badf9c889ea87d80d8fbf
14
dataset_utils.py
184
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,744
0
171
96
49
276,811
65
keras
10
keras/utils/dataset_utils.py
Python
18
{ "docstring": "Potentially restict samples & labels to a training or validation split.\n\n Args:\n samples: List of elements.\n labels: List of corresponding labels.\n validation_split: Float, fraction of data to reserve for validation.\n subset: Subset of the data to return.\n Either \"training\", \"validation\", or None. If None, we return all of the\n data.\n\n Returns:\n tuple (samples, labels), potentially restricted to the specified subset.\n ", "language": "en", "n_whitespaces": 108, "n_words": 60, "vocab_size": 48 }
https://github.com/keras-team/keras.git
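A standalone sketch of the split behaviour on toy data (the helper lives in `keras.utils.dataset_utils` per the record's path and is not part of the public API):

    from keras.utils.dataset_utils import get_training_or_validation_split

    samples = [f"img_{i:02d}.png" for i in range(10)]
    labels = list(range(10))
    train_s, train_l = get_training_or_validation_split(samples, labels, 0.2, "training")   # first 8 items
    val_s, val_l = get_training_or_validation_split(samples, labels, 0.2, "validation")   # last 2 items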
6
get_employees
def get_employees(filters): holiday_filter = [ ["holiday_date", ">=", filters.from_date], ["holiday_date", "<=", filters.to_date], ] if filters.holiday_list: holiday_filter.append(["parent", "=", filters.holiday_list]) holidays = frappe.get_all( "Holiday", fields=["holiday_date", "description"], filters=holiday_filter ) holiday_names = {} holidays_list = [] for holiday in holidays: holidays_list.append(holiday.holiday_date) holiday_names[holiday.holiday_date] = holiday.description if holidays_list: cond = " attendance_date in %(holidays_list)s" if filters.holiday_list: cond += ( ) employee_list = frappe.db.sql( % cond.format(", ".join(["%s"] * len(holidays_list))), {"holidays_list": holidays_list, "holidays": filters.holiday_list}, as_list=True, ) for employee_data in employee_list: employee_data.append(holiday_names[employee_data[2]]) return employee_list else: return []
494bd9ef78313436f0424b918f200dab8fc7c20b
18
employees_working_on_a_holiday.py
308
style: format code with black
14,147
0
47
185
58
66,253
77
erpnext
25
erpnext/hr/report/employees_working_on_a_holiday/employees_working_on_a_holiday.py
Python
35
{ "docstring": " and (employee in (select employee from tabEmployee where holiday_list = %(holidays)s))select\n\t\t\t\temployee, employee_name, attendance_date, status\n\t\t\tfrom tabAttendance\n\t\t\twhere %s", "language": "en", "n_whitespaces": 16, "n_words": 19, "vocab_size": 17 }
https://github.com/frappe/erpnext.git
1
test_django_date_trunc
def test_django_date_trunc(self): updated = datetime.datetime(2010, 2, 20) SchoolClass.objects.create(year=2009, last_updated=updated) years = SchoolClass.objects.dates("last_updated", "year") self.assertEqual(list(years), [datetime.date(2010, 1, 1)])
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
tests.py
105
Refs #33476 -- Reformatted code with Black.
50,004
0
52
66
16
201,823
17
django
14
tests/backends/tests.py
Python
5
{ "docstring": "\n Test the custom ``django_date_trunc method``, in particular against\n fields which clash with strings passed to it (e.g. 'year') (#12818).\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 19 }
https://github.com/django/django.git
3
get_tests_dir
def get_tests_dir(append_path=None): # this function caller's __file__ caller__file__ = inspect.stack()[1][1] tests_dir = os.path.abspath(os.path.dirname(caller__file__)) while not tests_dir.endswith("tests"): tests_dir = os.path.dirname(tests_dir) if append_path: return os.path.join(tests_dir, append_path) else: return tests_dir # # Helper functions for dealing with testing text outputs # The original code came from: # https://github.com/fastai/fastai/blob/master/tests/utils/text.py # When any function contains print() calls that get overwritten, like progress bars, # a special care needs to be applied, since under pytest -s captured output (capsys # or contextlib.redirect_stdout) contains any temporary printed strings, followed by # \r's. This helper function ensures that the buffer will contain the same output # with and without -s in pytest, by turning: # foo bar\r tar mar\r final message # into: # final message # it can handle a single string or a multiline buffer
29c10a41d04f855c433a6cde7797b325651417d2
11
testing_utils.py
138
[Test refactor 1/5] Per-folder tests reorganization (#15725) * Per-folder tests reorganization Co-authored-by: sgugger <[email protected]> Co-authored-by: Stas Bekman <[email protected]>
6,452
0
158
75
95
35,453
129
transformers
12
src/transformers/testing_utils.py
Python
9
{ "docstring": "\n Args:\n append_path: optional path to append to the tests dir path\n\n Return:\n The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally `append_path` is\n joined after the `tests` dir the former is provided.\n\n ", "language": "en", "n_whitespaces": 71, "n_words": 40, "vocab_size": 28 }
https://github.com/huggingface/transformers.git
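A minimal sketch, assuming it is called from a module somewhere under the `tests/` tree (the loop above walks parent directories until one ends with "tests"); the "fixtures" subfolder is an assumption:

    from transformers.testing_utils import get_tests_dir

    tests_dir = get_tests_dir()                # .../transformers/tests
    fixtures_dir = get_tests_dir("fixtures")  # .../transformers/tests/fixtures (assumed subfolder)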