column          type           min      max
complexity      int64          1        139
fun_name        stringlengths  1        80
code            stringlengths  101      62.2k
commit_id       stringlengths  40       40
ast_errors      stringlengths  0        3.11k
ast_levels      int64          6        36
file_name       stringlengths  5        79
n_ast_nodes     int64          17       19.2k
commit_message  stringlengths  3        15.3k
d_id            int64          12       121k
n_ast_errors    int64          0        9
n_whitespaces   int64          4        10.8k
token_counts    int64          5        3.06k
vocab_size      int64          4        1.11k
id              int64          20       338k
n_words         int64          4        4.82k
repo            stringlengths  3        22
n_identifiers   int64          2        176
path            stringlengths  7        134
language        stringclasses  1 value
nloc            int64          1        413
documentation   dict
url             stringlengths  31       59
1
sorted_items
def sorted_items(self) -> List[Tuple[str, "PNGHeaderDict"]]:
    items = sorted(self.process_folder(), key=itemgetter(0))
    logger.trace(items)  # type: ignore
    return items
c79175cbde5600bebd65785f3821fc74b3a80cbe
11
media.py
69
Alignments Tool updates - Copy info back to alignments file from faces
21,162
0
44
41
14
101,758
15
faceswap
12
tools/alignments/media.py
Python
11
{ "docstring": " Return the items sorted by the saved file name.\n\n Returns\n --------\n list\n List of `dict` objects for each face found, sorted by the face's current filename\n ", "language": "en", "n_whitespaces": 66, "n_words": 26, "vocab_size": 22 }
https://github.com/deepfakes/faceswap.git
4
encrypt_file
def encrypt_file(self, file, key=0):
    # precondition
    assert isinstance(file, str) and isinstance(key, int)

    try:
        with open(file, "r") as fin:
            with open("encrypt.out", "w+") as fout:
                # actual encrypt-process
                for line in fin:
                    fout.write(self.encrypt_string(line, key))
    except:
        return False

    return True
f0af0c43340763724f139fa68aa1e5a9ffe458b4
17
XOR_cipher.py
125
refactor: clean code Signed-off-by: slowy07 <[email protected]>
4,359
0
177
72
32
22,543
37
Python
13
XORcipher/XOR_cipher.py
Python
10
{ "docstring": "\n input: filename (str) and a key (int)\n output: returns true if encrypt process was\n successful otherwise false\n if key not passed the method uses the key by the constructor.\n otherwise key = 1\n ", "language": "en", "n_whitespaces": 76, "n_words": 33, "vocab_size": 26 }
https://github.com/geekcomputers/Python.git
11
_get_next_prev
def _get_next_prev(generic_view, date, is_previous, period):
    date_field = generic_view.get_date_field()
    allow_empty = generic_view.get_allow_empty()
    allow_future = generic_view.get_allow_future()

    get_current = getattr(generic_view, "_get_current_%s" % period)
    get_next = getattr(generic_view, "_get_next_%s" % period)

    # Bounds of the current interval
    start, end = get_current(date), get_next(date)

    # If allow_empty is True, the naive result will be valid
    if allow_empty:
        if is_previous:
            result = get_current(start - datetime.timedelta(days=1))
        else:
            result = end

        if allow_future or result <= timezone_today():
            return result
        else:
            return None

    # Otherwise, we'll need to go to the database to look for an object
    # whose date_field is at least (greater than/less than) the given
    # naive result
    else:
        # Construct a lookup and an ordering depending on whether we're doing
        # a previous date or a next date lookup.
        if is_previous:
            lookup = {"%s__lt" % date_field: generic_view._make_date_lookup_arg(start)}
            ordering = "-%s" % date_field
        else:
            lookup = {"%s__gte" % date_field: generic_view._make_date_lookup_arg(end)}
            ordering = date_field

        # Filter out objects in the future if appropriate.
        if not allow_future:
            # Fortunately, to match the implementation of allow_future,
            # we need __lte, which doesn't conflict with __lt above.
            if generic_view.uses_datetime_field:
                now = timezone.now()
            else:
                now = timezone_today()
            lookup["%s__lte" % date_field] = now

        qs = generic_view.get_queryset().filter(**lookup).order_by(ordering)

        # Snag the first object from the queryset; if it doesn't exist that
        # means there's no next/previous link available.
        try:
            result = getattr(qs[0], date_field)
        except IndexError:
            return None

        # Convert datetimes to dates in the current time zone.
        if generic_view.uses_datetime_field:
            if settings.USE_TZ:
                result = timezone.localtime(result)
            result = result.date()

        # Return the first day of the period.
        return get_current(result)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
16
dates.py
429
Refs #33476 -- Reformatted code with Black.
51,762
0
658
247
144
206,861
251
django
35
django/views/generic/dates.py
Python
39
{ "docstring": "\n Get the next or the previous valid date. The idea is to allow links on\n month/day views to never be 404s by never providing a date that'll be\n invalid for the given view.\n\n This is a bit complicated since it handles different intervals of time,\n hence the coupling to generic_view.\n\n However in essence the logic comes down to:\n\n * If allow_empty and allow_future are both true, this is easy: just\n return the naive result (just the next/previous day/week/month,\n regardless of object existence.)\n\n * If allow_empty is true, allow_future is false, and the naive result\n isn't in the future, then return it; otherwise return None.\n\n * If allow_empty is false and allow_future is true, return the next\n date *that contains a valid object*, even if it's in the future. If\n there are no next objects, return None.\n\n * If allow_empty is false and allow_future is false, return the next\n date that contains a valid object. If that date is in the future, or\n if there are no next objects, return None.\n ", "language": "en", "n_whitespaces": 283, "n_words": 170, "vocab_size": 88 }
https://github.com/django/django.git
5
request_url
def request_url(self, request, proxies):
    proxy = select_proxy(request.url, proxies)
    scheme = urlparse(request.url).scheme

    is_proxied_http_request = proxy and scheme != "https"
    using_socks_proxy = False
    if proxy:
        proxy_scheme = urlparse(proxy).scheme.lower()
        using_socks_proxy = proxy_scheme.startswith("socks")

    url = request.path_url
    if is_proxied_http_request and not using_socks_proxy:
        url = urldefragauth(request.url)

    return url
cd5a9683be69c86c8f3adcd13385a9bc5db198ec
13
adapters.py
140
Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir.
4,131
0
138
84
27
22,042
42
pipenv
16
pipenv/patched/pip/_vendor/requests/adapters.py
Python
12
{ "docstring": "Obtain the url to use when making the final request.\n\n If the message is being sent through a HTTP proxy, the full URL has to\n be used. Otherwise, we should only use the path portion of the URL.\n\n This should not be called from user code, and is only exposed for use\n when subclassing the\n :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.\n\n :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.\n :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.\n :rtype: str\n ", "language": "en", "n_whitespaces": 142, "n_words": 79, "vocab_size": 59 }
https://github.com/pypa/pipenv.git
2
cast_to_floatx
def cast_to_floatx(x):
    if isinstance(x, (tf.Tensor, tf.Variable, tf.SparseTensor)):
        return tf.cast(x, dtype=floatx())
    return np.asarray(x, dtype=floatx())


@keras_export("keras.backend.get_uid")
84afc5193d38057e2e2badf9c889ea87d80d8fbf
@keras_export("keras.backend.get_uid")
12
backend.py
92
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,227
1
29
52
12
269,607
14
keras
13
keras/backend.py
Python
4
{ "docstring": "Cast a Numpy array to the default Keras float type.\n\n Args:\n x: Numpy array or TensorFlow tensor.\n\n Returns:\n The same array (Numpy array if `x` was a Numpy array, or TensorFlow tensor\n if `x` was a tensor), cast to its new type.\n\n Example:\n\n >>> tf.keras.backend.floatx()\n 'float32'\n >>> arr = np.array([1.0, 2.0], dtype='float64')\n >>> arr.dtype\n dtype('float64')\n >>> new_arr = cast_to_floatx(arr)\n >>> new_arr\n array([1., 2.], dtype=float32)\n >>> new_arr.dtype\n dtype('float32')\n\n ", "language": "en", "n_whitespaces": 131, "n_words": 67, "vocab_size": 46 }
https://github.com/keras-team/keras.git
10
query
def query(self, table, columns=None, where=None, where_data=None, order_by=None, group_by=None, integration_name=None, integration_type=None):
    if table == 'predictors':
        return self._select_predictors()

    if table == 'datasources':
        return self._select_datasources()

    new_where = {}
    if where_data is None:
        for key, value in where_data.items():
            if isinstance(value, dict) is False or len(value.keys()) != 1 or list(value.keys())[0] != '$eq':
                # TODO value should be just string or number
                raise Exception()
            new_where[key] = value['$eq']

        if len(new_where) == 0:
            return []

        where_data = [new_where]

    if isinstance(where_data, dict):
        where_data = [where_data]

    result = self.handler.predict(table, where_data)
    return result
b9ee4a5930e20a09350a3e0774283ba658a42da7
16
mindsdb_datanode.py
269
del code
25,538
0
287
169
59
115,746
82
mindsdb
25
mindsdb/api/mysql/mysql_proxy/datahub/datanodes/mindsdb_datanode.py
Python
18
{ "docstring": " NOTE WHERE statements can be just $eq joined with 'and'\n ", "language": "en", "n_whitespaces": 18, "n_words": 10, "vocab_size": 10 }
https://github.com/mindsdb/mindsdb.git
2
mock_json_schema
def mock_json_schema(request, monkeypatch, tmp_path):
    # Do not patch integration tests
    if "integration" in request.keywords:
        return

    # Mock the subclasses list to make it very small, containing only mock nodes
    monkeypatch.setattr(
        haystack.nodes._json_schema,
        "find_subclasses_in_modules",
        lambda *a, **k: [(conftest, MockDocumentStore), (conftest, MockReader), (conftest, MockRetriever)],
    )
    # Point the JSON schema path to tmp_path
    monkeypatch.setattr(haystack.pipelines.config, "JSON_SCHEMAS_PATH", tmp_path)

    # Generate mock schema in tmp_path
    filename = f"haystack-pipeline-unstable.schema.json"
    test_schema = _json_schema.get_json_schema(filename=filename, compatible_versions=["unstable"])

    with open(tmp_path / filename, "w") as schema_file:
        json.dump(test_schema, schema_file, indent=4)


#
# Integration
#

@pytest.mark.integration
@pytest.mark.elasticsearch
11cf94a9652a577732941f27ad59eb7c8bc5063e
@pytest.mark.integration @pytest.mark.elasticsearch
11
test_pipeline_yaml.py
209
Pipeline's YAML: syntax validation (#2226) * Add BasePipeline.validate_config, BasePipeline.validate_yaml, and some new custom exception classes * Make error composition work properly * Clarify typing * Help mypy a bit more * Update Documentation & Code Style * Enable autogenerated docs for Milvus1 and 2 separately * Revert "Enable autogenerated docs for Milvus1 and 2 separately" This reverts commit 282be4a78a6e95862a9b4c924fc3dea5ca71e28d. * Update Documentation & Code Style * Re-enable 'additionalProperties: False' * Add pipeline.type to JSON Schema, was somehow forgotten * Disable additionalProperties on the pipeline properties too * Fix json-schemas for 1.1.0 and 1.2.0 (should not do it again in the future) * Cal super in PipelineValidationError * Improve _read_pipeline_config_from_yaml's error handling * Fix generate_json_schema.py to include document stores * Fix json schemas (retro-fix 1.1.0 again) * Improve custom errors printing, add link to docs * Add function in BaseComponent to list its subclasses in a module * Make some document stores base classes abstract * Add marker 'integration' in pytest flags * Slighly improve validation of pipelines at load * Adding tests for YAML loading and validation * Make custom_query Optional for validation issues * Fix bug in _read_pipeline_config_from_yaml * Improve error handling in BasePipeline and Pipeline and add DAG check * Move json schema generation into haystack/nodes/_json_schema.py (useful for tests) * Simplify errors slightly * Add some YAML validation tests * Remove load_from_config from BasePipeline, it was never used anyway * Improve tests * Include json-schemas in package * Fix conftest imports * Make BasePipeline abstract * Improve mocking by making the test independent from the YAML version * Add exportable_to_yaml decorator to forget about set_config on mock nodes * Fix mypy errors * Comment out one monkeypatch * Fix typing again * Improve error message for validation * Add required properties to pipelines * Fix YAML version for REST API YAMLs to 1.2.0 * Fix load_from_yaml call in load_from_deepset_cloud * fix HaystackError.__getattr__ * Add super().__init__()in most nodes and docstore, comment set_config * Remove type from REST API pipelines * Remove useless init from doc2answers * Call super in Seq3SeqGenerator * Typo in deepsetcloud.py * Fix rest api indexing error mismatch and mock version of JSON schema in all tests * Working on pipeline tests * Improve errors printing slightly * Add back test_pipeline.yaml * _json_schema.py supports different versions with identical schemas * Add type to 0.7 schema for backwards compatibility * Fix small bug in _json_schema.py * Try alternative to generate json schemas on the CI * Update Documentation & Code Style * Make linux CI match autoformat CI * Fix super-init-not-called * Accidentally committed file * Update Documentation & Code Style * fix test_summarizer_translation.py's import * Mock YAML in a few suites, split and simplify test_pipeline_debug_and_validation.py::test_invalid_run_args * Fix json schema for ray tests too * Update Documentation & Code Style * Reintroduce validation * Usa unstable version in tests and rest api * Make unstable support the latest versions * Update Documentation & Code Style * Remove needless fixture * Make type in pipeline optional in the strings validation * Fix schemas * Fix string validation for pipeline type * Improve validate_config_strings * Remove type from test p[ipelines * Update Documentation & Code Style * Fix test_pipeline * Removing more 
type from pipelines * Temporary CI patc * Fix issue with exportable_to_yaml never invoking the wrapped init * rm stray file * pipeline tests are green again * Linux CI now needs .[all] to generate the schema * Bugfixes, pipeline tests seems to be green * Typo in version after merge * Implement missing methods in Weaviate * Trying to avoid FAISS tests from running in the Milvus1 test suite * Fix some stray test paths and faiss index dumping * Fix pytest markers list * Temporarily disable cache to be able to see tests failures * Fix pyproject.toml syntax * Use only tmp_path * Fix preprocessor signature after merge * Fix faiss bug * Fix Ray test * Fix documentation issue by removing quotes from faiss type * Update Documentation & Code Style * use document properly in preprocessor tests * Update Documentation & Code Style * make preprocessor capable of handling documents * import document * Revert support for documents in preprocessor, do later * Fix bug in _json_schema.py that was breaking validation * re-enable cache * Update Documentation & Code Style * Simplify calling _json_schema.py from the CI * Remove redundant ABC inheritance * Ensure exportable_to_yaml works only on implementations * Rename subclass to class_ in Meta * Make run() and get_config() abstract in BasePipeline * Revert unintended change in preprocessor * Move outgoing_edges_input_node check inside try block * Rename VALID_CODE_GEN_INPUT_REGEX into VALID_INPUT_REGEX * Add check for a RecursionError on validate_config_strings * Address usages of _pipeline_config in data silo and elasticsearch * Rename _pipeline_config into _init_parameters * Fix pytest marker and remove unused imports * Remove most redundant ABCs * Rename _init_parameters into _component_configuration * Remove set_config and type from _component_configuration's dict * Remove last instances of set_config and replace with super().__init__() * Implement __init_subclass__ approach * Simplify checks on the existence of _component_configuration * Fix faiss issue * Dynamic generation of node schemas & weed out old schemas * Add debatable test * Add docstring to debatable test * Positive diff between schemas implemented * Improve diff printing * Rename REST API YAML files to trigger IDE validation * Fix typing issues * Fix more typing * Typo in YAML filename * Remove needless type:ignore * Add tests * Fix tests & validation feedback for accessory classes in custom nodes * Refactor RAGeneratorType out * Fix broken import in conftest * Improve source error handling * Remove unused import in test_eval.py breaking tests * Fix changed error message in tests matches too * Normalize generate_openapi_specs.py and generate_json_schema.py in the actions * Fix path to generate_openapi_specs.py in autoformat.yml * Update Documentation & Code Style * Add test for FAISSDocumentStore-like situations (superclass with init params) * Update Documentation & Code Style * Fix indentation * Remove commented set_config * Store model_name_or_path in FARMReader to use in DistillationDataSilo * Rename _component_configuration into _component_config * Update Documentation & Code Style Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
74,961
1
148
116
68
256,915
82
haystack
30
test/test_pipeline_yaml.py
Python
13
{ "docstring": "\n JSON schema with the unstable version and only mocked nodes.\n ", "language": "en", "n_whitespaces": 17, "n_words": 10, "vocab_size": 10 }
https://github.com/deepset-ai/haystack.git
4
_get_object_config
def _get_object_config(obj):
    if isinstance(obj, str):
        # Use the content of the string as the config for string.
        return obj
    elif isinstance(obj, types.FunctionType):
        # Keep track of the function's module and name in a dict as the config.
        return {
            'module': obj.__module__,
            'function_name': obj.__name__,
        }
    if not hasattr(obj, 'get_config'):
        raise TypeError(f'Unable to recognize the config of {obj}.')
    return obj.get_config()
e70d21c144503f560d8659fec0e08267e686a431
11
saving_lib.py
110
Keras saving: A prototype of config-based (idempotent) saving and loading. This shows an example of custom Model, Layer, Optimizer, and Loss, and adds test to confirm that the basic aforementioned elements work across this new saving and loading scheme. PiperOrigin-RevId: 447193044
79,990
0
95
62
44
269,264
58
keras
11
keras/saving/experimental/saving_lib.py
Python
11
{ "docstring": "Return the object's config depending on string, function, or others.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/keras-team/keras.git
1
_get
def _get(self, serialized_key):
    cur = self.connection.cursor()
    # should always be a single match, hence the [0]
    return list(cur.execute(f))[0][0]
0fd3b436c38f38bcae6fed9e14dc4d2a12e90793
13
storage_handler.py
57
fix tests and reformat
25,171
0
38
33
18
114,374
18
mindsdb
8
mindsdb/integrations/libs/storage_handler.py
Python
3
{ "docstring": "select value from store where key='{serialized_key}'", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
https://github.com/mindsdb/mindsdb.git
6
deal_duplicate_bb
def deal_duplicate_bb(thead_part):
    # 1. find out <td></td> in <thead></thead>.
    td_pattern = "<td rowspan=\"(\d)+\" colspan=\"(\d)+\">(.+?)</td>|" \
                 "<td colspan=\"(\d)+\" rowspan=\"(\d)+\">(.+?)</td>|" \
                 "<td rowspan=\"(\d)+\">(.+?)</td>|" \
                 "<td colspan=\"(\d)+\">(.+?)</td>|" \
                 "<td>(.*?)</td>"
    td_iter = re.finditer(td_pattern, thead_part)
    td_list = [t.group() for t in td_iter]

    # 2. is multiply <b></b> in <td></td> or not?
    new_td_list = []
    for td_item in td_list:
        if td_item.count('<b>') > 1 or td_item.count('</b>') > 1:
            # multiply <b></b> in <td></td> case.
            # 1. remove all <b></b>
            td_item = td_item.replace('<b>', '').replace('</b>', '')
            # 2. replace <tb> -> <tb><b>, </tb> -> </b></tb>.
            td_item = td_item.replace('<td>', '<td><b>').replace('</td>', '</b></td>')
            new_td_list.append(td_item)
        else:
            new_td_list.append(td_item)

    # 3. replace original thead part.
    for td_item, new_td_item in zip(td_list, new_td_list):
        thead_part = thead_part.replace(td_item, new_td_item)
    return thead_part
ddaa2c2552e19635cd6cdf38619f1f176c358f89
15
table_master_match.py
259
add SLANet
4,744
0
371
140
71
24,496
112
PaddleOCR
16
ppstructure/table/table_master_match.py
Python
20
{ "docstring": "\n Deal duplicate <b> or </b> after replace.\n Keep one <b></b> in a <td></td> token.\n :param thead_part:\n :return:\n ", "language": "en", "n_whitespaces": 33, "n_words": 17, "vocab_size": 17 }
https://github.com/PaddlePaddle/PaddleOCR.git
1
test_readback_tfrecords
def test_readback_tfrecords(ray_start_regular_shared, tmp_path):
    # The dataset we will write to a .tfrecords file.
    ds = ray.data.from_items(
        [
            # Row one.
            {
                "int_item": 1,
                "int_list": [2, 2, 3],
                "float_item": 1.0,
                "float_list": [2.0, 3.0, 4.0],
                "bytes_item": b"abc",
                "bytes_list": [b"abc", b"1234"],
            },
            # Row two.
            {
                "int_item": 2,
                "int_list": [3, 3, 4],
                "float_item": 2.0,
                "float_list": [2.0, 2.0, 3.0],
                "bytes_item": b"def",
                "bytes_list": [b"def", b"1234"],
            },
        ]
    )

    # Write the TFRecords.
    ds.write_tfrecords(tmp_path)

    # Read the TFRecords.
    readback_ds = ray.data.read_tfrecords(tmp_path)
    assert ds.take() == readback_ds.take()
9fab504fe776f96fecf85e12ea006264cbe92f4a
13
test_dataset_tfrecords.py
226
[Datasets] Add writer for TFRecords. (#29448) This PR enables users to write TFRecords from datasets. In particular, the master branch already includes an API for reading TFRecords from datasets. Users have requested the ability to write these datasets back to TFRecords.
30,664
0
366
155
59
135,585
79
ray
11
python/ray/data/tests/test_dataset_tfrecords.py
Python
24
{ "docstring": "\n Test reading back TFRecords written using datasets.\n The dataset we read back should be the same that we wrote.\n ", "language": "en", "n_whitespaces": 29, "n_words": 19, "vocab_size": 17 }
https://github.com/ray-project/ray.git
1
test_uninstall_from_sentry
def test_uninstall_from_sentry(self):
    self.login_as(self.user)

    with self.tasks():
        config_id = "my_config_id"
        responses.add(
            responses.DELETE,
            f"{VercelClient.base_url}{VercelClient.UNINSTALL % config_id}",
            json={},
        )
        path = (
            f"/api/0/organizations/{self.organization.slug}/integrations/{self.integration.id}/"
        )
        response = self.client.delete(path, format="json")
        assert response.status_code == 204

    assert (
        len(
            OrganizationIntegration.objects.filter(
                integration=self.integration,
                status=ObjectStatus.VISIBLE,
            )
        )
        == 1
    )

    response = self.client.delete(
        path=self.url,
        data=PRIMARY_UNINSTALL_RESPONSE,
        content_type="application/json",
    )
    assert response.status_code == 204

    integration = Integration.objects.get(id=self.integration.id)
    assert integration.metadata == {
        "access_token": "my_access_token2",
        "installation_id": "my_config_id2",
        "installation_type": "team",
        "webhook_id": "my_webhook_id2",
        "configurations": {
            "my_config_id2": {
                "access_token": "my_access_token2",
                "webhook_id": "my_webhook_id2",
                "organization_id": self.second_org.id,
            }
        },
    }

    with self.tasks():
        config_id = "my_config_id2"
        responses.add(
            responses.DELETE,
            f"{VercelClient.base_url}{VercelClient.UNINSTALL % config_id}",
            json={},
        )
        path = (
            f"/api/0/organizations/{self.second_org.slug}/integrations/{self.integration.id}/"
        )
        response = self.client.delete(path, format="json")
        assert response.status_code == 204

    assert (
        len(
            OrganizationIntegration.objects.filter(
                integration=self.integration,
                status=ObjectStatus.VISIBLE,
            )
        )
        == 0
    )

    response = self.client.delete(
        path=self.url,
        data=NONPRIMARY_UNINSTALL_RESPONSE,
        content_type="application/json",
    )
    assert response.status_code == 204
    assert not Integration.objects.filter(id=self.integration.id).exists()
8201e74ec3d81e89354905c946e62436f0247602
14
test_uninstall.py
596
ref(integrations): Update Vercel endpoints (#36150) This PR updates the endpoints we reach to in the Vercel integration. It seems to work just fine without changes as the payloads returned from vercel haven't updated, but we'll need to specify API Scopes so they don't receive 403s. This also refactored the pagination code to loop 100 at a time, indefinitely I had previously tried to consolidate the project webhooks in this PR, but I'll be doing that separately.
18,884
0
949
316
58
92,186
128
sentry
40
tests/sentry/integrations/vercel/test_uninstall.py
Python
71
{ "docstring": "\n Test flows of uninstalling from sentry first to make sure\n that uninstall webhook is valid even if the OrganizationIntegration\n was deleted prior.\n 1. Uninstall the primary configuration\n 2. Check that integration metadata still updated\n 3. Uninstall remaining configuration\n 4. Check that integration is removed\n ", "language": "en", "n_whitespaces": 109, "n_words": 44, "vocab_size": 36 }
https://github.com/getsentry/sentry.git
4
check_connection
def check_connection(self) -> StatusResponse:
    response = StatusResponse(False)

    try:
        session = self.connect()
        # TODO: change the healthcheck
        session.execute('SELECT release_version FROM system.local').one()
        response.success = True
    except Exception as e:
        log.error(f'Error connecting to Scylla {self.connection_args["keyspace"]}, {e}!')
        response.error_message = e

    if response.success is False and self.is_connected is True:
        self.is_connected = False

    return response
f1fb699f88f8644d46afa9a2786a934510b05b79
14
scylla_handler.py
147
Impl handler
25,506
0
168
75
40
115,631
49
mindsdb
16
mindsdb/integrations/handlers/scylla_handler/scylla_handler.py
Python
16
{ "docstring": "\n Check the connection of the Scylla database\n :return: success status and error message if error occurs\n ", "language": "en", "n_whitespaces": 38, "n_words": 16, "vocab_size": 14 }
https://github.com/mindsdb/mindsdb.git
2
downgrade
def downgrade():
    conn = op.get_bind()
    if conn.dialect.name == "mysql":
        op.alter_column(
            table_name=TABLE_NAME, column_name=COLUMN_NAME, type_=mysql.TIMESTAMP(), nullable=False
        )
69f6f9e01b6df76c3c8fa266d460324163957887
12
a66efa278eea_add_precision_to_execution_date_in_mysql.py
75
Autogenerate migration reference doc (#21601) * document airflow version in each alembic migration module and use this to autogen the doc * update each migration module to have the same description used in migration ref (so it can be used in autogen)
8,614
0
49
45
15
45,487
15
airflow
15
airflow/migrations/versions/a66efa278eea_add_precision_to_execution_date_in_mysql.py
Python
6
{ "docstring": "Unapply Add Precision to ``execution_date`` in ``RenderedTaskInstanceFields`` table", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/apache/airflow.git
1
setraw
def setraw(fd, when=termios.TCSAFLUSH):
    mode = termios.tcgetattr(fd)
    mode[tty.IFLAG] = mode[tty.IFLAG] & ~(termios.BRKINT | termios.ICRNL | termios.INPCK | termios.ISTRIP | termios.IXON)
    # mode[tty.OFLAG] = mode[tty.OFLAG] & ~(termios.OPOST)
    mode[tty.CFLAG] = mode[tty.CFLAG] & ~(termios.CSIZE | termios.PARENB)
    mode[tty.CFLAG] = mode[tty.CFLAG] | termios.CS8
    mode[tty.LFLAG] = mode[tty.LFLAG] & ~(termios.ECHO | termios.ICANON | termios.IEXTEN | termios.ISIG)
    mode[tty.CC][termios.VMIN] = 1
    mode[tty.CC][termios.VTIME] = 0
    termios.tcsetattr(fd, when, mode)
5797d06aecb1ff620483f6c3ff4bd6d30356332f
14
pause.py
250
Use customized setraw to prevent output issues (#78060)
78,971
0
87
165
33
267,602
57
ansible
27
lib/ansible/plugins/action/pause.py
Python
9
{ "docstring": "Put terminal into a raw mode.\n\n Copied from ``tty`` from CPython 3.11.0, and modified to not remove OPOST from OFLAG\n\n OPOST is kept to prevent an issue with multi line prompts from being corrupted now that display\n is proxied via the queue from forks. The problem is a race condition, in that we proxy the display\n over the fork, but before it can be displayed, this plugin will have continued executing, potentially\n setting stdout and stdin to raw which remove output post processing that commonly converts NL to CRLF\n ", "language": "en", "n_whitespaces": 107, "n_words": 89, "vocab_size": 70 }
https://github.com/ansible/ansible.git
1
get_student_attendance
def get_student_attendance(student_group, date):
    student_attendance = frappe.db.sql(
        ,
        (student_group, date),
        as_dict=1,
    )
    return student_attendance
494bd9ef78313436f0424b918f200dab8fc7c20b
9
student_batch_wise_attendance.py
45
style: format code with black
14,062
0
6
30
12
65,941
13
erpnext
8
erpnext/education/report/student_batch_wise_attendance/student_batch_wise_attendance.py
Python
9
{ "docstring": "select count(*) as count, status from `tabStudent Attendance` where\n\t\t\t\tstudent_group= %s and date= %s and docstatus = 1 and\n\t\t\t\t(course_schedule is Null or course_schedule='') group by status", "language": "en", "n_whitespaces": 24, "n_words": 27, "vocab_size": 23 }
https://github.com/frappe/erpnext.git
3
test_legend_auto5
def test_legend_auto5():
    fig, axs = plt.subplots(ncols=2, figsize=(9.6, 4.8))

    leg_bboxes = []
    for ax, loc in zip(axs.flat, ("center", "best")):
        # An Ellipse patch at the top, a U-shaped Polygon patch at the
        # bottom and a ring-like Wedge patch: the correct placement of
        # the legend should be in the center.
        for _patch in [
                mpatches.Ellipse(
                    xy=(0.5, 0.9), width=0.8, height=0.2, fc="C1"),
                mpatches.Polygon(np.array([
                    [0, 1], [0, 0], [1, 0], [1, 1], [0.9, 1.0],
                    [0.9, 0.1], [0.1, 0.1], [0.1, 1.0], [0.1, 1.0]]), fc="C1"),
                mpatches.Wedge((0.5, 0.5), 0.5, 0, 360, width=0.05, fc="C0")
                ]:
            ax.add_patch(_patch)

        ax.plot([0.1, 0.9], [0.9, 0.9], label="A segment")  # sthg to label

        leg = ax.legend(loc=loc)
        fig.canvas.draw()
        leg_bboxes.append(
            leg.get_window_extent().transformed(ax.transAxes.inverted()))

    assert_allclose(leg_bboxes[1].bounds, leg_bboxes[0].bounds)


@image_comparison(['legend_various_labels'], remove_text=True)
d8bb1a52316c38434e526412c27d9c4b01960084
@image_comparison(['legend_various_labels'], remove_text=True)
14
test_legend.py
390
ENH: rely on non-rectangular patch paths rather than bboxes for legend auto-placing (fix #9580) (#9598) * use path rather than bbox for non rectangular patches * Add tests * Add a short breadcrumb note in api_changes
24,225
1
319
300
82
110,587
109
matplotlib
39
lib/matplotlib/tests/test_legend.py
Python
19
{ "docstring": "\n Check that the automatic placement handle a rather complex\n case with non rectangular patch. Related to issue #9580.\n ", "language": "en", "n_whitespaces": 28, "n_words": 18, "vocab_size": 18 }
https://github.com/matplotlib/matplotlib.git
1
sort_accounts
def sort_accounts(accounts, is_root=False, key="name"):
    def compare_accounts(a, b):
        if re.split(r"\W+", a[key])[0].isdigit():
            # if chart of accounts is numbered, then sort by number
            return int(a[key] > b[key]) - int(a[key] < b[key])
        elif is_root:
            if a.report_type != b.report_type and a.report_type == "Balance Sheet":
                return -1
            if a.root_type != b.root_type and a.root_type == "Asset":
                return -1
            if a.root_type == "Liability" and b.root_type == "Equity":
                return -1
            if a.root_type == "Income" and b.root_type == "Expense":
                return -1
        else:
            # sort by key (number) or name
            return int(a[key] > b[key]) - int(a[key] < b[key])
        return 1

    accounts.sort(key=functools.cmp_to_key(compare_accounts))
494bd9ef78313436f0424b918f200dab8fc7c20b
15
financial_statements.py
297
style: format code with black
13,828
0
72
29
49
65,230
91
erpnext
16
erpnext/accounts/report/financial_statements.py
Python
3
{ "docstring": "Sort root types as Asset, Liability, Equity, Income, Expense", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/frappe/erpnext.git
1
look_at
def look_at(eye, center, world_up):
    batch_size = center.shape[0]
    vector_degeneracy_cutoff = 1e-6
    forward = center - eye
    forward_norm = np.linalg.norm(forward, axis=1, keepdims=True)
    forward = np.divide(forward, forward_norm)

    to_side = np.cross(forward, world_up)
    to_side_norm = np.linalg.norm(to_side, axis=1, keepdims=True)
    to_side = np.divide(to_side, to_side_norm)
    cam_up = np.cross(to_side, forward)

    w_column = np.array(
        batch_size * [[0., 0., 0., 1.]], dtype=np.float32)  # [batch_size, 4]
    w_column = np.reshape(w_column, [batch_size, 4, 1])
    view_rotation = np.stack(
        [to_side, cam_up, -forward,
         np.zeros_like(to_side, dtype=np.float32)], axis=1)  # [batch_size, 4, 3] matrix
    view_rotation = np.concatenate(
        [view_rotation, w_column], axis=2)  # [batch_size, 4, 4]

    identity_batch = np.tile(np.expand_dims(np.eye(3), 0), [batch_size, 1, 1])
    view_translation = np.concatenate([identity_batch, np.expand_dims(-eye, 2)], 2)
    view_translation = np.concatenate(
        [view_translation,
         np.reshape(w_column, [batch_size, 1, 4])], 1)
    camera_matrices = np.matmul(view_rotation, view_translation)
    return camera_matrices
7375ee364e0df2a417f92593e09557f1b2a3575a
13
ganfit_camera.py
410
initialize ostec
1,638
0
223
287
72
9,563
112
insightface
34
reconstruction/ostec/utils/ganfit_camera.py
Python
26
{ "docstring": "Computes camera viewing matrices.\n Functionality mimes gluLookAt (third_party/GL/glu/include/GLU/glu.h).\n Args:\n eye: 2-D float32 tensor with shape [batch_size, 3] containing the XYZ world\n space position of the camera.\n center: 2-D float32 tensor with shape [batch_size, 3] containing a position\n along the center of the camera's gaze.\n world_up: 2-D float32 tensor with shape [batch_size, 3] specifying the\n world's up direction; the output camera will have no tilt with respect\n to this direction.\n Returns:\n A [batch_size, 4, 4] float tensor containing a right-handed camera\n extrinsics matrix that maps points from world space to points in eye space.\n ", "language": "en", "n_whitespaces": 148, "n_words": 93, "vocab_size": 60 }
https://github.com/deepinsight/insightface.git
1
on_train_batch_end
def on_train_batch_end(self, batch, logs=None):
    # For backwards compatibility.
    self.on_batch_end(batch, logs=logs)
f3cafc77c269f7ecbf80bb4cf4b54e28c153f4e6
8
callbacks.py
36
resolve line-too-long in root directory
82,206
0
31
22
10
277,786
10
keras
5
keras/callbacks.py
Python
2
{ "docstring": "Called at the end of a training batch in `fit` methods.\n\n Subclasses should override for any actions to run.\n\n Note that if the `steps_per_execution` argument to `compile` in\n `tf.keras.Model` is set to `N`, this method will only be called every\n `N` batches.\n\n Args:\n batch: Integer, index of batch within the current epoch.\n logs: Dict. Aggregated metric results up until this batch.\n ", "language": "en", "n_whitespaces": 125, "n_words": 61, "vocab_size": 53 }
https://github.com/keras-team/keras.git
5
pop_up
def pop_up(self, text='', move=True):
    self.edit.setText(text)
    self.edit.setSelection(0, len(text))
    self.edit.setFocus(Qt.PopupFocusReason)
    if move:
        cursor_pos = QCursor.pos()

        # move OK button below cursor
        btn = self.button_box.buttons()[0]
        self.adjustSize()
        btn.adjustSize()
        offset = btn.mapToGlobal(btn.pos()) - self.pos()
        offset += QPoint(btn.size().width()/4, btn.size().height()/2)
        cursor_pos.setX(max(0, cursor_pos.x() - offset.x()))
        cursor_pos.setY(max(0, cursor_pos.y() - offset.y()))

        parent_bottom_right = self.parentWidget().geometry()
        max_x = parent_bottom_right.x() + parent_bottom_right.width() - self.sizeHint().width()
        max_y = parent_bottom_right.y() + parent_bottom_right.height() - self.sizeHint().height()
        max_global = self.parentWidget().mapToGlobal(QPoint(max_x, max_y))
        if cursor_pos.x() > max_global.x():
            cursor_pos.setX(max_global.x())
        if cursor_pos.y() > max_global.y():
            cursor_pos.setY(max_global.y())
        self.move(cursor_pos)
    return trimmed(self.edit.text()) if self.exec_() else None
0c377fc2588ad0f57b3060c26f2d4f891c68276f
15
labelDialog.py
515
move "OK" button below cursor when opening label dialog (#893) The intent for this change is to allow faster labeling when there are streaks of the same label occasionally, but not consistently enough to enable "default label" mode. With this PR the user can simply click again to confirm the previous selection, without having to aim for the OK button, or move either hand to the ENTER button. The alignment and position change of the buttons is for two reasons: - it covers less of the drawn shape, easing verification of the label to be selected - the alignment wasn't taken into account for offset calculation. It works if the dialog is shown, but that causes the dialog to briefly flicker in the old position. Presumably an `adjustSize` or similar is needed on more widgets, but I was unable to figure out which.
43,352
0
327
310
61
181,545
79
labelImg
38
libs/labelDialog.py
Python
23
{ "docstring": "\n Shows the dialog, setting the current text to `text`, and blocks the caller until the user has made a choice.\n If the user entered a label, that label is returned, otherwise (i.e. if the user cancelled the action)\n `None` is returned.\n ", "language": "en", "n_whitespaces": 70, "n_words": 41, "vocab_size": 31 }
https://github.com/heartexlabs/labelImg.git
5
create_pseudo_labeled_data
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
34ef029dc04bb94b4e280e02ef3e82dacf1b9dfc
14
selftraining.py
332
Add self training code for text classification (#16738) * Add self-training code for text-classification * Add self-training code for text-classification * Add self-training code for text-classification * Add self-training code for text-classification * Add self-training code for text-classification * Delete strata
6,736
0
162
203
53
37,117
73
transformers
37
examples/research_projects/self-training-text-classification/selftraining.py
Python
19
{ "docstring": "Create pseudeo labeled data for the next self-training iteration.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/huggingface/transformers.git
17
jacobi_symbol
def jacobi_symbol(m, n):
    r
    m, n = as_int(m), as_int(n)
    if n < 0 or not n % 2:
        raise ValueError("n should be an odd positive integer")
    if m < 0 or m > n:
        m %= n
    if not m:
        return int(n == 1)
    if n == 1 or m == 1:
        return 1
    if igcd(m, n) != 1:
        return 0

    j = 1
    while m != 0:
        while m % 2 == 0 and m > 0:
            m >>= 1
            if n % 8 in [3, 5]:
                j = -j
        m, n = n, m
        if m % 4 == n % 4 == 3:
            j = -j
        m %= n

    return j
d7614cda8f6b341df8515c65244a415b0bf2fc6f
13
residue_ntheory.py
234
jacobi_symbol: remove dead code paths Fixes #23443 The first part of the code was never reached `m %= n` The later `n != 1` condition never matched either since the gcd calculation prevented that from ever happening. ```python if igcd(m, n) != 1: return 0 ``` https://en.wikipedia.org/wiki/Jacobi_symbol#Implementation_in_Lua
48,735
0
254
177
52
197,883
114
sympy
8
sympy/ntheory/residue_ntheory.py
Python
85
{ "docstring": "\n Returns the Jacobi symbol `(m / n)`.\n\n For any integer ``m`` and any positive odd integer ``n`` the Jacobi symbol\n is defined as the product of the Legendre symbols corresponding to the\n prime factors of ``n``:\n\n .. math ::\n \\genfrac(){}{}{m}{n} =\n \\genfrac(){}{}{m}{p^{1}}^{\\alpha_1}\n \\genfrac(){}{}{m}{p^{2}}^{\\alpha_2}\n ...\n \\genfrac(){}{}{m}{p^{k}}^{\\alpha_k}\n \\text{ where } n =\n p_1^{\\alpha_1}\n p_2^{\\alpha_2}\n ...\n p_k^{\\alpha_k}\n\n Like the Legendre symbol, if the Jacobi symbol `\\genfrac(){}{}{m}{n} = -1`\n then ``m`` is a quadratic nonresidue modulo ``n``.\n\n But, unlike the Legendre symbol, if the Jacobi symbol\n `\\genfrac(){}{}{m}{n} = 1` then ``m`` may or may not be a quadratic residue\n modulo ``n``.\n\n Parameters\n ==========\n\n m : integer\n n : odd positive integer\n\n Examples\n ========\n\n >>> from sympy.ntheory import jacobi_symbol, legendre_symbol\n >>> from sympy import S\n >>> jacobi_symbol(45, 77)\n -1\n >>> jacobi_symbol(60, 121)\n 1\n\n The relationship between the ``jacobi_symbol`` and ``legendre_symbol`` can\n be demonstrated as follows:\n\n >>> L = legendre_symbol\n >>> S(45).factors()\n {3: 2, 5: 1}\n >>> jacobi_symbol(7, 45) == L(7, 3)**2 * L(7, 5)**1\n True\n\n See Also\n ========\n\n is_quad_residue, legendre_symbol\n ", "language": "en", "n_whitespaces": 387, "n_words": 165, "vocab_size": 107 }
https://github.com/sympy/sympy.git
1
_topk
def _topk(self, scores):
    k = self.max_per_img
    shape_fm = paddle.shape(scores)
    shape_fm.stop_gradient = True
    cat, height, width = shape_fm[1], shape_fm[2], shape_fm[3]
    # batch size is 1
    scores_r = paddle.reshape(scores, [cat, -1])
    topk_scores, topk_inds = paddle.topk(scores_r, k)
    topk_ys = topk_inds // width
    topk_xs = topk_inds % width

    topk_score_r = paddle.reshape(topk_scores, [-1])
    topk_score, topk_ind = paddle.topk(topk_score_r, k)
    k_t = paddle.full(paddle.shape(topk_ind), k, dtype='int64')
    topk_clses = paddle.cast(paddle.floor_divide(topk_ind, k_t), 'float32')

    topk_inds = paddle.reshape(topk_inds, [-1])
    topk_ys = paddle.reshape(topk_ys, [-1, 1])
    topk_xs = paddle.reshape(topk_xs, [-1, 1])
    topk_inds = paddle.gather(topk_inds, topk_ind)
    topk_ys = paddle.gather(topk_ys, topk_ind)
    topk_xs = paddle.gather(topk_xs, topk_ind)

    return topk_score, topk_inds, topk_clses, topk_ys, topk_xs
5e02a81af77a9a4ecd1e394430c4396b48bc76fd
10
layers.py
344
remove one line redundant code (#6424)
53,025
0
242
238
60
211,087
95
PaddleDetection
29
ppdet/modeling/layers.py
Python
21
{ "docstring": "\n Select top k scores and decode to get xy coordinates.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/PaddlePaddle/PaddleDetection.git
3
latitude
def latitude(self) -> float | None:
    if (
        self.extra_state_attributes is not None
        and ATTR_LATITUDE in self.extra_state_attributes
    ):
        latitude: float = self.extra_state_attributes[ATTR_LATITUDE]
        return latitude
    return None
bcae6d604e2967c7475f0caa4b1b5e4e76ab88bf
10
schema_discovery.py
64
Improve MQTT type hints part 8 (#81034) * Improve typing device_tracker discovery * Improve typing device_tracker yaml * Add test source_type attribute * Follow up comment * Initialize at `__init__` not at class level. * Use full name for return variable * Correct import, remove assert * Use AsyncSeeCallback
89,084
0
97
40
21
289,958
25
core
5
homeassistant/components/mqtt/device_tracker/schema_discovery.py
Python
9
{ "docstring": "Return latitude if provided in extra_state_attributes or None.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
1
test_false_values_displayed
def test_false_values_displayed(self):
    response = self.get(4)
    self.assertContains(response, "<dd>False</dd>", count=3, html=True)
d10f15e55806c6944827d801cd9c2d53f5da4186
8
test_page_modeladmin.py
50
Reformat with black
15,994
0
30
30
9
73,222
9
wagtail
7
wagtail/contrib/modeladmin/tests/test_page_modeladmin.py
Python
3
{ "docstring": "\n Boolean fields with False values should display False, rather than the\n value of `get_empty_value_display()`. For this page, those should be\n `locked`, `expired` and `has_unpublished_changes`\n ", "language": "en", "n_whitespaces": 53, "n_words": 24, "vocab_size": 23 }
https://github.com/wagtail/wagtail.git
2
call
def call(self, inputs, **kwargs):  # pylint:disable=unused-argument
    # Compute the axes along which to reduce the mean / variance
    input_shape = K.int_shape(inputs)
    layer_size = input_shape[self.axis]

    if self.partial in (0.0, 1.0):
        mean_square = K.mean(K.square(inputs), axis=self.axis, keepdims=True)
    else:
        partial_size = int(layer_size * self.partial)
        partial_x, _ = tf.split(  # pylint:disable=redundant-keyword-arg,no-value-for-parameter
            inputs, [partial_size, layer_size - partial_size], axis=self.axis)
        mean_square = K.mean(K.square(partial_x), axis=self.axis, keepdims=True)

    recip_square_root = tf.math.rsqrt(mean_square + self.epsilon)
    output = self.scale * inputs * recip_square_root + self.offset
    return output
c1512fd41d86ef47a5d1ce618d6d755ef7cbacdf
13
normalization_tf.py
230
Update code to support Tensorflow versions up to 2.8 (#1213) * Update maximum tf version in setup + requirements * - bump max version of tf version in launcher - standardise tf version check * update keras get_custom_objects for tf>2.6 * bugfix: force black text in GUI file dialogs (linux) * dssim loss - Move to stock tf.ssim function * Update optimizer imports for compatibility * fix logging for tf2.8 * Fix GUI graphing for TF2.8 * update tests * bump requirements.txt versions * Remove limit on nvidia-ml-py * Graphing bugfixes - Prevent live graph from displaying if data not yet available * bugfix: Live graph. Collect loss labels correctly * fix: live graph - swallow inconsistent loss errors * Bugfix: Prevent live graph from clearing during training * Fix graphing for AMD
19,848
0
227
153
53
100,359
73
faceswap
27
lib/model/normalization/normalization_tf.py
Python
15
{ "docstring": " Call Root Mean Square Layer Normalization\n\n Parameters\n ----------\n inputs: tensor\n Input tensor, or list/tuple of input tensors\n\n Returns\n -------\n tensor\n A tensor or list/tuple of tensors\n ", "language": "en", "n_whitespaces": 98, "n_words": 26, "vocab_size": 20 }
https://github.com/deepfakes/faceswap.git
3
test_to_python
def test_to_python(self):
    table = self.block.to_python(self.db_data)
    self.assertIsInstance(table, TypedTable)
    self.assertEqual(len(table.columns), 2)
    self.assertEqual(table.columns[0]["heading"], "Country")
    self.assertEqual(table.columns[1]["heading"], "Description")
    rows = list(table.rows)
    self.assertEqual(len(rows), 2)
    self.assertEqual(
        [block.value for block in rows[0]],
        ["nl", "A small country with stroopwafels"],
    )
    self.assertEqual(
        [block.value for block in rows[1]], ["fr", "A large country with baguettes"]
    )
d10f15e55806c6944827d801cd9c2d53f5da4186
10
tests.py
221
Reformat with black
16,073
0
161
137
33
73,622
44
wagtail
14
wagtail/contrib/typed_table_block/tests.py
Python
15
{ "docstring": "\n Test that we can turn JSONish data from the database into a TypedTable instance\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 14 }
https://github.com/wagtail/wagtail.git
2
_format_maybe_minus_and_locale
def _format_maybe_minus_and_locale(self, fmt, arg):
    return self.fix_minus(locale.format_string(fmt, (arg,), True)
                          if self._useLocale else fmt % arg)
88cb4c9d0aa1e790fc4689ca7e68725bf851bf63
11
ticker.py
55
Properly capitalize "Unicode". See e.g. https://en.wikipedia.org/wiki/Unicode, https://docs.python.org/3/howto/unicode.html. Also associated minor doc cleanups.
22,797
0
57
37
14
107,535
14
matplotlib
8
lib/matplotlib/ticker.py
Python
3
{ "docstring": "\n Format *arg* with *fmt*, applying Unicode minus and locale if desired.\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
https://github.com/matplotlib/matplotlib.git
1
create_session
def create_session(self, loop):
    session = ClientSession(loop=loop, json_serialize=json_dumps)
    # Setting directly on `session` will raise deprecation warning
    object.__setattr__(session, "_request", self.match_request)
    return session
da027fa3905d36b91beefd456cb546f891617eab
9
aiohttp.py
56
JSON serialize NamedTuple subclasses with aiohttp (#74971)
115,615
0
56
34
20
317,039
21
core
10
tests/test_util/aiohttp.py
Python
4
{ "docstring": "Create a ClientSession that is bound to this mocker.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
3
last
def last(self):
    for obj in (self.reverse() if self.ordered else self.order_by("-pk"))[:1]:
        return obj
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
query.py
57
Refs #33476 -- Reformatted code with Black.
51,217
0
37
34
11
205,791
12
django
6
django/db/models/query.py
Python
3
{ "docstring": "Return the last object of a query or None if no match is found.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 14 }
https://github.com/django/django.git
1
create_view
def create_view(self, request):
    kwargs = {"model_admin": self}
    view_class = self.create_view_class
    return view_class.as_view(**kwargs)(request)
d10f15e55806c6944827d801cd9c2d53f5da4186
9
options.py
54
Reformat with black
15,968
0
40
31
11
73,170
12
wagtail
7
wagtail/contrib/modeladmin/options.py
Python
4
{ "docstring": "\n Instantiates a class-based view to provide 'creation' functionality for\n the assigned model, or redirect to Wagtail's create view if the\n assigned model extends 'Page'. The view class used can be overridden by\n changing the 'create_view_class' attribute.\n ", "language": "en", "n_whitespaces": 72, "n_words": 36, "vocab_size": 30 }
https://github.com/wagtail/wagtail.git
5
_create_index
def _create_index(instance, table_name, index_name):
    table = Table(table_name, Base.metadata)
    _LOGGER.debug("Looking up index %s for table %s", index_name, table_name)
    # Look up the index object by name from the table is the models
    index_list = [idx for idx in table.indexes if idx.name == index_name]
    if not index_list:
        _LOGGER.debug("The index %s no longer exists", index_name)
        return
    index = index_list[0]
    _LOGGER.debug("Creating %s index", index_name)
    _LOGGER.warning(
        "Adding index `%s` to database. Note: this can take several "
        "minutes on large databases and slow computers. Please "
        "be patient!",
        index_name,
    )
    try:
        with session_scope(session=instance.get_session()) as session:
            connection = session.connection()
            index.create(connection)
    except (InternalError, ProgrammingError, OperationalError) as err:
        raise_if_exception_missing_str(err, ["already exists", "duplicate"])
        _LOGGER.warning(
            "Index %s already exists on %s, continuing", index_name, table_name
        )

    _LOGGER.debug("Finished creating %s", index_name)
41ab12cb88741807a246bb296762d7c683a30b58
15
migration.py
258
Don't use shared session during recorder migration (#65672)
110,990
0
261
153
91
312,340
119
core
26
homeassistant/components/recorder/migration.py
Python
25
{ "docstring": "Create an index for the specified table.\n\n The index name should match the name given for the index\n within the table definition described in the models\n ", "language": "en", "n_whitespaces": 35, "n_words": 26, "vocab_size": 18 }
https://github.com/home-assistant/core.git
5
squeeze
def squeeze(self, axis=None):
    axis = range(self._AXIS_LEN) if axis is None else (self._get_axis_number(axis),)
    return self.iloc[
        tuple(
            0 if i in axis and len(a) == 1 else slice(None)
            for i, a in enumerate(self.axes)
        )
    ]

# ----------------------------------------------------------------------
# Rename
244f747bb63f45c1c439193f0672c6162853b168
13
generic.py
108
make series axis parameter docs consistent (#47109) * make series docs consistent add series unused param info to DF docs * fix trailing whitespace * fix docs build * add unused * add or update docs for all series methods * small fix * fix line length * fix param order * fix param order * add * add backticks to None and fix space Co-authored-by: uncjackg <[email protected]>
39,840
0
123
69
31
166,611
37
pandas
14
pandas/core/generic.py
Python
8
{ "docstring": "\n Squeeze 1 dimensional axis objects into scalars.\n\n Series or DataFrames with a single element are squeezed to a scalar.\n DataFrames with a single column or a single row are squeezed to a\n Series. Otherwise the object is unchanged.\n\n This method is most useful when you don't know if your\n object is a Series or DataFrame, but you do know it has just a single\n column. In that case you can safely call `squeeze` to ensure you have a\n Series.\n\n Parameters\n ----------\n axis : {0 or 'index', 1 or 'columns', None}, default None\n A specific axis to squeeze. By default, all length-1 axes are\n squeezed. For `Series` this parameter is unused and defaults to `None`.\n\n Returns\n -------\n DataFrame, Series, or scalar\n The projection after squeezing `axis` or all the axes.\n\n See Also\n --------\n Series.iloc : Integer-location based indexing for selecting scalars.\n DataFrame.iloc : Integer-location based indexing for selecting Series.\n Series.to_frame : Inverse of DataFrame.squeeze for a\n single-column DataFrame.\n\n Examples\n --------\n >>> primes = pd.Series([2, 3, 5, 7])\n\n Slicing might produce a Series with a single value:\n\n >>> even_primes = primes[primes % 2 == 0]\n >>> even_primes\n 0 2\n dtype: int64\n\n >>> even_primes.squeeze()\n 2\n\n Squeezing objects with more than one value in every axis does nothing:\n\n >>> odd_primes = primes[primes % 2 == 1]\n >>> odd_primes\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n >>> odd_primes.squeeze()\n 1 3\n 2 5\n 3 7\n dtype: int64\n\n Squeezing is even more effective when used with DataFrames.\n\n >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])\n >>> df\n a b\n 0 1 2\n 1 3 4\n\n Slicing a single column will produce a DataFrame with the columns\n having only one value:\n\n >>> df_a = df[['a']]\n >>> df_a\n a\n 0 1\n 1 3\n\n So the columns can be squeezed down, resulting in a Series:\n\n >>> df_a.squeeze('columns')\n 0 1\n 1 3\n Name: a, dtype: int64\n\n Slicing a single row from a single column will produce a single\n scalar DataFrame:\n\n >>> df_0a = df.loc[df.index < 1, ['a']]\n >>> df_0a\n a\n 0 1\n\n Squeezing the rows produces a single scalar Series:\n\n >>> df_0a.squeeze('rows')\n a 1\n Name: 0, dtype: int64\n\n Squeezing all axes will project directly into a scalar:\n\n >>> df_0a.squeeze()\n 1\n ", "language": "en", "n_whitespaces": 964, "n_words": 361, "vocab_size": 184 }
https://github.com/pandas-dev/pandas.git
2
filename
def filename(self):
    if self.buildver:
        buildver = '-' + self.buildver
    else:
        buildver = ''
    pyver = '.'.join(self.pyver)
    abi = '.'.join(self.abi)
    arch = '.'.join(self.arch)
    # replace - with _ as a local version separator
    version = self.version.replace('-', '_')
    return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver, pyver, abi, arch)
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
10
wheel.py
146
upd; format
12,910
0
170
83
38
62,287
45
transferlearning
10
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/wheel.py
Python
11
{ "docstring": "\n Build and return a filename from the various components.\n ", "language": "en", "n_whitespaces": 24, "n_words": 9, "vocab_size": 9 }
https://github.com/jindongwang/transferlearning.git
3
async_internal_added_to_hass
async def async_internal_added_to_hass(self) -> None:
    await super().async_internal_added_to_hass()
    state = await self.async_get_last_state()
    if state is not None and state.attributes.get(ATTR_SKIPPED_VERSION) is not None:
        self.__skipped_version = state.attributes[ATTR_SKIPPED_VERSION]
073fb40b79cf8aa06790fdceb23b6857db888c99
10
__init__.py
88
Add update entity platform (#68248) Co-authored-by: Glenn Waters <[email protected]>
93,040
0
63
52
18
293,995
24
core
9
homeassistant/components/update/__init__.py
Python
9
{ "docstring": "Call when the update entity is added to hass.\n\n It is used to restore the skipped version, if any.\n ", "language": "en", "n_whitespaces": 33, "n_words": 19, "vocab_size": 16 }
https://github.com/home-assistant/core.git
2
generate_export_pipeline_code
def generate_export_pipeline_code(pipeline_tree, operators):
    steps = _process_operator(pipeline_tree, operators)
    # number of steps in a pipeline
    num_step = len(steps)
    if num_step > 1:
        pipeline_text = "make_pipeline(\n{STEPS}\n)".format(
            STEPS=_indent(",\n".join(steps), 4)
        )
    # only one operator (root = True)
    else:
        pipeline_text = "{STEPS}".format(STEPS=_indent(",\n".join(steps), 0))

    return pipeline_text
388616b6247ca4ea8de4e2f340d6206aee523541
17
export_utils.py
128
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
43,653
0
97
71
32
181,898
41
tpot
12
tpot/export_utils.py
Python
10
{ "docstring": "Generate code specific to the construction of the sklearn Pipeline for export_pipeline.\n\n Parameters\n ----------\n pipeline_tree: list\n List of operators in the current optimized pipeline\n\n Returns\n -------\n Source code for the sklearn pipeline\n\n ", "language": "en", "n_whitespaces": 60, "n_words": 32, "vocab_size": 24 }
https://github.com/EpistasisLab/tpot.git
13
OutputString
def OutputString(self, attrs=None):
    # Build up our result
    #
    result = []
    append = result.append

    # First, the key=value pair
    append("%s=%s" % (self.key, self.coded_value))

    # Now add any defined attributes
    if attrs is None:
        attrs = self._reserved
    items = sorted(self.items())
    for key, value in items:
        if value == "":
            continue
        if key not in attrs:
            continue
        if key == "expires" and isinstance(value, int):
            append("%s=%s" % (self._reserved[key], _getdate(value)))
        elif key == "max-age" and isinstance(value, int):
            append("%s=%d" % (self._reserved[key], value))
        elif key == "comment" and isinstance(value, str):
            append("%s=%s" % (self._reserved[key], _quote(value)))
        elif key in self._flags:
            if value:
                append(str(self._reserved[key]))
        else:
            append("%s=%s" % (self._reserved[key], value))

    # Return the result
    return _semispacejoin(result)

__class_getitem__ = classmethod(types.GenericAlias)


#
# Pattern for finding cookie
#
# This used to be strict parsing based on the RFC2109 and RFC2068
# specifications. I have since discovered that MSIE 3.0x doesn't
# follow the character rules outlined in those specs. As a
# result, the parsing rules here are less strict.
#

_LegalKeyChars = r"\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\="
_LegalValueChars = _LegalKeyChars + r'\[\]'
_CookiePattern = re.compile(r + _LegalKeyChars + r + _LegalValueChars + r, re.ASCII | re.VERBOSE)  # re.ASCII may be removed if safe.


# At long last, here is the cookie class. Using this class is almost just like
# using a dictionary. See this module's docstring for example usage.
#
8198943edd73a363c266633e1aa5b2a9e9c9f526
18
cookies.py
420
add python 3.10.4 for windows
54,962
0
510
203
132
217,838
218
XX-Net
29
python3.10.4/Lib/http/cookies.py
Python
24
{ "docstring": "\n \\s* # Optional whitespace at start of cookie\n (?P<key> # Start of group 'key'\n []+? # Any word of at least one letter\n ) # End of group 'key'\n ( # Optional group: there may not be a value.\n \\s*=\\s* # Equal Sign\n (?P<val> # Start of group 'val'\n \"(?:[^\\\\\"]|\\\\.)*\" # Any doublequoted string\n | # or\n \\w{3},\\s[\\w\\d\\s-]{9,11}\\s[\\d:]{8}\\sGMT # Special case for \"expires\" attr\n | # or\n []* # Any word or empty string\n ) # End of group 'val'\n )? # End of optional value group\n \\s* # Any number of spaces.\n (\\s+|;|$) # Ending either at space, semicolon, or EOS.\n ", "language": "en", "n_whitespaces": 508, "n_words": 102, "vocab_size": 57 }
https://github.com/XX-net/XX-Net.git
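The `OutputString` method above serializes a single cookie key/value pair plus its attributes; since this record comes from the standard library's `http.cookies`, a short usage sketch can exercise it directly (the cookie name and attribute values are arbitrary examples).

from http.cookies import SimpleCookie

# Build a cookie and render it; Morsel.OutputString() does the serialization.
jar = SimpleCookie()
jar["session"] = "abc123"
jar["session"]["max-age"] = 3600
jar["session"]["httponly"] = True

print(jar["session"].OutputString())
# e.g. session=abc123; HttpOnly; Max-Age=3600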
11
validate_cycler
def validate_cycler(s): if isinstance(s, str): # TODO: We might want to rethink this... # While I think I have it quite locked down, it is execution of # arbitrary code without sanitation. # Combine this with the possibility that rcparams might come from the # internet (future plans), this could be downright dangerous. # I locked it down by only having the 'cycler()' function available. # UPDATE: Partly plugging a security hole. # I really should have read this: # https://nedbatchelder.com/blog/201206/eval_really_is_dangerous.html # We should replace this eval with a combo of PyParsing and # ast.literal_eval() try: _DunderChecker().visit(ast.parse(s)) s = eval(s, {'cycler': cycler, '__builtins__': {}}) except BaseException as e: raise ValueError(f"{s!r} is not a valid cycler construction: {e}" ) from e # Should make sure what comes from the above eval() # is a Cycler object. if isinstance(s, Cycler): cycler_inst = s else: raise ValueError(f"Object is not a string or Cycler instance: {s!r}") unknowns = cycler_inst.keys - (set(_prop_validators) | set(_prop_aliases)) if unknowns: raise ValueError("Unknown artist properties: %s" % unknowns) # Not a full validation, but it'll at least normalize property names # A fuller validation would require v0.10 of cycler. checker = set() for prop in cycler_inst.keys: norm_prop = _prop_aliases.get(prop, prop) if norm_prop != prop and norm_prop in cycler_inst.keys: raise ValueError(f"Cannot specify both {norm_prop!r} and alias " f"{prop!r} in the same prop_cycle") if norm_prop in checker: raise ValueError(f"Another property was already aliased to " f"{norm_prop!r}. Collision normalizing {prop!r}.") checker.update([norm_prop]) # This is just an extra-careful check, just in case there is some # edge-case I haven't thought of. assert len(checker) == len(cycler_inst.keys) # Now, it should be safe to mutate this cycler for prop in cycler_inst.keys: norm_prop = _prop_aliases.get(prop, prop) cycler_inst.change_key(prop, norm_prop) for key, vals in cycler_inst.by_key().items(): _prop_validators[key](vals) return cycler_inst
22a44db48c1b7659837092c5b01a59eb1d004fc6
15
rcsetup.py
423
Use repr in error messages
23,050
0
646
228
192
108,079
289
matplotlib
31
lib/matplotlib/rcsetup.py
Python
32
{ "docstring": "Return a Cycler object from a string repr or the object itself.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 10 }
https://github.com/matplotlib/matplotlib.git
1
addslashes
def addslashes(value): return value.replace("\\", "\\\\").replace('"', '\\"').replace("'", "\\'") @register.filter(is_safe=True) @stringfilter
9c19aff7c7561e3a82978a272ecdaad40dda5c00
@register.filter(is_safe=True) @stringfilter
12
defaultfilters.py
81
Refs #33476 -- Reformatted code with Black.
51,427
1
13
29
9
206,236
9
django
7
django/template/defaultfilters.py
Python
2
{ "docstring": "\n Add slashes before quotes. Useful for escaping strings in CSV, for\n example. Less useful for escaping JavaScript; use the ``escapejs``\n filter instead.\n ", "language": "en", "n_whitespaces": 35, "n_words": 22, "vocab_size": 19 }
https://github.com/django/django.git
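A tiny standalone check of the escaping rule above (backslashes doubled first, then double and single quotes backslash-escaped); it mirrors the filter's chained `str.replace` calls without importing Django.

def addslashes(value):
    # Same chained replacements as the Django filter above.
    return value.replace("\\", "\\\\").replace('"', '\\"').replace("'", "\\'")

print(addslashes('He said "it\'s fine"'))
# He said \"it\'s fine\"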
3
get_data
def get_data(filters): lead_details = [] lead_filters = get_lead_filters(filters) for lead in frappe.get_all( "Lead", fields=["name", "lead_name", "company_name"], filters=lead_filters ): data = frappe.db.sql( , {"lead": lead.name, "limit": filters.get("no_of_interaction")}, ) for lead_info in data: lead_data = [lead.name, lead.lead_name, lead.company_name] + list(lead_info) lead_details.append(lead_data) return lead_details
494bd9ef78313436f0424b918f200dab8fc7c20b
15
prospects_engaged_but_not_converted.py
164
style: format code with black
14,009
0
27
100
35
65,767
41
erpnext
20
erpnext/crm/report/prospects_engaged_but_not_converted/prospects_engaged_but_not_converted.py
Python
33
{ "docstring": "\n\t\t\tselect\n\t\t\t\t`tabCommunication`.reference_doctype, `tabCommunication`.reference_name,\n\t\t\t\t`tabCommunication`.content, `tabCommunication`.communication_date\n\t\t\tfrom\n\t\t\t\t(\n\t\t\t\t\t(select name, party_name as lead from `tabOpportunity` where opportunity_from='Lead' and party_name = %(lead)s)\n\t\t\t\tunion\n\t\t\t\t\t(select name, party_name as lead from `tabQuotation` where quotation_to = 'Lead' and party_name = %(lead)s)\n\t\t\t\tunion\n\t\t\t\t\t(select name, lead from `tabIssue` where lead = %(lead)s and status!='Closed')\n\t\t\t\tunion\n\t\t\t\t\t(select %(lead)s, %(lead)s)\n\t\t\t\t)\n\t\t\t\tas ref_document, `tabCommunication`\n\t\t\twhere\n\t\t\t\t`tabCommunication`.reference_name = ref_document.name and\n\t\t\t\t`tabCommunication`.sent_or_received = 'Received'\n\t\t\torder by\n\t\t\t\tref_document.lead, `tabCommunication`.creation desc limit %(limit)s", "language": "en", "n_whitespaces": 52, "n_words": 71, "vocab_size": 40 }
https://github.com/frappe/erpnext.git
1
choose
def choose(self, choices): from dask.array.routines import choose return choose(self, choices)
2820bae493a49cb1d0a6e376985c5473b8f04fa8
7
core.py
36
Don't include docs in ``Array`` methods, just refer to module docs (#9244) Co-authored-by: James Bourbeau <[email protected]>
36,737
0
31
23
9
156,727
10
dask
6
dask/array/core.py
Python
3
{ "docstring": "Use an index array to construct a new array from a set of choices.\n\n Refer to :func:`dask.array.choose` for full documentation.\n\n See Also\n --------\n dask.array.choose : equivalent function\n ", "language": "en", "n_whitespaces": 62, "n_words": 27, "vocab_size": 24 }
https://github.com/dask/dask.git
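`dask.array.choose` follows the NumPy semantics referenced in the docstring above, so the behaviour can be illustrated with NumPy alone; the index and choice arrays below are arbitrary.

import numpy as np

index = np.array([0, 1, 0])              # which choice array to pick from, per position
choices = [np.array([1, 2, 3]),          # choice 0
           np.array([10, 20, 30])]       # choice 1

print(np.choose(index, choices))
# [ 1 20  3]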
3
newer
def newer(self, source, target): if not os.path.exists(source): raise DistlibException("file '%r' does not exist" % os.path.abspath(source)) if not os.path.exists(target): return True return os.stat(source).st_mtime > os.stat(target).st_mtime
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
13
util.py
108
upd; format
12,901
0
108
66
20
62,218
24
transferlearning
11
.venv/lib/python3.8/site-packages/pip/_vendor/distlib/util.py
Python
7
{ "docstring": "Tell if the target is newer than the source.\n\n Returns true if 'source' exists and is more recently modified than\n 'target', or if 'source' exists and 'target' doesn't.\n\n Returns false if both exist and 'target' is the same age or younger\n than 'source'. Raise PackagingFileError if 'source' does not exist.\n\n Note that this test is not very accurate: files created in the same\n second will have the same \"age\".\n ", "language": "en", "n_whitespaces": 118, "n_words": 69, "vocab_size": 45 }
https://github.com/jindongwang/transferlearning.git
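A hedged standalone sketch of the mtime comparison described above, using `os.stat` the same way the record does; the file names in the comment are placeholders.

import os

def newer(source, target):
    # True if `target` is missing or older than `source`; error if `source` is missing.
    if not os.path.exists(source):
        raise ValueError("file %r does not exist" % os.path.abspath(source))
    if not os.path.exists(target):
        return True
    return os.stat(source).st_mtime > os.stat(target).st_mtime

# e.g. rebuild only when the input changed since the output was last written:
# if newer("schema.proto", "schema_pb2.py"): regenerate()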
1
filldedent
def filldedent(s, w=70, **kwargs): return '\n' + fill(dedent(str(s)).strip('\n'), width=w, **kwargs)
2047f4855577845b1b99e0926b887d313725a6e7
14
misc.py
66
Pass keyword arguments to filldedent() through to fill()
48,569
0
16
38
10
197,470
10
sympy
9
sympy/utilities/misc.py
Python
2
{ "docstring": "\n Strips leading and trailing empty lines from a copy of ``s``, then dedents,\n fills and returns it.\n\n Empty line stripping serves to deal with docstrings like this one that\n start with a newline after the initial triple quote, inserting an empty\n line at the beginning of the string.\n\n Additional keyword arguments will be passed to ``textwrap.fill()``.\n\n See Also\n ========\n strlines, rawlines\n\n ", "language": "en", "n_whitespaces": 92, "n_words": 61, "vocab_size": 52 }
https://github.com/sympy/sympy.git
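The behaviour above is plain `textwrap` plumbing, so it can be sketched without importing SymPy; the sample triple-quoted text is arbitrary.

from textwrap import dedent, fill

def filldedent(s, w=70, **kwargs):
    # Strip outer blank lines, dedent, then re-wrap to width `w`.
    return "\n" + fill(dedent(str(s)).strip("\n"), width=w, **kwargs)

text = """
    This indented, triple-quoted block gets dedented and
    re-wrapped to the requested width.
"""
print(filldedent(text, w=40))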
1
test_regex
def test_regex(self) -> None: # Some of the undocumented endpoints which are very similar to # some of the documented endpoints. assert find_openapi_endpoint("/users/me/presence") is None assert find_openapi_endpoint("/users/me/subscriptions/23") is None assert find_openapi_endpoint("/users/iago/subscriptions/23") is None assert find_openapi_endpoint("/messages/matches_narrow") is None # Making sure documented endpoints are matched correctly. assert ( find_openapi_endpoint("/users/23/subscriptions/21") == "/users/{user_id}/subscriptions/{stream_id}" ) assert ( find_openapi_endpoint("/users/[email protected]/presence") == "/users/{user_id_or_email}/presence" ) assert find_openapi_endpoint("/users/[email protected]") == "/users/{email}" assert find_openapi_endpoint("/messages/23") == "/messages/{message_id}" assert find_openapi_endpoint("/realm/emoji/realm_emoji_1") == "/realm/emoji/{emoji_name}"
b0ce4f1bce8031881addecb1e86073483517f392
10
test_openapi.py
152
docs: Fix many spelling mistakes. Signed-off-by: Anders Kaseorg <[email protected]>
17,641
0
218
75
42
83,259
69
zulip
3
zerver/tests/test_openapi.py
Python
20
{ "docstring": "\n Calls a few documented and undocumented endpoints and checks whether they\n find a match or not.\n ", "language": "en", "n_whitespaces": 39, "n_words": 16, "vocab_size": 14 }
https://github.com/zulip/zulip.git
27
compare_total
def compare_total(self, other, context=None): other = _convert_other(other, raiseit=True) # if one is negative and the other is positive, it's easy if self._sign and not other._sign: return _NegativeOne if not self._sign and other._sign: return _One sign = self._sign # let's handle both NaN types self_nan = self._isnan() other_nan = other._isnan() if self_nan or other_nan: if self_nan == other_nan: # compare payloads as though they're integers self_key = len(self._int), self._int other_key = len(other._int), other._int if self_key < other_key: if sign: return _One else: return _NegativeOne if self_key > other_key: if sign: return _NegativeOne else: return _One return _Zero if sign: if self_nan == 1: return _NegativeOne if other_nan == 1: return _One if self_nan == 2: return _NegativeOne if other_nan == 2: return _One else: if self_nan == 1: return _One if other_nan == 1: return _NegativeOne if self_nan == 2: return _One if other_nan == 2: return _NegativeOne if self < other: return _NegativeOne if self > other: return _One if self._exp < other._exp: if sign: return _One else: return _NegativeOne if self._exp > other._exp: if sign: return _NegativeOne else: return _One return _Zero
8198943edd73a363c266633e1aa5b2a9e9c9f526
14
_pydecimal.py
391
add python 3.10.4 for windows
55,829
0
999
242
61
219,816
183
XX-Net
19
python3.10.4/Lib/_pydecimal.py
Python
57
{ "docstring": "Compares self to other using the abstract representations.\n\n This is not like the standard compare, which use their numerical\n value. Note that a total ordering is defined for all possible abstract\n representations.\n ", "language": "en", "n_whitespaces": 60, "n_words": 32, "vocab_size": 28 }
https://github.com/XX-net/XX-Net.git
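`compare_total` orders by abstract representation rather than numeric value, and the standard `decimal` module (where this record's code lives) exposes it directly; the operands below are arbitrary examples.

from decimal import Decimal

# Numerically equal, but 12.0 (exponent -1) sorts before 12 (exponent 0) in the total order.
print(Decimal("12.0").compare_total(Decimal("12")))   # -1
print(Decimal("12").compare_total(Decimal("12.0")))   # 1
print(Decimal("12").compare_total(Decimal("12")))     # 0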
1
_override_cmds
def _override_cmds(self) -> None: self.options.version_cmd[0] = self.options.ctl self.options.restart_cmd[0] = self.options.ctl self.options.conftest_cmd[0] = self.options.ctl self.options.get_modules_cmd[0] = self.options.ctl self.options.get_includes_cmd[0] = self.options.ctl self.options.get_defines_cmd[0] = self.options.ctl
eed1afb8082518a5212a6d8016a4ccf44c5ad99e
9
configurator.py
142
certbot-apache: use httpd by default for CentOS/RHEL (#9402) * certbot-apache: use httpd for newer RHEL derived distros A change in RHEL 9 is causing apachectl to error out when used with additional arguments, resulting in certbot errors. The CentOS configurator now uses httpd instead for RHEL 9 (and later) derived distros. * Single CentOS class which uses the apache_bin option * soothe mypy * Always call super()._override_cmds()
45,657
0
71
92
12
186,917
22
certbot
10
certbot-apache/certbot_apache/_internal/configurator.py
Python
10
{ "docstring": "\n Set our various command binaries to whatever the user has overridden for apachectl\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 13 }
https://github.com/certbot/certbot.git
5
_get_files
def _get_files(self, files): # Older versions of fsspec doesn't support unstrip_protocol(). It # was only added relatively recently: # https://github.com/fsspec/filesystem_spec/pull/828
b240370bf83c88589d293b76b4a2409294e06f90
6
parquet_dispatcher.py
18
FEAT-#4733: Support fastparquet as engine for `read_parquet` (#4807) Signed-off-by: Karthik Velayutham <[email protected]>
35,881
0
48
81
18
154,259
20
modin
3
modin/core/io/column_stores/parquet_dispatcher.py
Python
9
{ "docstring": "\n Retrieve list of formatted file names in dataset path.\n\n Parameters\n ----------\n files : list\n List of files from path.\n\n Returns\n -------\n fs_files : list\n List of files from path with fs-protocol prepended.\n ", "language": "en", "n_whitespaces": 111, "n_words": 32, "vocab_size": 22 }
https://github.com/modin-project/modin.git
3
get_wheel_cache_entry
def get_wheel_cache_entry(self, link, name): # type: (Link, Optional[str]) -> Optional[CacheEntry] if self._wheel_cache is None or self.preparer.require_hashes: return None return self._wheel_cache.get_cache_entry( link=link, package_name=name, supported_tags=get_supported(), )
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
10
factory.py
73
upd; format
12,412
0
103
47
22
61,110
24
transferlearning
11
.venv/lib/python3.8/site-packages/pip/_internal/resolution/resolvelib/factory.py
Python
8
{ "docstring": "Look up the link in the wheel cache.\n\n If ``preparer.require_hashes`` is True, don't use the wheel cache,\n because cached wheels, always built locally, have different hashes\n than the files downloaded from the index server and thus throw false\n hash mismatches. Furthermore, cached wheels at present have\n nondeterministic contents due to file modification times.\n ", "language": "en", "n_whitespaces": 95, "n_words": 53, "vocab_size": 46 }
https://github.com/jindongwang/transferlearning.git
2
broadcast
async def broadcast(self, data): for channel in self.channels.values(): await channel.send(data)
9f6bba40af1a407f190a89f5c0c8b4e3f528ba46
10
channel.py
45
initial concept for replicate, basic leader and follower logic
34,740
0
35
26
10
150,418
10
freqtrade
7
freqtrade/rpc/replicate/channel.py
Python
3
{ "docstring": "\n Broadcast data on all Channels\n\n :param data: The data to send\n ", "language": "en", "n_whitespaces": 33, "n_words": 11, "vocab_size": 10 }
https://github.com/freqtrade/freqtrade.git
4
_set_concurrent_future_state
def _set_concurrent_future_state(concurrent, source): assert source.done() if source.cancelled(): concurrent.cancel() if not concurrent.set_running_or_notify_cancel(): return exception = source.exception() if exception is not None: concurrent.set_exception(_convert_future_exc(exception)) else: result = source.result() concurrent.set_result(result)
8198943edd73a363c266633e1aa5b2a9e9c9f526
11
futures.py
124
add python 3.10.4 for windows
56,029
0
82
72
21
220,522
26
XX-Net
12
python3.10.4/Lib/asyncio/futures.py
Python
12
{ "docstring": "Copy state from a future to a concurrent.futures.Future.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 7 }
https://github.com/XX-net/XX-Net.git
10
test_repeated_paginate_relations
def test_repeated_paginate_relations(self): expected_event_ids = [] for idx in range(10): channel = self._send_relation( RelationTypes.ANNOTATION, "m.reaction", chr(ord("a") + idx) ) self.assertEquals(200, channel.code, channel.json_body) expected_event_ids.append(channel.json_body["event_id"]) prev_token = "" found_event_ids: List[str] = [] for _ in range(20): from_token = "" if prev_token: from_token = "&from=" + prev_token channel = self.make_request( "GET", f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) next_batch = channel.json_body.get("next_batch") self.assertNotEquals(prev_token, next_batch) prev_token = next_batch if not prev_token: break # We paginated backwards, so reverse found_event_ids.reverse() self.assertEquals(found_event_ids, expected_event_ids) # Reset and try again, but convert the tokens to the legacy format. prev_token = "" found_event_ids = [] for _ in range(20): from_token = "" if prev_token: from_token = "&from=" + self._stream_token_to_relation_token(prev_token) channel = self.make_request( "GET", f"/_matrix/client/unstable/rooms/{self.room}/relations/{self.parent_id}?limit=1{from_token}", access_token=self.user_token, ) self.assertEquals(200, channel.code, channel.json_body) found_event_ids.extend(e["event_id"] for e in channel.json_body["chunk"]) next_batch = channel.json_body.get("next_batch") self.assertNotEquals(prev_token, next_batch) prev_token = next_batch if not prev_token: break # We paginated backwards, so reverse found_event_ids.reverse() self.assertEquals(found_event_ids, expected_event_ids)
df36945ff0e4a293a9dac0da07e2c94256835b32
16
test_relations.py
544
Support pagination tokens from /sync and /messages in the relations API. (#11952)
71,143
0
689
305
66
246,308
148
synapse
33
tests/rest/client/test_relations.py
Python
48
{ "docstring": "Test that if we paginate using a limit and tokens then we get the\n expected events.\n ", "language": "en", "n_whitespaces": 30, "n_words": 16, "vocab_size": 15 }
https://github.com/matrix-org/synapse.git
3
_delete_composed_index
def _delete_composed_index(self, model, fields, *args): first_field = model._meta.get_field(fields[0]) if first_field.get_internal_type() == "ForeignKey": constraint_names = self._constraint_names( model, [first_field.column], index=True ) if not constraint_names: self.execute( self._create_index_sql(model, fields=[first_field], suffix="") ) return super()._delete_composed_index(model, fields, *args)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
15
schema.py
145
Refs #33476 -- Reformatted code with Black.
50,995
0
156
92
26
205,011
31
django
17
django/db/backends/mysql/schema.py
Python
11
{ "docstring": "\n MySQL can remove an implicit FK index on a field when that field is\n covered by another index like a unique_together. \"covered\" here means\n that the more complex index starts like the simpler one.\n https://bugs.mysql.com/bug.php?id=37910 / Django ticket #24757\n We check here before removing the [unique|index]_together if we have to\n recreate a FK index.\n ", "language": "en", "n_whitespaces": 104, "n_words": 54, "vocab_size": 43 }
https://github.com/django/django.git
4
climate_adc_t3000_missing_fan_mode_states_fixture
def climate_adc_t3000_missing_fan_mode_states_fixture(client, climate_adc_t3000_state): data = copy.deepcopy(climate_adc_t3000_state) data["name"] = f"{data['name']} missing fan mode states" for value in data["values"]: if ( value["commandClassName"] == "Thermostat Fan Mode" and value["property"] == "mode" ): del value["metadata"]["states"] node = Node(client, data) client.driver.controller.nodes[node.node_id] = node return node @pytest.fixture(name="climate_danfoss_lc_13")
21aa07e3e58268d6ca1425cd05abcfd847b9872c
@pytest.fixture(name="climate_danfoss_lc_13")
12
conftest.py
165
Add Z-Wave thermostat fan entity (#65865) * Add Z-Wave thermostat fan entity * Fix failing test, increase number of entities to 27 * Add tests to improve coverage * Take back unrelated changes to climate.py * Clean up guard clauses, use info.primary_value, and make entity disabled by default * Fix tests * Add more tests for code coverage * Remove unused const * Remove speed parameter from overridden method since it was removed from entity * Address PR comments
92,724
1
108
80
35
293,667
41
core
16
tests/components/zwave_js/conftest.py
Python
12
{ "docstring": "Mock a climate ADC-T3000 node with missing 'states' metadata on Thermostat Fan Mode.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
https://github.com/home-assistant/core.git
1
get_basefile
def get_basefile(self, tex, fontsize, dpi=None): src = self._get_tex_source(tex, fontsize) + str(dpi) return os.path.join( self.texcache, hashlib.md5(src.encode('utf-8')).hexdigest())
4ea309aac5b6b638967d19107db297b5c7bde551
14
texmanager.py
89
Pick TeX cache name based on entire TeX source. This allows invalidating the cache when the source generation algorithm changes.
23,006
0
47
56
15
107,997
15
matplotlib
16
lib/matplotlib/texmanager.py
Python
4
{ "docstring": "\n Return a filename based on a hash of the string, fontsize, and dpi.\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 12 }
https://github.com/matplotlib/matplotlib.git
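The record above derives a cache filename from a hash of the TeX source, font size, and dpi. A minimal sketch of that hash-to-filename idea with `hashlib` follows; `cache_basefile` and the cache directory name are illustrative placeholders, not matplotlib API.

import hashlib
import os

def cache_basefile(tex, fontsize, dpi=None, cache_dir="texcache"):
    # Hash every input that affects the rendered output so the cache key
    # changes whenever any of them does.
    src = "%s %s %s" % (tex, fontsize, dpi)
    return os.path.join(cache_dir, hashlib.md5(src.encode("utf-8")).hexdigest())

print(cache_basefile(r"$\alpha$", 12, dpi=100))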
2
get_dict_for_field
def get_dict_for_field(self, field_name): try: field = self.model._meta.get_field(field_name) except FieldDoesNotExist: field = None return { "label": self.get_field_label(field_name, field), "value": self.get_field_display_value(field_name, field), }
d10f15e55806c6944827d801cd9c2d53f5da4186
12
views.py
87
Reformat with black
16,009
0
100
53
18
73,300
21
wagtail
10
wagtail/contrib/modeladmin/views.py
Python
9
{ "docstring": "\n Return a dictionary containing `label` and `value` values to display\n for a field.\n ", "language": "en", "n_whitespaces": 35, "n_words": 13, "vocab_size": 12 }
https://github.com/wagtail/wagtail.git
6
_upsample_2d
def _upsample_2d(self, hidden_states, weight=None, kernel=None, factor=2, gain=1): assert isinstance(factor, int) and factor >= 1 # Setup filter kernel. if kernel is None: kernel = [1] * factor # setup kernel kernel = torch.tensor(kernel, dtype=torch.float32) if kernel.ndim == 1: kernel = torch.outer(kernel, kernel) kernel /= torch.sum(kernel) kernel = kernel * (gain * (factor**2)) if self.use_conv: convH = weight.shape[2] convW = weight.shape[3] inC = weight.shape[1] pad_value = (kernel.shape[0] - factor) - (convW - 1) stride = (factor, factor) # Determine data dimensions. output_shape = ( (hidden_states.shape[2] - 1) * factor + convH, (hidden_states.shape[3] - 1) * factor + convW, ) output_padding = ( output_shape[0] - (hidden_states.shape[2] - 1) * stride[0] - convH, output_shape[1] - (hidden_states.shape[3] - 1) * stride[1] - convW, ) assert output_padding[0] >= 0 and output_padding[1] >= 0 num_groups = hidden_states.shape[1] // inC # Transpose weights. weight = torch.reshape(weight, (num_groups, -1, inC, convH, convW)) weight = torch.flip(weight, dims=[3, 4]).permute(0, 2, 1, 3, 4) weight = torch.reshape(weight, (num_groups * inC, -1, convH, convW)) inverse_conv = F.conv_transpose2d( hidden_states, weight, stride=stride, output_padding=output_padding, padding=0 ) output = upfirdn2d_native( inverse_conv, torch.tensor(kernel, device=inverse_conv.device), pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2 + 1), ) else: pad_value = kernel.shape[0] - factor output = upfirdn2d_native( hidden_states, torch.tensor(kernel, device=hidden_states.device), up=factor, pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2), ) return output
a73f8b725105b12a60a9b22918bda68f8b6d26c3
18
resnet.py
660
Clean up resnet.py file (#780) * clean up resnet.py * make style and quality * minor formatting
120,955
0
759
430
109
337,070
220
diffusers
39
src/diffusers/models/resnet.py
Python
45
{ "docstring": "Fused `upsample_2d()` followed by `Conv2d()`.\n\n Padding is performed only once at the beginning, not between the operations. The fused op is considerably more\n efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of\n arbitrary order.\n\n Args:\n hidden_states: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.\n weight: Weight tensor of the shape `[filterH, filterW, inChannels,\n outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`.\n kernel: FIR filter of the shape `[firH, firW]` or `[firN]`\n (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling.\n factor: Integer upsampling factor (default: 2).\n gain: Scaling factor for signal magnitude (default: 1.0).\n\n Returns:\n output: Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same\n datatype as `hidden_states`.\n ", "language": "en", "n_whitespaces": 289, "n_words": 140, "vocab_size": 103 }
https://github.com/huggingface/diffusers.git
6
_update_lines
def _update_lines(self, lines, new_line): code_matches = [x for x in re.finditer(_invisible_codes, new_line)] color_codes = [ code.string[code.span()[0] : code.span()[1]] for code in code_matches ] # Add color codes from earlier in the unwrapped line, and then track any new ones we add. new_line = "".join(self._active_codes) + new_line for code in color_codes: if code != _ansi_color_reset_code: self._active_codes.append(code) else: # A single reset code resets everything self._active_codes = [] # Always ensure each line is color terminted if any colors are # still active, otherwise colors will bleed into other cells on the console if len(self._active_codes) > 0: new_line = new_line + _ansi_color_reset_code lines.append(new_line)
adf24bfa9723b0621183bb27f0c889b813c06e8a
12
tabulate.py
186
[State Observability] Use a table format by default (#26159) NOTE: tabulate is copied/pasted to the codebase for table formatting. This PR changes the default layout to be the table format for both summary and list APIs.
27,807
0
253
115
75
125,203
101
ray
18
python/ray/_private/thirdparty/tabulate/tabulate.py
Python
14
{ "docstring": "Adds a new line to the list of lines the text is being wrapped into\n This function will also track any ANSI color codes in this string as well\n as add any colors from previous lines order to preserve the same formatting\n as a single unwrapped string.\n ", "language": "en", "n_whitespaces": 75, "n_words": 47, "vocab_size": 39 }
https://github.com/ray-project/ray.git
1
score
def score(self, X, y, **fit_params): check_is_fitted(self) return self.estimator_.score(self.transform(X), y, **fit_params)
6e5ef2e9b8c64e6788428610ae884b9bf3d298a2
9
_rfe.py
56
MAINT solve long line reported by flake8 (#24065)
76,391
0
31
36
9
260,641
10
scikit-learn
8
sklearn/feature_selection/_rfe.py
Python
3
{ "docstring": "Reduce X to the selected features and return the score of the estimator.\n\n Parameters\n ----------\n X : array of shape [n_samples, n_features]\n The input samples.\n\n y : array of shape [n_samples]\n The target values.\n\n **fit_params : dict\n Parameters to pass to the `score` method of the underlying\n estimator.\n\n .. versionadded:: 1.0\n\n Returns\n -------\n score : float\n Score of the underlying base estimator computed with the selected\n features returned by `rfe.transform(X)` and `y`.\n ", "language": "en", "n_whitespaces": 212, "n_words": 72, "vocab_size": 46 }
https://github.com/scikit-learn/scikit-learn.git
1
allowlist_svg
def allowlist_svg(dirty_xml): from lxml.html import clean allow_tags = [ 'xml', 'svg', 'circle', 'ellipse', 'line', 'path', 'polygon', 'polyline', 'rect' ] cleaner = clean.Cleaner( allow_tags=allow_tags, style=True, links=True, add_nofollow=False, page_structure=True, safe_attrs_only=False, remove_unknown_tags=False) clean_xml = cleaner.clean_html(dirty_xml) return clean_xml
53f6308186aa131946e196b0409f3d732ec9e007
9
uploader.py
126
fix: DEV-2236: Stored XSS via SVG file (#2273) * user uploaded content rendered as plain text or known image only * allow list for svg in progress * allow list for svg basic pass * add error handling * add to file processing re: code review * rm uneeded code * add env var to disable svg cleaning * add test * update env setting * rm lxml string methods * Update uploader.py * Update base.py Co-authored-by: Max Tkachenko <[email protected]>
42,555
0
231
77
31
177,986
34
label-studio
16
label_studio/data_import/uploader.py
Python
23
{ "docstring": "Filter out malicious/harmful content from SVG files\n by defining allowed tags\n ", "language": "en", "n_whitespaces": 17, "n_words": 11, "vocab_size": 11 }
https://github.com/heartexlabs/label-studio.git
1
test_save_periodic_pipeline
def test_save_periodic_pipeline(): tpot_obj = TPOTClassifier( random_state=42, population_size=1, offspring_size=2, generations=1, verbosity=0, config_dict='TPOT light' ) tpot_obj.fit(training_features, training_target) with closing(StringIO()) as our_file: tpot_obj.log_file_ = our_file tpot_obj.verbosity = 3 tpot_obj._last_pipeline_write = datetime.now() sleep(0.11) tpot_obj._output_best_pipeline_period_seconds = 0.1 tmpdir = mkdtemp() + '/' tpot_obj.periodic_checkpoint_folder = tmpdir # reset _pareto_front to rasie exception tpot_obj._pareto_front = None tpot_obj._save_periodic_pipeline(1) our_file.seek(0) assert_in('Failed saving periodic pipeline, exception', our_file.read()) #clean up rmtree(tmpdir)
388616b6247ca4ea8de4e2f340d6206aee523541
12
tpot_tests.py
207
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
43,518
0
215
126
52
181,731
60
tpot
30
tests/tpot_tests.py
Python
23
{ "docstring": "Assert that the _save_periodic_pipeline does not export periodic pipeline if exception happened", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/EpistasisLab/tpot.git
2
tzname
def tzname(self): if self._tzinfo is None: return None name = self._tzinfo.tzname(None) _check_tzname(name) return name
8198943edd73a363c266633e1aa5b2a9e9c9f526
9
datetime.py
53
add python 3.10.4 for windows
56,547
0
60
31
12
222,361
14
XX-Net
5
python3.10.4/Lib/datetime.py
Python
6
{ "docstring": "Return the timezone name.\n\n Note that the name is 100% informational -- there's no requirement that\n it mean anything in particular. For example, \"GMT\", \"UTC\", \"-500\",\n \"-5:00\", \"EDT\", \"US/Eastern\", \"America/New York\" are all valid replies.\n ", "language": "en", "n_whitespaces": 63, "n_words": 35, "vocab_size": 33 }
https://github.com/XX-net/XX-Net.git
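`tzname` simply defers to the attached `tzinfo`, which is easy to see with the standard `datetime` module this record comes from; the dates and offsets below are arbitrary.

from datetime import datetime, timezone, timedelta

print(datetime(2022, 1, 1).tzname())                        # None (naive datetime)
print(datetime(2022, 1, 1, tzinfo=timezone.utc).tzname())   # UTC
print(datetime(2022, 1, 1,
               tzinfo=timezone(timedelta(hours=-5), "EST")).tzname())  # EST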
1
gcn_loss
def gcn_loss(self, gcn_data): gcn_pred, gt_labels = gcn_data gt_labels = gt_labels.reshape([-1]) loss = F.cross_entropy(gcn_pred, gt_labels) return loss
1f9400dd7374ce9cc47981372e324ff412e53ba3
10
det_drrg_loss.py
59
add drrg
4,861
0
51
36
12
25,192
16
PaddleOCR
9
ppocr/losses/det_drrg_loss.py
Python
5
{ "docstring": "CrossEntropy Loss from gcn module.\n\n Args:\n gcn_data (tuple(Tensor, Tensor)): The first is the\n prediction with shape :math:`(N, 2)` and the\n second is the gt label with shape :math:`(m, n)`\n where :math:`m * n = N`.\n\n Returns:\n Tensor: CrossEntropy loss.\n ", "language": "en", "n_whitespaces": 127, "n_words": 39, "vocab_size": 33 }
https://github.com/PaddlePaddle/PaddleOCR.git
1
add_never_cache_headers
def add_never_cache_headers(response): patch_response_headers(response, cache_timeout=-1) patch_cache_control( response, no_cache=True, no_store=True, must_revalidate=True, private=True )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
9
cache.py
54
Refs #33476 -- Reformatted code with Black.
51,571
0
30
35
11
206,572
11
django
9
django/utils/cache.py
Python
5
{ "docstring": "\n Add headers to a response to indicate that a page should never be cached.\n ", "language": "en", "n_whitespaces": 21, "n_words": 14, "vocab_size": 12 }
https://github.com/django/django.git
1
test_n_step_very_short_trajectory
def test_n_step_very_short_trajectory(self): gamma = 1.0 obs = np.arange(0, 2) actions = np.random.randint(-100, 300, size=(2,)) check_actions = actions.copy() rewards = [10.0, 100.0] next_obs = np.arange(1, 3) batch = SampleBatch( { SampleBatch.OBS: obs, SampleBatch.ACTIONS: actions, SampleBatch.REWARDS: rewards, SampleBatch.TERMINATEDS: [False, False], SampleBatch.TRUNCATEDS: [False, False], SampleBatch.NEXT_OBS: next_obs, } ) adjust_nstep(3, gamma, batch) check(batch[SampleBatch.OBS], [0, 1]) check(batch[SampleBatch.ACTIONS], check_actions) check(batch[SampleBatch.TERMINATEDS], [False, False]) check(batch[SampleBatch.TRUNCATEDS], [False, False]) check(batch[SampleBatch.REWARDS], [10.0 + gamma * 100.0, 100.0]) check(batch[SampleBatch.NEXT_OBS], [2, 2])
8e680c483ce326cefc62e44f68ab1a6948b1c3d2
11
test_postprocessing.py
307
[RLlib] gymnasium support (new `Env.reset()/step()/seed()/render()` APIs). (#28369)
31,248
0
293
224
57
137,797
69
ray
24
rllib/evaluation/tests/test_postprocessing.py
Python
24
{ "docstring": "Tests, whether n-step also works for very small trajectories.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/ray-project/ray.git
6
_create_index_name
def _create_index_name(self, table_name, column_names, suffix=""): _, table_name = split_identifier(table_name) hash_suffix_part = "%s%s" % ( names_digest(table_name, *column_names, length=8), suffix, ) max_length = self.connection.ops.max_name_length() or 200 # If everything fits into max_length, use that name. index_name = "%s_%s_%s" % (table_name, "_".join(column_names), hash_suffix_part) if len(index_name) <= max_length: return index_name # Shorten a long suffix. if len(hash_suffix_part) > max_length / 3: hash_suffix_part = hash_suffix_part[: max_length // 3] other_length = (max_length - len(hash_suffix_part)) // 2 - 1 index_name = "%s_%s_%s" % ( table_name[:other_length], "_".join(column_names)[:other_length], hash_suffix_part, ) # Prepend D if needed to prevent the name from starting with an # underscore or a number (not permitted on Oracle). if index_name[0] == "_" or index_name[0].isdigit(): index_name = "D%s" % index_name[:-1] return index_name
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
schema.py
276
Refs #33476 -- Reformatted code with Black.
50,980
0
323
163
84
204,926
116
django
19
django/db/backends/base/schema.py
Python
21
{ "docstring": "\n Generate a unique name for an index/unique constraint.\n\n The name is divided into 3 parts: the table name, the column names,\n and a unique digest and suffix.\n ", "language": "en", "n_whitespaces": 56, "n_words": 27, "vocab_size": 22 }
https://github.com/django/django.git
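A hedged sketch of the naming scheme described above (table name + column names + a short digest and suffix, truncated to a backend length limit); the digest is approximated here with `hashlib` and the length limit is arbitrary, so this illustrates the idea rather than Django's exact algorithm.

import hashlib

def index_name(table, columns, max_length=30, suffix="_idx"):
    digest = hashlib.md5("_".join([table, *columns]).encode()).hexdigest()[:8]
    name = "%s_%s_%s%s" % (table, "_".join(columns), digest, suffix)
    if len(name) <= max_length:
        return name
    # Too long: keep the digest and suffix intact, split the rest evenly.
    tail = digest + suffix
    other = (max_length - len(tail)) // 2 - 1
    return "%s_%s_%s" % (table[:other], "_".join(columns)[:other], tail)

print(index_name("shop_product", ["category_id", "created_at"]))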
3
approve
def approve(self, user=None, update=True, comment=""): if self.status != self.STATUS_IN_PROGRESS: raise PermissionDenied self.status = self.STATUS_APPROVED self.finished_at = timezone.now() self.finished_by = user self.comment = comment self.save() self.log_state_change_action(user, "approve") if update: self.workflow_state.update(user=user) task_approved.send( sender=self.specific.__class__, instance=self.specific, user=user ) return self
d10f15e55806c6944827d801cd9c2d53f5da4186
10
__init__.py
168
Reformat with black
16,117
0
153
105
31
73,800
36
wagtail
22
wagtail/core/models/__init__.py
Python
15
{ "docstring": "Approve the task state and update the workflow state", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 7 }
https://github.com/wagtail/wagtail.git
1
minimize
def minimize(self, loss, var_list, grad_loss=None, name=None, tape=None): grads_and_vars = self._compute_gradients( loss, var_list=var_list, grad_loss=grad_loss, tape=tape ) return self.apply_gradients(grads_and_vars, name=name)
84afc5193d38057e2e2badf9c889ea87d80d8fbf
9
optimizer_v2.py
76
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,417
0
57
53
17
275,527
18
keras
10
keras/optimizers/optimizer_v2/optimizer_v2.py
Python
5
{ "docstring": "Minimize `loss` by updating `var_list`.\n\n This method simply computes gradient using `tf.GradientTape` and calls\n `apply_gradients()`. If you want to process the gradient before applying\n then call `tf.GradientTape` and `apply_gradients()` explicitly instead\n of using this function.\n\n Args:\n loss: `Tensor` or callable. If a callable, `loss` should take no arguments\n and return the value to minimize. If a `Tensor`, the `tape` argument\n must be passed.\n var_list: list or tuple of `Variable` objects to update to minimize\n `loss`, or a callable returning the list or tuple of `Variable` objects.\n Use callable when the variable list would otherwise be incomplete before\n `minimize` since the variables are created at the first time `loss` is\n called.\n grad_loss: (Optional). A `Tensor` holding the gradient computed for\n `loss`.\n name: (Optional) str. Name for the returned operation.\n tape: (Optional) `tf.GradientTape`. If `loss` is provided as a `Tensor`,\n the tape that computed the `loss` must be provided.\n\n Returns:\n An `Operation` that updates the variables in `var_list`. The `iterations`\n will be automatically increased by 1.\n\n Raises:\n ValueError: If some of the variables are not `Variable` objects.\n\n ", "language": "en", "n_whitespaces": 391, "n_words": 175, "vocab_size": 113 }
https://github.com/keras-team/keras.git
1
test_render_config
def test_render_config(tmpdir): user_config_path = os.path.join(tmpdir, "config.yaml") input_features = [ number_feature(), number_feature(), category_feature(encoder={"vocab_size": 3}), category_feature(encoder={"vocab_size": 3}), ] output_features = [category_feature(decoder={"vocab_size": 3})] user_config = { INPUT_FEATURES: input_features, OUTPUT_FEATURES: output_features, } with open(user_config_path, "w") as f: yaml.dump(user_config, f) output_config_path = os.path.join(tmpdir, "rendered.yaml") _run_ludwig("render_config", config=user_config_path, output=output_config_path) rendered_config = load_yaml(output_config_path) assert len(rendered_config[INPUT_FEATURES]) == len(user_config[INPUT_FEATURES]) assert len(rendered_config[OUTPUT_FEATURES]) == len(user_config[OUTPUT_FEATURES]) assert TRAINER in rendered_config assert COMBINER in rendered_config assert PREPROCESSING in rendered_config
5a25d1f2a914c849cf978573c5993411151bc447
13
test_cli.py
276
Added tests for init_config and render_config CLI commands (#2551)
1,363
0
162
170
46
8,165
65
ludwig
29
tests/integration_tests/test_cli.py
Python
23
{ "docstring": "Test rendering a full config from a partial user config.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
https://github.com/ludwig-ai/ludwig.git
8
update_info
def update_info(self, info) -> None: for key in self._info_fields: value = getattr(self, key, None) idx = info.setdefault(self.name, {}) existing_value = idx.get(key) if key in idx and value is not None and existing_value != value: # frequency/name just warn if key in ["freq", "index_name"]: ws = attribute_conflict_doc % (key, existing_value, value) warnings.warn( ws, AttributeConflictWarning, stacklevel=find_stack_level() ) # reset idx[key] = None setattr(self, key, None) else: raise ValueError( f"invalid info for [{self.name}] for [{key}], " f"existing_value [{existing_value}] conflicts with " f"new value [{value}]" ) else: if value is not None or existing_value is not None: idx[key] = value
7d2f9b8d59908fbf57c6453bc41891efbfe981a6
18
pytables.py
240
TYP: some return annotations in pytables.py (#47512)
39,980
0
488
141
61
167,373
96
pandas
21
pandas/io/pytables.py
Python
26
{ "docstring": "\n set/update the info for this indexable with the key/value\n if there is a conflict raise/warn as needed\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 16 }
https://github.com/pandas-dev/pandas.git
8
split_dataset
def split_dataset(items, eval_split_max_size=None, eval_split_size=0.01): speakers = [item["speaker_name"] for item in items] is_multi_speaker = len(set(speakers)) > 1 if eval_split_size > 1: eval_split_size = int(eval_split_size) else: if eval_split_max_size: eval_split_size = min(eval_split_max_size, int(len(items) * eval_split_size)) else: eval_split_size = int(len(items) * eval_split_size) assert ( eval_split_size > 0 ), " [!] You do not have enough samples for the evaluation set. You can work around this setting the 'eval_split_size' parameter to a minimum of {}".format( 1 / len(items) ) np.random.seed(0) np.random.shuffle(items) if is_multi_speaker: items_eval = [] speakers = [item["speaker_name"] for item in items] speaker_counter = Counter(speakers) while len(items_eval) < eval_split_size: item_idx = np.random.randint(0, len(items)) speaker_to_be_removed = items[item_idx]["speaker_name"] if speaker_counter[speaker_to_be_removed] > 1: items_eval.append(items[item_idx]) speaker_counter[speaker_to_be_removed] -= 1 del items[item_idx] return items_eval, items return items[:eval_split_size], items[eval_split_size:]
1425a023fe4bc6bda8578295aeeeb02af78cc082
18
__init__.py
347
Make style and lint
77,180
0
324
219
82
262,317
118
TTS
23
TTS/tts/datasets/__init__.py
Python
30
{ "docstring": "Split a dataset into train and eval. Consider speaker distribution in multi-speaker training.\n\n Args:\n <<<<<<< HEAD\n items (List[List]):\n A list of samples. Each sample is a list of `[audio_path, text, speaker_id]`.\n\n eval_split_max_size (int):\n Number maximum of samples to be used for evaluation in proportion split. Defaults to None (Disabled).\n\n eval_split_size (float):\n If between 0.0 and 1.0 represents the proportion of the dataset to include in the evaluation set.\n If > 1, represents the absolute number of evaluation samples. Defaults to 0.01 (1%).\n =======\n items (List[List]): A list of samples. Each sample is a list of `[text, audio_path, speaker_id]`.\n >>>>>>> Fix docstring\n ", "language": "en", "n_whitespaces": 224, "n_words": 101, "vocab_size": 65 }
https://github.com/coqui-ai/TTS.git
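A small sketch of just the `eval_split_size` arithmetic described above (absolute count vs. fraction, optionally capped by `eval_split_max_size`); the sample counts are arbitrary and the multi-speaker balancing loop is deliberately left out.

def resolve_eval_size(n_items, eval_split_size=0.01, eval_split_max_size=None):
    # > 1 means an absolute sample count; otherwise treat it as a fraction,
    # optionally capped by eval_split_max_size.
    if eval_split_size > 1:
        return int(eval_split_size)
    size = int(n_items * eval_split_size)
    if eval_split_max_size:
        size = min(eval_split_max_size, size)
    return size

print(resolve_eval_size(10_000))                           # 100
print(resolve_eval_size(10_000, eval_split_size=50))       # 50
print(resolve_eval_size(10_000, eval_split_max_size=64))   # 64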
7
center
def center(G, e=None, usebounds=False, weight=None): if usebounds is True and e is None and not G.is_directed(): return _extrema_bounding(G, compute="center", weight=weight) if e is None: e = eccentricity(G, weight=weight) radius = min(e.values()) p = [v for v in e if e[v] == radius] return p
28f78cfa9a386620ee1179582fda1db5ffc59f84
11
distance_measures.py
140
Add weight distance metrics (#5305) Adds the weight keyword argument to allow users to compute weighted distance metrics e.g. diameter, eccentricity, periphery, etc. The kwarg works in the same fashion as the weight param for shortest paths - i.e. if a string, look up with edge attr by key, if callable, compute the weight via the function. Default is None, meaning return unweighted result which is the current behavior. Co-authored-by: Dan Schult <[email protected]> Co-authored-by: Ross Barnowski <[email protected]>
42,268
0
76
90
31
177,081
44
networkx
14
networkx/algorithms/distance_measures.py
Python
8
{ "docstring": "Returns the center of the graph G.\n\n The center is the set of nodes with eccentricity equal to radius.\n\n Parameters\n ----------\n G : NetworkX graph\n A graph\n\n e : eccentricity dictionary, optional\n A precomputed dictionary of eccentricities.\n\n weight : string, function, or None\n If this is a string, then edge weights will be accessed via the\n edge attribute with this key (that is, the weight of the edge\n joining `u` to `v` will be ``G.edges[u, v][weight]``). If no\n such edge attribute exists, the weight of the edge is assumed to\n be one.\n\n If this is a function, the weight of an edge is the value\n returned by the function. The function must accept exactly three\n positional arguments: the two endpoints of an edge and the\n dictionary of edge attributes for that edge. The function must\n return a number.\n\n If this is None, every edge has weight/distance/cost 1.\n\n Weights stored as floating point values can lead to small round-off\n errors in distances. Use integer weights to avoid this.\n\n Weights should be positive, since they are distances.\n\n Returns\n -------\n c : list\n List of nodes in center\n\n Examples\n --------\n >>> G = nx.Graph([(1, 2), (1, 3), (1, 4), (3, 4), (3, 5), (4, 5)])\n >>> list(nx.center(G))\n [1, 3, 4]\n\n See Also\n --------\n barycenter\n periphery\n ", "language": "en", "n_whitespaces": 384, "n_words": 212, "vocab_size": 129 }
https://github.com/networkx/networkx.git
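For the unweighted case, the center is the set of nodes whose BFS eccentricity equals the radius; a standalone sketch over a small adjacency dict follows (NetworkX computes the same quantities when `weight=None`, and the path-graph example is arbitrary).

from collections import deque

def eccentricity(adj, source):
    # Longest shortest-path distance from `source`, via BFS.
    dist = {source: 0}
    queue = deque([source])
    while queue:
        u = queue.popleft()
        for v in adj[u]:
            if v not in dist:
                dist[v] = dist[u] + 1
                queue.append(v)
    return max(dist.values())

# Path graph 1-2-3-4-5: radius is 2, center is {3}.
adj = {1: [2], 2: [1, 3], 3: [2, 4], 4: [3, 5], 5: [4]}
ecc = {v: eccentricity(adj, v) for v in adj}
radius = min(ecc.values())
print([v for v in ecc if ecc[v] == radius])   # [3]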
37
infer_concrete_type_builder
def infer_concrete_type_builder(nn_module, share_types=True): concrete_type_builder = torch._C.ConcreteModuleTypeBuilder(type(nn_module)) if isinstance(nn_module, (torch.nn.ModuleDict)): concrete_type_builder.set_module_dict() if isinstance(nn_module, (torch.nn.ModuleList, torch.nn.Sequential)): concrete_type_builder.set_module_list() class_annotations = getattr(nn_module, '__annotations__', {}) if isinstance(nn_module, (torch.ao.quantization.QuantWrapper)): class_annotations = {} # Get user-annotated ignored attributes. user_annotated_ignored_attributes = getattr(nn_module, "__jit_ignored_attributes__", list()) concrete_type_builder.add_ignored_attributes(user_annotated_ignored_attributes) ignored_properties = jit_ignored_properties(nn_module) # try to infer the type from type annotation or from the object itself
8e6d1738a41cb28c36db3dc5e10fd812417673de
11
_recursive.py
198
Per-overload torch.ops API (#67254) Summary: Pull Request resolved: https://github.com/pytorch/pytorch/pull/67254 Fixes https://github.com/pytorch/pytorch/issues/65997 TODO: disallow `default` as an overload name for aten operators. BC breaking: `output = torch.ops._test.leaky_relu(self=torch.tensor(-1.0))` now fails with the error `TypeError: __call__() got multiple values for argument 'self'` since we call into `OpOverloadBundle`'s `__call__` method that has `self` bound to it as its first argument. cc ezyang gchanan Test Plan: Imported from OSS Reviewed By: albanD Differential Revision: D33262228 Pulled By: anjali411 fbshipit-source-id: 600dbf511514ea9b41aea3e6b1bc1102dab08909
21,503
0
108
846
40
102,252
54
pytorch
25
torch/jit/_recursive.py
Python
120
{ "docstring": "\n Build a ConcreteModuleTypeBuilder from an nn.Module. This\n ConcreteModuleType doesn't have a JIT type associated with it yet, it\n must be filled in by the caller.\n ", "language": "en", "n_whitespaces": 38, "n_words": 25, "vocab_size": 23 }
https://github.com/pytorch/pytorch.git
6
to_undirected
def to_undirected(self, as_view=False): graph_class = self.to_undirected_class() if as_view is True: return nx.graphviews.generic_graph_view(self, graph_class) # deepcopy when not a view G = graph_class() G.graph.update(deepcopy(self.graph)) G.add_nodes_from((n, deepcopy(d)) for n, d in self._node.items()) G.add_edges_from( (u, v, key, deepcopy(datadict)) for u, nbrs in self._adj.items() for v, keydict in nbrs.items() for key, datadict in keydict.items() ) return G
8f4c99debc9440728c5e85f8bffa5d26b232eb6f
11
multigraph.py
198
Multigraph docs update (#5389) * Updated MultiDiGraph documentation to include more examples of actually using parallel edges, and fixed references to things like G[u, v] where G[u, v, k] is required for a MultiDigraph. Have not made parallel changes in MultiGraph which should maybe also be made? Docs tests pass on my end; no code outside of comments was changed. -Peter Mawhorter * Updated docs for MultiGraph to add more multigraph-specific examples and fix a few places where untested examples were wrong. -Peter Mawhorter * [DOC] fix typo * add the right amount of separators Co-authored-by: Mridul Seth <[email protected]>
41,880
0
178
127
42
176,415
53
networkx
25
networkx/classes/multigraph.py
Python
14
{ "docstring": "Returns an undirected copy of the graph.\n\n Returns\n -------\n G : Graph/MultiGraph\n A deepcopy of the graph.\n\n See Also\n --------\n copy, add_edge, add_edges_from\n\n Notes\n -----\n This returns a \"deepcopy\" of the edge, node, and\n graph attributes which attempts to completely copy\n all of the data and references.\n\n This is in contrast to the similar `G = nx.MultiGraph(D)`\n which returns a shallow copy of the data.\n\n See the Python copy module for more information on shallow\n and deep copies, https://docs.python.org/3/library/copy.html.\n\n Warning: If you have subclassed MultiGraph to use dict-like\n objects in the data structure, those changes do not transfer\n to the MultiGraph created by this method.\n\n Examples\n --------\n >>> G = nx.MultiGraph([(0, 1), (0, 1), (1, 2)])\n >>> H = G.to_directed()\n >>> list(H.edges)\n [(0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 2, 0), (2, 1, 0)]\n >>> G2 = H.to_undirected()\n >>> list(G2.edges)\n [(0, 1, 0), (0, 1, 1), (1, 2, 0)]\n ", "language": "en", "n_whitespaces": 362, "n_words": 155, "vocab_size": 94 }
https://github.com/networkx/networkx.git
2
rgb_to_hsv
def rgb_to_hsv(arr): arr = np.asarray(arr) # check length of the last dimension, should be _some_ sort of rgb if arr.shape[-1] != 3: raise ValueError("Last dimension of input array must be 3; " "shape {} was found.".format(arr.shape)) in_shape = arr.shape arr = np.array( arr, copy=False, dtype=np.promote_types(arr.dtype, np.float32), # Don't work on ints. ndmin=2, # In case input was 1D. ) out = np.zeros_like(arr) arr_max = arr.max(-1) ipos = arr_max > 0 delta = arr.ptp(-1) s = np.zeros_like(delta) s[ipos] = delta[ipos] / arr_max[ipos] ipos = delta > 0 # red is max idx = (arr[..., 0] == arr_max) & ipos out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx] # green is max idx = (arr[..., 1] == arr_max) & ipos out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx] # blue is max idx = (arr[..., 2] == arr_max) & ipos out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx] out[..., 0] = (out[..., 0] / 6.0) % 1.0 out[..., 1] = s out[..., 2] = arr_max return out.reshape(in_shape)
9b6abd0b4933811e0a45c2535ab8fd107db65dd9
13
colors.py
452
DOC: improve grammar and consistency
24,007
0
310
308
95
110,265
175
matplotlib
24
lib/matplotlib/colors.py
Python
28
{ "docstring": "\n Convert float RGB values (in the range [0, 1]), in a numpy array to HSV\n values.\n\n Parameters\n ----------\n arr : (..., 3) array-like\n All values must be in the range [0, 1]\n\n Returns\n -------\n (..., 3) ndarray\n Colors converted to HSV values in range [0, 1]\n ", "language": "en", "n_whitespaces": 86, "n_words": 46, "vocab_size": 32 }
https://github.com/matplotlib/matplotlib.git
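The conversion can be spot-checked against the standard library's `colorsys`, which implements the same scalar RGB-to-HSV mapping that the array version above vectorizes over the last axis; the sample colors are arbitrary.

import colorsys

# Pure red, pure green, and a mid grey: hue/saturation/value all in [0, 1].
print(colorsys.rgb_to_hsv(1.0, 0.0, 0.0))   # (0.0, 1.0, 1.0)
print(colorsys.rgb_to_hsv(0.0, 1.0, 0.0))   # (0.333..., 1.0, 1.0)
print(colorsys.rgb_to_hsv(0.5, 0.5, 0.5))   # (0.0, 0.0, 0.5)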
4
__len__
def __len__(self) -> int: # do a DFS to count the number of leaf nodes count = 0 stack = [self._data] while stack: node = stack.pop() if isinstance(node, NestedDict): node = node._data if isinstance(node, Mapping): stack.extend(node.values()) else: count += 1 return count
c8dbbf374661d89e620b472a8c62aa3ce3c6ce98
13
nested_dict.py
111
[RLlib] Introduction of Nested dict for RLModule construction (#29027) Signed-off-by: Kourosh Hakhamaneshi <[email protected]>
28,718
0
173
66
33
128,493
42
ray
13
rllib/utils/nested_dict.py
Python
15
{ "docstring": "Returns the number of leaf nodes in the `NestedDict` that\n are not of type Mappings.\n ", "language": "en", "n_whitespaces": 29, "n_words": 15, "vocab_size": 13 }
https://github.com/ray-project/ray.git
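A standalone version of the leaf-counting DFS above over plain nested dicts; `count_leaves` and the sample mapping are illustrative, not the Ray API.

from collections.abc import Mapping

def count_leaves(data):
    # Iterative DFS: every non-mapping value counts as one leaf.
    count, stack = 0, [data]
    while stack:
        node = stack.pop()
        if isinstance(node, Mapping):
            stack.extend(node.values())
        else:
            count += 1
    return count

print(count_leaves({"a": 1, "b": {"c": 2, "d": {"e": 3}}, "f": [4, 5]}))   # 4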
2
generate_mask
def generate_mask(self, h, w, p, shift): # supporting sqaure. attn_mask = torch.zeros(h, w, p, p, p, p, dtype=torch.bool, device=self.relative_position_params.device) if self.type == 'W': return attn_mask s = p - shift attn_mask[-1, :, :s, :, s:, :] = True attn_mask[-1, :, s:, :, :s, :] = True attn_mask[:, -1, :, :s, :, s:] = True attn_mask[:, -1, :, s:, :, :s] = True attn_mask = rearrange(attn_mask, 'w1 w2 p1 p2 p3 p4 -> 1 1 (w1 w2) (p1 p2) (p3 p4)') return attn_mask
8deae077004f0332ca607fc3a5d568b1a4705bec
11
scunet_model_arch.py
217
Add ScuNET DeNoiser/Upscaler Q&D Implementation of ScuNET, thanks to our handy model loader. :P https://github.com/cszn/SCUNet
35,189
0
170
146
48
152,620
82
stable-diffusion-webui
16
modules/scunet_model_arch.py
Python
11
{ "docstring": " generating the mask of SW-MSA\n Args:\n shift: shift parameters in CyclicShift.\n Returns:\n attn_mask: should be (1 1 w p p),\n ", "language": "en", "n_whitespaces": 64, "n_words": 20, "vocab_size": 20 }
https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
2
setup_tpu
def setup_tpu(tpu_driver_version='tpu_driver-0.2'): global TPU_DRIVER_MODE if not TPU_DRIVER_MODE: colab_tpu_addr = os.environ['COLAB_TPU_ADDR'].split(':')[0] url = f'http://{colab_tpu_addr}:8475/requestversion/{tpu_driver_version}' requests.post(url) TPU_DRIVER_MODE = 1 # The following is required to use TPU Driver as JAX's backend. config.FLAGS.jax_xla_backend = "tpu_driver" config.FLAGS.jax_backend_target = "grpc://" + os.environ['COLAB_TPU_ADDR']
0cc4066bb7bf758a5ba8c5def9c2c32a1c98fb89
13
colab_tpu.py
125
Pin default jax.tools.colab_tpu.setup_tpu driver version. Prior to this change, we were defaulting to the TPU nightly driver version. We should instead pin to the version associated with the default jaxlib version that Colab uses.
27,112
0
55
64
32
122,164
37
jax
14
jax/tools/colab_tpu.py
Python
9
{ "docstring": "Sets up Colab to run on TPU.\n\n Note: make sure the Colab Runtime is set to Accelerator: TPU.\n\n Args\n ----\n tpu_driver_version : (str) specify the version identifier for the tpu driver.\n Defaults to \"tpu_driver-0.2\", which can be used with jaxlib 0.3.20. Set to\n \"tpu_driver_nightly\" to use the nightly tpu driver build.\n ", "language": "en", "n_whitespaces": 62, "n_words": 51, "vocab_size": 41 }
https://github.com/google/jax.git
3
check_connection
def check_connection(self, logger, config) -> Tuple[bool, any]: try: req = requests.get(RkiCovidStream.url_base + "germany") if req.status_code == 200: return True, None return False, req.reason except Exception: return False, "There is a problem in source check connection."
d6d52c5d99dd7ebbd966da79c1d890699a947f39
12
source.py
90
:tada: New Source: RKI (Robert Koch-Institut) Covid Public API (#11732) * Added source for RKI-covid-germany, updated spec.json, implemented source with check and discover method added germany.json. * implemented incremental method for germany history cases with date as parameters, updated streams, added cursor field for incremental streams. * main file added. * added Incidence source with date as parameter. Incremental stream. spec, source, schemas updated, added class GermanHistoryIncidence added. * Added a full-refresh stream for germany/age-group .Added incremental streams for deaths/:days, recovered/:days, frozen-incidence/:days, hospitalization/:days. Updated source.py methods. Updated sepc.json properties key. Updated configured_catalogue.json with required streams. Updated config.json * writting test unit test cases for incremental streams: german history cases and german history incidence. * Incremental streams for germanhistorydeaths, germanhistoryfrozenIncidence, germanhistoryhospitalization, germanhistoryrecovered. Fixing other test cases. * Added test stream for age group and germany. * changes in Readme and source.py. * IncrementalMixin added to class GermanyHistoryCases. AFter review IncrementalMixin will be implemented to all incremental classes. * Implemented Incremental mixin or GermanHistory Cases connector. * corrected changes. * adding integration test * comment acceptence test. * On path with master * updated the changes for PR request. * changed file source-rki-covid/integration_tests/configured_catalog.json * corrected flake and blackformatting. Build gradel. * Worked on the suggestions. * source_rki_covid/schemas/germany_age_groups.json * uodated abnormal_state.json * updated the schemas for german age groups and history hospitalization. * correct dockerfile and update airbyte_cdk version * run format * update python version * correct dockerfile build * add source in seed * update uuid for rki-covid source * change docker * add bash * auto-bump connector version * run seed file * correct doc * auto-bump connector version Co-authored-by: Marcos Marx Millnitz <[email protected]> Co-authored-by: Octavia Squidington III <[email protected]>
770
0
115
55
32
5,421
35
airbyte
15
airbyte-integrations/connectors/source-rki-covid/source_rki_covid/source.py
Python
15
{ "docstring": "\n Testing connection availability for the connector.\n\n :param config: the user-input config object conforming to the connector's spec.json\n :param logger: logger object\n :return Tuple[bool, any]: (True, None) if the input config can be used to connect to the API successfully, (False, error) otherwise.\n ", "language": "en", "n_whitespaces": 80, "n_words": 42, "vocab_size": 33 }
https://github.com/airbytehq/airbyte.git
2
get_heading
def get_heading(self, queryset, field): heading_override = self.export_headings.get(field) if heading_override: return force_str(heading_override) return force_str( label_for_field( field, model=self.model, model_admin=self.model_admin ).title() )
d10f15e55806c6944827d801cd9c2d53f5da4186
13
views.py
82
Reformat with black
16,007
0
102
52
18
73,294
19
wagtail
12
wagtail/contrib/modeladmin/views.py
Python
9
{ "docstring": "Get headings for exported spreadsheet column for the relevant field", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
https://github.com/wagtail/wagtail.git
2
_nannumel_sparse
def _nannumel_sparse(x, **kwargs):
    n = _nannumel(x, **kwargs)
    # If all dimensions are contracted, this will just be a number, otherwise we
    # want to densify it.
    return n.todense() if hasattr(n, "todense") else n
8b95f983c232c1bd628e9cba0695d3ef229d290b
9
backends.py
57
Sparse array reductions (#9342)
36,793
0
48
33
31
156,874
33
dask
7
dask/array/backends.py
Python
3
{ "docstring": "\n A reduction to count the number of elements in a sparse array, excluding nans.\n This will in general result in a dense matrix with an unpredictable fill value.\n So make it official and convert it to dense.\n\n https://github.com/dask/dask/issues/7169\n ", "language": "en", "n_whitespaces": 54, "n_words": 38, "vocab_size": 33 }
https://github.com/dask/dask.git
1
_new_step
def _new_step(self):
    self.should_save = False
    self.should_evaluate = False
    self.should_log = False
44a290e94d1becd1f09fddc3d873f9e19c9d6919
7
trainer_callback.py
37
[Trainer] Add init version of paddlenlp trainer and apply finetune for ernie-1.0 pretraining. (#1761) * add some datasets for finetune. * support fine tune for all tastks. * add trainer prototype. * init verison for paddlenlp trainer. * refine trainer. * update for some details. * support multi-cards training evaluation. * support load from ckpt. * support for export inference model. * first version of trainer. * seq cls support clue. * trainer support for token classification and question answersing tasks. * fix as reviews. Co-authored-by: Zeyu Chen <[email protected]>
118,389
0
39
21
7
323,153
11
PaddleNLP
5
paddlenlp/trainer/trainer_callback.py
Python
4
{ "docstring": "Internal method that resets the variable for a new step.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/PaddlePaddle/PaddleNLP.git
7
shuffle_group
def shuffle_group(df, cols, stage, k, npartitions, ignore_index, nfinal):
    if isinstance(cols, str):
        cols = [cols]
    if cols and cols[0] == "_partitions":
        ind = df[cols[0]]
    else:
        ind = hash_object_dispatch(df[cols] if cols else df, index=False)
        if nfinal and nfinal != npartitions:
            ind = ind % int(nfinal)
    c = ind.values
    typ = np.min_scalar_type(npartitions * 2)
    c = np.mod(c, npartitions).astype(typ, copy=False)
    np.floor_divide(c, k**stage, out=c)
    np.mod(c, k, out=c)
    return group_split_dispatch(df, c, k, ignore_index=ignore_index)


@contextlib.contextmanager
510bbc380531cbf56a409f1ae68e6fd84a9599e6
@contextlib.contextmanager
14
shuffle.py
240
Update `pre-commit` version (#8691)
36,457
1
136
157
47
155,752
68
dask
27
dask/dataframe/shuffle.py
Python
15
{ "docstring": "Splits dataframe into groups\n\n The group is determined by their final partition, and which stage we are in\n in the shuffle\n\n Parameters\n ----------\n df: DataFrame\n cols: str or list\n Column name(s) on which to split the dataframe. If ``cols`` is not\n \"_partitions\", hashing will be used to determine target partition\n stage: int\n We shuffle dataframes with many partitions we in a few stages to avoid\n a quadratic number of tasks. This number corresponds to which stage\n we're in, starting from zero up to some small integer\n k: int\n Desired number of splits from this dataframe\n npartition: int\n Total number of output partitions for the full dataframe\n nfinal: int\n Total number of output partitions after repartitioning\n\n Returns\n -------\n out: Dict[int, DataFrame]\n A dictionary mapping integers in {0..k} to dataframes such that the\n hash values of ``df[col]`` are well partitioned.\n ", "language": "en", "n_whitespaces": 251, "n_words": 138, "vocab_size": 100 }
https://github.com/dask/dask.git
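The stage/k arithmetic above assigns each row to digit number `stage` of its final partition id written in base k, which is what the np.floor_divide/np.mod pair computes. A minimal sketch of that arithmetic on plain NumPy arrays (the hash values and sizes below are made up for illustration; no dask objects are involved):

import numpy as np

hashes = np.array([7, 12, 25, 31, 40])        # hypothetical row hashes
npartitions, k = 9, 3                          # 9 output partitions, split k=3 ways per stage

p = np.mod(hashes, npartitions)                # final partition id per row -> [7 3 7 4 4]
stage0 = np.mod(np.floor_divide(p, k**0), k)   # group at stage 0 -> [1 0 1 1 1]
stage1 = np.mod(np.floor_divide(p, k**1), k)   # group at stage 1 -> [2 1 2 1 1]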
8
unifi_dict
def unifi_dict(self) -> dict[str, Any]:
    return {
        "nvr": self.nvr.unifi_dict(),
        "cameras": [c.unifi_dict() for c in self.cameras.values()],
        "lights": [c.unifi_dict() for c in self.lights.values()],
        "sensors": [c.unifi_dict() for c in self.sensors.values()],
        "viewers": [c.unifi_dict() for c in self.viewers.values()],
        "liveviews": [c.unifi_dict() for c in self.liveviews.values()],
        "doorlocks": [c.unifi_dict() for c in self.doorlocks.values()],
        "chimes": [c.unifi_dict() for c in self.chimes.values()],
    }


@dataclass
654c59c19414c52f95f56603c460f63edeb60959
@dataclass
12
conftest.py
272
Add diagnostics for UniFi Protect (#72280)
99,995
1
161
166
29
301,147
53
core
16
tests/components/unifiprotect/conftest.py
Python
12
{ "docstring": "Return UniFi formatted dict representation of the NVR.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
4
get_attendance_list
def get_attendance_list(from_date, to_date, student_group, students_list):
    attendance_list = frappe.db.sql(
        """select student, date, status
        from `tabStudent Attendance` where student_group = %s
        and docstatus = 1
        and date between %s and %s
        order by student, date""",
        (student_group, from_date, to_date),
        as_dict=1,
    )
    att_map = {}
    students_with_leave_application = get_students_with_leave_application(
        from_date, to_date, students_list
    )
    for d in attendance_list:
        att_map.setdefault(d.student, frappe._dict()).setdefault(d.date, "")
        if students_with_leave_application.get(
            d.date
        ) and d.student in students_with_leave_application.get(d.date):
            att_map[d.student][d.date] = "Present"
        else:
            att_map[d.student][d.date] = d.status
    att_map = mark_holidays(att_map, from_date, to_date, students_list)
    return att_map
494bd9ef78313436f0424b918f200dab8fc7c20b
14
student_monthly_attendance_sheet.py
215
style: format code with black
14,067
0
34
143
39
65,948
54
erpnext
21
erpnext/education/report/student_monthly_attendance_sheet/student_monthly_attendance_sheet.py
Python
24
{ "docstring": "select student, date, status\n\t\tfrom `tabStudent Attendance` where student_group = %s\n\t\tand docstatus = 1\n\t\tand date between %s and %s\n\t\torder by student, date", "language": "en", "n_whitespaces": 20, "n_words": 25, "vocab_size": 18 }
https://github.com/frappe/erpnext.git
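For illustration only, the att_map returned above maps student -> date -> attendance status; the student ids and dates below are invented placeholders:

att_map = {
    "EDU-STU-2024-00001": {"2024-01-01": "Present", "2024-01-02": "Absent"},
    "EDU-STU-2024-00002": {"2024-01-01": "Present"},
}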
5
update_static
def update_static():
    config = Config()
    logger = get_log('http')
    static_path = Path(config['paths']['static'])

    last_gui_version_lv = get_last_compatible_gui_version()
    current_gui_version_lv = get_current_gui_version()

    if last_gui_version_lv is False:
        return False

    if current_gui_version_lv is not None:
        if current_gui_version_lv >= last_gui_version_lv:
            return True

    logger.info(f'New version of GUI available ({last_gui_version_lv.vstring}). Downloading...')

    temp_dir = tempfile.mkdtemp(prefix='mindsdb_gui_files_')
    success = download_gui(temp_dir, last_gui_version_lv.vstring)
    if success is False:
        shutil.rmtree(temp_dir)
        return False

    temp_dir_for_rm = tempfile.mkdtemp(prefix='mindsdb_gui_files_')
    shutil.rmtree(temp_dir_for_rm)
    shutil.copytree(str(static_path), temp_dir_for_rm)
    shutil.rmtree(str(static_path))
    shutil.copytree(temp_dir, str(static_path))
    shutil.rmtree(temp_dir_for_rm)

    logger.info(f'GUI version updated to {last_gui_version_lv.vstring}')
    return True
9ce5a21dd6359fd7e8ebf78051ce9e97bd195ec9
11
initialize.py
286
ML handler supbrocess (#3377) * log -> logger dividing components: app initialize parse args set env.MINDSDB_CONFIG_PATH config requiers env.MINDSDB_CONFIG_PATH sets env.MINDSDB_DB_CON Config() - makes initialization log uses config initialize_log - makes initialization database uses env.MINDSDB_DB_CON have init() method file storage uses config * partial sync for model storage interfaces * ml handler in subprocess interface * fix delete model * fix: model with error in success status * fix: remove hf predictor * fix pg handler * MLHandlerPersistWrapper keeps wrapper process opened * predictor with error keeps 'success' status #3362 * lock for sending tasks to subprocess one by one * check target of predictor before run learn in subproccess * fix check target * fix: json_ai override and problem definition generation * fix None case * folder for ml handler tests * fix: add timeseries_settings key to learn_args * fixes in lw_handler * fix: del join_learn_process * tests for LW handler * finish unit test for LW * changes in tests: - set-project -> to base class - return of ml handler is dataframe - draft for project structure test * merge from staging * create_validation method to check learn params before send to subprocess fixes of HF fixed version of transformers in HF requirements Co-authored-by: Patricio Cerda Mardini <[email protected]>
25,952
0
171
160
47
117,309
72
mindsdb
24
mindsdb/api/http/initialize.py
Python
25
{ "docstring": " Update Scout files basing on compatible-config.json content.\n Files will be downloaded and updated if new version of GUI > current.\n Current GUI version stored in static/version.txt.\n ", "language": "en", "n_whitespaces": 44, "n_words": 26, "vocab_size": 24 }
https://github.com/mindsdb/mindsdb.git
1
test_stream_admin_remove_others_from_public_stream
def test_stream_admin_remove_others_from_public_stream(self) -> None:
    result = self.attempt_unsubscribe_of_principal(
        query_count=15,
        target_users=[self.example_user("cordelia")],
        is_realm_admin=False,
        is_stream_admin=True,
        is_subbed=True,
        invite_only=False,
        target_users_subbed=True,
    )
    json = self.assert_json_success(result)
    self.assert_length(json["removed"], 1)
    self.assert_length(json["not_removed"], 0)
803982e87254e3b1ebcb16ed795e224afceea3a3
13
test_subs.py
125
message_flags: Short-circuit if no messages changed. Omit sending an event, and updating the database, if there are no matching messages.
17,733
0
141
80
21
83,840
22
zulip
15
zerver/tests/test_subs.py
Python
16
{ "docstring": "\n You can remove others from public streams you're a stream administrator of.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
https://github.com/zulip/zulip.git
2
test_conversation_part_has_conversation_id
def test_conversation_part_has_conversation_id(requests_mock):
    response_body = {
        "type": "conversation",
        "id": "151272900024304",
        "created_at": 1647365706,
        "updated_at": 1647366443,
        "conversation_parts": {
            "type": "conversation_part.list",
            "conversation_parts": [
                {"type": "conversation_part", "id": "13740311965"},
                {"type": "conversation_part", "id": "13740312024"},
            ],
            "total_count": 2,
        },
    }
    url = "https://api.intercom.io/conversations/151272900024304"
    requests_mock.get(url, json=response_body)
    stream1 = ConversationParts(authenticator=NoAuth())
    record_count = 0
    for record in stream1.read_records(sync_mode=SyncMode.incremental, stream_slice={"id": "151272900024304"}):
        assert record["conversation_id"] == "151272900024304"
        record_count += 1
    assert record_count == 2
787daa9961b6b103e8d520c544277fc77c191dfb
14
unit_test.py
233
🎉 Source Intercom: Added conversation_id field to conversation_part records (#11206)
666
0
217
126
46
4,470
60
airbyte
17
airbyte-integrations/connectors/source-intercom/unit_tests/unit_test.py
Python
23
{ "docstring": "\n Test shows that conversation_part records include the `conversation_id` field.\n ", "language": "en", "n_whitespaces": 16, "n_words": 9, "vocab_size": 9 }
https://github.com/airbytehq/airbyte.git
9
flatten
def flatten(data, levels=None, preserve_nulls=False, _ids=None):
    if _ids is None:
        _ids = set()
    if id(data) in _ids:
        raise RecursionError("Reference cycle detected. Check input list.")
    _ids.add(id(data))
    ret = []
    for element in data:
        if not preserve_nulls and element in (None, "None", "null"):
            # ignore null items
            continue
        elif is_iter(element):
            if levels is None:
                ret.extend(flatten(element, preserve_nulls=preserve_nulls, _ids=_ids))
            elif levels >= 1:
                # decrement as we go down the stack
                ret.extend(
                    flatten(
                        element,
                        levels=(int(levels) - 1),
                        preserve_nulls=preserve_nulls,
                        _ids=_ids,
                    )
                )
            else:
                ret.append(element)
        else:
            ret.append(element)
    return ret
ec41b56baee98ade5e6e21083645da1ec21c4b86
22
data.py
249
detect reference cycles in flatten function
54,379
0
409
154
64
216,076
82
salt
15
salt/utils/data.py
Python
27
{ "docstring": "\n .. versionadded:: 3005\n\n Flatten a list.\n\n :param data: A list to flatten\n\n :param levels: The number of levels in sub-lists to descend\n\n :param preserve_nulls: Preserve nulls in a list, by default flatten removes\n them\n\n :param _ids: Parameter used internally within the function to detect\n reference cycles.\n\n :returns: A flat(ter) list of values\n\n .. code-block:: jinja\n\n {{ [3, [4, 2] ] | flatten }}\n # => [3, 4, 2]\n\n Flatten only the first level of a list:\n\n .. code-block:: jinja\n\n {{ [3, [4, [2]] ] | flatten(levels=1) }}\n # => [3, 4, [2]]\n\n Preserve nulls in a list, by default flatten removes them.\n\n .. code-block:: jinja\n\n {{ [3, None, [4, [2]] ] | flatten(levels=1, preserve_nulls=True) }}\n # => [3, None, 4, [2]]\n ", "language": "en", "n_whitespaces": 245, "n_words": 121, "vocab_size": 63 }
https://github.com/saltstack/salt.git
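The Jinja examples in the docstring above translate directly into plain function calls; assuming the filter is backed by salt.utils.data.flatten (the path given in this record), a usage sketch with the documented outputs as comments:

from salt.utils.data import flatten

flatten([3, [4, 2]])                                          # [3, 4, 2]
flatten([3, [4, [2]]], levels=1)                              # [3, 4, [2]]
flatten([3, None, [4, [2]]], levels=1, preserve_nulls=True)   # [3, None, 4, [2]]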
9
training_iteration
def training_iteration(self) -> ResultDict:
    # Some shortcuts.
    batch_size = self.config["train_batch_size"]

    # Collects SampleBatches in parallel and synchronously
    # from the Trainer's RolloutWorkers until we hit the
    # configured `train_batch_size`.
    sample_batches = []
    num_env_steps = 0
    num_agent_steps = 0
    while (not self._by_agent_steps and num_env_steps < batch_size) or \
            (self._by_agent_steps and num_agent_steps < batch_size):
        new_sample_batches = synchronous_parallel_sample(self.workers)
        sample_batches.extend(new_sample_batches)
        num_env_steps += sum(len(s) for s in new_sample_batches)
        num_agent_steps += sum(
            len(s) if isinstance(s, SampleBatch) else s.agent_steps()
            for s in new_sample_batches)
    self._counters[NUM_ENV_STEPS_SAMPLED] += num_env_steps
    self._counters[NUM_AGENT_STEPS_SAMPLED] += num_agent_steps

    # Combine all batches at once
    train_batch = SampleBatch.concat_samples(sample_batches)

    # Use simple optimizer (only for multi-agent or tf-eager; all other
    # cases should use the multi-GPU optimizer, even if only using 1 GPU).
    # TODO: (sven) rename MultiGPUOptimizer into something more
    # meaningful.
    if self.config.get("simple_optimizer") is True:
        train_results = train_one_step(self, train_batch)
    else:
        train_results = multi_gpu_train_one_step(self, train_batch)

    return train_results
853d10871c55cb741cb15212ef4ab42f158f968c
13
trainer.py
261
[RLlib] Issue 18499: PGTrainer with training_iteration fn does not support multi-GPU. (#21376)
28,878
0
399
158
97
129,027
140
ray
28
rllib/agents/trainer.py
Python
35
{ "docstring": "Default single iteration logic of an algorithm.\n\n - Collect on-policy samples (SampleBatches) in parallel using the\n Trainer's RolloutWorkers (@ray.remote).\n - Concatenate collected SampleBatches into one train batch.\n - Note that we may have more than one policy in the multi-agent case:\n Call the different policies' `learn_on_batch` (simple optimizer) OR\n `load_batch_into_buffer` + `learn_on_loaded_batch` (multi-GPU\n optimizer) methods to calculate loss and update the model(s).\n - Return all collected metrics for the iteration.\n\n Returns:\n The results dict from executing the training iteration.\n ", "language": "en", "n_whitespaces": 168, "n_words": 79, "vocab_size": 66 }
https://github.com/ray-project/ray.git
5
get_model
def get_model(self, app_label, model_name=None, require_ready=True):
    if require_ready:
        self.check_models_ready()
    else:
        self.check_apps_ready()

    if model_name is None:
        app_label, model_name = app_label.split(".")

    app_config = self.get_app_config(app_label)

    if not require_ready and app_config.models is None:
        app_config.import_models()

    return app_config.get_model(model_name, require_ready=require_ready)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
registry.py
132
Refs #33476 -- Reformatted code with Black.
50,292
0
125
80
25
203,306
32
django
12
django/apps/registry.py
Python
11
{ "docstring": "\n Return the model matching the given app_label and model_name.\n\n As a shortcut, app_label may be in the form <app_label>.<model_name>.\n\n model_name is case-insensitive.\n\n Raise LookupError if no application exists with this label, or no\n model exists with this name in the application. Raise ValueError if\n called with a single argument that doesn't contain exactly one dot.\n ", "language": "en", "n_whitespaces": 105, "n_words": 55, "vocab_size": 41 }
https://github.com/django/django.git
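A usage sketch against the public registry instance (django.apps.apps); it assumes a configured project where django.setup() has already run:

from django.apps import apps

User = apps.get_model("auth", "User")     # explicit app_label and model_name
User = apps.get_model("auth.User")        # "<app_label>.<model_name>" shortcut

# apps.get_model("auth", "NoSuchModel")   # would raise LookupError
# apps.get_model("no.dots.allowed.here")  # would raise ValueError (not exactly one dot)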
20
handle_groupme
def handle_groupme(request) -> bool:
    req = json.loads(request.decode("utf-8"))
    text = req.get("text").strip().lower()
    group_id = req.get("group_id").strip()
    if text[0] == "!":
        cmd = text[1:]
        full_cmd = cmd.split("/")
        group = full_cmd[0].split("-")[0]
        parents = {x.split("-")[0] for x in commands}
        if group in parents:
            if full_cmd[0] in commands:
                selected = commands[full_cmd[0]]
                if len(full_cmd) != len(selected.get("required", [])) + 1:
                    syntax = get_syntax(selected, full_cmd[0])
                    send_message(f"Required syntax: {syntax}", group_id)
                    return False
                other_args = {}
                for i, val in enumerate(full_cmd[1:]):
                    req_name = list(selected.get("required", {}).keys())[i]
                    required = selected.get("required", [])[req_name]
                    if isinstance(required, List) and required != [True, False]:
                        required = [str(x) for x in required]
                    if isinstance(val, str) and req_name in ["ticker"]:
                        val = val.upper()
                    elif isinstance(val, str) and req_name == "raw":
                        val = bool(val)
                    if (isinstance(required, List) and val not in required) or (
                        isinstance(required, Pattern) and not required.match(val)
                    ):
                        syntax = get_syntax(selected, full_cmd[0])
                        send_message(
                            f"{syntax}\nInvalid argument for: {req_name}", group_id
                        )
                        get_arguments(selected, req_name, group_id)
                        return False
                    other_args[req_name] = val
                func = selected["function"]
                ShowView().groupme(func, group_id, cmd, **other_args)
                return True
            else:
                show_cmds = []
                for command in commands:
                    if group == command[: len(group)]:
                        show_cmds.append(command)
                send_options("Valid commands: ", show_cmds, group_id)
                return False
        else:
            send_options("Valid categories: ", parents, group_id)
            return False
    return False
b8b3b49896b7553c8ad57e0f3fbb4464ecf24179
22
run_groupme.py
703
Groupme Bot (#1418) * Added groupme helpers * Began handling of user input * Expanded commands, improved bot * Completed dictionary * Eliminated redundancies * Added more * Added debuggers for server * Fixed bugs * Improved input handling and image sending * Fixed dd-est * Fixed dd commands * Added dps fix * Fixed feargreed * Fixed econ commands * Fixed gov commands * Fixed ta * Added docstrings * Added docstrings * black Co-authored-by: didierlopes.eth <[email protected]>
84,275
0
941
428
108
282,723
187
OpenBBTerminal
46
bots/groupme/run_groupme.py
Python
62
{ "docstring": "Handles groupme bot inputs\n\n Parameters\n ----------\n request : Request\n The request object provided by FASTAPI\n\n Returns\n ----------\n success : bool\n Whether the response was sent successfully\n ", "language": "en", "n_whitespaces": 61, "n_words": 26, "vocab_size": 23 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
3
_fill
def _fill(strings, linelen=75):
    currpos = 0
    lasti = 0
    result = []
    for i, s in enumerate(strings):
        length = len(s)
        if currpos + length < linelen:
            currpos += length + 1
        else:
            result.append(b' '.join(strings[lasti:i]))
            lasti = i
            currpos = length
    result.append(b' '.join(strings[lasti:]))
    return b'\n'.join(result)


# PDF strings are supposed to be able to include any eight-bit data,
# except that unbalanced parens and backslashes must be escaped by a
# backslash. However, sf bug #2708559 shows that the carriage return
# character may get read as a newline; these characters correspond to
# \gamma and \Omega in TeX's math font encoding. Escaping them fixes
# the bug.
_string_escape_regex = re.compile(br'([\\()\r\n])')
ec410abbb3a721e31f3aaa61e9e4f941467e35e1
16
backend_pdf.py
181
Deprecate functions in backends
23,073
0
189
97
80
108,136
110
matplotlib
16
lib/matplotlib/backends/backend_pdf.py
Python
14
{ "docstring": "\n Make one string from sequence of strings, with whitespace in between.\n\n The whitespace is chosen to form lines of at most *linelen* characters,\n if possible.\n ", "language": "en", "n_whitespaces": 38, "n_words": 25, "vocab_size": 23 }
https://github.com/matplotlib/matplotlib.git
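An illustrative call of the greedy line-filling above; _fill is a private helper, so the import path here is an assumption and may change between matplotlib versions:

from matplotlib.backends.backend_pdf import _fill

chunks = [b"aa", b"bbb", b"cc", b"dd"]
# With linelen=6, b"bbb" no longer fits after b"aa", so a newline is inserted;
# b"bbb" and b"cc" then share a line, and the leftover b"dd" ends up alone.
print(_fill(chunks, linelen=6))  # b'aa\nbbb cc\ndd'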
1
plextv_resources_fixture
def plextv_resources_fixture():
    return load_fixture("plex/plextv_resources_one_server.xml")


@pytest.fixture(name="plextv_resources_two_servers", scope="session")
10195dc700770cdfdeaff79c53cf5d1d763f20c6
@pytest.fixture(name="plextv_resources_two_servers", scope="session")
8
conftest.py
46
Improve server selection for Plex config flows (#63408)
107,537
1
11
10
6
308,799
6
core
6
tests/components/plex/conftest.py
Python
2
{ "docstring": "Load single-server payload for plex.tv resources and return it.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
2
test_timeout_does_not_wait_for_completion_for_sync_flows
def test_timeout_does_not_wait_for_completion_for_sync_flows(self, tmp_path):
    if sys.version_info[1] == 11:
        pytest.xfail("The engine returns _after_ sleep finishes in Python 3.11")

    canary_file = tmp_path / "canary"
a7bd9cadd5038383449b0e75a87bb23a73b278d8
10
test_flows.py
52
Add support for Python 3.11 (#7304) Co-authored-by: Chris Guidry <[email protected]>
11,913
0
53
96
21
59,586
21
prefect
8
tests/test_flows.py
Python
14
{ "docstring": "\n Sync flows are cancelled when they change instructions. The flow will return\n immediately when the timeout is reached, but the thread it executes in will\n continue until the next instruction is reached. `time.sleep` will return then\n the thread will be interrupted.\n ", "language": "en", "n_whitespaces": 77, "n_words": 41, "vocab_size": 31 }
https://github.com/PrefectHQ/prefect.git
2
inertia
def inertia(self):
    if self.is_rigidbody:
        return RigidBody.inertia.fget(self)
    return (self.central_inertia, self.masscenter)
f0ccc88c86f77f1c3e13d590d4506e49c8cb2a12
10
body.py
49
Add property docstrings
49,282
0
41
30
8
199,560
9
sympy
7
sympy/physics/mechanics/body.py
Python
4
{ "docstring": "The body's inertia about a point; stored as (Dyadic, Point).", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/sympy/sympy.git
1
test_event_ref
def test_event_ref(self):
    # Reset the event cache
    self.store._get_event_cache.clear()

    with LoggingContext("test") as ctx:
        # We keep hold of the event event though we never use it.
        event = self.get_success(self.store.get_event(self.event_id))  # noqa: F841

        # We should have fetched the event from the DB
        self.assertEqual(ctx.get_resource_usage().evt_db_fetch_count, 1)

    # Reset the event cache
    self.store._get_event_cache.clear()

    with LoggingContext("test") as ctx:
        self.get_success(self.store.get_event(self.event_id))

        # Since the event is still in memory we shouldn't have fetched it
        # from the DB
        self.assertEqual(ctx.get_resource_usage().evt_db_fetch_count, 0)
fcf951d5dc7ca8c4cb18aa9c1f5ccb005df3610a
13
test_events_worker.py
181
Track in memory events using weakrefs (#10533)
72,209
0
211
100
40
248,311
73
synapse
14
tests/storage/databases/main/test_events_worker.py
Python
9
{ "docstring": "Test that we reuse events that are still in memory but have fallen\n out of the cache, rather than requesting them from the DB.\n ", "language": "en", "n_whitespaces": 38, "n_words": 24, "vocab_size": 22 }
https://github.com/matrix-org/synapse.git
8
_load_specials
def _load_specials(self, *args, **kwargs):
    super(KeyedVectors, self)._load_specials(*args, **kwargs)
    if hasattr(self, 'doctags'):
        self._upconvert_old_d2vkv()
    # fixup rename/consolidation into index_to_key of older index2word, index2entity
    if not hasattr(self, 'index_to_key'):
        self.index_to_key = self.__dict__.pop('index2word', self.__dict__.pop('index2entity', None))
    # fixup rename into vectors of older syn0
    if not hasattr(self, 'vectors'):
        self.vectors = self.__dict__.pop('syn0', None)
        self.vector_size = self.vectors.shape[1]
    # ensure at least a 'None' in 'norms' to force recalc
    if not hasattr(self, 'norms'):
        self.norms = None
    # ensure at least an empty 'expandos'
    if not hasattr(self, 'expandos'):
        self.expandos = {}
    # fixup rename of vocab into map
    if 'key_to_index' not in self.__dict__:
        self._upconvert_old_vocab()
    # ensure older instances have next_index
    if not hasattr(self, 'next_index'):
        self.next_index = len(self)
766b9e19585968d011649b53057fba3663eff551
13
keyedvectors.py
286
Ensure next_index available when loading old stored KeyedVectors models (#3117) * fix #3114: ensure next_index available * rm trailing whitespace
1,665
0
299
166
66
9,736
106
gensim
19
gensim/models/keyedvectors.py
Python
17
{ "docstring": "Handle special requirements of `.load()` protocol, usually up-converting older versions.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/RaRe-Technologies/gensim.git
1
simple_test
def simple_test(self, feats, img_metas, rescale=False):
    outs = self.forward(feats)
    results_list = self.get_results(
        *outs, img_metas=img_metas, rescale=rescale)
    return results_list
9c5b3331ac8edbfa328922fbab45c382380da540
9
base_dense_head.py
63
Simplify api of one-stage detector
70,350
0
55
41
14
244,361
16
mmdetection
9
mmdet/models/dense_heads/base_dense_head.py
Python
5
{ "docstring": "Test function without test-time augmentation.\n\n Args:\n feats (tuple[torch.Tensor]): Multi-level features from the\n upstream network, each is a 4D-tensor.\n img_metas (list[dict]): List of image information.\n rescale (bool, optional): Whether to rescale the results.\n Defaults to False.\n\n Returns:\n list[obj:`InstanceData`]: Detection results of each image\n after the post process.\n Each item usually contains following keys.\n\n - scores (Tensor): Classification scores, has a shape\n (num_instance,)\n - labels (Tensor): Labels of bboxes, has a shape\n (num_instances,).\n - bboxes (Tensor): Has a shape (num_instances, 4),\n the last dimension 4 arrange as (x1, y1, x2, y2).\n ", "language": "en", "n_whitespaces": 278, "n_words": 89, "vocab_size": 70 }
https://github.com/open-mmlab/mmdetection.git
3
test_http_server_aborts
def test_http_server_aborts(tctx, stream): server = Placeholder(Server) flow = Placeholder(HTTPFlow) playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular))
372a632161dee642d81542069507826e34466ba1
11
test_http.py
58
reintroduce `Flow.live` We previously relied on the state of `Flow.reply` to check if a flow can be killed, but this doesn't work anymore with `Flow.reply` being removed. Instead, we now reintroduce the `Flow.live` attribute, which signals if we are on a live connection. Killing still is not ideal (see comment in `Flow.killable`), but this paves the way.
73,522
0
25
277
11
250,618
13
mitmproxy
14
test/mitmproxy/proxy/layers/http/test_http.py
Python
58
{ "docstring": "Test handling of the case where a server aborts during response transmission.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/mitmproxy/mitmproxy.git