Dataset schema (one entry per column: name, dtype, and the observed range of values or string lengths):

    complexity       int64    values 1 to 139
    fun_name         string   lengths 1 to 80
    code             string   lengths 101 to 62.2k
    commit_id        string   lengths 40 to 40
    ast_errors       string   lengths 0 to 3.11k
    ast_levels       int64    values 6 to 36
    file_name        string   lengths 5 to 79
    n_ast_nodes      int64    values 17 to 19.2k
    commit_message   string   lengths 3 to 15.3k
    d_id             int64    values 12 to 121k
    n_ast_errors     int64    values 0 to 9
    n_whitespaces    int64    values 4 to 10.8k
    token_counts     int64    values 5 to 3.06k
    vocab_size       int64    values 4 to 1.11k
    id               int64    values 20 to 338k
    n_words          int64    values 4 to 4.82k
    repo             string   lengths 3 to 22
    n_identifiers    int64    values 2 to 176
    path             string   lengths 7 to 134
    language         string   1 class (Python)
    nloc             int64    values 1 to 413
    documentation    dict
    url              string   lengths 31 to 59

The records that follow list one field value per line, in the column order above; empty ast_errors values are omitted from a record.
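A record with this schema can be loaded and inspected programmatically. The sketch below is illustrative only: it assumes the corpus is published as a Hugging Face dataset, and the identifier "you/code-docstring-corpus" is a hypothetical placeholder rather than the real name of this dump.

# Minimal sketch, assuming the rows below are served as a Hugging Face dataset.
# "you/code-docstring-corpus" is a placeholder id, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("you/code-docstring-corpus", split="train")

row = ds[0]
print(row["repo"], row["path"], row["fun_name"])    # e.g. Nuitka, nuitka/plugins/PluginBase.py, makeDllEntryPoint
print(row["nloc"], row["complexity"], row["token_counts"])
print(row["documentation"]["docstring"])            # the extracted docstring (its length stats sit alongside it in the same dict)
print(row["code"])                                  # function source, with the docstring factored out into `documentation`

Once loaded this way, the usual datasets operations apply, e.g. ds.filter(lambda r: r["repo"] == "django") to keep only the Django rows.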
1
makeDllEntryPoint
def makeDllEntryPoint(cls, source_path, dest_path, package_name):
    return makeDllEntryPoint(
        source_path=source_path, dest_path=dest_path, package_name=package_name
    )
56eb59d93f13815e66d0dea07e7669dfe275fa10
8
PluginBase.py
40
Plugins: Massive cleanup and API improvements and Kivy support * Added method to locate a DLL and to create a DLL entry point as expected, removing need for imports and making it more clear as an API. * The location of modules had already an API, but it wasn' used where it could be. * Moved implicit imports and DLL usage for Gi to its plugin, solving a TODO for it. * Make sure sure to only yield, and not return, that is just more error prone. * Also allow generators for implicit dependencies, such that generators work in a yield from fashion. * With this, Kivy apps work on at least Linux.
42,655
0
43
27
11
178,303
11
Nuitka
5
nuitka/plugins/PluginBase.py
Python
4
{ "docstring": "Create an entry point, as expected to be provided by getExtraDlls.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/Nuitka/Nuitka.git
1
contains_point
def contains_point(self, point):
    return self.patch.contains_point(point, radius=1.0)
f156db08eee54d285ab0fb4e031e48d078ba6aa3
8
_base.py
34
DOC: More cleanup axes -> Axes
22,768
0
20
23
6
107,479
6
matplotlib
5
lib/matplotlib/axes/_base.py
Python
2
{ "docstring": "\n Return whether *point* (pair of pixel coordinates) is inside the Axes\n patch.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 12 }
https://github.com/matplotlib/matplotlib.git
13
to_dict
def to_dict(self) -> dict:
    if self._data_dict:
        # If the checkpoint data is already a dict, return
        checkpoint_data = self._data_dict
    elif self._obj_ref:
        # If the checkpoint data is an object reference, resolve
        checkpoint_data = ray.get(self._obj_ref)
    elif self._local_path or self._uri:
        # Else, checkpoint is either on FS or external storage
        with self.as_directory() as local_path:
            checkpoint_data_path = os.path.join(
                local_path, _DICT_CHECKPOINT_FILE_NAME
            )
            if os.path.exists(checkpoint_data_path):
                # If we are restoring a dict checkpoint, load the dict
                # from the checkpoint file.
                with open(checkpoint_data_path, "rb") as f:
                    checkpoint_data = pickle.load(f)

                # If there are additional files in the directory, add them as
                # _DICT_CHECKPOINT_ADDITIONAL_FILE_KEY
                additional_files = {}
                for file_or_dir in os.listdir(local_path):
                    if file_or_dir in [
                        ".",
                        "..",
                        _DICT_CHECKPOINT_FILE_NAME,
                        _CHECKPOINT_METADATA_FILE_NAME,
                    ]:
                        continue
                    additional_files[file_or_dir] = _pack(
                        os.path.join(local_path, file_or_dir)
                    )

                if additional_files:
                    checkpoint_data[
                        _DICT_CHECKPOINT_ADDITIONAL_FILE_KEY
                    ] = additional_files
            else:
                files = [
                    f
                    for f in os.listdir(local_path)
                    if os.path.isfile(os.path.join(local_path, f))
                    and f.endswith(_METADATA_CHECKPOINT_SUFFIX)
                ]
                metadata = {}
                for file in files:
                    with open(os.path.join(local_path, file), "rb") as f:
                        key = file[: -len(_METADATA_CHECKPOINT_SUFFIX)]
                        value = pickle.load(f)
                        metadata[key] = value

                data = _pack(local_path)

                checkpoint_data = {
                    _FS_CHECKPOINT_KEY: data,
                }
                checkpoint_data.update(metadata)
    else:
        raise RuntimeError(f"Empty data for checkpoint {self}")

    return _CheckpointDict(checkpoint_data, metadata=self._metadata)
5034544d5df5d7d9596b261d7bdffdd28e76fe2b
23
checkpoint.py
465
[AIR] Maintain checkpoint type information during serialization (#28387) These changes are needed to fix the errors described in #28134. Signed-off-by: Balaji Veeramani <[email protected]> Co-authored-by: Amog Kamsetty <[email protected]>
28,585
0
1,257
280
112
127,998
186
ray
43
python/ray/air/checkpoint.py
Python
60
{ "docstring": "Return checkpoint data as dictionary.\n\n .. note::\n :meth:`~Checkpoint.to_dict` returns a ``dict`` subclass that contains\n information about the checkpoint type. This ``dict`` subclass is\n functionally identical to the built-in ``dict``.\n\n Returns:\n dict: Dictionary containing checkpoint data.\n ", "language": "en", "n_whitespaces": 100, "n_words": 35, "vocab_size": 30 }
https://github.com/ray-project/ray.git
5
fill_properties
def fill_properties(self, address, props_dict):
    device = self._devices[Address(address)]
    operating_flags = props_dict.get("operating_flags", {})
    properties = props_dict.get("properties", {})
    with patch("pyinsteon.subscriber_base.publish_topic", MagicMock()):
        for flag in operating_flags:
            value = operating_flags[flag]
            if device.operating_flags.get(flag):
                device.operating_flags[flag].load(value)
        for flag in properties:
            value = properties[flag]
            if device.properties.get(flag):
                device.properties[flag].load(value)
781ec87dff674b5f47f4138552d818cc490b5201
15
mock_devices.py
190
Fix Insteon tests (#71092)
98,486
0
193
117
28
299,558
38
core
15
tests/components/insteon/mock_devices.py
Python
13
{ "docstring": "Fill the operating flags and extended properties of a device.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/home-assistant/core.git
1
set_freqai_targets
def set_freqai_targets(self, dataframe, **kwargs):
    return dataframe

###
# END - Intended to be overridden by strategy
###
c2936d551b8ad6ccf7b57e2ac6cb55d8550622cf
6
interface.py
25
improve doc, update test strats, change function names
35,164
0
28
13
16
151,922
17
freqtrade
4
freqtrade/strategy/interface.py
Python
2
{ "docstring": "\n *Only functional with FreqAI enabled strategies*\n Required function to set the targets for the model.\n All targets must be prepended with `&` to be recognized by the FreqAI internals.\n\n More details about feature engineering available:\n\n https://www.freqtrade.io/en/latest/freqai-feature-engineering\n\n :param df: strategy dataframe which will receive the targets\n usage example: dataframe[\"&-target\"] = dataframe[\"close\"].shift(-1) / dataframe[\"close\"]\n ", "language": "en", "n_whitespaces": 109, "n_words": 52, "vocab_size": 43 }
https://github.com/freqtrade/freqtrade.git
2
__exit__
def __exit__(self, type, value, traceback):
    if self.is_locked:
        self.release()
c541f4ba1fcab8304c7ac4efdce3d63a2e478176
9
file_lock.py
36
Add model parallel for FasterGPT. (#1755) * Add model parallel for FasterGPT. * Make GPT model parallel runable * Make FT model parallel optional. * Fix _write_setup_file when kwargs is not empty. * Fix ext_utils.load * Add file lock for model parallel. * Fix model_parallel.flag in CMakeLists.txt. * Use a separated lib for model parallel. * Make from_pretrained get partial model. * Make model parallel support layer group in python. * Fix fit_partial_model when model having keys state not including. Add api docs for model parallel. * Fix the default world_size when mpi is not available. * Add demo for GPT model parallel. * Fix default global ft_para_conf. * Fix GPTModel state_dict wrapper for layer parallel. * Set seed for tensor parallel. * Fix location of gpt.h in cmake. * Fix seed in gpt.h * Fix NV FT GPT embedding. * Add more cases in gpt_mp_sample.py * Fix seed in ker_curand_setupLauncher. Put build dir of FG in PPNLP_HOME with digest of current path. * Refine gpt_mp_sample.py
118,415
0
33
22
8
323,211
8
PaddleNLP
7
paddlenlp/utils/file_lock.py
Python
3
{ "docstring": " Activated at the end of the with statement.\n It automatically releases the lock if it isn't locked.\n ", "language": "en", "n_whitespaces": 36, "n_words": 17, "vocab_size": 15 }
https://github.com/PaddlePaddle/PaddleNLP.git
2
pip_imported_during_build
def pip_imported_during_build():
    import traceback
    return any(
        frame.f_globals['__file__'].endswith('setup.py')
        for frame, line in traceback.walk_stack(None)
    )


DISTUTILS_FINDER = DistutilsMetaFinder()
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
11
__init__.py
68
upd; format
12,171
0
65
33
16
60,461
16
transferlearning
10
.venv/lib/python3.8/site-packages/_distutils_hack/__init__.py
Python
6
{ "docstring": "\n Detect if pip is being imported in a build script. Ref #2355.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 12 }
https://github.com/jindongwang/transferlearning.git
3
compute_loss
def compute_loss(self, batch, return_output=False):
    if batch.label_vec is None:
        raise ValueError('Cannot compute loss without a label.')
    model_output = self.model(*self._model_input(batch), ys=batch.label_vec)
    scores, preds, *_ = model_output
    score_view = scores.reshape(-1, scores.size(-1))
    loss_flattened = self.criterion(score_view, batch.label_vec.view(-1))
    loss_per_token = loss_flattened.view(scores.shape[:-1])
    notnull = batch.label_vec.ne(self.NULL_IDX)

    # save loss to metrics
    # cross entropy loss
    self.record_local_metric(
        'loss', AverageMetric.from_mask(loss_per_token, notnull)
    )
    # perplexity
    self.record_local_metric('ppl', PPLMetric.from_mask(loss_per_token, notnull))
    # token-wise accuracy
    self.record_local_metric(
        'token_acc', AverageMetric.from_mask(batch.label_vec == preds, notnull)
    )
    # utterance-wise exact match
    num_target_tokens = notnull.long().sum(dim=-1)
    num_tokens_correct = ((batch.label_vec == preds) * notnull).sum(dim=-1)
    self.record_local_metric(
        'token_em', AverageMetric.many(num_tokens_correct == num_target_tokens)
    )
    # actually do backwards loss
    loss = loss_per_token.sum(dim=1)
    loss = loss.sum()
    loss /= num_target_tokens.sum()  # average loss per token
    if return_output:
        return (loss, model_output)
    else:
        return loss
05ff60935043cf39192535eac38e2730212057c7
13
torch_generator_agent.py
410
Add Metric.from_mask helper method (#3411) (#4894) * Add Metric.from_mask helper method (#3411) * Use cls directly instead of passing in MyMetric
47,269
0
377
252
80
195,534
114
ParlAI
35
parlai/core/torch_generator_agent.py
Python
28
{ "docstring": "\n Compute and return the loss for the given batch.\n\n Easily overridable for customized loss functions.\n\n If return_output is True, the full output from the call to self.model()\n is also returned, via a (loss, model_output) pair.\n ", "language": "en", "n_whitespaces": 71, "n_words": 35, "vocab_size": 29 }
https://github.com/facebookresearch/ParlAI.git
2
_relative_position_bucket
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
    relative_buckets = 0
    if bidirectional:
        num_buckets //= 2
        relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
        relative_position = torch.abs(relative_position)
    else:
        relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
    # now relative_position is in the range [0, inf)

    # half of the buckets are for exact increments in positions
    max_exact = num_buckets // 2
    is_small = relative_position < max_exact

    # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
    relative_position_if_large = max_exact + (
        torch.log(relative_position.float() / max_exact)
        / math.log(max_distance / max_exact)
        * (num_buckets - max_exact)
    ).to(torch.long)
    relative_position_if_large = torch.min(
        relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
    )

    relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
    return relative_buckets
a72f1c9f5b907f96cbb7de3bbb02a1d431d34071
17
modeling_longt5.py
240
Add `LongT5` model (#16792) * Initial commit * Make some fixes * Make PT model full forward pass * Drop TF & Flax implementation, fix copies etc * Add Flax model and update some corresponding stuff * Drop some TF things * Update config and flax local attn * Add encoder_attention_type to config * . * Update docs * Do some cleansing * Fix some issues -> make style; add some docs * Fix position_bias + mask addition + Update tests * Fix repo consistency * Fix model consistency by removing flax operation over attn_mask * [WIP] Add PT TGlobal LongT5 * . * [WIP] Add flax tglobal model * [WIP] Update flax model to use the right attention type in the encoder * Fix flax tglobal model forward pass * Make the use of global_relative_attention_bias * Add test suites for TGlobal model * Fix minor bugs, clean code * Fix pt-flax equivalence though not convinced with correctness * Fix LocalAttn implementation to match the original impl. + update READMEs * Few updates * Update: [Flax] improve large model init and loading #16148 * Add ckpt conversion script accoring to #16853 + handle torch device placement * Minor updates to conversion script. * Typo: AutoModelForSeq2SeqLM -> FlaxAutoModelForSeq2SeqLM * gpu support + dtype fix * Apply some suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> Co-authored-by: Patrick von Platen <[email protected]> * * Remove (de)parallelize stuff * Edit shape comments * Update README.md * make fix-copies * Remove caching logic for local & tglobal attention * Apply another batch of suggestions from code review * Add missing checkpoints * Format converting scripts * Drop (de)parallelize links from longT5 mdx * Fix converting script + revert config file change * Revert "Remove caching logic for local & tglobal attention" This reverts commit 2a619828f6ddc3e65bd9bb1725a12b77fa883a46. * Stash caching logic in Flax model * Make side relative bias used always * Drop caching logic in PT model * Return side bias as it was * Drop all remaining model parallel logic * Remove clamp statements * Move test files to the proper place * Update docs with new version of hf-doc-builder * Fix test imports * Make some minor improvements * Add missing checkpoints to docs * Make TGlobal model compatible with torch.onnx.export * Replace some np.ndarray with jnp.ndarray * Fix TGlobal for ONNX conversion + update docs * fix _make_global_fixed_block_ids and masked neg value * update flax model * style and quality * fix imports * remove load_tf_weights_in_longt5 from init and fix copies * add slow test for TGlobal model * typo fix * Drop obsolete is_parallelizable and one warning * Update __init__ files to fix repo-consistency * fix pipeline test * Fix some device placements * [wip]: Update tests -- need to generate summaries to update expected_summary * Fix quality * Update LongT5 model card * Update (slow) summarization tests * make style * rename checkpoitns * finish * fix flax tests Co-authored-by: phungvanduy <[email protected]> Co-authored-by: Sylvain Gugger <[email protected]> Co-authored-by: Patrick von Platen <[email protected]> Co-authored-by: patil-suraj <[email protected]>
5,721
0
301
150
70
31,289
108
transformers
20
src/transformers/models/longt5/modeling_longt5.py
Python
20
{ "docstring": "\n Adapted from Mesh Tensorflow:\n https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593\n\n Translate relative position to a bucket number for relative attention. The relative position is defined as\n memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to\n position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for\n small absolute relative_position and larger buckets for larger absolute relative_positions. All relative\n positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.\n This should allow for more graceful generalization to longer sequences than the model has been trained on\n\n Args:\n relative_position: an int32 Tensor\n bidirectional: a boolean - whether the attention is bidirectional\n num_buckets: an integer\n max_distance: an integer\n\n Returns:\n a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)\n ", "language": "en", "n_whitespaces": 265, "n_words": 132, "vocab_size": 88 }
https://github.com/huggingface/transformers.git
12
_file_lists
def _file_lists(load, form):
    if "env" in load:
        # "env" is not supported; Use "saltenv".
        load.pop("env")

    saltenv = load["saltenv"]
    actual_saltenv = saltenv
    if saltenv not in __opts__["file_roots"]:
        if "__env__" in __opts__["file_roots"]:
            log.debug(
                "salt environment '%s' maps to __env__ file_roots directory", saltenv
            )
            saltenv = "__env__"
        else:
            return []
    list_cachedir = os.path.join(__opts__["cachedir"], "file_lists", "roots")
    if not os.path.isdir(list_cachedir):
        try:
            os.makedirs(list_cachedir)
        except OSError:
            log.critical("Unable to make cachedir %s", list_cachedir)
            return []
    list_cache = os.path.join(
        list_cachedir, "{}.p".format(salt.utils.files.safe_filename_leaf(saltenv))
    )
    w_lock = os.path.join(
        list_cachedir, ".{}.w".format(salt.utils.files.safe_filename_leaf(saltenv))
    )
    cache_match, refresh_cache, save_cache = salt.fileserver.check_file_list_cache(
        __opts__, form, list_cache, w_lock
    )
    if cache_match is not None:
        return cache_match
    if refresh_cache:
        ret = {"files": set(), "dirs": set(), "empty_dirs": set(), "links": {}}
52e1d0b8116c86777c85cb6c3d940e2c04a518c4
14
roots.py
377
add __env__ substitution inside file and pillar root paths
54,610
0
322
366
70
216,493
108
salt
31
salt/fileserver/roots.py
Python
51
{ "docstring": "\n Return a dict containing the file lists for files, dirs, emtydirs and symlinks\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 13 }
https://github.com/saltstack/salt.git
4
_preprocess_conv2d_input
def _preprocess_conv2d_input(x, data_format, force_transpose=False):
    tf_data_format = "NHWC"
    if data_format == "channels_first":
        if not _has_nchw_support() or force_transpose:
            x = tf.compat.v1.transpose(x, (0, 2, 3, 1))  # NCHW -> NHWC
        else:
            tf_data_format = "NCHW"
    return x, tf_data_format
84afc5193d38057e2e2badf9c889ea87d80d8fbf
14
backend.py
99
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,102
0
83
59
29
269,467
34
keras
10
keras/backend.py
Python
8
{ "docstring": "Transpose and cast the input before the conv2d.\n\n Args:\n x: input tensor.\n data_format: string, `\"channels_last\"` or `\"channels_first\"`.\n force_transpose: Boolean. If True, the input will always be transposed\n from NCHW to NHWC if `data_format` is `\"channels_first\"`.\n If False, the transposition only occurs on CPU (GPU ops are\n assumed to support NCHW).\n\n Returns:\n A tensor.\n ", "language": "en", "n_whitespaces": 123, "n_words": 53, "vocab_size": 44 }
https://github.com/keras-team/keras.git
1
test_changelist_field_classes
def test_changelist_field_classes(self):
    Podcast.objects.create(name="Django Dose", release_date=datetime.date.today())
    response = self.client.get(reverse("admin:admin_views_podcast_changelist"))
    self.assertContains(response, '<th class="field-name">')
    self.assertContains(response, '<td class="field-release_date nowrap">')
    self.assertContains(response, '<td class="action-checkbox">')


try:
    import docutils
except ImportError:
    docutils = None


@unittest.skipUnless(docutils, "no docutils installed.")
@override_settings(ROOT_URLCONF="admin_views.urls")
@modify_settings(
    INSTALLED_APPS={"append": ["django.contrib.admindocs", "django.contrib.flatpages"]}
)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
@unittest.skipUnless(docutils, "no docutils installed.")
@override_settings(ROOT_URLCONF="admin_views.urls")
@modify_settings(
    INSTALLED_APPS={"append": ["django.contrib.admindocs", "django.contrib.flatpages"]}
)
11
tests.py
186
Refs #33476 -- Reformatted code with Black.
52,031
1
81
63
30
207,636
36
django
23
tests/admin_views/tests.py
Python
6
{ "docstring": "\n Cells of the change list table should contain the field name in their class attribute\n Refs #11195.\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 16 }
https://github.com/django/django.git
1
time
def time(self) -> npt.NDArray[np.object_]:
    # If the Timestamps have a timezone that is not UTC,
    # convert them into their i8 representation while
    # keeping their timezone and not using UTC
    timestamps = self._local_timestamps()

    return ints_to_pydatetime(timestamps, box="time")
521259299f7829da667ba39302ec77acedde9e5e
9
datetimes.py
56
DOC: Improve doc summaries in series.rst (#45237)
39,396
0
79
31
32
163,188
37
pandas
10
pandas/core/arrays/datetimes.py
Python
8
{ "docstring": "\n Returns numpy array of :class:`datetime.time` objects.\n\n The time part of the Timestamps.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 11 }
https://github.com/pandas-dev/pandas.git
10
putalpha
def putalpha(self, alpha):
    self._ensure_mutable()

    if self.mode not in ("LA", "PA", "RGBA"):
        # attempt to promote self to a matching alpha mode
        try:
            mode = getmodebase(self.mode) + "A"
            try:
                self.im.setmode(mode)
            except (AttributeError, ValueError) as e:
                # do things the hard way
                im = self.im.convert(mode)
                if im.mode not in ("LA", "PA", "RGBA"):
                    raise ValueError from e  # sanity check
                self.im = im
            self.pyaccess = None
            self.mode = self.im.mode
        except KeyError as e:
            msg = "illegal image mode"
            raise ValueError(msg) from e

    if self.mode in ("LA", "PA"):
        band = 1
    else:
        band = 3

    if isImageType(alpha):
        # alpha layer
        if alpha.mode not in ("1", "L"):
            msg = "illegal image mode"
            raise ValueError(msg)
        alpha.load()
        if alpha.mode == "1":
            alpha = alpha.convert("L")
    else:
        # constant alpha
        try:
            self.im.fillband(band, alpha)
        except (AttributeError, ValueError):
            # do things the hard way
            alpha = new("L", self.size, alpha)
        else:
            return

    self.im.putband(alpha.im, band)
2ae55ccbdad9c842929fb238ea1eb81d1f999024
16
Image.py
403
Improve exception traceback readability
70,078
0
670
233
78
243,706
142
Pillow
22
src/PIL/Image.py
Python
36
{ "docstring": "\n Adds or replaces the alpha layer in this image. If the image\n does not have an alpha layer, it's converted to \"LA\" or \"RGBA\".\n The new layer must be either \"L\" or \"1\".\n\n :param alpha: The new alpha layer. This can either be an \"L\" or \"1\"\n image having the same size as this image, or an integer or\n other color value.\n ", "language": "en", "n_whitespaces": 120, "n_words": 62, "vocab_size": 43 }
https://github.com/python-pillow/Pillow.git
1
run_query
def run_query(filters, extra_fields, extra_joins, extra_filters, as_dict=1):
    query = .format(
        extra_fields=extra_fields, extra_joins=extra_joins, extra_filters=extra_filters
    )

    gl_entries = frappe.db.sql(query, filters, as_dict=as_dict)

    return gl_entries
363ed9ccba3f848908113e6d728735a1c894aec8
9
datev.py
79
revert: BU Schlüssel (a21f76f)
13,598
0
12
52
18
64,293
20
erpnext
12
erpnext/regional/report/datev/datev.py
Python
58
{ "docstring": "\n\tGet a list of accounting entries.\n\n\tSelect GL Entries joined with Account and Party Account in order to get the\n\taccount numbers. Returns a list of accounting entries.\n\n\tArguments:\n\tfilters -- dict of filters to be passed to the sql query\n\tas_dict -- return as list of dicts [0,1]\n\t\n\t\tSELECT\n\n\t\t\t/* either debit or credit amount; always positive */\n\t\t\tcase gl.debit when 0 then gl.credit else gl.debit end as 'Umsatz (ohne Soll/Haben-Kz)',\n\n\t\t\t/* 'H' when credit, 'S' when debit */\n\t\t\tcase gl.debit when 0 then 'H' else 'S' end as 'Soll/Haben-Kennzeichen',\n\n\t\t\t/* account number or, if empty, party account number */\n\t\t\tacc.account_number as 'Konto',\n\n\t\t\t/* against number or, if empty, party against number */\n\t\t\t%(temporary_against_account_number)s as 'Gegenkonto (ohne BU-Schlüssel)',\n\n\t\t\t'' as 'BU-Schlüssel',\n\n\t\t\tgl.posting_date as 'Belegdatum',\n\t\t\tgl.voucher_no as 'Belegfeld 1',\n\t\t\tREPLACE(LEFT(gl.remarks, 60), '\\n', ' ') as 'Buchungstext',\n\t\t\tgl.voucher_type as 'Beleginfo - Art 1',\n\t\t\tgl.voucher_no as 'Beleginfo - Inhalt 1',\n\t\t\tgl.against_voucher_type as 'Beleginfo - Art 2',\n\t\t\tgl.against_voucher as 'Beleginfo - Inhalt 2',\n\t\t\tgl.party_type as 'Beleginfo - Art 3',\n\t\t\tgl.party as 'Beleginfo - Inhalt 3',\n\t\t\tcase gl.party_type when 'Customer' then 'Debitorennummer' when 'Supplier' then 'Kreditorennummer' else NULL end as 'Beleginfo - Art 4',\n\t\t\tpar.debtor_creditor_number as 'Beleginfo - Inhalt 4'\n\n\t\t\t{extra_fields}\n\n\t\tFROM `tabGL Entry` gl\n\n\t\t\t/* Kontonummer */\n\t\t\tLEFT JOIN `tabAccount` acc\n\t\t\tON gl.account = acc.name\n\n\t\t\tLEFT JOIN `tabParty Account` par\n\t\t\tON par.parent = gl.party\n\t\t\tAND par.parenttype = gl.party_type\n\t\t\tAND par.company = %(company)s\n\n\t\t\t{extra_joins}\n\n\t\tWHERE gl.company = %(company)s\n\t\tAND DATE(gl.posting_date) >= %(from_date)s\n\t\tAND DATE(gl.posting_date) <= %(to_date)s\n\n\t\t{extra_filters}\n\n\t\tORDER BY 'Belegdatum', gl.voucher_no", "language": "en", "n_whitespaces": 205, "n_words": 247, "vocab_size": 136 }
https://github.com/frappe/erpnext.git
5
get_preset_choices
def get_preset_choices() -> Dict:
    PRESETS_PATH = PRESETS_DIRECTORY / "etf" / "screener"
    PRESETS_PATH_DEFAULT = Path(__file__).parent / "presets"
    preset_choices = {
        filepath.name: filepath
        for filepath in PRESETS_PATH.iterdir()
        if filepath.suffix == ".ini"
    }
    preset_choices.update(
        {
            filepath.name: filepath
            for filepath in PRESETS_PATH_DEFAULT.iterdir()
            if filepath.suffix == ".ini"
        }
    )

    return preset_choices


@log_start_end(log=logger)
3780cffeced904d41e04e2adfe8c1f2a34337099
@log_start_end(log=logger)
11
screener_model.py
142
New path for presets (#2634) * modify path for etf, options, insider * fix small * change path for stocks screener * fix flake 8 * remove preset_path * black * fix * fix tests * fix tests * fix flake8 * change paths_helper load * put ns parser back Co-authored-by: minhhoang1023 <[email protected]>
85,436
1
138
75
30
285,809
47
OpenBBTerminal
17
openbb_terminal/etf/screener/screener_model.py
Python
20
{ "docstring": "\n Return a dict containing keys as name of preset and\n filepath as value\n ", "language": "en", "n_whitespaces": 23, "n_words": 13, "vocab_size": 12 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
2
tokenize
def tokenize(doc):
    return (tok.lower() for tok in re.findall(r"\w+", doc))


list(tokenize("This is a simple example, isn't it?"))

# %%
# We define an additional function that counts the (frequency of) occurrence of
# each token in a given document. It returns a frequency dictionary to be used
# by the vectorizers.

from collections import defaultdict
6ff214c46bacaf0385125bb47b4d8cb4a305fa3a
10
plot_hashing_vs_dict_vectorizer.py
68
DOC Rework plot_hashing_vs_dict_vectorizer.py example (#23266) Co-authored-by: Olivier Grisel <[email protected]> Co-authored-by: Julien Jerphanion <[email protected]>
76,052
0
54
26
47
260,068
54
scikit-learn
9
examples/text/plot_hashing_vs_dict_vectorizer.py
Python
2
{ "docstring": "Extract tokens from doc.\n\n This uses a simple regex that matches word characters to break strings\n into tokens. For a more principled approach, see CountVectorizer or\n TfidfVectorizer.\n ", "language": "en", "n_whitespaces": 39, "n_words": 27, "vocab_size": 26 }
https://github.com/scikit-learn/scikit-learn.git
1
get_first_menu_and_fail
def get_first_menu_and_fail() -> click.testing.Result:
    command = ["storage", "create"]
    runner = CliRunner()
    result = runner.invoke(app, command, input=INVALID_OPTION, catch_exceptions=False)

    return result
11638691240b7595c0d02542af506a96d344ae8b
9
test_storage_cli.py
72
Update tests
11,330
0
34
43
16
55,479
19
prefect
13
tests/cli/test_storage_cli.py
Python
8
{ "docstring": "\n Utility function to get output of first step of `prefect storage create` and exit\n ", "language": "en", "n_whitespaces": 21, "n_words": 14, "vocab_size": 13 }
https://github.com/PrefectHQ/prefect.git
5
get_cat2imgs
def get_cat2imgs(self) -> Dict[int, list]:
    classes = self.dataset.metainfo.get('CLASSES', None)
    if classes is None:
        raise ValueError('dataset metainfo must contain CLASSES')
    # sort the label index
    cat2imgs = {i: [] for i in range(len(classes))}
    for i in range(len(self.dataset)):
        cat_ids = set(self.dataset.get_cat_ids(i))
        for cat in cat_ids:
            cat2imgs[cat].append(i)
    return cat2imgs
36c1f477b273cb2fb0dea3c921ec267db877c039
13
class_aware_sampler.py
157
Refactor OpenImages.
70,678
0
143
97
37
245,154
46
mmdetection
19
mmdet/datasets/samplers/class_aware_sampler.py
Python
17
{ "docstring": "Get a dict with class as key and img_ids as values.\n\n Returns:\n dict[int, list]: A dict of per-label image list,\n the item of the dict indicates a label index,\n corresponds to the image index that contains the label.\n ", "language": "en", "n_whitespaces": 85, "n_words": 38, "vocab_size": 29 }
https://github.com/open-mmlab/mmdetection.git
2
print_telemetry_report
def print_telemetry_report():
    if is_telemetry_enabled():
        user_id = _get_or_create_user_id()
        meta_data = _get_or_create_telemetry_meta_data()
        print({**{"user_id": user_id}, **meta_data})
    else:
        print("Telemetry is disabled.")
ac5617e757e9ace6f30b7291686d9dbbc339f433
14
telemetry.py
76
Add basic telemetry features (#2314) * add basic telemetry features * change pipeline_config to _component_config * Update Documentation & Code Style * add super().__init__() calls to error classes * make posthog mock work with python 3.7 * Update Documentation & Code Style * update link to docs web page * log exceptions, send event for raised HaystackErrors, refactor Path(CONFIG_PATH) * add comment on send_event in BaseComponent.init() and fix mypy * mock NonPrivateParameters and fix pylint undefined-variable * Update Documentation & Code Style * check model path contains multiple / * add test for writing to file * add test for en-/disable telemetry * Update Documentation & Code Style * merge file deletion methods and ignore pylint global statement * Update Documentation & Code Style * set env variable in demo to activate telemetry * fix mock of HAYSTACK_TELEMETRY_ENABLED * fix mypy and linter * add CI as env variable to execution contexts * remove threading, add test for custom error event * Update Documentation & Code Style * simplify config/log file deletion * add test for final event being sent * force writing config file in test * make test compatible with python 3.7 * switch to posthog production server * Update Documentation & Code Style Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
74,974
0
54
40
16
256,957
17
haystack
7
haystack/telemetry.py
Python
7
{ "docstring": "\n Prints the user id and the meta data that are sent in events\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 12 }
https://github.com/deepset-ai/haystack.git
1
test_edgeql_for_in_computable_05
async def test_edgeql_for_in_computable_05(self):
    await self.assert_query_result(
        r,
        [{"select_deck": tb.bag(["Bog monster", "Dragon", "Giant turtle", "Imp"])}],
    )

    # This one caused a totally nonsense type error.
    await self.assert_query_result(
        r,
        [{"select_deck": tb.bag(["Bog monster", "Dragon", "Giant turtle", "Imp"])}],
    )
26be7d28bdb4eb96c888e373e08f46e6b85711e3
15
test_edgeql_for.py
117
Add a `bag` type that tells assert_query_result to ignore order (#3314) assert_query_result currently supports using sets to ignore order, but that doesn't work for objects, which can't be hashed or sorted. There is a system for specifying a sort key for internal data, but it is way clunkier than just saying we don't care about the order. I converted some places that were using sort= to use this.
41,673
0
138
65
23
176,083
34
edgedb
5
tests/test_edgeql_for.py
Python
29
{ "docstring": "\n SELECT User {\n select_deck := (\n FOR letter IN {'X'}\n UNION (\n (SELECT .deck.name)\n )\n )\n } FILTER .name = 'Alice';\n \n SELECT User {\n select_deck := (\n FOR letter IN 'X'\n UNION (\n ((SELECT .deck).name)\n )\n )\n } FILTER .name = 'Alice';\n ", "language": "en", "n_whitespaces": 394, "n_words": 42, "vocab_size": 22 }
https://github.com/edgedb/edgedb.git
3
stop
async def stop(self, block=False) -> None:
    self._stop()

    if block:
        while self._is_running:
            await asyncio.sleep(0.1)
b7d5fc52e4235370e368e96da218ca49bd32edcc
12
loop_service.py
56
Change default behavior to block=false
11,739
0
60
34
13
58,181
13
prefect
7
src/prefect/orion/services/loop_service.py
Python
15
{ "docstring": "\n Gracefully stops a running LoopService and optionally blocks until the\n service stops.\n\n Args:\n block (bool): if True, blocks until the service is\n finished running. Otherwise it requests a stop and returns but\n the service may still be running a final loop.\n\n ", "language": "en", "n_whitespaces": 111, "n_words": 41, "vocab_size": 31 }
https://github.com/PrefectHQ/prefect.git
30
run
def run(self):
    display.debug("in run() - task %s" % self._task._uuid)

    try:
        try:
            items = self._get_loop_items()
        except AnsibleUndefinedVariable as e:
            # save the error raised here for use later
            items = None
            self._loop_eval_error = e

        if items is not None:
            if len(items) > 0:
                item_results = self._run_loop(items)

                # create the overall result item
                res = dict(results=item_results)

                # loop through the item results and set the global changed/failed/skipped result flags based on any item.
                res['skipped'] = True
                for item in item_results:
                    if 'changed' in item and item['changed'] and not res.get('changed'):
                        res['changed'] = True
                    if res['skipped'] and ('skipped' not in item or ('skipped' in item and not item['skipped'])):
                        res['skipped'] = False
                    if 'failed' in item and item['failed']:
                        item_ignore = item.pop('_ansible_ignore_errors')
                        if not res.get('failed'):
                            res['failed'] = True
                            res['msg'] = 'One or more items failed'
                        self._task.ignore_errors = item_ignore
                    elif self._task.ignore_errors and not item_ignore:
                        self._task.ignore_errors = item_ignore

                    # ensure to accumulate these
                    for array in ['warnings', 'deprecations']:
                        if array in item and item[array]:
                            if array not in res:
                                res[array] = []
                            if not isinstance(item[array], list):
                                item[array] = [item[array]]
                            res[array] = res[array] + item[array]
                            del item[array]

                if not res.get('failed', False):
                    res['msg'] = 'All items completed'
                if res['skipped']:
                    res['msg'] = 'All items skipped'
            else:
                res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
        else:
            display.debug("calling self._execute()")
            res = self._execute()
            display.debug("_execute() done")

        # make sure changed is set in the result, if it's not present
        if 'changed' not in res:
            res['changed'] = False
a0dede545864b9835f84315b4b3ea17dd1eafe04
def run(self):
    '''
    The main executor entrypoint, where we determine if the specified
    task requires looping and either runs the task with self._run_loop()
    or self._execute(). After that, the returned results are parsed and
    returned as a dict.
    '''

    display.debug("in run() - task %s" % self._task._uuid)

    try:
        try:
            items = self._get_loop_items()
        except AnsibleUndefinedVariable as e:
            # save the error raised here for use later
            items = None
            self._loop_eval_error = e

        if items is not None:
            if len(items) > 0:
                item_results = self._run_loop(items)

                # create the overall result item
                res = dict(results=item_results)

                # loop through the item results and set the global changed/failed/skipped result flags based on any item.
                res['skipped'] = True
                for item in item_results:
                    if 'changed' in item and item['changed'] and not res.get('changed'):
                        res['changed'] = True
                    if res['skipped'] and ('skipped' not in item or ('skipped' in item and not item['skipped'])):
                        res['skipped'] = False
                    if 'failed' in item and item['failed']:
                        item_ignore = item.pop('_ansible_ignore_errors')
                        if not res.get('failed'):
                            res['failed'] = True
                            res['msg'] = 'One or more items failed'
                        self._task.ignore_errors = item_ignore
                    elif self._task.ignore_errors and not item_ignore:
                        self._task.ignore_errors = item_ignore

                    # ensure to accumulate these
                    for array in ['warnings', 'deprecations']:
                        if array in item and item[array]:
                            if array not in res:
                                res[array] = []
                            if not isinstance(item[array], list):
                                item[array] = [item[array]]
                            res[array] = res[array] + item[array]
                            del item[array]

                if not res.get('failed', False):
                    res['msg'] = 'All items completed'
                if res['skipped']:
                    res['msg'] = 'All items skipped'
            else:
                res = dict(changed=False, skipped=True, skipped_reason='No items in the list', results=[])
        else:
            display.debug("calling self._execute()")
            res = self._execute()
            display.debug("_execute() done")

        # make sure changed is set in the result, if it's not present
        if 'changed' not in res:
            res['changed'] = False
20
task_executor.py
625
ansible-connection verboistery (#77509) * ansible-connection verboistery for cli, just use normal parser creation this also adds --help, but that seems fine also some error cleanup Co-authored-by: Nathaniel Case <[email protected]>
78,689
1
1,283
501
121
267,014
234
ansible
29
lib/ansible/executor/task_executor.py
Python
63
{ "docstring": "\n The main executor entrypoint, where we determine if the specified\n task requires looping and either runs the task with self._run_loop()\n or self._execute(). After that, the returned results are parsed and\n returned as a dict.\n ", "language": "en", "n_whitespaces": 70, "n_words": 34, "vocab_size": 29 }
https://github.com/ansible/ansible.git
2
test_orion_full_migration_works_with_data_in_db
async def test_orion_full_migration_works_with_data_in_db(sample_db_data):
    try:
        await run_sync_in_worker_thread(alembic_downgrade)
    finally:
        await run_sync_in_worker_thread(alembic_upgrade)
36e7e0838aeaffc9492b330297e4905f3ab4b11f
11
test_migrations.py
40
code review revisions pt3
10,770
0
32
20
8
53,285
9
prefect
5
tests/orion/database/test_migrations.py
Python
5
{ "docstring": "\n Tests that downgrade migrations work when the database has data in it.\n ", "language": "en", "n_whitespaces": 19, "n_words": 12, "vocab_size": 12 }
https://github.com/PrefectHQ/prefect.git
2
test_execute_in_batch_with_few_requests
def test_execute_in_batch_with_few_requests(self, api, batch, mock_batch_responses):
    mock_batch_responses(
        [
            {
                "json": [{"body": json.dumps({"name": "creative 1"}), "code": 200, "headers": {}}] * 3,
            }
        ]
    )

    stream = SomeTestStream(api=api)
    requests = [FacebookRequest("node", "GET", "endpoint") for _ in range(5)]

    result = list(stream.execute_in_batch(requests))

    assert batch.add_request.call_count == len(requests)
    batch.execute.assert_called_once()
    assert len(result) == 3
6cd20e6879a42053692c18a16906eb410aad85db
19
test_base_streams.py
190
🐛 Source FB Marketing: fix `execute_in_batch` when batch is bigger than 50 (#10588) * fix execute_in_batch * add tests * fix pre-commit config Co-authored-by: Sherif A. Nada <[email protected]> Co-authored-by: Eugene Kulak <[email protected]> Co-authored-by: Sherif A. Nada <[email protected]>
600
0
180
112
42
3,985
46
airbyte
21
airbyte-integrations/connectors/source-facebook-marketing/unit_tests/test_base_streams.py
Python
14
{ "docstring": "Should execute single batch if number of requests less than MAX_BATCH_SIZE.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/airbytehq/airbyte.git
3
get_multi_io_subclass_model
def get_multi_io_subclass_model(use_bn=False, use_dp=False, num_classes=(2, 3)):
    shared_layer = keras.layers.Dense(32, activation='relu')

    branch_a = [shared_layer]
    if use_dp:
        branch_a.append(keras.layers.Dropout(0.5))
    branch_a.append(keras.layers.Dense(num_classes[0], activation='softmax'))

    branch_b = [shared_layer]
    if use_bn:
        branch_b.append(keras.layers.BatchNormalization())
    branch_b.append(keras.layers.Dense(num_classes[1], activation='softmax'))

    model = (
        test_utils._MultiIOSubclassModel(  # pylint: disable=protected-access
            branch_a, branch_b, name='test_model'))
    return model
b96518a22bfd92a29811e507dec0b34248a8a3f5
12
model_subclassing_test_util.py
214
- Consolidate disparate test-related files into a single testing_infra folder. - Cleanup TODO related to removing testing infra as a dependency of the Keras target. - Standardize import naming: there is now only "test_combinations" for test combinations, and "test_utils" for utilities. The TF utilities module "test_util" is now always imported as "tf_test_utils" to avoid confusion. PiperOrigin-RevId: 426773173
79,770
0
69
135
30
268,931
37
keras
18
keras/tests/model_subclassing_test_util.py
Python
15
{ "docstring": "Creates MultiIOModel for the tests of subclass model.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/keras-team/keras.git
2
inertia
def inertia(frame, ixx, iyy, izz, ixy=0, iyz=0, izx=0):
    if not isinstance(frame, ReferenceFrame):
        raise TypeError('Need to define the inertia in a frame')
    ol = sympify(ixx) * (frame.x | frame.x)
    ol += sympify(ixy) * (frame.x | frame.y)
    ol += sympify(izx) * (frame.x | frame.z)
    ol += sympify(ixy) * (frame.y | frame.x)
    ol += sympify(iyy) * (frame.y | frame.y)
    ol += sympify(iyz) * (frame.y | frame.z)
    ol += sympify(izx) * (frame.z | frame.x)
    ol += sympify(iyz) * (frame.z | frame.y)
    ol += sympify(izz) * (frame.z | frame.z)
    return ol
65be461082dda54c8748922f9c29a19af1279fe1
10
functions.py
286
Remove abbreviations in documentation
48,478
0
129
184
39
197,335
86
sympy
16
sympy/physics/mechanics/functions.py
Python
13
{ "docstring": "Simple way to create inertia Dyadic object.\n\n Explanation\n ===========\n\n If you do not know what a Dyadic is, just treat this like the inertia\n tensor. Then, do the easy thing and define it in a body-fixed frame.\n\n Parameters\n ==========\n\n frame : ReferenceFrame\n The frame the inertia is defined in\n ixx : Sympifyable\n the xx element in the inertia dyadic\n iyy : Sympifyable\n the yy element in the inertia dyadic\n izz : Sympifyable\n the zz element in the inertia dyadic\n ixy : Sympifyable\n the xy element in the inertia dyadic\n iyz : Sympifyable\n the yz element in the inertia dyadic\n izx : Sympifyable\n the zx element in the inertia dyadic\n\n Examples\n ========\n\n >>> from sympy.physics.mechanics import ReferenceFrame, inertia\n >>> N = ReferenceFrame('N')\n >>> inertia(N, 1, 2, 3)\n (N.x|N.x) + 2*(N.y|N.y) + 3*(N.z|N.z)\n\n ", "language": "en", "n_whitespaces": 240, "n_words": 131, "vocab_size": 73 }
https://github.com/sympy/sympy.git
1
hashes_to_frame
def hashes_to_frame(self) -> Dict[str, Dict[str, int]]:
    return self._legacy.hashes_to_frame
e5356a417e7c2124e75c4a2994ed604fc0a3cc74
7
alignments.py
37
Alignments update: - Store face embeddings in PNG header when sorting - typing + refactor - Update alignments keys for 'identity' and 'video_meta' + bump to v2.3 - General typing fixes
21,093
0
22
24
7
101,689
8
faceswap
6
lib/align/alignments.py
Python
10
{ "docstring": " dict: The SHA1 hash of the face mapped to the frame(s) and face index within the frame\n that the hash corresponds to.\n\n Notes\n -----\n This method is depractated and exists purely for updating legacy hash based alignments\n to new png header storage in :class:`lib.align.update_legacy_png_header`.\n ", "language": "en", "n_whitespaces": 87, "n_words": 44, "vocab_size": 36 }
https://github.com/deepfakes/faceswap.git
1
start
async def start(self):
    with ImportExtensions(required=True):
        import aiohttp

    self.session = aiohttp.ClientSession(**self._session_kwargs)
    await self.session.__aenter__()
    return self
65d6873500f008dafba193340f8731bca39bc7dc
10
helper.py
71
feat: expose kwargs to Clients (#4947)
2,350
0
60
39
14
12,595
14
jina
9
jina/clients/base/helper.py
Python
6
{ "docstring": "Create ClientSession and enter context\n\n :return: self\n ", "language": "en", "n_whitespaces": 21, "n_words": 7, "vocab_size": 7 }
https://github.com/jina-ai/jina.git
6
evaluate_accuracy_gpu
def evaluate_accuracy_gpu(net, data_iter, device=None):
    if isinstance(net, nn.Module):
        net.eval()  # Set the model to evaluation mode
        if not device:
            device = next(iter(net.parameters())).device
    # No. of correct predictions, no. of predictions
    metric = d2l.Accumulator(2)

    with torch.no_grad():
        for X, y in data_iter:
            if isinstance(X, list):
                # Required for BERT Fine-tuning (to be covered later)
                X = [x.to(device) for x in X]
            else:
                X = X.to(device)
            y = y.to(device)
            metric.add(d2l.accuracy(net(X), y), d2l.size(y))
    return metric[0] / metric[1]
2b1acfbfe84b6c9c4756a615620f9b376d48085a
17
torch.py
228
JAX: Fix CI bug; enable build all sections
74,503
0
212
141
58
254,229
72
d2l-en
24
d2l/torch.py
Python
15
{ "docstring": "Compute the accuracy for a model on a dataset using a GPU.\n\n Defined in :numref:`sec_utils`", "language": "en", "n_whitespaces": 17, "n_words": 15, "vocab_size": 13 }
https://github.com/d2l-ai/d2l-en.git
3
get_classes_from_csv
def get_classes_from_csv(self, label_file):
    index_list = []
    classes_names = []
    with open(label_file, 'r') as f:
        reader = csv.reader(f)
        for line in reader:
            self.cat2label[line[0]] = line[1]
            classes_names.append(line[1])
            index_list.append(line[0])
    self.index_dict = {index: i for i, index in enumerate(index_list)}
    return classes_names
1516986a616fee8bb741d0ab2be40683045efccd
12
openimages.py
147
[Feature] Support OpenImages Dataset (#6331) * [Feature] support openimage group of eval * [Feature] support openimage group of eval * support openimage dataset * support openimage challenge dataset * fully support OpenImages-V6 and OpenImages Challenge 2019 * Fix some logic error * update config file * fix get data_infos error * fully support OpenImages evaluation * update OpenImages config files * [Feature] support OpenImages datasets * fix bug * support load image metas from pipeline * fix bug * fix get classes logic error * update code * support get image metas * support openimags * support collect image metas * support Open Images * fix openimages logic * minor fix * add a new function to compute openimages tpfp * minor fix * fix ci error * minor fix * fix indication * minor fix * fix returns * fix returns * fix returns * fix returns * fix returns * minor fix * update readme * support loading image level labels and fix some logic * minor fix * minor fix * add class names * minor fix * minor fix * minor fix * add openimages test unit * minor fix * minor fix * fix test unit * minor fix * fix logic error * minor fix * fully support openimages * minor fix * fix docstring * fix docstrings in readthedocs * update get image metas script * label_description_file -> label_file * update openimages readme * fix test unit * fix test unit * minor fix * update readme file * Update get_image_metas.py
70,184
0
146
91
29
243,994
37
mmdetection
16
mmdet/datasets/openimages.py
Python
11
{ "docstring": "Get classes name from file.\n\n Args:\n label_file (str): File path of the label description file that\n maps the classes names in MID format to their short\n descriptions.\n\n Returns:\n list[str]: Class name of OpenImages.\n ", "language": "en", "n_whitespaces": 106, "n_words": 33, "vocab_size": 29 }
https://github.com/open-mmlab/mmdetection.git
1
_get_dated_items
def _get_dated_items(self, date):
    lookup_kwargs = self._make_single_date_lookup(date)
    qs = self.get_dated_queryset(**lookup_kwargs)

    return (
        None,
        qs,
        {
            "day": date,
            "previous_day": self.get_previous_day(date),
            "next_day": self.get_next_day(date),
            "previous_month": self.get_previous_month(date),
            "next_month": self.get_next_month(date),
        },
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
10
dates.py
123
Refs #33476 -- Reformatted code with Black.
51,763
0
180
75
25
206,862
26
django
11
django/views/generic/dates.py
Python
14
{ "docstring": "\n Do the actual heavy lifting of getting the dated items; this accepts a\n date object so that TodayArchiveView can be trivial.\n ", "language": "en", "n_whitespaces": 43, "n_words": 21, "vocab_size": 20 }
https://github.com/django/django.git
4
perform_action
def perform_action(action_id, project, queryset, user, **kwargs):
    if action_id not in settings.DATA_MANAGER_ACTIONS:
        raise DataManagerException("Can't find '" + action_id + "' in registered actions")

    action = settings.DATA_MANAGER_ACTIONS[action_id]

    # check user permissions for this action
    if not check_permissions(user, action):
        raise DRFPermissionDenied(f'Action is not allowed for the current user: {action["id"]}')

    try:
        result = action['entry_point'](project, queryset, **kwargs)
    except Exception as e:
        text = 'Error while perform action: ' + action_id + '\n' + tb.format_exc()
        logger.error(text, extra={'sentry_skip': True})
        raise e

    return result


register_actions_from_dir('data_manager.actions', os.path.dirname(__file__))
1c4328c5a8b10ee20ac4328ce30612d106350699
14
__init__.py
212
feat: DEV-1205: Add task.updated_at column (#1784) * Update task.updated_at on annotation update (DEV-1205) * Fix set updated_at on annotation delete (DEV-1205) * Set update_at for every dm action (DEV-1205) * Stop changing updated_at on actions (DEV-1205) * Update experimental.py Co-authored-by: Max Tkachenko <[email protected]> Co-authored-by: niklub <[email protected]>
42,459
0
143
107
60
177,606
78
label-studio
26
label_studio/data_manager/actions/__init__.py
Python
13
{ "docstring": " Perform action using entry point from actions\n ", "language": "en", "n_whitespaces": 11, "n_words": 7, "vocab_size": 7 }
https://github.com/heartexlabs/label-studio.git
4
fit
def fit(self) -> ResultGrid:
    if not self._is_ray_client:
        try:
            return self._local_tuner.fit()
        except Exception as e:
            raise TuneError(
                f"Tune run failed. "
                f'Please use tuner = Tuner.restore("'
                f'{self._local_tuner.get_experiment_checkpoint_dir()}") to resume.'
            ) from e
    else:
        experiment_checkpoint_dir = ray.get(
            self._remote_tuner.get_experiment_checkpoint_dir.remote()
        )
        try:
            return ray.get(self._remote_tuner.fit.remote())
        except Exception as e:
            raise TuneError(
                f"Tune run failed. "
                f'Please use tuner = Tuner.restore("'
                f'{experiment_checkpoint_dir}") to resume.'
            ) from e
5c500f6308dce526c50a5230029fb4909b492a35
18
tuner.py
175
[docs/air] Fix up some minor docstrings (#28361)
28,451
0
367
93
37
127,486
61
ray
14
python/ray/tune/tuner.py
Python
41
{ "docstring": "Executes hyperparameter tuning job as configured and returns result.\n\n Failure handling:\n For the kind of exception that happens during the execution of a trial,\n one may inspect it together with stacktrace through the returned result grid.\n See ``ResultGrid`` for reference. Each trial may fail up to a certain number.\n This is configured by ``RunConfig.FailureConfig.max_failures``.\n\n Exception that happens beyond trials will be thrown by this method as well.\n In such cases, there will be instruction like the following printed out\n at the end of console output to inform users on how to resume.\n\n Please use tuner = Tuner.restore(\"~/ray_results/tuner_resume\")\n to resume.\n\n Raises:\n RayTaskError: If user-provided trainable raises an exception\n TuneError: General Ray Tune error.\n ", "language": "en", "n_whitespaces": 218, "n_words": 112, "vocab_size": 92 }
https://github.com/ray-project/ray.git
1
test_task_states_for_dag_run_when_dag_run_not_exists
def test_task_states_for_dag_run_when_dag_run_not_exists(self):
    with pytest.raises(DagRunNotFound):
        default_date2 = timezone.datetime(2016, 1, 9)
        task_command.task_states_for_dag_run(
            self.parser.parse_args(
                [
                    'tasks',
                    'states-for-dag-run',
                    'not_exists_dag',
                    default_date2.isoformat(),
                    '--output',
                    "json",
                ]
            )
        )
f352ee63a5d09546a7997ba8f2f8702a1ddb4af7
14
test_task_command.py
97
Replaced all days_ago functions with datetime functions (#23237) Co-authored-by: Dev232001 <[email protected]>
7,671
0
274
56
20
42,640
21
airflow
13
tests/cli/commands/test_task_command.py
Python
15
{ "docstring": "\n task_states_for_dag_run should return an AirflowException when invalid dag id is passed\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
https://github.com/apache/airflow.git
3
yeardatescalendar
def yeardatescalendar(self, year, width=3):
    months = [
        self.monthdatescalendar(year, i)
        for i in range(January, January+12)
    ]
    return [months[i:i+width] for i in range(0, len(months), width)]
8198943edd73a363c266633e1aa5b2a9e9c9f526
11
calendar.py
88
add python 3.10.4 for windows
56,289
0
74
60
20
221,242
24
XX-Net
10
python3.10.4/Lib/calendar.py
Python
6
{ "docstring": "\n Return the data for the specified year ready for formatting. The return\n value is a list of month rows. Each month row contains up to width months.\n Each month contains between 4 and 6 weeks and each week contains 1-7\n days. Days are datetime.date objects.\n ", "language": "en", "n_whitespaces": 81, "n_words": 45, "vocab_size": 37 }
https://github.com/XX-net/XX-Net.git
1
process_downloaded_dataset
def process_downloaded_dataset(self):
    df = pd.read_csv(os.path.join(self.raw_dataset_path, self.csv_filename))
    df[SPLIT] = np.random.choice(3, len(df), p=(0.7, 0.1, 0.2)).astype(np.int8)
    makedirs(self.processed_temp_path, exist_ok=True)
    df.to_csv(os.path.join(self.processed_temp_path, self.csv_filename), index=False)
    rename(self.processed_temp_path, self.processed_dataset_path)
c13bc2cf1443ce9339cde0eb5e0d2b568af341f7
12
__init__.py
159
Add medical no-show appointments dataset (#2387) Co-authored-by: Daniel Treiman <[email protected]>
1,281
0
62
111
19
7,853
20
ludwig
25
ludwig/datasets/noshow_appointments/__init__.py
Python
6
{ "docstring": "The final method where we create a concatenated CSV file with both training and test data.", "language": "en", "n_whitespaces": 15, "n_words": 16, "vocab_size": 16 }
https://github.com/ludwig-ai/ludwig.git
2
template
def template(self) -> str | None:
    if self.value_template is not None:
        return self.value_template.template
    return None
dd1463da287f591652e47b00eee0c5b77f5f5b7c
9
helpers.py
43
Refactor bayesian observations using dataclass (#79590) * refactor * remove some changes * remove typehint * improve codestyle * move docstring to comment * < 88 chars * avoid short var names * more readable * fix rename * Update homeassistant/components/bayesian/helpers.py Co-authored-by: epenet <[email protected]> * Update homeassistant/components/bayesian/binary_sensor.py Co-authored-by: epenet <[email protected]> * Update homeassistant/components/bayesian/binary_sensor.py Co-authored-by: epenet <[email protected]> * no intermediate * comment why set before list Co-authored-by: epenet <[email protected]>
87,644
0
47
26
13
288,487
15
core
4
homeassistant/components/bayesian/helpers.py
Python
5
{ "docstring": "Not all observations have templates and we want to get template strings.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
https://github.com/home-assistant/core.git
2
get_capacity
def get_capacity(self):
    if self.is_container_group:
        return self.max_forks
    return self.get_instance_capacity()
e403c603d68def3b4cc6e4b253092343d646da1a
8
task_manager_models.py
37
use task manager models more consistently in serializer
17,333
0
40
21
7
82,225
8
awx
5
awx/main/scheduler/task_manager_models.py
Python
4
{ "docstring": "This reports any type of capacity, including that of container group jobs.\n\n Container groups don't really have capacity, but if they have max_forks set,\n we can interperet that as how much capacity the user has defined them to have.\n ", "language": "en", "n_whitespaces": 60, "n_words": 39, "vocab_size": 35 }
https://github.com/ansible/awx.git
8
create_trial_from_spec
def create_trial_from_spec(spec, output_path, parser, **trial_kwargs):
    global _cached_pgf

    spec = spec.copy()
    resources = spec.pop("resources_per_trial", None)

    try:
        args, _ = parser.parse_known_args(to_argv(spec))
    except SystemExit:
        raise TuneError("Error parsing args, see above message", spec)

    if resources:
        if isinstance(resources, PlacementGroupFactory):
            trial_kwargs["placement_group_factory"] = resources
        else:
            # This will be converted to a placement group factory in the
            # Trial object constructor
            try:
                trial_kwargs["resources"] = json_to_resources(resources)
            except (TuneError, ValueError) as exc:
                raise TuneError("Error parsing resources_per_trial", resources) from exc

    remote_checkpoint_dir = spec.get("remote_checkpoint_dir")

    sync_config = spec.get("sync_config", SyncConfig())
    if sync_config.syncer is None or isinstance(sync_config.syncer, str):
        sync_function_tpl = sync_config.syncer
    elif not isinstance(sync_config.syncer, str):
        # If a syncer was specified, but not a template, it is a function.
        # Functions cannot be used for trial checkpointing on remote nodes,
        # so we set the remote checkpoint dir to None to disable this.
        sync_function_tpl = None
        remote_checkpoint_dir = None
    else:
        sync_function_tpl = None  # Auto-detect

    return Trial(
        # Submitting trial via server in py2.7 creates Unicode, which does not
        # convert to string in a straightforward manner.
        trainable_name=spec["run"],
        # json.load leads to str -> unicode in py2.7
        config=spec.get("config", {}),
        local_dir=os.path.join(spec["local_dir"], output_path),
        # json.load leads to str -> unicode in py2.7
        stopping_criterion=spec.get("stop", {}),
        remote_checkpoint_dir=remote_checkpoint_dir,
        sync_function_tpl=sync_function_tpl,
        checkpoint_freq=args.checkpoint_freq,
        checkpoint_at_end=args.checkpoint_at_end,
        sync_on_checkpoint=sync_config.sync_on_checkpoint,
        keep_checkpoints_num=args.keep_checkpoints_num,
        checkpoint_score_attr=args.checkpoint_score_attr,
        export_formats=spec.get("export_formats", []),
        # str(None) doesn't create None
        restore_path=spec.get("restore"),
        trial_name_creator=spec.get("trial_name_creator"),
        trial_dirname_creator=spec.get("trial_dirname_creator"),
        log_to_file=spec.get("log_to_file"),
        # str(None) doesn't create None
        max_failures=args.max_failures,
        **trial_kwargs
    )
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
16
config_parser.py
511
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
29,682
0
587
313
140
132,094
214
ray
46
python/ray/tune/config_parser.py
Python
45
{ "docstring": "Creates a Trial object from parsing the spec.\n\n Args:\n spec (dict): A resolved experiment specification. The args here\n should correspond to the command line flags in ray.tune.config_parser.\n output_path (str): A specific output path within the local_dir.\n Typically the name of the experiment.\n parser (ArgumentParser): An argument parser object from\n make_parser.\n trial_kwargs: Extra keyword arguments used in instantiating the Trial.\n\n Returns:\n A trial object with corresponding parameters to the specification.\n ", "language": "en", "n_whitespaces": 159, "n_words": 71, "vocab_size": 55 }
https://github.com/ray-project/ray.git
1
test_spectralbiclustering_parameter_validation
def test_spectralbiclustering_parameter_validation(params, type_err, err_msg):
    data = np.arange(25).reshape((5, 5))
    model = SpectralBiclustering(**params)
    with pytest.raises(type_err, match=err_msg):
        model.fit(data)


@pytest.mark.parametrize("est", (SpectralBiclustering(), SpectralCoclustering()))
604bc5c605b9587526794de2653624b19c6a8031
@pytest.mark.parametrize("est", (SpectralBiclustering(), SpectralCoclustering()))
10
test_bicluster.py
117
MNT use check_scalar in SpectralBiClustering and SpectralCoClustering (#20817) Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
75,345
1
36
52
17
258,639
18
scikit-learn
17
sklearn/cluster/tests/test_bicluster.py
Python
5
{ "docstring": "Check parameters validation in `SpectralBiClustering`", "language": "en", "n_whitespaces": 4, "n_words": 5, "vocab_size": 5 }
https://github.com/scikit-learn/scikit-learn.git
6
categorical_mapping
def categorical_mapping(self, data, palette, order):

    # -- Identify the order and name of the levels
    levels = categorical_order(data, order)
    n_colors = len(levels)

    # -- Identify the set of colors to use
    if isinstance(palette, dict):
        missing = set(levels) - set(palette)
        if any(missing):
            err = "The palette dictionary is missing keys: {}"
            raise ValueError(err.format(missing))
        lookup_table = palette
    else:
        if palette is None:
            if n_colors <= len(get_color_cycle()):
                colors = color_palette(None, n_colors)
            else:
                colors = color_palette("husl", n_colors)
        elif isinstance(palette, list):
            colors = self._check_list_length(levels, palette, "palette")
        else:
            colors = color_palette(palette, n_colors)

        lookup_table = dict(zip(levels, colors))

    return levels, lookup_table
563e96d3be1eaee8db8dfbccf7eed1f1c66dfd31
17
_oldcore.py
240
Downgrade exception on mapping list length mismatch to warning (#2856) * Downgrade exception on mapping list length mismatch to warning * Lint * Fix pairplot test * Set stacklevel to report warning in user code
7,453
0
354
147
57
41,880
93
seaborn
24
seaborn/_oldcore.py
Python
21
{ "docstring": "Determine colors when the hue mapping is categorical.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/mwaskom/seaborn.git
4
has_all_dags_access
def has_all_dags_access(self, user):
    if not user:
        user = g.user
    return (
        self._has_role(['Admin', 'Viewer', 'Op', 'User'], user)
        or self.has_access(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG, user)
        or self.has_access(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG, user)
    )
3e9828022b03b60d9e112f1f64340a528c8407e3
12
security.py
105
Simplify fab has access lookup (#19294) * Use FAB models. * Remove incorrect conversions to new permission naming scheme. * Fix missing FAB renames. * Remove unused FAB compatibility fixes in models.py. * Set perms directly on user objects. * Set perms properties on User model. * Rename missed old naming scheme conversion. * Remove unused imports. * Remove unused imports. * Remeve get_user_roles() method. * Make permissions eagerload. * Remove unused imports. * Clarify query params. * Modify sort logic so MSSQL passes. * Add text modifier to order_by values. * Remove calls to get_*_dags. * Add back execution_date * Add back comma to match rest of file. * Remove unused permission functions. * Fix failing tests. * Pass user object to current_app.appbuilder.sm.has_all_dags_access. * Remove attempts to fix query. * Update the api_connexion query builders. * Add typing. * Apply sorts directly to model objects. * Apply sorts directly to model objects. * Standardize custom sort code. * Code review * Augment xcom docs (#20755) * Fix relationship join bug in FAB/SecurityManager with SQLA 1.4 (#21296) This is fixed in SQLA 1.4.19, but the fix makes the intent clearer here anyway. * Docs: Fix task order in overview example (#21282) * Update stat_name_handler documentation (#21298) Previously stat_name_handler was under the scheduler section of the configuration but it was moved to the metrics section since 2.0.0. * Update recipe for Google Cloud SDK (#21268) * Use FAB models. * Remove incorrect conversions to new permission naming scheme. * Fix missing FAB renames. * Remove unused FAB compatibility fixes in models.py. * Set perms directly on user objects. * Set perms properties on User model. * Rename missed old naming scheme conversion. * Remove unused imports. * Remove unused imports. * Remeve get_user_roles() method. * Make permissions eagerload. * Remove unused imports. * Clarify query params. * Modify sort logic so MSSQL passes. * Add text modifier to order_by values. * Remove calls to get_*_dags. * Add back execution_date * Add back comma to match rest of file. * Remove unused permission functions. * Fix failing tests. * Pass user object to current_app.appbuilder.sm.has_all_dags_access. * Remove attempts to fix query. * Update the api_connexion query builders. * Add typing. * Apply sorts directly to model objects. * Apply sorts directly to model objects. * Standardize custom sort code. * Make sure joined fields prefetch. * Dont use cached_property, since its only on > 3.8. Co-authored-by: Ash Berlin-Taylor <[email protected]> Co-authored-by: Lewis John McGibbney <[email protected]> Co-authored-by: Ash Berlin-Taylor <[email protected]> Co-authored-by: Lucia Kasman <[email protected]> Co-authored-by: Fran Sánchez <[email protected]> Co-authored-by: Kamil Breguła <[email protected]>
8,272
0
97
66
21
44,454
25
airflow
10
airflow/www/security.py
Python
8
{ "docstring": "\n Has all the dag access in any of the 3 cases:\n 1. Role needs to be in (Admin, Viewer, User, Op).\n 2. Has can_read action on dags resource.\n 3. Has can_edit action on dags resource.\n ", "language": "en", "n_whitespaces": 71, "n_words": 35, "vocab_size": 27 }
https://github.com/apache/airflow.git
2
bootstrap
def bootstrap(pip, options):  # type: (str, t.Dict[str, t.Any]) -> None
    pip_version = options['pip_version']
    packages = options['packages']

    url = 'https://ci-files.testing.ansible.com/ansible-test/get-pip-%s.py' % pip_version

    cache_path = os.path.expanduser('~/.ansible/test/cache/get_pip_%s.py' % pip_version.replace(".", "_"))
    temp_path = cache_path + '.download'

    if os.path.exists(cache_path):
        log('Using cached pip %s bootstrap script: %s' % (pip_version, cache_path))
    else:
        log('Downloading pip %s bootstrap script: %s' % (pip_version, url))
        make_dirs(os.path.dirname(cache_path))
        download_file(url, temp_path)
        shutil.move(temp_path, cache_path)
        log('Cached pip %s bootstrap script: %s' % (pip_version, cache_path))

    env = common_pip_environment()
    env.update(GET_PIP=cache_path)

    options = common_pip_options()
    options.extend(packages)

    command = [sys.executable, pip] + options

    execute_command(command, env=env)
68fb3bf90efa3a722ba5ab7d66b1b22adc73198c
13
requirements.py
272
ansible-test - Fix consistency of managed venvs. (#77028)
78,507
0
169
159
56
266,665
84
ansible
29
test/lib/ansible_test/_util/target/setup/requirements.py
Python
20
{ "docstring": "Bootstrap pip and related packages in an empty virtual environment.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/ansible/ansible.git
4
_create_sql_query
def _create_sql_query(self) -> str:
    escaper = ParamEscaper()
    location = escaper.escape_item(self._file_location)
    if self._expression_list is not None:
        location = f"(SELECT {self._expression_list} FROM {location})"
    files_or_pattern = ""
    if self._pattern is not None:
        files_or_pattern = f"PATTERN = {escaper.escape_item(self._pattern)}\n"
    elif self._files is not None:
        files_or_pattern = f"FILES = {escaper.escape_item(self._files)}\n"
    format_options = self._generate_options("FORMAT_OPTIONS", escaper, self._format_options)
    copy_options = self._generate_options("COPY_OPTIONS", escaper, self._copy_options)
    # TODO: think on how to make sure that table_name and expression_list aren't used for SQL injection
    sql = f
    return sql.strip()
27d19e7626ef80687997a6799762fa00162c1328
14
databricks_sql.py
236
Databricks SQL operators (#21363)
8,546
0
185
99
55
45,314
76
airflow
21
airflow/providers/databricks/operators/databricks_sql.py
Python
18
{ "docstring": "COPY INTO {self._table_name}\nFROM {location}\nFILEFORMAT = {self._file_format}\n{files_or_pattern}{format_options}{copy_options}\n", "language": "en", "n_whitespaces": 5, "n_words": 9, "vocab_size": 9 }
https://github.com/apache/airflow.git
9
map
def map(self, mapper, na_action=None):
    from pandas.core.indexes.multi import MultiIndex

    new_values = self._map_values(mapper, na_action=na_action)

    # we can return a MultiIndex
    if new_values.size and isinstance(new_values[0], tuple):
        if isinstance(self, MultiIndex):
            names = self.names
        elif self.name:
            names = [self.name] * len(new_values[0])
        else:
            names = None
        return MultiIndex.from_tuples(new_values, names=names)

    dtype = None
    if not new_values.size:
        # empty
        dtype = self.dtype

    # e.g. if we are floating and new_values is all ints, then we
    # don't want to cast back to floating. But if we are UInt64
    # and new_values is all ints, we want to try.
    same_dtype = lib.infer_dtype(new_values, skipna=False) == self.inferred_type
    if same_dtype:
        new_values = maybe_cast_pointwise_result(
            new_values, self.dtype, same_dtype=same_dtype
        )

    if self._is_backward_compat_public_numeric_index and is_numeric_dtype(
        new_values.dtype
    ):
        return self._constructor(
            new_values, dtype=dtype, copy=False, name=self.name
        )
    return Index._with_infer(new_values, dtype=dtype, copy=False, name=self.name)

# TODO: De-duplicate with map, xref GH#32349
521259299f7829da667ba39302ec77acedde9e5e
15
base.py
304
DOC: Improve doc summaries in series.rst (#45237)
39,399
0
437
199
83
163,191
130
pandas
31
pandas/core/indexes/base.py
Python
26
{ "docstring": "\n Map values using an input mapping or function.\n\n Parameters\n ----------\n mapper : function, dict, or Series\n Mapping correspondence.\n na_action : {None, 'ignore'}\n If 'ignore', propagate NA values, without passing them to the\n mapping correspondence.\n\n Returns\n -------\n applied : Union[Index, MultiIndex], inferred\n The output of the mapping function applied to the index.\n If the function returns a tuple with more than one element\n a MultiIndex will be returned.\n ", "language": "en", "n_whitespaces": 197, "n_words": 67, "vocab_size": 53 }
https://github.com/pandas-dev/pandas.git
2
_handle_deprecations
def _handle_deprecations(self) -> None:
    if self._args.distributed:
        deprecation_warning("`-d`, `--distributed`",
                           "Please use `-D`, `--distribution-strategy`")
        logger.warning("Setting 'distribution-strategy' to 'mirrored'")
        setattr(self._args, "distribution_strategy", "mirrored")
        del self._args.distributed
2ea05623bd684b2d1dd75679ad00441a5c751e7e
10
train.py
79
Update Distibution Strategies: - Add Central Storage Stategy - Deprecate 'distributed' cli argument
20,513
0
110
43
21
101,076
21
faceswap
8
scripts/train.py
Python
8
{ "docstring": " Handle the update of deprecated arguments and output warnings. ", "language": "en", "n_whitespaces": 10, "n_words": 9, "vocab_size": 9 }
https://github.com/deepfakes/faceswap.git
1
multi_input_functional
def multi_input_functional():
    input_1 = keras.Input(shape=(1,))
    input_2 = keras.Input(shape=(1,))
    input_3 = keras.Input(shape=(1,))
    added = keras.layers.Add()([input_1, input_2, input_3])
    output = test_utils.Bias()(added)
    return keras.Model([input_1, input_2, input_3], output)


@test_combinations.run_with_all_model_types
@test_combinations.run_all_keras_modes
b96518a22bfd92a29811e507dec0b34248a8a3f5
@test_combinations.run_with_all_model_types @test_combinations.run_all_keras_modes
10
correctness_test.py
149
- Consolidate disparate test-related files into a single testing_infra folder. - Cleanup TODO related to removing testing infra as a dependency of the Keras target. - Standardize import naming: there is now only "test_combinations" for test combinations, and "test_utils" for utilities. The TF utilities module "test_util" is now always imported as "tf_test_utils" to avoid confusion. PiperOrigin-RevId: 426773173
79,765
1
31
87
19
268,905
26
keras
17
keras/engine/correctness_test.py
Python
7
{ "docstring": "Functional Model that adds its inputs and then adds a bias.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
https://github.com/keras-team/keras.git
7
nullify_connected_endpoints
def nullify_connected_endpoints(instance, **kwargs):
    logger = logging.getLogger('netbox.dcim.cable')

    # Disassociate the Cable from its termination points
    if instance.termination_a:
        logger.debug(f"Nullifying termination A for cable {instance}")
        model = instance.termination_a_type.model_class()
        model.objects.filter(pk__in=instance.termination_a_ids).update(_link_peer_type=None, _link_peer_id=None)
    if instance.termination_b:
        logger.debug(f"Nullifying termination B for cable {instance}")
        model = instance.termination_b_type.model_class()
        model.objects.filter(pk__in=instance.termination_b_ids).update(_link_peer_type=None, _link_peer_id=None)

    # Delete and retrace any dependent cable paths
    for cablepath in CablePath.objects.filter(path__contains=instance):
        cp = CablePath.from_origin(cablepath.origin)
        if cp:
            CablePath.objects.filter(pk=cablepath.pk).update(
                path=cp.path,
                destination_type=ContentType.objects.get_for_model(cp.destination) if cp.destination else None,
                destination_id=cp.destination.pk if cp.destination else None,
                is_active=cp.is_active,
                is_split=cp.is_split
            )
        else:
            cablepath.delete()
4bb9b6ee2639db683b70d6ddbee055497e0a3647
16
signals.py
332
Extend Cable model to support multiple A/B terminations
77,796
0
266
205
53
264,753
74
netbox
37
netbox/dcim/signals.py
Python
22
{ "docstring": "\n When a Cable is deleted, check for and update its two connected endpoints\n ", "language": "en", "n_whitespaces": 20, "n_words": 13, "vocab_size": 13 }
https://github.com/netbox-community/netbox.git
1
_async_update
def _async_update(self) -> None:
    super()._async_update()
    node = self.gateway.sensors[self.node_id]
    child = node.children[self.child_id]
    position: str = child.values[self.value_type]
    latitude, longitude, _ = position.split(",")
    self._latitude = float(latitude)
    self._longitude = float(longitude)
df2d0cd3e3ade2339a18415f92c85810308a9926
9
device_tracker.py
126
Refactor mysensors device tracker (#84747)
97,180
0
82
77
21
298,235
26
core
21
homeassistant/components/mysensors/device_tracker.py
Python
9
{ "docstring": "Update the controller with the latest value from a device.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
https://github.com/home-assistant/core.git
1
preprocess_input
def preprocess_input(x, data_format=None):  # pylint: disable=unused-argument
    return x


@keras_export("keras.applications.convnext.decode_predictions")
2d1086447a25d281f9428832d046c473d80ad761
@keras_export("keras.applications.convnext.decode_predictions")
7
convnext.py
33
Corrected preprocess_input docstring in regnet.py and convnext.py
80,001
1
15
12
9
269,284
9
keras
4
keras/applications/convnext.py
Python
2
{ "docstring": "A placeholder method for backward compatibility.\n\n The preprocessing logic has been included in the convnext model\n implementation. Users are no longer required to call this method to normalize\n the input data. This method does nothing and only kept as a placeholder to\n align the API surface between old and new version of model.\n\n Args:\n x: A floating point `numpy.array` or a `tf.Tensor`.\n data_format: Optional data format of the image tensor/array. Defaults to\n None, in which case the global setting\n `tf.keras.backend.image_data_format()` is used (unless you changed it, it\n defaults to \"channels_last\").{mode}\n\n Returns:\n Unchanged `numpy.array` or `tf.Tensor`.\n ", "language": "en", "n_whitespaces": 126, "n_words": 95, "vocab_size": 76 }
https://github.com/keras-team/keras.git
2
get_delivery_notes_to_be_billed
def get_delivery_notes_to_be_billed(doctype, txt, searchfield, start, page_len, filters, as_dict):
    doctype = "Delivery Note"
    fields = get_fields(doctype, ["name", "customer", "posting_date"])

    return frappe.db.sql(
        % {
            "fields": ", ".join(["`tabDelivery Note`.{0}".format(f) for f in fields]),
            "key": searchfield,
            "fcond": get_filters_cond(doctype, filters, []),
            "mcond": get_match_cond(doctype),
            "start": start,
            "page_len": page_len,
            "txt": "%(txt)s",
        },
        {"txt": ("%%%s%%" % txt)},
        as_dict=as_dict,
    )


@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
9baa2229761c5415f29646a1a5bed4a3f4981e05
@frappe.whitelist() @frappe.validate_and_sanitize_search_inputs
16
queries.py
210
fix: specify allowed doctype in queries (#31761)
14,974
1
35
116
47
69,105
53
erpnext
20
erpnext/controllers/queries.py
Python
32
{ "docstring": "\n\t\tselect %(fields)s\n\t\tfrom `tabDelivery Note`\n\t\twhere `tabDelivery Note`.`%(key)s` like %(txt)s and\n\t\t\t`tabDelivery Note`.docstatus = 1\n\t\t\tand status not in ('Stopped', 'Closed') %(fcond)s\n\t\t\tand (\n\t\t\t\t(`tabDelivery Note`.is_return = 0 and `tabDelivery Note`.per_billed < 100)\n\t\t\t\tor (`tabDelivery Note`.grand_total = 0 and `tabDelivery Note`.per_billed < 100)\n\t\t\t\tor (\n\t\t\t\t\t`tabDelivery Note`.is_return = 1\n\t\t\t\t\tand return_against in (select name from `tabDelivery Note` where per_billed < 100)\n\t\t\t\t)\n\t\t\t)\n\t\t\t%(mcond)s order by `tabDelivery Note`.`%(key)s` asc limit %(page_len)s offset %(start)s\n\t", "language": "en", "n_whitespaces": 59, "n_words": 73, "vocab_size": 41 }
https://github.com/frappe/erpnext.git
7
_build_attention_equation
def _build_attention_equation(rank, attn_axes):
    target_notation = _CHR_IDX[:rank]
    # `batch_dims` includes the head dim.
    batch_dims = tuple(np.delete(range(rank), attn_axes + (rank - 1,)))
    letter_offset = rank
    source_notation = ""
    for i in range(rank):
        if i in batch_dims or i == rank - 1:
            source_notation += target_notation[i]
        else:
            source_notation += _CHR_IDX[letter_offset]
            letter_offset += 1

    product_notation = "".join(
        [target_notation[i] for i in batch_dims]
        + [target_notation[i] for i in attn_axes]
        + [source_notation[i] for i in attn_axes]
    )
    dot_product_equation = "%s,%s->%s" % (
        source_notation,
        target_notation,
        product_notation,
    )
    attn_scores_rank = len(product_notation)
    combine_equation = "%s,%s->%s" % (
        product_notation,
        source_notation,
        target_notation,
    )
    return dot_product_equation, combine_equation, attn_scores_rank
84afc5193d38057e2e2badf9c889ea87d80d8fbf
13
multi_head_attention.py
241
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
81,007
0
251
155
56
272,394
96
keras
19
keras/layers/attention/multi_head_attention.py
Python
28
{ "docstring": "Builds einsum equations for the attention computation.\n\n Query, key, value inputs after projection are expected to have the shape as:\n `(bs, <non-attention dims>, <attention dims>, num_heads, channels)`.\n `bs` and `<non-attention dims>` are treated as `<batch dims>`.\n\n The attention operations can be generalized:\n (1) Query-key dot product:\n `(<batch dims>, <query attention dims>, num_heads, channels), (<batch dims>,\n <key attention dims>, num_heads, channels) -> (<batch dims>,\n num_heads, <query attention dims>, <key attention dims>)`\n (2) Combination:\n `(<batch dims>, num_heads, <query attention dims>, <key attention dims>),\n (<batch dims>, <value attention dims>, num_heads, channels) -> (<batch dims>,\n <query attention dims>, num_heads, channels)`\n\n Args:\n rank: Rank of query, key, value tensors.\n attn_axes: List/tuple of axes, `[-1, rank)`,\n that attention will be applied to.\n\n Returns:\n Einsum equations.\n ", "language": "en", "n_whitespaces": 186, "n_words": 119, "vocab_size": 73 }
https://github.com/keras-team/keras.git
9
nms
def nms(dets, match_threshold=0.6, match_metric='iou'):
    if dets.shape[0] == 0:
        return dets[[], :]
    scores = dets[:, 0]
    x1 = dets[:, 1]
    y1 = dets[:, 2]
    x2 = dets[:, 3]
    y2 = dets[:, 4]
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = scores.argsort()[::-1]

    ndets = dets.shape[0]
    suppressed = np.zeros((ndets), dtype=np.int)

    for _i in range(ndets):
        i = order[_i]
        if suppressed[i] == 1:
            continue
        ix1 = x1[i]
        iy1 = y1[i]
        ix2 = x2[i]
        iy2 = y2[i]
        iarea = areas[i]
        for _j in range(_i + 1, ndets):
            j = order[_j]
            if suppressed[j] == 1:
                continue
            xx1 = max(ix1, x1[j])
            yy1 = max(iy1, y1[j])
            xx2 = min(ix2, x2[j])
            yy2 = min(iy2, y2[j])
            w = max(0.0, xx2 - xx1 + 1)
            h = max(0.0, yy2 - yy1 + 1)
            inter = w * h
            if match_metric == 'iou':
                union = iarea + areas[j] - inter
                match_value = inter / union
            elif match_metric == 'ios':
                smaller = min(iarea, areas[j])
                match_value = inter / smaller
            else:
                raise ValueError()
            if match_value >= match_threshold:
                suppressed[j] = 1
    keep = np.where(suppressed == 0)[0]
    dets = dets[keep, :]
    return dets
0f75cb6c640203bc5c2670f6cbd9ac89ad53ecf4
16
utils.py
553
[smalldet] add SAHI slice train eval infer deploy (#6465) * add slice infer for smalldet * add slice infer for smalldet * fix slice infer * fix doc, test=document_fix * fix eval and configs * add slice dataset loader * fix deploy * fix docs, test=document_fix * update docs, test=document_fix
53,054
0
550
365
100
211,275
183
PaddleDetection
44
deploy/python/utils.py
Python
45
{ "docstring": " Apply NMS to avoid detecting too many overlapping bounding boxes.\n Args:\n dets: shape [N, 5], [score, x1, y1, x2, y2]\n match_metric: 'iou' or 'ios'\n match_threshold: overlap thresh for match metric.\n ", "language": "en", "n_whitespaces": 74, "n_words": 30, "vocab_size": 30 }
https://github.com/PaddlePaddle/PaddleDetection.git
1
test_fetch_period_api_with_minimal_response
async def test_fetch_period_api_with_minimal_response(recorder_mock, hass, hass_client):
    now = dt_util.utcnow()
    await async_setup_component(hass, "history", {})

    hass.states.async_set("sensor.power", 0, {"attr": "any"})
    await async_wait_recording_done(hass)
    hass.states.async_set("sensor.power", 50, {"attr": "any"})
    await async_wait_recording_done(hass)
    hass.states.async_set("sensor.power", 23, {"attr": "any"})
    last_changed = hass.states.get("sensor.power").last_changed
    await async_wait_recording_done(hass)
    hass.states.async_set("sensor.power", 23, {"attr": "any"})
    await async_wait_recording_done(hass)
    client = await hass_client()
    response = await client.get(
        f"/api/history/period/{now.isoformat()}?filter_entity_id=sensor.power&minimal_response&no_attributes"
    )
    assert response.status == HTTPStatus.OK
    response_json = await response.json()
    assert len(response_json[0]) == 3
    state_list = response_json[0]

    assert state_list[0]["entity_id"] == "sensor.power"
    assert state_list[0]["attributes"] == {}
    assert state_list[0]["state"] == "0"

    assert "attributes" not in state_list[1]
    assert "entity_id" not in state_list[1]
    assert state_list[1]["state"] == "50"

    assert "attributes" not in state_list[2]
    assert "entity_id" not in state_list[2]
    assert state_list[2]["state"] == "23"
    assert state_list[2]["last_changed"] == json.dumps(
        process_timestamp(last_changed),
        cls=JSONEncoder,
    ).replace('"', "")
31a787558fd312331b55e5c2c4b33341fc3601fc
13
test_init.py
500
Ensure recorder test fixture is setup before hass fixture (#80528) * Ensure recorder test fixture is setup before hass fixture * Adjust more tests
88,516
0
223
284
59
289,374
112
core
28
tests/components/history/test_init.py
Python
33
{ "docstring": "Test the fetch period view for history with minimal_response.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
https://github.com/home-assistant/core.git
11
describe_operation
def describe_operation(operation, backwards):
    prefix = ""
    is_error = False
    if hasattr(operation, "code"):
        code = operation.reverse_code if backwards else operation.code
        action = (code.__doc__ or "") if code else None
    elif hasattr(operation, "sql"):
        action = operation.reverse_sql if backwards else operation.sql
    else:
        action = ""
        if backwards:
            prefix = "Undo "
    if action is not None:
        action = str(action).replace("\n", "")
    elif backwards:
        action = "IRREVERSIBLE"
        is_error = True
    if action:
        action = " -> " + action
    truncated = Truncator(action)
    return prefix + operation.describe() + truncated.chars(40), is_error
9c19aff7c7561e3a82978a272ecdaad40dda5c00
12
migrate.py
234
Refs #33476 -- Reformatted code with Black.
50,839
0
275
135
43
204,670
84
django
18
django/core/management/commands/migrate.py
Python
21
{ "docstring": "Return a string that describes a migration operation for --plan.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 9 }
https://github.com/django/django.git
2
get_binance_available_quotes_for_each_coin
def get_binance_available_quotes_for_each_coin() -> dict:
    trading_pairs = _get_trading_pairs()
    results = defaultdict(list)
    for pair in trading_pairs:
        results[pair["baseAsset"]].append(pair["quoteAsset"])
    return results


@log_start_end(log=logger)
e1b6022b9cf156ffc0697d0d25a5ed2772ea8d68
@log_start_end(log=logger)
12
binance_model.py
82
Global plot styles (#1228) * Add default stylesheets * Add terminal style helper class and global style initialization in cfg * Style comments and docstrings * Load rich terminal theme from config file * Add application chart styles to candle charts * Add todos * Remove explicit color setting for some ta charts * Add user styles folder to gitignore * Update default stylesheets * Add matplotlib font manager support * Add matplotlib font manager support * Update docstrings and default style * Update stocks candle chart formatting (return fig to style title) * Style common ta overlap view * Make up and down market colors a part of the style helper * Update stylesheets * Style common ta volume view * Style common ta momentum view * Style common ta trend indicators view * Style common ta volatility view * Style common ta volume view * Style common ta custom indicators view * Fix styling bugs and remove the obvious time x lablel * Style charts in the covid menu * Set legend position to upper left in the mpl stylesheet * Add mpl_rcparams configs for parameters not covered by stylesheets * Remove font configuration files * Update style class utility functions * Implement passing external axes and style utility usage in ema & stoch * Add theme watermark and output helpers * Rename style to theme * Update helper usage in ta/ma and ta/stoch * Update style to theme in sample menus * Style forex (#1305) * Make tight layout optional 'cause mplfinance doesn't support it * Apply global style to the forex menu * Update code layout in oanda view and black * Style common TA (#1315) * Make tight layout optional 'cause mplfinance doesn't support it * Apply global style to the forex menu * Add linewidth to theme for use in mpf's addplots * Add vwap to the stocks notebook api * Update common/ta overlap to follow charting style * Apply style on TerminalStyle init * Enable infrastructure for excluding non-trading days from plots * Update notebook api to include there and resolve bandit warning * Update ta/common/overlap to exclude non-trading days * Enable external ax, style and non-trading days in common/ta/momentum * Enable external ax, style and non-trading days in common/ta/trend * Update vwap to the argument naming convention * Enable external ax, style and non-trading days in common/ta/volatility * Enable external ax, style and non-trading days in common/ta/volume * Enable external ax, style and non-trading days in common/ta/custom * Fix controller tests * Forgot to disable rewriting of the cassettes ... 
* Fix controller errors that came up because a merge conflict * Fix price label position on fib * Fix line having wrong x values in fib Co-authored-by: Colin Delahunty <[email protected]> * Style economy (#1308) * Began converting * Added alphavan_view * Added CNN View * Updated nasdaq view, fixed glitch * Added fred * Refactored URL * Theo's requested changes * Updated docstrings * Updated tests * Fixed pylint * Fixed tests * Theo changes * Econ Fix * Refactor chart style for Crypto context (#1306) * Remove mock for gff * Mock visualize_output helper function * Refactor * Fix plot helper * Update legend loc * Refactor mplfinance candle plot * Fix errors in the helper function * Fix binbook having the wrong call_ function name * Remove hardcoded style params * Resolve kwargs future warning from pandas * Remove warnings import Co-authored-by: Theodore Aptekarev <[email protected]> * funds + custom (#1311) * funds + custom * cleanup cleanup everybody everywhere * Fix external axes conditional and a typo Co-authored-by: Theodore Aptekarev <[email protected]> * Add external axes mode to covid charts (#1328) * Add portfolio menu plots (#1318) * Portfolio view plots (commenting out report stuff) * PA Menu broken. Commenting out and fix tests * portfolio optimization * comment out commented api line * Add notes on disabling the pa submenu Co-authored-by: Theodore Aptekarev <[email protected]> * Plot updates in common BA (#1335) * Add external axes support to common/ba/finbrain * Add external axes support to common/ba/twitter * Add external axes support to common/ba/google * Add external axes support to common/ba/sentimentinvestor * Add sentimentinvestor to the notebooks API * Fix tests * Etf refactor (#1323) * Refactored no ETF * Fixed gtff import * Fixed tests * Fix pie chart style * Refactored etf/candle * Added pylint fix * Fixed tests * Update candle chart layout * Update etf controller test * Remove strange binary file Co-authored-by: Theodore Aptekarev <[email protected]> * Expose ETF candle function in the notebooks API * Common BA and Common QA charts update (#1342) * Add external axes support to common/ba/finbrain * Add external axes support to common/ba/twitter * Add external axes support to common/ba/google * Add external axes support to common/ba/sentimentinvestor * Add sentimentinvestor to the notebooks API * Fix tests * Update stylesheet files * Refactor charts for common/qa * Update the forgotten line plot * Update tests * Add missing arg to a docstring * Remove scientific notation * Black imports Co-authored-by: Minh Hoang <[email protected]> * Options refactor (#1324) * Fixed alphaquery_view * finished options * Fixed pylint * Fixed tests * Fixed tests * Fixed tests * update yfinance * Tradier + Chartexchange * change mocks from gtff to theme.visualize output * tests Co-authored-by: Theodore Aptekarev <[email protected]> Co-authored-by: james <[email protected]> * Refactor Stocks menu (#1325) * Fix backtesting menu * Refactor comparison analysis * Refactor Dark pool shorts * Refactor rest of menu * Fix test * Fix tests failing * Fix tests fail * Fix test failing * Remove record mode=none to record new output * Rewrite test output * Rewrite test outputs * Adding more rewritten test output * Mock plt.show * Mock missing plt.show * Missing @pytest.mark.vcr * Updating tests : common/behavioural_analysis/finbrain * Improve notebooks API coverage for CA and DPS * Silence annoying flake8 warning Co-authored-by: Chavithra PARANA <[email protected]> Co-authored-by: Theodore Aptekarev 
<[email protected]> * Charts update for common/pred (#1344) * Add external axes support to common/ba/finbrain * Add external axes support to common/ba/twitter * Add external axes support to common/ba/google * Add external axes support to common/ba/sentimentinvestor * Add sentimentinvestor to the notebooks API * Fix tests * Update stylesheet files * Refactor charts for common/qa * Update the forgotten line plot * Update tests * Add missing arg to a docstring * Style pred helper and controllers * Update ETS plot * Update plots in KNN and pred helper * Update plot and pretty table for arima * Update plot for common/pred/regression * Refactor mc_view * Fix linting * Fix mypy * Move plot title to the axis level to make more vertical space Co-authored-by: Minh Hoang <[email protected]> Co-authored-by: jmaslek <[email protected]> * linter * Update common/ba test data * Change etf candle to match stock candle * try updating sia test Co-authored-by: Colin Delahunty <[email protected]> Co-authored-by: jmaslek <[email protected]> Co-authored-by: minhhoang1023 <[email protected]> Co-authored-by: Minh Hoang <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]>
84,165
1
39
40
16
282,485
18
OpenBBTerminal
12
gamestonk_terminal/cryptocurrency/due_diligence/binance_model.py
Python
15
{ "docstring": "Helper method that, for every coin available on Binance, adds all quote assets. [Source: Binance]\n\n Returns\n -------\n dict:\n All quote assets for a given coin\n {'ETH' : ['BTC', 'USDT' ...], 'UNI' : ['ETH', 'BTC','BUSD', ...]\n\n ", "language": "en", "n_whitespaces": 60, "n_words": 34, "vocab_size": 30 }
https://github.com/OpenBB-finance/OpenBBTerminal.git
13
test_search_tsquery_chars
def test_search_tsquery_chars(self):
    # Simple quote should be escaped inside each tsquery term.
    results = self.backend.search("L'amour piqué par une abeille", models.Book)
    self.assertUnsortedListEqual([r.title for r in results], [])

    results = self.backend.search("'starting quote", models.Book)
    self.assertUnsortedListEqual([r.title for r in results], [])

    results = self.backend.search("ending quote'", models.Book)
    self.assertUnsortedListEqual([r.title for r in results], [])

    results = self.backend.search("double quo''te", models.Book)
    self.assertUnsortedListEqual([r.title for r in results], [])

    results = self.backend.search("triple quo
d10f15e55806c6944827d801cd9c2d53f5da4186
9
test_postgres_backend.py
595
Reformat with black
16,435
0
140
378
34
75,853
63
wagtail
10
wagtail/search/tests/test_postgres_backend.py
Python
25
{ "docstring": "\n Checks that tsquery characters are correctly escaped\n and do not generate a PostgreSQL syntax error.\n te\", models.Book)\n self.assertUnsortedListEqual([r.title for r in results], [])\n\n # Now suffixes.\n results = self.backend.search(\"Something:B\", models.Book)\n self.assertUnsortedListEqual([r.title for r in results], [])\n results = self.backend.search(\"Something:*\", models.Book)\n self.assertUnsortedListEqual([r.title for r in results], [])\n results = self.backend.search(\"Something:A*BCD\", models.Book)\n self.assertUnsortedListEqual([r.title for r in results], [])\n\n # Now the AND operator.\n results = self.backend.search(\"first & second\", models.Book)\n self.assertUnsortedListEqual([r.title for r in results], [])\n\n # Now the OR operator.\n results = self.backend.search(\"first | second\", models.Book)\n self.assertUnsortedListEqual([r.title for r in results], [])\n\n # Now the NOT operator.\n results = self.backend.search(\"first & !second\", models.Book)\n self.assertUnsortedListEqual([r.title for r in results], [])\n\n # Now the phrase operator.\n results = self.backend.search(\"first <-> second\", models.Book)\n self.assertUnsortedListEqual([r.title for r in results], [])\n", "language": "en", "n_whitespaces": 285, "n_words": 124, "vocab_size": 43 }
https://github.com/wagtail/wagtail.git
4
sample_weights_mismatch
def sample_weights_mismatch(self):
    # If there is a mismatch between sample weight mode and the placeholders
    # created, then recompile the sub-graphs that depend on sample weights.
    return (
        self.sample_weight_mode is not None and self.sample_weight is None
    ) or (
        self.sample_weight_mode is None and self.sample_weight is not None
    )
84afc5193d38057e2e2badf9c889ea87d80d8fbf
10
training_v1.py
59
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,913
0
112
36
31
271,957
48
keras
4
keras/engine/training_v1.py
Python
6
{ "docstring": "Check if the sample weight and the mode match or not.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
https://github.com/keras-team/keras.git
3
set_undefined_options
def set_undefined_options(self, src_cmd, *option_pairs):
    # Option_pairs: list of (src_option, dst_option) tuples
    src_cmd_obj = self.distribution.get_command_obj(src_cmd)
    src_cmd_obj.ensure_finalized()
    for (src_option, dst_option) in option_pairs:
        if getattr(self, dst_option) is None:
            setattr(self, dst_option, getattr(src_cmd_obj, src_option))
8198943edd73a363c266633e1aa5b2a9e9c9f526
13
cmd.py
90
add python 3.10.4 for windows
56,675
0
90
58
26
222,614
29
XX-Net
12
python3.10.4/Lib/distutils/cmd.py
Python
6
{ "docstring": "Set the values of any \"undefined\" options from corresponding\n option values in some other command object. \"Undefined\" here means\n \"is None\", which is the convention used to indicate that an option\n has not been changed between 'initialize_options()' and\n 'finalize_options()'. Usually called from 'finalize_options()' for\n options that depend on some other command rather than another\n option of the same command. 'src_cmd' is the other command from\n which option values will be taken (a command object will be created\n for it if necessary); the remaining arguments are\n '(src_option,dst_option)' tuples which mean \"take the value of\n 'src_option' in the 'src_cmd' command object, and copy it to\n 'dst_option' in the current command object\".\n ", "language": "en", "n_whitespaces": 196, "n_words": 109, "vocab_size": 71 }
https://github.com/XX-net/XX-Net.git
2
get_token
def get_token(request):
    if "CSRF_COOKIE" in request.META:
        csrf_secret = request.META["CSRF_COOKIE"]
        # Since the cookie is being used, flag to send the cookie in
        # process_response() (even if the client already has it) in order to
        # renew the expiry timer.
        request.META["CSRF_COOKIE_NEEDS_UPDATE"] = True
    else:
        csrf_secret = _add_new_csrf_cookie(request)
    return _mask_cipher_secret(csrf_secret)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
11
csrf.py
79
Refs #33476 -- Reformatted code with Black.
51,381
0
102
42
35
206,140
48
django
6
django/middleware/csrf.py
Python
7
{ "docstring": "\n Return the CSRF token required for a POST form. The token is an\n alphanumeric value. A new token is created if one is not already set.\n\n A side effect of calling this function is to make the csrf_protect\n decorator and the CsrfViewMiddleware add a CSRF cookie and a 'Vary: Cookie'\n header to the outgoing response. For this reason, you may need to use this\n function lazily, as is done by the csrf context processor.\n ", "language": "en", "n_whitespaces": 97, "n_words": 74, "vocab_size": 54 }
https://github.com/django/django.git
13
as_sql
def as_sql(self, compiler, connection):
    result = []
    result_params = []
    if self.connector == AND:
        full_needed, empty_needed = len(self.children), 1
    else:
        full_needed, empty_needed = 1, len(self.children)

    for child in self.children:
        try:
            sql, params = compiler.compile(child)
        except EmptyResultSet:
            empty_needed -= 1
        else:
            if sql:
                result.append(sql)
                result_params.extend(params)
            else:
                full_needed -= 1
        # Check if this node matches nothing or everything.
        # First check the amount of full nodes and empty nodes
        # to make this node empty/full.
        # Now, check if this node is full/empty using the
        # counts.
        if empty_needed == 0:
            if self.negated:
                return "", []
            else:
                raise EmptyResultSet
        if full_needed == 0:
            if self.negated:
                raise EmptyResultSet
            else:
                return "", []
    conn = " %s " % self.connector
    sql_string = conn.join(result)
    if sql_string:
        if self.negated:
            # Some backends (Oracle at least) need parentheses
            # around the inner SQL in the negated case, even if the
            # inner SQL contains just a single expression.
            sql_string = "NOT (%s)" % sql_string
        elif len(result) > 1 or self.resolved:
            sql_string = "(%s)" % sql_string
    return sql_string, result_params
9c19aff7c7561e3a82978a272ecdaad40dda5c00
14
where.py
320
Refs #33476 -- Reformatted code with Black.
51,276
0
724
185
98
205,905
172
django
24
django/db/models/sql/where.py
Python
36
{ "docstring": "\n Return the SQL version of the where clause and the value to be\n substituted in. Return '', [] if this node matches everything,\n None, [] if this node is empty, and raise EmptyResultSet if this\n node can't match anything.\n ", "language": "en", "n_whitespaces": 75, "n_words": 39, "vocab_size": 28 }
https://github.com/django/django.git
1
test_pageurl_with_unknown_site
def test_pageurl_with_unknown_site(self):
    page = Page.objects.get(url_path="/home/events/")
    tpl = template.Template(
    )

    # 'request' object in context, but site is None
    request = HttpRequest()
    request.META["HTTP_HOST"] = "unknown.example.com"
    request.META["SERVER_PORT"] = 80

    result = tpl.render(template.Context({"page": page, "request": request}))
    self.assertIn('<a href="/events/">Events</a>', result)
d10f15e55806c6944827d801cd9c2d53f5da4186
13
tests.py
135
Reformat with black
16,286
0
110
75
31
74,670
36
wagtail
17
wagtail/core/tests/tests.py
Python
10
{ "docstring": "{% load wagtailcore_tags %}<a href=\"{% pageurl page %}\">{{ page.title }}</a>", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/wagtail/wagtail.git
1
get_labels
def get_labels(self):
    return ["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "B-MISC", "I-MISC"], \
        self.BUILDER_CONFIGS[self.name]['pos_tags']
7b455cce47204d4d664deea9661670a838ec8d35
9
conll2002.py
72
feat: add conll2002 dataset (#1561) Co-authored-by: Zeyu Chen <[email protected]>
118,099
0
42
39
14
322,232
14
PaddleNLP
4
paddlenlp/datasets/conll2002.py
Python
3
{ "docstring": "\n Returns labels of ner tags and pos tags.\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
https://github.com/PaddlePaddle/PaddleNLP.git
4
format_value
def format_value(self, value):
    if value == "" or value is None:
        return None
    if self.is_localized:
        return formats.localize_input(value)
    return str(value)
9c19aff7c7561e3a82978a272ecdaad40dda5c00
9
widgets.py
61
Refs #33476 -- Reformatted code with Black.
51,328
0
69
36
15
206,022
19
django
7
django/forms/widgets.py
Python
6
{ "docstring": "\n Return a value as it should appear when rendered in a template.\n ", "language": "en", "n_whitespaces": 27, "n_words": 12, "vocab_size": 11 }
https://github.com/django/django.git
2
__iter__
def __iter__(self) -> Iterator[tuple[Widget, Region, Region, Size, Size]]:
    layers = sorted(self.map.items(), key=lambda item: item[1].order, reverse=True)
    intersection = Region.intersection
    for widget, (region, _order, clip, virtual_size, container_size) in layers:
        yield (
            widget,
            intersection(region, clip),
            region,
            virtual_size,
            container_size,
        )
1a20b9de7d4cef7f93e4500757d3fb42e680f40c
12
_compositor.py
126
docstring
43,896
0
161
90
32
182,658
36
textual
22
src/textual/_compositor.py
Python
17
{ "docstring": "Iterate map with information regarding each widget and is position\n\n Yields:\n Iterator[tuple[Widget, Region, Region, Size, Size]]: Iterates a tuple of\n Widget, clip region, region, virtual size, and container size.\n ", "language": "en", "n_whitespaces": 69, "n_words": 29, "vocab_size": 26 }
https://github.com/Textualize/textual.git
3
_express_axis
def _express_axis(self, axis, frame):
    try:
        ax_mat = axis.to_matrix(self.parent_interframe)
    except ValueError:
        ax_mat = axis.to_matrix(self.child_interframe)
    try:
        self.parent_interframe.dcm(frame)  # Check if connected
        int_frame = self.parent_interframe
    except ValueError:
        int_frame = self.child_interframe
    return self._to_vector(ax_mat, int_frame).express(frame)
7c199e306648513c13c9b5c5b5fad06e5f1c3020
12
joint.py
118
Fix failing test and simplify joint orient methods
49,253
0
128
72
22
199,355
30
sympy
13
sympy/physics/mechanics/joint.py
Python
11
{ "docstring": "Helper function to get an axis expressed in a specified frame.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/sympy/sympy.git
1
run_api_experiment
def run_api_experiment(input_features, output_features):
    config = {
        "input_features": input_features,
        "output_features": output_features,
        "combiner": {"type": "concat", "output_size": 14},
        "training": {"epochs": 2},
    }

    model = LudwigModel(config)
    return model


@pytest.fixture(scope="module")
69604268c2ddc06a4ee0b3dce0e05a8fb73b5d16
@pytest.fixture(scope="module")
11
test_visualization_api.py
105
Rename fc_size to output_size (#1641) * Rename fc_size to output_size * Responding to comments
894
1
67
48
23
5,940
25
ludwig
9
tests/integration_tests/test_visualization_api.py
Python
9
{ "docstring": "Helper method to avoid code repetition in running an experiment.\n\n :param input_features: input schema\n :param output_features: output schema\n :return: None\n ", "language": "en", "n_whitespaces": 32, "n_words": 20, "vocab_size": 18 }
https://github.com/ludwig-ai/ludwig.git
1
label_axes
def label_axes(ax, text):
    ax.text(.5, .5, text, transform=ax.transAxes,
            horizontalalignment="center", verticalalignment="center")
    ax.tick_params(bottom=False, labelbottom=False,
                  left=False, labelleft=False)


##############################################################################
# Fixed axes sizes; fixed paddings.

fig = plt.figure(figsize=(6, 6))
fig.suptitle("Fixed axes sizes, fixed paddings")

# Sizes are in inches.
horiz = [Size.Fixed(1.), Size.Fixed(.5), Size.Fixed(1.5), Size.Fixed(.5)]
vert = [Size.Fixed(1.5), Size.Fixed(.5), Size.Fixed(1.)]

rect = (0.1, 0.1, 0.8, 0.8)
# Divide the axes rectangle into a grid with sizes specified by horiz * vert.
div = Divider(fig, rect, horiz, vert, aspect=False)

# The rect parameter will actually be ignored and overridden by axes_locator.
ax1 = fig.add_axes(rect, axes_locator=div.new_locator(nx=0, ny=0))
label_axes(ax1, "nx=0, ny=0")
ax2 = fig.add_axes(rect, axes_locator=div.new_locator(nx=0, ny=2))
label_axes(ax2, "nx=0, ny=2")
ax3 = fig.add_axes(rect, axes_locator=div.new_locator(nx=2, ny=2))
label_axes(ax3, "nx=2, ny=2")
ax4 = fig.add_axes(rect, axes_locator=div.new_locator(nx=2, nx1=4, ny=0))
label_axes(ax4, "nx=2, nx1=4, ny=0")

##############################################################################
# Axes sizes that scale with the figure size; fixed paddings.

fig = plt.figure(figsize=(6, 6))
fig.suptitle("Scalable axes sizes, fixed paddings")

horiz = [Size.Scaled(1.5), Size.Fixed(.5), Size.Scaled(1.), Size.Scaled(.5)]
vert = [Size.Scaled(1.), Size.Fixed(.5), Size.Scaled(1.5)]

rect = (0.1, 0.1, 0.8, 0.8)
# Divide the axes rectangle into a grid with sizes specified by horiz * vert.
div = Divider(fig, rect, horiz, vert, aspect=False)

# The rect parameter will actually be ignored and overridden by axes_locator.
ax1 = fig.add_axes(rect, axes_locator=div.new_locator(nx=0, ny=0))
label_axes(ax1, "nx=0, ny=0")
ax2 = fig.add_axes(rect, axes_locator=div.new_locator(nx=0, ny=2))
label_axes(ax2, "nx=0, ny=2")
ax3 = fig.add_axes(rect, axes_locator=div.new_locator(nx=2, ny=2))
label_axes(ax3, "nx=2, ny=2")
ax4 = fig.add_axes(rect, axes_locator=div.new_locator(nx=2, nx1=4, ny=0))
label_axes(ax4, "nx=2, nx1=4, ny=0")

plt.show()
162bd59f50a9c59a51574aee5dc9e932133bb971
10
simple_axes_divider1.py
767
DOC: More capitalization of Axes In line with #18726. Triggered by #22242.
22,676
0
226
54
101
107,291
226
matplotlib
37
examples/axes_grid1/simple_axes_divider1.py
Python
5
{ "docstring": "Place a label at the center of an Axes, and remove the axis ticks.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
https://github.com/matplotlib/matplotlib.git
2
max_mireds
def max_mireds(self) -> int:
    max_mireds = self.cluster.get("color_temp_physical_max", self.MAX_MIREDS)
    if max_mireds == 0:
        self.warning(
            "[Max mireds is 0, setting to %s] Please open an issue on the quirks repo to have this device corrected",
            self.MAX_MIREDS,
        )
        max_mireds = self.MAX_MIREDS
    return max_mireds
83c6a7e18b1b0e4d5a302e304f117dee11d3aa51
10
lighting.py
76
Fix invalid min and max color temp in bad ZHA light devices (#81604) * Fix ZHA default color temps * update test
89,352
0
131
45
35
290,234
40
core
7
homeassistant/components/zha/core/channels/lighting.py
Python
10
{ "docstring": "Return the warmest color_temp that this channel supports.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/home-assistant/core.git
3
get_names
def get_names(adtype):
    listnames = []
    names = adtype.names
    for name in names:
        current = adtype[name]
        if current.names is not None:
            listnames.append((name, tuple(get_names(current))))
        else:
            listnames.append(name)
    return tuple(listnames)
569fc6a40ea53054409e00c7d1c0e7f5f53cb0ce
16
recfunctions.py
103
Fix docstring and examples for rfn.get_names*
38,607
0
84
63
24
160,357
26
numpy
8
numpy/lib/recfunctions.py
Python
10
{ "docstring": "\n Returns the field names of the input datatype as a tuple. Input datatype\n has to have fields otherwise error is raised.\n\n Parameters\n ----------\n adtype : dtype\n Input datatype\n\n Examples\n --------\n >>> from numpy.lib import recfunctions as rfn\n >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype)\n ('A',)\n >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]).dtype)\n ('A', 'B')\n >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])\n >>> rfn.get_names(adtype)\n ('a', ('b', ('ba', 'bb')))\n ", "language": "en", "n_whitespaces": 121, "n_words": 65, "vocab_size": 52 }
https://github.com/numpy/numpy.git
7
pipe
def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
    address = tempfile.mktemp(
        prefix=r'\\.\pipe\python-pipe-{:d}-{:d}-'.format(
            os.getpid(), next(_mmap_counter)))

    if duplex:
        openmode = _winapi.PIPE_ACCESS_DUPLEX
        access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
        obsize, ibsize = bufsize, bufsize
    else:
        openmode = _winapi.PIPE_ACCESS_INBOUND
        access = _winapi.GENERIC_WRITE
        obsize, ibsize = 0, bufsize

    openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE

    if overlapped[0]:
        openmode |= _winapi.FILE_FLAG_OVERLAPPED

    if overlapped[1]:
        flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED
    else:
        flags_and_attribs = 0

    h1 = h2 = None
    try:
        h1 = _winapi.CreateNamedPipe(
            address, openmode, _winapi.PIPE_WAIT,
            1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)

        h2 = _winapi.CreateFile(
            address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
            flags_and_attribs, _winapi.NULL)

        ov = _winapi.ConnectNamedPipe(h1, overlapped=True)
        ov.GetOverlappedResult(True)
        return h1, h2
    except:
        if h1 is not None:
            _winapi.CloseHandle(h1)
        if h2 is not None:
            _winapi.CloseHandle(h2)
        raise


# Wrapper for a pipe handle
8198943edd73a363c266633e1aa5b2a9e9c9f526
13
windows_utils.py
355
add python 3.10.4 for windows
56,180
0
346
232
70
221,015
111
XX-Net
38
python3.10.4/Lib/asyncio/windows_utils.py
Python
37
{ "docstring": "Like os.pipe() but with overlapped support and using handles not fds.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/XX-net/XX-Net.git
1
test_unload_config_entry
async def test_unload_config_entry(hass, entry, lcn_connection):
    await hass.config_entries.async_unload(entry.entry_id)
    assert hass.states.get("light.light_output1").state == STATE_UNAVAILABLE
d5ec2fe842411aea086333889c4357d8cd6cc394
10
test_light.py
58
Add tests for LCN light platform (#64741)
109,403
0
20
34
11
310,731
11
core
11
tests/components/lcn/test_light.py
Python
3
{ "docstring": "Test the light is removed when the config entry is unloaded.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 9 }
https://github.com/home-assistant/core.git
6
real_get_relative_path
def real_get_relative_path(config, path):
    requests_pathname = config.requests_pathname_prefix
    if requests_pathname == "/" and path == "":
        return "/"
    if requests_pathname != "/" and path == "":
        return requests_pathname
    if not path.startswith("/"):
        raise exceptions.UnsupportedRelativePath(
            .format(
                path
            )
        )
    return "/".join([requests_pathname.rstrip("/"), path.lstrip("/")])
cd13276a2df1c33b5daabdd3a07f57e3128e471c
12
_get_paths.py
139
Added dash.get_relative_path, dash.strip_relative_path, dash.get_asset_url
7,266
0
117
76
22
39,818
38
dash
12
dash/_get_paths.py
Python
16
{ "docstring": "\n Paths that aren't prefixed with a leading / are not supported.\n You supplied: {}\n ", "language": "en", "n_whitespaces": 48, "n_words": 14, "vocab_size": 14 }
https://github.com/plotly/dash.git
4
load_model
def load_model(filepath, custom_objects=None, compile=True, **kwargs):
    if str(filepath).endswith(".keras") and zipfile.is_zipfile(filepath):
        if kwargs:
            raise ValueError(
                "The following argument(s) are not supported "
                f"with the native Keras format: {list(kwargs.keys())}"
            )
        return saving_lib.load_model(
            filepath, custom_objects=custom_objects, compile=compile
        )

    # Legacy case.
    return legacy_sm_saving_lib.load_model(
        filepath, custom_objects=custom_objects, compile=compile, **kwargs
    )
c9068087d9142bab573e0c300bf9874a957accff
18
saving_api.py
140
Prepare public API surface for v3 saving. PiperOrigin-RevId: 484397600
83,345
0
149
78
37
280,315
43
keras
14
keras/saving/saving_api.py
Python
13
{ "docstring": "Loads a model saved via `model.save()`.\n\n Args:\n filepath: `str` or `pathlib.Path` object, path to the saved model file.\n custom_objects: Optional dictionary mapping names\n (strings) to custom classes or functions to be\n considered during deserialization.\n compile: Boolean, whether to compile the model after loading.\n\n SavedModel format arguments:\n options: Only applies to SavedModel format.\n Optional `tf.saved_model.LoadOptions` object that specifies\n SavedModel loading options.\n\n Returns:\n A Keras model instance. If the original model was compiled,\n and the argument `compile=True` is set, then the returned model\n will be compiled. Otherwise, the model will be left uncompiled.\n\n Example:\n\n ```python\n model = tf.keras.Sequential([\n tf.keras.layers.Dense(5, input_shape=(3,)),\n tf.keras.layers.Softmax()])\n model.save(\"model.keras\")\n loaded_model = tf.keras.models.load_model(\"model.keras\")\n x = tf.random.uniform((10, 3))\n assert np.allclose(model.predict(x), loaded_model.predict(x))\n ```\n\n Note that the model variables may have different name values\n (`var.name` property, e.g. `\"dense_1/kernel:0\"`) after being reloaded.\n It is recommended that you use layer attributes to\n access specific variables, e.g. `model.get_layer(\"dense_1\").kernel`.\n ", "language": "en", "n_whitespaces": 297, "n_words": 142, "vocab_size": 108 }
https://github.com/keras-team/keras.git
1
test_cache_invalidation
def test_cache_invalidation() -> None:
    ledger_store = DictLedgerStore()
    user_key = b"1483"
    ledger = DataSubjectLedger.get_or_create(store=ledger_store, user_key=user_key)

    assert ledger.delta == 1e-6, "The cache has been changed or is invalid."
    assert (
        ledger._cache_constant2epsilon.all()
    ), "There is a zero epsilon value in the cache- major security flaw."
    assert (
        ledger._cache_constant2epsilon > 0
    ).all(), "Negative epsilon value in the cache- major security flaw."
61f4138eeb028287425f6007d692bf7faa808e75
10
data_subject_ledger_test.py
108
Add tests for ledger and cache
411
0
98
66
43
3,243
57
PySyft
11
packages/syft/tests/syft/core/adp/data_subject_ledger_test.py
Python
12
{ "docstring": "The cache was built assuming a particular value of delta (1e-6), and shouldn't contain any zero values.", "language": "en", "n_whitespaces": 16, "n_words": 17, "vocab_size": 17 }
https://github.com/OpenMined/PySyft.git
1
test_cancellation
def test_cancellation(self):
    deferred: "Deferred[str]" = Deferred()
    wrapper_deferred = stop_cancellation(deferred)

    # Cancel the new `Deferred`.
    wrapper_deferred.cancel()
    self.assertTrue(wrapper_deferred.called)
    self.failureResultOf(wrapper_deferred, CancelledError)
    self.assertFalse(
        deferred.called, "Original `Deferred` was unexpectedly cancelled."
    )

    # Now make the inner `Deferred` fail.
    # The `Failure` must be consumed, otherwise unwanted tracebacks will be printed
    # in logs.
    deferred.errback(ValueError("abc"))
    self.assertIsNone(deferred.result, "`Failure` was not consumed")
91bc15c772d22fbe814170ab2e0fdbfa50f9c372
10
test_async_helpers.py
126
Add `stop_cancellation` utility function (#12106)
71,509
0
163
69
46
247,147
54
synapse
16
tests/util/test_async_helpers.py
Python
11
{ "docstring": "Test that cancellation of the new `Deferred` leaves the original running.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
https://github.com/matrix-org/synapse.git
1
test_from
def test_from(self) -> None:
    self._create_users_with_media(20, 2)

    channel = self.make_request(
        "GET",
        self.url + "?from=5",
        access_token=self.admin_user_tok,
    )

    self.assertEqual(200, channel.code, msg=channel.json_body)
    self.assertEqual(channel.json_body["total"], 20)
    self.assertEqual(len(channel.json_body["users"]), 15)
    self.assertNotIn("next_token", channel.json_body)
    self._check_fields(channel.json_body["users"])
c97042f7eef3748e17c90e48a4122389a89c4735
11
test_statistics.py
168
Use literals in place of `HTTPStatus` constants in tests (#13469)
72,674
0
121
103
25
249,167
25
synapse
15
tests/rest/admin/test_statistics.py
Python
15
{ "docstring": "\n Testing list of media with a defined starting point (from)\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 10 }
https://github.com/matrix-org/synapse.git
1
get_internal_block_refs
def get_internal_block_refs(self) -> List[ObjectRef[Block]]:
    return self._plan.execute().get_blocks()
35a157948efa7ba1adf1d1507c2af1d6d84a7db7
10
dataset.py
44
Lay the groundwork for lazy dataset optimization (no behavior changes) (#22233) This PR refactors Dataset execution to enable lazy mode in the future, which can reduce memory usage in large-scale ingest pipelines. There should be no behavior changes in this PR. Many of the optimizations are also punted for future work.
33,326
0
20
26
6
144,867
6
ray
8
python/ray/data/dataset.py
Python
12
{ "docstring": "Get a list of references to the underlying blocks of this dataset.\n\n This function can be used for zero-copy access to the data. It blocks\n until the underlying blocks are computed.\n\n Time complexity: O(1)\n\n Returns:\n A list of references to this dataset's blocks.\n ", "language": "en", "n_whitespaces": 89, "n_words": 43, "vocab_size": 31 }
https://github.com/ray-project/ray.git
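A hedged usage sketch of the zero-copy accessor documented above (assumes `ray` is installed; the method has been reorganized in some newer Ray releases, so treat the exact call as version-dependent):

```python
import ray

ds = ray.data.range(1000)
block_refs = ds.get_internal_block_refs()  # List[ObjectRef[Block]]: handles only, no copy yet
blocks = ray.get(block_refs)               # blocks until the underlying blocks are computed
print(len(block_refs), type(blocks[0]))
```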
1
get_group_tag_value_count
def get_group_tag_value_count(self, group, environment_id, key):
    raise NotImplementedError
72e351082168f68cbaa5700a51e8ed577222e887
6
base.py
22
feat(perf_issues): Fix `GroupTagKeyDetailsEndpoint` to work for performance issues (#38860) This allows this endpoint to return results for performance issues.
18,042
0
21
14
7
85,773
7
sentry
6
src/sentry/tagstore/base.py
Python
2
{ "docstring": "\n >>> get_group_tag_value_count(group, 3, 'key1')\n ", "language": "en", "n_whitespaces": 19, "n_words": 4, "vocab_size": 4 }
https://github.com/getsentry/sentry.git
2
_create_local_rank_map
def _create_local_rank_map(self) -> Dict:
    rank_mapping = {}
    ip_dict = defaultdict(int)
    for world_rank in range(len(self.worker_group)):
        worker = self.worker_group.workers[world_rank]
        node_ip = worker.metadata.node_ip
        rank_mapping[world_rank] = ip_dict[node_ip]
        ip_dict[node_ip] += 1
    return rank_mapping
80ae651f259e1ea13c21b285d6bfcc7fd834ef9c
11
backend_executor.py
104
[Train] Clean up `ray.train` package (#25566)
32,355
0
107
65
22
141,415
28
ray
15
python/ray/train/_internal/backend_executor.py
Python
30
{ "docstring": "Create mapping from worker world_rank to local_rank.\n\n Example:\n Worker 0: 0.0.0.0\n Worker 1: 0.0.0.0\n Worker 2: 0.0.0.1\n Worker 3: 0.0.0.0\n Worker 4: 0.0.0.1\n\n Workers 0, 1, 3 are on 0.0.0.0.\n Workers 2, 4 are on 0.0.0.1.\n\n Expected Output:\n {\n 0 -> 0,\n 1 -> 1,\n 2 -> 0,\n 3 -> 2,\n 4 -> 1\n }\n ", "language": "en", "n_whitespaces": 254, "n_words": 55, "vocab_size": 34 }
https://github.com/ray-project/ray.git
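The local-rank bookkeeping above is easy to verify in isolation. The sketch below re-implements the same loop on plain data (no Ray `WorkerGroup`), reproducing the docstring's example exactly; `local_ranks` is a hypothetical helper name.

```python
from collections import defaultdict
from typing import Dict, List

def local_ranks(node_ips: List[str]) -> Dict[int, int]:
    rank_mapping: Dict[int, int] = {}
    ip_dict: Dict[str, int] = defaultdict(int)
    for world_rank, node_ip in enumerate(node_ips):
        # Local rank = number of workers already seen on this node.
        rank_mapping[world_rank] = ip_dict[node_ip]
        ip_dict[node_ip] += 1
    return rank_mapping

assert local_ranks(["0.0.0.0", "0.0.0.0", "0.0.0.1", "0.0.0.0", "0.0.0.1"]) == {
    0: 0,
    1: 1,
    2: 0,
    3: 2,
    4: 1,
}
```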
1
iloc
def iloc(self):
    from dask.dataframe.indexing import _iLocIndexer

    # For dataframes with unique column names, this will be transformed into a __getitem__ call
    return _iLocIndexer(self)
cccb9d8d8e33a891396b1275c2448c352ef40c27
7
core.py
33
absolufy-imports - No relative - PEP8 (#8796) Conversation in https://github.com/dask/distributed/issues/5889
36,552
0
51
19
23
156,093
23
dask
6
dask/dataframe/core.py
Python
3
{ "docstring": "Purely integer-location based indexing for selection by position.\n\n Only indexing the column positions is supported. Trying to select\n row positions will raise a ValueError.\n\n See :ref:`dataframe.indexing` for more.\n\n Examples\n --------\n >>> df.iloc[:, [2, 0, 1]] # doctest: +SKIP\n ", "language": "en", "n_whitespaces": 88, "n_words": 38, "vocab_size": 35 }
https://github.com/dask/dask.git
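A minimal sketch of the column-positional indexing the docstring describes (assumes `pandas` and `dask[dataframe]` are installed; the toy frame is illustrative):

```python
import dask.dataframe as dd
import pandas as pd

pdf = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
ddf = dd.from_pandas(pdf, npartitions=2)

# Only column positions may be selected; selecting row positions raises ValueError.
print(ddf.iloc[:, [2, 0, 1]].compute())
```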
3
check_for_non_existent_openapi_endpoints
def check_for_non_existent_openapi_endpoints(self) -> None:
    openapi_paths = set(get_openapi_paths())
    undocumented_paths = openapi_paths - self.checked_endpoints
    undocumented_paths -= self.buggy_documentation_endpoints
    undocumented_paths -= self.pending_endpoints
    try:
        self.assert_length(undocumented_paths, 0)
    except AssertionError:  # nocoverage
        msg = "The following endpoints have been documented but can't be found in urls.py:"
        for undocumented_path in undocumented_paths:
            msg += f"\n + {undocumented_path}"
        raise AssertionError(msg)
b0ce4f1bce8031881addecb1e86073483517f392
13
test_openapi.py
113
docs: Fix many spelling mistakes. Signed-off-by: Anders Kaseorg <[email protected]>
17,642
0
159
63
42
83,261
50
zulip
13
zerver/tests/test_openapi.py
Python
18
{ "docstring": "Here, we check to see if every endpoint documented in the OpenAPI\n documentation actually exists in urls.py and thus in actual code.\n Note: We define this as a helper called at the end of\n test_openapi_arguments instead of as a separate test to ensure that\n this test is only executed after test_openapi_arguments so that it's\n results can be used here in the set operations.", "language": "en", "n_whitespaces": 97, "n_words": 63, "vocab_size": 50 }
https://github.com/zulip/zulip.git
3
_send_connect
def _send_connect(self, identifier):
    # type: (int) -> List[XCPScannerResult]
    all_slaves = []
    body = Connect(connection_mode=0x00)
    xcp_req_and_res_list = self._scan(identifier, body, 0xFF, ConnectPositiveResponse)
    for req_and_res in xcp_req_and_res_list:
        result = XCPScannerResult(response_id=req_and_res[1].identifier,
                                  request_id=identifier)
        all_slaves.append(result)
        log_automotive.info(
            "Detected XCP slave for broadcast identifier: " + str(
                identifier) + "\nResponse: " + str(result))
    if len(all_slaves) == 0:
        log_automotive.info(
            "No XCP slave detected for identifier: " + str(identifier))
    return all_slaves
495b21f2867e48286767085c8cf2918e4092e9dc
14
scanner.py
167
Add Automotive Logger for all debug outputs of the automotive layer
52,743
0
288
101
46
209,600
61
scapy
20
scapy/contrib/automotive/xcp/scanner.py
Python
16
{ "docstring": "\n Sends CONNECT Message on the Control Area Network\n ", "language": "en", "n_whitespaces": 23, "n_words": 8, "vocab_size": 8 }
https://github.com/secdev/scapy.git
2
_calc_mode
def _calc_mode(path):
    try:
        mode = _path_stat(path).st_mode
    except OSError:
        mode = 0o666
    # We always ensure write access so we can update cached files
    # later even when the source files are read-only on Windows (#6074)
    mode |= 0o200
    return mode
8198943edd73a363c266633e1aa5b2a9e9c9f526
11
_bootstrap_external.py
50
add python 3.10.4 for windows
55,174
0
75
27
34
218,170
40
XX-Net
6
python3.10.4/Lib/importlib/_bootstrap_external.py
Python
7
{ "docstring": "Calculate the mode permissions for a bytecode file.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 8 }
https://github.com/XX-net/XX-Net.git
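The permission arithmetic in `_calc_mode` is just two bitwise operations; a standalone illustration with assumed example modes:

```python
read_only = 0o444                 # e.g. a read-only source file on Windows
print(oct(read_only | 0o200))     # '0o644' -> owner write bit forced on
fallback = 0o666                  # the value used when os.stat() on the source fails
print(oct(fallback | 0o200))      # '0o666' -> already writable, unchanged
```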
3
download
def download(self, url_or_urls):
    download_config = self.download_config.copy()
    download_config.extract_compressed_file = False
    # Default to using 16 parallel thread for downloading
    # Note that if we have less than or equal to 16 files, multi-processing is not activated
    if download_config.num_proc is None:
        download_config.num_proc = 16
    if download_config.download_desc is None:
        download_config.download_desc = "Downloading data"

    download_func = partial(self._download, download_config=download_config)

    start_time = datetime.now()
    downloaded_path_or_paths = map_nested(
        download_func,
        url_or_urls,
        map_tuple=True,
        num_proc=download_config.num_proc,
        parallel_min_length=16,
        disable_tqdm=not is_progress_bar_enabled(),
        desc="Downloading data files",
    )
    duration = datetime.now() - start_time
    logger.info(f"Downloading took {duration.total_seconds() // 60} min")

    url_or_urls = NestedDataStructure(url_or_urls)
    downloaded_path_or_paths = NestedDataStructure(downloaded_path_or_paths)
    self.downloaded_paths.update(dict(zip(url_or_urls.flatten(), downloaded_path_or_paths.flatten())))

    start_time = datetime.now()
    self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths)
    duration = datetime.now() - start_time
    logger.info(f"Checksum Computation took {duration.total_seconds() // 60} min")

    return downloaded_path_or_paths.data
a5e05ccffa49051cbad84c007f31725ad4b2e7f7
13
download_manager.py
330
Fix multiprocessing in map_nested (#4740) * Do multiprocessing in map_nested when num_proc >= len(iterable) * Add missing type hint * Update docstring * Fix tests * Document download default and fix docstring * Fix docstring syntax * Add tests * Fix test for Python<3.8 * Download with multiprocessing only if at least 16 files * Revert test fix * Rename multiprocessing_min_length to parallel_min_length
22,123
0
355
186
73
105,462
109
datasets
33
src/datasets/download/download_manager.py
Python
28
{ "docstring": "Download given URL(s).\n\n By default, if there is more than one URL to download, multiprocessing is used with maximum `num_proc = 16`.\n Pass customized `download_config.num_proc` to change this behavior.\n\n Args:\n url_or_urls (`str` or `list` or `dict`): URL or list/dict of URLs to download. Each URL is a `str`.\n\n Returns:\n `str` or `list` or `dict`: The downloaded paths matching the given input `url_or_urls`.\n\n Example:\n\n ```py\n >>> downloaded_files = dl_manager.download('https://storage.googleapis.com/seldon-datasets/sentence_polarity_v1/rt-polaritydata.tar.gz')\n ```\n ", "language": "en", "n_whitespaces": 154, "n_words": 69, "vocab_size": 56 }
https://github.com/huggingface/datasets.git
2
execute_test_case
def execute_test_case(self, test_case):
    # type: (AutomotiveTestCaseABC) -> None
    test_case.pre_execute(
        self.socket, self.target_state, self.configuration)
    try:
        test_case_kwargs = self.configuration[test_case.__class__.__name__]
    except KeyError:
        test_case_kwargs = dict()
    log_interactive.debug("[i] Execute test_case %s with args %s",
                          test_case.__class__.__name__, test_case_kwargs)
    test_case.execute(self.socket, self.target_state, **test_case_kwargs)
    test_case.post_execute(
        self.socket, self.target_state, self.configuration)
    self.check_new_states(test_case)
    self.check_new_testcases(test_case)
f549b66f3f523c1c1a4a21aadc40c6153c5ad77e
11
executor.py
162
Stabilize UDS_Scanner unit tests for appveyor (#3615) * Different approach * add event * fix test * cleanup * cleanup * lower timeout of GMLAN Scanner * apply feedback
52,642
0
182
103
33
209,211
39
scapy
18
scapy/contrib/automotive/scanner/executor.py
Python
14
{ "docstring": "\n This function ensures the correct execution of a testcase, including\n the pre_execute, execute and post_execute.\n Finally the testcase is asked if a new edge or a new testcase was\n generated.\n :param test_case: A test case to be executed\n :return: None\n ", "language": "en", "n_whitespaces": 90, "n_words": 40, "vocab_size": 34 }
https://github.com/secdev/scapy.git
1
backfill
def backfill(self, limit=None):
    warnings.warn(
        "backfill is deprecated and will be removed in a future version. "
        "Use bfill instead.",
        FutureWarning,
        stacklevel=find_stack_level(inspect.currentframe()),
    )
    return self.bfill(limit=limit)
709f274245dc2f3fe2d91ad53a4cfa57f9d41d59
12
groupby.py
68
DOC: Add deprecation infos to deprecated functions (#48599) * DOC: Add deprecation infos to deprecated functions * Add sections * Fix
40,410
0
96
40
24
169,282
24
pandas
11
pandas/core/groupby/groupby.py
Python
8
{ "docstring": "\n Backward fill the values.\n\n .. deprecated:: 1.4\n Use bfill instead.\n\n Parameters\n ----------\n limit : int, optional\n Limit of how many values to fill.\n\n Returns\n -------\n Series or DataFrame\n Object with missing values filled.\n ", "language": "en", "n_whitespaces": 130, "n_words": 33, "vocab_size": 32 }
https://github.com/pandas-dev/pandas.git
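A small sketch of the migration the deprecation warning asks for (assumes pandas >= 1.4; the toy frame is illustrative):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b", "b"], "val": [np.nan, 1.0, np.nan, 2.0]})
gb = df.groupby("key")

# Deprecated spelling:  gb.backfill(limit=1)
# Preferred equivalent:
print(gb.bfill(limit=1))
```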
1
disable_tf_random_generator
def disable_tf_random_generator():
    global _USE_GENERATOR_FOR_RNG
    _USE_GENERATOR_FOR_RNG = False
84afc5193d38057e2e2badf9c889ea87d80d8fbf
6
backend.py
20
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,141
0
16
10
6
269,510
7
keras
2
keras/backend.py
Python
3
{ "docstring": "Disable the `tf.random.Generator` as the RNG for Keras.\n\n See `tf.keras.backend.experimental.is_tf_random_generator_enabled` for more\n details.\n ", "language": "en", "n_whitespaces": 22, "n_words": 13, "vocab_size": 11 }
https://github.com/keras-team/keras.git
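A hedged usage sketch of the RNG toggle pair this setter belongs to; the functions are exposed under `tf.keras.backend.experimental` in recent TF releases, but the exact namespace may differ by version.

```python
import tensorflow as tf

exp = tf.keras.backend.experimental
exp.enable_tf_random_generator()
assert exp.is_tf_random_generator_enabled()

exp.disable_tf_random_generator()      # fall back to the stateful TF random ops
assert not exp.is_tf_random_generator_enabled()
```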
1
test_exception_handling
def test_exception_handling():
    # Raising an error in a subprocess should propagate to a runtime error in the master process.
    with pytest.raises(RuntimeError) as ex_info:
        isolated.call(fail)

    # The exception should include the offending function's name and the child's traceback which itself includes the
    # breaking line of code and the exception type and message (in that order). Unfortunately, there seems to be a bug
    # in pytest's exception rewriting which makes the exception type and message unverifiable when ran under pytest.
    assert ex_info.match(r)

    # It should have removed the following line.
    assert "Traceback (most recent call last)" not in str(ex_info.value)
f4a8368fb0d1f4e0a902cccfabccde3c8c0d93d1
10
test_isolation.py
76
Allow sources to be formatted using yapf 0.32.0 Allow sources to be formatted using yapf 0.32.0 in addition to currently pinned 0.31.0. Version 0.32.0 enables `blank_line_before_nested_class_or_def` by default. As this was disabled by default in earlier versions, we now need to explicitly turn it off in configuration. The two versions also seem to disagree about alignment of split string fragments (same line in 0.31, vertically aligned in 0.32). In our code, some cases look better vertically aligned (disable yapf on those via comment to prevent changes on older version), while the remaining cases should have been merged into a single string in the first place; so do that now.
77,458
0
132
39
69
263,836
98
pyinstaller
11
tests/unit/test_isolation.py
Python
5
{ "docstring": "\n Test the behaviour which an error is raised in the child process.\n (?s) call to fail\\(\\) failed .* assert 0, \"It's broken!\".*", "language": "en", "n_whitespaces": 28, "n_words": 22, "vocab_size": 21 }
https://github.com/pyinstaller/pyinstaller.git
2
_set_gradient_checkpointing
def _set_gradient_checkpointing(self, module, value=False):
    if isinstance(module, (Wav2Vec2ConformerEncoder, Wav2Vec2ConformerFeatureEncoder)):
        module.gradient_checkpointing = value


WAV2VEC2_CONFORMER_START_DOCSTRING = r
WAV2VEC2_CONFORMER_INPUTS_DOCSTRING = r


@add_start_docstrings(
    "The bare Wav2Vec2Conformer Model transformer outputting raw hidden-states without any specific head on top.",
    WAV2VEC2_CONFORMER_START_DOCSTRING,
)
5a9957358cebd616e58b2d1ab3b887c2f2793b45
@add_start_docstrings( "The bare Wav2Vec2Conformer Model transformer outputting raw hidden-states without any specific head on top.", WAV2VEC2_CONFORMER_START_DOCSTRING, )
9
modeling_wav2vec2_conformer.py
69
Add Wav2Vec2Conformer (#16812) * save intermediate * add wav2vec2 conformer * add more code * more * first test passes * make all checkpoints work * update * up * more clean ups * save clean-up * save clean-up * save more * remove bogus * finalize design conformer * remove vision * finish all tests * more changes * finish code * add doc tests * add slow tests * fix autoconfig test * up * correct docstring * up * update * fix * Apply suggestions from code review Co-authored-by: Sylvain Gugger <[email protected]> Co-authored-by: Anton Lozhkov <[email protected]> * Update docs/source/en/model_doc/wav2vec2-conformer.mdx * upload * save copied from * correct configs * fix model outputs * add to docs * fix imports * finish * finish code * correct copied from * correct again * correct make fix * improve make fix copies * save * correct fix copy from * correct init structure * correct * fix import * apply suggestions Co-authored-by: Sylvain Gugger <[email protected]> Co-authored-by: Anton Lozhkov <[email protected]>
6,985
1
53
28
31
38,500
34
transformers
11
src/transformers/models/wav2vec2_conformer/modeling_wav2vec2_conformer.py
Python
3
{ "docstring": "\n Wav2Vec2Conformer was proposed in [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech\n Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael\n Auli.\n\n This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the\n library implements for all its model (such as downloading or saving etc.).\n\n This model is a PyTorch [nn.Module](https://pytorch.org/docs/stable/nn.html#nn.Module) sub-class. Use it as a\n regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior.\n\n Parameters:\n config ([`Wav2Vec2ConformerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n\n Args:\n input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):\n Float values of input raw speech waveform. Values can be obtained by loading a *.flac* or *.wav* audio file\n into an array of type *List[float]* or a *numpy.ndarray*, *e.g.* via the soundfile library (*pip install\n soundfile*). To prepare the array into *input_values*, the [`Wav2Vec2Processor`] should be used for padding\n and conversion into a tensor of type *torch.FloatTensor*. See [`Wav2Vec2Processor.__call__`] for details.\n attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0,\n 1]`:\n\n - 1 for tokens that are **not masked**,\n - 0 for tokens that are **masked**.\n\n [What are attention masks?](../glossary#attention-mask)\n\n <Tip warning={true}>\n\n `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask ==\n True`. For all models whose processor has `config.return_attention_mask == False`, such as\n [wav2vec2_conformer-base](https://huggingface.co/facebook/wav2vec2-conformer-large-rel-pos),\n `attention_mask` should **not** be passed to avoid degraded performance when doing batched inference. For\n such models `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware\n that these models also yield slightly different results depending on whether `input_values` is padded or\n not.\n\n </Tip>\n\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned\n tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n", "language": "en", "n_whitespaces": 706, "n_words": 362, "vocab_size": 217 }
https://github.com/huggingface/transformers.git
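`_set_gradient_checkpointing` is internal plumbing; user code normally goes through the public `gradient_checkpointing_enable()` API of `PreTrainedModel`. A hedged sketch follows; the checkpoint id is illustrative (substitute a real wav2vec2-conformer checkpoint) and downloading it needs network access.

```python
from transformers import Wav2Vec2ConformerForCTC

# Hypothetical checkpoint id; replace with an existing wav2vec2-conformer model.
model = Wav2Vec2ConformerForCTC.from_pretrained("facebook/wav2vec2-conformer-rel-pos-large-960h-ft")
model.gradient_checkpointing_enable()  # flips `gradient_checkpointing` on the encoder modules
model.train()                          # checkpointing only has an effect during training
```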
2
__call__
def __call__(self, results):
    img = results['img']
    if self.to_float32:
        img = img.astype(np.float32)

    results['img_path'] = None
    results['img'] = img
    height, width = img.shape[:2]
    results['height'] = height
    results['width'] = width
    results['ori_height'] = height
    results['ori_width'] = width
    return results


@TRANSFORMS.register_module()
c71a160c5193b92f6a4f56c113e96b63decf8354
@TRANSFORMS.register_module()
11
loading.py
147
Refacter Visualization
70,426
1
123
78
22
244,549
36
mmdetection
13
mmdet/datasets/pipelines/loading.py
Python
12
{ "docstring": "Call functions to add image meta information.\n\n Args:\n results (dict): Result dict with Webcam read image in\n ``results['img']``.\n\n Returns:\n dict: The dict contains loaded image and meta information.\n ", "language": "en", "n_whitespaces": 86, "n_words": 28, "vocab_size": 23 }
https://github.com/open-mmlab/mmdetection.git
4
set_linestyle
def set_linestyle(self, ls):
    try:
        dashes = [mlines._get_dash_pattern(ls)]
    except ValueError:
        try:
            dashes = [mlines._get_dash_pattern(x) for x in ls]
        except ValueError as err:
            raise ValueError('Do not know how to convert {!r} to '
                             'dashes'.format(ls)) from err

    # get the list of raw 'unscaled' dash patterns
    self._us_linestyles = dashes

    # broadcast and scale the lw and dash patterns
    self._linewidths, self._linestyles = self._bcast_lwls(
        self._us_lw, self._us_linestyles)
511232b14b6fca586ac92bc5f8e0b0924f23172f
17
collections.py
138
remove redundant string handling
24,344
0
216
82
48
110,880
61
matplotlib
15
lib/matplotlib/collections.py
Python
12
{ "docstring": "\n Set the linestyle(s) for the collection.\n\n =========================== =================\n linestyle description\n =========================== =================\n ``'-'`` or ``'solid'`` solid line\n ``'--'`` or ``'dashed'`` dashed line\n ``'-.'`` or ``'dashdot'`` dash-dotted line\n ``':'`` or ``'dotted'`` dotted line\n =========================== =================\n\n Alternatively a dash tuple of the following form can be provided::\n\n (offset, onoffseq),\n\n where ``onoffseq`` is an even length tuple of on and off ink in points.\n\n Parameters\n ----------\n ls : str or tuple or list thereof\n Valid values for individual linestyles include {'-', '--', '-.',\n ':', '', (offset, on-off-seq)}. See `.Line2D.set_linestyle` for a\n complete description.\n ", "language": "en", "n_whitespaces": 288, "n_words": 90, "vocab_size": 69 }
https://github.com/matplotlib/matplotlib.git
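A minimal sketch of the three accepted linestyle forms on a `LineCollection` (assumes matplotlib is installed; the segments are illustrative):

```python
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection

segments = [[(0, 0), (1, 1)], [(0, 1), (1, 0)]]
lc = LineCollection(segments)
lc.set_linestyle("--")           # named style
lc.set_linestyle((0, (5, 2)))    # explicit (offset, on-off sequence) dash tuple
lc.set_linestyle(["-", ":"])     # one style per line in the collection

fig, ax = plt.subplots()
ax.add_collection(lc)
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
plt.show()
```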
3
test_valid_incremental_read_with_slices
def test_valid_incremental_read_with_slices(mocker):
    slices = [{"1": "1"}, {"2": "2"}]
    stream_output = [{"k1": "v1"}, {"k2": "v2"}, {"k3": "v3"}]
    s1 = MockStream(
        [({"sync_mode": SyncMode.incremental, "stream_slice": s, "stream_state": mocker.ANY}, stream_output) for s in slices], name="s1"
    )
    s2 = MockStream(
        [({"sync_mode": SyncMode.incremental, "stream_slice": s, "stream_state": mocker.ANY}, stream_output) for s in slices], name="s2"
    )
    state = {"cursor": "value"}
    mocker.patch.object(MockStream, "get_updated_state", return_value=state)
    mocker.patch.object(MockStream, "supports_incremental", return_value=True)
    mocker.patch.object(MockStream, "get_json_schema", return_value={})
    mocker.patch.object(MockStream, "stream_slices", return_value=slices)

    src = MockSource(streams=[s1, s2])
    catalog = ConfiguredAirbyteCatalog(streams=[_configured_stream(s1, SyncMode.incremental), _configured_stream(s2, SyncMode.incremental)])

    expected = [
        # stream 1 slice 1
        *_as_records("s1", stream_output),
        _state({"s1": state}),
        # stream 1 slice 2
        *_as_records("s1", stream_output),
        _state({"s1": state}),
        # stream 2 slice 1
        *_as_records("s2", stream_output),
        _state({"s1": state, "s2": state}),
        # stream 2 slice 2
        *_as_records("s2", stream_output),
        _state({"s1": state, "s2": state}),
    ]

    messages = _fix_emitted_at(list(src.read(logger, {}, catalog, state=defaultdict(dict))))

    assert expected == messages
f83eca58eaf2129d21b5796a301732ab22675130
15
test_abstract_source.py
544
CDK: Fix typing errors (#9037) * fix typing, drop AirbyteLogger * format * bump the version * use logger instead of fixture logger Co-authored-by: Eugene Kulak <[email protected]> Co-authored-by: auganbay <[email protected]>
455
0
280
326
74
3,353
128
airbyte
32
airbyte-cdk/python/unit_tests/sources/test_abstract_source.py
Python
28
{ "docstring": "Tests that an incremental read which uses slices outputs each record in the slice followed by a STATE message, for each slice", "language": "en", "n_whitespaces": 21, "n_words": 22, "vocab_size": 20 }
https://github.com/airbytehq/airbyte.git
7
setup_server
async def setup_server(self):
    from jina.helper import extend_rest_interface

    self.app = extend_rest_interface(
        get_fastapi_app(
            streamer=self.streamer,
            title=self.title,
            description=self.description,
            no_debug_endpoints=self.no_debug_endpoints,
            no_crud_endpoints=self.no_crud_endpoints,
            expose_endpoints=self.expose_endpoints,
            expose_graphql_endpoint=self.expose_graphql_endpoint,
            cors=self.cors,
            logger=self.logger,
        )
    )

    with ImportExtensions(required=True):
        from uvicorn import Config, Server
243639dd2b953fab8654747cbfdf6a0953b95578
12
gateway.py
137
refactor: extract gateway app logic into custom gateway class (#5153)
2,529
0
232
239
26
13,058
29
jina
21
jina/serve/runtimes/gateway/http/gateway.py
Python
40
{ "docstring": "\n Initialize and return GRPC server\n ", "language": "en", "n_whitespaces": 20, "n_words": 5, "vocab_size": 5 }
https://github.com/jina-ai/jina.git
2
get_sortable_by
def get_sortable_by(self, request):
    return (
        self.sortable_by
        if self.sortable_by is not None
        else self.get_list_display(request)
    )
9c19aff7c7561e3a82978a272ecdaad40dda5c00
9
options.py
44
Refs #33476 -- Reformatted code with Black.
50,365
0
68
28
13
203,420
14
django
5
django/contrib/admin/options.py
Python
6
{ "docstring": "Hook for specifying which fields can be sorted in the changelist.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
https://github.com/django/django.git
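A minimal sketch of the hook from the admin author's side (Django >= 2.1); `Book` and `myapp` are hypothetical:

```python
from django.contrib import admin

from myapp.models import Book  # hypothetical app/model


@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
    list_display = ("title", "author", "published")
    # Only these columns get sortable headers; leaving sortable_by as None
    # (the default) makes every list_display column sortable, which is the
    # fallback get_sortable_by() implements above.
    sortable_by = ("title", "published")
```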
1
test_class_weight_does_not_contains_more_classses
def test_class_weight_does_not_contains_more_classses():
    tree = DecisionTreeClassifier(class_weight={0: 1, 1: 10, 2: 20})

    # Does not raise
    tree.fit([[0, 0, 1], [1, 0, 1], [1, 2, 0]], [0, 0, 1])
3605c140af992b6ac52f04f1689c58509cc0b5b2
11
test_class_weight.py
89
FIX Support extra class_weights in compute_class_weight (#22595)
75,568
0
38
63
22
259,103
26
scikit-learn
5
sklearn/utils/tests/test_class_weight.py
Python
3
{ "docstring": "Check that class_weight can contain more labels than in y.\n\n Non-regression test for #22413\n ", "language": "en", "n_whitespaces": 20, "n_words": 14, "vocab_size": 14 }
https://github.com/scikit-learn/scikit-learn.git
4
_make_replica_execution_function
def _make_replica_execution_function(model, mode):
    if mode == ModeKeys.TRAIN:
        func = model.train_on_batch
    elif mode == ModeKeys.TEST:
        func = model.test_on_batch
    else:
84afc5193d38057e2e2badf9c889ea87d80d8fbf
10
distributed_training_utils_v1.py
57
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
80,457
0
44
60
14
270,367
18
keras
9
keras/distribute/distributed_training_utils_v1.py
Python
11
{ "docstring": "A single step of the distributed execution on a replica.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
https://github.com/keras-team/keras.git
1
perform_long_operation
def perform_long_operation(self, func, end_key):
    thread = threading.Thread(target=_long_func_thread, args=(self, end_key, func), daemon=True)
    thread.start()
    return thread
992c5db3bcde5d735b0d00af66d9accfa25a1da7
10
PySimpleGUI.py
62
Updated Window.perform_long_operation docstring to warn that Threads are used
53,360
0
42
41
13
212,715
14
PySimpleGUI
12
PySimpleGUI.py
Python
4
{ "docstring": "\n Call your function that will take a long time to execute. When it's complete, send an event\n specified by the end_key.\n This is a way for you to \"ease into\" threading without learning the details of threading.\n Your function will run, and when it returns 2 things will happen:\n 1. The value you provide for end_key will be returned to you when you call window.read()\n 2. If your function returns a value, then the value returned will also be included in your windows.read call in the values dictionary\n\n IMPORTANT - This method uses THREADS... this means you CANNOT make any PySimpleGUI calls from\n the function you provide with the exception of one function, Window.write_event_value.\n\n\n :param func: A lambda or a function name with no parms\n :type func: Any\n :param end_key: The key that will be generated when the function returns\n :type end_key: (Any)\n :return: The id of the thread\n :rtype: threading.Thread\n ", "language": "en", "n_whitespaces": 281, "n_words": 151, "vocab_size": 99 }
https://github.com/PySimpleGUI/PySimpleGUI.git
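A minimal end-to-end sketch of the pattern the docstring describes (assumes PySimpleGUI is installed; the slow function and key names are illustrative):

```python
import time

import PySimpleGUI as sg

def slow_job():
    time.sleep(3)
    return "finished"

layout = [[sg.Button("Start"), sg.Text("", key="-STATUS-")]]
window = sg.Window("Long operation demo", layout)

while True:
    event, values = window.read()
    if event == sg.WIN_CLOSED:
        break
    if event == "Start":
        # Runs slow_job in a thread; per the docstring, only
        # window.write_event_value may be called from inside it.
        window.perform_long_operation(slow_job, "-JOB DONE-")
    if event == "-JOB DONE-":
        window["-STATUS-"].update(values["-JOB DONE-"])  # slow_job's return value

window.close()
```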
2
mixin_base_pod_parser
def mixin_base_pod_parser(parser):
    gp = add_arg_group(parser, title='Pod')

    gp.add_argument(
        '--uses-before',
        type=str,
        help='The executor attached after the Peas described by --uses, typically before sending to all '
        'shards, accepted type follows `--uses`',
    )
    gp.add_argument(
        '--uses-after',
        type=str,
        help='The executor attached after the Peas described by --uses, typically used for receiving from '
        'all shards, accepted type follows `--uses`',
    )
    gp.add_argument(
        '--external',
        action='store_true',
        default=False,
        help='The Pod will be considered an external Pod that has been started independently from the Flow.'
        'This Pod will not be context managed by the Flow.',
    )

    # hidden CLI used for internal only
    gp.add_argument(
        '--pod-role',
        type=PodRoleType.from_string,
        choices=list(PodRoleType),
        help='The role of this pod in the flow'
        if _SHOW_ALL_ARGS
        else argparse.SUPPRESS,
    )
6f22e6824cbe9e95528e53e0613d5b1babd4af8e
10
pod.py
171
refactor: take out peas hosts from cli (#4171)
1,871
0
276
117
75
10,586
110
jina
18
jina/parsers/peapods/pod.py
Python
37
{ "docstring": "Add mixin arguments required by :class:`BasePod` into the given parser.\n\n :param parser: the parser instance to which we add arguments\n ", "language": "en", "n_whitespaces": 26, "n_words": 20, "vocab_size": 18 }
https://github.com/jina-ai/jina.git