Dataset schema (one row per extracted function; the rows below follow this column order, with empty string fields omitted):

  complexity        int64    1 to 56
  n_identifiers     int64    1 to 114
  code              string   lengths 19 to 12.7k
  path              string   lengths 8 to 134
  n_ast_nodes       int64    12 to 2.35k
  ast_errors        string   lengths 0 to 4.01k
  repo              string   lengths 3 to 28
  documentation     dict
  n_words           int64    2 to 866
  language          string   1 distinct value
  vocab_size        int64    2 to 323
  commit_id         string   lengths 40 to 40
  file_name         string   lengths 5 to 79
  id                int64    243 to 338k
  nloc              int64    1 to 228
  token_counts      int64    5 to 1.4k
  fun_name          string   lengths 1 to 77
  url               string   lengths 31 to 60
  commit_message    string   lengths 3 to 15.3k
  n_whitespaces     int64    1 to 3.23k
  n_ast_errors      int64    0 to 20
  d_id              int64    74 to 121k
  ast_levels        int64    4 to 29
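For orientation, a dataset with this schema can be loaded and filtered with the Hugging Face `datasets` library. The sketch below is a minimal example under the assumption that the dataset is published on the Hub; the repository id `user/code-functions-annotated` is a hypothetical placeholder, not the actual name.

    # Minimal sketch, assuming the dataset is available on the Hugging Face Hub.
    from datasets import load_dataset

    ds = load_dataset("user/code-functions-annotated", split="train")  # hypothetical repo id
    print(ds.column_names)  # complexity, n_identifiers, code, path, n_ast_nodes, ...

    # Example: keep only small, low-complexity functions for inspection.
    small = ds.filter(lambda row: row["complexity"] <= 5 and row["nloc"] <= 20)
    print(len(small))
    print(small[0]["repo"], small[0]["fun_name"])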
7
26
def set_name_with_model(self, model):
    _, table_name = split_identifier(model._meta.db_table)
    column_names = [
        model._meta.get_field(field_name).column
        for field_name, order in self.fields_orders
    ]
    column_names_with_order = [
        (("-%s" if order else "%s") % column_name)
        for column_name, (field_name, order) in zip(
            column_names, self.fields_orders
        )
    ]
    # The length of the parts of the name is based on the default max
    # length of 30 characters.
    hash_data = [table_name] + column_names_with_order + [self.suffix]
    self.name = "%s_%s_%s" % (
        table_name[:11],
        column_names[0][:7],
        "%s_%s" % (names_digest(*hash_data, length=6), self.suffix),
    )
    if len(self.name) > self.max_name_length:
        raise ValueError(
            "Index too long for multiple database support. Is self.suffix "
            "longer than 3 characters?"
        )
    if self.name[0] == "_" or self.name[0].isdigit():
        self.name = "D%s" % self.name[1:]
django/db/models/indexes.py
289
django
{ "docstring": "\n Generate a unique name for the index.\n\n The name is divided into 3 parts - table name (12 chars), field name\n (8 chars) and unique hash + suffix (10 chars). Each part is made to\n fit its size by truncating the excess length.\n ", "language": "en", "n_whitespaces": 79, "n_words": 43, "vocab_size": 37 }
109
Python
81
9c19aff7c7561e3a82978a272ecdaad40dda5c00
indexes.py
205,685
25
180
set_name_with_model
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
366
0
51,168
13
1
14
def test_sparse_matrix():
    tpot_obj = TPOTClassifier(
        random_state=42,
        population_size=1,
        offspring_size=2,
        generations=1,
        verbosity=0,
        config_dict='TPOT light'
    )
    assert_raises(ValueError, tpot_obj.fit, sparse_features, sparse_target)
tests/tpot_tests.py
68
tpot
{ "docstring": "Assert that the TPOT fit function will raise a ValueError in a sparse matrix with config_dict='TPOT light'.", "language": "en", "n_whitespaces": 16, "n_words": 17, "vocab_size": 16 }
17
Python
17
388616b6247ca4ea8de4e2f340d6206aee523541
tpot_tests.py
181,800
10
45
test_sparse_matrix
https://github.com/EpistasisLab/tpot.git
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
71
0
43,586
10
5
19
def check_requirements(self, reqs):
    # type: (Iterable[str]) -> Tuple[Set[Tuple[str, str]], Set[str]]
    missing = set()
    conflicting = set()
    if reqs:
        ws = WorkingSet(self._lib_dirs)
        for req in reqs:
            try:
                if ws.find(Requirement.parse(req)) is None:
                    missing.add(req)
            except VersionConflict as e:
                conflicting.add((str(e.args[0].as_requirement()),
                                 str(e.args[1])))
    return conflicting, missing
.venv/lib/python3.8/site-packages/pip/_internal/build_env.py
158
transferlearning
{ "docstring": "Return 2 sets:\n - conflicting requirements: set of (installed, wanted) reqs tuples\n - missing requirements: set of reqs\n ", "language": "en", "n_whitespaces": 47, "n_words": 18, "vocab_size": 13 }
40
Python
34
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
build_env.py
60,470
13
96
check_requirements
https://github.com/jindongwang/transferlearning.git
upd; format
231
0
12,176
21
5
16
def unpack_iterator_input(iterator):
    try:
        next_element = iterator.get_next()
    except tf.errors.OutOfRangeError:
        raise RuntimeError(
            "Your dataset iterator ran out of data; "
            "Make sure that your dataset can generate "
            "required number of samples."
        )

    if isinstance(next_element, (list, tuple)):
        if len(next_element) not in [2, 3]:
            raise ValueError(
                "Please provide model inputs as a list or tuple of 2 or 3 "
                "elements: (input, target) or (input, target, sample_weights) "
                "Received %s" % next_element
            )
        if len(next_element) == 2:
            x, y = next_element
            weights = None
        else:
            x, y, weights = next_element
    else:
        x = next_element
        y = None
        weights = None
    return x, y, weights
keras/engine/training_utils_v1.py
180
keras
{ "docstring": "Convert a dataset iterator to a tuple of tensors `x, y, sample_weights`.\n\n Args:\n iterator: Instance of a dataset iterator.\n\n Returns:\n Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.\n ", "language": "en", "n_whitespaces": 52, "n_words": 33, "vocab_size": 25 }
101
Python
67
84afc5193d38057e2e2badf9c889ea87d80d8fbf
training_utils_v1.py
271,843
26
105
unpack_iterator_input
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
315
0
80,862
14
1
22
def test_missing_required_field(self):
    cf3 = CustomField(type=CustomFieldTypeChoices.TYPE_TEXT, name='baz', required=True)
    cf3.save()
    cf3.content_types.set([ContentType.objects.get_for_model(Site)])

    site = Site(name='Test Site', slug='test-site')

    # Set custom field data with a required field omitted
    site.custom_field_data['foo'] = 'abc'
    with self.assertRaises(ValidationError):
        site.clean()

    site.custom_field_data['baz'] = 'def'
    site.clean()
netbox/extras/tests/test_customfields.py
165
netbox
{ "docstring": "\n Check that a ValidationError is raised if any required custom fields are not present.\n ", "language": "en", "n_whitespaces": 29, "n_words": 14, "vocab_size": 14 }
34
Python
28
ea6d86e6c4bb6037465410db6205a7471bc81a6c
test_customfields.py
266,037
10
92
test_missing_required_field
https://github.com/netbox-community/netbox.git
Closes #10052: The cf attribute now returns deserialized custom field data
115
0
78,274
11
22
38
def get_annotations(obj, *, globals=None, locals=None, eval_str=False):
    if isinstance(obj, type):
        # class
        obj_dict = getattr(obj, '__dict__', None)
        if obj_dict and hasattr(obj_dict, 'get'):
            ann = obj_dict.get('__annotations__', None)
            if isinstance(ann, types.GetSetDescriptorType):
                ann = None
        else:
            ann = None

        obj_globals = None
        module_name = getattr(obj, '__module__', None)
        if module_name:
            module = sys.modules.get(module_name, None)
            if module:
                obj_globals = getattr(module, '__dict__', None)
        obj_locals = dict(vars(obj))
        unwrap = obj
    elif isinstance(obj, types.ModuleType):
        # module
        ann = getattr(obj, '__annotations__', None)
        obj_globals = getattr(obj, '__dict__')
        obj_locals = None
        unwrap = None
    elif callable(obj):
        # this includes types.Function, types.BuiltinFunctionType,
        # types.BuiltinMethodType, functools.partial, functools.singledispatch,
        # "class funclike" from Lib/test/test_inspect... on and on it goes.
        ann = getattr(obj, '__annotations__', None)
        obj_globals = getattr(obj, '__globals__', None)
        obj_locals = None
        unwrap = obj
    else:
        raise TypeError(f"{obj!r} is not a module, class, or callable.")

    if ann is None:
        return {}

    if not isinstance(ann, dict):
        raise ValueError(f"{obj!r}.__annotations__ is neither a dict nor None")

    if not ann:
        return {}

    if not eval_str:
        return dict(ann)

    if unwrap is not None:
        while True:
            if hasattr(unwrap, '__wrapped__'):
                unwrap = unwrap.__wrapped__
                continue
            if isinstance(unwrap, functools.partial):
                unwrap = unwrap.func
                continue
            break
        if hasattr(unwrap, "__globals__"):
            obj_globals = unwrap.__globals__

    if globals is None:
        globals = obj_globals
    if locals is None:
        locals = obj_locals

    return_value = {key:
        value if not isinstance(value, str) else eval(value, globals, locals)
        for key, value in ann.items()
    }
    return return_value


# ----------------------------------------------------------- type-checking
python3.10.4/Lib/inspect.py
573
XX-Net
{ "docstring": "Compute the annotations dict for an object.\n\n obj may be a callable, class, or module.\n Passing in an object of any other type raises TypeError.\n\n Returns a dict. get_annotations() returns a new dict every time\n it's called; calling it twice on the same object will return two\n different but equivalent dicts.\n\n This function handles several details for you:\n\n * If eval_str is true, values of type str will\n be un-stringized using eval(). This is intended\n for use with stringized annotations\n (\"from __future__ import annotations\").\n * If obj doesn't have an annotations dict, returns an\n empty dict. (Functions and methods always have an\n annotations dict; classes, modules, and other types of\n callables may not.)\n * Ignores inherited annotations on classes. If a class\n doesn't have its own annotations dict, returns an empty dict.\n * All accesses to object members and dict values are done\n using getattr() and dict.get() for safety.\n * Always, always, always returns a freshly-created dict.\n\n eval_str controls whether or not values of type str are replaced\n with the result of calling eval() on those values:\n\n * If eval_str is true, eval() is called on values of type str.\n * If eval_str is false (the default), values of type str are unchanged.\n\n globals and locals are passed in to eval(); see the documentation\n for eval() for more information. If either globals or locals is\n None, this function may replace that value with a context-specific\n default, contingent on type(obj):\n\n * If obj is a module, globals defaults to obj.__dict__.\n * If obj is a class, globals defaults to\n sys.modules[obj.__module__].__dict__ and locals\n defaults to the obj class namespace.\n * If obj is a callable, globals defaults to obj.__globals__,\n although if obj is a wrapped function (using\n functools.update_wrapper()) it is first unwrapped.\n ", "language": "en", "n_whitespaces": 468, "n_words": 290, "vocab_size": 146 }
222
Python
108
8198943edd73a363c266633e1aa5b2a9e9c9f526
inspect.py
218,384
56
347
get_annotations
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
676
0
55,272
15
4
14
def load_weights(self, filepath, by_name=False, skip_mismatch=False):
    if backend.is_tpu_strategy(self._distribution_strategy):
        if self._distribution_strategy.extended.steps_per_run > 1 and (
            not saving_utils.is_hdf5_filepath(filepath)
        ):
            raise ValueError(
                "Load weights is not yet supported with TPUStrategy "
                "with steps_per_run greater than 1."
            )
    return super().load_weights(
        filepath, by_name=by_name, skip_mismatch=skip_mismatch
    )
keras/engine/training_v1.py
113
keras
{ "docstring": "Loads all layer weights, either from a TensorFlow or an HDF5 file.\n\n If `by_name` is False weights are loaded based on the network's\n topology. This means the architecture should be the same as when the\n weights were saved. Note that layers that don't have weights are not\n taken into account in the topological ordering, so adding or removing\n layers is fine as long as they don't have weights.\n\n If `by_name` is True, weights are loaded into layers only if they share\n the same name. This is useful for fine-tuning or transfer-learning\n models where some of the layers have changed.\n\n Only topological loading (`by_name=False`) is supported when loading\n weights from the TensorFlow format. Note that topological loading\n differs slightly between TensorFlow and HDF5 formats for user-defined\n classes inheriting from `tf.keras.Model`: HDF5 loads based on a\n flattened list of weights, while the TensorFlow format loads based on\n the object-local names of attributes to which layers are assigned in the\n `Model`'s constructor.\n\n Args:\n filepath: String, path to the weights file to load. For weight files\n in TensorFlow format, this is the file prefix (the same as was\n passed to `save_weights`).\n by_name: Boolean, whether to load weights by name or by topological\n order. Only topological loading is supported for weight files in\n TensorFlow format.\n skip_mismatch: Boolean, whether to skip loading of layers where\n there is a mismatch in the number of weights, or a mismatch in\n the shape of the weight (only valid when `by_name=True`).\n\n Returns:\n When loading a weight file in TensorFlow format, returns the same\n status object as `tf.train.Checkpoint.restore`. When graph building,\n restore ops are run automatically as soon as the network is built\n (on first call for user-defined classes inheriting from `Model`,\n immediately if it is already built).\n\n When loading weights in HDF5 format, returns `None`.\n\n Raises:\n ImportError: If h5py is not available and the weight file is in HDF5\n format.\n ValueError: If `skip_mismatch` is set to `True` when `by_name` is\n `False`.\n ", "language": "en", "n_whitespaces": 694, "n_words": 321, "vocab_size": 159 }
39
Python
35
e69dd22bc51b28b9f311c81abed92dfe46e82960
training_v1.py
280,645
12
70
load_weights
https://github.com/keras-team/keras.git
Add ability to do partial reloading of v3 models. PiperOrigin-RevId: 493123409
183
0
83,407
13
3
16
def get_mode_of_payments(invoice_list):
    mode_of_payments = {}
    if invoice_list:
        inv_mop = frappe.db.sql(
            % ", ".join(["%s"] * len(invoice_list)),
            tuple(invoice_list),
            as_dict=1,
        )
        for d in inv_mop:
            mode_of_payments.setdefault(d.parent, []).append(d.mode_of_payment)
    return mode_of_payments
erpnext/accounts/report/sales_register/sales_register.py
116
erpnext
{ "docstring": "select parent, mode_of_payment\n\t\t\tfrom `tabSales Invoice Payment` where parent in (%s) group by parent, mode_of_payment", "language": "en", "n_whitespaces": 13, "n_words": 15, "vocab_size": 13 }
26
Python
24
494bd9ef78313436f0424b918f200dab8fc7c20b
sales_register.py
65,345
13
71
get_mode_of_payments
https://github.com/frappe/erpnext.git
style: format code with black
15
0
13,866
16
6
11
def assert_configs_are_equivalent_besides_ids(config1, config2):
    assert config1["mode"] == config2["mode"], "Modes are different"
    assert config1["theme"] == config2["theme"], "Themes are different"
    assert len(config1["components"]) == len(
        config2["components"]
    ), "# of components are different"

    mapping = {}

    for c1, c2 in zip(config1["components"], config2["components"]):
        c1, c2 = deepcopy(c1), deepcopy(c2)
        mapping[c1["id"]] = c2["id"]
        c1.pop("id")
        c2.pop("id")
        assert c1 == c2, "{} does not match {}".format(c1, c2)
gradio/utils.py
213
gradio
{ "docstring": "Allows you to test if two different Blocks configs produce the same demo.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
57
Python
43
dc6175a21d7ccf922d53379df5a35111bd1093fd
utils.py
180,043
27
310
assert_configs_are_equivalent_besides_ids
https://github.com/gradio-app/gradio.git
tabbed-interface-rewritten (#958)
120
0
43,068
11
22
63
def execute_list_collection(self, artifacts_manager=None):
    if artifacts_manager is not None:
        artifacts_manager.require_build_metadata = False

    output_format = context.CLIARGS['output_format']
    collections_search_paths = set(context.CLIARGS['collections_path'])
    collection_name = context.CLIARGS['collection']
    default_collections_path = AnsibleCollectionConfig.collection_paths
    collections_in_paths = {}

    warnings = []
    path_found = False
    collection_found = False
    for path in collections_search_paths:
        collection_path = GalaxyCLI._resolve_path(path)
        if not os.path.exists(path):
            if path in default_collections_path:
                # don't warn for missing default paths
                continue
            warnings.append("- the configured path {0} does not exist.".format(collection_path))
            continue

        if not os.path.isdir(collection_path):
            warnings.append("- the configured path {0}, exists, but it is not a directory.".format(collection_path))
            continue

        path_found = True

        if collection_name:
            # list a specific collection
            validate_collection_name(collection_name)
            namespace, collection = collection_name.split('.')

            collection_path = validate_collection_path(collection_path)
            b_collection_path = to_bytes(os.path.join(collection_path, namespace, collection), errors='surrogate_or_strict')

            if not os.path.exists(b_collection_path):
                warnings.append("- unable to find {0} in collection paths".format(collection_name))
                continue

            if not os.path.isdir(collection_path):
                warnings.append("- the configured path {0}, exists, but it is not a directory.".format(collection_path))
                continue

            collection_found = True

            try:
                collection = Requirement.from_dir_path_as_unknown(
                    b_collection_path,
                    artifacts_manager,
                )
            except ValueError as val_err:
                six.raise_from(AnsibleError(val_err), val_err)

            if output_format in {'yaml', 'json'}:
                collections_in_paths[collection_path] = {
                    collection.fqcn: {'version': collection.ver}
                }
                continue

            fqcn_width, version_width = _get_collection_widths([collection])

            _display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)
            _display_collection(collection, fqcn_width, version_width)
        else:
            # list all collections
            collection_path = validate_collection_path(path)
            if os.path.isdir(collection_path):
                display.vvv("Searching {0} for collections".format(collection_path))
                collections = list(find_existing_collections(
                    collection_path, artifacts_manager,
                ))
            else:
                # There was no 'ansible_collections/' directory in the path, so there
                # or no collections here.
                display.vvv("No 'ansible_collections' directory found at {0}".format(collection_path))
                continue

            if not collections:
                display.vvv("No collections found at {0}".format(collection_path))
                continue

            if output_format in {'yaml', 'json'}:
                collections_in_paths[collection_path] = {
                    collection.fqcn: {'version': collection.ver} for collection in collections
                }
                continue

            # Display header
            fqcn_width, version_width = _get_collection_widths(collections)
            _display_header(collection_path, 'Collection', 'Version', fqcn_width, version_width)

            # Sort collections by the namespace and name
            for collection in sorted(collections, key=to_text):
                _display_collection(collection, fqcn_width, version_width)

    # Do not warn if the specific collection was found in any of the search paths
    if collection_found and collection_name:
        warnings = []

    for w in warnings:
        display.warning(w)

    if not path_found:
        raise AnsibleOptionsError(
            "- None of the provided paths were usable. Please specify a valid path with --{0}s-path".format(context.CLIARGS['type'])
        )

    if output_format == 'json':
        display.display(json.dumps(collections_in_paths))
    elif output_format == 'yaml':
        display.display(yaml_dump(collections_in_paths))

    return 0
lib/ansible/cli/galaxy.py
892
ansible
{ "docstring": "\n List all collections installed on the local system\n\n :param artifacts_manager: Artifacts manager.\n ", "language": "en", "n_whitespaces": 34, "n_words": 12, "vocab_size": 12 }
332
Python
160
05608b20e8f875d51866a184f8c579fe60498e05
galaxy.py
267,729
82
529
execute_list_collection
https://github.com/ansible/ansible.git
Fix listing collections that are missing the metadata required by build (#76596) * Rethread pr/70185 through the dependency resolver Hang optional metadata toggle on the ConcreteArtifactsManager instead of threading it through whole list codepath Don't error while listing collections if a collection's metadata is missing keys required for building a collection. Give an informative warning if metadata has been badly formatted. Co-authored-by: Sam Doran <[email protected]>
1,630
0
79,020
18
1
2
def piecolorway(self):
    return self["piecolorway"]
packages/python/plotly/plotly/graph_objs/_layout.py
22
plotly.py
{ "docstring": "\n Sets the default pie slice colors. Defaults to the main\n `colorway` used for trace colors. If you specify a new list\n here it can still be extended with lighter and darker colors,\n see `extendpiecolors`.\n\n The 'piecolorway' property is a colorlist that may be specified\n as a tuple, list, one-dimensional numpy array, or pandas Series of valid\n color strings\n\n Returns\n -------\n list\n ", "language": "en", "n_whitespaces": 139, "n_words": 61, "vocab_size": 55 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_layout.py
227,384
2
11
piecolorway
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
59,057
7
4
14
def _generate_object_paths(object_graph_def):
    paths = {0: "root"}
    nodes_to_visit = [0]

    while nodes_to_visit:
        current_node = nodes_to_visit.pop()
        current_path = paths[current_node]
        for reference in object_graph_def.nodes[current_node].children:
            if reference.node_id in paths:
                continue
            paths[reference.node_id] = "{}.{}".format(
                current_path, reference.local_name
            )
            nodes_to_visit.append(reference.node_id)

    return paths
keras/saving/saved_model/load.py
133
keras
{ "docstring": "Traverses through an ObjectGraphDef and builds a map of all node paths.", "language": "en", "n_whitespaces": 11, "n_words": 12, "vocab_size": 12 }
35
Python
29
84afc5193d38057e2e2badf9c889ea87d80d8fbf
load.py
276,019
14
81
_generate_object_paths
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
145
0
81,529
13
1
2
def notchwidth(self):
    return self["notchwidth"]
packages/python/plotly/plotly/graph_objs/_box.py
22
plotly.py
{ "docstring": "\n Sets the width of the notches relative to the box' width. For\n example, with 0, the notches are as wide as the box(es).\n\n The 'notchwidth' property is a number and may be specified as:\n - An int or float in the interval [0, 0.5]\n\n Returns\n -------\n int|float\n ", "language": "en", "n_whitespaces": 106, "n_words": 47, "vocab_size": 40 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_box.py
226,312
2
11
notchwidth
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
57,985
7
1
16
def python_slice_replace(funcstr):
    # parse the input parameters
    pattern = 'torch\.slice\((.*)\)'
    parameter_str = re.findall(pattern, funcstr)
    parameters = re.split(',', parameter_str[0])
    target_tensor = parameters[0]
    dim = int(parameters[1])
    dim_str = ','.join([':']*(dim) + [':'.join(parameters[2:])])

    print('%s[%s]' % (target_tensor, dim_str))
    new_str = funcstr.replace(
        'torch.slice(%s)' % parameter_str[0], '%s[%s]' % (target_tensor, dim_str))
    return new_str
nni/compression/pytorch/utils/utils.py
182
nni
{ "docstring": "\n translate the torch.slice to the appropriate python str that can be replace\n in the forward function string.\n\n Parameters\n ----------\n funcstr: str\n the str that calling the torch.slice, for example:\n _8 = torch.slice(attention_mask, 0, 0, 9223372036854775807, 1)\n\n Returns:\n new_str: str\n the string that should replace the original one\n ", "language": "en", "n_whitespaces": 93, "n_words": 47, "vocab_size": 34 }
45
Python
33
97d067e614243f06ed1f8e2d389512977fff8828
utils.py
113,277
11
107
python_slice_replace
https://github.com/microsoft/nni.git
Speedup enhancement (#4925)
85
0
24,877
14
14
43
def forward(self, input, encoder_state, embedded_input=None, incr_state=None):
    encoder_output, encoder_mask = encoder_state

    if input is not None:
        seq_len = input.size(1)
        positions = input.new(seq_len).long()
    else:
        seq_len = embedded_input.size(1)
        positions = embedded_input.new(seq_len).long()
    positions = torch.arange(seq_len, out=positions).unsqueeze(0)

    if incr_state is not None:
        # We're doing incremental decoding, so select only the most recent position
        if input is not None:
            input = input[:, -1:]
        if embedded_input is not None:
            embedded_input = embedded_input[:, -1:, :]
        if positions is not None:
            positions = positions[:, -1:]
    else:
        incr_state = {}

    if embedded_input is not None:
        tensor = embedded_input  # No need to copy because we only reassign below
    else:
        tensor = self.embeddings(input)
    if self.embeddings_scale:
        tensor = tensor * np.sqrt(self.dim)
    if self.variant == 'xlm':
        tensor = self.norm_embeddings(tensor)
    if positions.max().item() > self.n_positions:
        warn_once(
            'You are inputting a sequence of {x} length, but only have '
            '--n-positions {y}. Set --truncate or increase --n-positions'.format(
                x=positions.max().item(), y=self.n_positions
            )
        )
    tensor = tensor + self.position_embeddings(positions).expand_as(tensor)

    if self.variant == 'bart':
        tensor = self.norm_embeddings(tensor)

    tensor = self.dropout(tensor)  # --dropout

    new_incr_state = {}
    if getattr(self.layers, 'is_model_parallel', False):
        tensor, new_incr_state = self._apply_model_parallel(
            tensor, encoder_output, encoder_mask, incr_state=incr_state
        )
    else:
        for idx, layer in enumerate(self.layers):
            tensor, new_incr_state[idx] = layer(
                x=tensor,
                encoder_output=encoder_output,
                encoder_mask=encoder_mask,
                incr_state=incr_state.get(idx),
            )

    if self.variant == 'prelayernorm':
        tensor = self.norm_embeddings(tensor)

    return tensor, new_incr_state
projects/style_gen/modules.py
608
ParlAI
{ "docstring": "\n Forward pass with the ability to pass in token-embedded inputs.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 9 }
203
Python
113
ecdfbd0bb2ab76876e9fd3817d4502c3938a2ade
modules.py
195,059
53
378
forward
https://github.com/facebookresearch/ParlAI.git
Decoder-Only Transformer (#4329) * quick and dirty decoder-only implementation * fix decoder_only incremental decoding * remove unused code, add some comments, propogate func signature change * consolidate code in decoder.py * unify encoder_state * export PassThroughEncoder * add missing build_ functions * defaults in TransformerDecoderLayer __init__ * comments, consolidating more logic, simplified forward_layers args * resize token embeddings and unit test * attempt to suppress some unused import warnings * padded_tensor fp16 friendly * autoformat * decoder_only -> decoder * more documentation * update name in test * add missing dict args * more argument massaging * update TestBartDistillation::test_narrow_distillation_losses numbers * update TestTransformerDistillation::test_narrow_distillation_losses numbers * fix _pad_tensor in seeker Co-authored-by: klshuster <[email protected]>
795
0
47,178
16
2
19
def xkcd(scale=1, length=100, randomness=2):
    # This cannot be implemented in terms of contextmanager() or rc_context()
    # because this needs to work as a non-contextmanager too.
    if rcParams['text.usetex']:
        raise RuntimeError(
            "xkcd mode is not compatible with text.usetex = True")

    stack = ExitStack()
    stack.callback(dict.update, rcParams, rcParams.copy())

    from matplotlib import patheffects
    rcParams.update({
        'font.family': ['xkcd', 'xkcd Script', 'Humor Sans', 'Comic Neue',
                        'Comic Sans MS'],
        'font.size': 14.0,
        'path.sketch': (scale, length, randomness),
        'path.effects': [
            patheffects.withStroke(linewidth=4, foreground="w")],
        'axes.linewidth': 1.5,
        'lines.linewidth': 2.0,
        'figure.facecolor': 'white',
        'grid.linewidth': 0.0,
        'axes.grid': False,
        'axes.unicode_minus': False,
        'axes.edgecolor': 'black',
        'xtick.major.size': 8,
        'xtick.major.width': 3,
        'ytick.major.size': 8,
        'ytick.major.width': 3,
    })

    return stack


## Figures ##

@_api.make_keyword_only("3.6", "facecolor")
lib/matplotlib/pyplot.py
281
@_api.make_keyword_only("3.6", "facecolor")
matplotlib
{ "docstring": "\n Turn on `xkcd <https://xkcd.com/>`_ sketch-style drawing mode. This will\n only have effect on things drawn after this function is called.\n\n For best results, the \"Humor Sans\" font should be installed: it is\n not included with Matplotlib.\n\n Parameters\n ----------\n scale : float, optional\n The amplitude of the wiggle perpendicular to the source line.\n length : float, optional\n The length of the wiggle along the line.\n randomness : float, optional\n The scale factor by which the length is shrunken or expanded.\n\n Notes\n -----\n This function works by a number of rcParams, so it will probably\n override others you have set before.\n\n If you want the effects of this function to be temporary, it can\n be used as a context manager, for example::\n\n with plt.xkcd():\n # This figure will be in XKCD-style\n fig1 = plt.figure()\n # ...\n\n # This figure will be in regular style\n fig2 = plt.figure()\n ", "language": "en", "n_whitespaces": 270, "n_words": 145, "vocab_size": 93 }
100
Python
92
2d918ba09155810194bb4ba136369082ad46c8c8
pyplot.py
109,118
27
158
xkcd
https://github.com/matplotlib/matplotlib.git
Simplify impl. of functions optionally used as context managers. We can actually just put the "exit" logic into an ExitStack callback. If the return value is never `__enter__`'d via a "with" statement, it is never `__exit__`'d either.
285
1
23,441
14
20
40
def post_validate(self, templar):
    # save the omit value for later checking
    omit_value = templar.available_variables.get('omit')

    for (name, attribute) in self.fattributes.items():
        if attribute.static:
            value = getattr(self, name)

            # we don't template 'vars' but allow template as values for later use
            if name not in ('vars',) and templar.is_template(value):
                display.warning('"%s" is not templatable, but we found: %s, '
                                'it will not be templated and will be used "as is".' % (name, value))
            continue

        if getattr(self, name) is None:
            if not attribute.required:
                continue
            else:
                raise AnsibleParserError("the field '%s' is required but was not set" % name)
        elif not attribute.always_post_validate and self.__class__.__name__ not in ('Task', 'Handler', 'PlayContext'):
            # Intermediate objects like Play() won't have their fields validated by
            # default, as their values are often inherited by other objects and validated
            # later, so we don't want them to fail out early
            continue

        try:
            # Run the post-validator if present. These methods are responsible for
            # using the given templar to template the values, if required.
            method = getattr(self, '_post_validate_%s' % name, None)
            if method:
                value = method(attribute, getattr(self, name), templar)
            elif attribute.isa == 'class':
                value = getattr(self, name)
            else:
                # if the attribute contains a variable, template it now
                value = templar.template(getattr(self, name))

            # if this evaluated to the omit value, set the value back to
            # the default specified in the FieldAttribute and move on
            if omit_value is not None and value == omit_value:
                if callable(attribute.default):
                    setattr(self, name, attribute.default())
                else:
                    setattr(self, name, attribute.default)
                continue

            # and make sure the attribute is of the type it should be
            if value is not None:
                value = self.get_validated_value(name, attribute, value, templar)

            # and assign the massaged value back to the attribute field
            setattr(self, name, value)
        except (TypeError, ValueError) as e:
            value = getattr(self, name)
            raise AnsibleParserError("the field '%s' has an invalid value (%s), and could not be converted to an %s."
                                     "The error was: %s" % (name, value, attribute.isa, e), obj=self.get_ds(), orig_exc=e)
        except (AnsibleUndefinedVariable, UndefinedError) as e:
            if templar._fail_on_undefined_errors and name != 'name':
                if name == 'args':
                    msg = "The task includes an option with an undefined variable. The error was: %s" % (to_native(e))
                else:
                    msg = "The field '%s' has an invalid value, which includes an undefined variable. The error was: %s" % (name, to_native(e))
                raise AnsibleParserError(msg, obj=self.get_ds(), orig_exc=e)

    self._finalized = True
lib/ansible/playbook/base.py
611
ansible
{ "docstring": "\n we can't tell that everything is of the right type until we have\n all the variables. Run basic types (from isa) as well as\n any _post_validate_<foo> functions.\n ", "language": "en", "n_whitespaces": 57, "n_words": 27, "vocab_size": 24 }
376
Python
187
43153c58310d02223f2cb0964f4255ba1ac4ed53
base.py
267,571
45
373
post_validate
https://github.com/ansible/ansible.git
`FieldAttribute`s as descriptors (#73908)
1,300
0
78,953
20
5
17
def wait_for_nodes(self, timeout=30):
    start_time = time.time()
    while time.time() - start_time < timeout:
        clients = self.global_state.node_table()
        live_clients = [client for client in clients if client["Alive"]]

        expected = len(self.list_all_nodes())
        if len(live_clients) == expected:
            logger.debug("All nodes registered as expected.")
            return
        else:
            logger.debug(
                f"{len(live_clients)} nodes are currently registered, "
                f"but we are expecting {expected}"
            )
            time.sleep(0.1)
    raise TimeoutError("Timed out while waiting for nodes to join.")
python/ray/cluster_utils.py
182
ray
{ "docstring": "Waits for correct number of nodes to be registered.\n\n This will wait until the number of live nodes in the client table\n exactly matches the number of \"add_node\" calls minus the number of\n \"remove_node\" calls that have been made on this cluster. This means\n that if a node dies without \"remove_node\" having been called, this will\n raise an exception.\n\n Args:\n timeout (float): The number of seconds to wait for nodes to join\n before failing.\n\n Raises:\n TimeoutError: An exception is raised if we time out while waiting\n for nodes to join.\n ", "language": "en", "n_whitespaces": 198, "n_words": 90, "vocab_size": 62 }
61
Python
49
7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065
cluster_utils.py
130,567
16
100
wait_for_nodes
https://github.com/ray-project/ray.git
[CI] Format Python code with Black (#21975) See #21316 and #21311 for the motivation behind these changes.
257
0
29,306
17
2
14
def rotate_point(pin, r):
    if isinstance(r, tuple):
        # if r is of the form (vector, angle)
        q = Quaternion.from_axis_angle(r[0], r[1])
    else:
        # if r is a quaternion
        q = r.normalize()
    pout = q * Quaternion(0, pin[0], pin[1], pin[2]) * conjugate(q)
    return (pout.b, pout.c, pout.d)
sympy/algebras/quaternion.py
126
sympy
{ "docstring": "Returns the coordinates of the point pin(a 3 tuple) after rotation.\n\n Parameters\n ==========\n\n pin : tuple\n A 3-element tuple of coordinates of a point which needs to be\n rotated.\n r : Quaternion or tuple\n Axis and angle of rotation.\n\n It's important to note that when r is a tuple, it must be of the form\n (axis, angle)\n\n Returns\n =======\n\n tuple\n The coordinates of the point after rotation.\n\n Examples\n ========\n\n >>> from sympy import Quaternion\n >>> from sympy import symbols, trigsimp, cos, sin\n >>> x = symbols('x')\n >>> q = Quaternion(cos(x/2), 0, 0, sin(x/2))\n >>> trigsimp(Quaternion.rotate_point((1, 1, 1), q))\n (sqrt(2)*cos(x + pi/4), sqrt(2)*sin(x + pi/4), 1)\n >>> (axis, angle) = q.to_axis_angle()\n >>> trigsimp(Quaternion.rotate_point((1, 1, 1), (axis, angle)))\n (sqrt(2)*cos(x + pi/4), sqrt(2)*sin(x + pi/4), 1)\n\n ", "language": "en", "n_whitespaces": 322, "n_words": 123, "vocab_size": 71 }
43
Python
33
498015021131af4dbb07eb110e5badaba8250c7b
quaternion.py
196,016
7
83
rotate_point
https://github.com/sympy/sympy.git
Updated import locations
122
0
47,516
11
3
16
def vstack(tup, *, dtype=None, casting="same_kind"):
    if not overrides.ARRAY_FUNCTION_ENABLED:
        # raise warning if necessary
        _arrays_for_stack_dispatcher(tup, stacklevel=2)
    arrs = atleast_2d(*tup)
    if not isinstance(arrs, list):
        arrs = [arrs]
    return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting)


@array_function_dispatch(_vhstack_dispatcher)
numpy/core/shape_base.py
117
@array_function_dispatch(_vhstack_dispatcher)
numpy
{ "docstring": "\n Stack arrays in sequence vertically (row wise).\n\n This is equivalent to concatenation along the first axis after 1-D arrays\n of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by\n `vsplit`.\n\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). The functions `concatenate`, `stack` and\n `block` provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of ndarrays\n The arrays must have the same shape along all but the first axis.\n 1-D arrays must have the same length.\n\n dtype : str or dtype\n If provided, the destination array will have this dtype. Cannot be\n provided together with `out`.\n\n .. versionadded:: 1.24\n\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur. Defaults to 'same_kind'.\n\n .. versionadded:: 1.24\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 2-D.\n\n See Also\n --------\n concatenate : Join a sequence of arrays along an existing axis.\n stack : Join a sequence of arrays along a new axis.\n block : Assemble an nd-array from nested lists of blocks.\n hstack : Stack arrays in sequence horizontally (column wise).\n dstack : Stack arrays in sequence depth wise (along third axis).\n column_stack : Stack 1-D arrays as columns into a 2-D array.\n vsplit : Split an array into multiple sub-arrays vertically (row-wise).\n\n Examples\n --------\n >>> a = np.array([1, 2, 3])\n >>> b = np.array([4, 5, 6])\n >>> np.vstack((a,b))\n array([[1, 2, 3],\n [4, 5, 6]])\n\n >>> a = np.array([[1], [2], [3]])\n >>> b = np.array([[4], [5], [6]])\n >>> np.vstack((a,b))\n array([[1],\n [2],\n [3],\n [4],\n [5],\n [6]])\n\n ", "language": "en", "n_whitespaces": 499, "n_words": 282, "vocab_size": 175 }
31
Python
26
126046f84449fffeb0c75ae88657ce6b90236eee
shape_base.py
160,599
7
68
vstack
https://github.com/numpy/numpy.git
ENH: adding casting option to numpy.stack. (#21627) np.concatenate and np.stack are similar methods, but only np.concatenate has the casting option. This PR puts the casting option into the np.stack method to control what kind of data casting may occur Closes gh-20959 * ENH: adding casting option to numpy.stack. See #20959 * ENH: adding dtype option to numpy.stack. See #20959 * REV: removing auto-generated file loops_modulo.dispatch.c See numpy#20959 * REV: removing auto-generated file loops_modulo.dispatch.c See numpy#20959 * REV: removing inserted newlines See numpy#20959 Co-authored-by: alescrocaro <[email protected]> Co-authored-by: JessePires <[email protected]> Co-authored-by: patriarka <[email protected]> * DOC: inserting versionadded info in dtype and casting parameters. See numpy#20959 Co-authored-by: alescrocaro <[email protected]> Co-authored-by: JessePires <[email protected]> Co-authored-by: patriarka <[email protected]> * TST: writing tests to stack method with dtype and casting options See numpy#20959 Co-authored-by: alescrocaro <[email protected]> Co-authored-by: JessePires <[email protected]> Co-authored-by: patriarka <[email protected]> * DOC: adding upcoming_change file for new options casting and dtype in method stack. See numpy#20959 Co-authored-by: alescrocaro <[email protected]> Co-authored-by: JessePires <[email protected]> Co-authored-by: patriarka <[email protected]> * REV: reverting lint errors. See numpy#20959 Co-authored-by: alescrocaro <[email protected]> Co-authored-by: JessePires <[email protected]> Co-authored-by: patriarka <[email protected]> * DOC: inserting hstack and vstack methods in upcoming changes See numpy#20959 Co-authored-by: alescrocaro <[email protected]> Co-authored-by: JessePires <[email protected]> Co-authored-by: patriarka <[email protected]> * ENH: adding dtype and casting keyword arguments to numpy.vstack and numpy.hstack. See numpy#20959 Co-authored-by: alescrocaro <[email protected]> Co-authored-by: JessePires <[email protected]> Co-authored-by: patriarka <[email protected]> * TST: writing tests to vstack and hstack methods with dtype and casting keyword arguments. See numpy#20959 Co-authored-by: alescrocaro <[email protected]> Co-authored-by: JessePires <[email protected]> Co-authored-by: patriarka <[email protected]> * REV: reverting the 'out' option type in stack method. See numpy#20959 Co-authored-by: alescrocaro <[email protected]> Co-authored-by: JessePires <[email protected]> Co-authored-by: patriarka <[email protected]> * REV: Reverting out type changes in overload of shape_base.pyi file. See numpy#20959 Co-authored-by: alescrocaro <[email protected]> Co-authored-by: jhonatancunha <[email protected]> Co-authored-by: patriarka <[email protected]> * DOC: correcting some english erros in upcoming_changes file. See numpy#20959 Co-authored-by: alescrocaro <[email protected]> Co-authored-by: JessePires <[email protected]> Co-authored-by: patriarka <[email protected]> Co-authored-by: JessePires <[email protected]> Co-authored-by: alescrocaro <[email protected]> Co-authored-by: JessePires <[email protected]> Co-authored-by: patriarka <[email protected]>
66
1
38,662
10
13
8
def _determine_setitem_axis(self, row_lookup, col_lookup, row_scalar, col_scalar):
    if self.df.shape == (1, 1):
        return None if not (row_scalar ^ col_scalar) else 1 if row_scalar else 0
modin/pandas/indexing.py
61
modin
{ "docstring": "\n Determine an axis along which we should do an assignment.\n\n Parameters\n ----------\n row_lookup : slice or list\n Indexer for rows.\n col_lookup : slice or list\n Indexer for columns.\n row_scalar : bool\n Whether indexer for rows is scalar or not.\n col_scalar : bool\n Whether indexer for columns is scalar or not.\n\n Returns\n -------\n int or None\n None if this will be a both axis assignment, number of axis to assign in other cases.\n\n Notes\n -----\n axis = 0: column assignment df[col] = item\n axis = 1: row assignment df.loc[row] = item\n axis = None: assignment along both axes\n ", "language": "en", "n_whitespaces": 265, "n_words": 97, "vocab_size": 62 }
24
Python
21
0d9d14e6669be3dd6bb3b72222dbe6a6dffe1bee
indexing.py
152,952
23
156
_determine_setitem_axis
https://github.com/modin-project/modin.git
FIX-#3860: Fix single row Series assignment. (#3894) Signed-off-by: mvashishtha <[email protected]>
49
0
35,201
11
6
39
def _upsample_2d(self, hidden_states, weight=None, kernel=None, factor=2, gain=1):
    assert isinstance(factor, int) and factor >= 1

    # Setup filter kernel.
    if kernel is None:
        kernel = [1] * factor

    # setup kernel
    kernel = torch.tensor(kernel, dtype=torch.float32)
    if kernel.ndim == 1:
        kernel = torch.outer(kernel, kernel)
    kernel /= torch.sum(kernel)

    kernel = kernel * (gain * (factor**2))

    if self.use_conv:
        convH = weight.shape[2]
        convW = weight.shape[3]
        inC = weight.shape[1]

        pad_value = (kernel.shape[0] - factor) - (convW - 1)

        stride = (factor, factor)
        # Determine data dimensions.
        output_shape = (
            (hidden_states.shape[2] - 1) * factor + convH,
            (hidden_states.shape[3] - 1) * factor + convW,
        )
        output_padding = (
            output_shape[0] - (hidden_states.shape[2] - 1) * stride[0] - convH,
            output_shape[1] - (hidden_states.shape[3] - 1) * stride[1] - convW,
        )
        assert output_padding[0] >= 0 and output_padding[1] >= 0
        num_groups = hidden_states.shape[1] // inC

        # Transpose weights.
        weight = torch.reshape(weight, (num_groups, -1, inC, convH, convW))
        weight = torch.flip(weight, dims=[3, 4]).permute(0, 2, 1, 3, 4)
        weight = torch.reshape(weight, (num_groups * inC, -1, convH, convW))

        inverse_conv = F.conv_transpose2d(
            hidden_states, weight, stride=stride, output_padding=output_padding, padding=0
        )

        output = upfirdn2d_native(
            inverse_conv,
            torch.tensor(kernel, device=inverse_conv.device),
            pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2 + 1),
        )
    else:
        pad_value = kernel.shape[0] - factor
        output = upfirdn2d_native(
            hidden_states,
            torch.tensor(kernel, device=hidden_states.device),
            up=factor,
            pad=((pad_value + 1) // 2 + factor - 1, pad_value // 2),
        )

    return output
src/diffusers/models/resnet.py
660
diffusers
{ "docstring": "Fused `upsample_2d()` followed by `Conv2d()`.\n\n Padding is performed only once at the beginning, not between the operations. The fused op is considerably more\n efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of\n arbitrary order.\n\n Args:\n hidden_states: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.\n weight: Weight tensor of the shape `[filterH, filterW, inChannels,\n outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`.\n kernel: FIR filter of the shape `[firH, firW]` or `[firN]`\n (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling.\n factor: Integer upsampling factor (default: 2).\n gain: Scaling factor for signal magnitude (default: 1.0).\n\n Returns:\n output: Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same\n datatype as `hidden_states`.\n ", "language": "en", "n_whitespaces": 289, "n_words": 140, "vocab_size": 103 }
220
Python
109
a73f8b725105b12a60a9b22918bda68f8b6d26c3
resnet.py
337,070
45
430
_upsample_2d
https://github.com/huggingface/diffusers.git
Clean up resnet.py file (#780) * clean up resnet.py * make style and quality * minor formatting
759
0
120,955
18
3
8
def validate_thread_sharing(self):
    if not (self.allow_thread_sharing or self._thread_ident == _thread.get_ident()):
        raise DatabaseError(
            "DatabaseWrapper objects created in a "
            "thread can only be used in that same thread. The object "
            "with alias '%s' was created in thread id %s and this is "
            "thread id %s." % (self.alias, self._thread_ident, _thread.get_ident())
        )

# ##### Miscellaneous #####
django/db/backends/base/base.py
86
django
{ "docstring": "\n Validate that the connection isn't accessed by another thread than the\n one which originally created it, unless the connection was explicitly\n authorized to be shared between threads (via the `inc_thread_sharing()`\n method). Raise an exception if the validation fails.\n ", "language": "en", "n_whitespaces": 74, "n_words": 38, "vocab_size": 33 }
54
Python
46
9c19aff7c7561e3a82978a272ecdaad40dda5c00
base.py
204,824
8
48
validate_thread_sharing
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
153
0
50,907
13
5
14
def process_response(self, request, response):
    # If the given URL is "Not Found", then check if we should redirect to
    # a path with a slash appended.
    if response.status_code == 404 and self.should_redirect_with_slash(request):
        return self.response_redirect_class(self.get_full_path_with_slash(request))

    # Add the Content-Length header to non-streaming responses if not
    # already set.
    if not response.streaming and not response.has_header("Content-Length"):
        response.headers["Content-Length"] = str(len(response.content))

    return response
django/middleware/common.py
117
django
{ "docstring": "\n When the status code of the response is 404, it may redirect to a path\n with an appended slash if should_redirect_with_slash() returns True.\n ", "language": "en", "n_whitespaces": 45, "n_words": 23, "vocab_size": 22 }
58
Python
45
9c19aff7c7561e3a82978a272ecdaad40dda5c00
common.py
206,137
6
68
process_response
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
136
0
51,379
13
1
32
def test_duplicate_statistics_handle_integrity_error(hass_recorder, caplog):
    hass = hass_recorder()
    wait_recording_done(hass)

    period1 = dt_util.as_utc(dt_util.parse_datetime("2021-09-01 00:00:00"))
    period2 = dt_util.as_utc(dt_util.parse_datetime("2021-09-30 23:00:00"))

    external_energy_metadata_1 = {
        "has_mean": False,
        "has_sum": True,
        "name": "Total imported energy",
        "source": "test",
        "state_unit_of_measurement": "kWh",
        "statistic_id": "test:total_energy_import_tariff_1",
        "unit_of_measurement": "kWh",
    }
    external_energy_statistics_1 = [
        {
            "start": period1,
            "last_reset": None,
            "state": 3,
            "sum": 5,
        },
    ]
    external_energy_statistics_2 = [
        {
            "start": period2,
            "last_reset": None,
            "state": 3,
            "sum": 6,
        }
    ]

    with patch.object(
        statistics, "_statistics_exists", return_value=False
    ), patch.object(
        statistics, "_insert_statistics", wraps=statistics._insert_statistics
    ) as insert_statistics_mock:
        async_add_external_statistics(
            hass, external_energy_metadata_1, external_energy_statistics_1
        )
        async_add_external_statistics(
            hass, external_energy_metadata_1, external_energy_statistics_1
        )
        async_add_external_statistics(
            hass, external_energy_metadata_1, external_energy_statistics_2
        )
        wait_recording_done(hass)
        assert insert_statistics_mock.call_count == 3

    with session_scope(hass=hass) as session:
        tmp = session.query(recorder.db_schema.Statistics).all()
        assert len(tmp) == 2

    assert "Blocked attempt to insert duplicated statistic rows" in caplog.text
tests/components/recorder/test_statistics.py
387
core
{ "docstring": "Test the recorder does not blow up if statistics is duplicated.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
117
Python
79
dd20a7ea62fc003748c5f0cf99be25c69c9b5a05
test_statistics.py
307,759
50
224
test_duplicate_statistics_handle_integrity_error
https://github.com/home-assistant/core.git
Display statistics in the source's unit (#78031)
447
0
106,525
14
13
64
def save_model(model, filepath, weights_format="h5", use_zip=True):
    if not filepath.endswith(".keras"):
        raise ValueError(
            "Invalid filename: expected a `.keras` extension. "
            f"Received: filepath={filepath}"
        )
    if weights_format == "h5" and h5py is None:
        raise ImportError(
            "h5py must be installed in order to save a model in hdf5 format."
        )

    if not model.built:
        warnings.warn(
            "You are saving a model that has not yet been built. "
            "It might not contain any weights yet. "
            "Consider building the model first by calling it "
            "on some data.",
            stacklevel=2,
        )
    saving_v3_enabled_value = getattr(_SAVING_V3_ENABLED, "value", False)
    _SAVING_V3_ENABLED.value = True

    serialized_model_dict = serialize_keras_object(model)
    config_json = json.dumps(serialized_model_dict)
    # TODO(fchollet): consider saving dependencies list / versions in metadata.
    metadata_json = json.dumps(
        {
            "keras_version": keras.__version__,
            "date_saved": datetime.datetime.now().strftime("%Y-%m-%d@%H:%M:%S"),
        }
    )
    if use_zip:
        # Use a temporary directory for the storing files prior to zipping.
        write_path = _get_temp_dir()
    else:
        tf.io.gfile.makedirs(filepath)
        write_path = filepath
    try:
        # Write files locally before zipping.
        with open(tf.io.gfile.join(write_path, _METADATA_FILENAME), "w") as f:
            f.write(metadata_json)
        with open(tf.io.gfile.join(write_path, _CONFIG_FILENAME), "w") as f:
            f.write(config_json)

        weights_path = tf.io.gfile.join(write_path, _VARS_FNAME)
        assets_path = tf.io.gfile.join(write_path, _ASSETS_DIRNAME)

        if weights_format == "h5":
            weights_store = H5IOStore(weights_path, mode="w")
        elif weights_format == "npz":
            weights_store = NpzIOStore(weights_path, mode="w")
        else:
            raise ValueError(
                "Unknown `weights_format`. Expected 'h5' or 'npz'. "
                f"Received: {weights_format}"
            )

        _save_state(
            model,
            weights_handler=weights_store,
            assets_handler=DiskIOStore(assets_path),
            inner_path="",
            visited_trackables=set(),
        )
        weights_store.close()

        if use_zip:
            # Zip local files into an archive.
            with zipfile.ZipFile(filepath, "w") as zipfile_to_save:
                _write_to_zip_recursively(zipfile_to_save, write_path, "")
    except Exception as e:
        raise e
    finally:
        _SAVING_V3_ENABLED.value = saving_v3_enabled_value
        if use_zip and tf.io.gfile.exists(write_path):
            # Remove the directory temporarily used.
            tf.io.gfile.rmtree(write_path)
keras/saving/experimental/saving_lib.py
634
keras
{ "docstring": "Save an archive representing a Keras model to the given filepath.\n\n The zip-based archive contains the following structure:\n\n - JSON configuration file (`config.json`): Records of model, layer, and\n other object configurations.\n - Npz or h5 model variables file (`variables.npz` or `variables.h5`).\n - Assets files (if any) found in the `assets/` directory structure,\n which mirrors the model's inner structure.\n - JSON metadata file (`metdata.json`).\n\n The states of Keras trackables (layers, optimizers, loss, and metrics) are\n automatically saved as long as they can be discovered through the attributes\n returned by `dir(model)`. Typically, the state includes the variables\n associated with the trackable, but some specially purposed layers may\n contain more such as the vocabularies stored in the hashmaps. The trackables\n define how their asset state is saved by exposing `save_assets()` and\n `load_assets()` APIs.\n\n For the case of layer states, the variables will be visited as long as\n they are either 1) referenced via layer attributes, or 2) referenced via a\n container (list, tuple, or dict), and the container is referenced via a\n layer attribute.\n ", "language": "en", "n_whitespaces": 236, "n_words": 171, "vocab_size": 115 }
242
Python
167
0b393d4049afd187ed7d24ea70177a72cf4a3ce2
saving_lib.py
280,163
66
363
save_model
https://github.com/keras-team/keras.git
New saving: add npz support and make zipping optional. We should decide which store to go with by default. h5 is faster, but only marginally so. Zip has no speed impact (the long saving/loading time was due to the breakdown into many files/dirs previously). But it has a temporary disk space impact. Note: Using h5 without zipping will not work with GCS (due to H5 using its own file pointer). This issue could be worked around via special casing. All other combinations work with GCS. Saving time for NASNetLarge: - Legacy h5: 2.8s - New h5 + zip: 2.6s - New h5 + no zip: 2.5s - New npz + zip: 3.2s - New npz + no zip: 3.0s - Legacy savedmodel: 142.2s (!) Loading times are similar across the board (nozip is a bit faster). PiperOrigin-RevId: 481705383
800
0
83,277
16
2
6
def cleanup(self) -> None:
    if self.remove_config:
        os.remove(self.config_path)
test/lib/ansible_test/_internal/commands/integration/cloud/__init__.py
37
ansible
{ "docstring": "Clean up the cloud resource and any temporary configuration files after tests complete.", "language": "en", "n_whitespaces": 12, "n_words": 13, "vocab_size": 13 }
7
Python
7
3eb0485dd92c88cc92152d3656d94492db44b183
__init__.py
267,825
4
21
cleanup
https://github.com/ansible/ansible.git
ansible-test - Use more native type hints. (#78435) * ansible-test - Use more native type hints. Simple search and replace to switch from comments to native type hints for return types of functions with no arguments. * ansible-test - Use more native type hints. Conversion of simple single-line function annotation type comments to native type hints. * ansible-test - Use more native type hints. Conversion of single-line function annotation type comments with default values to native type hints. * ansible-test - Use more native type hints. Manual conversion of type annotation comments for functions which have pylint directives.
32
0
79,106
10
2
9
def notna(obj):  # noqa: PR01, RT01, D200
    if isinstance(obj, BasePandasDataset):
        return obj.notna()
    else:
        return pandas.notna(obj)


notnull = notna


@_inherit_docstrings(pandas.merge, apilink="pandas.merge")
modin/pandas/general.py
75
@_inherit_docstrings(pandas.merge, apilink="pandas.merge")
modin
{ "docstring": "\n Detect non-missing values for an array-like object.\n ", "language": "en", "n_whitespaces": 14, "n_words": 7, "vocab_size": 7 }
20
Python
19
2809f7c12384e2615390bcb76f2e7f0d88a3fffa
general.py
153,687
5
29
notna
https://github.com/modin-project/modin.git
DOCS-#4336: Reformat general utilities docstrings (#4338) Co-authored-by: Yaroslav Igoshev <[email protected]> Signed-off-by: Alexander Myskov <[email protected]>
42
1
35,545
10
14
38
def sample(self) -> SampleBatchType:
    if self.fake_sampler and self.last_batch is not None:
        return self.last_batch
    elif self.input_reader is None:
        raise ValueError(
            "RolloutWorker has no `input_reader` object! "
            "Cannot call `sample()`. You can try setting "
            "`create_env_on_driver` to True."
        )

    if log_once("sample_start"):
        logger.info(
            "Generating sample batch of size {}".format(
                self.rollout_fragment_length
            )
        )

    batches = [self.input_reader.next()]
    steps_so_far = (
        batches[0].count
        if self.count_steps_by == "env_steps"
        else batches[0].agent_steps()
    )

    # In truncate_episodes mode, never pull more than 1 batch per env.
    # This avoids over-running the target batch size.
    if self.batch_mode == "truncate_episodes":
        max_batches = self.num_envs
    else:
        max_batches = float("inf")

    while steps_so_far < self.rollout_fragment_length and (
        len(batches) < max_batches or self.policy_config.get("offline_sampling")
    ):
        batch = self.input_reader.next()
        steps_so_far += (
            batch.count
            if self.count_steps_by == "env_steps"
            else batch.agent_steps()
        )
        batches.append(batch)
    batch = concat_samples(batches)

    self.callbacks.on_sample_end(worker=self, samples=batch)

    # Always do writes prior to compression for consistency and to allow
    # for better compression inside the writer.
    self.output_writer.write(batch)

    if log_once("sample_end"):
        logger.info("Completed sample batch:\n\n{}\n".format(summarize(batch)))

    if self.compress_observations:
        batch.compress(bulk=self.compress_observations == "bulk")

    if self.fake_sampler:
        self.last_batch = batch
    return batch
rllib/evaluation/rollout_worker.py
430
ray
{ "docstring": "Returns a batch of experience sampled from this worker.\n\n This method must be implemented by subclasses.\n\n Returns:\n A columnar batch of experiences (e.g., tensors).\n\n Examples:\n >>> import gym\n >>> from ray.rllib.evaluation.rollout_worker import RolloutWorker\n >>> from ray.rllib.algorithms.pg.pg_tf_policy import PGTF1Policy\n >>> worker = RolloutWorker( # doctest: +SKIP\n ... env_creator=lambda _: gym.make(\"CartPole-v0\"), # doctest: +SKIP\n ... policy_spec=PGTF1Policy) # doctest: +SKIP\n >>> print(worker.sample()) # doctest: +SKIP\n SampleBatch({\"obs\": [...], \"action\": [...], ...})\n ", "language": "en", "n_whitespaces": 198, "n_words": 67, "vocab_size": 46 }
163
Python
109
d6b6dc560dd2920bf8ee1e4de01c4ec25fdab555
rollout_worker.py
128,786
62
249
sample
https://github.com/ray-project/ray.git
[AIR] Address multiple warnings in AIR examples (#28800) These changes address warnings and errors in several of the AIR examples. They're all small changes, so I've batched them together in this PR: - Fixed `concant_samples` deprecation warning, which appears in `rollout_worker.py` - Fixed call to `randrange` which passed floating point argument; this will generate errors for python>=3.11 - Removed another use of the `object_store_memory` in instantiating an actor - Replaced a number of strings (docstrings and regexes) which had escape sequences with string literals - Fixed an issue that broke the `tfx_tabular_train_to_serve` example where data that was being sent to the server was not being encoded as json correctly by `requests`. - Fixed a deprecated import of `ray.rllib.agents.marwil` in the `rl_offline_example`. Signed-off-by: pdmurray <[email protected]>
654
0
28,809
13
1
30
def benchmark_custom_training_mnist_bs_256(self):
    batch_size = 256
    run_iters = 5
    train_dataset = self.train_dataset.shuffle(buffer_size=1024).batch(
        batch_size
    )

    # Instantiate a loss function.
    loss_fn = tf.keras.losses.CategoricalCrossentropy(
        reduction=tf.keras.losses.Reduction.NONE
    )
    # Instantiate an optimizer to train the model.
    optimizer = tf.keras.optimizers.Adam()
    model = self._build_model()

    metrics, wall_time = self.measure_performance(
        model,
        train_dataset,
        loss_fn,
        optimizer,
        batch_size,
        run_iters,
        self.epochs,
    )
    extras = benchmark_util.get_keras_examples_metadata(
        "conv", batch_size, ".keras.ctl_graph"
    )
    self.report_benchmark(
        iters=run_iters, wall_time=wall_time, metrics=metrics, extras=extras
    )
keras/benchmarks/keras_examples_benchmarks/mnist_conv_custom_training_benchmark_test.py
197
keras
{ "docstring": "Measure performance with batch_size=256 and run_iters=5.", "language": "en", "n_whitespaces": 5, "n_words": 6, "vocab_size": 6 }
62
Python
46
84afc5193d38057e2e2badf9c889ea87d80d8fbf
mnist_conv_custom_training_benchmark_test.py
269,811
26
126
benchmark_custom_training_mnist_bs_256
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
302
0
80,290
13
21
33
def match_submerged_margins(layoutgrids, fig):
    for sfig in fig.subfigs:
        match_submerged_margins(layoutgrids, sfig)

    axs = [a for a in fig.get_axes()
           if a.get_subplotspec() is not None and a.get_in_layout()]

    for ax1 in axs:
        ss1 = ax1.get_subplotspec()
        if ss1.get_gridspec() not in layoutgrids:
            axs.remove(ax1)
            continue
        lg1 = layoutgrids[ss1.get_gridspec()]

        # interior columns:
        if len(ss1.colspan) > 1:
            maxsubl = np.max(
                lg1.margin_vals['left'][ss1.colspan[1:]] +
                lg1.margin_vals['leftcb'][ss1.colspan[1:]]
            )
            maxsubr = np.max(
                lg1.margin_vals['right'][ss1.colspan[:-1]] +
                lg1.margin_vals['rightcb'][ss1.colspan[:-1]]
            )
            for ax2 in axs:
                ss2 = ax2.get_subplotspec()
                lg2 = layoutgrids[ss2.get_gridspec()]
                if lg2 is not None and len(ss2.colspan) > 1:
                    maxsubl2 = np.max(
                        lg2.margin_vals['left'][ss2.colspan[1:]] +
                        lg2.margin_vals['leftcb'][ss2.colspan[1:]])
                    if maxsubl2 > maxsubl:
                        maxsubl = maxsubl2
                    maxsubr2 = np.max(
                        lg2.margin_vals['right'][ss2.colspan[:-1]] +
                        lg2.margin_vals['rightcb'][ss2.colspan[:-1]])
                    if maxsubr2 > maxsubr:
                        maxsubr = maxsubr2
            for i in ss1.colspan[1:]:
                lg1.edit_margin_min('left', maxsubl, cell=i)
            for i in ss1.colspan[:-1]:
                lg1.edit_margin_min('right', maxsubr, cell=i)

        # interior rows:
        if len(ss1.rowspan) > 1:
            maxsubt = np.max(
                lg1.margin_vals['top'][ss1.rowspan[1:]] +
                lg1.margin_vals['topcb'][ss1.rowspan[1:]]
            )
            maxsubb = np.max(
                lg1.margin_vals['bottom'][ss1.rowspan[:-1]] +
                lg1.margin_vals['bottomcb'][ss1.rowspan[:-1]]
            )

            for ax2 in axs:
                ss2 = ax2.get_subplotspec()
                lg2 = layoutgrids[ss2.get_gridspec()]
                if lg2 is not None:
                    if len(ss2.rowspan) > 1:
                        maxsubt = np.max([np.max(
                            lg2.margin_vals['top'][ss2.rowspan[1:]] +
                            lg2.margin_vals['topcb'][ss2.rowspan[1:]]
                        ), maxsubt])
                        maxsubb = np.max([np.max(
                            lg2.margin_vals['bottom'][ss2.rowspan[:-1]] +
                            lg2.margin_vals['bottomcb'][ss2.rowspan[:-1]]
                        ), maxsubb])
            for i in ss1.rowspan[1:]:
                lg1.edit_margin_min('top', maxsubt, cell=i)
            for i in ss1.rowspan[:-1]:
                lg1.edit_margin_min('bottom', maxsubb, cell=i)
lib/matplotlib/_constrained_layout.py
986
matplotlib
{ "docstring": "\n Make the margins that are submerged inside an Axes the same size.\n\n This allows axes that span two columns (or rows) that are offset\n from one another to have the same size.\n\n This gives the proper layout for something like::\n fig = plt.figure(constrained_layout=True)\n axs = fig.subplot_mosaic(\"AAAB\\nCCDD\")\n\n Without this routine, the axes D will be wider than C, because the\n margin width between the two columns in C has no width by default,\n whereas the margins between the two columns of D are set by the\n width of the margin between A and B. However, obviously the user would\n like C and D to be the same size, so we need to add constraints to these\n \"submerged\" margins.\n\n This routine makes all the interior margins the same, and the spacing\n between the three columns in A and the two column in C are all set to the\n margins between the two columns of D.\n\n See test_constrained_layout::test_constrained_layout12 for an example.\n ", "language": "en", "n_whitespaces": 218, "n_words": 158, "vocab_size": 87 }
190
Python
91
c73f4c455514cf5422d27bf38c93250de8316b21
_constrained_layout.py
109,439
64
623
match_submerged_margins
https://github.com/matplotlib/matplotlib.git
Merge SubplotBase into AxesBase.
1,147
0
23,586
26
1
8
def get_returned_qty_map(delivery_note):
    returned_qty_map = frappe._dict(
        frappe.db.sql(
            ,
            delivery_note,
        )
    )

    return returned_qty_map


@frappe.whitelist()
erpnext/stock/doctype/delivery_note/delivery_note.py
56
@frappe.whitelist()
erpnext
{ "docstring": "returns a map: {so_detail: returned_qty}select dn_item.dn_detail, abs(dn_item.qty) as qty\n\t\tfrom `tabDelivery Note Item` dn_item, `tabDelivery Note` dn\n\t\twhere dn.name = dn_item.parent\n\t\t\tand dn.docstatus = 1\n\t\t\tand dn.is_return = 1\n\t\t\tand dn.return_against = %s\n\t", "language": "en", "n_whitespaces": 27, "n_words": 33, "vocab_size": 26 }
13
Python
11
494bd9ef78313436f0424b918f200dab8fc7c20b
delivery_note.py
67,600
14
26
get_returned_qty_map
https://github.com/frappe/erpnext.git
style: format code with black
4
1
14,572
11
2
7
def set_dash_capstyle(self, s):
    cs = CapStyle(s)
    if self._dashcapstyle != cs:
        self.stale = True
    self._dashcapstyle = cs
lib/matplotlib/lines.py
52
matplotlib
{ "docstring": "\n How to draw the end caps if the line is `~Line2D.is_dashed`.\n\n The default capstyle is :rc:`lines.dash_capstyle`.\n\n Parameters\n ----------\n s : `.CapStyle` or %(CapStyle)s\n ", "language": "en", "n_whitespaces": 66, "n_words": 23, "vocab_size": 21 }
16
Python
12
b24acb7772e0534f4bcdb0b5b492d9d94954dd91
lines.py
107,199
5
31
set_dash_capstyle
https://github.com/matplotlib/matplotlib.git
DOC: Document default cap styles - remove '(default)' from cap style demo as this is only true for Line2D and the default rcParameters - document default cap styles for Line2D and Patch in their cap style setters - document default cap style for GraphicsContextBase in the same way as it's already done for joinstyle
55
0
22,639
9
6
9
def app_get_relative_path(requests_pathname, path):
    if requests_pathname == "/" and path == "":
        return "/"
    if requests_pathname != "/" and path == "":
        return requests_pathname
    if not path.startswith("/"):
        raise exceptions.UnsupportedRelativePath(
            f
        )
    return "/".join([requests_pathname.rstrip("/"), path.lstrip("/")])
dash/_get_paths.py
127
dash
{ "docstring": "\n Paths that aren't prefixed with a leading / are not supported.\n You supplied: {path}\n ", "language": "en", "n_whitespaces": 48, "n_words": 14, "vocab_size": 14 }
33
Python
20
c3c84b9ecf16bcc61ed80ec39d511af92fe07f2c
_get_paths.py
40,164
13
67
app_get_relative_path
https://github.com/plotly/dash.git
f-strings everywhere! fffff
83
0
7,331
11
7
31
def translate_exprs_to_base(exprs, base):
    new_exprs = dict(exprs)

    frames = set()
    for k, v in new_exprs.items():
        v.collect_frames(frames)
    frames.discard(base)

    while len(frames) > 0:
        mapper = InputMapper()
        new_frames = set()
        for frame in frames:
            frame_base = frame._op.input[0]
            if frame_base != base:
                new_frames.add(frame_base)
            assert isinstance(frame._op, TransformNode)
            mapper.add_mapper(frame, TransformMapper(frame._op))

        for k, v in new_exprs.items():
            new_expr = new_exprs[k].translate_input(mapper)
            new_expr.collect_frames(new_frames)
            new_exprs[k] = new_expr

        new_frames.discard(base)
        frames = new_frames

    res = OrderedDict()
    for col in exprs.keys():
        res[col] = new_exprs[col]
    return res
modin/experimental/core/execution/native/implementations/hdk_on_native/df_algebra.py
282
modin
{ "docstring": "\n Fold expressions.\n\n Fold expressions with their input nodes until `base`\n frame is the only input frame.\n\n Parameters\n ----------\n exprs : dict\n Expressions to translate.\n base : HdkOnNativeDataframe\n Required input frame for translated expressions.\n\n Returns\n -------\n dict\n Translated expressions.\n ", "language": "en", "n_whitespaces": 93, "n_words": 38, "vocab_size": 30 }
71
Python
47
e5b1888cd932909e49194d58035da34b210b91c4
df_algebra.py
154,583
25
176
translate_exprs_to_base
https://github.com/modin-project/modin.git
FEAT-#4946: Replace OmniSci with HDK (#4947) Co-authored-by: Iaroslav Igoshev <[email protected]> Signed-off-by: Andrey Pavlenko <[email protected]>
246
0
36,093
14
2
13
def get_http_proxy_names(self) -> bytes:
    if self.http_state is None:
        return None

    from ray.serve.generated.serve_pb2 import ActorNameList

    actor_name_list = ActorNameList(
        names=self.http_state.get_http_proxy_names().values()
    )
    return actor_name_list.SerializeToString()
python/ray/serve/controller.py
83
ray
{ "docstring": "Returns the http_proxy actor name list serialized by protobuf.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
21
Python
20
65d0c0aa48be8f9f7faae857d3ab71444997755a
controller.py
128,247
9
51
get_http_proxy_names
https://github.com/ray-project/ray.git
[Serve] add alpha gRPC support (#28175)
85
0
28,646
14
1
2
def decode_crop_and_flip_tf_record_batch(tf_record_batch):
release/air_tests/air_benchmarks/mlperf-train/resnet50_ray_air.py
13
ray
{ "docstring": "\n This version of the preprocessor fuses the load step with the crop and flip\n step, which should have better performance (at the cost of re-executing the\n load step on each epoch):\n - the reference tf.data implementation can use the fused decode_and_crop op\n - ray.data doesn't have to materialize the intermediate decoded batch.\n ", "language": "en", "n_whitespaces": 71, "n_words": 52, "vocab_size": 40 }
2
Python
2
02f911ce78137cb63ecb685a8ef8e56dcb60062c
resnet50_ray_air.py
134,143
5
45
decode_crop_and_flip_tf_record_batch
https://github.com/ray-project/ray.git
Benchmarking Ray Data bulk ingest as input file size changes. (#29296) This PR adds a benchmark which takes work from https://github.com/anyscale/air-benchmarks and makes it run as a release test. Full metrics are stored in Databricks. Signed-off-by: Cade Daniel <[email protected]>
5
0
30,203
6
4
16
def all_checks(ctx, connectors=None):  # type: ignore[no-untyped-def]
    tasks = (
        black,
        flake,
        isort,
        mypy,
        coverage,
    )
    for task_ in tasks:
        try:
            task_(ctx, connectors=connectors)
        except Exit as e:
            if e.code:
                raise


@task(help={"connectors": _arg_help_connectors, "write": "Write changes into the files (runs 'black' without '--check' option)"})
airbyte-integrations/connectors/tasks.py
107
@task(help={"connectors": _arg_help_connectors, "write": "Write changes into the files (runs 'black' without '--check' option)"})
airbyte
{ "docstring": "\n Run following checks one by one with default parameters: black, flake, isort, mypy, test, coverage.\n Zero exit code indicates about successful passing of all checks.\n Terminate on the first non-zero exit code.\n ", "language": "en", "n_whitespaces": 45, "n_words": 32, "vocab_size": 30 }
42
Python
42
e3e05d79655bc0b2b4c3fbc0c05b8d90fce6dcd8
tasks.py
3,696
14
50
all_checks
https://github.com/airbytehq/airbyte.git
update code-checkers config (#9707) fix `all-checks` command Signed-off-by: Sergei Solonitcyn <[email protected]>
140
1
518
12
8
25
def parseString(self, instring, parseAll=False):
    ParserElement.resetCache()
    if not self.streamlined:
        self.streamline()
        # ~ self.saveAsList = True
    for e in self.ignoreExprs:
        e.streamline()
    if not self.keepTabs:
        instring = instring.expandtabs()
    try:
        loc, tokens = self._parse(instring, 0)
        if parseAll:
            loc = self.preParse(instring, loc)
            se = Empty() + StringEnd()
            se._parse(instring, loc)
    except ParseBaseException as exc:
        if ParserElement.verbose_stacktrace:
            raise
        else:
            # catch and re-raise exception from here, clearing out pyparsing internal stack trace
            if getattr(exc, '__traceback__', None) is not None:
                exc.__traceback__ = self._trim_traceback(exc.__traceback__)
            raise exc
    else:
        return tokens
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
233
transferlearning
{ "docstring": "\n Execute the parse expression with the given string.\n This is the main interface to the client code, once the complete\n expression has been built.\n\n Returns the parsed data as a :class:`ParseResults` object, which may be\n accessed as a list, or as a dict or object with attributes if the given parser\n includes results names.\n\n If you want the grammar to require that the entire input string be\n successfully parsed, then set ``parseAll`` to True (equivalent to ending\n the grammar with ``StringEnd()``).\n\n Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string,\n in order to report proper column numbers in parse actions.\n If the input string contains tabs and\n the grammar uses parse actions that use the ``loc`` argument to index into the\n string being parsed, you can ensure you have a consistent view of the input\n string by:\n\n - calling ``parseWithTabs`` on your grammar before calling ``parseString``\n (see :class:`parseWithTabs`)\n - define your parse action using the full ``(s, loc, toks)`` signature, and\n reference the input string using the parse action's ``s`` argument\n - explictly expand the tabs in your input string before calling\n ``parseString``\n\n Example::\n\n Word('a').parseString('aaaaabaaa') # -> ['aaaaa']\n Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text\n ", "language": "en", "n_whitespaces": 389, "n_words": 197, "vocab_size": 121 }
80
Python
64
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
pyparsing.py
63,403
23
141
parseString
https://github.com/jindongwang/transferlearning.git
upd; format
359
0
13,288
17
11
36
def _create_input_dict_and_dummy_batch(self, view_requirements, existing_inputs):
    input_dict = {}
    for view_col, view_req in view_requirements.items():
        # Point state_in to the already existing self._state_inputs.
        mo = re.match("state_in_(\d+)", view_col)
        if mo is not None:
            input_dict[view_col] = self._state_inputs[int(mo.group(1))]
        # State-outs (no placeholders needed).
        elif view_col.startswith("state_out_"):
            continue
        # Skip action dist inputs placeholder (do later).
        elif view_col == SampleBatch.ACTION_DIST_INPUTS:
            continue
        # This is a tower: Input placeholders already exist.
        elif view_col in existing_inputs:
            input_dict[view_col] = existing_inputs[view_col]
        # All others.
        else:
            time_axis = not isinstance(view_req.shift, int)
            if view_req.used_for_training:
                # Create a +time-axis placeholder if the shift is not an
                # int (range or list of ints).
                # Do not flatten actions if action flattening disabled.
                if self.config.get("_disable_action_flattening") and view_col in [
                    SampleBatch.ACTIONS,
                    SampleBatch.PREV_ACTIONS,
                ]:
                    flatten = False
                # Do not flatten observations if no preprocessor API used.
                elif (
                    view_col in [SampleBatch.OBS, SampleBatch.NEXT_OBS]
                    and self.config["_disable_preprocessor_api"]
                ):
                    flatten = False
                # Flatten everything else.
                else:
                    flatten = True
                input_dict[view_col] = get_placeholder(
                    space=view_req.space,
                    name=view_col,
                    time_axis=time_axis,
                    flatten=flatten,
                )
    dummy_batch = self._get_dummy_batch_from_view_requirements(batch_size=32)

    return SampleBatch(input_dict, seq_lens=self._seq_lens), dummy_batch
rllib/policy/dynamic_tf_policy_v2.py
346
ray
{ "docstring": "Creates input_dict and dummy_batch for loss initialization.\n\n Used for managing the Policy's input placeholders and for loss\n initialization.\n Input_dict: Str -> tf.placeholders, dummy_batch: str -> np.arrays.\n\n Args:\n view_requirements (ViewReqs): The view requirements dict.\n existing_inputs (Dict[str, tf.placeholder]): A dict of already\n existing placeholders.\n\n Returns:\n Tuple[Dict[str, tf.placeholder], Dict[str, np.ndarray]]: The\n input_dict/dummy_batch tuple.\n ", "language": "en", "n_whitespaces": 155, "n_words": 50, "vocab_size": 43 }
164
Python
107
bc3a1d35cf6e9a5fd7eef908a8e76aefb80ce6a9
dynamic_tf_policy_v2.py
139,481
35
214
_create_input_dict_and_dummy_batch
https://github.com/ray-project/ray.git
[RLlib] Introduce new policy base classes. (#24742)
891
0
31,711
18
8
2
def lock(remote=None):
salt/fileserver/hgfs.py
16
salt
{ "docstring": "\n Place an update.lk\n\n ``remote`` can either be a dictionary containing repo configuration\n information, or a pattern. If the latter, then remotes for which the URL\n matches the pattern will be locked.\n ", "language": "en", "n_whitespaces": 47, "n_words": 31, "vocab_size": 27 }
2
Python
2
06aeefffad82d8f5db43b4429aeae87bad735acf
hgfs.py
216,569
21
115
lock
https://github.com/saltstack/salt.git
Don't leak sub-processes Signed-off-by: Pedro Algarvio <[email protected]>
5
0
54,644
6
5
12
def log_img_scale(img_scale, shape_order='hw', skip_square=False):
    if shape_order == 'hw':
        height, width = img_scale
    elif shape_order == 'wh':
        width, height = img_scale
    else:
        raise ValueError(f'Invalid shape_order {shape_order}.')

    if skip_square and (height == width):
        return False

    logger = get_root_logger()
    caller = get_caller_name()
    logger.info(f'image shape: height={height}, width={width} in {caller}')

    return True
mmdet/utils/logger.py
141
mmdetection
{ "docstring": "Log image size.\n\n Args:\n img_scale (tuple): Image size to be logged.\n shape_order (str, optional): The order of image shape.\n 'hw' for (height, width) and 'wh' for (width, height).\n Defaults to 'hw'.\n skip_square (bool, optional): Whether to skip logging for square\n img_scale. Defaults to False.\n\n Returns:\n bool: Whether to have done logging.\n ", "language": "en", "n_whitespaces": 121, "n_words": 51, "vocab_size": 41 }
47
Python
37
8d7da432af02a52bc5330b30997984335d0930a4
logger.py
244,075
13
72
log_img_scale
https://github.com/open-mmlab/mmdetection.git
Update YOLOX log for non square input (#7235)
102
0
70,229
12
2
8
def get_category(cls):
    if hasattr(cls, 'category'):
        return cls.category
    return cls.model._meta.app_config.verbose_name
netbox/netbox/search/__init__.py
48
netbox
{ "docstring": "\n Return the title of the search category under which this model is registered.\n ", "language": "en", "n_whitespaces": 28, "n_words": 13, "vocab_size": 12 }
9
Python
8
ffce5d968d8a77c97852999b6ef916e80c1de55f
__init__.py
265,839
4
28
get_category
https://github.com/netbox-community/netbox.git
8927 plugin search (#10489) * #7016 base search classes * 7016 add search indexes * 7016 add search indexes * 7016 add search indexes * 7016 add search indexes * 7016 add search indexes * 7016 add search indexes * 8927 refactor search * 8927 refactor search * 8927 refactor search * 8927 refactor search * 8927 get search choices working * 8927 cleanup - optimize * 8927 use backend search function * 8927 fix for plugin search * 8927 add docs * Move search app to a module under netbox/ * Utilize global registry to register model search classes * Build search form options from registry * Determine search categories from model app by default * Enable dynamic search registration for plugins * Update docs & improve plugin support * Clean up search backend class * Docs for #8927 Co-authored-by: jeremystretch <[email protected]>
41
0
78,210
9
5
11
def _get_animated_artists(self):
    return tuple(a for ax_ in self.ax.get_figure().get_axes()
                 for a in ax_.get_children()
                 if a.get_animated() and a not in self.artists)
lib/matplotlib/widgets.py
75
matplotlib
{ "docstring": "\n Convenience method to get all animated artists of the figure containing\n this widget, excluding those already present in self.artists.\n The returned tuple is not sorted by 'z_order': z_order sorting is\n valid only when considering all artists and not only a subset of all\n artists.\n ", "language": "en", "n_whitespaces": 87, "n_words": 44, "vocab_size": 37 }
19
Python
15
7603999becc00d39f7616ed20e426e776144cda1
widgets.py
107,073
4
46
_get_animated_artists
https://github.com/matplotlib/matplotlib.git
Improve docstring and add comments
73
0
22,587
13
11
42
def fit(self, X, y=None):
    subset_df = pd.read_csv(self.subset_list, header=0, index_col=0)

    if isinstance(self.sel_subset, int):
        self.sel_subset_name = subset_df.index[self.sel_subset]
    elif isinstance(self.sel_subset, str):
        self.sel_subset_name = self.sel_subset
    else:  # list or tuple
        self.sel_subset_name = []
        for s in self.sel_subset:
            if isinstance(s, int):
                self.sel_subset_name.append(subset_df.index[s])
            else:
                self.sel_subset_name.append(s)

    sel_features = subset_df.loc[self.sel_subset_name, 'Features']
    if not isinstance(sel_features, str):
        sel_features = ";".join(sel_features.tolist())

    sel_uniq_features = set(sel_features.split(';'))

    if isinstance(X, pd.DataFrame):  # use columns' names
        self.feature_names = list(X.columns.values)
        self.feat_list = sorted(list(set(sel_uniq_features).intersection(set(self.feature_names))))
        self.feat_list_idx = [list(X.columns).index(feat_name) for feat_name in self.feat_list]
    elif isinstance(X, np.ndarray):  # use index
        self.feature_names = list(range(X.shape[1]))
        sel_uniq_features = [int(val) for val in sel_uniq_features]
        self.feat_list = sorted(list(set(sel_uniq_features).intersection(set(self.feature_names))))
        self.feat_list_idx = self.feat_list

    if not len(self.feat_list):
        raise ValueError('No feature is found on the subset list!')
    return self
tpot/builtins/feature_set_selector.py
500
tpot
{ "docstring": "Fit FeatureSetSelector for feature selection\n\n Parameters\n ----------\n X: array-like of shape (n_samples, n_features)\n The training input samples.\n y: array-like, shape (n_samples,)\n The target values (integers that correspond to classes in classification, real numbers in regression).\n\n Returns\n -------\n self: object\n Returns a copy of the estimator\n ", "language": "en", "n_whitespaces": 134, "n_words": 45, "vocab_size": 40 }
109
Python
69
388616b6247ca4ea8de4e2f340d6206aee523541
feature_set_selector.py
181,853
29
313
fit
https://github.com/EpistasisLab/tpot.git
Revert "Deployed 7ccda9a with MkDocs version: 1.3.0" This reverts commit bd9629c40e01241766197119b581a99409b07068.
404
0
43,622
18
1
2
def removesrc(self):
    return self["removesrc"]
packages/python/plotly/plotly/graph_objs/layout/_modebar.py
22
plotly.py
{ "docstring": "\n Sets the source reference on Chart Studio Cloud for `remove`.\n\n The 'removesrc' property must be specified as a string or\n as a plotly.grid_objs.Column object\n\n Returns\n -------\n str\n ", "language": "en", "n_whitespaces": 77, "n_words": 27, "vocab_size": 25 }
4
Python
4
43e3a4011080911901176aab919c0ecf5046ddd3
_modebar.py
231,617
2
11
removesrc
https://github.com/plotly/plotly.py.git
switch to black .22
18
0
63,061
7
1
5
def __len__(self) -> int:
    return len(self._valid_keys)
rllib/policy/policy_map.py
27
ray
{ "docstring": "Returns number of all policies, including the stashed-to-disk ones.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
6
Python
6
ed3f3c08225c7049a96a290b586c67c37e2d0bc0
policy_map.py
137,052
3
15
__len__
https://github.com/ray-project/ray.git
[RLlib] PolicyMap LRU cache enhancements: Swap out policies (instead of GC'ing and recreating) + use Ray object store (instead of file system). (#29513)
20
0
31,064
8
1
8
def test_copy_keep_live_false_not_emits_signal(self):
    homepage = Page.objects.get(url_path="/home/")
    signal_fired = False
wagtail/core/tests/test_page_model.py
39
wagtail
{ "docstring": "Test that copying of a live page with keep_live=False not emits a page_published signal.", "language": "en", "n_whitespaces": 13, "n_words": 14, "vocab_size": 13 }
8
Python
7
d10f15e55806c6944827d801cd9c2d53f5da4186
test_page_model.py
74,351
7
51
test_copy_keep_live_false_not_emits_signal
https://github.com/wagtail/wagtail.git
Reformat with black
29
0
16,249
10
2
7
def SqueezeAndExciteBlock(filters_in, se_filters, name=None):
    if name is None:
        name = str(backend.get_uid("squeeze_and_excite"))
keras/applications/regnet.py
48
keras
{ "docstring": "Implements the Squeeze and excite block (https://arxiv.org/abs/1709.01507).\n\n Args:\n filters_in: input filters to the block\n se_filters: filters to squeeze to\n name: name prefix\n\n Returns:\n A function object\n ", "language": "en", "n_whitespaces": 55, "n_words": 26, "vocab_size": 21 }
11
Python
10
84afc5193d38057e2e2badf9c889ea87d80d8fbf
regnet.py
269,416
5
32
SqueezeAndExciteBlock
https://github.com/keras-team/keras.git
Reformatting the codebase with black. PiperOrigin-RevId: 450093126
24
0
80,065
13
4
16
def _sanitize_column(self, value) -> ArrayLike:
    self._ensure_valid_index(value)

    # We should never get here with DataFrame value
    if isinstance(value, Series):
        return _reindex_for_setitem(value, self.index)
    elif isinstance(value, dict):
        return _reindex_for_setitem(Series(value), self.index)

    if is_list_like(value):
        com.require_length_match(value, self.index)
    return sanitize_array(value, self.index, copy=True, allow_2d=True)
pandas/core/frame.py
132
pandas
{ "docstring": "\n Ensures new columns (which go into the BlockManager as new blocks) are\n always copied and converted into an array.\n\n Parameters\n ----------\n value : scalar, Series, or array-like\n\n Returns\n -------\n numpy.ndarray or ExtensionArray\n ", "language": "en", "n_whitespaces": 96, "n_words": 32, "vocab_size": 29 }
36
Python
30
cd2b8196c519b58c82cccdb8472d86c1c58f9511
frame.py
167,519
21
86
_sanitize_column
https://github.com/pandas-dev/pandas.git
BUG: DataFrame.loc not aligning dict when setting to a column (#47361) * BUG: DataFrame.loc not aligning dict when setting to a column * Add partial case * Use is_dict_like * Revert "Use is_dict_like" This reverts commit d2708512b751c6470690fdff0abc9c99404dc002.
118
0
40,030
12
7
20
def get_all_items(date_range, company, field, limit=None):
    if field in ("available_stock_qty", "available_stock_value"):
        select_field = "sum(actual_qty)" if field == "available_stock_qty" else "sum(stock_value)"
        return frappe.db.get_all(
            "Bin",
            fields=["item_code as name", "{0} as value".format(select_field)],
            group_by="item_code",
            order_by="value desc",
            limit=limit,
        )
    else:
        if field == "total_sales_amount":
            select_field = "sum(order_item.base_net_amount)"
            select_doctype = "Sales Order"
        elif field == "total_purchase_amount":
            select_field = "sum(order_item.base_net_amount)"
            select_doctype = "Purchase Order"
        elif field == "total_qty_sold":
            select_field = "sum(order_item.stock_qty)"
            select_doctype = "Sales Order"
        elif field == "total_qty_purchased":
            select_field = "sum(order_item.stock_qty)"
            select_doctype = "Purchase Order"

        date_condition = get_date_condition(date_range, "sales_order.transaction_date")

        return frappe.db.sql(
            .format(
                select_field, select_doctype, date_condition
            ),
            (company, cint(limit)),
            as_dict=1,
        )  # nosec


@frappe.whitelist()
erpnext/startup/leaderboard.py
284
@frappe.whitelist()
erpnext
{ "docstring": "\n\t\t\tselect order_item.item_code as name, {0} as value\n\t\t\tfrom `tab{1}` sales_order join `tab{1} Item` as order_item\n\t\t\t\ton sales_order.name = order_item.parent\n\t\t\twhere sales_order.docstatus = 1\n\t\t\t\tand sales_order.company = %s {2}\n\t\t\tgroup by order_item.item_code\n\t\t\torder by value desc\n\t\t\tlimit %s\n\t\t", "language": "en", "n_whitespaces": 29, "n_words": 37, "vocab_size": 29 }
96
Python
56
494bd9ef78313436f0424b918f200dab8fc7c20b
leaderboard.py
67,568
40
152
get_all_items
https://github.com/frappe/erpnext.git
style: format code with black
65
1
14,559
14
2
7
def _get_reconciled_name_object(self, other) -> MultiIndex:
    names = self._maybe_match_names(other)
    if self.names != names:
        # error: Cannot determine type of "rename"
        return self.rename(names)  # type: ignore[has-type]
    return self
pandas/core/indexes/multi.py
58
pandas
{ "docstring": "\n If the result of a set operation will be self,\n return self, unless the names change, in which\n case make a shallow copy of self.\n ", "language": "en", "n_whitespaces": 54, "n_words": 25, "vocab_size": 21 }
26
Python
24
5c66e65d7b9fef47ccb585ce2fd0b3ea18dc82ea
multi.py
169,226
10
34
_get_reconciled_name_object
https://github.com/pandas-dev/pandas.git
TYP: type all arguments with bool default values (#48624) * TYP: type all arguments with bool default values * bool_t * ignore type error in pandas/core/arrays/sparse/accessor.py
77
0
40,404
9
2
6
def is_equilateral(self):
    return not has_variety(s.length for s in self.sides)
sympy/geometry/polygon.py
34
sympy
{ "docstring": "Are all the sides the same length?\n\n Returns\n =======\n\n is_equilateral : boolean\n\n See Also\n ========\n\n sympy.geometry.entity.GeometryEntity.is_similar, RegularPolygon\n is_isosceles, is_right, is_scalene\n\n Examples\n ========\n\n >>> from sympy import Triangle, Point\n >>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))\n >>> t1.is_equilateral()\n False\n\n >>> from sympy import sqrt\n >>> t2 = Triangle(Point(0, 0), Point(10, 0), Point(5, 5*sqrt(3)))\n >>> t2.is_equilateral()\n True\n\n ", "language": "en", "n_whitespaces": 183, "n_words": 57, "vocab_size": 41 }
9
Python
9
498015021131af4dbb07eb110e5badaba8250c7b
polygon.py
196,312
2
20
is_equilateral
https://github.com/sympy/sympy.git
Updated import locations
23
0
47,812
10
3
18
def makeport(self):
    sock = socket.create_server(("", 0), family=self.af, backlog=1)
    port = sock.getsockname()[1]  # Get proper port
    host = self.sock.getsockname()[0]  # Get proper host
    if self.af == socket.AF_INET:
        resp = self.sendport(host, port)
    else:
        resp = self.sendeprt(host, port)
    if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
        sock.settimeout(self.timeout)
    return sock
python3.10.4/Lib/ftplib.py
159
XX-Net
{ "docstring": "Create a new socket and send a PORT command for it.", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 10 }
43
Python
30
8198943edd73a363c266633e1aa5b2a9e9c9f526
ftplib.py
217,444
11
99
makeport
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
132
0
54,789
11
2
13
def test_in1d_mixed_boolean(self, kind):
    for dtype in np.typecodes["AllInteger"]:
        a = np.array([True, False, False], dtype=bool)
        b = np.array([1, 1, 1, 1], dtype=dtype)
        expected = np.array([True, False, False], dtype=bool)
        assert_array_equal(in1d(a, b, kind=kind), expected)

        a, b = b, a
        expected = np.array([True, True, True, True], dtype=bool)
        assert_array_equal(in1d(a, b, kind=kind), expected)
numpy/lib/tests/test_arraysetops.py
188
numpy
{ "docstring": "Test that in1d works as expected for bool/int input.", "language": "en", "n_whitespaces": 8, "n_words": 9, "vocab_size": 9 }
46
Python
26
f9bed20bffd88bce06dbc8be200179edfe7580a4
test_arraysetops.py
160,860
9
131
test_in1d_mixed_boolean
https://github.com/numpy/numpy.git
BUG: Fix numpy.isin for timedelta dtype (#21860) This PR fixes the issue discussed on #12065 and #21843 where 'timedelta64' was noted to be a subtype of numpy.integer. This in principle should detect any cases where int(np.min(ar2)) fails. This PR also adds unittests for these. * TST: Create in1d test for timedelta input * MAINT: fix in1d for timedelta input * TST: in1d raise ValueError for timedelta input * MAINT: Clean up type checking for isin kind="table" * TST: Add test for mixed boolean/integer in1d * MAINT: Increase readability of in1d type checking * STY: Apply small code style tweaks This is probably really mainly my personal opinion... Co-authored-by: Sebastian Berg <[email protected]>
137
0
38,763
12
1
6
def strip(self, text):
    return re.sub(self.puncs_regular_exp, " ", text).strip()
TTS/tts/utils/text/punctuation.py
42
TTS
{ "docstring": "Remove all the punctuations by replacing with `space`.\n\n Args:\n text (str): The text to be processed.\n\n Example::\n\n \"This is. example !\" -> \"This is example \"\n ", "language": "en", "n_whitespaces": 69, "n_words": 26, "vocab_size": 23 }
8
Python
8
8d85af84cd5f1748f979fddcbc4aab1449f61ecb
punctuation.py
261,971
2
25
strip
https://github.com/coqui-ai/TTS.git
Implement Punctuation class
22
0
77,086
10
4
9
def dispatch_line(self, frame):
    if self.stop_here(frame) or self.break_here(frame):
        self.user_line(frame)
        if self.quitting: raise BdbQuit
    return self.trace_dispatch
python3.10.4/Lib/bdb.py
66
XX-Net
{ "docstring": "Invoke user function and return trace function for line event.\n\n If the debugger stops on the current line, invoke\n self.user_line(). Raise BdbQuit if self.quitting is set.\n Return self.trace_dispatch to continue tracing in this scope.\n ", "language": "en", "n_whitespaces": 62, "n_words": 34, "vocab_size": 32 }
14
Python
13
8198943edd73a363c266633e1aa5b2a9e9c9f526
bdb.py
221,111
5
40
dispatch_line
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
57
0
56,214
9
2
14
def articles_from_same_day_2(self):
    from django.db import connection

    with connection.cursor() as cursor:
        cursor.execute(
            ,
            [connection.ops.adapt_datefield_value(self.pub_date), self.id],
        )
        return [self.__class__(*row) for row in cursor.fetchall()]
tests/custom_methods/models.py
105
django
{ "docstring": "\n Verbose version of get_articles_from_same_day_1, which does a custom\n database query for the sake of demonstration.\n \n SELECT id, headline, pub_date\n FROM custom_methods_article\n WHERE pub_date = %s\n AND id != %s", "language": "en", "n_whitespaces": 115, "n_words": 29, "vocab_size": 26 }
21
Python
21
9c19aff7c7561e3a82978a272ecdaad40dda5c00
models.py
202,517
12
63
articles_from_same_day_2
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
105
0
50,143
13
1
8
def async_create_post_interval_update_cb(self) -> None:
    self._post_interval_update_cb_canceller = async_call_later(
        self.hass,
        get_unavailability_interval(self.ping_interval),
        self.async_post_interval_update,
    )
homeassistant/components/sia/sia_entity_base.py
48
core
{ "docstring": "Create a port interval update cb and store the callback.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
11
Python
11
af4e37339a39badd5596e8bc9ba86d6c1994aa1b
sia_entity_base.py
291,944
7
30
async_create_post_interval_update_cb
https://github.com/home-assistant/core.git
Add Connectivity sensor to SIA (#64305) * implemented connectivity sensor * further cleanup off update code * cleanup and tighter behaviour for attributes * added seperate connectivity class to binary sensor * callbacks and keys * redid name and unique_id logic, non-breaking result * using entry more in inits * Fix import * fix ping_interval in sia_entity_base * added ping_interval default to next * fixed next Co-authored-by: Martin Hjelmare <[email protected]>
65
0
91,047
11
2
22
def test_rate_reached_perf_issue(self):
    snooze = GroupSnooze.objects.create(group=self.perf_group, count=10, window=24 * 60)
    for i in range(0, 10):
        self.store_transaction(
            environment=None,
            project_id=self.project.id,
            user_id=str(i),
            groups=[self.perf_group],
        )
    assert not snooze.is_valid(test_rates=True)
tests/sentry/models/test_groupsnooze.py
124
sentry
{ "docstring": "Test when a performance issue is ignored until it happens 10 times in a day", "language": "en", "n_whitespaces": 14, "n_words": 15, "vocab_size": 14 }
23
Python
23
d745edbd591063f2c3241cd1960c361834058823
test_groupsnooze.py
86,308
10
82
test_rate_reached_perf_issue
https://github.com/getsentry/sentry.git
ref(perf issues): Enable ignore in a time period (#39120) Enable ignoring a performance issue in a time period e.g. ignore this until it happens 10x / hr or ignore until 10 users experience it in an hour.
133
0
18,097
12
1
10
def test_lazy_load_get_prep_value(self):
    with self.assertNumQueries(1):
        instance = self.model.objects.get(pk=self.with_image.pk)

    # Expect a single UPDATE to update the model, without any additional
    # SELECT related to the image block that has not been accessed.
    with self.assertNumQueries(1):
        instance.save()
wagtail/tests/test_streamfield.py
82
wagtail
{ "docstring": "\n Saving a lazy StreamField that hasn't had its data accessed should not\n cause extra database queries by loading and then re-saving block values.\n Instead the initial JSON stream data should be written back for any\n blocks that have not been accessed.\n ", "language": "en", "n_whitespaces": 77, "n_words": 41, "vocab_size": 37 }
34
Python
29
dcae64c255f2fe97f658b1a3f438d3644b197661
test_streamfield.py
76,619
5
45
test_lazy_load_get_prep_value
https://github.com/wagtail/wagtail.git
Allow `StreamField` to use `JSONField` internal type via `use_json_field` kwarg Add system check for use_json_field in StreamField Change system check level to Warning Add use_json_field argument to StreamField in test models Use RemovedInWagtail219Warning instead of a system check Handle unpacked values in to_python when use_json_field is True Duplicate models and tests for JSONField-based StreamField Add basic tests for JSONField-based StreamField Add json_field property in StreamField to unify JSONField usage Add docs Don't use destructuring for kwargs in deconstruct Add versionchanged note to StreamField reference
91
0
16,555
13
5
32
def gen_quad_from_poly(self, poly):
    point_num = poly.shape[0]
    min_area_quad = np.zeros((4, 2), dtype=np.float32)
    rect = cv2.minAreaRect(poly.astype(
        np.int32))  # (center (x,y), (width, height), angle of rotation)
    box = np.array(cv2.boxPoints(rect))

    first_point_idx = 0
    min_dist = 1e4
    for i in range(4):
        dist = np.linalg.norm(box[(i + 0) % 4] - poly[0]) + \
               np.linalg.norm(box[(i + 1) % 4] - poly[point_num // 2 - 1]) + \
               np.linalg.norm(box[(i + 2) % 4] - poly[point_num // 2]) + \
               np.linalg.norm(box[(i + 3) % 4] - poly[-1])
        if dist < min_dist:
            min_dist = dist
            first_point_idx = i
    for i in range(4):
        min_area_quad[i] = box[(first_point_idx + i) % 4]

    bbox_new = min_area_quad.tolist()
    bbox = []
    for box in bbox_new:
        box = list(map(int, box))
        bbox.append(box)
    return bbox
PPOCRLabel/PPOCRLabel.py
378
PaddleOCR
{ "docstring": "\n Generate min area quad from poly.\n ", "language": "en", "n_whitespaces": 21, "n_words": 6, "vocab_size": 6 }
116
Python
64
3c6d551207f8eb5d0916404a7eca5e641887047d
PPOCRLabel.py
23,430
24
192
gen_quad_from_poly
https://github.com/PaddlePaddle/PaddleOCR.git
Support multipoint labeling Support multipoint labeling
358
0
4,591
18
6
17
def all_pairs_node_connectivity(G, nbunch=None, cutoff=None):
    if nbunch is None:
        nbunch = G
    else:
        nbunch = set(nbunch)

    directed = G.is_directed()
    if directed:
        iter_func = itertools.permutations
    else:
        iter_func = itertools.combinations

    all_pairs = {n: {} for n in nbunch}

    for u, v in iter_func(nbunch, 2):
        k = local_node_connectivity(G, u, v, cutoff=cutoff)
        all_pairs[u][v] = k
        if not directed:
            all_pairs[v][u] = k

    return all_pairs
networkx/algorithms/approximation/connectivity.py
177
networkx
{ "docstring": "Compute node connectivity between all pairs of nodes.\n\n Pairwise or local node connectivity between two distinct and nonadjacent\n nodes is the minimum number of nodes that must be removed (minimum\n separating cutset) to disconnect them. By Menger's theorem, this is equal\n to the number of node independent paths (paths that share no nodes other\n than source and target). Which is what we compute in this function.\n\n This algorithm is a fast approximation that gives an strict lower\n bound on the actual number of node independent paths between two nodes [1]_.\n It works for both directed and undirected graphs.\n\n\n Parameters\n ----------\n G : NetworkX graph\n\n nbunch: container\n Container of nodes. If provided node connectivity will be computed\n only over pairs of nodes in nbunch.\n\n cutoff : integer\n Maximum node connectivity to consider. If None, the minimum degree\n of source or target is used as a cutoff in each pair of nodes.\n Default value None.\n\n Returns\n -------\n K : dictionary\n Dictionary, keyed by source and target, of pairwise node connectivity\n\n Examples\n --------\n A 3 node cycle with one extra node attached has connectivity 2 between all\n nodes in the cycle and connectivity 1 between the extra node and the rest:\n\n >>> G = nx.cycle_graph(3)\n >>> G.add_edge(2, 3)\n >>> import pprint # for nice dictionary formatting\n >>> pprint.pprint(nx.all_pairs_node_connectivity(G))\n {0: {1: 2, 2: 2, 3: 1},\n 1: {0: 2, 2: 2, 3: 1},\n 2: {0: 2, 1: 2, 3: 1},\n 3: {0: 1, 1: 1, 2: 1}}\n\n See Also\n --------\n local_node_connectivity\n node_connectivity\n\n References\n ----------\n .. [1] White, Douglas R., and Mark Newman. 2001 A Fast Algorithm for\n Node-Independent Paths. Santa Fe Institute Working Paper #01-07-035\n http://eclectic.ss.uci.edu/~drwhite/working.pdf\n ", "language": "en", "n_whitespaces": 440, "n_words": 272, "vocab_size": 166 }
58
Python
37
eae1accf4a4eb8b767e82a45f9aca18785301670
connectivity.py
177,419
17
114
all_pairs_node_connectivity
https://github.com/networkx/networkx.git
Added an example in all_pairs_node_connectivity (#6126) * add missing reference in all_node_cuts flow_func parameter * added example to all_pairs_node_connectivity * Update networkx/algorithms/approximation/connectivity.py Added suggestion Co-authored-by: Ross Barnowski <[email protected]> * Update networkx/algorithms/approximation/connectivity.py added pprint Co-authored-by: Ross Barnowski <[email protected]> * Update networkx/algorithms/connectivity/kcutsets.py fix linking Co-authored-by: Ross Barnowski <[email protected]> * solved style problems Co-authored-by: Ross Barnowski <[email protected]>
145
0
42,370
12
2
9
def require_torch_multi_gpu(test_case):
    if not is_torch_available():
        return unittest.skip("test requires PyTorch")(test_case)

    import torch

    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)
src/transformers/testing_utils.py
79
transformers
{ "docstring": "\n Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without\n multiple GPUs.\n\n To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests -k \"multi_gpu\"\n ", "language": "en", "n_whitespaces": 52, "n_words": 39, "vocab_size": 36 }
19
Python
17
57e6464ac9a31156f1c93e59107323e6ec01309e
testing_utils.py
37,513
5
44
require_torch_multi_gpu
https://github.com/huggingface/transformers.git
Update all require decorators to use skipUnless when possible (#16999)
38
0
6,818
12
2
12
def send_gstin_reminder(party_type, party):
    frappe.has_permission(party_type, throw=True)
    email = _send_gstin_reminder(party_type, party)
    if email:
        frappe.msgprint(_("Reminder to update GSTIN Sent"), title="Reminder sent", indicator="green")
erpnext/regional/doctype/gst_settings/gst_settings.py
78
erpnext
{ "docstring": "Send GSTIN reminder to one party (called from Customer, Supplier form)", "language": "en", "n_whitespaces": 10, "n_words": 11, "vocab_size": 11 }
19
Python
19
494bd9ef78313436f0424b918f200dab8fc7c20b
gst_settings.py
67,056
5
46
send_gstin_reminder
https://github.com/frappe/erpnext.git
style: format code with black
14
0
14,421
12
1
10
async def test_midnight_turnover_before_midnight_outside_period(hass):
    config = {
        "binary_sensor": [
            {"platform": "tod", "name": "Night", "after": "22:00", "before": "5:00"}
        ]
    }
    await async_setup_component(hass, "binary_sensor", config)
    await hass.async_block_till_done()

    state = hass.states.get("binary_sensor.night")
    assert state.state == STATE_OFF


@freeze_time("2019-01-10 10:00:00-08:00")
tests/components/tod/test_binary_sensor.py
131
@freeze_time("2019-01-10 10:00:00-08:00")
core
{ "docstring": "Test midnight turnover setting before midnight outside period.", "language": "en", "n_whitespaces": 7, "n_words": 8, "vocab_size": 7 }
33
Python
31
23a630e0bcbd2aec6a598a19ebaf2929eba97e5b
test_binary_sensor.py
294,292
10
62
test_midnight_turnover_before_midnight_outside_period
https://github.com/home-assistant/core.git
Update Times of the Day tests to use freezegun (#68327)
78
1
93,329
12
2
8
def turn_off(self) -> None:
    if self.state != MediaPlayerState.OFF:
        self._device.send_key("POWER")

    self._attr_state = MediaPlayerState.OFF
homeassistant/components/panasonic_bluray/media_player.py
56
core
{ "docstring": "\n Instruct the device to turn standby.\n\n Sending the \"POWER\" button will turn the device to standby - there\n is no way to turn it completely off remotely. However this works in\n our favour as it means the device is still accepting commands and we\n can thus turn it back on when desired.\n ", "language": "en", "n_whitespaces": 95, "n_words": 52, "vocab_size": 39 }
12
Python
12
9ecbcd2d8fedc4f0a59f231d1f221157d4cfa359
media_player.py
291,472
12
32
turn_off
https://github.com/home-assistant/core.git
Use _attr_state in panasonic bluray media player (#82834)
44
0
90,581
10
1
28
def test_personal_message(self) -> None:
    user_profile = self.example_user("hamlet")
    self.login_user(user_profile)
    othello = self.example_user("othello")
    result = self.client_post(
        "/json/messages",
        {
            "type": "private",
            "content": "Test message",
            "to": orjson.dumps([othello.email]).decode(),
        },
    )
    self.assert_json_success(result)
    message_id = orjson.loads(result.content)["id"]

    recent_conversations = get_recent_private_conversations(user_profile)
    self.assert_length(recent_conversations, 1)
    recent_conversation = list(recent_conversations.values())[0]
    recipient_id = list(recent_conversations.keys())[0]
    self.assertEqual(set(recent_conversation["user_ids"]), {othello.id})
    self.assertEqual(recent_conversation["max_message_id"], message_id)

    # Now send a message to yourself and see how that interacts with the data structure
    result = self.client_post(
        "/json/messages",
        {
            "type": "private",
            "content": "Test message",
            "to": orjson.dumps([user_profile.email]).decode(),
        },
    )
    self.assert_json_success(result)
    self_message_id = orjson.loads(result.content)["id"]

    recent_conversations = get_recent_private_conversations(user_profile)
    self.assert_length(recent_conversations, 2)
    recent_conversation = recent_conversations[recipient_id]
    self.assertEqual(set(recent_conversation["user_ids"]), {othello.id})
    self.assertEqual(recent_conversation["max_message_id"], message_id)

    # Now verify we have the appropriate self-pm data structure
    del recent_conversations[recipient_id]
    recent_conversation = list(recent_conversations.values())[0]
    recipient_id = list(recent_conversations.keys())[0]
    self.assertEqual(set(recent_conversation["user_ids"]), set())
    self.assertEqual(recent_conversation["max_message_id"], self_message_id)
zerver/tests/test_message_send.py
537
zulip
{ "docstring": "\n Sending a personal message to a valid username is successful.\n ", "language": "en", "n_whitespaces": 25, "n_words": 10, "vocab_size": 9 }
112
Python
66
bd9a1dc9710293e36d2d47d970d7afb95100c2e6
test_message_send.py
84,758
43
318
test_personal_message
https://github.com/zulip/zulip.git
tests: Consistently JSON-encode ‘to’ parameter Although our POST /messages handler accepts the ‘to’ parameter with or without JSON encoding, there are two problems with passing it as an unencoded string. Firstly, you’d fail to send a message to a stream named ‘true’ or ‘false’ or ‘null’ or ‘2022’, as the JSON interpretation is prioritized over the plain string interpretation. Secondly, and more importantly for our tests, it violates our OpenAPI schema, which requires the parameter to be JSON-encoded. This is because OpenAPI has no concept of a parameter that’s “optionally JSON-encoded”, nor should it: such a parameter cannot be unambiguously decoded for the reason above. Our version of openapi-core doesn’t currently detect this schema violation, but after the next upgrade it will. Signed-off-by: Anders Kaseorg <[email protected]>
478
0
17,867
16
1
2
def external_ray_cluster_activity_hook4():
    raise Exception("Error in external cluster activity hook")
python/ray/_private/test_utils.py
22
ray
{ "docstring": "\n Example external hook for test_component_activities_hook.\n\n Errors during execution.\n ", "language": "en", "n_whitespaces": 18, "n_words": 8, "vocab_size": 8 }
9
Python
9
56716a1c1b6f9aae3967b910a799bb6af9f2c5d9
test_utils.py
124,497
2
10
external_ray_cluster_activity_hook4
https://github.com/ray-project/ray.git
[dashboard] Add `RAY_CLUSTER_ACTIVITY_HOOK` to `/api/component_activities` (#26297) Add external hook to /api/component_activities endpoint in dashboard snapshot router Change is_active field of RayActivityResponse to take an enum RayActivityStatus instead of bool. This is a backward incompatible change, but should be ok because [dashboard] Add component_activities API #25996 wasn't included in any branch cuts. RayActivityResponse now supports informing when there was an error getting the activity observation and the reason.
15
0
27,614
8
6
18
def get_item_warehouse_projected_qty(items_to_consider):
    item_warehouse_projected_qty = {}

    for item_code, warehouse, projected_qty in frappe.db.sql(
        .format(
            ", ".join(["%s"] * len(items_to_consider))
        ),
        items_to_consider,
    ):
        if item_code not in item_warehouse_projected_qty:
            item_warehouse_projected_qty.setdefault(item_code, {})

        if warehouse not in item_warehouse_projected_qty.get(item_code):
            item_warehouse_projected_qty[item_code][warehouse] = flt(projected_qty)

        warehouse_doc = frappe.get_doc("Warehouse", warehouse)

        while warehouse_doc.parent_warehouse:
            if not item_warehouse_projected_qty.get(item_code, {}).get(warehouse_doc.parent_warehouse):
                item_warehouse_projected_qty.setdefault(item_code, {})[warehouse_doc.parent_warehouse] = flt(
                    projected_qty
                )
            else:
                item_warehouse_projected_qty[item_code][warehouse_doc.parent_warehouse] += flt(projected_qty)

            warehouse_doc = frappe.get_doc("Warehouse", warehouse_doc.parent_warehouse)

    return item_warehouse_projected_qty
erpnext/stock/reorder_item.py
265
erpnext
{ "docstring": "select item_code, warehouse, projected_qty\n\t\tfrom tabBin where item_code in ({0})\n\t\t\tand (warehouse != \"\" and warehouse is not null)", "language": "en", "n_whitespaces": 16, "n_words": 19, "vocab_size": 18 }
60
Python
44
494bd9ef78313436f0424b918f200dab8fc7c20b
reorder_item.py
67,844
24
166
get_item_warehouse_projected_qty
https://github.com/frappe/erpnext.git
style: format code with black
38
0
14,639
16
1
5
def affine_matrix(self) -> np.ndarray:
    assert self._affine_matrix is not None
    return self._affine_matrix
lib/align/detected_face.py
35
faceswap
{ "docstring": " :class: `numpy.ndarray`: The affine matrix to transpose the mask to a full frame. ", "language": "en", "n_whitespaces": 14, "n_words": 13, "vocab_size": 12 }
11
Python
10
5e73437be47f2410439a3c6716de96354e6a0c94
detected_face.py
101,231
4
21
affine_matrix
https://github.com/deepfakes/faceswap.git
lib.align updates: - alignments.py - Add typed dicts for imported alignments - Explicitly check for presence of thumb value in alignments dict - linting - detected_face.py - Typing - Linting - Legacy support for pre-aligned face - Update dependencies to new property names
32
0
20,651
7
6
26
def predict(self, X):
    if self.weights == "uniform":
        # In that case, we do not need the distances to perform
        # the weighting so we do not compute them.
        neigh_ind = self.kneighbors(X, return_distance=False)
        neigh_dist = None
    else:
        neigh_dist, neigh_ind = self.kneighbors(X)

    weights = _get_weights(neigh_dist, self.weights)

    _y = self._y
    if _y.ndim == 1:
        _y = _y.reshape((-1, 1))

    if weights is None:
        y_pred = np.mean(_y[neigh_ind], axis=1)
    else:
        y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=np.float64)
        denom = np.sum(weights, axis=1)

        for j in range(_y.shape[1]):
            num = np.sum(_y[neigh_ind, j] * weights, axis=1)
            y_pred[:, j] = num / denom

    if self._y.ndim == 1:
        y_pred = y_pred.ravel()

    return y_pred
sklearn/neighbors/_regression.py
310
scikit-learn
{ "docstring": "Predict the target for the provided data.\n\n Parameters\n ----------\n X : array-like of shape (n_queries, n_features), \\\n or (n_queries, n_indexed) if metric == 'precomputed'\n Test samples.\n\n Returns\n -------\n y : ndarray of shape (n_queries,) or (n_queries, n_outputs), dtype=int\n Target values.\n ", "language": "en", "n_whitespaces": 126, "n_words": 40, "vocab_size": 33 }
99
Python
65
fb082b223dc9f1dd327f48dc9b830ee382d6f661
_regression.py
258,546
21
199
predict
https://github.com/scikit-learn/scikit-learn.git
MAINT Do not compute distances for uniform weighting (#22280)
320
0
75,287
15
1
5
def handle_m2m_field(self, obj, field):
    raise NotImplementedError(
        "subclasses of Serializer must provide a handle_m2m_field() method"
    )
django/core/serializers/base.py
27
django
{ "docstring": "\n Called to handle a ManyToManyField.\n ", "language": "en", "n_whitespaces": 20, "n_words": 5, "vocab_size": 5 }
15
Python
15
9c19aff7c7561e3a82978a272ecdaad40dda5c00
base.py
204,741
4
15
handle_m2m_field
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
47
0
50,868
8
2
13
def get_user_timezone() -> str:
    filename = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        "timezone.openbb",
    )
    if os.path.isfile(filename):
        with open(filename) as f:
            return f.read()
    return ""
openbb_terminal/helper_funcs.py
105
OpenBBTerminal
{ "docstring": "Get user timezone if it is a valid one\n\n Returns\n -------\n str\n user timezone based on timezone.openbb file\n ", "language": "en", "n_whitespaces": 37, "n_words": 18, "vocab_size": 16 }
20
Python
19
b71abcfbf4d7e8ac1855522aff0378e13c8b5362
helper_funcs.py
283,270
16
60
get_user_timezone
https://github.com/OpenBB-finance/OpenBBTerminal.git
Updating some names (#1575) * quick econ fix * black * keys and feature flags * terminal name :eyes: * some more replacements * some more replacements * edit pyproject * gst -> openbb * add example portfolios back to git * Update api from gst * sorry. skipping some tests * another round of names * another round of test edits * Missed some .gst refs and update timezone * water mark stuff * Fixing Names in terminal.spec and name of GTFF_DEFAULTS to OBBFF_DEFAULTS * fix more GST to OpenBB Terminal * Logging : merge conflicts with main * Revert wrong files Co-authored-by: Andrew <[email protected]> Co-authored-by: DidierRLopes <[email protected]> Co-authored-by: Chavithra PARANA <[email protected]>
67
0
84,512
13
2
6
def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)

    return test_case
tests/utils.py
50
datasets
{ "docstring": "\n Decorator marking a test that requires SQLAlchemy.\n\n These tests are skipped when SQLAlchemy isn't installed.\n\n ", "language": "en", "n_whitespaces": 25, "n_words": 15, "vocab_size": 15 }
16
Python
15
d7dfbc83d68e87ba002c5eb2555f7a932e59038a
utils.py
105,788
6
26
require_sqlalchemy
https://github.com/huggingface/datasets.git
Add ability to read-write to SQL databases. (#4928) * Add ability to read-write to SQL databases. * Fix issue where pandas<1.4.0 doesn't return the number of rows * Fix issue where connections were not closed properly * Apply suggestions from code review Co-authored-by: Quentin Lhoest <[email protected]> * Change according to reviews * Change according to reviews * Inherit from AbstractDatasetInputStream in SqlDatasetReader * Revert typing in SQLDatasetReader as we do not support Connexion * Align API with Pandas/Daskk * Update tests * Update docs * Update some more tests * Missing comma * Small docs fix * Style * Update src/datasets/arrow_dataset.py Co-authored-by: Quentin Lhoest <[email protected]> * Update src/datasets/packaged_modules/sql/sql.py Co-authored-by: Quentin Lhoest <[email protected]> * Address some comments * Address the rest * Improve tests * sqlalchemy required tip Co-authored-by: Quentin Lhoest <[email protected]> Co-authored-by: mariosasko <[email protected]>
43
0
22,204
13
2
7
def make_tempfile(name):
    open(name, 'w', encoding='utf-8').close()
    try:
        yield
    finally:
        os.unlink(name)
IPython/testing/tools.py
59
ipython
{ "docstring": " Create an empty, named, temporary file for the duration of the context.\n ", "language": "en", "n_whitespaces": 16, "n_words": 12, "vocab_size": 11 }
9
Python
9
23276ac4770f380ce1d5808950dd412a35594af1
tools.py
208,534
6
31
make_tempfile
https://github.com/ipython/ipython.git
Fix EncodingWarning on Python 3.10
35
0
52,377
11
2
14
def findall(dir=os.curdir):
    files = _find_all_simple(dir)
    if dir == os.curdir:
        make_rel = functools.partial(os.path.relpath, start=dir)
        files = map(make_rel, files)
    return list(files)
python3.10.4/Lib/distutils/filelist.py
84
XX-Net
{ "docstring": "\n Find all files under 'dir' and return the list of full filenames.\n Unless dir is '.', return full filenames with dir prepended.\n ", "language": "en", "n_whitespaces": 32, "n_words": 22, "vocab_size": 19 }
19
Python
16
8198943edd73a363c266633e1aa5b2a9e9c9f526
filelist.py
222,940
6
52
findall
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
45
0
56,821
12
1
29
async def test_ip_ban_manager_never_started(hass, aiohttp_client, caplog):
    app = web.Application()
    app["hass"] = hass
    setup_bans(hass, app, 5)
    set_real_ip = mock_real_ip(app)

    with patch(
        "homeassistant.components.http.ban.load_yaml_config_file",
        side_effect=FileNotFoundError,
    ):
        client = await aiohttp_client(app)

    # Mock the manager never being started
    del app[KEY_BAN_MANAGER]

    set_real_ip("4.3.2.1")
    resp = await client.get("/")
    assert resp.status == HTTPStatus.NOT_FOUND
    assert "IP Ban middleware loaded but banned IPs not loaded" in caplog.text


@pytest.mark.parametrize(
    "remote_addr, bans, status",
    list(
        zip(
            BANNED_IPS_WITH_SUPERVISOR,
            [1, 1, 0],
            [HTTPStatus.FORBIDDEN, HTTPStatus.FORBIDDEN, HTTPStatus.UNAUTHORIZED],
        )
    ),
)
tests/components/http/test_ban.py
209
@pytest.mark.parametrize( "remote_addr, bans, status", list( zip( BANNED_IPS_WITH_SUPERVISOR, [1, 1, 0], [HTTPStatus.FORBIDDEN, HTTPStatus.FORBIDDEN, HTTPStatus.UNAUTHORIZED], ) ), )
core
{ "docstring": "Test we handle the ip ban manager not being started.", "language": "en", "n_whitespaces": 9, "n_words": 10, "vocab_size": 10 }
72
Python
65
0c29b68cf82c777ec6fd70ce38911f1d1e39d26a
test_ban.py
315,611
15
87
test_ip_ban_manager_never_started
https://github.com/home-assistant/core.git
Switch linear search to a dict lookup for ip bans (#74482)
186
1
114,189
12
10
32
def remove_field(self, model, field):
    # Special-case implicit M2M tables
    if field.many_to_many and field.remote_field.through._meta.auto_created:
        return self.delete_model(field.remote_field.through)
    # It might not actually have a column behind it
    if field.db_parameters(connection=self.connection)["type"] is None:
        return
    # Drop any FK constraints, MySQL requires explicit deletion
    if field.remote_field:
        fk_names = self._constraint_names(model, [field.column], foreign_key=True)
        for fk_name in fk_names:
            self.execute(self._delete_fk_sql(model, fk_name))
    # Delete the column
    sql = self.sql_delete_column % {
        "table": self.quote_name(model._meta.db_table),
        "column": self.quote_name(field.column),
    }
    self.execute(sql)
    # Reset connection if required
    if self.connection.features.connection_persists_old_columns:
        self.connection.close()
    # Remove all deferred statements referencing the deleted column.
    for sql in list(self.deferred_sql):
        if isinstance(sql, Statement) and sql.references_column(
            model._meta.db_table, field.column
        ):
            self.deferred_sql.remove(sql)
django/db/backends/base/schema.py
304
django
{ "docstring": "\n Remove a field from a model. Usually involves deleting a column,\n but for M2Ms may involve deleting a table.\n ", "language": "en", "n_whitespaces": 41, "n_words": 19, "vocab_size": 15 }
97
Python
79
9c19aff7c7561e3a82978a272ecdaad40dda5c00
schema.py
204,933
21
190
remove_field
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
346
0
50,985
13
2
8
def test_basic(push_channel):
    msgs = [
        {"foo": "bar"},
        {"bar": "baz"},
        {"baz": "qux", "list": [1, 2, 3]},
    ]
    for msg in msgs:
        ret = push_channel.send(msg, timeout=5, tries=1)
        assert ret["load"] == msg
tests/pytests/functional/transport/server/test_req_channel.py
112
salt
{ "docstring": "\n Test a variety of messages, make sure we get the expected responses\n ", "language": "en", "n_whitespaces": 19, "n_words": 12, "vocab_size": 12 }
29
Python
27
3c7e1ec1f08abd7cd1ba78ad7880acb6ba6fdce7
test_req_channel.py
216,279
9
66
test_basic
https://github.com/saltstack/salt.git
Fix minion unit tests, specifically .../tests/pytests/test_minion.py
76
0
54,497
11
5
16
def test_upgrade_allow_downgrades(uninstall_var, upgrade_var):
    with patch("salt.utils.pkg.clear_rtag", MagicMock()):
        with patch(
            "salt.modules.aptpkg.list_pkgs", MagicMock(return_value=uninstall_var)
        ):
            mock_cmd = MagicMock(return_value={"retcode": 0, "stdout": upgrade_var})
            patch_kwargs = {
                "__salt__": {
                    "config.get": MagicMock(return_value=True),
                    "cmd.run_all": mock_cmd,
                },
            }
            with patch.multiple(aptpkg, **patch_kwargs):
                aptpkg.upgrade()
                args_matching = [
                    True
                    for args in patch_kwargs["__salt__"]["cmd.run_all"].call_args[0]
                    if "--allow-downgrades" in args
                ]
                # Here we shouldn't see the parameter and args_matching should be empty.
                assert any(args_matching) is False

                aptpkg.upgrade(allow_downgrades=True)
                args_matching = [
                    True
                    for args in patch_kwargs["__salt__"]["cmd.run_all"].call_args[0]
                    if "--allow-downgrades" in args
                ]
                # --allow-downgrades should be in the args list and we should have at least on True in the list.
                assert any(args_matching) is True
tests/pytests/unit/modules/test_aptpkg.py
270
salt
{ "docstring": "\n Tests the allow_downgrades option for upgrade.\n ", "language": "en", "n_whitespaces": 13, "n_words": 6, "vocab_size": 6 }
98
Python
61
9e1ca8b5b9e7006fea28f473711917755cf5a262
test_aptpkg.py
216,508
27
155
test_upgrade_allow_downgrades
https://github.com/saltstack/salt.git
Add --allow-downgrades capability for apt upgrade
505
0
54,619
19
4
12
def srs(self):
    # TODO: Is caching really necessary here? Is complexity worth it?
    if hasattr(self, "_srs"):
        # Returning a clone of the cached SpatialReference object.
        return self._srs.clone()
    else:
        # Attempting to cache a SpatialReference object.
        # Trying to get from WKT first.
        try:
            self._srs = gdal.SpatialReference(self.wkt)
            return self.srs
        except Exception as e:
            msg = e

        try:
            self._srs = gdal.SpatialReference(self.proj4text)
            return self.srs
        except Exception as e:
            msg = e

        raise Exception(
            "Could not get OSR SpatialReference from WKT: %s\nError:\n%s"
            % (self.wkt, msg)
        )
django/contrib/gis/db/backends/base/models.py
157
django
{ "docstring": "\n Return a GDAL SpatialReference object.\n ", "language": "en", "n_whitespaces": 20, "n_words": 5, "vocab_size": 5 }
82
Python
57
9c19aff7c7561e3a82978a272ecdaad40dda5c00
models.py
203,770
18
89
srs
https://github.com/django/django.git
Refs #33476 -- Reformatted code with Black.
341
0
50,533
14
1
2
def set_package(fxn):
python3.10.4/Lib/importlib/util.py
13
XX-Net
{ "docstring": "Set __package__ on the returned module.\n\n This function is deprecated.\n\n ", "language": "en", "n_whitespaces": 16, "n_words": 10, "vocab_size": 10 }
2
Python
2
8198943edd73a363c266633e1aa5b2a9e9c9f526
util.py
218,358
4
17
set_package
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
5
0
55,256
6
1
9
def get_subsidiary_companies(company):
    lft, rgt = frappe.get_cached_value("Company", company, ["lft", "rgt"])

    return frappe.db.sql_list(
        """select name from `tabCompany`
        where lft >= {0} and rgt <= {1} order by lft, rgt""".format(
            lft, rgt
        )
    )
erpnext/accounts/report/consolidated_financial_statement/consolidated_financial_statement.py
66
erpnext
{ "docstring": "select name from `tabCompany`\n\t\twhere lft >= {0} and rgt <= {1} order by lft, rgt", "language": "en", "n_whitespaces": 14, "n_words": 16, "vocab_size": 15 }
16
Python
13
494bd9ef78313436f0424b918f200dab8fc7c20b
consolidated_financial_statement.py
65,209
8
39
get_subsidiary_companies
https://github.com/frappe/erpnext.git
style: format code with black
9
0
13,823
10
4
4
def from_dict(cls, other, name=None):
.venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py
20
transferlearning
{ "docstring": "\n Helper classmethod to construct a ParseResults from a dict, preserving the\n name-value relations as results names. If an optional 'name' argument is\n given, a nested ParseResults will be returned\n ", "language": "en", "n_whitespaces": 58, "n_words": 29, "vocab_size": 26 }
4
Python
4
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
pyparsing.py
63,361
11
93
from_dict
https://github.com/jindongwang/transferlearning.git
upd; format
11
0
13,268
6
1
7
def create_view(self, request):
    kwargs = {"model_admin": self}
    view_class = self.create_view_class
    return view_class.as_view(**kwargs)(request)
wagtail/contrib/modeladmin/options.py
54
wagtail
{ "docstring": "\n Instantiates a class-based view to provide 'creation' functionality for\n the assigned model, or redirect to Wagtail's create view if the\n assigned model extends 'Page'. The view class used can be overridden by\n changing the 'create_view_class' attribute.\n ", "language": "en", "n_whitespaces": 72, "n_words": 36, "vocab_size": 30 }
12
Python
11
d10f15e55806c6944827d801cd9c2d53f5da4186
options.py
73,170
4
31
create_view
https://github.com/wagtail/wagtail.git
Reformat with black
40
0
15,968
9
3
10
def get_viewname(model, action=None):
    viewname = f'{model._meta.app_label}:{model._meta.model_name}'

    # Determine whether this is a plugin view and adjust the namespace appropriately
    if isinstance(model._meta.app_config, PluginConfig):
        viewname = f'plugins:{viewname}'

    # Append the action, if any
    if action:
        viewname = f'{viewname}_{action}'

    return viewname
netbox/utilities/utils.py
99
netbox
{ "docstring": "\n Return the view name for the given model and action, if valid.\n\n :param model: The model or instance to which the view applies\n :param action: A string indicating the desired action (if any); e.g. \"add\" or \"list\"\n ", "language": "en", "n_whitespaces": 50, "n_words": 37, "vocab_size": 30 }
38
Python
29
10e6ae20949028171fdfcc50fde78bf289f41d5f
utils.py
264,426
7
39
get_viewname
https://github.com/netbox-community/netbox.git
Introduce get_viewname() as a standard utility
73
0
77,721
10
1
7
def plus(self, a):
    a = _convert_other(a, raiseit=True)
    return a.__pos__(context=self)
python3.10.4/Lib/_pydecimal.py
44
XX-Net
{ "docstring": "Plus corresponds to unary prefix plus in Python.\n\n The operation is evaluated using the same rules as add; the\n operation plus(a) is calculated as add('0', a) where the '0'\n has the same exponent as the operand.\n\n >>> ExtendedContext.plus(Decimal('1.3'))\n Decimal('1.3')\n >>> ExtendedContext.plus(Decimal('-1.3'))\n Decimal('-1.3')\n >>> ExtendedContext.plus(-1)\n Decimal('-1')\n ", "language": "en", "n_whitespaces": 115, "n_words": 45, "vocab_size": 34 }
9
Python
9
8198943edd73a363c266633e1aa5b2a9e9c9f526
_pydecimal.py
219,635
3
27
plus
https://github.com/XX-net/XX-Net.git
add python 3.10.4 for windows
30
0
55,667
9
1
11
def get_tables(self) -> Response:
    q = f"SHOW TABLES FROM {self.connection_data['database']}"
    result = self.native_query(q)
    df = result.data_frame
    result.data_frame = df.rename(columns={df.columns[0]: 'table_name'})
    return result
mindsdb/integrations/handlers/clickhouse_handler/clickhouse_handler.py
94
mindsdb
{ "docstring": "\n Get a list with all of the tabels in ClickHouse db\n ", "language": "en", "n_whitespaces": 26, "n_words": 11, "vocab_size": 11 }
22
Python
17
149ae900c62910a480f8af70daa98362d513a350
clickhouse_handler.py
115,562
9
48
get_tables
https://github.com/mindsdb/mindsdb.git
CH handler implementation
64
0
25,491
13
1
4
def uses_before_args(self) -> Namespace:
    return self.pod_args['uses_before']
jina/orchestrate/deployments/__init__.py
28
jina
{ "docstring": "Get the arguments for the `uses_before` of this Deployment.\n\n\n .. # noqa: DAR201\n ", "language": "en", "n_whitespaces": 27, "n_words": 13, "vocab_size": 12 }
6
Python
6
13edc16d806fb5d77a6849551178ccc75937f25f
__init__.py
10,853
7
15
uses_before_args
https://github.com/jina-ai/jina.git
refactor: rename pod to deployment (#4230) * refactor: rename pod to deployment * style: fix overload and cli autocomplete * fix: undo daemon mistake * refactor: leftover cleanup * fix: more test fixes * fix: more fixes * fix: more fixes * fix: more fixes * fix: more tests * fix: fix more tests * refactor: fix more tests * refactor: more tests fixes * refactor: rename pea to pod * refactor: adjust docs * refactor: complete pea renaming * refactor: more fixes * fix: pea_type in k8s yamls * fix: adjust pod args name * refactor: rename peapods parser folder * fix: da init Co-authored-by: Jina Dev Bot <[email protected]>
20
0
1,951
7
5
12
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
    if system in ["win32", "darwin"]:
        path = user_data_dir(appname, appauthor, None, roaming)
    else:
        path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path


# for the discussion regarding site_config_dir locations
# see <https://github.com/pypa/pip/issues/1733>
.venv/lib/python3.8/site-packages/pip/_vendor/appdirs.py
152
transferlearning
{ "docstring": "Return full path to the user-specific config dir for this application.\n\n \"appname\" is the name of application.\n If None, just the system directory is returned.\n \"appauthor\" (only used on Windows) is the name of the\n appauthor or distributing body for this application. Typically\n it is the owning company name. This falls back to appname. You may\n pass False to disable it.\n \"version\" is an optional version path element to append to the\n path. You might want to use this if you want multiple versions\n of your app to be able to run independently. If used, this\n would typically be \"<major>.<minor>\".\n Only applied when appname is present.\n \"roaming\" (boolean, default False) can be set True to use the Windows\n roaming appdata directory. That means that for users on a Windows\n network setup for roaming profiles, this user data will be\n sync'd on login. See\n <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>\n for a discussion of issues.\n\n Typical user config directories are:\n Mac OS X: same as user_data_dir\n Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined\n Win *: same as user_data_dir\n\n For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.\n That means, by default \"~/.config/<AppName>\".\n ", "language": "en", "n_whitespaces": 445, "n_words": 188, "vocab_size": 131 }
48
Python
37
f638f5d0e6c8ebed0e69a6584bc7f003ec646580
appdirs.py
61,443
38
95
user_config_dir
https://github.com/jindongwang/transferlearning.git
upd; format
99
0
12,578
14
1
31
def test_page_token_expired_retry_fails(mock_ads_client, test_config):
    stream_slice = {"start_date": "2021-01-01", "end_date": "2021-01-15"}

    google_api = MockGoogleAdsFails(credentials=test_config["credentials"], customer_id=test_config["customer_id"])
    incremental_stream_config = dict(
        api=google_api,
        conversion_window_days=test_config["conversion_window_days"],
        start_date=test_config["start_date"],
        time_zone="local",
        end_date="2021-04-04",
    )
    stream = ClickView(**incremental_stream_config)
    stream.get_query = Mock()
    stream.get_query.return_value = "query"

    with pytest.raises(GoogleAdsException):
        list(stream.read_records(sync_mode=SyncMode.incremental, cursor_field=["segments.date"], stream_slice=stream_slice))

    stream.get_query.assert_called_with({"start_date": "2021-01-03", "end_date": "2021-01-15"})
    assert stream.get_query.call_count == 2
airbyte-integrations/connectors/source-google-ads/unit_tests/test_streams.py
251
airbyte
{ "docstring": "\n Page token has expired while reading records within date \"2021-01-03\", it should raise error,\n because Google Ads API doesn't allow filter by datetime.\n ", "language": "en", "n_whitespaces": 33, "n_words": 23, "vocab_size": 23 }
44
Python
38
359fcd801128239b39297828d39821f631ce00c0
test_streams.py
3,683
17
144
test_page_token_expired_retry_fails
https://github.com/airbytehq/airbyte.git
Source Google Ads: handle page token expired exception (#9812) * dynamic date range * raise exception if exites the cycle without error * if range days is 1 already do not retry * added unit tests * added comments * added comments * common mock classes are moved to common module * change read_records * refactored get_date_params * handle corner case * added parse_dates function * added test_streams * check mock calls * fix unit tests for chunk date range refactoring * removed commented codes * remove commented line * refactor test_streams * refactor CustomQuery.get_query * remove TODO * deleted unused json * format * fix chunk_date_range * added docstring * set range_days to 15 for ShoppingPerformanceReport * refactor chunk_date_range * format code 2 * call parent read_records method * add return type in get_date_params * change e to exception * set start_date as end_date * log page token has expired * bump version * updated spec and def yaml Co-authored-by: auganbay <[email protected]>
119
0
515
14
1
5
def get_favorite_articles_by_page_view():
    return frappe.db.sql(
        """
        SELECT
            t1.name as name,
            t1.title as title,
            t1.content as content,
            t1.route as route,
            t1.category as category,
            count(t1.route) as count
        FROM `tabHelp Article` AS t1
            INNER JOIN
            `tabWeb Page View` AS t2
        ON t1.route = t2.path
        WHERE t1.published = 1
        GROUP BY route
        ORDER BY count DESC
        LIMIT 6;
        """,
        as_dict=True,
    )
erpnext/www/support/index.py
30
erpnext
{ "docstring": "\n\t\t\tSELECT\n\t\t\t\tt1.name as name,\n\t\t\t\tt1.title as title,\n\t\t\t\tt1.content as content,\n\t\t\t\tt1.route as route,\n\t\t\t\tt1.category as category,\n\t\t\t\tcount(t1.route) as count\n\t\t\tFROM `tabHelp Article` AS t1\n\t\t\t\tINNER JOIN\n\t\t\t\t`tabWeb Page View` AS t2\n\t\t\tON t1.route = t2.path\n\t\t\tWHERE t1.published = 1\n\t\t\tGROUP BY route\n\t\t\tORDER BY count DESC\n\t\t\tLIMIT 6;\n\t\t\t", "language": "en", "n_whitespaces": 33, "n_words": 48, "vocab_size": 38 }
7
Python
7
494bd9ef78313436f0424b918f200dab8fc7c20b
index.py
68,185
21
18
get_favorite_articles_by_page_view
https://github.com/frappe/erpnext.git
style: format code with black
2
0
14,733
8
1
15
def test_previous_pods_ignored_for_reattached(self):
    k = KubernetesPodOperator(
        namespace="default",
        image="ubuntu:16.04",
        name="test",
        task_id="task",
    )
    self.run_pod(k)
    k.client.list_namespaced_pod.assert_called_once()
    _, kwargs = k.client.list_namespaced_pod.call_args
    assert 'already_checked!=True' in kwargs['label_selector']
tests/providers/cncf/kubernetes/operators/test_kubernetes_pod.py
106
airflow
{ "docstring": "\n When looking for pods to possibly reattach to,\n ignore pods from previous tries that were properly finished\n ", "language": "en", "n_whitespaces": 39, "n_words": 17, "vocab_size": 16 }
20
Python
19
60eb9e106f5915398eafd6aa339ec710c102dc09
test_kubernetes_pod.py
42,808
11
60
test_previous_pods_ignored_for_reattached
https://github.com/apache/airflow.git
Use KubernetesHook to create api client in KubernetesPodOperator (#20578) Add support for k8s hook in KPO; use it always (even when no conn id); continue to consider the core k8s settings that KPO already takes into account but emit deprecation warning about them. KPO historically takes into account a few settings from core airflow cfg (e.g. verify ssl, tcp keepalive, context, config file, and in_cluster). So to use the hook to generate the client, somehow the hook has to take these settings into account. But we don't want the hook to consider these settings in general. So we read them in KPO and if necessary patch the hook and warn.
113
0
7,739
10
3
20
def tight_layout(self, *, pad=1.08, h_pad=None, w_pad=None, rect=None):
    from contextlib import nullcontext
    from ._tight_layout import (
        get_subplotspec_list, get_tight_layout_figure)
    subplotspec_list = get_subplotspec_list(self.axes)
    if None in subplotspec_list:
        _api.warn_external("This figure includes Axes that are not "
                           "compatible with tight_layout, so results "
                           "might be incorrect.")
    renderer = _get_renderer(self)
    with getattr(renderer, "_draw_disabled", nullcontext)():
        kwargs = get_tight_layout_figure(
            self, self.axes, subplotspec_list, renderer,
            pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect)
    if kwargs:
        self.subplots_adjust(**kwargs)
lib/matplotlib/figure.py
186
matplotlib
{ "docstring": "\n Adjust the padding between and around subplots.\n\n To exclude an artist on the Axes from the bounding box calculation\n that determines the subplot parameters (i.e. legend, or annotation),\n set ``a.set_in_layout(False)`` for that artist.\n\n Parameters\n ----------\n pad : float, default: 1.08\n Padding between the figure edge and the edges of subplots,\n as a fraction of the font size.\n h_pad, w_pad : float, default: *pad*\n Padding (height/width) between edges of adjacent subplots,\n as a fraction of the font size.\n rect : tuple (left, bottom, right, top), default: (0, 0, 1, 1)\n A rectangle in normalized figure coordinates into which the whole\n subplots area (including labels) will fit.\n\n See Also\n --------\n .Figure.set_tight_layout\n .pyplot.tight_layout\n ", "language": "en", "n_whitespaces": 275, "n_words": 110, "vocab_size": 81 }
62
Python
55
2d8bd625813e4c513bbe8bedddca45da368bca9b
figure.py
107,057
16
118
tight_layout
https://github.com/matplotlib/matplotlib.git
Recreated deprecated files and changed references
252
0
22,574
11
5
19
def test_load_files_allowed_extensions(tmp_path, allowed_extensions):
    d = tmp_path / "sub"
    d.mkdir()

    files = ("file1.txt", "file2.json", "file3.json", "file4.md")
    paths = [d / f for f in files]
    for p in paths:
        p.touch()

    res = load_files(tmp_path, allowed_extensions=allowed_extensions)
    assert set([str(p) for p in paths if p.suffix in allowed_extensions]) == set(
        res.filenames
    )


@pytest.mark.parametrize(
    "filename, expected_n_samples, expected_n_features, expected_target_names",
    [
        ("wine_data.csv", 178, 13, ["class_0", "class_1", "class_2"]),
        ("iris.csv", 150, 4, ["setosa", "versicolor", "virginica"]),
        ("breast_cancer.csv", 569, 30, ["malignant", "benign"]),
    ],
)
sklearn/datasets/tests/test_base.py
240
@pytest.mark.parametrize( "filename, expected_n_samples, expected_n_features, expected_target_names", [ ("wine_data.csv", 178, 13, ["class_0", "class_1", "class_2"]), ("iris.csv", 150, 4, ["setosa", "versicolor", "virginica"]), ("breast_cancer.csv", 569, 30, ["malignant", "benign"]), ], )
scikit-learn
{ "docstring": "Check the behaviour of `allowed_extension` in `load_files`.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
72
Python
59
fc72ebe61c833f227560bd9d0dcf88cdda6c6adb
test_base.py
258,977
11
87
test_load_files_allowed_extensions
https://github.com/scikit-learn/scikit-learn.git
ENH Adds file extension selection to load_files (#22498) Co-authored-by: Thomas J. Fan <[email protected]> Co-authored-by: Tony <[email protected]> Co-authored-by: Kazim <[email protected]> Co-authored-by: Tony Attalla <[email protected]> Co-authored-by: Guillaume Lemaitre <[email protected]>
141
1
75,505
12
1
9
def test_rolling_non_monotonic(method, expected):
    # Based on an example found in computation.rst
    use_expanding = [True, False, True, False, True, True, True, True]
    df = DataFrame({"values": np.arange(len(use_expanding)) ** 2})
pandas/tests/window/test_rolling.py
72
pandas
{ "docstring": "\n Make sure the (rare) branch of non-monotonic indices is covered by a test.\n\n output from 1.1.3 is assumed to be the expected output. Output of sum/mean has\n manually been verified.\n\n GH 36933.\n ", "language": "en", "n_whitespaces": 48, "n_words": 32, "vocab_size": 29 }
27
Python
22
6caefb19f4d7c05451fafca182c6eb39fe9901ed
test_rolling.py
165,326
9
100
test_rolling_non_monotonic
https://github.com/pandas-dev/pandas.git
ENH: Rolling window with step size (GH-15354) (#45765)
39
0
39,660
15
1
9
def test_remember_collapsed(self):
    # Sidebar should not be collapsed
    self.client.cookies["wagtail_sidebar_collapsed"] = "0"
    response = self.client.get(reverse("wagtailadmin_home"))
    self.assertNotContains(response, "sidebar-collapsed")

    # Sidebar should be collapsed
    self.client.cookies["wagtail_sidebar_collapsed"] = "1"
    response = self.client.get(reverse("wagtailadmin_home"))
    self.assertContains(response, "sidebar-collapsed")
wagtail/admin/tests/test_menu.py
127
wagtail
{ "docstring": "Sidebar should render with collapsed class applied.", "language": "en", "n_whitespaces": 6, "n_words": 7, "vocab_size": 7 }
29
Python
17
d10f15e55806c6944827d801cd9c2d53f5da4186
test_menu.py
72,022
7
68
test_remember_collapsed
https://github.com/wagtail/wagtail.git
Reformat with black
92
0
15,818
11
1
8
def _get_index_and_columns(df):
    return len(df.index), len(df.columns)


@ray.remote(num_returns=4)
modin/core/execution/ray/implementations/pandas_on_ray/partitioning/partition.py
50
@ray.remote(num_returns=4)
modin
{ "docstring": "\n Get the number of rows and columns of a pandas DataFrame.\n\n Parameters\n ----------\n df : pandas.DataFrame\n A pandas DataFrame which dimensions are needed.\n\n Returns\n -------\n int\n The number of rows.\n int\n The number of columns.\n ", "language": "en", "n_whitespaces": 84, "n_words": 35, "vocab_size": 27 }
6
Python
6
e7cb2e82f8b9c7a68f82abdd3b6011d661230b7e
partition.py
153,345
2
20
get_index_and_columns
https://github.com/modin-project/modin.git
REFACTOR-#4251: define public interfaces in `modin.core.execution.ray` module (#3868) Signed-off-by: Anatoly Myachev <[email protected]>
11
1
35,381
9
1
6
def explain(self) -> Tuple[List[Explanation], List[float]]:
ludwig/explain/explainer.py
30
ludwig
{ "docstring": "Explain the model's predictions.\n\n # Return\n\n :return: (Tuple[List[Explanation], List[float]]) `(explanations, expected_values)`\n `explanations`: (List[Explanation]) A list of explanations, one for each row in the input data. Each\n explanation contains the feature attributions for each label in the target feature's vocab.\n\n `expected_values`: (List[float]) of length [output feature cardinality] Expected value for each label in\n the target feature's vocab.\n ", "language": "en", "n_whitespaces": 121, "n_words": 56, "vocab_size": 40 }
5
Python
5
1caede3a2da4ec71cb8650c7e45120c26948a5b9
explainer.py
8,248
12
19
explain
https://github.com/ludwig-ai/ludwig.git
Explanation API and feature importance for GBM (#2564) * add docstring for explain_ig * solidify Explainer API * add gbm explainer * add dataclasses for typed explanations * add GBM feature importance * remove unused imports * add tests * fix test * extract explanation into file * rename base to explainer * remove unused kwargs * remove device placement from base explainer * use proper field from gbm
12
0
1,382
6