Dataset schema: each column with its dtype and the observed range of values (for `int64` columns) or string lengths (for `string` columns):

| column | dtype | min | max |
|---|---|---|---|
| ast_errors | string (length) | 0 | 3.2k |
| d_id | int64 | 44 | 121k |
| id | int64 | 70 | 338k |
| n_whitespaces | int64 | 3 | 14k |
| path | string (length) | 8 | 134 |
| n_words | int64 | 4 | 4.82k |
| n_identifiers | int64 | 1 | 131 |
| random_cut | string (length) | 16 | 15.8k |
| commit_message | string (length) | 2 | 15.3k |
| fun_name | string (length) | 1 | 84 |
| commit_id | string (length) | 40 | 40 |
| repo | string (length) | 3 | 28 |
| file_name | string (length) | 5 | 79 |
| ast_levels | int64 | 6 | 31 |
| nloc | int64 | 1 | 548 |
| url | string (length) | 31 | 59 |
| complexity | int64 | 1 | 66 |
| token_counts | int64 | 6 | 2.13k |
| n_ast_errors | int64 | 0 | 28 |
| vocab_size | int64 | 4 | 1.11k |
| n_ast_nodes | int64 | 15 | 19.2k |
| language | stringclasses (1 value: "Python") | n/a | n/a |
| documentation | dict | n/a | n/a |
| code | string (length) | 101 | 62.2k |
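For orientation, here is a minimal sketch of how a dump with this schema could be loaded and inspected with the Hugging Face `datasets` library. The dataset identifier below is a hypothetical placeholder, since the dump does not name its source; the field accesses assume the column layout above.

```python
from datasets import load_dataset

# Hypothetical identifier: the dump does not say which dataset it was
# exported from, so substitute the real repo id here.
ds = load_dataset("user/python-commit-functions", split="train")

# Each record pairs a truncated function prefix (`random_cut`) with the
# full function body (`code`), plus commit metadata and an extracted
# docstring in the `documentation` dict.
row = ds[0]
print(row["repo"], row["fun_name"], row["url"])
print(row["documentation"]["docstring"])
print(row["code"])
```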
Sample rows (one record per table row; multi-line `random_cut`, `commit_message`, `documentation`, and `code` cells are shown verbatim):

ast_errors | d_id | id | n_whitespaces | path | n_words | n_identifiers | random_cut | commit_message | fun_name | commit_id | repo | file_name | ast_levels | nloc | url | complexity | token_counts | n_ast_errors | vocab_size | n_ast_nodes | language | documentation | code
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---
15,823 | 72,088 | 48 | wagtail/admin/tests/test_page_chooser.py | 9 | 6 | def test_type_eventpage_two_indexes(self):
self.make_event_section("Other events")
self.assertEqual(
self.get_best_root({"page_type": "tests.EventPage"}), se | Reformat with black | test_type_eventpage_two_indexes | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_page_chooser.py | 12 | 5 | https://github.com/wagtail/wagtail.git | 1 | 31 | 0 | 9 | 58 | Python | {
"docstring": "\n The chooser should start at the home page, as there are two\n EventIndexes with EventPages.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 15,
"vocab_size": 15
} | def test_type_eventpage_two_indexes(self):
self.make_event_section("Other events")
self.assertEqual(
self.get_best_root({"page_type": "tests.EventPage"}), self.home_page
)
|
|
12,368 | 60,979 | 35 | .venv/lib/python3.8/site-packages/pip/_internal/req/req_file.py | 14 | 5 | def parse(self, filename, constraint):
# type: ( | upd; format | parse | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | req_file.py | 8 | 2 | https://github.com/jindongwang/transferlearning.git | 1 | 20 | 0 | 14 | 33 | Python | {
"docstring": "Parse a given file, yielding parsed lines.\n ",
"language": "en",
"n_whitespaces": 14,
"n_words": 7,
"vocab_size": 7
} | def parse(self, filename, constraint):
# type: (str, bool) -> Iterator[ParsedLine]
yield from self._parse_and_recurse(filename, constraint)
|
|
81,114 | 273,358 | 141 | keras/layers/preprocessing/preprocessing_utils.py | 44 | 23 | def sparse_bincount(inputs, depth, binary_output, dtype, count_weights=None):
result = tf.sparse.bincount(
inputs,
weights=count_weights,
minlength=depth,
maxlength=depth,
axis=-1,
binary_output=binary_output,
)
result = tf.cast(result, dtype)
if inputs.shape.rank == 1:
output_shape = (depth,)
else:
batch_size = tf.shape(result)[0]
output_shape = (batch_size, depth)
result = tf | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | sparse_bincount | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | preprocessing_utils.py | 12 | 19 | https://github.com/keras-team/keras.git | 2 | 117 | 0 | 34 | 172 | Python | {
"docstring": "Apply binary or count encoding to an input and return a sparse tensor.",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | def sparse_bincount(inputs, depth, binary_output, dtype, count_weights=None):
result = tf.sparse.bincount(
inputs,
weights=count_weights,
minlength=depth,
maxlength=depth,
axis=-1,
binary_output=binary_output,
)
result = tf.cast(result, dtype)
if inputs.shape.rank == 1:
output_shape = (depth,)
else:
batch_size = tf.shape(result)[0]
output_shape = (batch_size, depth)
result = tf.SparseTensor(
indices=result.indices, values=result.values, dense_shape=output_shape
)
return result
|
|
20,386 | 100,939 | 110 | lib/serializer.py | 35 | 13 | def unmarshal(self, serialized_data):
logger.debug("data type: %s", type(serialized_data))
try:
retval = self._unmarshal(serialized_data)
except Exception as err:
msg | Core updates
- Change loss loading mechanism
- Autosize tooltips based on content size
- Random linting + code modernisation | unmarshal | bad5025aea1adb9126580e14e064e6c99089243d | faceswap | serializer.py | 13 | 9 | https://github.com/deepfakes/faceswap.git | 2 | 58 | 0 | 30 | 117 | Python | {
"docstring": " Unserialize data to its original object type\n\n Parameters\n ----------\n serialized_data: varies\n Data in serializer format that is to be unmarshalled to its original object\n\n Returns\n -------\n data: varies\n The data in a python object format\n\n Example\n ------\n >>> serializer = get_serializer('json')\n >>> json_data = <json object>\n >>> data = serializer.unmarshal(json_data)\n ",
"language": "en",
"n_whitespaces": 157,
"n_words": 50,
"vocab_size": 34
} | def unmarshal(self, serialized_data):
logger.debug("data type: %s", type(serialized_data))
try:
retval = self._unmarshal(serialized_data)
except Exception as err:
msg = f"Error unserializing data for type {type(serialized_data)}: {str(err)}"
raise FaceswapError(msg) from err
logger.debug("returned data type: %s", type(retval))
return retval
|
|
42,859 | 178,914 | 111 | nuitka/freezer/IncludedDataFiles.py | 25 | 7 | def copyDataFiles():
for included_datafile in getIncludedDataFiles():
# TODO: directories should be resolved | Plugins: Massive cleanup of data file handling
* Move data file handling out of standalone only, allowing support
for other modes as well.
* Attach logger and tags to data file objects. | copyDataFiles | abfb99b0a05dd76d2ecc6ebc20732a271857c6c8 | Nuitka | IncludedDataFiles.py | 13 | 9 | https://github.com/Nuitka/Nuitka.git | 4 | 36 | 0 | 25 | 62 | Python | {
"docstring": "Copy the data files needed for standalone distribution.\n\n Notes:\n This is for data files only, not DLLs or even extension modules,\n those must be registered as entry points, and would not go through\n necessary handling if provided like this.\n ",
"language": "en",
"n_whitespaces": 66,
"n_words": 39,
"vocab_size": 35
} | def copyDataFiles():
for included_datafile in getIncludedDataFiles():
# TODO: directories should be resolved to files.
if (
not isinstance(included_datafile, (IncludedDataFile))
or included_datafile.needsCopy()
):
_handleDataFile(
included_datafile,
)
|
|
73,733 | 251,426 | 81 | mitmproxy/optmanager.py | 31 | 9 | def toggler(self, attr):
if attr not in self._options:
raise KeyErr | make it black! | toggler | b3587b52b25077f68116b9852b041d33e7fc6601 | mitmproxy | optmanager.py | 11 | 8 | https://github.com/mitmproxy/mitmproxy.git | 3 | 47 | 0 | 29 | 73 | Python | {
"docstring": "\n Generate a toggler for a boolean attribute. This returns a callable\n that takes no arguments.\n ",
"language": "en",
"n_whitespaces": 37,
"n_words": 15,
"vocab_size": 13
} | def toggler(self, attr):
if attr not in self._options:
raise KeyError("No such option: %s" % attr)
o = self._options[attr]
if o.typespec != bool:
raise ValueError("Toggler can only be used with boolean options")
|
|
11,221 | 55,127 | 22 | src/prefect/testing/cli.py | 6 | 7 | def disable_terminal_wrapping(monkeypatch):
monkeypatch.setattr(
"prefect.cli.profile.console", rich.console.Console(soft_wrap=True)
)
| Continue moving objects to sensible locations | disable_terminal_wrapping | 05b2cf58e0610cedcea27e4d8cb96ad95307a068 | prefect | cli.py | 10 | 4 | https://github.com/PrefectHQ/prefect.git | 1 | 23 | 0 | 6 | 41 | Python | {
"docstring": "\n Sometimes, line wrapping makes it hard to make deterministic assertions about the\n output of a CLI command. Wrapping can be disabled by using this fixture.\n ",
"language": "en",
"n_whitespaces": 35,
"n_words": 25,
"vocab_size": 25
} | def disable_terminal_wrapping(monkeypatch):
monkeypatch.setattr(
"prefect.cli.profile.console", rich.console.Console(soft_wrap=True)
)
|
|
47,660 | 196,160 | 54 | sympy/combinatorics/permutations.py | 19 | 9 | def __add__(self, other):
rank = (self.rank() + other) % self.cardinality
rv = self.unrank_lex(self.size, rank)
rv._rank = rank
| Updated import locations | __add__ | 498015021131af4dbb07eb110e5badaba8250c7b | sympy | permutations.py | 11 | 5 | https://github.com/sympy/sympy.git | 1 | 42 | 0 | 15 | 68 | Python | {
"docstring": "Return permutation that is other higher in rank than self.\n\n The rank is the lexicographical rank, with the identity permutation\n having rank of 0.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import Permutation\n >>> I = Permutation([0, 1, 2, 3])\n >>> a = Permutation([2, 1, 3, 0])\n >>> I + a.rank() == a\n True\n\n See Also\n ========\n\n __sub__, inversion_vector\n\n ",
"language": "en",
"n_whitespaces": 148,
"n_words": 57,
"vocab_size": 44
} | def __add__(self, other):
rank = (self.rank() + other) % self.cardinality
rv = self.unrank_lex(self.size, rank)
rv._rank = rank
return rv
|
|
72,059 | 248,031 | 167 | tests/handlers/test_presence.py | 43 | 16 | def test_set_presence_from_syncing_not_set(self):
user_id = "@test:server"
status_msg = "I'm here!"
self._set_presencestate_with | Prevent a sync request from removing a user's busy presence status (#12213)
In trying to use the MSC3026 busy presence status, the user's status
would be set back to 'online' next time they synced. This change makes
it so that syncing does not affect a user's presence status if it
is currently set to 'busy': it must be removed through the presence
API.
The MSC defers to implementations on the behaviour of busy presence,
so this ought to remain compatible with the MSC. | test_set_presence_from_syncing_not_set | 73d8ded0b030a81e828c07bb134c08db67569e5d | synapse | test_presence.py | 12 | 14 | https://github.com/matrix-org/synapse.git | 1 | 85 | 0 | 33 | 139 | Python | {
"docstring": "Test that presence is not set by syncing if affect_presence is false",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 11
} | def test_set_presence_from_syncing_not_set(self):
user_id = "@test:server"
status_msg = "I'm here!"
self._set_presencestate_with_status_msg(
user_id, PresenceState.UNAVAILABLE, status_msg
)
self.get_success(
self.presence_handler.user_syncing(user_id, False, PresenceState.ONLINE)
)
state = self.get_success(
self.presence_handler.get_state(UserID.from_string(user_id))
)
# we should still be unavailable
self.assertEqual(state.state, PresenceState.UNAVAILABLE)
# and status message should still be the same
self.assertEqual(state.status_msg, status_msg)
|
|
72,155 | 248,209 | 234 | tests/events/test_utils.py | 37 | 7 | def test_stringy_integers(self):
input = {
"a": "100",
"b": {
"foo": 99,
" | Convert stringy power levels to integers on room upgrade (#12657) | test_stringy_integers | 051a1c3f220938a0ea1a5b328c268bdb3d1ad592 | synapse | test_utils.py | 11 | 19 | https://github.com/matrix-org/synapse.git | 1 | 71 | 0 | 24 | 131 | Python | {
"docstring": "String representations of decimal integers are converted to integers.",
"language": "en",
"n_whitespaces": 8,
"n_words": 9,
"vocab_size": 9
} | def test_stringy_integers(self):
input = {
"a": "100",
"b": {
"foo": 99,
"bar": "-98",
},
"d": "0999",
}
output = copy_and_fixup_power_levels_contents(input)
expected_output = {
"a": 100,
"b": {
"foo": 99,
"bar": -98,
},
"d": 999,
}
self.assertEqual(output, expected_output)
|
|
13,722 | 64,785 | 30 | erpnext/accounts/doctype/bank_reconciliation_tool/bank_reconciliation_tool.py | 41 | 14 | def get_ec_matching_query(bank_account, company, amount_condition):
# get matching Expense Claim query
mod | style: format code with black | get_ec_matching_query | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | bank_reconciliation_tool.py | 14 | 31 | https://github.com/frappe/erpnext.git | 2 | 63 | 0 | 37 | 121 | Python | {
"docstring": "\n\t\tSELECT\n\t\t\t( CASE WHEN employee = %(party)s THEN 1 ELSE 0 END\n\t\t\t+ 1 ) AS rank,\n\t\t\t'Expense Claim' as doctype,\n\t\t\tname,\n\t\t\ttotal_sanctioned_amount as paid_amount,\n\t\t\t'' as reference_no,\n\t\t\t'' as reference_date,\n\t\t\temployee as party,\n\t\t\t'Employee' as party_type,\n\t\t\tposting_date,\n\t\t\t'{company_currency}' as currency\n\t\tFROM\n\t\t\t`tabExpense Claim`\n\t\tWHERE\n\t\t\ttotal_sanctioned_amount {amount_condition} %(amount)s\n\t\t\tAND docstatus = 1\n\t\t\tAND is_paid = 1\n\t\t\tAND ifnull(clearance_date, '') = \"\"\n\t\t\tAND mode_of_payment in {mode_of_payments}\n\t",
"language": "en",
"n_whitespaces": 45,
"n_words": 65,
"vocab_size": 47
} | def get_ec_matching_query(bank_account, company, amount_condition):
# get matching Expense Claim query
mode_of_payments = [
x["parent"]
for x in frappe.db.get_all(
"Mode of Payment Account", filters={"default_account": bank_account}, fields=["parent"]
)
]
mode_of_payments = "('" + "', '".join(mode_of_payments) + "' )"
company_currency = get_company_currency(company)
return f
|
|
36,108 | 154,602 | 453 | modin/experimental/core/execution/native/implementations/hdk_on_native/partitioning/partition_manager.py | 114 | 47 | def run_exec_plan(cls, plan, index_cols, dtypes, columns):
omniSession = DbWorker()
# First step is to make sure all partitions are in | FEAT-#4946: Replace OmniSci with HDK (#4947)
Co-authored-by: Iaroslav Igoshev <[email protected]>
Signed-off-by: Andrey Pavlenko <[email protected]> | run_exec_plan | e5b1888cd932909e49194d58035da34b210b91c4 | modin | partition_manager.py | 17 | 27 | https://github.com/modin-project/modin.git | 9 | 225 | 0 | 80 | 364 | Python | {
"docstring": "\n Run execution plan in HDK storage format to materialize frame.\n\n Parameters\n ----------\n plan : DFAlgNode\n A root of an execution plan tree.\n index_cols : list of str\n A list of index columns.\n dtypes : pandas.Index\n Column data types.\n columns : list of str\n A frame column names.\n\n Returns\n -------\n np.array\n Created frame's partitions.\n ",
"language": "en",
"n_whitespaces": 186,
"n_words": 53,
"vocab_size": 39
} | def run_exec_plan(cls, plan, index_cols, dtypes, columns):
omniSession = DbWorker()
# First step is to make sure all partitions are in HDK.
frames = plan.collect_frames()
for frame in frames:
if frame._partitions.size != 1:
raise NotImplementedError(
"HdkOnNative engine doesn't suport partitioned frames"
)
for p in frame._partitions.flatten():
if p.frame_id is None:
obj = p.get()
if isinstance(obj, (pandas.DataFrame, pandas.Series)):
p.frame_id = omniSession.import_pandas_dataframe(obj)
else:
assert isinstance(obj, pyarrow.Table)
p.frame_id = omniSession.import_arrow_table(obj)
calcite_plan = CalciteBuilder().build(plan)
calcite_json = CalciteSerializer().serialize(calcite_plan)
cmd_prefix = "execute relalg "
if DoUseCalcite.get():
cmd_prefix = "execute calcite "
at = omniSession.executeRA(cmd_prefix + calcite_json)
res = np.empty((1, 1), dtype=np.dtype(object))
# workaround for https://github.com/modin-project/modin/issues/1851
if DoUseCalcite.get():
at = at.rename_columns(["F_" + str(c) for c in columns])
res[0][0] = cls._partition_class.put_arrow(at)
return res
|
|
27,062 | 121,448 | 16 | jax/_src/dtypes.py | 13 | 3 | def to_numeric_dtype(dtype):
dtype = np.dt | jax.numpy: improve support for boolean inputs | to_numeric_dtype | 3f0619599499fc0751cd6181c04d50245ef5dcce | jax | dtypes.py | 10 | 3 | https://github.com/google/jax.git | 2 | 32 | 0 | 11 | 57 | Python | {
"docstring": "Promotes a dtype into an numeric dtype, if it is not already one.",
"language": "en",
"n_whitespaces": 12,
"n_words": 13,
"vocab_size": 13
} | def to_numeric_dtype(dtype):
dtype = np.dtype(dtype)
return np.dtype('int32') if dtype == np.dtype('bool') else dtype
|
|
9,133 | 47,501 | 266 | tests/jobs/test_scheduler_job.py | 58 | 43 | def test_do_schedule_max_active_runs_task_removed(self, session, dag_maker):
with dag_maker(
dag_id='test_do_schedule_max_active_runs_task_removed',
start_date=DEFAULT_DATE,
schedule_interval='@once',
max_active_runs=1,
session=session,
):
# Can't use EmptyOperator as that goes straight to success
BashOperator(task_id='dummy1', bash_command='true')
run1 = dag_maker.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE + timedelta(hours=1),
state=State.RUNNING,
)
self.scheduler_job = Schedule | Replace usage of `DummyOperator` with `EmptyOperator` (#22974)
* Replace usage of `DummyOperator` with `EmptyOperator` | test_do_schedule_max_active_runs_task_removed | 49e336ae0302b386a2f47269a6d13988382d975f | airflow | test_scheduler_job.py | 13 | 23 | https://github.com/apache/airflow.git | 1 | 156 | 0 | 50 | 249 | Python | {
"docstring": "Test that tasks in removed state don't count as actively running.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def test_do_schedule_max_active_runs_task_removed(self, session, dag_maker):
with dag_maker(
dag_id='test_do_schedule_max_active_runs_task_removed',
start_date=DEFAULT_DATE,
schedule_interval='@once',
max_active_runs=1,
session=session,
):
# Can't use EmptyOperator as that goes straight to success
BashOperator(task_id='dummy1', bash_command='true')
run1 = dag_maker.create_dagrun(
run_type=DagRunType.SCHEDULED,
execution_date=DEFAULT_DATE + timedelta(hours=1),
state=State.RUNNING,
)
self.scheduler_job = SchedulerJob(subdir=os.devnull)
self.scheduler_job.executor = MockExecutor(do_update=False)
self.scheduler_job.processor_agent = mock.MagicMock(spec=DagFileProcessorAgent)
num_queued = self.scheduler_job._do_scheduling(session)
assert num_queued == 1
session.flush()
ti = run1.task_instances[0]
ti.refresh_from_db(session=session)
assert ti.state == State.QUEUED
|
|
38,908 | 161,097 | 129 | ppg_extractor/encoder/encoder.py | 36 | 12 | def forward(self, xs, masks):
if isinstance(self.embed, (Conv2dSubsampling, VGG2L)):
xs, masks = self.embed(xs, masks)
else:
xs = self.embed(xs)
xs, masks = self.encoders(xs, masks)
if isinstance(xs, tuple):
xs = xs[0]
if self.normalize_before:
| Init ppg extractor and ppg2mel (#375)
* Init ppg extractor and ppg2mel
* add preprocess and training
* FIx known issues
* Update __init__.py
Allow to gen audio
* Fix length issue
* Fix bug of preparing fid
* Fix sample issues
* Add UI usage of PPG-vc | forward | b617a87ee40ab384767a27335313c2c65ee094ec | MockingBird | encoder.py | 11 | 11 | https://github.com/babysor/MockingBird.git | 4 | 89 | 0 | 22 | 138 | Python | {
"docstring": "Encode input sequence.\n\n :param torch.Tensor xs: input tensor\n :param torch.Tensor masks: input mask\n :return: position embedded tensor and mask\n :rtype Tuple[torch.Tensor, torch.Tensor]:\n ",
"language": "en",
"n_whitespaces": 57,
"n_words": 22,
"vocab_size": 16
} | def forward(self, xs, masks):
if isinstance(self.embed, (Conv2dSubsampling, VGG2L)):
xs, masks = self.embed(xs, masks)
else:
xs = self.embed(xs)
xs, masks = self.encoders(xs, masks)
if isinstance(xs, tuple):
xs = xs[0]
if self.normalize_before:
xs = self.after_norm(xs)
return xs, masks
|
|
83,921 | 281,630 | 32 | gamestonk_terminal/parent_classes.py | 7 | 6 | def save_class(self):
if gtff.REMEMBER_CONTEXTS:
| Remember Contexts (#1187)
* Refacotred classes
* Handling for new instance desired
* Added feature flag
* Converted all menu calls | save_class | 9e671aeba98dacc69ecbbfec1f087aca3b139ee7 | OpenBBTerminal | parent_classes.py | 10 | 3 | https://github.com/OpenBB-finance/OpenBBTerminal.git | 2 | 19 | 0 | 7 | 33 | Python | {
"docstring": "Saves the current instance of the class to be loaded later",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 10
} | def save_class(self):
if gtff.REMEMBER_CONTEXTS:
controllers[self.PATH] = self
|
|
53,912 | 215,291 | 416 | salt/transport/zeromq.py | 119 | 23 | def _decode_messages(self, messages):
messages_len = len(messages)
# if it was one message, then its old style
if messages_len == 1:
payload = salt.payload.loads(messages[0])
# 2 includes a header which says who should do it
elif messages_len == 2:
message_target = salt.utils.stringutils.to_str(messages[0])
if (
self.opts.get("__role") != "syndic"
and message_target not in ("broadcast", self.hexid)
) or (
self.opts.get("__role") == "syndic"
and message_target not in ("broadcast", "syndic")
):
log.debug("Publish received for not this minion: %s", message_target)
raise salt.ext.tornado.gen.Return(None)
payload = salt.payload.loads(messages[1])
else:
raise Exception(
| Refactor into transports and channels | _decode_messages | d4e6111086ff713eb6609dc6c98cec98aded2564 | salt | zeromq.py | 15 | 23 | https://github.com/saltstack/salt.git | 7 | 161 | 0 | 84 | 272 | Python | {
"docstring": "\n Take the zmq messages, decrypt/decode them into a payload\n\n :param list messages: A list of messages to be decoded\n ",
"language": "en",
"n_whitespaces": 41,
"n_words": 19,
"vocab_size": 18
} | def _decode_messages(self, messages):
messages_len = len(messages)
# if it was one message, then its old style
if messages_len == 1:
payload = salt.payload.loads(messages[0])
# 2 includes a header which says who should do it
elif messages_len == 2:
message_target = salt.utils.stringutils.to_str(messages[0])
if (
self.opts.get("__role") != "syndic"
and message_target not in ("broadcast", self.hexid)
) or (
self.opts.get("__role") == "syndic"
and message_target not in ("broadcast", "syndic")
):
log.debug("Publish received for not this minion: %s", message_target)
raise salt.ext.tornado.gen.Return(None)
payload = salt.payload.loads(messages[1])
else:
raise Exception(
"Invalid number of messages ({}) in zeromq pubmessage from master".format(
len(messages_len)
)
)
# Yield control back to the caller. When the payload has been decoded, assign
# the decoded payload to 'ret' and resume operation
raise salt.ext.tornado.gen.Return(payload)
|
|
31,197 | 137,593 | 193 | python/ray/tests/test_runtime_env.py | 74 | 14 | def test_get_release_wheel_url():
# This should be a commit for which wheels have al | [runtime env] Support python 3.10 for runtime_env conda (#30970)
Signed-off-by: Archit Kulkarni <[email protected]>
conda environments are isolated, so when runtime_env sets up a conda environment it must download the Ray wheel into the conda environment. It must download the wheel that matches the current Python and Ray version running, otherwise there will be incompatibility issues between the workers that use this runtime_env and the other workers and Ray processes.
This PR updates the wheel name format logic to support Python 3.10. | test_get_release_wheel_url | 98fef7732852cdb3e9377cd87c1ee1085b894928 | ray | test_runtime_env.py | 15 | 9 | https://github.com/ray-project/ray.git | 6 | 80 | 0 | 53 | 136 | Python | {
"docstring": "Test the code that generates the filenames of the `release` branch wheels.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 10
} | def test_get_release_wheel_url():
# This should be a commit for which wheels have already been built for
# all platforms and python versions at
# `s3://ray-wheels/releases/2.2.0/<commit>/`.
test_commits = {"2.2.0": "b6af0887ee5f2e460202133791ad941a41f15beb"}
for sys_platform in ["darwin", "linux", "win32"]:
for py_version in ray_constants.RUNTIME_ENV_CONDA_PY_VERSIONS:
for version, commit in test_commits.items():
if sys_platform == "win32" and py_version == (3, 6):
# Windows wheels are not built for py3.6 anymore
continue
url = get_release_wheel_url(commit, sys_platform, version, py_version)
assert requests.head(url).status_code == 200, url
|
|
72,618 | 249,111 | 282 | tests/rest/admin/test_media.py | 61 | 19 | def test_keep_media_by_date(self) -> None:
# timestamp before upload
now_ms = self.clock.time_msec()
server_and_media_id = self._create_media()
self._access_media(server_and_media_id)
channel = self.make_request(
"POST",
self.url + "?before_ts=" + str(now_ms),
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self._access_media(server_and_media_id)
# timestamp after upload
now_ms = sel | Use literals in place of `HTTPStatus` constants in tests (#13469) | test_keep_media_by_date | c97042f7eef3748e17c90e48a4122389a89c4735 | synapse | test_media.py | 11 | 28 | https://github.com/matrix-org/synapse.git | 1 | 188 | 0 | 35 | 304 | Python | {
"docstring": "\n Tests that media is not deleted if it is newer than `before_ts`\n ",
"language": "en",
"n_whitespaces": 27,
"n_words": 12,
"vocab_size": 11
} | def test_keep_media_by_date(self) -> None:
# timestamp before upload
now_ms = self.clock.time_msec()
server_and_media_id = self._create_media()
self._access_media(server_and_media_id)
channel = self.make_request(
"POST",
self.url + "?before_ts=" + str(now_ms),
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(0, channel.json_body["total"])
self._access_media(server_and_media_id)
# timestamp after upload
now_ms = self.clock.time_msec()
channel = self.make_request(
"POST",
self.url + "?before_ts=" + str(now_ms),
access_token=self.admin_user_tok,
)
self.assertEqual(200, channel.code, msg=channel.json_body)
self.assertEqual(1, channel.json_body["total"])
self.assertEqual(
server_and_media_id.split("/")[1],
channel.json_body["deleted_media"][0],
)
self._access_media(server_and_media_id, False)
|
|
@pytest.mark.filterwarnings("ignore:Version 1 of dataset Australian is inactive")
# TODO(1.4): remove this filterwarning decorator for `parser`
@pytest.mark.filterwarnings("ignore:The default value of `parser` will change")
@pytest.mark.parametrize(
"params, err_msg",
[
(
{"parser": "pandas"},
"Sparse ARFF datasets cannot be loaded with parser='pandas'",
),
(
{"as_frame": True},
"Sparse ARFF datasets cannot be loaded with as_frame=True.",
),
(
{"parser": "pandas", "as_frame": True},
"Sparse ARFF datasets cannot be loaded with as_frame=True.",
),
],
) | 75,970 | 259,885 | 306 | sklearn/datasets/tests/test_openml.py | 112 | 17 | def test_fetch_openml_requires_pandas_in_future(monkeypatch):
params = {"as_frame": False, "parser": "auto"}
data_id = 1119
try:
check_pandas_support("test_fetch_openml_requires_pandas")
except ImportError:
_monkey_patch_webbased_functions(monk | ENH improve ARFF parser using pandas (#21938)
Co-authored-by: Thomas J. Fan <[email protected]>
Co-authored-by: Olivier Grisel <[email protected]>
Co-authored-by: Adrin Jalali <[email protected]> | test_fetch_openml_requires_pandas_in_future | a47d569e670fd4102af37c3165c9b1ddf6fd3005 | scikit-learn | test_openml.py | 13 | 14 | https://github.com/scikit-learn/scikit-learn.git | 2 | 70 | 1 | 80 | 247 | Python | {
"docstring": "Check that we raise a warning that pandas will be required in the future.",
"language": "en",
"n_whitespaces": 13,
"n_words": 14,
"vocab_size": 13
} | def test_fetch_openml_requires_pandas_in_future(monkeypatch):
params = {"as_frame": False, "parser": "auto"}
data_id = 1119
try:
check_pandas_support("test_fetch_openml_requires_pandas")
except ImportError:
_monkey_patch_webbased_functions(monkeypatch, data_id, True)
warn_msg = (
"From version 1.4, `parser='auto'` with `as_frame=False` will use pandas"
)
with pytest.warns(FutureWarning, match=warn_msg):
fetch_openml(data_id=data_id, **params)
else:
raise SkipTest("This test requires pandas to not be installed.")
@pytest.mark.filterwarnings("ignore:Version 1 of dataset Australian is inactive")
# TODO(1.4): remove this filterwarning decorator for `parser`
@pytest.mark.filterwarnings("ignore:The default value of `parser` will change")
@pytest.mark.parametrize(
"params, err_msg",
[
(
{"parser": "pandas"},
"Sparse ARFF datasets cannot be loaded with parser='pandas'",
),
(
{"as_frame": True},
"Sparse ARFF datasets cannot be loaded with as_frame=True.",
),
(
{"parser": "pandas", "as_frame": True},
"Sparse ARFF datasets cannot be loaded with as_frame=True.",
),
],
) |
4,130 | 22,041 | 264 | pipenv/patched/pip/_vendor/requests/adapters.py | 62 | 19 | def get_connection(self, url, proxies=None):
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, "http")
proxy_url = parse_url(proxy)
if not proxy_url.host:
raise InvalidProxyURL(
"Please check proxy URL. It is malformed "
"and could be missing the host."
| Rename notpip to pip. Vendor in pip-22.2.1 and latest requirementslib and vistir. | get_connection | cd5a9683be69c86c8f3adcd13385a9bc5db198ec | pipenv | adapters.py | 13 | 17 | https://github.com/pypa/pipenv.git | 3 | 92 | 0 | 49 | 157 | Python | {
"docstring": "Returns a urllib3 connection for the given URL. This should not be\n called from user code, and is only exposed for use when subclassing the\n :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.\n\n :param url: The URL to connect to.\n :param proxies: (optional) A Requests-style dictionary of proxies used on this request.\n :rtype: urllib3.ConnectionPool\n ",
"language": "en",
"n_whitespaces": 90,
"n_words": 48,
"vocab_size": 45
} | def get_connection(self, url, proxies=None):
proxy = select_proxy(url, proxies)
if proxy:
proxy = prepend_scheme_if_needed(proxy, "http")
proxy_url = parse_url(proxy)
if not proxy_url.host:
raise InvalidProxyURL(
"Please check proxy URL. It is malformed "
"and could be missing the host."
)
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
|
|
35,165 | 151,923 | 258 | freqtrade/templates/FreqaiExampleStrategy.py | 70 | 21 | def feature_engineering_expand_all(self, dataframe, period, **kwargs):
dataframe["%-rsi-period"] = ta.RSI(dataframe, timeperiod=period)
dataframe["%-mfi-period"] = ta.MFI(dataframe, timeperiod=period)
dataframe["%-adx-period"] = ta.ADX(dataframe, timeperiod=period)
dataframe["%-sma-period"] = ta.SMA(dataframe, timeperiod=period)
dataframe["%-ema-period"] = ta.EMA(dataframe, timeperiod=period)
bollinger = qtpylib.bollinger_bands(
qtpylib.typical_price(dataframe), window=period, stds=2.2
)
dataframe["bb_lowerband-period"] = bollinger["lower"]
dataframe["bb_middleband-period"] = bollinger["mid"]
dataframe["bb_upperband-period"] = bollinger["upper"]
dataframe["%-bb_width-period"] = (
dataframe["bb_upperband-period"]
- dataframe["bb_lowerband-period"]
) / dataframe["bb_middleband-period"]
dataframe["%-close-bb_lower-period"] = (
dataframe["close"] / dataframe["bb_lowerband-period"]
)
dataframe["%-roc-period"] = ta.ROC(dataframe, timeperiod=period)
dataframe["%-relative_volume-period"] = (
dataframe["volume"] / dataframe["volume"].rolling(period).mean()
)
return dataframe
| improve doc, update test strats, change function names | feature_engineering_expand_all | c2936d551b8ad6ccf7b57e2ac6cb55d8550622cf | freqtrade | FreqaiExampleStrategy.py | 14 | 24 | https://github.com/freqtrade/freqtrade.git | 1 | 217 | 0 | 42 | 361 | Python | {
"docstring": "\n *Only functional with FreqAI enabled strategies*\n This function will automatically expand the defined features on the config defined\n `indicator_periods_candles`, `include_timeframes`, `include_shifted_candles`, and\n `include_corr_pairs`. In other words, a single feature defined in this function\n will automatically expand to a total of\n `indicator_periods_candles` * `include_timeframes` * `include_shifted_candles` *\n `include_corr_pairs` numbers of features added to the model.\n\n All features must be prepended with `%` to be recognized by FreqAI internals.\n\n More details on how these config defined parameters accelerate feature engineering\n in the documentation at:\n\n https://www.freqtrade.io/en/latest/freqai-parameter-table/#feature-parameters\n\n https://www.freqtrade.io/en/latest/freqai-feature-engineering/#defining-the-features\n\n :param df: strategy dataframe which will receive the features\n :param period: period of the indicator - usage example:\n dataframe[\"%-ema-period\"] = ta.EMA(dataframe, timeperiod=period)\n ",
"language": "en",
"n_whitespaces": 219,
"n_words": 106,
"vocab_size": 75
} | def feature_engineering_expand_all(self, dataframe, period, **kwargs):
dataframe["%-rsi-period"] = ta.RSI(dataframe, timeperiod=period)
dataframe["%-mfi-period"] = ta.MFI(dataframe, timeperiod=period)
dataframe["%-adx-period"] = ta.ADX(dataframe, timeperiod=period)
dataframe["%-sma-period"] = ta.SMA(dataframe, timeperiod=period)
dataframe["%-ema-period"] = ta.EMA(dataframe, timeperiod=period)
bollinger = qtpylib.bollinger_bands(
qtpylib.typical_price(dataframe), window=period, stds=2.2
)
dataframe["bb_lowerband-period"] = bollinger["lower"]
dataframe["bb_middleband-period"] = bollinger["mid"]
dataframe["bb_upperband-period"] = bollinger["upper"]
dataframe["%-bb_width-period"] = (
dataframe["bb_upperband-period"]
- dataframe["bb_lowerband-period"]
) / dataframe["bb_middleband-period"]
dataframe["%-close-bb_lower-period"] = (
dataframe["close"] / dataframe["bb_lowerband-period"]
)
dataframe["%-roc-period"] = ta.ROC(dataframe, timeperiod=period)
dataframe["%-relative_volume-period"] = (
dataframe["volume"] / dataframe["volume"].rolling(period).mean()
)
return dataframe
|
|
50,767 | 204,521 | 44 | django/core/handlers/asgi.py | 12 | 6 | def get_script_prefix(self, scope):
if settings | Refs #33476 -- Reformatted code with Black. | get_script_prefix | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | asgi.py | 9 | 4 | https://github.com/django/django.git | 3 | 28 | 0 | 11 | 51 | Python | {
"docstring": "\n Return the script prefix to use from either the scope or a setting.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 13,
"vocab_size": 12
} | def get_script_prefix(self, scope):
if settings.FORCE_SCRIPT_NAME:
return settings.FORCE_SCRIPT_NAME
return scope.get("root_path", "") or ""
|
|
11,327 | 55,476 | 65 | tests/cli/test_storage_cli.py | 19 | 9 | def test_get_first_menu_and_fail():
part_one = f
part_two = f
command = ["storage", "create"]
invoke_and_assert_in(
command=command,
desired_contents=(part_one, part_two),
e | Update tests | test_get_first_menu_and_fail | 11638691240b7595c0d02542af506a96d344ae8b | prefect | test_storage_cli.py | 10 | 23 | https://github.com/PrefectHQ/prefect.git | 1 | 44 | 0 | 16 | 82 | Python | {
"docstring": "\n Make sure that our utility function is returning as expected\n \n Found the following storage types:\n 0) Azure Blob Storage\n Store data in an Azure blob storage container.\n 1) File Storage\n Store data as a file on local or remote file systems.\n 2) Google Cloud Storage\n Store data in a GCS bucket.\n 3) Local Storage\n Store data in a run's local file system.\n \n Select a storage type to create: 99999999\n Invalid selection {INVALID_OPTION}\n ",
"language": "en",
"n_whitespaces": 136,
"n_words": 72,
"vocab_size": 51
} | def test_get_first_menu_and_fail():
part_one = f
part_two = f
command = ["storage", "create"]
invoke_and_assert_in(
command=command,
desired_contents=(part_one, part_two),
expected_code=1,
user_input=f"{INVALID_OPTION}\n",
)
|
|
41,953 | 176,544 | 107 | networkx/algorithms/planarity.py | 35 | 8 | def check_planarity(G, counterexample=False):
planarity_state = LRPlanarity(G)
embedding = planarity_state.lr_planarity()
if embedding is None:
# graph is not planar
if counterexample:
return False, get_counterexample(G)
else:
return False, None
else:
| Improve documentation of PlanarEmbedding class (#5523)
* Improve documentation of PlanarEmbedding
* Fix type
* Make suggested changes
* rst formatting nits.
* Update networkx/algorithms/planarity.py
Co-authored-by: Dan Schult <[email protected]>
* Run black for formatting
Co-authored-by: Ross Barnowski <[email protected]>
Co-authored-by: Dan Schult <[email protected]> | check_planarity | 1af7d49d70869081e5cb64d17165652f1b26c57b | networkx | planarity.py | 12 | 10 | https://github.com/networkx/networkx.git | 3 | 50 | 0 | 22 | 86 | Python | {
"docstring": "Check if a graph is planar and return a counterexample or an embedding.\n\n A graph is planar iff it can be drawn in a plane without\n any edge intersections.\n\n Parameters\n ----------\n G : NetworkX graph\n counterexample : bool\n A Kuratowski subgraph (to proof non planarity) is only returned if set\n to true.\n\n Returns\n -------\n (is_planar, certificate) : (bool, NetworkX graph) tuple\n is_planar is true if the graph is planar.\n If the graph is planar `certificate` is a PlanarEmbedding\n otherwise it is a Kuratowski subgraph.\n\n Examples\n --------\n >>> G = nx.Graph([(0, 1), (0, 2)])\n >>> is_planar, P = nx.check_planarity(G)\n >>> print(is_planar)\n True\n\n When `G` is planar, a `PlanarEmbedding` instance is returned:\n\n >>> P.get_data()\n {0: [1, 2], 1: [0], 2: [0]}\n\n Notes\n -----\n A (combinatorial) embedding consists of cyclic orderings of the incident\n edges at each vertex. Given such an embedding there are multiple approaches\n discussed in literature to drawing the graph (subject to various\n constraints, e.g. integer coordinates), see e.g. [2].\n\n The planarity check algorithm and extraction of the combinatorial embedding\n is based on the Left-Right Planarity Test [1].\n\n A counterexample is only generated if the corresponding parameter is set,\n because the complexity of the counterexample generation is higher.\n\n References\n ----------\n .. [1] Ulrik Brandes:\n The Left-Right Planarity Test\n 2009\n http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.217.9208\n .. [2] Takao Nishizeki, Md Saidur Rahman:\n Planar graph drawing\n Lecture Notes Series on Computing: Volume 12\n 2004\n ",
"language": "en",
"n_whitespaces": 404,
"n_words": 228,
"vocab_size": 154
} | def check_planarity(G, counterexample=False):
planarity_state = LRPlanarity(G)
embedding = planarity_state.lr_planarity()
if embedding is None:
# graph is not planar
if counterexample:
return False, get_counterexample(G)
else:
return False, None
else:
# graph is planar
return True, embedding
|
|
7,734 | 42,786 | 81 | airflow/providers/cncf/kubernetes/hooks/kubernetes.py | 26 | 8 | def _get_bool(val) -> Optional[bool]:
if isinstance(val, bool):
return val
elif isinstance(val, str):
if val.strip().lower() == 'true':
return True
| Use KubernetesHook to create api client in KubernetesPodOperator (#20578)
Add support for k8s hook in KPO; use it always (even when no conn id); continue to consider the core k8s settings that KPO already takes into account but emit deprecation warning about them.
KPO historically takes into account a few settings from core airflow cfg (e.g. verify ssl, tcp keepalive, context, config file, and in_cluster). So to use the hook to generate the client, somehow the hook has to take these settings into account. But we don't want the hook to consider these settings in general. So we read them in KPO and if necessary patch the hook and warn. | _get_bool | 60eb9e106f5915398eafd6aa339ec710c102dc09 | airflow | kubernetes.py | 14 | 13 | https://github.com/apache/airflow.git | 5 | 61 | 0 | 18 | 104 | Python | {
"docstring": "\n Converts val to bool if can be done with certainty.\n If we cannot infer intention we return None.\n ",
"language": "en",
"n_whitespaces": 28,
"n_words": 18,
"vocab_size": 17
} | def _get_bool(val) -> Optional[bool]:
if isinstance(val, bool):
return val
elif isinstance(val, str):
if val.strip().lower() == 'true':
return True
elif val.strip().lower() == 'false':
return False
return None
|
|
37,345 | 158,166 | 259 | d2l/mxnet.py | 76 | 33 | def load_data_ptb(batch_size, max_window_size, num_noise_words):
sentences = read_ptb()
vocab = d2l.Vocab(sentences, min_freq=10)
subsampled, counter = subsample(sentences, vocab)
corpus = [vocab[line] for line in subsampled]
all_centers, all_contexts = get_centers_and_contexts(
corpus, max_window_size)
all_negatives = get_negatives(
all_contexts, vocab, counter, num_noise_words)
dataset = gluon.data.ArrayDataset(
all_centers, all_contexts, all_negatives)
data_iter = gluon.data.DataLoader(
dataset, batch_size, shuffle=True,batchify_fn=batc | [PaddlePaddle] Merge master into Paddle branch (#1186)
* change 15.2 title in chinese version (#1109)
change title '15.2. 情感分析:使用递归神经网络' ("Sentiment Analysis: Using Recursive Neural Networks") to '15.2. 情感分析:使用循环神经网络' ("Sentiment Analysis: Using Recurrent Neural Networks")
* Revise some of the wording (#1105)
* Update r0.17.5 (#1120)
* Bump versions in installation
* Line 94 typo: (“bert.mall”)->(“bert.small”) (#1129)
* line 313: "bert.mall" -> "bert.small" (#1130)
* fix: update language as native reader (#1114)
* Fix the translation of "stride" (#1115)
* Update index.md (#1118)
Revise some of the wording
* Update self-attention-and-positional-encoding.md (#1133)
Following this book's translation conventions, render "pooling" as "汇聚"
* maybe a comment false (#1149)
* maybe a little false
* maybe a little false
* A minor bug in the rcnn section (Chinese edition) (#1148)
* Update bert.md (#1137)
A typo
# Suppose batch_size=2, num_pred_positions=3
# then batch_idx should be np.repeat( [0,1], 3 ) = [0,0,0,1,1,1]
* Update calculus.md (#1135)
* fix typo in git documentation (#1106)
* fix: Update the Chinese translation in lr-scheduler.md (#1136)
* Update lr-scheduler.md
* Update chapter_optimization/lr-scheduler.md
Co-authored-by: goldmermaid <[email protected]>
Co-authored-by: goldmermaid <[email protected]>
* fix translation for kaggle-house-price.md (#1107)
* fix translation for kaggle-house-price.md
* fix translation for kaggle-house-price.md
Signed-off-by: sunhaizhou <[email protected]>
* Update weight-decay.md (#1150)
* Update weight-decay.md
For the "choose d from k" part, readers of the Chinese edition may find it easier to understand via permutations and combinations.
The sentence "given k variables, the number of orders is ..." is ambiguous and reads unnaturally in Chinese; it should say "the number of terms of degree d is ...".
Also added an explanatory sentence for "Therefore, even a small change in degree, say from $2$ to $3$, significantly increases the complexity of our model."
It explains why the complexity increases and why fine-grained tools are needed.
* Update chapter_multilayer-perceptrons/weight-decay.md
yep
Co-authored-by: goldmermaid <[email protected]>
* Update chapter_multilayer-perceptrons/weight-decay.md
yep
Co-authored-by: goldmermaid <[email protected]>
Co-authored-by: goldmermaid <[email protected]>
* Fix a spelling error (#1161)
* Update gru.md (#1152)
The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state.
Translation error
* Unify the function naming (#1113)
Unify naming of the function 'init_xavier()'.
* Update mlp-concise.md (#1166)
* Update mlp-concise.md
Sentence reads awkwardly
* Update environment.md
Unusual word order
* Update config.ini
* fix the imprecise description (#1168)
Co-authored-by: yuande <yuande>
* fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175)
* Fix some typos. (#1163)
* Update batch-norm.md (#1170)
fixing typos u->x in article
* Update linear-regression.md (#1090)
We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that
The original translation also rendered "who" literally.
* Update mlp.md (#1117)
* Update mlp.md
Revise some of the wording
* Update chapter_multilayer-perceptrons/mlp.md
Co-authored-by: goldmermaid <[email protected]>
* Update chapter_multilayer-perceptrons/mlp.md
Co-authored-by: Aston Zhang <[email protected]>
Co-authored-by: goldmermaid <[email protected]>
* Correct a translation error. (#1091)
* Correct a translation error.
* Update chapter_computer-vision/image-augmentation.md
Co-authored-by: Aston Zhang <[email protected]>
* Update aws.md (#1121)
* Update aws.md
* Update chapter_appendix-tools-for-deep-learning/aws.md
Co-authored-by: Aston Zhang <[email protected]>
* Update image-augmentation.md (#1093)
* Update anchor.md (#1088)
fix a minor issue in code
* Update anchor.md
* Update image-augmentation.md
* fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087)
* Avoid `torch.meshgrid` user warning (#1174)
Avoids the following user warning:
```python
~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.)
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
```
* bump to 2.0.0-beta1
* Update sequence.md
* bump beta1 on readme
* Add latex code block background to config
* BLD: Bump python support version 3.9 (#1183)
* BLD: Bump python support version 3.9
* Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4
* BLD: Bump torch and tensorflow
* Update Jenkinsfile
* Update chapter_installation/index.md
* Update chapter_installation/index.md
Co-authored-by: Aston Zhang <[email protected]>
* Update config.ini
* Update INFO.md
* Update INFO.md
* Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187)
* resolve the conflicts
* revise from publisher (#1089)
* revise from publisher
* d2l api
* post_latex
* revise from publisher
* revise ch11
* Delete d2l-Copy1.bib
* clear cache
* rm d2lbook clear
* debug anchor
* keep original d2l doc
Co-authored-by: Ubuntu <[email protected]>
Co-authored-by: Aston Zhang <[email protected]>
Co-authored-by: Aston Zhang <[email protected]>
* Duplicated sentence (#1188)
Co-authored-by: Aston Zhang <[email protected]>
* Improve expression for chapter_preliminaries/pandas.md (#1184)
* Update pandas.md
* Improve expression
* Improve expression
* Update chapter_preliminaries/pandas.md
Co-authored-by: Aston Zhang <[email protected]>
* Improce expression for chapter_preliminaries/linear-algebra.md (#1185)
* Improce expression
* Improve code comments
* Update chapter_preliminaries/linear-algebra.md
* Update chapter_preliminaries/linear-algebra.md
* Update chapter_preliminaries/linear-algebra.md
* Update chapter_preliminaries/linear-algebra.md
Co-authored-by: Aston Zhang <[email protected]>
* Fix multibox_detection bugs
* Update d2l to 0.17.5 version
* restore older version
* Upgrade pandas
* change to python3.8
* Test warning log
* relocate warning log
* test logs filtering
* Update gru.md
* Add DeprecationWarning filter
* Test warning log
* Update attention mechanisms & computational performance
* Update multilayer perceptron& linear & convolution networks & computer vision
* Update recurrent&optimition&nlp pretraining & nlp applications
* ignore warnings
* Update index.md
* Update linear networks
* Update multilayer perceptrons&deep learning computation
* Update preliminaries
* Check and Add warning filter
* Update kaggle-cifar10.md
* Update object-detection-dataset.md
* Update ssd.md fcn.md
* Update hybridize.md
* Update hybridize.md
Signed-off-by: sunhaizhou <[email protected]>
Co-authored-by: zhou201505013 <[email protected]>
Co-authored-by: Xinwei Liu <[email protected]>
Co-authored-by: Anirudh Dagar <[email protected]>
Co-authored-by: Aston Zhang <[email protected]>
Co-authored-by: hugo_han <[email protected]>
Co-authored-by: gyro永不抽风 <[email protected]>
Co-authored-by: CanChengZheng <[email protected]>
Co-authored-by: linlin <[email protected]>
Co-authored-by: iuk <[email protected]>
Co-authored-by: yoos <[email protected]>
Co-authored-by: Mr. Justice Lawrence John Wargrave <[email protected]>
Co-authored-by: Chiyuan Fu <[email protected]>
Co-authored-by: Sunhuashan <[email protected]>
Co-authored-by: Haiker Sun <[email protected]>
Co-authored-by: Ming Liu <[email protected]>
Co-authored-by: goldmermaid <[email protected]>
Co-authored-by: silenceZheng66 <[email protected]>
Co-authored-by: Wenchao Yan <[email protected]>
Co-authored-by: Kiki2049 <[email protected]>
Co-authored-by: Krahets <[email protected]>
Co-authored-by: friedmainfunction <[email protected]>
Co-authored-by: Jameson <[email protected]>
Co-authored-by: P. Yao <[email protected]>
Co-authored-by: Yulv-git <[email protected]>
Co-authored-by: Liu,Xiao <[email protected]>
Co-authored-by: YIN, Gang <[email protected]>
Co-authored-by: Joe-HZ <[email protected]>
Co-authored-by: lybloveyou <[email protected]>
Co-authored-by: VigourJiang <[email protected]>
Co-authored-by: zxhd863943427 <[email protected]>
Co-authored-by: LYF <[email protected]>
Co-authored-by: Aston Zhang <[email protected]>
Co-authored-by: xiaotinghe <[email protected]>
Co-authored-by: Ubuntu <[email protected]>
Co-authored-by: Holly-Max <[email protected]>
Co-authored-by: HinGwenWoong <[email protected]>
Co-authored-by: Shuai Zhang <[email protected]> | load_data_ptb | b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2 | d2l-zh | mxnet.py | 11 | 15 | https://github.com/d2l-ai/d2l-zh.git | 2 | 117 | 0 | 56 | 289 | Python | {
"docstring": "Download the PTB dataset and then load it into memory.\n\n Defined in :numref:`subsec_word2vec-minibatch-loading`",
"language": "en",
"n_whitespaces": 15,
"n_words": 13,
"vocab_size": 13
} | def load_data_ptb(batch_size, max_window_size, num_noise_words):
sentences = read_ptb()
vocab = d2l.Vocab(sentences, min_freq=10)
subsampled, counter = subsample(sentences, vocab)
corpus = [vocab[line] for line in subsampled]
all_centers, all_contexts = get_centers_and_contexts(
corpus, max_window_size)
all_negatives = get_negatives(
all_contexts, vocab, counter, num_noise_words)
dataset = gluon.data.ArrayDataset(
all_centers, all_contexts, all_negatives)
data_iter = gluon.data.DataLoader(
dataset, batch_size, shuffle=True,batchify_fn=batchify,
num_workers=d2l.get_dataloader_workers())
return data_iter, vocab
d2l.DATA_HUB['glove.6b.50d'] = (d2l.DATA_URL + 'glove.6B.50d.zip',
'0b8703943ccdb6eb788e6f091b8946e82231bc4d')
d2l.DATA_HUB['glove.6b.100d'] = (d2l.DATA_URL + 'glove.6B.100d.zip',
'cd43bfb07e44e6f27cbcc7bc9ae3d80284fdaf5a')
d2l.DATA_HUB['glove.42b.300d'] = (d2l.DATA_URL + 'glove.42B.300d.zip',
'b5116e234e9eb9076672cfeabf5469f3eec904fa')
d2l.DATA_HUB['wiki.en'] = (d2l.DATA_URL + 'wiki.en.zip',
'c1816da3821ae9f43899be655002f6c723e91b88')
|
|
88,252 | 289,104 | 33 | homeassistant/components/homekit/__init__.py | 12 | 6 | async def async_config_changed(self) -> None:
assert self.driver is not None
await self.hass.async_add_executor_job(self.driver.config_changed)
| Add support for restoring HomeKit IIDs (#79913) | async_config_changed | 3b33e0d832b238b40360383099391e2093ea05cb | core | __init__.py | 10 | 4 | https://github.com/home-assistant/core.git | 1 | 28 | 0 | 12 | 48 | Python | {
"docstring": "Call config changed which writes out the new config to disk.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 10
} | async def async_config_changed(self) -> None:
assert self.driver is not None
await self.hass.async_add_executor_job(self.driver.config_changed)
|
|
13,314 | 63,461 | 105 | .venv/lib/python3.8/site-packages/pip/_vendor/pyparsing.py | 21 | 10 | def setDebugActions(self, startAction, successAction, exceptionAction):
self.debugActions = (startAction or _defaultStartDebug | upd; format | setDebugActions | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | pyparsing.py | 8 | 6 | https://github.com/jindongwang/transferlearning.git | 4 | 36 | 0 | 18 | 54 | Python | {
"docstring": "\n Enable display of debugging messages while doing pattern matching.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
} | def setDebugActions(self, startAction, successAction, exceptionAction):
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
|
|
13,771 | 64,990 | 65 | erpnext/accounts/doctype/pricing_rule/utils.py | 99 | 34 | def get_qty_amount_data_for_cumulative(pr_doc, doc, items=None):
if items is None:
items = []
sum_qty, sum_amt = [0, 0]
doctype = doc.get("parenttype") or doc.doctype
date_field = (
"transaction_date" if frappe.get_meta(doctype).has_field("transaction_date") else "posting_date"
)
child_doctype = "{0} Item".format(doct | style: format code with black | get_qty_amount_data_for_cumulative | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | utils.py | 16 | 42 | https://github.com/frappe/erpnext.git | 7 | 245 | 0 | 72 | 406 | Python | {
"docstring": " and `tab{child_doc}`.warehouse in ({warehouses})\n\t\t\t SELECT `tab{child_doc}`.stock_qty,\n\t\t\t`tab{child_doc}`.amount\n\t\tFROM `tab{child_doc}`, `tab{parent_doc}`\n\t\tWHERE\n\t\t\t`tab{child_doc}`.parent = `tab{parent_doc}`.name and `tab{parent_doc}`.{date_field}\n\t\t\tbetween %s and %s and `tab{parent_doc}`.docstatus = 1\n\t\t\t{condition} group by `tab{child_doc}`.name\n\t",
"language": "en",
"n_whitespaces": 22,
"n_words": 28,
"vocab_size": 23
} | def get_qty_amount_data_for_cumulative(pr_doc, doc, items=None):
if items is None:
items = []
sum_qty, sum_amt = [0, 0]
doctype = doc.get("parenttype") or doc.doctype
date_field = (
"transaction_date" if frappe.get_meta(doctype).has_field("transaction_date") else "posting_date"
)
child_doctype = "{0} Item".format(doctype)
apply_on = frappe.scrub(pr_doc.get("apply_on"))
values = [pr_doc.valid_from, pr_doc.valid_upto]
condition = ""
if pr_doc.warehouse:
warehouses = get_child_warehouses(pr_doc.warehouse)
condition += .format(
child_doc=child_doctype, warehouses=",".join(["%s"] * len(warehouses))
)
values.extend(warehouses)
if items:
condition = " and `tab{child_doc}`.{apply_on} in ({items})".format(
child_doc=child_doctype, apply_on=apply_on, items=",".join(["%s"] * len(items))
)
values.extend(items)
data_set = frappe.db.sql(
.format(
parent_doc=doctype, child_doc=child_doctype, condition=condition, date_field=date_field
),
tuple(values),
as_dict=1,
)
for data in data_set:
sum_qty += data.get("stock_qty")
sum_amt += data.get("amount")
return [sum_qty, sum_amt]
|
|
69,699 | 241,795 | 132 | scipy/sparse/linalg/_isolve/utils.py | 62 | 14 | def make_system(A, M, x0, b):
A_ = A
A = aslinearoperator(A)
if A.shape[0] != A.shape[1]:
raise ValueError(f'expected square matrix, but got shape={(A.shape,)}')
N = A.shape[0]
b = asanyarray(b)
if not (b.shape == (N,1) or b.shape == (N,)):
raise ValueError(f'shapes of A {A.shape} and b {b.shape} are '
'incompatib | MAINT: sparse.linalg: Remove unnecessary operations | make_system | 5628849933f1ba002f34b88b4d3af24f68008b39 | scipy | utils.py | 13 | 51 | https://github.com/scipy/scipy.git | 17 | 379 | 0 | 48 | 194 | Python | {
"docstring": "Make a linear system Ax=b\n\n Parameters\n ----------\n A : LinearOperator\n sparse or dense matrix (or any valid input to aslinearoperator)\n M : {LinearOperator, Nones}\n preconditioner\n sparse or dense matrix (or any valid input to aslinearoperator)\n x0 : {array_like, str, None}\n initial guess to iterative method.\n ``x0 = 'Mb'`` means using the nonzero initial guess ``M @ b``.\n Default is `None`, which means using the zero initial guess.\n b : array_like\n right hand side\n\n Returns\n -------\n (A, M, x, b, postprocess)\n A : LinearOperator\n matrix of the linear system\n M : LinearOperator\n preconditioner\n x : rank 1 ndarray\n initial guess\n b : rank 1 ndarray\n right hand side\n postprocess : function\n converts the solution vector to the appropriate\n type and dimensions (e.g. (N,1) matrix)\n\n ",
"language": "en",
"n_whitespaces": 303,
"n_words": 123,
"vocab_size": 77
} | def make_system(A, M, x0, b):
A_ = A
A = aslinearoperator(A)
if A.shape[0] != A.shape[1]:
raise ValueError(f'expected square matrix, but got shape={(A.shape,)}')
N = A.shape[0]
b = asanyarray(b)
if not (b.shape == (N,1) or b.shape == (N,)):
raise ValueError(f'shapes of A {A.shape} and b {b.shape} are '
'incompatible')
if b.dtype.char not in 'fdFD':
b = b.astype('d') # upcast non-FP types to double
|
|
41,733 | 176,163 | 35 | networkx/generators/small.py | 23 | 5 | def dodecahedral_graph(create_using=None):
G = LCF_graph(20, [10, 7, 4, -4, -7, 10, -4, 7, -7, 4], 2, create_using)
G.name = "Dodecahedral Graph"
re | Docstrings for the small.py module (#5240)
* added description for the first 5 small graphs
* modified descriptions based on comment and added description for two more functions
* added doctrings to all the functions
* Minor touchups.
Co-authored-by: Ross Barnowski <[email protected]> | dodecahedral_graph | dec723f072eb997a497a159dbe8674cd39999ee9 | networkx | small.py | 10 | 4 | https://github.com/networkx/networkx.git | 1 | 51 | 0 | 18 | 74 | Python | {
"docstring": "\n Returns the Platonic Dodecahedral graph.\n\n The dodecahedral graph has 20 nodes and 30 edges. The skeleton of the\n dodecahedron forms a graph. It is one of 5 Platonic graphs [1]_.\n It can be described in LCF notation as:\n ``[10, 7, 4, -4, -7, 10, -4, 7, -7, 4]^2`` [2]_.\n\n Parameters\n ----------\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Returns\n -------\n G : networkx Graph\n Dodecahedral Graph with 20 nodes and 30 edges\n\n References\n ----------\n .. [1] https://en.wikipedia.org/wiki/Regular_dodecahedron#Dodecahedral_graph\n .. [2] https://mathworld.wolfram.com/DodecahedralGraph.html\n\n ",
"language": "en",
"n_whitespaces": 153,
"n_words": 91,
"vocab_size": 69
} | def dodecahedral_graph(create_using=None):
G = LCF_graph(20, [10, 7, 4, -4, -7, 10, -4, 7, -7, 4], 2, create_using)
G.name = "Dodecahedral Graph"
return G
|
|
99,478 | 300,618 | 58 | homeassistant/helpers/template.py | 17 | 10 | def arc_tangent(value, default=_SENTINEL):
try:
return math.atan(float(value))
except (ValueError, TypeError):
if default is _SENTINEL:
ra | Fail template functions when no default specified (#71687) | arc_tangent | 4885331509eeffe50f42d76b234996467b06170f | core | template.py | 13 | 7 | https://github.com/home-assistant/core.git | 3 | 42 | 0 | 15 | 70 | Python | {
"docstring": "Filter and function to get arc tangent of the value.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def arc_tangent(value, default=_SENTINEL):
try:
return math.atan(float(value))
except (ValueError, TypeError):
if default is _SENTINEL:
raise_no_default("atan", value)
return default
|
|
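
A minimal sketch of the function's contract. `_SENTINEL` and `raise_no_default` below are stand-ins for Home Assistant's module-level helpers (the real counterparts raise a TemplateError):

```python
import math

_SENTINEL = object()  # marker meaning "caller passed no default"

def raise_no_default(function, value):
    raise ValueError(f"{function} got invalid input {value!r} and no default")

print(arc_tangent(1.0))                   # 0.7853981633974483  (pi/4)
print(arc_tangent("oops", default=0.0))   # 0.0 -- the default swallows bad input
try:
    arc_tangent("oops")                   # no default -> the error propagates
except ValueError as err:
    print(err)
```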
81,815 | 276,983 | 220 | keras/utils/metrics_utils.py | 92 | 22 | def sparse_top_k_categorical_matches(y_true, y_pred, k=5):
reshape_matches = False
y_true = tf.convert_to_tensor(y_true)
y_pred = tf.convert_to_tensor(y_pred)
y_true_rank = y_true.shape.ndims
y_pred_rank = y_pred.shape.ndims
y_true_org_shape = tf.shape(y_true)
# Flatten y_pred to (batch_size, num_samples) and y_true to (num_samples,)
if (y_true_rank is not None) and (y_pred_rank is not None):
if y_pred_rank > 2:
y_pred = tf.reshape(y_pred, [-1, y_pred.shape[-1]])
if y_true_rank > 1:
reshape_matches = True
y_true = tf.reshape(y_true, [-1])
matches = tf.cast(
tf.math.in_top_k(
predictions=y_pred, ta | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | sparse_top_k_categorical_matches | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | metrics_utils.py | 15 | 22 | https://github.com/keras-team/keras.git | 6 | 172 | 0 | 61 | 268 | Python | {
"docstring": "Creates float Tensor, 1.0 for label-TopK_prediction match, 0.0 for mismatch.\n\n Args:\n y_true: tensor of true targets.\n y_pred: tensor of predicted targets.\n k: (Optional) Number of top elements to look at for computing accuracy.\n Defaults to 5.\n\n Returns:\n Match tensor: 1.0 for label-prediction match, 0.0 for mismatch.\n ",
"language": "en",
"n_whitespaces": 82,
"n_words": 46,
"vocab_size": 33
} | def sparse_top_k_categorical_matches(y_true, y_pred, k=5):
reshape_matches = False
y_true = tf.convert_to_tensor(y_true)
y_pred = tf.convert_to_tensor(y_pred)
y_true_rank = y_true.shape.ndims
y_pred_rank = y_pred.shape.ndims
y_true_org_shape = tf.shape(y_true)
# Flatten y_pred to (batch_size, num_samples) and y_true to (num_samples,)
if (y_true_rank is not None) and (y_pred_rank is not None):
if y_pred_rank > 2:
y_pred = tf.reshape(y_pred, [-1, y_pred.shape[-1]])
if y_true_rank > 1:
reshape_matches = True
y_true = tf.reshape(y_true, [-1])
matches = tf.cast(
tf.math.in_top_k(
predictions=y_pred, targets=tf.cast(y_true, "int32"), k=k
),
dtype=backend.floatx(),
)
# returned matches is expected to have same shape as y_true input
if reshape_matches:
return tf.reshape(matches, shape=y_true_org_shape)
return matches
|
|
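
The core of the matcher above is `tf.math.in_top_k`; a small worked example (TensorFlow only):

```python
import tensorflow as tf

y_true = tf.constant([2, 1])                 # integer labels, shape (2,)
y_pred = tf.constant([[0.1, 0.3, 0.6],       # row 0 top-2: classes 2 and 1
                      [0.3, 0.2, 0.5]])      # row 1 top-2: classes 2 and 0
matches = tf.cast(tf.math.in_top_k(predictions=y_pred, targets=y_true, k=2),
                  tf.float32)
print(matches.numpy())  # [1. 0.] -- label 2 is in row 0's top-2; label 1 is not in row 1's
```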
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs | 14,815 | 68,540 | 28 | erpnext/controllers/queries.py | 57 | 31 | def tax_account_query(doctype, txt, searchfield, start, page_len, filters):
company_currency = erpnext.get_company_currency(filters.get("company"))
def get_accounts(with_account_type_filter):
account_type_condition = ""
if with_account_type_filter:
account_type_condition = "AND account_type in %(account_types)s"
accounts = frappe.db.sql(
.format(
account_type_condition=account_type_condition,
searchfield=searchfield,
mcond=get_match_cond(doctype),
),
dict(
account_types=filters.get("account_type"),
company=filters.get("company"),
disabled=filters.get("disabled", 0),
currency=company_currency,
txt="%{}%".format(txt),
offset=start,
limit=page_len,
),
)
return accounts
tax_accounts = get_accounts(True)
if not tax_accounts:
tax_accounts = get_accounts(False)
return tax_accounts
| fix: user can select disabled accounts in taxes table | tax_account_query | a1e3ae8869194a487acccc706a381db74c4aa1ff | erpnext | queries.py | 16 | 7 | https://github.com/frappe/erpnext.git | 2 | 48 | 1 | 44 | 249 | Python | {
"docstring": "\n\t\t\tSELECT name, parent_account\n\t\t\tFROM `tabAccount`\n\t\t\tWHERE `tabAccount`.docstatus!=2\n\t\t\t\t{account_type_condition}\n\t\t\t\tAND is_group = 0\n\t\t\t\tAND company = %(company)s\n\t\t\t\tAND disabled = %(disabled)s\n\t\t\t\tAND (account_currency = %(currency)s or ifnull(account_currency, '') = '')\n\t\t\t\tAND `{searchfield}` LIKE %(txt)s\n\t\t\t\t{mcond}\n\t\t\tORDER BY idx DESC, name\n\t\t\tLIMIT %(offset)s, %(limit)s\n\t\t",
"language": "en",
"n_whitespaces": 30,
"n_words": 42,
"vocab_size": 33
} | def tax_account_query(doctype, txt, searchfield, start, page_len, filters):
company_currency = erpnext.get_company_currency(filters.get("company"))
def get_accounts(with_account_type_filter):
account_type_condition = ""
if with_account_type_filter:
account_type_condition = "AND account_type in %(account_types)s"
accounts = frappe.db.sql(
			"""
			SELECT name, parent_account
			FROM `tabAccount`
			WHERE `tabAccount`.docstatus!=2
				{account_type_condition}
				AND is_group = 0
				AND company = %(company)s
				AND disabled = %(disabled)s
				AND (account_currency = %(currency)s or ifnull(account_currency, '') = '')
				AND `{searchfield}` LIKE %(txt)s
				{mcond}
			ORDER BY idx DESC, name
			LIMIT %(offset)s, %(limit)s
		""".format(
account_type_condition=account_type_condition,
searchfield=searchfield,
mcond=get_match_cond(doctype),
),
dict(
account_types=filters.get("account_type"),
company=filters.get("company"),
disabled=filters.get("disabled", 0),
currency=company_currency,
txt="%{}%".format(txt),
offset=start,
limit=page_len,
),
)
return accounts
tax_accounts = get_accounts(True)
if not tax_accounts:
tax_accounts = get_accounts(False)
return tax_accounts
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs |
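
A hypothetical invocation, mirroring the positional contract Frappe's link-field search uses when it calls a whitelisted query (the company name and filters below are made up; this needs a Frappe site context to run):

```python
rows = tax_account_query(
    doctype="Account",
    txt="vat",
    searchfield="name",
    start=0,
    page_len=20,
    filters={"company": "Example Co", "account_type": ["Tax"], "disabled": 0},
)
# rows: (name, parent_account) tuples for enabled, non-group accounts
# matching the text and, when provided, the requested account types.
```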
16,628 | 77,101 | 196 | wagtail/images/tests/test_admin_views.py | 49 | 25 | def test_add_post_duplicate_choose_permission(self):
# Create group with access to admin and add permission.
bakers_group = Group.objects.create(name="Bakers")
access_admin_perm = Permission.objects.get(
content_type__app_label="wagtailadmin", codename="access_admin"
)
bakers_group.permissions.add(access_admin_perm)
# Create the "Bakery" Collection and grant "add" permission to the Bakers group.
root = Collection.objects.get(id=get_root_collection_id())
bak | Add duplicate detection to multiple image upload view
Add utility function to find an image's potential duplicates
Add logic to detect duplicates on multiple images upload view
Add template shown when a user is prompted to confirm a duplicate upload
Add client-side logic to confirm a duplicate upload
Add/update styles
Add tests for duplicate image uploads
Index Image file_hash field
Ensure that a user can choose an image from duplicates returned by find_image_duplicates
Use CSS classes instead of HTML elements to hide edit form on duplicate upload
Add ImagesPermissionPolicy helper to retrieve the permission policy dynamically
This allows test cases that override the base image model to pick up the corresponding permission policy, should they need it.
Remove usage of sibling selector
Use wagtail image templatetag to generate image
Renamed ImagesPermissionPolicy to ImagesPermissionPolicyGetter
Fail loudly when setting permission policy and a wrong image model is provided
Add decorator to disconnect a signal's receiver during a test execution and use it in get_image_model tests
Improve warning message on duplicate upload in multiple upload view
Show matching form when confirming a duplicate upload | test_add_post_duplicate_choose_permission | c136f461bc052cef362991458e1bd1fca37a3da9 | wagtail | test_admin_views.py | 13 | 31 | https://github.com/wagtail/wagtail.git | 1 | 221 | 0 | 40 | 176 | Python | {
"docstring": "\n When a duplicate image is added but the user doesn't have permission to choose the original image,\n the add views lets the user upload it as if it weren't a duplicate.\n ",
"language": "en",
"n_whitespaces": 53,
"n_words": 31,
"vocab_size": 25
} | def test_add_post_duplicate_choose_permission(self):
# Create group with access to admin and add permission.
bakers_group = Group.objects.create(name="Bakers")
access_admin_perm = Permission.objects.get(
content_type__app_label="wagtailadmin", codename="access_admin"
)
bakers_group.permissions.add(access_admin_perm)
# Create the "Bakery" Collection and grant "add" permission to the Bakers group.
root = Collection.objects.get(id=get_root_collection_id())
bakery_collection = root.add_child(instance=Collection(name="Bakery"))
GroupCollectionPermission.objects.create(
group=bakers_group,
collection=bakery_collection,
permission=Permission.objects.get(
content_type__app_label="wagtailimages", codename="add_image"
),
)
|
|
@proxy_napalm_wrap | 54,638 | 216,561 | 38 | salt/modules/napalm_mod.py | 17 | 10 | def netmiko_commands(*commands, **kwargs):
conn = _netmiko_conn(**kwargs)
ret = []
for cmd in commands:
ret.append(conn.send_command(cmd)) | Deprecated netmiko_conn and pyeapi_conn in napalm_mod.py as these function should not be called from the CLI | netmiko_commands | d8305bfaa7b98d898f5963b01ca75f277c266322 | salt | napalm_mod.py | 11 | 6 | https://github.com/saltstack/salt.git | 2 | 39 | 1 | 15 | 70 | Python | {
"docstring": "\n .. versionadded:: 2019.2.0\n\n Invoke one or more commands to be executed on the remote device, via Netmiko.\n Returns a list of strings, with the output from each command.\n\n commands\n A list of commands to be executed.\n\n expect_string\n Regular expression pattern to use for determining end of output.\n If left blank will default to being based on router prompt.\n\n delay_factor: ``1``\n Multiplying factor used to adjust delays (default: ``1``).\n\n max_loops: ``500``\n Controls wait time in conjunction with delay_factor. Will default to be\n based upon self.timeout.\n\n auto_find_prompt: ``True``\n Whether it should try to auto-detect the prompt (default: ``True``).\n\n strip_prompt: ``True``\n Remove the trailing router prompt from the output (default: ``True``).\n\n strip_command: ``True``\n Remove the echo of the command from the output (default: ``True``).\n\n normalize: ``True``\n Ensure the proper enter is sent at end of command (default: ``True``).\n\n use_textfsm: ``False``\n Process command output through TextFSM template (default: ``False``).\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' napalm.netmiko_commands 'show version' 'show interfaces'\n ",
"language": "en",
"n_whitespaces": 287,
"n_words": 157,
"vocab_size": 106
} | def netmiko_commands(*commands, **kwargs):
conn = _netmiko_conn(**kwargs)
ret = []
for cmd in commands:
ret.append(conn.send_command(cmd))
return ret
@proxy_napalm_wrap |
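
The docstring's CLI example, driven from Python instead — a hedged sketch using Salt's LocalClient (the minion id `edge-router1` is made up; run this on a Salt master):

```python
import salt.client

local = salt.client.LocalClient()
out = local.cmd("edge-router1", "napalm.netmiko_commands",
                ["show version", "show interfaces"])
# `out` maps minion id -> list of output strings, one per command
print(out["edge-router1"][0])
```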
19,938 | 100,464 | 194 | plugins/train/model/original.py | 58 | 16 | def decoder(self, side):
input_ = Input(shape=(8, 8, 512))
var_x = input_
var_x = UpscaleBlock(256, activation="leakyrelu")(var_x)
var_x = UpscaleBlock(128, activation="leakyrelu")(var_x)
var_x = UpscaleBlock(64, activation="leakyrelu")(var_x)
var_x = Conv2DOutput(3, 5, name=f"face_out_{side}")(var_x)
outputs = [var_x]
if self.learn_mask:
var_y = input_
var_y = UpscaleBlock(256, activation="leakyrelu")(var_y)
var_y = UpscaleBlock(128, activation="leakyrelu")(var_y)
var_y = UpscaleBlock(64, activation="leakyrelu")(var_y)
var_y = Conv2DOutput(1, 5, name=f"mask_out_{side}")(var_y)
outputs.append(var_y)
return KerasModel(input_, outputs=outputs, name=f"dec | Update all Keras Imports to be conditional (#1214)
* Remove custom keras importer
* first round keras imports fix
* launcher.py: Remove KerasFinder references
* 2nd round keras imports update (lib and extract)
* 3rd round keras imports update (train)
* remove KerasFinder from tests
* 4th round keras imports update (tests) | decoder | aa39234538a8f83e6aa2b60b8275a570e8876ac2 | faceswap | original.py | 14 | 16 | https://github.com/deepfakes/faceswap.git | 2 | 168 | 0 | 29 | 283 | Python | {
"docstring": " The original Faceswap Decoder Network.\r\n\r\n The decoders for the original model have separate weights for each side \"A\" and \"B\", so two\r\n instances are created in :func:`build_model`, one for each side.\r\n\r\n Parameters\r\n ----------\r\n side: str\r\n Either `\"a` or `\"b\"`. This is used for naming the decoder model.\r\n\r\n Returns\r\n -------\r\n :class:`keras.models.Model`\r\n The Keras decoder model. This will be called twice, once for each side.\r\n ",
"language": "en",
"n_whitespaces": 149,
"n_words": 63,
"vocab_size": 49
} | def decoder(self, side):
input_ = Input(shape=(8, 8, 512))
var_x = input_
var_x = UpscaleBlock(256, activation="leakyrelu")(var_x)
var_x = UpscaleBlock(128, activation="leakyrelu")(var_x)
var_x = UpscaleBlock(64, activation="leakyrelu")(var_x)
var_x = Conv2DOutput(3, 5, name=f"face_out_{side}")(var_x)
outputs = [var_x]
if self.learn_mask:
var_y = input_
var_y = UpscaleBlock(256, activation="leakyrelu")(var_y)
var_y = UpscaleBlock(128, activation="leakyrelu")(var_y)
var_y = UpscaleBlock(64, activation="leakyrelu")(var_y)
var_y = Conv2DOutput(1, 5, name=f"mask_out_{side}")(var_y)
outputs.append(var_y)
return KerasModel(input_, outputs=outputs, name=f"decoder_{side}")
|
|
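
A self-contained shape-flow sketch of the same decoder built from stock Keras layers — `UpscaleBlock`/`Conv2DOutput` are faceswap-specific, so `Conv2DTranspose`/`Conv2D` stand in; this is not the project's implementation:

```python
from tensorflow import keras
from tensorflow.keras import layers

inp = keras.Input(shape=(8, 8, 512))
x = inp
for filters in (256, 128, 64):                    # 8 -> 16 -> 32 -> 64 spatially
    x = layers.Conv2DTranspose(filters, 3, strides=2, padding="same")(x)
    x = layers.LeakyReLU(0.1)(x)
face_out = layers.Conv2D(3, 5, padding="same", activation="sigmoid",
                         name="face_out_a")(x)
decoder_a = keras.Model(inp, face_out, name="decoder_a")
decoder_a.summary()   # final output: (None, 64, 64, 3)
```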
17,720 | 83,758 | 382 | zerver/tests/test_subs.py | 101 | 34 | def test_users_getting_add_peer_event(self) -> None:
streams_to_sub = ["multi_user_stream"]
othello = self.example_user("othello")
cordelia = self.example_user("cordelia")
iago = self.example_user("iago")
orig_user_ids_to_subscribe = [self.test_user.id, othello.id]
self.common_subscribe_to_streams(
self.test_user,
streams_to_sub,
dict(principals=orjson.dumps(orig_user_ids_to_subscribe).decode()),
)
new_user_ids_to_subscribe = [iago.id, cordelia.id]
events: List[Mapping[str, Any]] = []
with self.tornado_redirected_to_list(events, expected_num_events=5):
self.common_subscribe_to_streams(
self.test_user,
streams_to_sub,
dict(principals=orjson.dumps(new_user_ids_to_subscribe).decode()),
)
add_peer_events = [event for event in events if event["event"].get("op") == "peer_add"]
(add_peer_event,) = add_peer_events
self.assertEqual(add_peer_event["event"]["type"], "subscription")
self.a | Correctly hyphenate “non-”.
Signed-off-by: Anders Kaseorg <[email protected]> | test_users_getting_add_peer_event | 6331a314d464f9c49a612023a5969e5d7b8e00a0 | zulip | test_subs.py | 16 | 31 | https://github.com/zulip/zulip.git | 5 | 228 | 0 | 73 | 378 | Python | {
"docstring": "\n Check users getting add_peer_event is correct\n ",
"language": "en",
"n_whitespaces": 21,
"n_words": 6,
"vocab_size": 6
} | def test_users_getting_add_peer_event(self) -> None:
streams_to_sub = ["multi_user_stream"]
othello = self.example_user("othello")
cordelia = self.example_user("cordelia")
iago = self.example_user("iago")
orig_user_ids_to_subscribe = [self.test_user.id, othello.id]
self.common_subscribe_to_streams(
self.test_user,
streams_to_sub,
dict(principals=orjson.dumps(orig_user_ids_to_subscribe).decode()),
)
new_user_ids_to_subscribe = [iago.id, cordelia.id]
events: List[Mapping[str, Any]] = []
with self.tornado_redirected_to_list(events, expected_num_events=5):
self.common_subscribe_to_streams(
self.test_user,
streams_to_sub,
dict(principals=orjson.dumps(new_user_ids_to_subscribe).decode()),
)
add_peer_events = [event for event in events if event["event"].get("op") == "peer_add"]
(add_peer_event,) = add_peer_events
self.assertEqual(add_peer_event["event"]["type"], "subscription")
self.assertEqual(add_peer_event["event"]["op"], "peer_add")
event_sent_to_ids = add_peer_event["users"]
for user_id in new_user_ids_to_subscribe:
# Make sure new users subscribed to stream is not in
# peer_add event recipient list
self.assertNotIn(user_id, event_sent_to_ids)
for old_user in orig_user_ids_to_subscribe:
# Check non-new users are in peer_add event recipient list.
self.assertIn(old_user, event_sent_to_ids)
|
|
37,343 | 158,164 | 100 | d2l/mxnet.py | 29 | 19 | def show_trace_2d(f, results):
d2l.set_figsize()
d2l.plt.plot(*zip(*results), '-o', color='#ff7f0e')
x1, x2 = d2 | [PaddlePaddle] Merge master into Paddle branch (#1186)
* change 15.2 title in chinese version (#1109)
change title ’15.2. 情感分析:使用递归神经网络‘ to ’15.2. 情感分析:使用循环神经网络‘
* Revise some semantic phrasing (#1105)
* Update r0.17.5 (#1120)
* Bump versions in installation
* line 94 typo: (“bert.mall”)->(“bert.small”) (#1129)
* line 313: "bert.mall" -> "bert.small" (#1130)
* fix: update language as native reader (#1114)
* Fix the translation of "stride" (#1115)
* Update index.md (#1118)
Revise some semantic phrasing
* Update self-attention-and-positional-encoding.md (#1133)
Following this book's translation conventions, translate "pooling" as "汇聚"
* maybe a comment false (#1149)
* maybe a little false
* maybe a little false
* A minor bug in the rcnn section (Chinese edition) (#1148)
* Update bert.md (#1137)
A typo:

# Suppose batch_size=2, num_pred_positions=3
# then batch_idx should be np.repeat( [0,1], 3 ) = [0,0,0,1,1,1]
* Update calculus.md (#1135)
* fix typo in git documentation (#1106)
* fix: Update the Chinese translation in lr-scheduler.md (#1136)
* Update lr-scheduler.md
* Update chapter_optimization/lr-scheduler.md
Co-authored-by: goldmermaid <[email protected]>
Co-authored-by: goldmermaid <[email protected]>
* fix translation for kaggle-house-price.md (#1107)
* fix translation for kaggle-house-price.md
* fix translation for kaggle-house-price.md
Signed-off-by: sunhaizhou <[email protected]>
* Update weight-decay.md (#1150)
* Update weight-decay.md
For the "choose d from k" part, a combinatorial phrasing may be easier for Chinese readers to follow.
The sentence "given k variables, the number of degrees is..." is ambiguous and reads unnaturally; it should say "the number of terms of degree d is...".
Also added a sentence explaining "hence even a small change in degree, say from $2$ to $3$, significantly increases the complexity of our model."
It explains why the complexity grows and why fine-grained tools are needed.
* Update chapter_multilayer-perceptrons/weight-decay.md
yep
Co-authored-by: goldmermaid <[email protected]>
* Update chapter_multilayer-perceptrons/weight-decay.md
yep
Co-authored-by: goldmermaid <[email protected]>
Co-authored-by: goldmermaid <[email protected]>
* Fix a spelling error (#1161)
* Update gru.md (#1152)
The key distinction between vanilla RNNs and GRUs is that the latter support gating of the hidden state.
Translation error
* Unify the function naming (#1113)
Unify naming of the function 'init_xavier()'.
* Update mlp-concise.md (#1166)
* Update mlp-concise.md
Awkward sentence
* Update environment.md
Word order issue
* Update config.ini
* fix the imprecise description (#1168)
Co-authored-by: yuande <yuande>
* fix typo in chapter_natural-language-processing-pretraining/glove.md (#1175)
* Fix some typos. (#1163)
* Update batch-norm.md (#1170)
fixing typos u->x in article
* Update linear-regression.md (#1090)
We invoke Stuart Russell and Peter Norvig who, in their classic AI text book Artificial Intelligence: A Modern Approach :cite:Russell.Norvig.2016, pointed out that
The original translation also rendered "who" literally.
* Update mlp.md (#1117)
* Update mlp.md
Revise some semantic phrasing
* Update chapter_multilayer-perceptrons/mlp.md
Co-authored-by: goldmermaid <[email protected]>
* Update chapter_multilayer-perceptrons/mlp.md
Co-authored-by: Aston Zhang <[email protected]>
Co-authored-by: goldmermaid <[email protected]>
* Correct a translation error. (#1091)
* Correct a translation error.
* Update chapter_computer-vision/image-augmentation.md
Co-authored-by: Aston Zhang <[email protected]>
* Update aws.md (#1121)
* Update aws.md
* Update chapter_appendix-tools-for-deep-learning/aws.md
Co-authored-by: Aston Zhang <[email protected]>
* Update image-augmentation.md (#1093)
* Update anchor.md (#1088)
fix a minor issue in code
* Update anchor.md
* Update image-augmentation.md
* fix typo and improve translation in chapter_linear-networks\softmax-regression.md (#1087)
* Avoid `torch.meshgrid` user warning (#1174)
Avoids the following user warning:
```python
~/anaconda3/envs/torch/lib/python3.10/site-packages/torch/functional.py:568: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at ../aten/src/ATen/native/TensorShape.cpp:2228.)
return _VF.meshgrid(tensors, **kwargs) # type: ignore[attr-defined]
```
* bump to 2.0.0-beta1
* Update sequence.md
* bump beta1 on readme
* Add latex code block background to config
* BLD: Bump python support version 3.9 (#1183)
* BLD: Bump python support version 3.9
* Remove clear and manually downgrade protobuf 4.21.4 to 3.19.4
* BLD: Bump torch and tensorflow
* Update Jenkinsfile
* Update chapter_installation/index.md
* Update chapter_installation/index.md
Co-authored-by: Aston Zhang <[email protected]>
* Update config.ini
* Update INFO.md
* Update INFO.md
* Drop mint to show code in pdf, use Inconsolata font, apply code cell color (#1187)
* resolve the conflicts
* revise from publisher (#1089)
* revise from publisher
* d2l api
* post_latex
* revise from publisher
* revise ch11
* Delete d2l-Copy1.bib
* clear cache
* rm d2lbook clear
* debug anchor
* keep original d2l doc
Co-authored-by: Ubuntu <[email protected]>
Co-authored-by: Aston Zhang <[email protected]>
Co-authored-by: Aston Zhang <[email protected]>
* Duplicated sentence (#1188)
Co-authored-by: Aston Zhang <[email protected]>
* Improve expression for chapter_preliminaries/pandas.md (#1184)
* Update pandas.md
* Improve expression
* Improve expression
* Update chapter_preliminaries/pandas.md
Co-authored-by: Aston Zhang <[email protected]>
* Improve expression for chapter_preliminaries/linear-algebra.md (#1185)

* Improve expression
* Improve code comments
* Update chapter_preliminaries/linear-algebra.md
* Update chapter_preliminaries/linear-algebra.md
* Update chapter_preliminaries/linear-algebra.md
* Update chapter_preliminaries/linear-algebra.md
Co-authored-by: Aston Zhang <[email protected]>
* Fix multibox_detection bugs
* Update d2l to 0.17.5 version
* restore older version
* Upgrade pandas
* change to python3.8
* Test warning log
* relocate warning log
* test logs filtering
* Update gru.md
* Add DeprecationWarning filter
* Test warning log
* Update attention mechanisms & computational performance
* Update multilayer perceptron& linear & convolution networks & computer vision
* Update recurrent&optimition&nlp pretraining & nlp applications
* ignore warnings
* Update index.md
* Update linear networks
* Update multilayer perceptrons&deep learning computation
* Update preliminaries
* Check and Add warning filter
* Update kaggle-cifar10.md
* Update object-detection-dataset.md
* Update ssd.md fcn.md
* Update hybridize.md
* Update hybridize.md
Signed-off-by: sunhaizhou <[email protected]>
Co-authored-by: zhou201505013 <[email protected]>
Co-authored-by: Xinwei Liu <[email protected]>
Co-authored-by: Anirudh Dagar <[email protected]>
Co-authored-by: Aston Zhang <[email protected]>
Co-authored-by: hugo_han <[email protected]>
Co-authored-by: gyro永不抽风 <[email protected]>
Co-authored-by: CanChengZheng <[email protected]>
Co-authored-by: linlin <[email protected]>
Co-authored-by: iuk <[email protected]>
Co-authored-by: yoos <[email protected]>
Co-authored-by: Mr. Justice Lawrence John Wargrave <[email protected]>
Co-authored-by: Chiyuan Fu <[email protected]>
Co-authored-by: Sunhuashan <[email protected]>
Co-authored-by: Haiker Sun <[email protected]>
Co-authored-by: Ming Liu <[email protected]>
Co-authored-by: goldmermaid <[email protected]>
Co-authored-by: silenceZheng66 <[email protected]>
Co-authored-by: Wenchao Yan <[email protected]>
Co-authored-by: Kiki2049 <[email protected]>
Co-authored-by: Krahets <[email protected]>
Co-authored-by: friedmainfunction <[email protected]>
Co-authored-by: Jameson <[email protected]>
Co-authored-by: P. Yao <[email protected]>
Co-authored-by: Yulv-git <[email protected]>
Co-authored-by: Liu,Xiao <[email protected]>
Co-authored-by: YIN, Gang <[email protected]>
Co-authored-by: Joe-HZ <[email protected]>
Co-authored-by: lybloveyou <[email protected]>
Co-authored-by: VigourJiang <[email protected]>
Co-authored-by: zxhd863943427 <[email protected]>
Co-authored-by: LYF <[email protected]>
Co-authored-by: Aston Zhang <[email protected]>
Co-authored-by: xiaotinghe <[email protected]>
Co-authored-by: Ubuntu <[email protected]>
Co-authored-by: Holly-Max <[email protected]>
Co-authored-by: HinGwenWoong <[email protected]>
Co-authored-by: Shuai Zhang <[email protected]> | show_trace_2d | b64b41d8c1ac23c43f7a4e3f9f6339d6f0012ab2 | d2l-zh | mxnet.py | 11 | 8 | https://github.com/d2l-ai/d2l-zh.git | 1 | 113 | 0 | 27 | 193 | Python | {
"docstring": "Show the trace of 2D variables during optimization.\n\n Defined in :numref:`subsec_gd-learningrate`",
"language": "en",
"n_whitespaces": 13,
"n_words": 11,
"vocab_size": 11
} | def show_trace_2d(f, results):
d2l.set_figsize()
d2l.plt.plot(*zip(*results), '-o', color='#ff7f0e')
x1, x2 = d2l.meshgrid(d2l.arange(-5.5, 1.0, 0.1),
d2l.arange(-3.0, 1.0, 0.1))
d2l.plt.contour(x1, x2, f(x1, x2), colors='#1f77b4')
d2l.plt.xlabel('x1')
d2l.plt.ylabel('x2')
d2l.DATA_HUB['airfoil'] = (d2l.DATA_URL + 'airfoil_self_noise.dat',
'76e5be1548fd8222e5074cf0faae75edff8cf93f')
|
|
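
A usage sketch in the spirit of the d2l text: trace plain gradient descent on a bowl-shaped objective (`f_2d` and `gd_step` below are illustrative, not part of the record):

```python
def f_2d(x1, x2):               # objective whose contours get drawn
    return x1 ** 2 + 2 * x2 ** 2

def gd_step(x1, x2, eta=0.1):   # one gradient-descent step on f_2d
    return x1 - eta * 2 * x1, x2 - eta * 4 * x2

results = [(-5.0, -2.0)]
for _ in range(20):
    results.append(gd_step(*results[-1]))
d2l.show_trace_2d(f_2d, results)  # contour of f_2d plus the iterate path
```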
@pytest.mark.parametrize("use_local", [True, False]) | 29,963 | 133,242 | 141 | python/ray/util/sgd/tests/test_torch_2.py | 51 | 31 | def test_dataset(ray_start_4_cpus, use_local):
model_creator = mlp_identity.model_creator
optimizer_creator = mlp_identity.optimizer_creator
dataset_creator = mlp_identity.dataset_creator
DatasetOperator = TrainingOperator.from_creators(
model_creator=model_creator,
optimizer_creator=optimizer_creator,
loss_creator=nn.MSELoss,
)
trainer = TorchTrainer(
training_operator_cls=DatasetOperator,
use_local=use_local,
num_workers=2,
)
dataset = dataset_creator()
for i in range(5):
trainer.train(dataset=dataset, num_steps=100)
x = mlp_identity.to_mat(0.5)
prediction = float(trainer.get_model()(x)[0][0])
assert 0.4 <= prediction <= 0.6
trainer.shutdown( | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | test_dataset | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | test_torch_2.py | 13 | 21 | https://github.com/ray-project/ray.git | 2 | 130 | 1 | 41 | 216 | Python | {
"docstring": "\n This test tries training the mlp_identity example. We check the accuracy of\n the model as an all inclusive way of ensuring that we are properly sharding\n and iterating over the entire dataset (instead of repeating the first set\n of points for example).\n ",
"language": "en",
"n_whitespaces": 58,
"n_words": 42,
"vocab_size": 35
} | def test_dataset(ray_start_4_cpus, use_local):
model_creator = mlp_identity.model_creator
optimizer_creator = mlp_identity.optimizer_creator
dataset_creator = mlp_identity.dataset_creator
DatasetOperator = TrainingOperator.from_creators(
model_creator=model_creator,
optimizer_creator=optimizer_creator,
loss_creator=nn.MSELoss,
)
trainer = TorchTrainer(
training_operator_cls=DatasetOperator,
use_local=use_local,
num_workers=2,
)
dataset = dataset_creator()
for i in range(5):
trainer.train(dataset=dataset, num_steps=100)
x = mlp_identity.to_mat(0.5)
prediction = float(trainer.get_model()(x)[0][0])
assert 0.4 <= prediction <= 0.6
trainer.shutdown()
@pytest.mark.parametrize("use_local", [True, False]) |
1,551 | 9,114 | 302 | parsing/dml_csr/loss/lovasz_softmax.py | 115 | 33 | def lovasz_softmax_flat(probas, labels, classes='present', weighted=None):
if probas.numel() == 0:
# only void pixels, the gradients should be 0
return probas * 0.
C = probas.size(1)
losses = []
class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
for c in class_to_sum:
fg = (labels == c).float() # foreground for class c
if (classes is 'present' and fg.sum() == 0):
continue
if C == 1:
if len(classes) > 1:
raise ValueError('Sigmoid output possible only with 1 class')
class_pred = probas[:, 0]
else:
class_pred = probas[:, c]
errors = (Variable(fg) - class_pred).abs()
errors_sorted, perm = torch.sort(errors, 0, descending=True)
perm = perm.data
fg_sorted = fg[perm]
if weighted is not None:
losses.append(wei | Create lovasz_softmax.py | lovasz_softmax_flat | db307ffb12d6ba1f8eaeeafd29ee6d4a3fd6fa97 | insightface | lovasz_softmax.py | 18 | 25 | https://github.com/deepinsight/insightface.git | 9 | 226 | 0 | 83 | 365 | Python | {
"docstring": "\n Multi-class Lovasz-Softmax loss\n probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)\n labels: [P] Tensor, ground truth labels (between 0 and C - 1)\n classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.\n ",
"language": "en",
"n_whitespaces": 67,
"n_words": 45,
"vocab_size": 39
} | def lovasz_softmax_flat(probas, labels, classes='present', weighted=None):
if probas.numel() == 0:
# only void pixels, the gradients should be 0
return probas * 0.
C = probas.size(1)
losses = []
class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
for c in class_to_sum:
fg = (labels == c).float() # foreground for class c
        if (classes == 'present' and fg.sum() == 0):
continue
if C == 1:
if len(classes) > 1:
raise ValueError('Sigmoid output possible only with 1 class')
class_pred = probas[:, 0]
else:
class_pred = probas[:, c]
errors = (Variable(fg) - class_pred).abs()
errors_sorted, perm = torch.sort(errors, 0, descending=True)
perm = perm.data
fg_sorted = fg[perm]
if weighted is not None:
losses.append(weighted[c]*torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
else:
losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
return mean(losses)
|
|
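
A toy call, assuming the module's companion helpers (`lovasz_grad`, `mean`, and the `Variable` import) are in scope:

```python
import torch

probas = torch.tensor([[0.8, 0.1, 0.1],   # 4 "pixels", 3 classes;
                       [0.2, 0.7, 0.1],   # each row sums to 1
                       [0.3, 0.3, 0.4],
                       [0.6, 0.2, 0.2]])
labels = torch.tensor([0, 1, 2, 1])
loss = lovasz_softmax_flat(probas, labels)  # mean of per-class Lovász terms
print(float(loss))
```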
1,999 | 10,924 | 182 | jina/parsers/orchestrate/runtimes/distributed.py | 53 | 16 | def mixin_distributed_feature_parser(parser):
gp = add_arg_group(parser, title='Distributed')
gp.add_argument(
'--quiet-remote-logs',
action='store_true',
default=False,
help='Do not display the streaming of remote logs on local console',
)
gp.add_argument(
'--upload-files',
type=str,
nargs='*',
metavar='FILE',
help=,
)
gp.add_argument(
| refactor: rename pod to deployment (#4230)
* refactor: rename pod to deployment
* style: fix overload and cli autocomplete
* fix: undo daemon mistake
* refactor: leftover cleanup
* fix: more test fixes
* fix: more fixes
* fix: more fixes
* fix: more fixes
* fix: more tests
* fix: fix more tests
* refactor: fix more tests
* refactor: more tests fixes
* refactor: rename pea to pod
* refactor: adjust docs
* refactor: complete pea renaming
* refactor: more fixes
* fix: pea_type in k8s yamls
* fix: adjust pod args name
* refactor: rename peapods parser folder
* fix: da init
Co-authored-by: Jina Dev Bot <[email protected]> | mixin_distributed_feature_parser | 13edc16d806fb5d77a6849551178ccc75937f25f | jina | distributed.py | 10 | 33 | https://github.com/jina-ai/jina.git | 2 | 83 | 0 | 44 | 141 | Python | {
"docstring": "Mixing in arguments required by :class:`BaseDeployment` into the given parser.\n :param parser: the parser instance to which we add arguments\n \nThe files on the host to be uploaded to the remote\nworkspace. This can be useful when your Deployment has more\nfile dependencies beyond a single YAML file, e.g.\nPython files, data files.\n\nNote,\n- currently only flatten structure is supported, which means if you upload `[./foo/a.py, ./foo/b.pp, ./bar/c.yml]`, then they will be put under the _same_ workspace on the remote, losing all hierarchies.\n- by default, `--uses` YAML file is always uploaded.\n- uploaded files are by default isolated across the runs. To ensure files are submitted to the same workspace across different runs, use `--workspace-id` to specify the workspace.\n",
"language": "en",
"n_whitespaces": 119,
"n_words": 121,
"vocab_size": 90
} | def mixin_distributed_feature_parser(parser):
gp = add_arg_group(parser, title='Distributed')
gp.add_argument(
'--quiet-remote-logs',
action='store_true',
default=False,
help='Do not display the streaming of remote logs on local console',
)
gp.add_argument(
'--upload-files',
type=str,
nargs='*',
metavar='FILE',
		help='''
The files on the host to be uploaded to the remote
workspace. This can be useful when your Deployment has more
file dependencies beyond a single YAML file, e.g.
Python files, data files.

Note,
- currently only flatten structure is supported, which means if you upload `[./foo/a.py, ./foo/b.pp, ./bar/c.yml]`, then they will be put under the _same_ workspace on the remote, losing all hierarchies.
- by default, `--uses` YAML file is always uploaded.
- uploaded files are by default isolated across the runs. To ensure files are submitted to the same workspace across different runs, use `--workspace-id` to specify the workspace.
''',
)
gp.add_argument(
'--disable-remote',
action='store_true',
default=False,
help='If set, remote pod invocation is avoided. This is used by pods created by JinaD'
if _SHOW_ALL_ARGS
else argparse.SUPPRESS,
)
|
|
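
A hedged sketch of wiring the mixin onto a bare parser (assumes jina's `add_arg_group` helper is importable; flag spellings follow the code above):

```python
import argparse

parser = argparse.ArgumentParser()
mixin_distributed_feature_parser(parser)
args = parser.parse_args(
    ["--quiet-remote-logs", "--upload-files", "app.py", "deps.yml"]
)
assert args.quiet_remote_logs is True
assert args.upload_files == ["app.py", "deps.yml"]
```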
77,473 | 263,858 | 317 | PyInstaller/depend/analysis.py | 155 | 19 | def get_bootstrap_modules():
# Import 'struct' modules to get real paths to module file names.
mod_struct = __import__('struct')
# Basic modules necessary for the bootstrap process.
loader_mods = TOC()
loaderpath = os.path.join(HOMEPATH, 'PyInstaller', 'loader')
# On some platforms (Windows, Debian/Ubuntu) '_struct' and zlib modules are built-in modules (linked statically)
# and thus does not have attribute __file__. 'struct' module is required for reading Python bytecode from
# executable. 'zlib' is required to decompress this bytecode.
for mod_name in ['_struct', 'zlib']:
mod = __import__(mod_name) # C extension.
if hasattr(mod, '__file__'):
mod_file = os.path.abspath(mod.__file__)
if os.path.basename(os.path.dirname(mod_file)) == 'lib-dynload':
# Divert extensions originating from python's lib-dynload directory, to match behavior of #5604.
mod_name = os.path.join('lib-dynload', mod_name)
loader_mods.append((mod_name, mod_file, 'EXTEN | utils: remove compile_py_files helper
The only remaining use is in `PYZ.__init__`, and that can be
replaced with a loop that uses the new `compile_pymodule` helper.
This change, however, requires `get_boostrap_modules()` helper
from `PyInstaller.depend˙ to return paths to source `.py`
files instead of non-existing `.pyc` files (the old
`compile_py_files` helper went to great lengths to convert
these back to source file names...). | get_bootstrap_modules | 83193a1897232e133966d15e30758a149de50407 | pyinstaller | analysis.py | 15 | 20 | https://github.com/pyinstaller/pyinstaller.git | 4 | 216 | 0 | 116 | 372 | Python | {
"docstring": "\n Get TOC with the bootstrapping modules and their dependencies.\n :return: TOC with modules\n ",
"language": "en",
"n_whitespaces": 23,
"n_words": 13,
"vocab_size": 10
} | def get_bootstrap_modules():
# Import 'struct' modules to get real paths to module file names.
mod_struct = __import__('struct')
# Basic modules necessary for the bootstrap process.
loader_mods = TOC()
loaderpath = os.path.join(HOMEPATH, 'PyInstaller', 'loader')
# On some platforms (Windows, Debian/Ubuntu) '_struct' and zlib modules are built-in modules (linked statically)
# and thus does not have attribute __file__. 'struct' module is required for reading Python bytecode from
# executable. 'zlib' is required to decompress this bytecode.
for mod_name in ['_struct', 'zlib']:
mod = __import__(mod_name) # C extension.
if hasattr(mod, '__file__'):
mod_file = os.path.abspath(mod.__file__)
if os.path.basename(os.path.dirname(mod_file)) == 'lib-dynload':
# Divert extensions originating from python's lib-dynload directory, to match behavior of #5604.
mod_name = os.path.join('lib-dynload', mod_name)
loader_mods.append((mod_name, mod_file, 'EXTENSION'))
# NOTE:These modules should be kept simple without any complicated dependencies.
loader_mods += [
('struct', os.path.abspath(mod_struct.__file__), 'PYMODULE'),
('pyimod01_os_path', os.path.join(loaderpath, 'pyimod01_os_path.py'), 'PYMODULE'),
('pyimod02_archive', os.path.join(loaderpath, 'pyimod02_archive.py'), 'PYMODULE'),
('pyimod03_importers', os.path.join(loaderpath, 'pyimod03_importers.py'), 'PYMODULE'),
('pyimod04_ctypes', os.path.join(loaderpath, 'pyimod04_ctypes.py'), 'PYMODULE'),
('pyiboot01_bootstrap', os.path.join(loaderpath, 'pyiboot01_bootstrap.py'), 'PYSOURCE'),
]
return loader_mods
|
|
50,544 | 203,823 | 147 | django/contrib/gis/db/backends/postgis/adapter.py | 41 | 7 | def getquoted(self):
| Refs #33476 -- Reformatted code with Black. | getquoted | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | adapter.py | 12 | 8 | https://github.com/django/django.git | 3 | 48 | 0 | 36 | 81 | Python | {
"docstring": "\n Return a properly quoted string for use in PostgreSQL/PostGIS.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
} | def getquoted(self):
if self.is_geometry:
# Psycopg will figure out whether to use E'\\000' or '\000'.
return b"%s(%s)" % (
b"ST_GeogFromWKB" if self.geography else b"ST_GeomFromEWKB",
self._adapter.getquoted(),
)
else:
# For rasters, add explicit type cast to WKB string.
return b"'%s'::raster" % self.ewkb.encode()
|
|
20,112 | 100,650 | 331 | scripts/extract.py | 90 | 21 | def _set_skip_list(self) -> None:
if self._skip_num == 1 and not self._alignments.data:
logger.debug("No frames to be skipped")
return
skip_list = []
for idx, filename in enumerate(self._images.file_list):
if idx % self._skip_num != 0:
logger.trace("Adding image '%s' to skip list due to extract_every_n = %s",
filename, self._skip_num)
skip_list.append | bugfix: extract - stop progress bar from going over max value | _set_skip_list | 0d23714875f81ddabdbe8f4e40bef6e5f29eeb19 | faceswap | extract.py | 13 | 25 | https://github.com/deepfakes/faceswap.git | 7 | 142 | 0 | 66 | 236 | Python | {
"docstring": " Add the skip list to the image loader\n\n Checks against `extract_every_n` and the existence of alignments data (can exist if\n `skip_existing` or `skip_existing_faces` has been provided) and compiles a list of frame\n indices that should not be processed, providing these to :class:`lib.image.ImagesLoader`.\n ",
"language": "en",
"n_whitespaces": 71,
"n_words": 42,
"vocab_size": 36
} | def _set_skip_list(self) -> None:
if self._skip_num == 1 and not self._alignments.data:
logger.debug("No frames to be skipped")
return
skip_list = []
for idx, filename in enumerate(self._images.file_list):
if idx % self._skip_num != 0:
logger.trace("Adding image '%s' to skip list due to extract_every_n = %s",
filename, self._skip_num)
skip_list.append(idx)
# Items may be in the alignments file if skip-existing[-faces] is selected
elif os.path.basename(filename) in self._alignments.data:
self._existing_count += 1
logger.trace("Removing image: '%s' due to previously existing", filename)
skip_list.append(idx)
if self._existing_count != 0:
logger.info("Skipping %s frames due to skip_existing/skip_existing_faces.",
self._existing_count)
logger.debug("Adding skip list: %s", skip_list)
self._images.add_skip_list(skip_list)
|
|
11,947 | 59,781 | 61 | tests/conftest.py | 20 | 12 | def caplog(caplog):
config = setup_logging()
for name, logg | Update logging setup to support incremental configuration (#7569) | caplog | 8ac2498a0203d3ccb9070d30d7b3a0c475afab92 | prefect | conftest.py | 12 | 7 | https://github.com/PrefectHQ/prefect.git | 3 | 54 | 0 | 19 | 94 | Python | {
"docstring": "\n Overrides caplog to apply to all of our loggers that do not propagate and\n consequently would not be captured by caplog.\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 21,
"vocab_size": 19
} | def caplog(caplog):
config = setup_logging()
for name, logger_config in config["loggers"].items():
if not logger_config.get("propagate", True):
logger = get_logger(name)
logger.handlers.append(caplog.handler)
yield caplog
|
|
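
With the override above in place, a test can assert on records emitted by Prefect's non-propagating loggers; a sketch (the logger name is illustrative):

```python
import logging

def test_warning_is_captured(caplog):
    logger = get_logger("prefect.flow_runs")
    with caplog.at_level(logging.WARNING):
        logger.warning("late by 3 seconds")
    assert "late by 3 seconds" in caplog.text
```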
20,685 | 101,266 | 413 | tools/manual/faceviewer/viewport.py | 57 | 34 | def _show_mesh(self, mesh_ids, face_index, detected_face, top_left):
state = "normal" if (self._tk_vars["selected_editor"].get() != "Mask" or
self._optional_annotations["mesh"]) else "hidden"
kwargs = dict(polygon=dict(fill="", width=2, outline=self._canvas.control_colors["Mesh"]),
line=dict(fill=self._canvas.control_colors["Mesh"], width=2))
edi | lib.align updates:
- alignments.py
- Add typed dicts for imported alignments
- Explicitly check for presence of thumb value in alignments dict
- linting
- detected_face.py
- Typing
- Linting
- Legacy support for pre-aligned face
- Update dependencies to new property names | _show_mesh | 5e73437be47f2410439a3c6716de96354e6a0c94 | faceswap | viewport.py | 16 | 17 | https://github.com/deepfakes/faceswap.git | 6 | 212 | 0 | 49 | 340 | Python | {
"docstring": " Display the mesh annotation for the given face, at the given location.\n\n Parameters\n ----------\n mesh_ids: dict\n Dictionary containing the `polygon` and `line` tkinter canvas identifiers that make up\n the mesh for the given face\n face_index: int\n The face index within the frame for the given face\n detected_face: :class:`~lib.align.DetectedFace`\n The detected face object that contains the landmarks for generating the mesh\n top_left: tuple\n The (x, y) top left co-ordinates of the mesh's bounding box\n ",
"language": "en",
"n_whitespaces": 178,
"n_words": 73,
"vocab_size": 49
} | def _show_mesh(self, mesh_ids, face_index, detected_face, top_left):
state = "normal" if (self._tk_vars["selected_editor"].get() != "Mask" or
self._optional_annotations["mesh"]) else "hidden"
kwargs = dict(polygon=dict(fill="", width=2, outline=self._canvas.control_colors["Mesh"]),
line=dict(fill=self._canvas.control_colors["Mesh"], width=2))
edited = (self._tk_vars["edited"].get() and
self._tk_vars["selected_editor"].get() not in ("Mask", "View"))
landmarks = self._viewport.get_landmarks(self.frame_index,
face_index,
detected_face,
top_left,
edited)
for key, kwarg in kwargs.items():
for idx, mesh_id in enumerate(mesh_ids[key]):
self._canvas.coords(mesh_id, *landmarks[key][idx].flatten())
self._canvas.itemconfig(mesh_id, state=state, **kwarg)
self._canvas.addtag_withtag(f"active_mesh_{key}", mesh_id)
|
|
33,279 | 144,666 | 1,143 | python/ray/serve/deployment_state.py | 248 | 30 | def _get_curr_status(self) -> Tuple[DeploymentStatusInfo, bool]:
# TODO(edoakes): we could make this more efficient in steady-state by
# having a "healthy" flag that gets flipped if an update or replica
# failure happens.
target_version = self._target_version
target_replica_count = self._target_replicas
all_running_replica_cnt = self._replicas.count(states=[ReplicaState.RUNNING])
running_at_target_version_replica_cnt = self._replicas.count(
states=[ReplicaState.RUNNING], version=target_version
)
failed_to_start_count = self._replica_constructor_retry_counter
failed_to_start_threshold = min(
MAX_DEPLOYMENT_CONSTRUCTOR_RETRY_CO | [serve] Introduce DeploymentStatus, poll for statuses instead of using async goals (#22121) | _get_curr_status | 48adb6f7bb335b28fb0fb0d1190bd6c5dfc8ddfa | ray | deployment_state.py | 18 | 66 | https://github.com/ray-project/ray.git | 8 | 216 | 0 | 151 | 356 | Python | {
"docstring": "Get the current deployment status.\n\n Checks the difference between the target vs. running replica count for\n the target version.\n\n TODO(edoakes): we should report the status as FAILED if replicas are\n repeatedly failing health checks. Need a reasonable heuristic here.\n\n Returns:\n (DeploymentStatusInfo, was_deleted)\n ",
"language": "en",
"n_whitespaces": 95,
"n_words": 42,
"vocab_size": 37
} | def _get_curr_status(self) -> Tuple[DeploymentStatusInfo, bool]:
# TODO(edoakes): we could make this more efficient in steady-state by
# having a "healthy" flag that gets flipped if an update or replica
# failure happens.
target_version = self._target_version
target_replica_count = self._target_replicas
all_running_replica_cnt = self._replicas.count(states=[ReplicaState.RUNNING])
running_at_target_version_replica_cnt = self._replicas.count(
states=[ReplicaState.RUNNING], version=target_version
)
failed_to_start_count = self._replica_constructor_retry_counter
failed_to_start_threshold = min(
MAX_DEPLOYMENT_CONSTRUCTOR_RETRY_COUNT, target_replica_count * 3
)
# Got to make a call to complete current deploy() goal after
# start failure threshold reached, while we might still have
# pending replicas in current goal.
if (
failed_to_start_count >= failed_to_start_threshold
and failed_to_start_threshold != 0
):
if running_at_target_version_replica_cnt > 0:
# At least one RUNNING replica at target state, partial
# success; We can stop tracking constructor failures and
# leave it to the controller to fully scale to target
# number of replicas and only return as completed once
# reached target replica count
self._replica_constructor_retry_counter = -1
else:
return (
DeploymentStatusInfo(
status=DeploymentStatus.FAILED,
message=(
"The Deployment constructor failed "
f"{failed_to_start_count} times in a row. See "
"logs for details."
),
),
False,
)
# If we have pending ops, the current goal is *not* ready.
if (
self._replicas.count(
states=[
ReplicaState.STARTING,
ReplicaState.UPDATING,
ReplicaState.RECOVERING,
ReplicaState.STOPPING,
]
)
== 0
):
# Check for deleting.
if target_replica_count == 0 and all_running_replica_cnt == 0:
return DeploymentStatusInfo(status=DeploymentStatus.UPDATING), True
# Check for a non-zero number of deployments.
elif target_replica_count == running_at_target_version_replica_cnt:
return DeploymentStatusInfo(status=DeploymentStatus.RUNNING), False
return (
DeploymentStatusInfo(
status=DeploymentStatus.UPDATING,
message=(
f"Running replicas of target version: "
f"{running_at_target_version_replica_cnt}, target "
"replicas: {target_replica_count}"
),
),
False,
)
|
|
118,325 | 322,996 | 63 | examples/model_interpretation/task/transformer.py | 12 | 11 | def generate_square_subsequent_mask(self, length):
return paddle.tensor.triu(
(paddle.ones(
(lengt | Add NLP model interpretation (#1752)
* upload NLP interpretation
* fix problems and relocate project
* remove abandoned picture
* remove abandoned picture
* fix dead link in README
* fix dead link in README
* fix code style problems
* fix CR round 1
* remove .gitkeep files
* fix code style
* fix file encoding problem
* fix code style
* delete duplicated files due to directory rebuild
* fix CR round 2
* fix code style
* fix ernie tokenizer
* fix code style
* fix problem from CR round 1
* fix bugs
* fix README
* remove duplicated files
* deal with diff of old and new tokenizer results
* fix CR round 4
* fix code style
* add missing dependence
* fix broken import path
* move some data file to cloud
* MRC upper case to lower case
Co-authored-by: Zeyu Chen <[email protected]>
Co-authored-by: binlinquge <xxx>
Co-authored-by: Guo Sheng <[email protected]> | generate_square_subsequent_mask | 93cae49c0c572b5c1ac972759140fbe924b0374d | PaddleNLP | transformer.py | 14 | 5 | https://github.com/PaddlePaddle/PaddleNLP.git | 1 | 43 | 0 | 12 | 67 | Python | {
"docstring": "\n Generate a square mask for the sequence. The mask ensures that the\n predictions for position i can depend only on the known outputs at\n positions less than i.\n\n Parameters:\n length (int|Tensor): The length of sequence.\n\n Returns:\n Tensor: Generated square mask according to the given length.\n\n Examples:\n .. code-block:: python\n\n import paddle\n from paddle.nn.layer.transformer import Transformer\n length = 5\n d_model, n_head, dim_feedforward = 8, 4, 64\n transformer_paddle = Transformer(\n d_model, n_head, dim_feedforward=dim_feedforward)\n mask = transformer_paddle.generate_square_subsequent_mask(length)\n print(mask)\n\n # [[ 0. -inf -inf -inf -inf]\n # [ 0. 0. -inf -inf -inf]\n # [ 0. 0. 0. -inf -inf]\n # [ 0. 0. 0. 0. -inf]\n # [ 0. 0. 0. 0. 0.]]\n\n ",
"language": "en",
"n_whitespaces": 417,
"n_words": 110,
"vocab_size": 64
} | def generate_square_subsequent_mask(self, length):
return paddle.tensor.triu(
(paddle.ones(
(length, length), dtype=paddle.get_default_dtype()) * -np.inf),
1)
|
|
12,265 | 60,728 | 118 | .venv/lib/python3.8/site-packages/pip/_internal/index/package_finder.py | 37 | 15 | def find_requirement(self, req, upgrade):
# type: (InstallRequirement, bool) -> Optional[InstallationCandidate]
hashes = req.hashes(trust_internet=False)
best_candidate_result = self.find_best_candidate(
req.name, specifier=req.specifier, hashes=hashes,
)
best_candidate = best_candidate_result.best_candidate
| upd; format | find_requirement | f638f5d0e6c8ebed0e69a6584bc7f003ec646580 | transferlearning | package_finder.py | 12 | 55 | https://github.com/jindongwang/transferlearning.git | 11 | 214 | 0 | 30 | 106 | Python | {
"docstring": "Try to find a Link matching req\n\n Expects req, an InstallRequirement and upgrade, a boolean\n Returns a InstallationCandidate if found,\n Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise\n ",
"language": "en",
"n_whitespaces": 53,
"n_words": 25,
"vocab_size": 23
} | def find_requirement(self, req, upgrade):
# type: (InstallRequirement, bool) -> Optional[InstallationCandidate]
hashes = req.hashes(trust_internet=False)
best_candidate_result = self.find_best_candidate(
req.name, specifier=req.specifier, hashes=hashes,
)
best_candidate = best_candidate_result.best_candidate
installed_version = None # type: Optional[_BaseVersion]
if req.satisfied_by is not None:
installed_version = parse_version(req.satisfied_by.version)
|
|
3,317 | 20,307 | 54 | pipenv/patched/notpip/_vendor/pygments/formatters/html.py | 11 | 8 | def wrap(self, source, outfile):
if s | check point progress on only bringing in pip==22.0.4 (#4966)
* vendor in pip==22.0.4
* updating vendor packaging version
* update pipdeptree to fix pipenv graph with new version of pip.
* Vendoring of pip-shims 0.7.0
* Vendoring of requirementslib 1.6.3
* Update pip index safety restrictions patch for pip==22.0.4
* Update patches
* exclude pyptoject.toml from black to see if that helps.
* Move this part of the hash collection back to the top (like prior implementation) because it affects the outcome of this test now in pip 22.0.4 | wrap | f3166e673fe8d40277b804d35d77dcdb760fc3b3 | pipenv | html.py | 13 | 5 | https://github.com/pypa/pipenv.git | 2 | 46 | 0 | 10 | 75 | Python | {
"docstring": "\n Wrap the ``source``, which is a generator yielding\n individual lines, in custom generators. See docstring\n for `format`. Can be overridden.\n ",
"language": "en",
"n_whitespaces": 49,
"n_words": 20,
"vocab_size": 20
} | def wrap(self, source, outfile):
if self.wrapcode:
return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
else:
return self._wrap_div(self._wrap_pre(source))
|
|
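
A subclassing sketch of the `wrap` hook against stock Pygments. Note the vendored copy above takes `(source, outfile)` while Pygments >= 2.12 drops `outfile`; defaulting it keeps the override compatible with both:

```python
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

class DivOnlyFormatter(HtmlFormatter):
    def wrap(self, source, outfile=None):
        # keep the outer <div>, drop the <pre> wrapper
        return self._wrap_div(source)

print(highlight("print('hi')", PythonLexer(), DivOnlyFormatter()))
```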
@frappe.whitelist() | 14,035 | 65,846 | 6 | erpnext/education/api.py | 14 | 8 | def get_assessment_criteria(course):
return frappe.get_all(
"Course Assessment Criteria",
fields=["assessment_criteria", "weightage"],
filt | style: format code with black | get_assessment_criteria | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | api.py | 11 | 7 | https://github.com/frappe/erpnext.git | 1 | 34 | 1 | 14 | 72 | Python | {
"docstring": "Returns Assessmemt Criteria and their Weightage from Course Master.\n\n\t:param Course: Course\n\t",
"language": "en",
"n_whitespaces": 10,
"n_words": 12,
"vocab_size": 11
} | def get_assessment_criteria(course):
return frappe.get_all(
"Course Assessment Criteria",
fields=["assessment_criteria", "weightage"],
filters={"parent": course},
order_by="idx",
)
@frappe.whitelist() |
54,316 | 216,002 | 379 | salt/modules/mount.py | 93 | 33 | def rm_filesystems(name, device, config="/etc/filesystems"):
modified = False
view_lines = []
if "AIX" not in __grains__["kernel"]:
return modified
criteria = _FileSystemsEntry(name=name, dev=device)
try:
fsys_filedict = _filesystems(config, False)
for fsys_view in fsys_filedict.items():
try:
if criteria.match(fsys_view):
modified = True
else:
view_lines.append(fsys_view)
except _FileSystemsEntry.ParseError:
view_lines.append(fsys_view)
except OSError as exc:
raise CommandExecutionError("Couldn't read from {}: {}".format(config, exc))
if modified:
try:
with salt.utils.files.fopen(config, "wb") as ofile:
for fsys_view in view_lines:
entry = fsys_view[1]
list_strgs = _FileSystemsEntry.dict_to_list_lines(entry)
ofile.writelines(salt.utils.data.encode(list_strgs))
except OSError as exc:
raise CommandExecutionError("Couldn't write to {}: {}".format(config, exc))
except Exception as exc:
raise CommandExecutionError("rm_filesystems error exception {exc}")
return modified
| Convert Py 2'isms to Python 3, and add tests for set_filesystems on AIX | rm_filesystems | 9354c15e0818715d055242d14b1308643a6918d7 | salt | mount.py | 19 | 30 | https://github.com/saltstack/salt.git | 10 | 194 | 0 | 59 | 327 | Python | {
"docstring": "\n .. versionadded:: 2018.3.3\n\n Remove the mount point from the filesystems\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' mount.rm_filesystems /mnt/foo /dev/sdg\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 20,
"vocab_size": 18
} | def rm_filesystems(name, device, config="/etc/filesystems"):
modified = False
view_lines = []
if "AIX" not in __grains__["kernel"]:
return modified
criteria = _FileSystemsEntry(name=name, dev=device)
try:
fsys_filedict = _filesystems(config, False)
for fsys_view in fsys_filedict.items():
try:
if criteria.match(fsys_view):
modified = True
else:
view_lines.append(fsys_view)
except _FileSystemsEntry.ParseError:
view_lines.append(fsys_view)
except OSError as exc:
raise CommandExecutionError("Couldn't read from {}: {}".format(config, exc))
if modified:
try:
with salt.utils.files.fopen(config, "wb") as ofile:
for fsys_view in view_lines:
entry = fsys_view[1]
list_strgs = _FileSystemsEntry.dict_to_list_lines(entry)
ofile.writelines(salt.utils.data.encode(list_strgs))
except OSError as exc:
raise CommandExecutionError("Couldn't write to {}: {}".format(config, exc))
except Exception as exc:
raise CommandExecutionError("rm_filesystems error exception {exc}")
return modified
|
|
42,004 | 176,622 | 87 | networkx/generators/classic.py | 29 | 14 | def complete_graph(n, create_using=None):
_, nodes = n
G = empty_graph(nodes, create_using)
if len(nodes) > 1:
if G.is_directed():
edges = itertools.permutations(nodes, 2)
else:
edges = itertools.combinations(nodes, 2)
G.add_edges_from(edges)
return G
| Adjust the usage of nodes_or_number decorator (#5599)
* recorrect typo in decorators.py
* Update tests to show troubles in current code
* fix troubles with usage of nodes_or_number
* fix typo
* remove nodes_or_number where that makes sense
* Reinclude nodes_or_numbers and add some tests for nonstandard usage
* fix typo

* hopefully final tweaks (no behavior changes)
* Update test_classic.py
Co-authored-by: Jarrod Millman <[email protected]> | complete_graph | de1d00f20e0bc14f1cc911b3486e50225a8fa168 | networkx | classic.py | 13 | 10 | https://github.com/networkx/networkx.git | 3 | 68 | 0 | 22 | 110 | Python | {
"docstring": "Return the complete graph `K_n` with n nodes.\n\n A complete graph on `n` nodes means that all pairs\n of distinct nodes have an edge connecting them.\n\n Parameters\n ----------\n n : int or iterable container of nodes\n If n is an integer, nodes are from range(n).\n If n is a container of nodes, those nodes appear in the graph.\n create_using : NetworkX graph constructor, optional (default=nx.Graph)\n Graph type to create. If graph instance, then cleared before populated.\n\n Examples\n --------\n >>> G = nx.complete_graph(9)\n >>> len(G)\n 9\n >>> G.size()\n 36\n >>> G = nx.complete_graph(range(11, 14))\n >>> list(G.nodes())\n [11, 12, 13]\n >>> G = nx.complete_graph(4, nx.DiGraph())\n >>> G.is_directed()\n True\n\n ",
"language": "en",
"n_whitespaces": 186,
"n_words": 106,
"vocab_size": 76
} | def complete_graph(n, create_using=None):
_, nodes = n
G = empty_graph(nodes, create_using)
if len(nodes) > 1:
if G.is_directed():
edges = itertools.permutations(nodes, 2)
else:
edges = itertools.combinations(nodes, 2)
G.add_edges_from(edges)
return G
|
|
76,967 | 261,735 | 89 | sklearn/pipeline.py | 29 | 16 | def fit_predict(self, X, y=None, **fit_params):
self._validate_params()
fit_params_steps = self._check_fit_params(**fit_params)
Xt = self._fit(X, y, **fit_params_steps)
fit_params_last_step = fit_params_steps[self.steps[-1][0]]
with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)):
y_pred = self.steps[-1][1].fit_predict(Xt, y, **fit_params_last_step)
r | MAINT validate parameters of Pipeline (#25133) | fit_predict | 754bd5245aa46b89a1d686a3326c2b853012ff3e | scikit-learn | pipeline.py | 15 | 8 | https://github.com/scikit-learn/scikit-learn.git | 1 | 101 | 0 | 24 | 159 | Python | {
"docstring": "Transform the data, and apply `fit_predict` with the final estimator.\n\n Call `fit_transform` of each transformer in the pipeline. The\n transformed data are finally passed to the final estimator that calls\n `fit_predict` method. Only valid if the final estimator implements\n `fit_predict`.\n\n Parameters\n ----------\n X : iterable\n Training data. Must fulfill input requirements of first step of\n the pipeline.\n\n y : iterable, default=None\n Training targets. Must fulfill label requirements for all steps\n of the pipeline.\n\n **fit_params : dict of string -> object\n Parameters passed to the ``fit`` method of each step, where\n each parameter name is prefixed such that parameter ``p`` for step\n ``s`` has key ``s__p``.\n\n Returns\n -------\n y_pred : ndarray\n Result of calling `fit_predict` on the final estimator.\n ",
"language": "en",
"n_whitespaces": 297,
"n_words": 118,
"vocab_size": 79
} | def fit_predict(self, X, y=None, **fit_params):
self._validate_params()
fit_params_steps = self._check_fit_params(**fit_params)
Xt = self._fit(X, y, **fit_params_steps)
fit_params_last_step = fit_params_steps[self.steps[-1][0]]
with _print_elapsed_time("Pipeline", self._log_message(len(self.steps) - 1)):
y_pred = self.steps[-1][1].fit_predict(Xt, y, **fit_params_last_step)
return y_pred
|
|
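
A runnable usage example: the final pipeline step must itself expose `fit_predict` (e.g. `KMeans`):

```python
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

X, _ = make_blobs(n_samples=30, centers=3, random_state=0)
pipe = make_pipeline(StandardScaler(),
                     KMeans(n_clusters=3, n_init=10, random_state=0))
labels = pipe.fit_predict(X)  # scaler fit_transforms, then KMeans.fit_predict
assert labels.shape == (30,)
```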
@pytest.mark.issue(5918)
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) | 24,386 | 111,358 | 205 | spacy/tests/pipeline/test_entity_ruler.py | 94 | 27 | def test_issue4849(entity_ruler_factory):
nlp = English()
patterns = [
{"label": "PERSON", "pattern": "joe biden", "id": "joe-biden"},
{"label": "PERSON", "pattern": "bernie sanders", "id": "bernie-sanders"},
]
ruler = nlp.add_pipe(
entity_ruler_factory,
name="entity_ruler",
config={"phrase_matcher_attr": "LOWER"},
)
ruler.add_patterns(patterns)
text =
# USING 1 PROCESS
count_ents = 0
for doc in nlp.pipe([text], n_process=1):
count_ents += len([ent for ent in doc.ents if ent.ent_id > 0])
asser | Add SpanRuler component (#9880)
* Add SpanRuler component
Add a `SpanRuler` component similar to `EntityRuler` that saves a list
of matched spans to `Doc.spans[spans_key]`. The matches from the token
and phrase matchers are deduplicated and sorted before assignment but
are not otherwise filtered.
* Update spacy/pipeline/span_ruler.py
Co-authored-by: Sofie Van Landeghem <[email protected]>
* Fix cast
* Add self.key property
* Use number of patterns as length
* Remove patterns kwarg from init
* Update spacy/tests/pipeline/test_span_ruler.py
Co-authored-by: Sofie Van Landeghem <[email protected]>
* Add options for spans filter and setting to ents
* Add `spans_filter` option as a registered function
* Make `spans_key` optional and if `None`, set to `doc.ents` instead of
`doc.spans[spans_key]`.
* Update and generalize tests
* Add test for setting doc.ents, fix key property type
* Fix typing
* Allow independent doc.spans and doc.ents
* If `spans_key` is set, set `doc.spans` with `spans_filter`.
* If `annotate_ents` is set, set `doc.ents` with `ents_filter`.
* Use `util.filter_spans` by default as `ents_filter`.
* Use a custom warning if the filter does not work for `doc.ents`.
* Enable use of SpanC.id in Span
* Support id in SpanRuler as Span.id
* Update types
* `id` can only be provided as string (already by `PatternType`
definition)
* Update all uses of Span.id/ent_id in Doc
* Rename Span id kwarg to span_id
* Update types and docs
* Add ents filter to mimic EntityRuler overwrite_ents
* Refactor `ents_filter` to take `entities, spans` args for more
filtering options
* Give registered filters more descriptive names
* Allow registered `filter_spans` filter
(`spacy.first_longest_spans_filter.v1`) to take any number of
`Iterable[Span]` objects as args so it can be used for spans filter
or ents filter
* Implement future entity ruler as span ruler
Implement a compatible `entity_ruler` as `future_entity_ruler` using
`SpanRuler` as the underlying component:
* Add `sort_key` and `sort_reverse` to allow the sorting behavior to be
customized. (Necessary for the same sorting/filtering as in
`EntityRuler`.)
* Implement `overwrite_overlapping_ents_filter` and
`preserve_existing_ents_filter` to support
`EntityRuler.overwrite_ents` settings.
* Add `remove_by_id` to support `EntityRuler.remove` functionality.
* Refactor `entity_ruler` tests to parametrize all tests to test both
`entity_ruler` and `future_entity_ruler`
* Implement `SpanRuler.token_patterns` and `SpanRuler.phrase_patterns`
properties.
Additional changes:
* Move all config settings to top-level attributes to avoid duplicating
settings in the config vs. `span_ruler/cfg`. (Also avoids a lot of
casting.)
* Format
* Fix filter make method name
* Refactor to use same error for removing by label or ID
* Also provide existing spans to spans filter
* Support ids property
* Remove token_patterns and phrase_patterns
* Update docstrings
* Add span ruler docs
* Fix types
* Apply suggestions from code review
Co-authored-by: Sofie Van Landeghem <[email protected]>
* Move sorting into filters
* Check for all tokens in seen tokens in entity ruler filters
* Remove registered sort key
* Set Token.ent_id in a backwards-compatible way in Doc.set_ents
* Remove sort options from API docs
* Update docstrings
* Rename entity ruler filters
* Fix and parameterize scoring
* Add id to Span API docs
* Fix typo in API docs
* Include explicit labeled=True for scorer
Co-authored-by: Sofie Van Landeghem <[email protected]> | test_issue4849 | a322d6d5f2f85c2da6cded4fcd6143d41b5a9e96 | spaCy | test_entity_ruler.py | 16 | 25 | https://github.com/explosion/spaCy.git | 8 | 166 | 1 | 56 | 313 | Python | {
"docstring": "\n The left is starting to take aim at Democratic front-runner Joe Biden.\n Sen. Bernie Sanders joined in her criticism: \"There is no 'middle ground' when it comes to climate policy.\"\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 30,
"vocab_size": 28
} | def test_issue4849(entity_ruler_factory):
nlp = English()
patterns = [
{"label": "PERSON", "pattern": "joe biden", "id": "joe-biden"},
{"label": "PERSON", "pattern": "bernie sanders", "id": "bernie-sanders"},
]
ruler = nlp.add_pipe(
entity_ruler_factory,
name="entity_ruler",
config={"phrase_matcher_attr": "LOWER"},
)
ruler.add_patterns(patterns)
text =
# USING 1 PROCESS
count_ents = 0
for doc in nlp.pipe([text], n_process=1):
count_ents += len([ent for ent in doc.ents if ent.ent_id > 0])
assert count_ents == 2
# USING 2 PROCESSES
    if isinstance(get_current_ops(), NumpyOps):
count_ents = 0
for doc in nlp.pipe([text], n_process=2):
count_ents += len([ent for ent in doc.ents if ent.ent_id > 0])
assert count_ents == 2
@pytest.mark.issue(5918)
@pytest.mark.parametrize("entity_ruler_factory", ENTITY_RULERS) |
77,757 | 264,576 | 93 | netbox/netbox/api/viewsets/__init__.py | 17 | 14 | def get_serializer_context(self):
context = super().get_serializer_context()
if hasattr(self.queryset.model, 'custom_fields'):
content_type = ContentType.objects.get_for_model(self.queryset.model)
context.update({
'custom_fields': content_type.custom_fields.all(),
| Move CustomFieldModelViewSet functionality into NetBoxModelViewSet | get_serializer_context | bbdeae0ed9bcc06fb96ffa2970272e1a3447448c | netbox | __init__.py | 14 | 8 | https://github.com/netbox-community/netbox.git | 2 | 60 | 0 | 15 | 103 | Python | {
"docstring": "\n For models which support custom fields, populate the `custom_fields` context.\n ",
"language": "en",
"n_whitespaces": 25,
"n_words": 10,
"vocab_size": 10
} | def get_serializer_context(self):
context = super().get_serializer_context()
if hasattr(self.queryset.model, 'custom_fields'):
content_type = ContentType.objects.get_for_model(self.queryset.model)
context.update({
'custom_fields': content_type.custom_fields.all(),
})
return context
|
|
56,885 | 223,398 | 87 | python3.10.4/Lib/distutils/util.py | 36 | 9 | def execute (func, args, msg=None, verbose=0, dry_run=0):
if msg is None:
msg = "%s%r" % (func.__name__, args)
if msg[-2:] == ',)': # correct for singleton tuple
msg = msg[0:-2] + ')'
log.info(msg)
if not dry_run:
| add python 3.10.4 for windows | execute | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | util.py | 14 | 8 | https://github.com/XX-net/XX-Net.git | 4 | 72 | 0 | 31 | 120 | Python | {
"docstring": "Perform some action that affects the outside world (eg. by\n writing to the filesystem). Such actions are special because they\n are disabled by the 'dry_run' flag. This method takes care of all\n that bureaucracy for you; all you have to do is supply the\n function to call and an argument tuple for it (to embody the\n \"external action\" being performed), and an optional message to\n print.\n ",
"language": "en",
"n_whitespaces": 90,
"n_words": 66,
"vocab_size": 52
} | def execute (func, args, msg=None, verbose=0, dry_run=0):
if msg is None:
msg = "%s%r" % (func.__name__, args)
if msg[-2:] == ',)': # correct for singleton tuple
msg = msg[0:-2] + ')'
log.info(msg)
if not dry_run:
func(*args)
|
|
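A hedged usage sketch of the `execute` helper above; the directory name is hypothetical, and note that `distutils` is deprecated in recent Python versions:

import os
from distutils.util import execute

# With dry_run=1 the message is logged but os.mkdir is never called.
execute(os.mkdir, ("build_out",), msg="creating build_out", dry_run=1)
# With no msg, the default "%s%r" % (func.__name__, args) is used; the
# singleton tuple renders as mkdir('build_out') thanks to the ',)' fix-up.
execute(os.mkdir, ("build_out",), dry_run=0)
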
80,946 | 272,035 | 462 | keras/feature_column/dense_features.py | 74 | 28 | def call(self, features, cols_to_output_tensors=None, training=None):
if training is None:
training = backend.learning_phase()
if not isinstance(features, dict):
raise ValueError(
"We expected a dictionary here. Instead we got: ", features
)
transformation_cache = (
tf.__internal__.feature_column.FeatureTransformationCache(features)
)
output_tens | Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | call | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | dense_features.py | 16 | 28 | https://github.com/keras-team/keras.git | 6 | 148 | 0 | 55 | 233 | Python | {
"docstring": "Returns a dense tensor corresponding to the `feature_columns`.\n\n Example usage:\n\n >>> t1 = tf.feature_column.embedding_column(\n ... tf.feature_column.categorical_column_with_hash_bucket(\"t1\", 2),\n ... dimension=8)\n >>> t2 = tf.feature_column.numeric_column('t2')\n >>> feature_layer = tf.compat.v1.keras.layers.DenseFeatures([t1, t2])\n >>> features = {\"t1\": tf.constant([\"a\", \"b\"]), \"t2\": tf.constant([1, 2])}\n >>> dense_tensor = feature_layer(features, training=True)\n\n Args:\n features: A mapping from key to tensors. `FeatureColumn`s look up via\n these keys. For example `numeric_column('price')` will look at 'price'\n key in this dict. Values can be a `SparseTensor` or a `Tensor` depends\n on corresponding `FeatureColumn`.\n cols_to_output_tensors: If not `None`, this will be filled with a dict\n mapping feature columns to output tensors created.\n training: Python boolean or None, indicating whether to the layer is being\n run in training mode. This argument is passed to the call method of any\n `FeatureColumn` that takes a `training` argument. For example, if a\n `FeatureColumn` performed dropout, the column could expose a `training`\n argument to control whether the dropout should be applied. If `None`,\n defaults to `tf.keras.backend.learning_phase()`.\n\n\n Returns:\n A `Tensor` which represents input layer of a model. Its shape\n is (batch_size, first_layer_dimension) and its dtype is `float32`.\n first_layer_dimension is determined based on given `feature_columns`.\n\n Raises:\n ValueError: If features are not a dictionary.\n ",
"language": "en",
"n_whitespaces": 443,
"n_words": 191,
"vocab_size": 134
} | def call(self, features, cols_to_output_tensors=None, training=None):
if training is None:
training = backend.learning_phase()
if not isinstance(features, dict):
raise ValueError(
"We expected a dictionary here. Instead we got: ", features
)
transformation_cache = (
tf.__internal__.feature_column.FeatureTransformationCache(features)
)
output_tensors = []
for column in self._feature_columns:
with backend.name_scope(column.name):
try:
tensor = column.get_dense_tensor(
transformation_cache,
self._state_manager,
training=training,
)
except TypeError:
tensor = column.get_dense_tensor(
transformation_cache, self._state_manager
)
processed_tensors = self._process_dense_tensor(column, tensor)
if cols_to_output_tensors is not None:
cols_to_output_tensors[column] = processed_tensors
output_tensors.append(processed_tensors)
return self._verify_and_concat_tensors(output_tensors)
|
|
40,053 | 167,600 | 49 | pandas/compat/pickle_compat.py | 16 | 6 | def patch_pickle() -> Iterator[None]:
orig_loads = pkl.loads
try:
setattr(pkl, "loads", loads)
yield
finally:
setattr(pkl, "loads", orig_loads)
| TYP: misc return type annotations (#47558) | patch_pickle | f538568afc2c76c2d738d32e3544cf9fe6742960 | pandas | pickle_compat.py | 11 | 10 | https://github.com/pandas-dev/pandas.git | 2 | 36 | 0 | 14 | 64 | Python | {
"docstring": "\n Temporarily patch pickle to use our unpickler.\n ",
"language": "en",
"n_whitespaces": 14,
"n_words": 7,
"vocab_size": 7
} | def patch_pickle() -> Iterator[None]:
orig_loads = pkl.loads
try:
setattr(pkl, "loads", loads)
yield
finally:
setattr(pkl, "loads", orig_loads)
|
|
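A sketch of how a generator like `patch_pickle` is consumed: wrapped with `contextlib.contextmanager` (pandas applies such a decorator), the replacement `loads` is only active inside the `with` block. The stand-in replacement below is hypothetical:

import contextlib
import pickle as pkl

@contextlib.contextmanager
def patched_pickle(loads_replacement):
    orig_loads = pkl.loads
    try:
        setattr(pkl, "loads", loads_replacement)
        yield
    finally:
        setattr(pkl, "loads", orig_loads)

with patched_pickle(lambda data: ("intercepted", len(data))):
    print(pkl.loads(b"anything"))        # ('intercepted', 8)
print(pkl.loads(pkl.dumps([1, 2, 3])))   # original loads restored: [1, 2, 3]
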
8,995 | 46,791 | 100 | dev/breeze/src/airflow_breeze/utils/run_utils.py | 35 | 9 | def get_filesystem_type(filepath):
# We import it locally so that click autocomplete wor | Prepare Breeze2 for prime time :) (#22713)
This is a review and clean-up for all the parameters and
commands for Breeze2 in order to prepare it for being
used by the contributors.
There are various small fixes here and there, removal
of duplicated code, refactoring and moving code around
as well as cleanup and review all the parameters used
for all implemented commands.
The parameters, default values and their behaviours were
updated to match the "new" life of Breeze rather than the
old one.
Some improvements are made to the autocomplete and
click help messages printed. The full list of choices is
always displayed, parameters are grouped according to
their target audience, and they were sorted according
to importance and frequency of use.
Various messages have been colourised according to their
meaning - warnings as yellow, errors as red and
informational messages as bright_blue.
The `dry-run` option has been added to just show what
would have been run without actually running some
potentially "write" commands (read commands are still
executed) so that you can easily verify and manually
copy and execute the commands with the option to modify
them before. The `dry_run` and `verbose` options are
now used for all commands.
The "main" command now runs "shell" by default similarly
as the original Breeze.
All "shortcut" parameters have been standardized - i.e
common options (verbose/dry run/help) have one and all
common flags that are likely to be used often have an
assigned shortcute.
The "stop" and "cleanup" command have been added
as they are necessary for average user to complete the
regular usage cycle.
Documentation for all the important methods has been
updated. | get_filesystem_type | 4ffd4f09532fceb67675fce4c1f5cd383eff992e | airflow | run_utils.py | 11 | 10 | https://github.com/apache/airflow.git | 4 | 49 | 0 | 28 | 87 | Python | {
"docstring": "\n Determine the type of filesystem used - we might want to use different parameters if tmpfs is used.\n :param filepath: path to check\n :return: type of filesystem\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 27,
"vocab_size": 23
} | def get_filesystem_type(filepath):
# We import it locally so that click autocomplete works
import psutil
root_type = "unknown"
for part in psutil.disk_partitions():
if part.mountpoint == '/':
root_type = part.fstype
continue
if filepath.startswith(part.mountpoint):
return part.fstype
return root_type
|
|
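Illustrative calls, assuming the function above is in scope; the outputs depend entirely on the machine's mount table, so the values shown are only plausible examples:

print(get_filesystem_type("/tmp"))   # e.g. 'tmpfs' when /tmp is its own mount
print(get_filesystem_type("/home"))  # e.g. 'ext4'; falls back to the root
                                     # fstype when no other mountpoint matches
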
41,928 | 176,480 | 447 | networkx/algorithms/similarity.py | 240 | 56 | def panther_similarity(G, source, k=5, path_length=5, c=0.5, delta=0.1, eps=None):
r
import numpy as np
num_nodes = G.number_of_nodes()
if num_nodes < k:
warnings.warn(
f"Number of nodes is {num_nodes}, but requested k is {k}. "
"Setting k to number of nodes."
)
k = num_nodes
# According to [1], they empirically determined
# a good value for ``eps`` to be sqrt( 1 / |E| )
if eps is None:
eps = np.sqrt(1.0 / G.number_of_edges())
inv_node_map = {name: index for index, name in enumerate(G.nodes)}
node_map = np.array(G)
# Calculate the sample size ``R`` for how many paths
# to randomly generate
t_choose_2 = math.comb(path_length, 2)
sample_size = int((c / eps**2) * (np.log2(t_choose_2) + 1 + np.log(1 / delta)))
index_map = {}
_ = list(
generate_random_paths(
G, sample_size, path_length=path_length, index_map=index_map
)
)
S = np.zeros(num_nodes)
inv_sample_size = 1 / sample_size
source_paths = set(index_map[source])
# Calculate the path similarities
# between ``source`` (v) and ``node`` (v_j)
# using our inverted index mapping of
# vertices to paths
for node, paths in index_map.items():
# Only consider paths where both
# ``node`` and ``source`` are present
common_paths = source_paths.intersection(paths)
S[inv_node_map[node]] = len(common_paths) * inv_sample_size
# Retrieve top ``k`` similar
# Note: the below performed anywhere from 4-10x faster
# (depending on input sizes) vs the equivalent ``np.argsort(S)[::-1]``
top_k_unsorted = np.argpartition(S, -k)[-k:]
top_k_sorte | Update black (#5438)
* CI: sync up black dev requirements version with precommit
* Run black
Co-authored-by: Jarrod Millman <[email protected]> | panther_similarity | f6755ffa00211b523c6c0bec5398bc6c3c43c8b1 | networkx | similarity.py | 14 | 79 | https://github.com/networkx/networkx.git | 5 | 300 | 0 | 164 | 479 | Python | {
"docstring": "Returns the Panther similarity of nodes in the graph `G` to node ``v``.\n\n Panther is a similarity metric that says \"two objects are considered\n to be similar if they frequently appear on the same paths.\" [1]_.\n\n Parameters\n ----------\n G : NetworkX graph\n A NetworkX graph\n source : node\n Source node for which to find the top `k` similar other nodes\n k : int (default = 5)\n The number of most similar nodes to return\n path_length : int (default = 5)\n How long the randomly generated paths should be (``T`` in [1]_)\n c : float (default = 0.5)\n A universal positive constant used to scale the number\n of sample random paths to generate.\n delta : float (default = 0.1)\n The probability that the similarity $S$ is not an epsilon-approximation to (R, phi),\n where $R$ is the number of random paths and $\\phi$ is the probability\n that an element sampled from a set $A \\subseteq D$, where $D$ is the domain.\n eps : float or None (default = None)\n The error bound. Per [1]_, a good value is ``sqrt(1/|E|)``. Therefore,\n if no value is provided, the recommended computed value will be used.\n\n Returns\n -------\n similarity : dictionary\n Dictionary of nodes to similarity scores (as floats). Note:\n the self-similarity (i.e., ``v``) will not be included in\n the returned dictionary.\n\n Examples\n --------\n >>> G = nx.star_graph(10)\n >>> sim = nx.panther_similarity(G, 0)\n\n References\n ----------\n .. [1] Zhang, J., Tang, J., Ma, C., Tong, H., Jing, Y., & Li, J.\n Panther: Fast top-k similarity search on large networks.\n In Proceedings of the ACM SIGKDD International Conference\n on Knowledge Discovery and Data Mining (Vol. 2015-August, pp. 1445–1454).\n Association for Computing Machinery. https://doi.org/10.1145/2783258.2783267.\n ",
"language": "en",
"n_whitespaces": 479,
"n_words": 275,
"vocab_size": 173
} | def panther_similarity(G, source, k=5, path_length=5, c=0.5, delta=0.1, eps=None):
r
import numpy as np
num_nodes = G.number_of_nodes()
if num_nodes < k:
warnings.warn(
f"Number of nodes is {num_nodes}, but requested k is {k}. "
"Setting k to number of nodes."
)
k = num_nodes
# According to [1], they empirically determined
# a good value for ``eps`` to be sqrt( 1 / |E| )
if eps is None:
eps = np.sqrt(1.0 / G.number_of_edges())
inv_node_map = {name: index for index, name in enumerate(G.nodes)}
node_map = np.array(G)
# Calculate the sample size ``R`` for how many paths
# to randomly generate
t_choose_2 = math.comb(path_length, 2)
sample_size = int((c / eps**2) * (np.log2(t_choose_2) + 1 + np.log(1 / delta)))
index_map = {}
_ = list(
generate_random_paths(
G, sample_size, path_length=path_length, index_map=index_map
)
)
S = np.zeros(num_nodes)
inv_sample_size = 1 / sample_size
source_paths = set(index_map[source])
# Calculate the path similarities
# between ``source`` (v) and ``node`` (v_j)
# using our inverted index mapping of
# vertices to paths
for node, paths in index_map.items():
# Only consider paths where both
# ``node`` and ``source`` are present
common_paths = source_paths.intersection(paths)
S[inv_node_map[node]] = len(common_paths) * inv_sample_size
# Retrieve top ``k`` similar
# Note: the below performed anywhere from 4-10x faster
# (depending on input sizes) vs the equivalent ``np.argsort(S)[::-1]``
top_k_unsorted = np.argpartition(S, -k)[-k:]
top_k_sorted = top_k_unsorted[np.argsort(S[top_k_unsorted])][::-1]
# Add back the similarity scores
top_k_sorted_names = map(lambda n: node_map[n], top_k_sorted)
top_k_with_val = dict(zip(top_k_sorted_names, S[top_k_sorted]))
# Remove the self-similarity
top_k_with_val.pop(source, None)
return top_k_with_val
|
|
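A worked check of the sample-size formula used above, with the default c=0.5, delta=0.1, path_length=5 and a hypothetical graph of 200 edges:

import math

c, delta, path_length, num_edges = 0.5, 0.1, 5, 200
eps = math.sqrt(1.0 / num_edges)        # recommended eps = sqrt(1/|E|) ~ 0.0707
t_choose_2 = math.comb(path_length, 2)  # C(5, 2) = 10
sample_size = int((c / eps**2) * (math.log2(t_choose_2) + 1 + math.log(1 / delta)))
print(sample_size)  # 100 * (3.32 + 1 + 2.30) -> 662 random paths
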
77,103 | 262,017 | 72 | TTS/tts/utils/text/phonemizers/base.py | 22 | 10 | def _phonemize_preprocess(self, text) -> Tuple[List[str], List]:
text = text.strip()
if self._keep_puncs:
# a tuple (text, punctuation marks)
return self._punctuator.strip_to_restore(text)
return [self._punctuator.strip(text)], []
| Fix BasePhonemizer | _phonemize_preprocess | ff7c3858389ba250f761d76592cb060ac6be05c0 | TTS | base.py | 10 | 12 | https://github.com/coqui-ai/TTS.git | 2 | 53 | 0 | 21 | 85 | Python | {
"docstring": "Preprocess the text before phonemization\n\n 1. remove spaces\n 2. remove punctuation\n\n Override this if you need a different behaviour\n ",
"language": "en",
"n_whitespaces": 47,
"n_words": 19,
"vocab_size": 18
} | def _phonemize_preprocess(self, text) -> Tuple[List[str], List]:
text = text.strip()
if self._keep_puncs:
# a tuple (text, punctuation marks)
return self._punctuator.strip_to_restore(text)
return [self._punctuator.strip(text)], []
|
|
24,448 | 111,596 | 21 | spacy/cli/_util.py | 11 | 5 | def ensure_pathy(path):
from pathy import Pathy # noqa: F811
return Pathy.fluid(path)
| Support local filesystem remotes for projects (#11762)
* Support local filesystem remotes for projects
* Fix support for local filesystem remotes for projects
* Use `FluidPath` instead of `Pathy` to support both filesystem and
remote paths
* Create missing parent directories if required for local filesystem
* Add a more general `_file_exists` method to support both `Pathy`,
`Path`, and `smart_open`-compatible URLs
* Add explicit `smart_open` dependency starting with support for
`compression` flag
* Update `pathy` dependency to exclude older versions that aren't
compatible with required `smart_open` version
* Update docs to refer to `Pathy` instead of `smart_open` for project
remotes (technically you can still push to any `smart_open`-compatible
path but you can't pull from them)
* Add tests for local filesystem remotes
* Update pathy for general BlobStat sorting
* Add import
* Remove _file_exists since only Pathy remotes are supported
* Format CLI docs
* Clean up merge | ensure_pathy | 1ebe7db07c8dbb1a55dafb09131b1d08242b79c5 | spaCy | _util.py | 7 | 3 | https://github.com/explosion/spaCy.git | 1 | 17 | 0 | 11 | 32 | Python | {
"docstring": "Temporary helper to prevent importing Pathy globally (which can cause\n slow and annoying Google Cloud warning).",
"language": "en",
"n_whitespaces": 18,
"n_words": 16,
"vocab_size": 16
} | def ensure_pathy(path):
from pathy import Pathy # noqa: F811
return Pathy.fluid(path)
|
|
50,400 | 203,480 | 147 | django/contrib/admin/sites.py | 32 | 13 | def index(self, request, extra_context=None):
app_list = self.get_app_list(request)
context = {
**self.each_context(request),
"title": self.index_title,
"subtitle": None,
"app_list": app_list,
**(extra_context or {}),
}
| Refs #33476 -- Reformatted code with Black. | index | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | sites.py | 11 | 13 | https://github.com/django/django.git | 3 | 74 | 0 | 27 | 119 | Python | {
"docstring": "\n Display the main admin index page, which lists all of the installed\n apps that have been registered in this site.\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 20,
"vocab_size": 19
} | def index(self, request, extra_context=None):
app_list = self.get_app_list(request)
context = {
**self.each_context(request),
"title": self.index_title,
"subtitle": None,
"app_list": app_list,
**(extra_context or {}),
}
request.current_app = self.name
return TemplateResponse(
request, self.index_template or "admin/index.html", context
)
|
|
72,022 | 247,955 | 858 | docker/start.py | 279 | 39 | def generate_config_from_template(config_dir, config_path, environ, ownership):
for v in ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"):
if v not in environ:
error(
"Environment variable '%s' is mandatory when generating a config file."
% (v,)
)
# populate some params from data files (if they exist, else create new ones)
environ = environ.copy()
secrets = {
"registration": "SYNAPSE_REGISTRATION_SHARED_SECRET",
"macaroon": "SYNAPSE_MACAROON_SECRET_KEY",
}
for name, secret in secrets.items():
if secret not in environ:
filename = "/data/%s.%s.key" % (environ["SYNAPSE_SERVER_NAME"], name)
# if the file already exists, load in the existing value; otherwise,
# generate a new secret and write | Poetry: use locked environment in Docker images (#12385) | generate_config_from_template | 3a7e97c7ade17a47517aadc0e9e305a1894119ac | synapse | start.py | 19 | 63 | https://github.com/matrix-org/synapse.git | 12 | 375 | 0 | 174 | 674 | Python | {
"docstring": "Generate a homeserver.yaml from environment variables\n\n Args:\n config_dir (str): where to put generated config files\n config_path (str): where to put the main config file\n environ (dict): environment dictionary\n ownership (str|None): \"<user>:<group>\" string which will be used to set\n ownership of the generated configs. If None, ownership will not change.\n ",
"language": "en",
"n_whitespaces": 94,
"n_words": 49,
"vocab_size": 37
} | def generate_config_from_template(config_dir, config_path, environ, ownership):
for v in ("SYNAPSE_SERVER_NAME", "SYNAPSE_REPORT_STATS"):
if v not in environ:
error(
"Environment variable '%s' is mandatory when generating a config file."
% (v,)
)
# populate some params from data files (if they exist, else create new ones)
environ = environ.copy()
secrets = {
"registration": "SYNAPSE_REGISTRATION_SHARED_SECRET",
"macaroon": "SYNAPSE_MACAROON_SECRET_KEY",
}
for name, secret in secrets.items():
if secret not in environ:
filename = "/data/%s.%s.key" % (environ["SYNAPSE_SERVER_NAME"], name)
# if the file already exists, load in the existing value; otherwise,
# generate a new secret and write it to a file
if os.path.exists(filename):
log("Reading %s from %s" % (secret, filename))
with open(filename) as handle:
value = handle.read()
else:
log("Generating a random secret for {}".format(secret))
value = codecs.encode(os.urandom(32), "hex").decode()
with open(filename, "w") as handle:
handle.write(value)
environ[secret] = value
environ["SYNAPSE_APPSERVICES"] = glob.glob("/data/appservices/*.yaml")
if not os.path.exists(config_dir):
os.mkdir(config_dir)
# Convert SYNAPSE_NO_TLS to boolean if exists
if "SYNAPSE_NO_TLS" in environ:
tlsanswerstring = str.lower(environ["SYNAPSE_NO_TLS"])
if tlsanswerstring in ("true", "on", "1", "yes"):
environ["SYNAPSE_NO_TLS"] = True
else:
if tlsanswerstring in ("false", "off", "0", "no"):
environ["SYNAPSE_NO_TLS"] = False
else:
error(
'Environment variable "SYNAPSE_NO_TLS" found but value "'
+ tlsanswerstring
+ '" unrecognized; exiting.'
)
if "SYNAPSE_LOG_CONFIG" not in environ:
environ["SYNAPSE_LOG_CONFIG"] = config_dir + "/log.config"
log("Generating synapse config file " + config_path)
convert("/conf/homeserver.yaml", config_path, environ)
log_config_file = environ["SYNAPSE_LOG_CONFIG"]
log("Generating log config file " + log_config_file)
convert("/conf/log.config", log_config_file, environ)
# Hopefully we already have a signing key, but generate one if not.
args = [
sys.executable,
"-m",
"synapse.app.homeserver",
"--config-path",
config_path,
# tell synapse to put generated keys in /data rather than /compiled
"--keys-directory",
config_dir,
"--generate-keys",
]
if ownership is not None:
log(f"Setting ownership on /data to {ownership}")
subprocess.check_output(["chown", "-R", ownership, "/data"])
args = ["gosu", ownership] + args
subprocess.check_output(args)
|
|
27,173 | 122,389 | 40 | jax/_src/api_util.py | 29 | 16 | def donation_vector(donate_argnums, args, kwargs) -> Tuple[bool, ...]:
res: List[bool] = []
for i, arg in enumerate(args):
donate = bool(i in donate_argnums)
res.extend((donate,) * tree_structur | Annotate tree_util | donation_vector | b742b04380ebe2e824403e603924ca505173bf7a | jax | api_util.py | 13 | 8 | https://github.com/google/jax.git | 2 | 81 | 0 | 26 | 126 | Python | {
"docstring": "Returns a tuple with a boolean value for each leaf in args.",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 11
} | def donation_vector(donate_argnums, args, kwargs) -> Tuple[bool, ...]:
res: List[bool] = []
for i, arg in enumerate(args):
donate = bool(i in donate_argnums)
res.extend((donate,) * tree_structure(arg).num_leaves)
res.extend((False,) * tree_structure(kwargs).num_leaves)
return tuple(res)
|
|
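An illustrative sketch of what `donation_vector` computes: one boolean per flattened pytree leaf, True for the leaves of donated positional arguments. The toy pytrees are made up:

import jax

args = ({"a": 1, "b": 2}, [3, 4, 5])  # arg 0 has 2 leaves, arg 1 has 3
kwargs = {}
donate_argnums = (0,)
print([jax.tree_util.tree_structure(a).num_leaves for a in args])  # [2, 3]
# donation_vector(donate_argnums, args, kwargs) would therefore return
# (True, True, False, False, False)
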
56,024 | 220,514 | 160 | python3.10.4/Lib/asyncio/futures.py | 44 | 15 | def set_exception(self, exception):
if self._state != _PENDING:
raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
if isinstance(exception, type):
exception = exception()
if type(exception) is StopIteration:
raise TypeError("StopIteration interacts badly with generators "
"and cannot be raised into a Future")
self._exception = exception | add python 3.10.4 for windows | set_exception | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | futures.py | 12 | 12 | https://github.com/XX-net/XX-Net.git | 4 | 70 | 0 | 36 | 132 | Python | {
"docstring": "Mark the future done and set an exception.\n\n If the future is already done when this method is called, raises\n InvalidStateError.\n ",
"language": "en",
"n_whitespaces": 42,
"n_words": 21,
"vocab_size": 17
} | def set_exception(self, exception):
if self._state != _PENDING:
raise exceptions.InvalidStateError(f'{self._state}: {self!r}')
if isinstance(exception, type):
exception = exception()
if type(exception) is StopIteration:
raise TypeError("StopIteration interacts badly with generators "
"and cannot be raised into a Future")
self._exception = exception
self._state = _FINISHED
self.__schedule_callbacks()
self.__log_traceback = True
|
|
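A small sketch of the guarded behaviour above: a bare exception class is instantiated before being stored, and StopIteration is rejected with a TypeError:

import asyncio

async def main():
    loop = asyncio.get_running_loop()
    fut = loop.create_future()
    fut.set_exception(ValueError)  # a class is accepted and instantiated
    try:
        fut.result()               # re-raises the stored exception
    except ValueError:
        print("ValueError raised from future")
    fut2 = loop.create_future()
    try:
        fut2.set_exception(StopIteration())
    except TypeError as exc:
        print(exc)                 # "StopIteration interacts badly ..."

asyncio.run(main())
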
50,606 | 204,002 | 369 | django/contrib/gis/gdal/raster/band.py | 98 | 24 | def statistics(self, refresh=False, approximate=False):
# Prepare array with arguments for capi function
smin, smax | Refs #33476 -- Reformatted code with Black. | statistics | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | band.py | 12 | 25 | https://github.com/django/django.git | 4 | 156 | 0 | 77 | 241 | Python | {
"docstring": "\n Compute statistics on the pixel values of this band.\n\n The return value is a tuple with the following structure:\n (minimum, maximum, mean, standard deviation).\n\n If approximate=True, the statistics may be computed based on overviews\n or a subset of image tiles.\n\n If refresh=True, the statistics will be computed from the data directly,\n and the cache will be updated where applicable.\n\n For empty bands (where all pixel values are nodata), all statistics\n values are returned as None.\n\n For raster formats using Persistent Auxiliary Metadata (PAM) services,\n the statistics might be cached in an auxiliary file.\n ",
"language": "en",
"n_whitespaces": 178,
"n_words": 93,
"vocab_size": 68
} | def statistics(self, refresh=False, approximate=False):
# Prepare array with arguments for capi function
smin, smax, smean, sstd = c_double(), c_double(), c_double(), c_double()
stats_args = [
self._ptr,
c_int(approximate),
byref(smin),
byref(smax),
byref(smean),
byref(sstd),
c_void_p(),
c_void_p(),
]
if refresh or self._stats_refresh:
func = capi.compute_band_statistics
else:
# Add additional argument to force computation if there is no
# existing PAM file to take the values from.
force = True
stats_args.insert(2, c_int(force))
func = capi.get_band_statistics
# Computation of statistics fails for empty bands.
try:
func(*stats_args)
result = smin.value, smax.value, smean.value, sstd.value
except GDALException:
result = (None, None, None, None)
self._stats_refresh = False
return result
|
|
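A hedged usage sketch for `statistics` (requires GDAL to be installed; the raster path is hypothetical):

from django.contrib.gis.gdal import GDALRaster

rst = GDALRaster("/path/to/raster.tif")  # hypothetical file
smin, smax, smean, sstd = rst.bands[0].statistics(approximate=True)
print(smin, smax, smean, sstd)           # all None for an all-nodata band
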
71,179 | 246,367 | 387 | tests/storage/databases/test_state_store.py | 116 | 22 | def test_smaller_request_deduplicated(self) -> None:
req1 = ensureDeferred(
self.state_datastore._get_state_for_group_using_inflight_cache(
42, StateFilter.from_types((("test.type", None),))
)
)
self.pump(by=0.1)
# This should have gone to the database
self.assertEqual(len(self.get_state_group_calls), 1)
self.assertFalse(req1.called)
req2 = ensureDeferred(
self.state_datastore._get_state_for_group_using_inflight_cache(
42, StateFilter.from_types((("test.type", "b"),))
)
)
self.pump(by=0.1)
# No more calls should have gone to the database, because the second
# request was already in the i | Add more tests for in-flight state query duplication. (#12033) | test_smaller_request_deduplicated | 546b9c9e648f5e2b25bb7c8350570787ff9befae | synapse | test_state_store.py | 15 | 37 | https://github.com/matrix-org/synapse.git | 1 | 224 | 0 | 80 | 363 | Python | {
"docstring": "\n Tests that duplicate requests for state are deduplicated.\n\n This test:\n - requests some state (state group 42, 'all' state filter)\n - requests a subset of that state, before the first request finishes\n - checks to see that only one database query was made\n - completes the database query\n - checks that both requests see the correct retrieved state\n ",
"language": "en",
"n_whitespaces": 115,
"n_words": 58,
"vocab_size": 39
} | def test_smaller_request_deduplicated(self) -> None:
req1 = ensureDeferred(
self.state_datastore._get_state_for_group_using_inflight_cache(
42, StateFilter.from_types((("test.type", None),))
)
)
self.pump(by=0.1)
# This should have gone to the database
self.assertEqual(len(self.get_state_group_calls), 1)
self.assertFalse(req1.called)
req2 = ensureDeferred(
self.state_datastore._get_state_for_group_using_inflight_cache(
42, StateFilter.from_types((("test.type", "b"),))
)
)
self.pump(by=0.1)
# No more calls should have gone to the database, because the second
# request was already in the in-flight cache!
self.assertEqual(len(self.get_state_group_calls), 1)
self.assertFalse(req1.called)
self.assertFalse(req2.called)
groups, sf, d = self.get_state_group_calls[0]
self.assertEqual(groups, (42,))
# The state filter is expanded internally for increased cache hit rate,
    # so the database sees a wider state filter than requested.
self.assertEqual(sf, ALL_NON_MEMBERS_STATE_FILTER)
# Now we can complete the request
self._complete_request_fake(groups, sf, d)
self.assertEqual(
self.get_success(req1),
{("test.type", "a"): "AAA", ("test.type", "b"): "BBB"},
)
self.assertEqual(self.get_success(req2), {("test.type", "b"): "BBB"})
|
|
33,038 | 143,733 | 232 | rllib/examples/simulators/sumo/marlenvironment.py | 55 | 20 | def get_observation(self, agent):
speed = 0
distance = self._config["scenari | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | get_observation | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | marlenvironment.py | 15 | 15 | https://github.com/ray-project/ray.git | 4 | 108 | 0 | 40 | 176 | Python | {
"docstring": "\n Returns the observation of a given agent.\n See http://sumo.sourceforge.net/pydoc/traci._simulation.html\n ",
"language": "en",
"n_whitespaces": 31,
"n_words": 9,
"vocab_size": 9
} | def get_observation(self, agent):
speed = 0
distance = self._config["scenario_config"]["misc"]["max_distance"]
if agent in self.simulation.veh_subscriptions:
speed = round(
self.simulation.veh_subscriptions[agent][tc.VAR_SPEED] * MS_TO_KMH
)
leader = self.simulation.veh_subscriptions[agent][tc.VAR_LEADER]
if leader: # compatible with traci
veh, dist = leader
if veh:
# compatible with libsumo
distance = round(dist)
ret = [speed, distance]
logger.debug("Agent %s --> Obs: %s", agent, pformat(ret))
return ret
|
|
23,144 | 108,332 | 197 | lib/matplotlib/colors.py | 51 | 10 | def register(self, name, color_list):
if name in self._BUILTIN_COLOR_SEQUENCES:
raise ValueError(f"{name!r} is a reserved name for a builtin "
"color sequence")
color_list = list(color_list) # force copy and coerce type to list
for color in color_list:
try:
to_rgba(color)
except ValueError:
raise ValueError(
f"{color!r} is not a valid color specification")
self._color_sequences[name] = color_list
| Add a registry for color sequences
Color sequences are simply lists of colors, that we store by name in
a registry. The registry is modelled similar to the ColormapRegistry
to 1) support immutable builtin color sequences and 2) to return copies
so that one cannot mess with the global definition of the color sequence
through an obtained instance.
For now, I've made the sequences used for `ListedColormap`s available
as builtin sequences, but that's open for discussion.
More usage documentation should be added in the color examples and/or
tutorials, but I'll wait with that till after the general approval of
the structure and API. One common use case will be
```
plt.rcParams['axes.prop_cycle'] = plt.cycler(color=plt.color_sequences['Pastel1'])
```
Co-authored-by: Elliott Sales de Andrade <[email protected]> | register | 0abe0ce2f2748d1d0383154d045da3609a4b871b | matplotlib | colors.py | 14 | 12 | https://github.com/matplotlib/matplotlib.git | 4 | 58 | 0 | 41 | 108 | Python | {
"docstring": "\n Register a new color sequence.\n\n The color sequence registry stores a copy of the given *color_list*, so\n that future changes to the original list do not affect the registered\n color sequence. Think of this as the registry taking a snapshot\n of *color_list* at registration.\n\n Parameters\n ----------\n name : str\n The name for the color sequence.\n\n color_list : list of colors\n An iterable returning valid Matplotlib colors when iterating over.\n Note however that the returned color sequence will always be a\n list regardless of the input type.\n\n ",
"language": "en",
"n_whitespaces": 201,
"n_words": 86,
"vocab_size": 58
} | def register(self, name, color_list):
if name in self._BUILTIN_COLOR_SEQUENCES:
raise ValueError(f"{name!r} is a reserved name for a builtin "
"color sequence")
color_list = list(color_list) # force copy and coerce type to list
for color in color_list:
try:
to_rgba(color)
except ValueError:
raise ValueError(
f"{color!r} is not a valid color specification")
self._color_sequences[name] = color_list
|
|
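A hedged usage sketch of the registry this method belongs to, exposed as `matplotlib.color_sequences` in releases containing the commit above:

import matplotlib as mpl

mpl.color_sequences.register("my_seq", ["crimson", "#1f77b4", (0.2, 0.7, 0.3)])
print(mpl.color_sequences["my_seq"])  # a copy, so the stored list stays intact
mpl.rcParams["axes.prop_cycle"] = mpl.cycler(color=mpl.color_sequences["my_seq"])
mpl.color_sequences.unregister("my_seq")  # builtin sequences cannot be removed
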
@pytest.mark.parametrize(
"attributes, no_attributes, limit",
[
({"attr": True}, False, 5000),
({}, True, 5000),
({"attr": True}, False, 3),
({}, True, 3),
],
) | 92,957 | 293,911 | 362 | tests/components/recorder/test_history.py | 145 | 28 | def test_get_states_no_attributes(hass_recorder):
hass = hass_recorder()
now, future, states = _setup_get_states(hass)
for state in states:
state.attributes = {}
# Get states returns everything before POINT for all entities
for state1, state2 in zip(
states,
sorted(
history.get_states(hass, future, no_attributes=True),
key=lambda state: state.entity_id,
),
):
assert state1 == state2
# Get states returns everything before POINT for tested entities
entities = [f"test.point_in_time_{i % 5} | Avoid selecting attributes in the history api when `no_attributes` is passed (#68352) | test_get_states_no_attributes | 816695cc96c19110ccda10431d92160ea6064d32 | core | test_history.py | 12 | 31 | https://github.com/home-assistant/core.git | 5 | 199 | 1 | 85 | 381 | Python | {
"docstring": "Test getting states without attributes at a specific point in time.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 11
} | def test_get_states_no_attributes(hass_recorder):
hass = hass_recorder()
now, future, states = _setup_get_states(hass)
for state in states:
state.attributes = {}
# Get states returns everything before POINT for all entities
for state1, state2 in zip(
states,
sorted(
history.get_states(hass, future, no_attributes=True),
key=lambda state: state.entity_id,
),
):
assert state1 == state2
# Get states returns everything before POINT for tested entities
entities = [f"test.point_in_time_{i % 5}" for i in range(5)]
for state1, state2 in zip(
states,
sorted(
history.get_states(hass, future, entities, no_attributes=True),
key=lambda state: state.entity_id,
),
):
assert state1 == state2
# Test get_state here because we have a DB setup
assert states[0] == history.get_state(
hass, future, states[0].entity_id, no_attributes=True
)
time_before_recorder_ran = now - timedelta(days=1000)
assert history.get_states(hass, time_before_recorder_ran, no_attributes=True) == []
assert (
history.get_state(hass, time_before_recorder_ran, "demo.id", no_attributes=True)
is None
)
@pytest.mark.parametrize(
"attributes, no_attributes, limit",
[
({"attr": True}, False, 5000),
({}, True, 5000),
({"attr": True}, False, 3),
({}, True, 3),
],
) |
56,203 | 221,098 | 63 | python3.10.4/Lib/bdb.py | 24 | 6 | def set_until(self, frame, lineno=None):
# the name "until" is borrowed from gdb
if lineno is None:
lineno = frame.f_lineno + 1
self._set_stopinfo(frame, frame, lineno)
| add python 3.10.4 for windows | set_until | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | bdb.py | 10 | 4 | https://github.com/XX-net/XX-Net.git | 2 | 34 | 0 | 21 | 54 | Python | {
"docstring": "Stop when the line with the lineno greater than the current one is\n reached or when returning from current frame.",
"language": "en",
"n_whitespaces": 26,
"n_words": 20,
"vocab_size": 16
} | def set_until(self, frame, lineno=None):
# the name "until" is borrowed from gdb
if lineno is None:
lineno = frame.f_lineno + 1
self._set_stopinfo(frame, frame, lineno)
|
|
89,205 | 290,079 | 43 | homeassistant/components/rest/switch.py | 15 | 13 | async def get_device_state(self, hass):
websession = async_get_clientsession(hass, self._verify_ssl)
rendered_headers = template.render_complex(self._headers, parse_res | Use _attr_is_on in rest (#81305) | get_device_state | fee3898f648d4fffdf9dbec748aab2410a0bd227 | core | switch.py | 9 | 31 | https://github.com/home-assistant/core.git | 6 | 180 | 0 | 13 | 68 | Python | {
"docstring": "Get the latest data from REST API and update the state.",
"language": "en",
"n_whitespaces": 10,
"n_words": 11,
"vocab_size": 10
} | async def get_device_state(self, hass):
websession = async_get_clientsession(hass, self._verify_ssl)
rendered_headers = template.render_complex(self._headers, parse_result=False)
rendered_params = template.render_complex(self._params)
|
|
29,525 | 131,429 | 75 | python/ray/tests/test_client_reconnect.py | 15 | 14 | def reset_channel(self) -> None:
if self.channel:
self. | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | reset_channel | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | test_client_reconnect.py | 10 | 12 | https://github.com/ray-project/ray.git | 2 | 74 | 0 | 15 | 121 | Python | {
"docstring": "\n Manually close and reopen the channel to the real ray server. This\n simulates a disconnection between the client and the server.\n ",
"language": "en",
"n_whitespaces": 43,
"n_words": 21,
"vocab_size": 16
} | def reset_channel(self) -> None:
if self.channel:
self.channel.close()
self.channel = grpc.insecure_channel(self.real_addr, options=GRPC_OPTIONS)
grpc.channel_ready_future(self.channel)
self.task_servicer.set_channel(self.channel)
self.data_servicer.set_channel(self.channel)
self.logs_servicer.set_channel(self.channel)
|
|
117,805 | 321,575 | 221 | tests/end2end/fixtures/quteprocess.py | 78 | 15 | def wait_scroll_pos_changed(self, x=None, y=None):
__tracebackhide__ = (lambda e:
e.errisinstance(testprocess.WaitForTimeout))
if (x is None and y is not None) or (y is None and x is not None):
raise ValueError("Either both x/y or neither must be given!")
if x is None and y is None:
point = 'Py*.QtCore.QPoint(*, *)' # not counting 0/0 here
elif x == '0' and y == '0':
| qt6 tests: Fix remaining PyQt5 references | wait_scroll_pos_changed | deb21acdebd77c6dc6d5fe4d8cad75e4ca074138 | qutebrowser | quteprocess.py | 12 | 13 | https://github.com/qutebrowser/qutebrowser.git | 9 | 107 | 0 | 54 | 184 | Python | {
"docstring": "Wait until a \"Scroll position changed\" message was found.\n\n With QtWebEngine, on older Qt versions which lack\n QWebEnginePage.scrollPositionChanged, this also skips the test.\n ",
"language": "en",
"n_whitespaces": 44,
"n_words": 23,
"vocab_size": 23
} | def wait_scroll_pos_changed(self, x=None, y=None):
__tracebackhide__ = (lambda e:
e.errisinstance(testprocess.WaitForTimeout))
if (x is None and y is not None) or (y is None and x is not None):
raise ValueError("Either both x/y or neither must be given!")
if x is None and y is None:
point = 'Py*.QtCore.QPoint(*, *)' # not counting 0/0 here
elif x == '0' and y == '0':
point = 'Py*.QtCore.QPoint()'
else:
point = 'Py*.QtCore.QPoint({}, {})'.format(x, y)
self.wait_for(category='webview',
message='Scroll position changed to ' + point)
|
|
30,665 | 135,586 | 1,453 | python/ray/data/tests/test_dataset_tfrecords.py | 231 | 40 | def test_write_tfrecords(ray_start_regular_shared, tmp_path):
import tensorflow as tf
# The dataset we will write to a .tfrecords file.
ds = ray.data.from_items(
[
# Row one.
{
"int_item": 1,
"int_list": [2, 2, 3],
"float_item": 1.0,
"float_list": [2.0, 3.0, 4.0],
"bytes_item": b"abc",
"bytes_list": [b"abc", b"1234"],
},
# Row two.
{
"int_item": 2,
"int_list": [3, 3, 4],
"float_item": 2.0,
"float_list": [2.0, 2.0, 3.0],
"bytes_item": b"def",
"bytes_list": [b"def", b"1234"],
},
]
)
# The corresponding tf.train.Example that we would expect to read
# from this dataset.
expected_records = [
# Record one (corresponding to row one).
tf.train.Example(
features=tf.train.Features(
feature={
"int_item": tf.train.Feature(
int64_list=tf.train.Int64List(value=[1])
),
"int_list": tf.train.Feature(
int64_list=tf.train.Int64List(value=[2, 2, 3])
),
"float_item": tf.train.Feature(
float_list=tf.train.FloatList(value=[1.0])
),
"float_l | [Datasets] Add writer for TFRecords. (#29448)
This PR enables users to write TFRecords from datasets.
In particular, the master branch already includes an API for reading TFRecords from datasets. Users have requested the ability to write these datasets back to TFRecords. | test_write_tfrecords | 9fab504fe776f96fecf85e12ea006264cbe92f4a | ray | test_dataset_tfrecords.py | 23 | 82 | https://github.com/ray-project/ray.git | 3 | 590 | 0 | 127 | 885 | Python | {
"docstring": "Test that write_tfrecords writes TFRecords correctly.\n\n Test this by writing a Dataset to a TFRecord (function under test),\n reading it back out into a tf.train.Example,\n and checking that the result is analogous to the original Dataset.\n ",
"language": "en",
"n_whitespaces": 48,
"n_words": 36,
"vocab_size": 30
} | def test_write_tfrecords(ray_start_regular_shared, tmp_path):
import tensorflow as tf
# The dataset we will write to a .tfrecords file.
ds = ray.data.from_items(
[
# Row one.
{
"int_item": 1,
"int_list": [2, 2, 3],
"float_item": 1.0,
"float_list": [2.0, 3.0, 4.0],
"bytes_item": b"abc",
"bytes_list": [b"abc", b"1234"],
},
# Row two.
{
"int_item": 2,
"int_list": [3, 3, 4],
"float_item": 2.0,
"float_list": [2.0, 2.0, 3.0],
"bytes_item": b"def",
"bytes_list": [b"def", b"1234"],
},
]
)
# The corresponding tf.train.Example that we would expect to read
# from this dataset.
expected_records = [
# Record one (corresponding to row one).
tf.train.Example(
features=tf.train.Features(
feature={
"int_item": tf.train.Feature(
int64_list=tf.train.Int64List(value=[1])
),
"int_list": tf.train.Feature(
int64_list=tf.train.Int64List(value=[2, 2, 3])
),
"float_item": tf.train.Feature(
float_list=tf.train.FloatList(value=[1.0])
),
"float_list": tf.train.Feature(
float_list=tf.train.FloatList(value=[2.0, 3.0, 4.0])
),
"bytes_item": tf.train.Feature(
bytes_list=tf.train.BytesList(value=[b"abc"])
),
"bytes_list": tf.train.Feature(
bytes_list=tf.train.BytesList(value=[b"abc", b"1234"])
),
}
)
),
# Record two (corresponding to row two).
tf.train.Example(
features=tf.train.Features(
feature={
"int_item": tf.train.Feature(
int64_list=tf.train.Int64List(value=[2])
),
"int_list": tf.train.Feature(
int64_list=tf.train.Int64List(value=[3, 3, 4])
),
"float_item": tf.train.Feature(
float_list=tf.train.FloatList(value=[2.0])
),
"float_list": tf.train.Feature(
float_list=tf.train.FloatList(value=[2.0, 2.0, 3.0])
),
"bytes_item": tf.train.Feature(
bytes_list=tf.train.BytesList(value=[b"def"])
),
"bytes_list": tf.train.Feature(
bytes_list=tf.train.BytesList(value=[b"def", b"1234"])
),
}
)
),
]
# Perform the test.
# Write the dataset to a .tfrecords file.
ds.write_tfrecords(tmp_path)
# Read the Examples back out from the .tfrecords file.
    # This follows the official TFRecords tutorial:
# https://www.tensorflow.org/tutorials/load_data/tfrecord#reading_a_tfrecord_file_2
filenames = sorted(os.listdir(tmp_path))
filepaths = [os.path.join(tmp_path, filename) for filename in filenames]
raw_dataset = tf.data.TFRecordDataset(filepaths)
tfrecords = []
for raw_record in raw_dataset:
example = tf.train.Example()
example.ParseFromString(raw_record.numpy())
tfrecords.append(example)
assert tfrecords == expected_records
|
|
42,946 | 179,373 | 165 | test/test_processing_utils.py | 38 | 19 | def test_float_conversion_dtype(self):
x = np.a | Format The Codebase
- black formatting
- isort formatting | test_float_conversion_dtype | cc0cff893f9d7d472788adc2510c123967b384fe | gradio | test_processing_utils.py | 14 | 12 | https://github.com/gradio-app/gradio.git | 2 | 87 | 0 | 33 | 137 | Python | {
"docstring": "Test any convertion from a float dtype to an other.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def test_float_conversion_dtype(self):
x = np.array([-1, 1])
# Test all combinations of dtypes conversions
dtype_combin = np.array(
np.meshgrid(
OutputPreprocessing.float_dtype_list,
OutputPreprocessing.float_dtype_list,
)
).T.reshape(-1, 2)
for dtype_in, dtype_out in dtype_combin:
x = x.astype(dtype_in)
y = gr.processing_utils._convert(x, dtype_out)
assert y.dtype == np.dtype(dtype_out)
|
|
@app.server.websocket("/api/ws") | 5,544 | 30,395 | 61 | spotdl/console/web.py | 35 | 6 | def fix_mime_types():
# Known to be problematic when Visual Studio is installed:
# <https://github.com/tensorflow/tensorboard/issues/3120>
# https://github.com/spotDL/spotify-downloader/issues/1540
mimetypes.add_type("application/javascript", ".js")
# Not known to be problematic, but used by spotDL:
mimetypes.add_type("text/css", ".css")
mimetypes.add_type("image/ | fix: broken mimetypes #1540 | fix_mime_types | de31601550e5b6b243f7a00b2bc82300f43f2d9d | spotify-downloader | web.py | 8 | 5 | https://github.com/spotDL/spotify-downloader.git | 1 | 37 | 1 | 30 | 97 | Python | {
"docstring": "Fix incorrect entries in the `mimetypes` registry.\n On Windows, the Python standard library's `mimetypes` reads in\n mappings from file extension to MIME type from the Windows\n registry. Other applications can and do write incorrect values\n to this registry, which causes `mimetypes.guess_type` to return\n incorrect values, which causes spotDL to fail to render on\n the frontend.\n This method hard-codes the correct mappings for certain MIME\n types that are known to be either used by TensorBoard or\n problematic in general.\n ",
"language": "en",
"n_whitespaces": 108,
"n_words": 78,
"vocab_size": 58
} | def fix_mime_types():
# Known to be problematic when Visual Studio is installed:
# <https://github.com/tensorflow/tensorboard/issues/3120>
# https://github.com/spotDL/spotify-downloader/issues/1540
mimetypes.add_type("application/javascript", ".js")
# Not known to be problematic, but used by spotDL:
mimetypes.add_type("text/css", ".css")
mimetypes.add_type("image/svg+xml", ".svg")
mimetypes.add_type("text/html", ".html")
@app.server.websocket("/api/ws") |
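An illustrative check of the effect, assuming the function above is in scope:

import mimetypes

fix_mime_types()
print(mimetypes.guess_type("app.js"))    # ('application/javascript', None)
print(mimetypes.guess_type("icon.svg"))  # ('image/svg+xml', None)
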
20,797 | 101,382 | 87 | scripts/convert.py | 26 | 12 | def _get_threads(self) -> MultiThread:
# TODO Check if multiple threads actually speeds anything up
save_queue = queue_manager.get_queue("convert_out")
patch_queue = queue_manager.get_queue("patch")
return MultiThread(self._converter.process, patch_queue, save_queue,
thread_count=self._pool_p | Bugfix: convert - Gif Writer
- Fix non-launch error on Gif Writer
- convert plugins - linting
- convert/fs_media/preview/queue_manager - typing
- Change convert items from dict to Dataclass | _get_threads | 1022651eb8a7741014f5d2ec7cbfe882120dfa5f | faceswap | convert.py | 9 | 11 | https://github.com/deepfakes/faceswap.git | 1 | 47 | 0 | 25 | 80 | Python | {
"docstring": " Get the threads for patching the converted faces onto the frames.\n\n Returns\n :class:`lib.multithreading.MultiThread`\n The threads that perform the patching of swapped faces onto the output frames\n ",
"language": "en",
"n_whitespaces": 59,
"n_words": 26,
"vocab_size": 18
} | def _get_threads(self) -> MultiThread:
# TODO Check if multiple threads actually speeds anything up
save_queue = queue_manager.get_queue("convert_out")
patch_queue = queue_manager.get_queue("patch")
return MultiThread(self._converter.process, patch_queue, save_queue,
thread_count=self._pool_processes, name="patch")
|
|
56,740 | 222,788 | 51 | python3.10.4/Lib/distutils/command/register.py | 23 | 8 | def verify_metadata(self):
# send the info to the server and report the result
(code, result) = self.post_to_server(self.build_post_da | add python 3.10.4 for windows | verify_metadata | 8198943edd73a363c266633e1aa5b2a9e9c9f526 | XX-Net | register.py | 11 | 3 | https://github.com/XX-net/XX-Net.git | 1 | 33 | 0 | 20 | 58 | Python | {
"docstring": " Send the metadata to the package index server to be checked.\n ",
"language": "en",
"n_whitespaces": 19,
"n_words": 11,
"vocab_size": 9
} | def verify_metadata(self):
# send the info to the server and report the result
(code, result) = self.post_to_server(self.build_post_data('verify'))
log.info('Server response (%s): %s', code, result)
|
|
17,668 | 83,381 | 119 | zerver/tests/test_subs.py | 22 | 13 | def test_subscriptions_add_for_principal_invite_only(self) -> None:
invitee = self.example_user("iago")
current_streams = self.get_streams(invitee)
invite_streams = self.make_random_stream_names(current_streams)
self.assert_adding_subscriptions_for_principal(
invitee.id,
invitee.realm,
invite_streams,
invite_only=True,
| stream_settings: Show stream privacy & description in stream events.
Provide stream privacy and description in stream notification events
when stream is created.
In function "send_messages_for_new_subscribers" for when stream is
created, put policy name and description of the stream.
Fixes #21004 | test_subscriptions_add_for_principal_invite_only | 4b9770e270823b7ed2bbbeda0e4450f0ba6a288b | zulip | test_subs.py | 9 | 14 | https://github.com/zulip/zulip.git | 1 | 55 | 0 | 20 | 90 | Python | {
"docstring": "\n You can subscribe other people to invite only streams.\n ",
"language": "en",
"n_whitespaces": 24,
"n_words": 9,
"vocab_size": 9
} | def test_subscriptions_add_for_principal_invite_only(self) -> None:
invitee = self.example_user("iago")
current_streams = self.get_streams(invitee)
invite_streams = self.make_random_stream_names(current_streams)
self.assert_adding_subscriptions_for_principal(
invitee.id,
invitee.realm,
invite_streams,
invite_only=True,
policy_name="Private, protected history",
)
|
|
960 | 6,328 | 19 | ludwig/features/feature_utils.py | 10 | 5 | def get_module_dict_key_from_name(name):
key = name.replace(".", "__ludwig_punct_peri | Enable feature names with periods in them. (#1787)
* Enable feature names with periods in them.
* Simplify new unit test. | get_module_dict_key_from_name | c9b6f4dfa32631c320d122ad07f09013769d9d5d | ludwig | feature_utils.py | 9 | 3 | https://github.com/ludwig-ai/ludwig.git | 1 | 20 | 0 | 9 | 38 | Python | {
"docstring": "Returns a key that's guaranteed to be compatible with torch.",
"language": "en",
"n_whitespaces": 9,
"n_words": 10,
"vocab_size": 10
} | def get_module_dict_key_from_name(name):
key = name.replace(".", "__ludwig_punct_period__")
return key + FEATURE_NAME_SUFFIX
|
|
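An illustrative round-trip; `FEATURE_NAME_SUFFIX` is a module-level constant not shown in the record, so the value below is an assumption for the sketch:

FEATURE_NAME_SUFFIX = "__ludwig"  # assumed value, not taken from the record

def get_module_dict_key_from_name(name):
    key = name.replace(".", "__ludwig_punct_period__")
    return key + FEATURE_NAME_SUFFIX

print(get_module_dict_key_from_name("user.age"))
# -> 'user__ludwig_punct_period__age__ludwig'
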
@pytest.mark.parametrize(
"import_strategy, expected_to_fail",
[
pytest.param(
"""
from modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import DbWorker
import pyarrow.gandiva
""",
True,
id="import_pydbe_first-pyarrow_gandiva_second",
),
pytest.param(
"""
import pyarrow.gandiva
from modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import DbWorker
""",
False,
id="import_pyarrow_gandiva_first-pydbe_second",
),
],
) | 36,121 | 154,641 | 196 | modin/test/storage_formats/hdk/test_internals.py | 41 | 20 | def test_hdk_import(import_strategy, has_other_engines):
remove_other_engines =
if not has_other_engines:
import_strategy = f"{remove_oth | FEAT-#4946: Replace OmniSci with HDK (#4947)
Co-authored-by: Iaroslav Igoshev <[email protected]>
Signed-off-by: Andrey Pavlenko <[email protected]> | test_hdk_import | e5b1888cd932909e49194d58035da34b210b91c4 | modin | test_internals.py | 12 | 15 | https://github.com/modin-project/modin.git | 3 | 66 | 1 | 34 | 182 | Python | {
"docstring": "\n Test import of HDK engine.\n\n The import of DbWorker requires to set special dlopen flags which make it then\n incompatible to import some other libraries further (like ``pyarrow.gandiva``).\n This test verifies that it's not the case when a user naturally imports Modin\n with HDK engine.\n\n Parameters\n ----------\n import_strategy : str\n There are several scenarios of how a user can import Modin with HDK engine:\n configure Modin first to use HDK engine and then import ``modin.pandas`` or vice versa.\n This parameters holds a python code, implementing one of these scenarios.\n has_other_engines : bool\n The problem with import may appear depending on whether other engines are\n installed. This parameter indicates whether to remove modules for\n non-hdk engines before the test.\n\n Notes\n -----\n The failed import flow may cause segfault, which causes to crash the pytest itself.\n This makes us to run the test in a separate process and check its exit-code to\n decide the success of the test.\n \nimport sys\nsys.modules['ray'] = None\nsys.modules['dask'] = None\n\nfrom modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import DbWorker\nimport pyarrow.gandiva\n\nimport pyarrow.gandiva\nfrom modin.experimental.core.execution.native.implementations.hdk_on_native.db_worker import DbWorker\n",
"language": "en",
"n_whitespaces": 257,
"n_words": 176,
"vocab_size": 115
} | def test_hdk_import(import_strategy, has_other_engines):
remove_other_engines =
if not has_other_engines:
import_strategy = f"{remove_other_engines}\n{import_strategy}"
res = subprocess.run(
[sys.executable, "-c", import_strategy],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
)
if res.returncode != 0:
pytest.fail(str(res.stderr))
@pytest.mark.parametrize(
"import_strategy, expected_to_fail",
[
pytest.param(
,
True,
id="import_pydbe_first-pyarrow_gandiva_second",
),
pytest.param(
,
False,
id="import_pyarrow_gandiva_first-pydbe_second",
),
],
) |
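The subprocess dance in the record above deserves a note: a segfault during a bad import kills the interpreter outright and cannot be caught as a Python exception, but it does surface as the child's return code, so the parent test survives and can assert on it. A self-contained sketch of that probe (the helper name is illustrative):

# Hedged sketch: run arbitrary import code in a fresh interpreter and report
# its exit code; a crash in the child shows up as a non-zero (negative on
# POSIX) return code instead of killing the caller.
import subprocess
import sys


def run_import_probe(code: str) -> int:
    res = subprocess.run([sys.executable, "-c", code], capture_output=True)
    return res.returncode


assert run_import_probe("import sys; print(sys.version)") == 0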
29,214 | 130,289 | 665 | python/ray/_private/thirdparty/pathspec/util.py | 240 | 33 | def _iter_tree_entries_next(root_full, dir_rel, memo, on_error, follow_links):
dir_full = os.path.join(root_full, dir_rel)
dir_real = os.path.realpath(dir_full)
# Remember each encountered ancestor directory and its canonical
# (real) path. If a canonical path is encountered more than once,
# recursion has occurred.
if dir_real not in memo:
| [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | _iter_tree_entries_next | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | util.py | 16 | 38 | https://github.com/ray-project/ray.git | 14 | 249 | 0 | 140 | 396 | Python | {
"docstring": "\n Scan the directory for all descendant files.\n\n *root_full* (:class:`str`) the absolute path to the root directory.\n\n *dir_rel* (:class:`str`) the path to the directory to scan relative to\n *root_full*.\n\n *memo* (:class:`dict`) keeps track of ancestor directories\n encountered. Maps each ancestor real path (:class:`str`) to relative\n path (:class:`str`).\n\n *on_error* (:class:`~collections.abc.Callable` or :data:`None`)\n optionally is the error handler for file-system exceptions.\n\n *follow_links* (:class:`bool`) is whether to walk symbolic links that\n resolve to directories.\n\n Yields each entry (:class:`.TreeEntry`).\n ",
"language": "en",
"n_whitespaces": 114,
"n_words": 74,
"vocab_size": 52
} | def _iter_tree_entries_next(root_full, dir_rel, memo, on_error, follow_links):
dir_full = os.path.join(root_full, dir_rel)
dir_real = os.path.realpath(dir_full)
# Remember each encountered ancestor directory and its canonical
# (real) path. If a canonical path is encountered more than once,
# recursion has occurred.
if dir_real not in memo:
memo[dir_real] = dir_rel
else:
raise RecursionError(
real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel
)
for node_name in os.listdir(dir_full):
node_rel = os.path.join(dir_rel, node_name)
node_full = os.path.join(root_full, node_rel)
# Inspect child node.
try:
node_lstat = os.lstat(node_full)
except OSError as e:
if on_error is not None:
on_error(e)
continue
if stat.S_ISLNK(node_lstat.st_mode):
# Child node is a link, inspect the target node.
is_link = True
try:
node_stat = os.stat(node_full)
except OSError as e:
if on_error is not None:
on_error(e)
continue
else:
is_link = False
node_stat = node_lstat
if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):
# Child node is a directory, recurse into it and yield its
# descendant files.
yield TreeEntry(node_name, node_rel, node_lstat, node_stat)
for entry in _iter_tree_entries_next(
root_full, node_rel, memo, on_error, follow_links
):
yield entry
elif stat.S_ISREG(node_stat.st_mode) or is_link:
# Child node is either a file or an unfollowed link, yield it.
yield TreeEntry(node_name, node_rel, node_lstat, node_stat)
# NOTE: Make sure to remove the canonical (real) path of the directory
# from the ancestors memo once we are done with it. This allows the
# same directory to appear multiple times. If this is not done, the
# second occurrence of the directory will be incorrectly interpreted
# as a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.
del memo[dir_real]
|
|
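The memo parameter in the record above is the whole cycle-detection mechanism: every ancestor directory's canonical path is recorded on the way down and deleted on the way back up, so hitting the same real path twice within one descent means a symlink loop. A stripped-down sketch of the same idea (standalone names, not pathspec's API; directory symlinks are always followed here):

# Hedged sketch: yields file paths relative to root, raising on symlink cycles.
import os


def walk_files(root, rel="", _memo=None):
    _memo = {} if _memo is None else _memo
    real = os.path.realpath(os.path.join(root, rel))
    if real in _memo:
        raise RuntimeError(f"symlink recursion: {rel!r} revisits {_memo[real]!r}")
    _memo[real] = rel
    for name in os.listdir(os.path.join(root, rel)):
        child = os.path.join(rel, name)
        if os.path.isdir(os.path.join(root, child)):  # follows symlinks
            yield from walk_files(root, child, _memo)
        else:
            yield child
    del _memo[real]  # the same dir may legally reappear on a different branch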
47,723 | 196,223 | 318 | sympy/combinatorics/util.py | 88 | 24 | def _remove_gens(base, strong_gens, basic_orbits=None, strong_gens_distr=None):
from sympy.combinatorics.perm_groups import _orbit
base_len = len(base)
degree = strong_gens[0].size
if strong_gens_distr is None:
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
if basic_orbits is None:
basic_orbits = []
for i in range(base_len):
basic_orbit = _orbit(degree, strong_gens_distr[i], base[i])
basic_orbits.append(basic_orbit)
strong_gens_distr.append([])
res = strong_gens[:]
for i in range(base_len - 1, -1, -1):
gens_copy = strong_gens_distr[i][:]
for gen in strong_gens_distr[i]:
if gen not in strong_gens_distr[i + 1]:
temp_gens = gens_copy[:]
temp_gens.remove(gen)
if temp_gens == []:
continue
temp_orbit = _orbit(degree, temp_gens, | Updated import locations | _remove_gens | 498015021131af4dbb07eb110e5badaba8250c7b | sympy | util.py | 15 | 26 | https://github.com/sympy/sympy.git | 9 | 201 | 0 | 59 | 308 | Python | {
"docstring": "\n Remove redundant generators from a strong generating set.\n\n Parameters\n ==========\n\n ``base`` - a base\n ``strong_gens`` - a strong generating set relative to ``base``\n ``basic_orbits`` - basic orbits\n ``strong_gens_distr`` - strong generators distributed by membership in basic\n stabilizers\n\n Returns\n =======\n\n A strong generating set with respect to ``base`` which is a subset of\n ``strong_gens``.\n\n Examples\n ========\n\n >>> from sympy.combinatorics import SymmetricGroup\n >>> from sympy.combinatorics.util import _remove_gens\n >>> from sympy.combinatorics.testutil import _verify_bsgs\n >>> S = SymmetricGroup(15)\n >>> base, strong_gens = S.schreier_sims_incremental()\n >>> new_gens = _remove_gens(base, strong_gens)\n >>> len(new_gens)\n 14\n >>> _verify_bsgs(S, base, new_gens)\n True\n\n Notes\n =====\n\n This procedure is outlined in [1],p.95.\n\n References\n ==========\n\n .. [1] Holt, D., Eick, B., O'Brien, E.\n \"Handbook of computational group theory\"\n\n ",
"language": "en",
"n_whitespaces": 219,
"n_words": 115,
"vocab_size": 79
} | def _remove_gens(base, strong_gens, basic_orbits=None, strong_gens_distr=None):
from sympy.combinatorics.perm_groups import _orbit
base_len = len(base)
degree = strong_gens[0].size
if strong_gens_distr is None:
strong_gens_distr = _distribute_gens_by_base(base, strong_gens)
if basic_orbits is None:
basic_orbits = []
for i in range(base_len):
basic_orbit = _orbit(degree, strong_gens_distr[i], base[i])
basic_orbits.append(basic_orbit)
strong_gens_distr.append([])
res = strong_gens[:]
for i in range(base_len - 1, -1, -1):
gens_copy = strong_gens_distr[i][:]
for gen in strong_gens_distr[i]:
if gen not in strong_gens_distr[i + 1]:
temp_gens = gens_copy[:]
temp_gens.remove(gen)
if temp_gens == []:
continue
temp_orbit = _orbit(degree, temp_gens, base[i])
if temp_orbit == basic_orbits[i]:
gens_copy.remove(gen)
res.remove(gen)
return res
|
|
50,758 | 204,505 | 64 | django/core/files/uploadedfile.py | 10 | 4 | def from_dict(cls, file_dict):
return cls(
file_dict["filename"],
file_dict["content"],
file_dict.get("con | Refs #33476 -- Reformatted code with Black. | from_dict | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | uploadedfile.py | 10 | 6 | https://github.com/django/django.git | 1 | 31 | 0 | 10 | 54 | Python | {
"docstring": "\n Create a SimpleUploadedFile object from a dictionary with keys:\n - filename\n - content-type\n - content\n ",
"language": "en",
"n_whitespaces": 60,
"n_words": 15,
"vocab_size": 12
} | def from_dict(cls, file_dict):
return cls(
file_dict["filename"],
file_dict["content"],
file_dict.get("content-type", "text/plain"),
)
|
|
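A hedged usage sketch for the classmethod above, assuming a stock Django install; the dictionary keys are exactly the three named in the docstring, with "content-type" optional:

from django.core.files.uploadedfile import SimpleUploadedFile

f = SimpleUploadedFile.from_dict({"filename": "notes.txt", "content": b"hello"})
assert f.name == "notes.txt"
assert f.content_type == "text/plain"  # the documented fallback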
52,073 | 207,730 | 189 | tests/admin_views/tests.py | 41 | 7 | def test_change_list_sorting_callable(self):
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": 2}
)
self.assertContentBefore | Refs #33476 -- Reformatted code with Black. | test_change_list_sorting_callable | 9c19aff7c7561e3a82978a272ecdaad40dda5c00 | django | tests.py | 11 | 16 | https://github.com/django/django.git | 1 | 51 | 0 | 23 | 92 | Python | {
"docstring": "\n Ensure we can sort on a list_display field that is a callable\n (column 2 is callable_year in ArticleAdmin)\n ",
"language": "en",
"n_whitespaces": 40,
"n_words": 18,
"vocab_size": 16
} | def test_change_list_sorting_callable(self):
response = self.client.get(
reverse("admin:admin_views_article_changelist"), {"o": 2}
)
self.assertContentBefore(
response,
"Oldest content",
"Middle content",
"Results of sorting on callable are out of order.",
)
self.assertContentBefore(
response,
"Middle content",
"Newest content",
"Results of sorting on callable are out of order.",
)
|
|
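For context, this is roughly what a sortable callable column looks like in a Django ModelAdmin. The names mirror the callable_year/ArticleAdmin fixture the docstring mentions, but the underlying "date" field is an assumption; without admin_order_field the "o" query parameter used above would have nothing to sort by:

# Hedged sketch inside an assumed configured Django project.
from django.contrib import admin


def callable_year(obj):
    return obj.date.year


callable_year.admin_order_field = "date"   # makes the column sortable
callable_year.short_description = "Year"


class ArticleAdmin(admin.ModelAdmin):
    list_display = ("content", callable_year)  # the callable is a regular column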
20,789 | 101,374 | 107 | scripts/convert.py | 32 | 16 | def pre_encode(self) -> Optional[Callable[[np.ndarray], List[bytes]]]:
dummy = np.zeros((20, 20, 3), dtype="uint8")
test = self._writer.pre_encode(dummy)
retval: O | Bugfix: convert - Gif Writer
- Fix non-launch error on Gif Writer
- convert plugins - linting
- convert/fs_media/preview/queue_manager - typing
- Change convert items from dict to Dataclass | pre_encode | 1022651eb8a7741014f5d2ec7cbfe882120dfa5f | faceswap | convert.py | 11 | 9 | https://github.com/deepfakes/faceswap.git | 2 | 91 | 0 | 27 | 138 | Python | {
"docstring": " python function: Selected writer's pre-encode function, if it has one,\n otherwise ``None`` ",
"language": "en",
"n_whitespaces": 20,
"n_words": 12,
"vocab_size": 12
} | def pre_encode(self) -> Optional[Callable[[np.ndarray], List[bytes]]]:
dummy = np.zeros((20, 20, 3), dtype="uint8")
test = self._writer.pre_encode(dummy)
retval: Optional[Callable[[np.ndarray],
List[bytes]]] = None if test is None else self._writer.pre_encode
logger.debug("Writer pre_encode function: %s", retval)
return retval
|
|
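The property above is a feature probe: call the writer's pre_encode once with a tiny dummy frame and treat a None result as "this writer has no pre-encoding step". A self-contained sketch with a stub writer (real plugins live in faceswap's writer modules):

import numpy as np


class StubGifWriter:  # stand-in for a writer plugin
    def pre_encode(self, image):
        return None  # e.g. GIF frames are written without pre-encoding


writer = StubGifWriter()
dummy = np.zeros((20, 20, 3), dtype="uint8")
pre_encoder = None if writer.pre_encode(dummy) is None else writer.pre_encode
assert pre_encoder is None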
25,459 | 115,431 | 59 | mindsdb/integrations/handlers/sqlite_handler/sqlite_handler.py | 13 | 5 | def disconnect(self):
| added connection_args and connection_args_example dicts | disconnect | fc9776d9b342f873cbb3f36fd39955b9e1ea6f76 | mindsdb | sqlite_handler.py | 8 | 6 | https://github.com/mindsdb/mindsdb.git | 2 | 30 | 0 | 10 | 52 | Python | {
"docstring": "\r\n Close any existing connections.\r\n ",
"language": "en",
"n_whitespaces": 19,
"n_words": 4,
"vocab_size": 4
} | def disconnect(self):
if self.is_connected is False:
return
self.connection.close()
self.is_connected = False
return self.is_connected
|
|
30,029 | 133,434 | 196 | python/ray/workflow/step_executor.py | 60 | 27 | def resolve(self) -> Tuple[List, Dict]:
objects_mapping = []
for obj_ref in self.workflow_outputs:
obj, ref = _resolve_object_ref(obj_ref.ref)
objects_mapping.append(o | [CI] Format Python code with Black (#21975)
See #21316 and #21311 for the motivation behind these changes. | resolve | 7f1bacc7dc9caf6d0ec042e39499bbf1d9a7d065 | ray | step_executor.py | 11 | 30 | https://github.com/ray-project/ray.git | 4 | 103 | 0 | 49 | 164 | Python | {
"docstring": "\n This function resolves the inputs for the code inside\n a workflow step (works on the callee side). For outputs from other\n workflows, we resolve them into object instances inplace.\n\n For each ObjectRef argument, the function returns both the ObjectRef\n and the object instance. If the ObjectRef is a chain of nested\n ObjectRefs, then we resolve it recursively until we get the\n object instance, and we return the *direct* ObjectRef of the\n instance. This function does not resolve ObjectRef\n inside another object (e.g. list of ObjectRefs) to give users some\n flexibility.\n\n Returns:\n Instances of arguments.\n ",
"language": "en",
"n_whitespaces": 190,
"n_words": 94,
"vocab_size": 62
} | def resolve(self) -> Tuple[List, Dict]:
objects_mapping = []
for obj_ref in self.workflow_outputs:
obj, ref = _resolve_object_ref(obj_ref.ref)
objects_mapping.append(obj)
workflow_ref_mapping = _resolve_dynamic_workflow_refs(self.workflow_refs)
with serialization_context.workflow_args_resolving_context(
objects_mapping, workflow_ref_mapping
):
# reconstruct input arguments under correct serialization context
flattened_args: List[Any] = ray.get(self.args)
# dereference arguments like Ray remote functions
flattened_args = [
ray.get(a) if isinstance(a, ObjectRef) else a for a in flattened_args
]
return signature.recover_args(flattened_args)
|
|
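The _resolve_object_ref helper used above is not shown in this record; a sketch of its assumed semantics, following the docstring: unwrap a chain of nested ObjectRefs with repeated ray.get calls while remembering the direct ref of the final instance (running this requires an initialized Ray session):

import ray
from ray import ObjectRef


def resolve_object_ref(ref: ObjectRef):
    # Assumed behaviour: peel nested refs until a plain object appears.
    obj = ref
    while isinstance(obj, ObjectRef):
        ref = obj          # direct ref of whatever ray.get returns next
        obj = ray.get(obj)
    return obj, ref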
80,573 | 270,768 | 35 | keras/engine/base_layer.py | 10 | 6 | def _maybe_create_attribute(self, name, default_value):
| Reformatting the codebase with black.
PiperOrigin-RevId: 450093126 | _maybe_create_attribute | 84afc5193d38057e2e2badf9c889ea87d80d8fbf | keras | base_layer.py | 9 | 3 | https://github.com/keras-team/keras.git | 2 | 27 | 0 | 10 | 43 | Python | {
"docstring": "Create the attribute with the default value if it hasn't been created.\n\n This is useful for fields that is used for tracking purpose,\n _trainable_weights, or _layers. Note that user could create a layer subclass\n and assign an internal field before invoking the Layer.__init__(), the\n __setattr__() need to create the tracking fields and __init__() need to not\n override them.\n\n Args:\n name: String, the name of the attribute.\n default_value: Object, the default value of the attribute.\n ",
"language": "en",
"n_whitespaces": 141,
"n_words": 74,
"vocab_size": 53
} | def _maybe_create_attribute(self, name, default_value):
if not hasattr(self, name):
self.__setattr__(name, default_value)
|
|
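A minimal runnable illustration of the guard above: the second call must be a no-op, so tracking state created before (or during) __init__ is never clobbered:

class Tracker:
    def _maybe_create_attribute(self, name, default_value):
        if not hasattr(self, name):
            setattr(self, name, default_value)


t = Tracker()
t._maybe_create_attribute("_layers", [])
t._layers.append("dense")
t._maybe_create_attribute("_layers", [])  # no-op: attribute already exists
assert t._layers == ["dense"]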
42,421 | 177,527 | 283 | networkx/classes/digraph.py | 56 | 17 | def add_nodes_from(self, nodes_for_adding, **attr):
for n in nodes_for_adding:
try:
newnode = n not in self._node
newdict = attr
except Type | doc: update documentation when providing an iterator over current graph to add/remove_edges_from. (#6268)
* doc for add_edges_from
* doc for digraph
* doc for multigraph
* multigraph.add_nodes_from returns keylist
* update docs for graph - edges
* doc update: graph.add_nodes_from
* doc update: graph.remove_nodes_from
* doc update: graph.add_edges_from
* doc update: rewording for graph.add_edges_from
* doc update: graph.add_weighted_edges_from rewording
* doc update: digraph updated as graph
* doc update: digraph minor sync
* doc update: multigraph same as graph
* Update graph.py
* Update digraph.py
* Update multigraph.py | add_nodes_from | 979d54acba7c3d372c93d44c6c149700608ce8b0 | networkx | digraph.py | 14 | 17 | https://github.com/networkx/networkx.git | 5 | 118 | 0 | 37 | 191 | Python | {
"docstring": "Add multiple nodes.\n\n Parameters\n ----------\n nodes_for_adding : iterable container\n A container of nodes (list, dict, set, etc.).\n OR\n A container of (node, attribute dict) tuples.\n Node attributes are updated using the attribute dict.\n attr : keyword arguments, optional (default= no attributes)\n Update attributes for all nodes in nodes.\n Node attributes specified in nodes as a tuple take\n precedence over attributes specified via keyword arguments.\n\n See Also\n --------\n add_node\n\n Notes\n -------\n When adding nodes from an iterator over the graph you are changing,\n a `RuntimeError` can be raised with message:\n `RuntimeError: dictionary changed size during iteration`. This\n happens when the graph's underlying dictionary is modified during\n iteration. To avoid this error, evaluate the iterator into a separate\n object, e.g. by using `list(iterator_of_nodes)`, and pass this\n object to `G.add_nodes_from`.\n\n Examples\n --------\n >>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc\n >>> G.add_nodes_from(\"Hello\")\n >>> K3 = nx.Graph([(0, 1), (1, 2), (2, 0)])\n >>> G.add_nodes_from(K3)\n >>> sorted(G.nodes(), key=str)\n [0, 1, 2, 'H', 'e', 'l', 'o']\n\n Use keywords to update specific node attributes for every node.\n\n >>> G.add_nodes_from([1, 2], size=10)\n >>> G.add_nodes_from([3, 4], weight=0.4)\n\n Use (node, attrdict) tuples to update attributes for specific nodes.\n\n >>> G.add_nodes_from([(1, dict(size=11)), (2, {\"color\": \"blue\"})])\n >>> G.nodes[1][\"size\"]\n 11\n >>> H = nx.Graph()\n >>> H.add_nodes_from(G.nodes(data=True))\n >>> H.nodes[1][\"size\"]\n 11\n\n Evaluate an iterator over a graph if using it to modify the same graph\n\n >>> G = nx.DiGraph([(0, 1), (1, 2), (3, 4)])\n >>> # wrong way - will raise RuntimeError\n >>> # G.add_nodes_from(n + 1 for n in G.nodes)\n >>> # correct way\n >>> G.add_nodes_from(list(n + 1 for n in G.nodes))\n ",
"language": "en",
"n_whitespaces": 632,
"n_words": 260,
"vocab_size": 173
} | def add_nodes_from(self, nodes_for_adding, **attr):
for n in nodes_for_adding:
try:
newnode = n not in self._node
newdict = attr
except TypeError:
n, ndict = n
newnode = n not in self._node
newdict = attr.copy()
newdict.update(ndict)
if newnode:
if n is None:
raise ValueError("None cannot be a node")
self._succ[n] = self.adjlist_inner_dict_factory()
self._pred[n] = self.adjlist_inner_dict_factory()
self._node[n] = self.node_attr_dict_factory()
self._node[n].update(newdict)
|
|
15,860 | 72,246 | 393 | wagtail/admin/tests/test_workflows.py | 153 | 22 | def test_submitted_email_notifications_sent(self):
self.login(self.submitter)
self.submit()
self.assertEqual(len(mail.outbox), 4)
task_submission_emails = [
email for email in mail.outbox if "task" in email.subject
]
task_submission_emailed_addresses = [
address for email in task_submission_emails for address in email.to
]
workflow_submission_emails = [
email for email in mail.outbox if "workflow" in email.subject
]
workflow_submission_emailed_addresses = [
address for email in workflow_submission_emails for address in email.t | Reformat with black | test_submitted_email_notifications_sent | d10f15e55806c6944827d801cd9c2d53f5da4186 | wagtail | test_workflows.py | 10 | 26 | https://github.com/wagtail/wagtail.git | 9 | 214 | 0 | 64 | 335 | Python | {
"docstring": "Test that 'submitted' notifications for WorkflowState and TaskState are both sent correctly",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def test_submitted_email_notifications_sent(self):
self.login(self.submitter)
self.submit()
self.assertEqual(len(mail.outbox), 4)
task_submission_emails = [
email for email in mail.outbox if "task" in email.subject
]
task_submission_emailed_addresses = [
address for email in task_submission_emails for address in email.to
]
workflow_submission_emails = [
email for email in mail.outbox if "workflow" in email.subject
]
workflow_submission_emailed_addresses = [
address for email in workflow_submission_emails for address in email.to
]
self.assertEqual(len(task_submission_emails), 3)
# the moderator is in the Group assigned to the GroupApproval task, so should get an email
self.assertIn(self.moderator.email, task_submission_emailed_addresses)
self.assertIn(self.moderator2.email, task_submission_emailed_addresses)
# with `WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS`, the superuser should get a task email
self.assertIn(self.superuser.email, task_submission_emailed_addresses)
# the submitter triggered this workflow update, so should not get an email
self.assertNotIn(self.submitter.email, task_submission_emailed_addresses)
self.assertEqual(len(workflow_submission_emails), 1)
# the moderator should not get a workflow email
self.assertNotIn(self.moderator.email, workflow_submission_emailed_addresses)
self.assertNotIn(self.moderator2.email, workflow_submission_emailed_addresses)
# with `WAGTAILADMIN_NOTIFICATION_INCLUDE_SUPERUSERS`, the superuser should get a workflow email
self.assertIn(self.superuser.email, workflow_submission_emailed_addresses)
# as the submitter was the triggering user, the submitter should not get an email notification
self.assertNotIn(self.submitter.email, workflow_submission_emailed_addresses)
|
|
6,543 | 35,810 | 535 | src/transformers/models/maskformer/modeling_maskformer.py | 229 | 51 | def forward(self, masks_queries_logits, class_queries_logits, mask_labels, class_labels) -> List[Tuple[Tensor]]:
indices: List[Tuple[np.array]] = []
preds_masks = masks_queries_logits
preds_probs = class_queries_logits.softmax(dim=-1)
# downsample all masks in one go -> save memory
mask_labels = nn.functional.interpolate(mask_labels, size=preds_masks.shape[-2:], mode="nearest")
# iterate through batch size
for pred_probs, pred_mask, target_mask, labels in zip(preds_probs, preds_masks, mask_labels, class_labels):
# Compute the classification cost. Contrary to the loss, we don't use the NLL,
# but approximate it in 1 - proba[target class].
# The 1 is a constant that doesn't change the matching, so it can be omitted.
cost_class = -pred_probs[:, labels]
# flatten spatial dimension "q h w -> q (h w)"
num_queries, height, width = pred_mask.shape
pred_mask_flat = pred_mask.view(num_queries, height * width) # [num_queries, H*W]
# same for target_mask "c h w -> c (h w)"
num_channels, height, width = target_mask.shape
target_mask_flat = target_mask.view(num_channels, height * width) # [num_total_labels, H*W]
# compute the focal loss between each mask pair -> shape [NUM_QUERIES, CLASSES]
cost_mask = pair_wise_sigmoid_focal_loss(pred_mask_flat, target_mask_flat)
# Compute the dice loss between each mask pair -> shape [NUM_QUERIES, CLASSES]
cost_dice = pair_wise_dice_loss(pred_mask_flat, target_mask_flat)
# final cost matrix
cost_matrix = self.cost_mask * cost_mask + self.cost_class * cost_class + self.cost_dice * cost_dice
# do the assignment using the Hungarian algorithm in scipy
assigned_indices: Tuple[np.array] = linear_sum_assignment(cost_matrix.cpu())
indices.append(assigned_indices)
| Maskformer (#15682)
* maskformer
* conflicts
* conflicts
* minor fixes
* feature extractor test fix
refactor MaskFormerLoss following conversation
MaskFormer related types should not trigger a module time import error
missed one
removed all the types that are not used
update config mapping
minor updates in the doc
resolved conversation that doesn't need a discussion
minor changes
resolved conversations
fixed DetrDecoder
* minor changes
minor changes
fixed mdx file
test feature_extractor return types
functional losses -> classes
removed the return type test for the feature extractor
minor changes + style + quality
* conflicts?
* rebase master
* readme
* added missing files
* deleted poolformers test that was in the wrong place
* CI
* minor changes
* Apply suggestions from code review
Co-authored-by: NielsRogge <[email protected]>
* resolved conversations
* minor changes
* conversations
[Unispeech] Fix slow tests (#15818)
* remove soundfile old way of loading audio
* Adapt slow test
[Barthez Tokenizer] Fix saving (#15815)
[TFXLNet] Correct tf xlnet generate (#15822)
* [TFXLNet] Correct tf xlnet
* adapt test comment
Fix the push run (#15807)
Fix semantic segmentation pipeline test (#15826)
Fix dummy_inputs() to dummy_inputs in symbolic_trace doc (#15776)
Add model specific output classes to PoolFormer model docs (#15746)
* Added model specific output classes to poolformer docs
* Fixed Segformer typo in Poolformer docs
Adding the option to return_timestamps on pure CTC ASR models. (#15792)
* Adding the option to return_timestamps on pure CTC ASR models.
* Remove `math.prod` which was introduced in Python 3.8
* int are not floats.
* Reworking the PR to support "char" vs "word" output.
* Fixup!
* Update src/transformers/pipelines/automatic_speech_recognition.py
Co-authored-by: Patrick von Platen <[email protected]>
* Update src/transformers/pipelines/automatic_speech_recognition.py
Co-authored-by: Patrick von Platen <[email protected]>
* Update src/transformers/pipelines/automatic_speech_recognition.py
Co-authored-by: Patrick von Platen <[email protected]>
* Update src/transformers/pipelines/automatic_speech_recognition.py
Co-authored-by: Patrick von Platen <[email protected]>
* Update src/transformers/pipelines/automatic_speech_recognition.py
Co-authored-by: Patrick von Platen <[email protected]>
* Update src/transformers/pipelines/automatic_speech_recognition.py
Co-authored-by: Patrick von Platen <[email protected]>
* Update src/transformers/pipelines/automatic_speech_recognition.py
Co-authored-by: Patrick von Platen <[email protected]>
* Update src/transformers/pipelines/automatic_speech_recognition.py
Co-authored-by: Patrick von Platen <[email protected]>
* Update src/transformers/pipelines/automatic_speech_recognition.py
Co-authored-by: Patrick von Platen <[email protected]>
* Quality.
Co-authored-by: Patrick von Platen <[email protected]>
HFTracer.trace should use/return self.graph to be compatible with torch.fx.Tracer (#15824)
Fix tf.concatenate + test past_key_values for TF models (#15774)
* fix wrong method name tf.concatenate
* add tests related to causal LM / decoder
* make style and quality
* clean-up
* Fix TFBertModel's extended_attention_mask when past_key_values is provided
* Fix tests
* fix copies
* More tf.int8 -> tf.int32 in TF test template
* clean-up
* Update TF test template
* revert the previous commit + update the TF test template
* Fix TF template extended_attention_mask when past_key_values is provided
* Fix some styles manually
* clean-up
* Fix ValueError: too many values to unpack in the test
* Fix more: too many values to unpack in the test
* Add a comment for extended_attention_mask when there is past_key_values
* Fix TFElectra extended_attention_mask when past_key_values is provided
* Add tests to other TF models
* Fix for TF Electra test: add prepare_config_and_inputs_for_decoder
* Fix not passing training arg to lm_head in TFRobertaForCausalLM
* Fix tests (with past) for TF Roberta
* add testing for pask_key_values for TFElectra model
Co-authored-by: ydshieh <[email protected]>
[examples/summarization and translation] fix readme (#15833)
Add ONNX Runtime quantization for text classification notebook (#15817)
Re-enable doctests for the quicktour (#15828)
* Re-enable doctests for the quicktour
* Re-enable doctests for task_summary (#15830)
* Remove &
Framework split model report (#15825)
Add TFConvNextModel (#15750)
* feat: initial implementation of convnext in tensorflow.
* fix: sample code for the classification model.
* chore: added checked for from the classification model.
* chore: set bias initializer in the classification head.
* chore: updated license terms.
* chore: removed unused imports
* feat: enabled argument during using drop_path.
* chore: replaced tf.identity with layers.Activation(linear).
* chore: edited default checkpoint.
* fix: minor bugs in the initializations.
* partial-fix: tf model errors for loading pretrained pt weights.
* partial-fix: call method updated
* partial-fix: cross loading of weights (4x3 variables to be matched)
* chore: removed unneeded comment.
* removed playground.py
* rebasing
* rebasing and removing playground.py.
* fix: renaming TFConvNextStage conv and layer norm layers
* chore: added initializers and other minor additions.
* chore: added initializers and other minor additions.
* add: tests for convnext.
* fix: integration tester class.
* fix: issues mentioned in pr feedback (round 1).
* fix: how output_hidden_states arg is propagated inside the network.
* feat: handling of arg for pure cnn models.
* chore: added a note on equal contribution in model docs.
* rebasing
* rebasing and removing playground.py.
* feat: encapsulation for the convnext trunk.
* Fix variable naming; Test-related corrections; Run make fixup
* chore: added Joao as a contributor to convnext.
* rebasing
* rebasing and removing playground.py.
* rebasing
* rebasing and removing playground.py.
* chore: corrected copyright year and added comment on NHWC.
* chore: fixed the black version and ran formatting.
* chore: ran make style.
* chore: removed from_pt argument from test, ran make style.
* rebasing
* rebasing and removing playground.py.
* rebasing
* rebasing and removing playground.py.
* fix: tests in the convnext subclass, ran make style.
* rebasing
* rebasing and removing playground.py.
* rebasing
* rebasing and removing playground.py.
* chore: moved convnext test to the correct location
* fix: locations for the test file of convnext.
* fix: convnext tests.
* chore: applied sgugger's suggestion for dealing w/ output_attentions.
* chore: added comments.
* chore: applied updated quality environment style.
* chore: applied formatting with quality environment.
* chore: revert to the previous tests/test_modeling_common.py.
* chore: revert to the original test_modeling_common.py
* chore: revert to previous states for test_modeling_tf_common.py and modeling_tf_utils.py
* fix: tests for convnext.
* chore: removed output_attentions argument from convnext config.
* chore: revert to the earlier tf utils.
* fix: output shapes of the hidden states
* chore: removed unnecessary comment
* chore: reverting to the right test_modeling_tf_common.py.
* Styling nits
Co-authored-by: ariG23498 <[email protected]>
Co-authored-by: Joao Gante <[email protected]>
Co-authored-by: Sylvain Gugger <[email protected]>
* minor changes
* doc fix in feature extractor
* doc
* typos
* removed detr logic from config
* removed detr logic from config
* removed num_labels
* small fix in the config
* auxilary -> auxiliary
* make style
* some test is failing
* fix a weird char in config preventing doc-builder
* retry to fix the doc-builder issue
* make style
* new try to fix the doc builder
* CI
* change weights to facebook
Co-authored-by: NielsRogge <[email protected]>
Co-authored-by: ariG23498 <[email protected]>
Co-authored-by: Joao Gante <[email protected]>
Co-authored-by: Sylvain Gugger <[email protected]> | forward | d83d22f578276e9f201b0b3b0f8f9bd68e86c133 | transformers | modeling_maskformer.py | 12 | 44 | https://github.com/huggingface/transformers.git | 3 | 243 | 0 | 152 | 376 | Python | {
"docstring": "Performs the matching\n\n Params:\n masks_queries_logits (`torch.Tensor`):\n A tensor` of dim `batch_size, num_queries, num_classes` with the\n classification logits.\n class_queries_logits (`torch.Tensor`):\n A tensor` of dim `batch_size, num_queries, height, width` with the\n predicted masks.\n\n class_labels (`torch.Tensor`):\n A tensor` of dim `num_target_boxes` (where num_target_boxes is the number\n of ground-truth objects in the target) containing the class labels.\n mask_labels (`torch.Tensor`):\n A tensor` of dim `num_target_boxes, height, width` containing the target\n masks.\n\n Returns:\n `List[Tuple[Tensor]]`: A list of size batch_size, containing tuples of (index_i, index_j) where:\n - index_i is the indices of the selected predictions (in order)\n - index_j is the indices of the corresponding selected labels (in order)\n For each batch element, it holds:\n len(index_i) = len(index_j) = min(num_queries, num_target_boxes).\n ",
"language": "en",
"n_whitespaces": 374,
"n_words": 114,
"vocab_size": 67
} | def forward(self, masks_queries_logits, class_queries_logits, mask_labels, class_labels) -> List[Tuple[Tensor]]:
indices: List[Tuple[np.array]] = []
preds_masks = masks_queries_logits
preds_probs = class_queries_logits.softmax(dim=-1)
# downsample all masks in one go -> save memory
mask_labels = nn.functional.interpolate(mask_labels, size=preds_masks.shape[-2:], mode="nearest")
# iterate through batch size
for pred_probs, pred_mask, target_mask, labels in zip(preds_probs, preds_masks, mask_labels, class_labels):
# Compute the classification cost. Contrary to the loss, we don't use the NLL,
# but approximate it in 1 - proba[target class].
# The 1 is a constant that doesn't change the matching, so it can be omitted.
cost_class = -pred_probs[:, labels]
# flatten spatial dimension "q h w -> q (h w)"
num_queries, height, width = pred_mask.shape
pred_mask_flat = pred_mask.view(num_queries, height * width) # [num_queries, H*W]
# same for target_mask "c h w -> c (h w)"
num_channels, height, width = target_mask.shape
target_mask_flat = target_mask.view(num_channels, height * width) # [num_total_labels, H*W]
# compute the focal loss between each mask pair -> shape [NUM_QUERIES, CLASSES]
cost_mask = pair_wise_sigmoid_focal_loss(pred_mask_flat, target_mask_flat)
# Compute the dice loss between each mask pair -> shape [NUM_QUERIES, CLASSES]
cost_dice = pair_wise_dice_loss(pred_mask_flat, target_mask_flat)
# final cost matrix
cost_matrix = self.cost_mask * cost_mask + self.cost_class * cost_class + self.cost_dice * cost_dice
# do the assignment using the Hungarian algorithm in scipy
assigned_indices: Tuple[np.array] = linear_sum_assignment(cost_matrix.cpu())
indices.append(assigned_indices)
# It could be stacked in one tensor
matched_indices = [
(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices
]
return matched_indices
|
|
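The per-sample matching in the record above boils down to a classic pattern: build a num_queries-by-num_targets cost matrix, then let scipy's Hungarian solver pick the cheapest one-to-one assignment. A toy example with made-up costs:

import numpy as np
from scipy.optimize import linear_sum_assignment

cost_matrix = np.array([
    [0.1, 0.9, 0.8],  # query 0 is cheapest on target 0
    [0.7, 0.2, 0.9],  # query 1 on target 1
    [0.8, 0.9, 0.3],  # query 2 on target 2
])
row_ind, col_ind = linear_sum_assignment(cost_matrix)
print(list(zip(row_ind.tolist(), col_ind.tolist())))  # [(0, 0), (1, 1), (2, 2)]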
17,245 | 81,681 | 174 | awx/main/models/unified_jobs.py | 58 | 14 | def cancel_dispatcher_process(self):
if not self.celery_task_id:
return
canceled = []
try:
# Use control and reply mechanism to cancel and obtain confirmation
timeout = 5
canceled = ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id])
except socket.timeout:
logger.error(f'could not reach dispatcher on {self.controller_node} within {timeou | Refactor canceling to work through messaging and signals, not database
If canceled attempted before, still allow attempting another cancel
in this case, attempt to send the sigterm signal again.
Keep clicking, you might help!
Replace other cancel_callbacks with sigterm watcher
adapt special inventory mechanism for this too
Get rid of the cancel_watcher method with exception in main thread
Handle academic case of sigterm race condition
Process cancelation as control signal
Fully connect cancel method and run_dispatcher to control
Never transition workflows directly to canceled, add logs | cancel_dispatcher_process | c59bbdecdbdd920c5d3d298d691129c6bbc94c5e | awx | unified_jobs.py | 13 | 12 | https://github.com/ansible/awx.git | 4 | 71 | 0 | 49 | 136 | Python | {
"docstring": "Returns True if dispatcher running this job acknowledged request and sent SIGTERM",
"language": "en",
"n_whitespaces": 11,
"n_words": 12,
"vocab_size": 12
} | def cancel_dispatcher_process(self):
if not self.celery_task_id:
return
canceled = []
try:
# Use control and reply mechanism to cancel and obtain confirmation
timeout = 5
canceled = ControlDispatcher('dispatcher', self.controller_node).cancel([self.celery_task_id])
except socket.timeout:
logger.error(f'could not reach dispatcher on {self.controller_node} within {timeout}s')
except Exception:
logger.exception("error encountered when checking task status")
return bool(self.celery_task_id in canceled) # True or False, whether confirmation was obtained
|
|
36,083 | 154,573 | 61 | modin/experimental/core/execution/native/implementations/hdk_on_native/dataframe/dataframe.py | 18 | 6 | def _mangle_index_names(cls, names):
return [
f"__index__{i | FEAT-#4946: Replace OmniSci with HDK (#4947)
Co-authored-by: Iaroslav Igoshev <[email protected]>
Signed-off-by: Andrey Pavlenko <[email protected]> | _mangle_index_names | e5b1888cd932909e49194d58035da34b210b91c4 | modin | dataframe.py | 10 | 5 | https://github.com/modin-project/modin.git | 2 | 22 | 0 | 17 | 54 | Python | {
"docstring": "\n Return mangled index names for index labels.\n\n Mangled names are used for index columns because index\n labels cannot always be used as HDK table column\n names. E.e. label can be a non-string value or an\n unallowed string (empty strings, etc.) for a table column\n name.\n\n Parameters\n ----------\n names : list of str\n Index labels.\n\n Returns\n -------\n list of str\n Mangled names.\n ",
"language": "en",
"n_whitespaces": 175,
"n_words": 61,
"vocab_size": 43
} | def _mangle_index_names(cls, names):
return [
f"__index__{i}_{'__None__' if n is None else n}"
for i, n in enumerate(names)
]
|
|
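A worked example of the mangling scheme above, restated as plain Python; the positional prefix keeps labels unique, and None gets a sentinel spelling:

def mangle(names):
    return [
        f"__index__{i}_{'__None__' if n is None else n}"
        for i, n in enumerate(names)
    ]


print(mangle([None, "year"]))  # ['__index__0___None__', '__index__1_year']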
14,444 | 67,221 | 69 | erpnext/regional/report/gstr_1/gstr_1.py | 101 | 21 | def get_b2cs_json(data, gstin):
company_state_number = gstin[0:2]
out = []
for d in data:
if not d.get("place_of_supply"):
frappe.throw(
_(
).format(frappe.bold("Place Of Supply"))
)
pos = d.get("place_of_supply").split("-")[0]
tax_details = {}
rate = d.get("rate", 0)
tax = flt((d["taxable_value"] * rate) / 100.0, 2)
if company_state_number == pos:
tax_details.update({"camt": flt(tax / 2.0, 2), "samt": flt(tax / 2.0, 2)})
else:
tax_details.update({"iamt": tax})
inv = {
"sply_ty": "INTRA" if company_state_number == pos else "INTER",
"pos": pos,
"typ": d.get("type"),
"txval": flt(d.get("taxable_value"), 2),
"rt": rate,
"iamt": flt(tax_details.get("iamt"), 2),
"camt": flt(tax_details.get("camt"), 2),
"samt": flt(tax_details.get("samt"), 2),
"csamt": flt(d.get("cess_amount"), 2),
}
if d.get("type") == "E" and d.get("ecommerce_gstin"):
inv.upda | style: format code with black | get_b2cs_json | 494bd9ef78313436f0424b918f200dab8fc7c20b | erpnext | gstr_1.py | 16 | 34 | https://github.com/frappe/erpnext.git | 7 | 291 | 0 | 76 | 495 | Python | {
"docstring": "{0} not entered in some invoices.\n\t\t\t\tPlease update and try again",
"language": "en",
"n_whitespaces": 9,
"n_words": 11,
"vocab_size": 11
} | def get_b2cs_json(data, gstin):
company_state_number = gstin[0:2]
out = []
for d in data:
if not d.get("place_of_supply"):
frappe.throw(
_(
).format(frappe.bold("Place Of Supply"))
)
pos = d.get("place_of_supply").split("-")[0]
tax_details = {}
rate = d.get("rate", 0)
tax = flt((d["taxable_value"] * rate) / 100.0, 2)
if company_state_number == pos:
tax_details.update({"camt": flt(tax / 2.0, 2), "samt": flt(tax / 2.0, 2)})
else:
tax_details.update({"iamt": tax})
inv = {
"sply_ty": "INTRA" if company_state_number == pos else "INTER",
"pos": pos,
"typ": d.get("type"),
"txval": flt(d.get("taxable_value"), 2),
"rt": rate,
"iamt": flt(tax_details.get("iamt"), 2),
"camt": flt(tax_details.get("camt"), 2),
"samt": flt(tax_details.get("samt"), 2),
"csamt": flt(d.get("cess_amount"), 2),
}
if d.get("type") == "E" and d.get("ecommerce_gstin"):
inv.update({"etin": d.get("ecommerce_gstin")})
out.append(inv)
return out
|
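A worked example of the intra- vs inter-state split above: when the supplier's state code (the first two digits of the GSTIN) equals the place-of-supply code, the tax is halved into CGST ("camt") and SGST ("samt"); otherwise the full amount goes to IGST ("iamt"). Plain round() stands in for Frappe's flt() helper:

def split_tax(taxable_value, rate, supplier_state, pos_state):
    tax = round(taxable_value * rate / 100.0, 2)
    if supplier_state == pos_state:
        return {"camt": round(tax / 2.0, 2), "samt": round(tax / 2.0, 2)}
    return {"iamt": tax}


print(split_tax(10_000, 18, "27", "27"))  # {'camt': 900.0, 'samt': 900.0}
print(split_tax(10_000, 18, "27", "29"))  # {'iamt': 1800.0}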