body (string, 26-98.2k chars) | body_hash (int64, approx. -9.2e18 to 9.2e18) | docstring (string, 1-16.8k chars) | path (string, 5-230 chars) | name (string, 1-96 chars) | repository_name (string, 7-89 chars) | lang (1 value: python) | body_without_docstring (string, 20-98.2k chars) |
---|---|---|---|---|---|---|---|
@property
def output_channels(self):
'The number of output channels for this conv layer.'
raise NotImplementedError() | 8,242,031,762,583,537,000 | The number of output channels for this conv layer. | lingvo/core/conv_layers_with_time_padding.py | output_channels | zhoudoufu/lingvo | python | @property
def output_channels(self):
raise NotImplementedError() |
@property
def input_channels(self):
'The number of input channels for this conv layer.'
return self.params.filter_shape[2] | 7,604,858,641,345,819,000 | The number of input channels for this conv layer. | lingvo/core/conv_layers_with_time_padding.py | input_channels | zhoudoufu/lingvo | python | @property
def input_channels(self):
return self.params.filter_shape[2] |
def OutShape(self, in_shape):
'Compute the output shape given the input shape.'
p = self.params
return ComputeConvOutputShape(in_shape, p.filter_stride[0], p.filter_stride[1], self.output_channels) | 2,269,693,255,134,553,600 | Compute the output shape given the input shape. | lingvo/core/conv_layers_with_time_padding.py | OutShape | zhoudoufu/lingvo | python | def OutShape(self, in_shape):
p = self.params
return ComputeConvOutputShape(in_shape, p.filter_stride[0], p.filter_stride[1], self.output_channels) |
def FProp(self, theta, inputs, paddings):
"Apply convolution to inputs.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n inputs: The inputs tensor. It is expected to be of shape [batch, time,\n frequency, channel]. The time dimension corresponds to the height\n dimension as in images and the frequency dimension corresponds to the\n width dimension as in images.\n paddings: The paddings tensor, expected to be of shape [batch, time].\n\n Returns:\n outputs, out_paddings pair.\n "
p = self.params
with tf.name_scope(p.name):
inputs = py_utils.with_dependencies([py_utils.assert_shape_match(tf.shape(paddings), [(- 1), (- 1)]), py_utils.assert_shape_match(tf.shape(inputs), tf.concat([tf.shape(paddings), [(- 1), self.input_channels]], 0))], inputs)
def _ApplyPadding(tensor_in, padding_in):
padding_expanded = tf.expand_dims(tf.expand_dims(padding_in, (- 1)), (- 1))
return (tensor_in * (1.0 - padding_expanded))
inputs = _ApplyPadding(inputs, paddings)
out = self._EvaluateConvKernel(theta, inputs)
conv_padding = ComputeConvOutputPadding(paddings, window=p.filter_stride[0], stride=p.filter_stride[0])
out = py_utils.HasShape(out, self.OutShape(tf.shape(inputs)))
return (out, conv_padding) | 3,925,714,795,081,023,500 | Apply convolution to inputs.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: The inputs tensor. It is expected to be of shape [batch, time,
frequency, channel]. The time dimension corresponds to the height
dimension as in images and the frequency dimension corresponds to the
width dimension as in images.
paddings: The paddings tensor, expected to be of shape [batch, time].
Returns:
outputs, out_paddings pair. | lingvo/core/conv_layers_with_time_padding.py | FProp | zhoudoufu/lingvo | python | def FProp(self, theta, inputs, paddings):
"Apply convolution to inputs.\n\n Args:\n theta: A `.NestedMap` object containing weights' values of this layer and\n its children layers.\n inputs: The inputs tensor. It is expected to be of shape [batch, time,\n frequency, channel]. The time dimension corresponds to the height\n dimension as in images and the frequency dimension corresponds to the\n width dimension as in images.\n paddings: The paddings tensor, expected to be of shape [batch, time].\n\n Returns:\n outputs, out_paddings pair.\n "
p = self.params
with tf.name_scope(p.name):
inputs = py_utils.with_dependencies([py_utils.assert_shape_match(tf.shape(paddings), [(- 1), (- 1)]), py_utils.assert_shape_match(tf.shape(inputs), tf.concat([tf.shape(paddings), [(- 1), self.input_channels]], 0))], inputs)
def _ApplyPadding(tensor_in, padding_in):
padding_expanded = tf.expand_dims(tf.expand_dims(padding_in, (- 1)), (- 1))
return (tensor_in * (1.0 - padding_expanded))
inputs = _ApplyPadding(inputs, paddings)
out = self._EvaluateConvKernel(theta, inputs)
conv_padding = ComputeConvOutputPadding(paddings, window=p.filter_stride[0], stride=p.filter_stride[0])
out = py_utils.HasShape(out, self.OutShape(tf.shape(inputs)))
return (out, conv_padding) |
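A side note on the `_ApplyPadding` helper in the row above: it removes padded timesteps by broadcasting the `[batch, time]` padding matrix against the `[batch, time, frequency, channel]` input and multiplying by `1 - padding`. A minimal NumPy sketch of the same masking, with toy shapes chosen purely for illustration (this is not the lingvo API):

```python
import numpy as np

# Toy input: [batch=2, time=3, frequency=1, channel=1].
# paddings is 1.0 where a frame is padding and 0.0 where it is real data.
inputs = np.ones((2, 3, 1, 1))
paddings = np.array([[0.0, 0.0, 1.0],
                     [0.0, 1.0, 1.0]])

# Expand paddings to [batch, time, 1, 1] and multiply by (1 - padding),
# mirroring the two tf.expand_dims calls in _ApplyPadding.
masked = inputs * (1.0 - paddings[:, :, None, None])
print(masked[..., 0, 0])
# [[1. 1. 0.]
#  [1. 0. 0.]]
```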
def _EvaluateConvKernel(self, theta, conv_input):
"Evaluate the convolution kernel on input 'conv_input'."
raise NotImplementedError | -7,612,223,677,189,903,000 | Evaluate the convolution kernel on input 'conv_input'. | lingvo/core/conv_layers_with_time_padding.py | _EvaluateConvKernel | zhoudoufu/lingvo | python | def _EvaluateConvKernel(self, theta, conv_input):
raise NotImplementedError |
@property
def output_channels(self):
'The number of output channels for this conv layer.'
p = self.params
return p.filter_shape[(- 1)] | -3,440,541,955,508,788,700 | The number of output channels for this conv layer. | lingvo/core/conv_layers_with_time_padding.py | output_channels | zhoudoufu/lingvo | python | @property
def output_channels(self):
p = self.params
return p.filter_shape[(- 1)] |
def _EvaluateConvKernel(self, theta, inputs):
'Apply convolution to inputs.'
p = self.params
filter_w = self._GetWeight(theta)
return tf.nn.convolution(inputs, filter_w, strides=p.filter_stride, dilation_rate=p.dilation_rate, data_format='NHWC', padding='SAME') | -3,120,093,250,225,228,300 | Apply convolution to inputs. | lingvo/core/conv_layers_with_time_padding.py | _EvaluateConvKernel | zhoudoufu/lingvo | python | def _EvaluateConvKernel(self, theta, inputs):
p = self.params
filter_w = self._GetWeight(theta)
return tf.nn.convolution(inputs, filter_w, strides=p.filter_stride, dilation_rate=p.dilation_rate, data_format='NHWC', padding='SAME') |
def _EvaluateConvKernel(self, theta, inputs):
'Apply convolution to inputs.'
p = self.params
assert (p.filter_shape[1] == 1), 'Only 1D causal convolutions supported.'
padding_algorithm = 'VALID'
causal_pad_size = ((p.filter_shape[0] - 1) * p.dilation_rate[0])
inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]])
filter_w = self._GetWeight(theta)
return tf.nn.convolution(inputs, filter_w, strides=p.filter_stride, dilation_rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) | -8,518,852,387,100,807,000 | Apply convolution to inputs. | lingvo/core/conv_layers_with_time_padding.py | _EvaluateConvKernel | zhoudoufu/lingvo | python | def _EvaluateConvKernel(self, theta, inputs):
p = self.params
assert (p.filter_shape[1] == 1), 'Only 1D causal convolutions supported.'
padding_algorithm = 'VALID'
causal_pad_size = ((p.filter_shape[0] - 1) * p.dilation_rate[0])
inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]])
filter_w = self._GetWeight(theta)
return tf.nn.convolution(inputs, filter_w, strides=p.filter_stride, dilation_rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) |
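The causal variants in this file all use the same trick visible above: left-pad the time axis by `(filter_shape[0] - 1) * dilation_rate[0]` and then convolve with `padding='VALID'`, so each output step only sees current and past frames. A self-contained NumPy sketch of that padding arithmetic on a toy 1-D signal (an illustration of the idea, not the lingvo layer itself):

```python
import numpy as np

def causal_conv1d(x, taps, dilation=1):
    """1-D causal filter: output[t] depends only on x[0..t]."""
    k = len(taps)
    pad = (k - 1) * dilation                  # same formula as causal_pad_size
    xp = np.concatenate([np.zeros(pad), x])   # pad on the left only
    return np.array([
        sum(taps[j] * xp[t + pad - j * dilation] for j in range(k))
        for t in range(len(x))
    ])

print(causal_conv1d(np.array([1.0, 2.0, 3.0, 4.0]), [0.5, 0.5]))
# [0.5 1.5 2.5 3.5] -- output length equals input length, and no output
# position looks at future samples.
```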
@property
def output_channels(self):
'The number of output channels for this conv layer.'
p = self.params
return (p.filter_shape[2] * p.filter_shape[3]) | -3,158,050,055,185,182,000 | The number of output channels for this conv layer. | lingvo/core/conv_layers_with_time_padding.py | output_channels | zhoudoufu/lingvo | python | @property
def output_channels(self):
p = self.params
return (p.filter_shape[2] * p.filter_shape[3]) |
def _EvaluateConvKernel(self, theta, inputs):
'Apply convolution to inputs.'
p = self.params
filter_w = self._GetWeight(theta)
return tf.nn.depthwise_conv2d(inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding='SAME') | -2,014,962,577,060,515,000 | Apply convolution to inputs. | lingvo/core/conv_layers_with_time_padding.py | _EvaluateConvKernel | zhoudoufu/lingvo | python | def _EvaluateConvKernel(self, theta, inputs):
p = self.params
filter_w = self._GetWeight(theta)
return tf.nn.depthwise_conv2d(inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding='SAME') |
def _EvaluateConvKernel(self, theta, inputs):
'Apply convolution to inputs.'
p = self.params
assert (p.filter_shape[1] == 1), 'Only 1D causal convolutions supported.'
padding_algorithm = 'VALID'
causal_pad_size = ((p.filter_shape[0] - 1) * p.dilation_rate[0])
inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]])
filter_w = self._GetWeight(theta)
return tf.nn.depthwise_conv2d(inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) | 2,925,317,837,623,134,700 | Apply convolution to inputs. | lingvo/core/conv_layers_with_time_padding.py | _EvaluateConvKernel | zhoudoufu/lingvo | python | def _EvaluateConvKernel(self, theta, inputs):
p = self.params
assert (p.filter_shape[1] == 1), 'Only 1D causal convolutions supported.'
padding_algorithm = 'VALID'
causal_pad_size = ((p.filter_shape[0] - 1) * p.dilation_rate[0])
inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]])
filter_w = self._GetWeight(theta)
return tf.nn.depthwise_conv2d(inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) |
@property
def output_channels(self):
'The number of output channels for this conv layer.'
p = self.params
return ((p.filter_shape[2] * p.filter_shape[3]) * p.weight_tiling_factor) | 6,790,885,910,145,574,000 | The number of output channels for this conv layer. | lingvo/core/conv_layers_with_time_padding.py | output_channels | zhoudoufu/lingvo | python | @property
def output_channels(self):
p = self.params
return ((p.filter_shape[2] * p.filter_shape[3]) * p.weight_tiling_factor) |
@property
def input_channels(self):
'The number of output channels for this conv layer.'
p = self.params
return (p.filter_shape[2] * p.weight_tiling_factor) | -1,990,916,665,096,716,500 | The number of output channels for this conv layer. | lingvo/core/conv_layers_with_time_padding.py | input_channels | zhoudoufu/lingvo | python | @property
def input_channels(self):
p = self.params
return (p.filter_shape[2] * p.weight_tiling_factor) |
def _EvaluateConvKernel(self, theta, inputs):
'Apply convolution to inputs.'
p = self.params
assert (p.filter_shape[1] == 1), 'Only 1D causal convolutions supported.'
padding_algorithm = 'VALID'
causal_pad_size = ((p.filter_shape[0] - 1) * p.dilation_rate[0])
inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]])
filter_w = self._GetWeight(theta)
return tf.nn.depthwise_conv2d(inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) | 2,925,317,837,623,134,700 | Apply convolution to inputs. | lingvo/core/conv_layers_with_time_padding.py | _EvaluateConvKernel | zhoudoufu/lingvo | python | def _EvaluateConvKernel(self, theta, inputs):
p = self.params
assert (p.filter_shape[1] == 1), 'Only 1D causal convolutions supported.'
padding_algorithm = 'VALID'
causal_pad_size = ((p.filter_shape[0] - 1) * p.dilation_rate[0])
inputs = tf.pad(inputs, [[0, 0], [causal_pad_size, 0], [0, 0], [0, 0]])
filter_w = self._GetWeight(theta)
return tf.nn.depthwise_conv2d(inputs, filter_w, strides=[1, p.filter_stride[0], p.filter_stride[1], 1], rate=p.dilation_rate, data_format='NHWC', padding=padding_algorithm) |
def _get_dataset_class(self):
'The dataset is SeqDataset.'
return dataset.SeqDataset | -2,868,445,351,187,315,000 | The dataset is SeqDataset. | recstudio/model/seq/hgn.py | _get_dataset_class | ustc-recsys/Torchrec | python | def _get_dataset_class(self):
return dataset.SeqDataset |
def _get_loss_func(self):
'BPR loss is used.'
return loss_func.BPRLoss() | -3,570,538,041,019,999,000 | BPR loss is used. | recstudio/model/seq/hgn.py | _get_loss_func | ustc-recsys/Torchrec | python | def _get_loss_func(self):
return loss_func.BPRLoss() |
def check_mempool_result(self, result_expected, *args, **kwargs):
"Wrapper to check result of testmempoolaccept on node_0's mempool"
result_test = self.nodes[0].testmempoolaccept(*args, **kwargs)
assert_equal(result_expected, result_test)
assert_equal(self.nodes[0].getmempoolinfo()['size'], self.mempool_size) | -7,100,453,028,876,266,000 | Wrapper to check result of testmempoolaccept on node_0's mempool | test/functional/mempool_accept.py | check_mempool_result | Mantle-One/mantlecoin | python | def check_mempool_result(self, result_expected, *args, **kwargs):
result_test = self.nodes[0].testmempoolaccept(*args, **kwargs)
assert_equal(result_expected, result_test)
assert_equal(self.nodes[0].getmempoolinfo()['size'], self.mempool_size) |
def download_file_repeatedly(tries, session, file_id, file_name, expected_file_size, request_headers, error):
'Attempt to download BaseSpace file numerous times in case of errors.'
for i in range(tries):
try:
download_file(session=session, file_id=file_id, file_name=file_name, request_headers=request_headers, error=error)
raise_for_file_corruption(file_name=file_name, expected_file_size=expected_file_size, error=error)
break
except BaseSpaceDownloadError:
if ((i + 1) == tries):
error('Could not download file from BaseSpace.')
else:
time.sleep(3) | -5,105,479,421,124,455,000 | Attempt to download BaseSpace file numerous times in case of errors. | resolwe_bio/processes/import_data/basespace.py | download_file_repeatedly | plojyon/resolwe-bio | python | def download_file_repeatedly(tries, session, file_id, file_name, expected_file_size, request_headers, error):
for i in range(tries):
try:
download_file(session=session, file_id=file_id, file_name=file_name, request_headers=request_headers, error=error)
raise_for_file_corruption(file_name=file_name, expected_file_size=expected_file_size, error=error)
break
except BaseSpaceDownloadError:
if ((i + 1) == tries):
error('Could not download file from BaseSpace.')
else:
time.sleep(3) |
def download_file(session, file_id, file_name, request_headers, error):
'Download BaseSpace file.'
response = make_get_request(session=session, url=get_api_file_content_url(file_id=file_id), headers=request_headers, error=error, stream=True)
try:
with open(file_name, 'wb') as f:
chunk_size = ((1024 * 1024) * 10)
for chunk in response.iter_content(chunk_size=chunk_size):
f.write(chunk)
except FileNotFoundError:
error(f'Could not save file to {file_name}, due to directory not being found')
except PermissionError:
error(f'Could not save file to {file_name}, due to insufficient permissions')
except RequestException:
error(f'Could not save file to {file_name}, due to a network error') | 9,134,604,517,344,069,000 | Download BaseSpace file. | resolwe_bio/processes/import_data/basespace.py | download_file | plojyon/resolwe-bio | python | def download_file(session, file_id, file_name, request_headers, error):
response = make_get_request(session=session, url=get_api_file_content_url(file_id=file_id), headers=request_headers, error=error, stream=True)
try:
with open(file_name, 'wb') as f:
chunk_size = ((1024 * 1024) * 10)
for chunk in response.iter_content(chunk_size=chunk_size):
f.write(chunk)
except FileNotFoundError:
error(f'Could not save file to {file_name}, due to directory not being found')
except PermissionError:
error(f'Could not save file to {file_name}, due to insufficient permissions')
except RequestException:
error(f'Could not save file to {file_name}, due to a network error') |
def get_file_properties(session, file_id, request_headers, error):
'Get file name and size (in bytes).'
response = make_get_request(session=session, url=get_api_file_url(file_id=file_id), headers=request_headers, error=error)
info = response.json()['Response']
return (info['Name'], info['Size']) | 9,217,621,477,618,900,000 | Get file name and size (in bytes). | resolwe_bio/processes/import_data/basespace.py | get_file_properties | plojyon/resolwe-bio | python | def get_file_properties(session, file_id, request_headers, error):
response = make_get_request(session=session, url=get_api_file_url(file_id=file_id), headers=request_headers, error=error)
info = response.json()['Response']
return (info['Name'], info['Size']) |
def make_get_request(session, url, headers, error, stream=False):
'Make a get request.'
response = session.get(url=url, headers=headers, stream=stream, timeout=60)
if (response.status_code == 401):
error(f'Authentication failed on URL {url}')
elif (response.status_code == 404):
error(f'BaseSpace file {url} not found')
elif (response.status_code != 200):
error(f'Failed to retrieve content from {url}')
return response | 1,917,236,053,509,517,300 | Make a get request. | resolwe_bio/processes/import_data/basespace.py | make_get_request | plojyon/resolwe-bio | python | def make_get_request(session, url, headers, error, stream=False):
response = session.get(url=url, headers=headers, stream=stream, timeout=60)
if (response.status_code == 401):
error(f'Authentication failed on URL {url}')
elif (response.status_code == 404):
error(f'BaseSpace file {url} not found')
elif (response.status_code != 200):
error(f'Failed to retrieve content from {url}')
return response |
def get_api_file_url(file_id):
'Get BaseSpace API file URL.'
api_url = 'https://api.basespace.illumina.com/v1pre3'
return f'{api_url}/files/{file_id}' | -1,486,712,254,861,731,600 | Get BaseSpace API file URL. | resolwe_bio/processes/import_data/basespace.py | get_api_file_url | plojyon/resolwe-bio | python | def get_api_file_url(file_id):
api_url = 'https://api.basespace.illumina.com/v1pre3'
return f'{api_url}/files/{file_id}' |
def get_api_file_content_url(file_id):
'Get BaseSpace API file contents URL.'
return f'{get_api_file_url(file_id=file_id)}/content' | -5,197,090,098,529,219,000 | Get BaseSpace API file contents URL. | resolwe_bio/processes/import_data/basespace.py | get_api_file_content_url | plojyon/resolwe-bio | python | def get_api_file_content_url(file_id):
return f'{get_api_file_url(file_id=file_id)}/content' |
def output(output_option, value):
'Print to standard output.'
if (output_option == 'full'):
print(value)
elif (output_option == 'filename'):
if value.startswith('filename='):
print(value[len('filename='):]) | 6,814,121,900,314,926,000 | Print to standard output. | resolwe_bio/processes/import_data/basespace.py | output | plojyon/resolwe-bio | python | def output(output_option, value):
if (output_option == 'full'):
print(value)
elif (output_option == 'filename'):
if value.startswith('filename='):
print(value[len('filename='):]) |
def get_token_from_secret_file(secret_file_path, error):
'Read secret file to obtain access token.'
try:
with open(secret_file_path, 'r') as f:
return f.readline()
except FileNotFoundError:
error('Secret file not found')
except PermissionError:
error('No permissions to read secret file') | 9,143,504,654,655,903,000 | Read secret file to obtain access token. | resolwe_bio/processes/import_data/basespace.py | get_token_from_secret_file | plojyon/resolwe-bio | python | def get_token_from_secret_file(secret_file_path, error):
try:
with open(secret_file_path, 'r') as f:
return f.readline()
except FileNotFoundError:
error('Secret file not found')
except PermissionError:
error('No permissions to read secret file') |
def on_exit(session):
'Clean up function called on exit.'
session.close() | 3,417,955,870,742,018,600 | Clean up function called on exit. | resolwe_bio/processes/import_data/basespace.py | on_exit | plojyon/resolwe-bio | python | def on_exit(session):
session.close() |
def raise_for_file_corruption(file_name, expected_file_size, error):
'Raise an error if file does not pass integrity check.'
actual_file_size = os.path.getsize(file_name)
if (expected_file_size != actual_file_size):
error(f"File's ({file_name}) expected size ({expected_file_size}) does not match its actual size ({actual_file_size})")
if (file_name.split('.')[(- 1)] == 'gz'):
try:
with gzip.open(file_name, 'rb') as f:
chunk_size = ((1024 * 1024) * 10)
while bool(f.read(chunk_size)):
pass
except OSError:
error(f'File {file_name} did not pass gzip integrity check') | -2,081,467,245,419,386,000 | Raise an error if file does not pass integrity check. | resolwe_bio/processes/import_data/basespace.py | raise_for_file_corruption | plojyon/resolwe-bio | python | def raise_for_file_corruption(file_name, expected_file_size, error):
actual_file_size = os.path.getsize(file_name)
if (expected_file_size != actual_file_size):
error(f"File's ({file_name}) expected size ({expected_file_size}) does not match its actual size ({actual_file_size})")
if (file_name.split('.')[(- 1)] == 'gz'):
try:
with gzip.open(file_name, 'rb') as f:
chunk_size = ((1024 * 1024) * 10)
while bool(f.read(chunk_size)):
pass
except OSError:
error(f'File {file_name} did not pass gzip integrity check') |
def run(self, inputs, outputs):
'Run import.'
secret_path = (Path('/secrets') / inputs.access_token_secret['handle'])
session = Session()
atexit.register(on_exit, session)
try:
file_id = inputs.file_id
access_token = get_token_from_secret_file(secret_file_path=secret_path, error=self.error)
headers = {'x-access-token': access_token}
(file_name, file_size) = get_file_properties(session=session, file_id=file_id, request_headers=headers, error=self.error)
download_file_repeatedly(tries=inputs.advanced.tries, session=session, file_id=file_id, file_name=file_name, expected_file_size=file_size, request_headers=headers, error=self.error)
output(inputs.advanced.output, f'filename={file_name}')
except Exception as error:
if inputs.advanced.verbose:
traceback.print_exc()
self.error('Unexpected error occurred while trying to download files from BaseSpace. Check standard output for more details.')
else:
print(str(error))
self.error('Unexpected error occurred while trying to download files from BaseSpace. Set Verbose to True to see the traceback.')
outputs.file = file_name | 5,502,022,459,005,394,000 | Run import. | resolwe_bio/processes/import_data/basespace.py | run | plojyon/resolwe-bio | python | def run(self, inputs, outputs):
secret_path = (Path('/secrets') / inputs.access_token_secret['handle'])
session = Session()
atexit.register(on_exit, session)
try:
file_id = inputs.file_id
access_token = get_token_from_secret_file(secret_file_path=secret_path, error=self.error)
headers = {'x-access-token': access_token}
(file_name, file_size) = get_file_properties(session=session, file_id=file_id, request_headers=headers, error=self.error)
download_file_repeatedly(tries=inputs.advanced.tries, session=session, file_id=file_id, file_name=file_name, expected_file_size=file_size, request_headers=headers, error=self.error)
output(inputs.advanced.output, f'filename={file_name}')
except Exception as error:
if inputs.advanced.verbose:
traceback.print_exc()
self.error('Unexpected error occurred while trying to download files from BaseSpace. Check standard output for more details.')
else:
print(str(error))
self.error('Unexpected error occurred while trying to download files from BaseSpace. Set Verbose to True to see the traceback.')
outputs.file = file_name |
def list_topic_keys(authorization_rule_name: Optional[str]=None, namespace_name: Optional[str]=None, resource_group_name: Optional[str]=None, topic_name: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableListTopicKeysResult:
'\n Namespace/ServiceBus Connection String\n API Version: 2017-04-01.\n\n\n :param str authorization_rule_name: The authorization rule name.\n :param str namespace_name: The namespace name\n :param str resource_group_name: Name of the Resource group within the Azure subscription.\n :param str topic_name: The topic name.\n '
__args__ = dict()
__args__['authorizationRuleName'] = authorization_rule_name
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
__args__['topicName'] = topic_name
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:servicebus:listTopicKeys', __args__, opts=opts, typ=ListTopicKeysResult).value
return AwaitableListTopicKeysResult(alias_primary_connection_string=__ret__.alias_primary_connection_string, alias_secondary_connection_string=__ret__.alias_secondary_connection_string, key_name=__ret__.key_name, primary_connection_string=__ret__.primary_connection_string, primary_key=__ret__.primary_key, secondary_connection_string=__ret__.secondary_connection_string, secondary_key=__ret__.secondary_key) | 7,076,781,155,728,823,000 | Namespace/ServiceBus Connection String
API Version: 2017-04-01.
:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The namespace name
:param str resource_group_name: Name of the Resource group within the Azure subscription.
:param str topic_name: The topic name. | sdk/python/pulumi_azure_nextgen/servicebus/list_topic_keys.py | list_topic_keys | pulumi/pulumi-azure-nextgen | python | def list_topic_keys(authorization_rule_name: Optional[str]=None, namespace_name: Optional[str]=None, resource_group_name: Optional[str]=None, topic_name: Optional[str]=None, opts: Optional[pulumi.InvokeOptions]=None) -> AwaitableListTopicKeysResult:
'\n Namespace/ServiceBus Connection String\n API Version: 2017-04-01.\n\n\n :param str authorization_rule_name: The authorization rule name.\n :param str namespace_name: The namespace name\n :param str resource_group_name: Name of the Resource group within the Azure subscription.\n :param str topic_name: The topic name.\n '
__args__ = dict()
__args__['authorizationRuleName'] = authorization_rule_name
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
__args__['topicName'] = topic_name
if (opts is None):
opts = pulumi.InvokeOptions()
if (opts.version is None):
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:servicebus:listTopicKeys', __args__, opts=opts, typ=ListTopicKeysResult).value
return AwaitableListTopicKeysResult(alias_primary_connection_string=__ret__.alias_primary_connection_string, alias_secondary_connection_string=__ret__.alias_secondary_connection_string, key_name=__ret__.key_name, primary_connection_string=__ret__.primary_connection_string, primary_key=__ret__.primary_key, secondary_connection_string=__ret__.secondary_connection_string, secondary_key=__ret__.secondary_key) |
@property
@pulumi.getter(name='aliasPrimaryConnectionString')
def alias_primary_connection_string(self) -> str:
'\n Primary connection string of the alias if GEO DR is enabled\n '
return pulumi.get(self, 'alias_primary_connection_string') | -735,264,547,924,943,000 | Primary connection string of the alias if GEO DR is enabled | sdk/python/pulumi_azure_nextgen/servicebus/list_topic_keys.py | alias_primary_connection_string | pulumi/pulumi-azure-nextgen | python | @property
@pulumi.getter(name='aliasPrimaryConnectionString')
def alias_primary_connection_string(self) -> str:
'\n \n '
return pulumi.get(self, 'alias_primary_connection_string') |
@property
@pulumi.getter(name='aliasSecondaryConnectionString')
def alias_secondary_connection_string(self) -> str:
'\n Secondary connection string of the alias if GEO DR is enabled\n '
return pulumi.get(self, 'alias_secondary_connection_string') | -7,252,278,262,410,730,000 | Secondary connection string of the alias if GEO DR is enabled | sdk/python/pulumi_azure_nextgen/servicebus/list_topic_keys.py | alias_secondary_connection_string | pulumi/pulumi-azure-nextgen | python | @property
@pulumi.getter(name='aliasSecondaryConnectionString')
def alias_secondary_connection_string(self) -> str:
'\n \n '
return pulumi.get(self, 'alias_secondary_connection_string') |
@property
@pulumi.getter(name='keyName')
def key_name(self) -> str:
'\n A string that describes the authorization rule.\n '
return pulumi.get(self, 'key_name') | -8,989,103,160,870,669,000 | A string that describes the authorization rule. | sdk/python/pulumi_azure_nextgen/servicebus/list_topic_keys.py | key_name | pulumi/pulumi-azure-nextgen | python | @property
@pulumi.getter(name='keyName')
def key_name(self) -> str:
'\n \n '
return pulumi.get(self, 'key_name') |
@property
@pulumi.getter(name='primaryConnectionString')
def primary_connection_string(self) -> str:
'\n Primary connection string of the created namespace authorization rule.\n '
return pulumi.get(self, 'primary_connection_string') | 5,476,672,033,728,210,000 | Primary connection string of the created namespace authorization rule. | sdk/python/pulumi_azure_nextgen/servicebus/list_topic_keys.py | primary_connection_string | pulumi/pulumi-azure-nextgen | python | @property
@pulumi.getter(name='primaryConnectionString')
def primary_connection_string(self) -> str:
'\n \n '
return pulumi.get(self, 'primary_connection_string') |
@property
@pulumi.getter(name='primaryKey')
def primary_key(self) -> str:
'\n A base64-encoded 256-bit primary key for signing and validating the SAS token.\n '
return pulumi.get(self, 'primary_key') | -8,605,071,421,063,727,000 | A base64-encoded 256-bit primary key for signing and validating the SAS token. | sdk/python/pulumi_azure_nextgen/servicebus/list_topic_keys.py | primary_key | pulumi/pulumi-azure-nextgen | python | @property
@pulumi.getter(name='primaryKey')
def primary_key(self) -> str:
'\n \n '
return pulumi.get(self, 'primary_key') |
@property
@pulumi.getter(name='secondaryConnectionString')
def secondary_connection_string(self) -> str:
'\n Secondary connection string of the created namespace authorization rule.\n '
return pulumi.get(self, 'secondary_connection_string') | 544,027,555,300,435,840 | Secondary connection string of the created namespace authorization rule. | sdk/python/pulumi_azure_nextgen/servicebus/list_topic_keys.py | secondary_connection_string | pulumi/pulumi-azure-nextgen | python | @property
@pulumi.getter(name='secondaryConnectionString')
def secondary_connection_string(self) -> str:
'\n \n '
return pulumi.get(self, 'secondary_connection_string') |
@property
@pulumi.getter(name='secondaryKey')
def secondary_key(self) -> str:
'\n A base64-encoded 256-bit primary key for signing and validating the SAS token.\n '
return pulumi.get(self, 'secondary_key') | -3,971,171,928,977,375,700 | A base64-encoded 256-bit primary key for signing and validating the SAS token. | sdk/python/pulumi_azure_nextgen/servicebus/list_topic_keys.py | secondary_key | pulumi/pulumi-azure-nextgen | python | @property
@pulumi.getter(name='secondaryKey')
def secondary_key(self) -> str:
'\n \n '
return pulumi.get(self, 'secondary_key') |
def buildTree(self, preorder, inorder):
'\n\t\t:type preorder: List[int]\n\t\t:type inorder: List[int]\n\t\t:rtype: TreeNode\n\t\t'
if (not preorder):
return None
def build_node(lo, hi):
node = TreeNode(preorder[self.index])
self.index += 1
j = inorder.index(node.val, lo, hi)
if ((self.index < len(preorder)) and (preorder[self.index] in inorder[lo:j])):
node.left = build_node(lo, j)
if ((self.index < len(preorder)) and (preorder[self.index] in inorder[(j + 1):hi])):
node.right = build_node((j + 1), hi)
return node
return build_node(0, len(preorder)) | -666,873,995,319,372,700 | :type preorder: List[int]
:type inorder: List[int]
:rtype: TreeNode | medium/Q105_ConstructBinaryTreeFromPreorderAndInorderTraversal.py | buildTree | Kaciras/leetcode | python | def buildTree(self, preorder, inorder):
'\n\t\t:type preorder: List[int]\n\t\t:type inorder: List[int]\n\t\t:rtype: TreeNode\n\t\t'
if (not preorder):
return None
def build_node(lo, hi):
node = TreeNode(preorder[self.index])
self.index += 1
j = inorder.index(node.val, lo, hi)
if ((self.index < len(preorder)) and (preorder[self.index] in inorder[lo:j])):
node.left = build_node(lo, j)
if ((self.index < len(preorder)) and (preorder[self.index] in inorder[(j + 1):hi])):
node.right = build_node((j + 1), hi)
return node
return build_node(0, len(preorder)) |
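For the preorder/inorder reconstruction above, the invariant is that the first unconsumed preorder element is the subtree root and its index in the inorder list splits the left and right subtrees. A self-contained sketch of the same idea using nested tuples in place of the `TreeNode` class (which is not shown in this row); like the original, it assumes node values are unique:

```python
def build(preorder, inorder):
    """Rebuild a binary tree as (value, left, right) tuples."""
    if not inorder:
        return None
    root = preorder.pop(0)                 # consume preorder left to right
    i = inorder.index(root)                # split point in inorder
    left = build(preorder, inorder[:i])
    right = build(preorder, inorder[i + 1:])
    return (root, left, right)

print(build([3, 9, 20, 15, 7], [9, 3, 15, 20, 7]))
# (3, (9, None, None), (20, (15, None, None), (7, None, None)))
```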
def get_virtualization_api_version():
'Returns the Virutalization API version string.\n\n :return: version string\n '
return to_str(dlpx.virtualization.api.__version__) | 8,405,904,629,277,544,000 | Returns the Virutalization API version string.
:return: version string | platform/src/main/python/dlpx/virtualization/platform/util.py | get_virtualization_api_version | Balamuruhan/virtualization-sdk | python | def get_virtualization_api_version():
'Returns the Virutalization API version string.\n\n :return: version string\n '
return to_str(dlpx.virtualization.api.__version__) |
def euclidian_distance(self, e1, e2):
'\n https://stackoverflow.com/questions/1401712/how-can-the-euclidean-distance-be-calculated-with-numpy\n '
return np.linalg.norm((e1 - e2)) | 7,530,376,623,388,592,000 | https://stackoverflow.com/questions/1401712/how-can-the-euclidean-distance-be-calculated-with-numpy | Word2Vec/NearestNeighbor.py | euclidian_distance | bi3mer/Word2Vec | python | def euclidian_distance(self, e1, e2):
'\n \n '
return np.linalg.norm((e1 - e2)) |
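`np.linalg.norm(e1 - e2)` in the row above is just the Euclidean (L2) distance between two embedding vectors; a quick numerical check:

```python
import numpy as np

a = np.array([0.0, 3.0])
b = np.array([4.0, 0.0])
print(np.linalg.norm(a - b))          # 5.0
print(np.sqrt(np.sum((a - b) ** 2)))  # 5.0, the same quantity written out
```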
def _log_results(victims_output):
'Log results.'
cve_id = victims_output.cve.id_
logger.info('[{cve_id}] picked `{winner}` out of `{candidates}`'.format(cve_id=cve_id, winner=victims_output.winner, candidates=victims_output.candidates))
logger.info('[{cve_id}] Affected version range: {version_ranges}'.format(cve_id=cve_id, version_ranges=victims_output.affected_versions))
logger.info('[{cve_id}] Safe version range: {version_ranges}'.format(cve_id=cve_id, version_ranges=victims_output.safe_versions)) | 6,854,853,430,072,565,000 | Log results. | run.py | _log_results | jparsai/cvejob | python | def _log_results(victims_output):
cve_id = victims_output.cve.id_
logger.info('[{cve_id}] picked `{winner}` out of `{candidates}`'.format(cve_id=cve_id, winner=victims_output.winner, candidates=victims_output.candidates))
logger.info('[{cve_id}] Affected version range: {version_ranges}'.format(cve_id=cve_id, version_ranges=victims_output.affected_versions))
logger.info('[{cve_id}] Safe version range: {version_ranges}'.format(cve_id=cve_id, version_ranges=victims_output.safe_versions)) |
def _filter_collection(collection, date_range, cherry_pick):
'Filter Document collection.'
if date_range:
collection_size_before = collection.count()
collection = collection.find({'published_date': in_range(*date_range)})
logger.debug('Filtered out {} Documents that do not fall in the given range.'.format((collection_size_before - collection.count())))
if cherry_pick:
logger.debug('Cherry-picked CVE `{cve_id}`'.format(cve_id=cherry_pick))
collection = collection.find({'cve.id_': cherry_pick})
return collection | -7,678,109,665,041,835,000 | Filter Document collection. | run.py | _filter_collection | jparsai/cvejob | python | def _filter_collection(collection, date_range, cherry_pick):
if date_range:
collection_size_before = collection.count()
collection = collection.find({'published_date': in_range(*date_range)})
logger.debug('Filtered out {} Documents that do not fall in the given range.'.format((collection_size_before - collection.count())))
if cherry_pick:
logger.debug('Cherry-picked CVE `{cve_id}`'.format(cve_id=cherry_pick))
collection = collection.find({'cve.id_': cherry_pick})
return collection |
def run():
'Run CVEjob.'
feed_dir = Config.feed_dir
feed_names = Config.feed_names
date_range = Config.date_range
cherrypicked_cve_id = Config.cve_id
cherrypicked_year = None
if cherrypicked_cve_id:
cherrypicked_year = cherrypicked_cve_id.split(sep='-')[1]
if (int(cherrypicked_year) < 2002):
cherrypicked_year = 2002
if date_range:
date_range = parse_date_range(Config.date_range)
feed_names = range(date_range[0].year, (date_range[1].year + 1))
if cherrypicked_cve_id:
if (int(cherrypicked_year) not in feed_names):
logger.info('[{picked_cve_id}] does not belong to the given feed range: {date_range}'.format(picked_cve_id=cherrypicked_cve_id, date_range=date_range))
return
feed_names = [cherrypicked_year]
if (not feed_names):
if cherrypicked_cve_id:
feed_names = [cherrypicked_year]
else:
feed_names = ['modified']
with FeedManager(n_workers=multiprocessing.cpu_count()) as feed_manager:
feeds = feed_manager.fetch_feeds(feed_names=feed_names, data_dir=feed_dir, update=True)
collection = feed_manager.collect(feeds)
collection = _filter_collection(collection, date_range, cherrypicked_cve_id)
if (not collection):
logger.info('Collection is empty.'.format(picked_cve_id=cherrypicked_cve_id))
return
logger.debug('Number of CVE Documents in the collection: {}'.format(collection.count()))
if (Config.package_name and Config.cve_id):
doc = [x for x in collection][0]
(affected, safe) = NVDVersions(doc, Config.package_name, Config.ecosystem).run()
victims_output = VictimsYamlOutput(ecosystem=Config.ecosystem, cve_doc=doc, winner=PackageNameCandidate(Config.package_name, Decimal('1.0')), candidates=[], affected=affected, fixedin=safe)
_log_results(victims_output)
victims_output.write()
sys.exit(0)
for doc in collection:
cve_id = doc.cve.id_
try:
if (not validate_cve(doc)):
logger.debug('[{cve_id}] was filtered out by input checks'.format(cve_id=cve_id))
continue
pkgfile_path = get_pkgfile_path(Config.pkgfile_dir, Config.ecosystem)
identifier = get_identifier_cls()(doc, Config.ecosystem, pkgfile_path)
candidates = identifier.identify()
if (not candidates):
logger.info('[{cve_id}] no package name candidates found'.format(cve_id=cve_id))
continue
selector = VersionSelector(doc, candidates, Config.ecosystem)
winner = selector.pick_winner()
if (not winner):
logger.info('[{cve_id}] no package name found'.format(cve_id=cve_id))
continue
(affected, safe) = NVDVersions(doc, winner.package, Config.ecosystem).run()
victims_output = VictimsYamlOutput(ecosystem=Config.ecosystem, cve_doc=doc, winner=winner, candidates=candidates, affected=affected, fixedin=safe)
_log_results(victims_output)
victims_output.write()
except Exception as exc:
logger.warning('[{cve_id}] Unexpected exception occurred: {exc}'.format(cve_id=cve_id, exc=exc), exc_info=True) | -8,527,722,847,959,948,000 | Run CVEjob. | run.py | run | jparsai/cvejob | python | def run():
feed_dir = Config.feed_dir
feed_names = Config.feed_names
date_range = Config.date_range
cherrypicked_cve_id = Config.cve_id
cherrypicked_year = None
if cherrypicked_cve_id:
cherrypicked_year = cherrypicked_cve_id.split(sep='-')[1]
if (int(cherrypicked_year) < 2002):
cherrypicked_year = 2002
if date_range:
date_range = parse_date_range(Config.date_range)
feed_names = range(date_range[0].year, (date_range[1].year + 1))
if cherrypicked_cve_id:
if (int(cherrypicked_year) not in feed_names):
logger.info('[{picked_cve_id}] does not belong to the given feed range: {date_range}'.format(picked_cve_id=cherrypicked_cve_id, date_range=date_range))
return
feed_names = [cherrypicked_year]
if (not feed_names):
if cherrypicked_cve_id:
feed_names = [cherrypicked_year]
else:
feed_names = ['modified']
with FeedManager(n_workers=multiprocessing.cpu_count()) as feed_manager:
feeds = feed_manager.fetch_feeds(feed_names=feed_names, data_dir=feed_dir, update=True)
collection = feed_manager.collect(feeds)
collection = _filter_collection(collection, date_range, cherrypicked_cve_id)
if (not collection):
logger.info('Collection is empty.'.format(picked_cve_id=cherrypicked_cve_id))
return
logger.debug('Number of CVE Documents in the collection: {}'.format(collection.count()))
if (Config.package_name and Config.cve_id):
doc = [x for x in collection][0]
(affected, safe) = NVDVersions(doc, Config.package_name, Config.ecosystem).run()
victims_output = VictimsYamlOutput(ecosystem=Config.ecosystem, cve_doc=doc, winner=PackageNameCandidate(Config.package_name, Decimal('1.0')), candidates=[], affected=affected, fixedin=safe)
_log_results(victims_output)
victims_output.write()
sys.exit(0)
for doc in collection:
cve_id = doc.cve.id_
try:
if (not validate_cve(doc)):
logger.debug('[{cve_id}] was filtered out by input checks'.format(cve_id=cve_id))
continue
pkgfile_path = get_pkgfile_path(Config.pkgfile_dir, Config.ecosystem)
identifier = get_identifier_cls()(doc, Config.ecosystem, pkgfile_path)
candidates = identifier.identify()
if (not candidates):
logger.info('[{cve_id}] no package name candidates found'.format(cve_id=cve_id))
continue
selector = VersionSelector(doc, candidates, Config.ecosystem)
winner = selector.pick_winner()
if (not winner):
logger.info('[{cve_id}] no package name found'.format(cve_id=cve_id))
continue
(affected, safe) = NVDVersions(doc, winner.package, Config.ecosystem).run()
victims_output = VictimsYamlOutput(ecosystem=Config.ecosystem, cve_doc=doc, winner=winner, candidates=candidates, affected=affected, fixedin=safe)
_log_results(victims_output)
victims_output.write()
except Exception as exc:
logger.warning('[{cve_id}] Unexpected exception occurred: {exc}'.format(cve_id=cve_id, exc=exc), exc_info=True) |
def __init__(self, S, A, R, p):
"\n\t\tParameters\n\t\t----------\n\t\tS : int\n\t\t\tNumber of states\n\t\tA : matrix\n\t\t\tA[s][a] is True iff a is permitted in s\n\t\tR : list\n\t\t\tA list of reward generators\n\t\tp : matrix\n\t\t\tp[s][a][s'] = p(s'|s,a)\n\t\t"
self.S = list(range(S))
(self.A, self.R, self.p) = (A, R, p)
self.no_of_states = S
self.no_of_actions = len(A[0]) | 5,419,094,374,289,751,000 | Parameters
----------
S : int
Number of states
A : matrix
A[s][a] is True iff a is permitted in s
R : list
A list of reward generators
p : matrix
p[s][a][s'] = p(s'|s,a) | main.py | __init__ | ronaldosvieira/rl | python | def __init__(self, S, A, R, p):
"\n\t\tParameters\n\t\t----------\n\t\tS : int\n\t\t\tNumber of states\n\t\tA : matrix\n\t\t\tA[s][a] is True iff a is permitted in s\n\t\tR : list\n\t\t\tA list of reward generators\n\t\tp : matrix\n\t\t\tp[s][a][s'] = p(s'|s,a)\n\t\t"
self.S = list(range(S))
(self.A, self.R, self.p) = (A, R, p)
self.no_of_states = S
self.no_of_actions = len(A[0]) |
def step(self, s, a):
'Given a state and an action, returns a new state and a reward.\n\n\t\tParameters\n\t\t----------\n\t\ts : int\n\t\t\tCurrent state\n\t\ta : int\n\t\t\tAction to take\n\t\t'
s_prime = np.random.choice(self.no_of_states, p=self.p[s][a])
r = self.R[s_prime].get()
return (s_prime, r) | 8,184,501,362,705,089,000 | Given a state and an action, returns a new state and a reward.
Parameters
----------
s : int
Current state
a : int
Action to take | main.py | step | ronaldosvieira/rl | python | def step(self, s, a):
'Given a state and an action, returns a new state and a reward.\n\n\t\tParameters\n\t\t----------\n\t\ts : int\n\t\t\tCurrent state\n\t\ta : int\n\t\t\tAction to take\n\t\t'
s_prime = np.random.choice(self.no_of_states, p=self.p[s][a])
r = self.R[s_prime].get()
return (s_prime, r) |
def __init__(self, username=None):
'\n Keyword args:\n username (str): The username of the user.\n '
if (username is not None):
self.username = username | -33,502,890,462,554,616 | Keyword args:
username (str): The username of the user. | pypureclient/flasharray/FA_2_2/models/username.py | __init__ | Flav-STOR-WL/py-pure-client | python | def __init__(self, username=None):
'\n Keyword args:\n username (str): The username of the user.\n '
if (username is not None):
self.username = username |
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(Username, dict):
for (key, value) in self.items():
result[key] = value
return result | -4,027,666,252,657,289,700 | Returns the model properties as a dict | pypureclient/flasharray/FA_2_2/models/username.py | to_dict | Flav-STOR-WL/py-pure-client | python | def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
if issubclass(Username, dict):
for (key, value) in self.items():
result[key] = value
return result |
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict()) | 5,849,158,643,760,736,000 | Returns the string representation of the model | pypureclient/flasharray/FA_2_2/models/username.py | to_str | Flav-STOR-WL/py-pure-client | python | def to_str(self):
return pprint.pformat(self.to_dict()) |
def __repr__(self):
'For `print` and `pprint`'
return self.to_str() | -8,960,031,694,814,905,000 | For `print` and `pprint` | pypureclient/flasharray/FA_2_2/models/username.py | __repr__ | Flav-STOR-WL/py-pure-client | python | def __repr__(self):
return self.to_str() |
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, Username)):
return False
return (self.__dict__ == other.__dict__) | -4,847,326,211,869,451,000 | Returns true if both objects are equal | pypureclient/flasharray/FA_2_2/models/username.py | __eq__ | Flav-STOR-WL/py-pure-client | python | def __eq__(self, other):
if (not isinstance(other, Username)):
return False
return (self.__dict__ == other.__dict__) |
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other)) | 7,764,124,047,908,058,000 | Returns true if both objects are not equal | pypureclient/flasharray/FA_2_2/models/username.py | __ne__ | Flav-STOR-WL/py-pure-client | python | def __ne__(self, other):
return (not (self == other)) |
def setup_platform(hass, config, add_entities, discovery_info=None):
'Set up the Blockchain.com sensors.'
addresses = config[CONF_ADDRESSES]
name = config[CONF_NAME]
for address in addresses:
if (not validate_address(address)):
_LOGGER.error('Bitcoin address is not valid: %s', address)
return False
add_entities([BlockchainSensor(name, addresses)], True) | -512,189,206,448,340,000 | Set up the Blockchain.com sensors. | homeassistant/components/blockchain/sensor.py | setup_platform | CantankerousBullMoose/core | python | def setup_platform(hass, config, add_entities, discovery_info=None):
addresses = config[CONF_ADDRESSES]
name = config[CONF_NAME]
for address in addresses:
if (not validate_address(address)):
_LOGGER.error('Bitcoin address is not valid: %s', address)
return False
add_entities([BlockchainSensor(name, addresses)], True) |
def __init__(self, name, addresses):
'Initialize the sensor.'
self._name = name
self.addresses = addresses
self._state = None
self._unit_of_measurement = 'BTC' | -3,735,731,008,319,450,000 | Initialize the sensor. | homeassistant/components/blockchain/sensor.py | __init__ | CantankerousBullMoose/core | python | def __init__(self, name, addresses):
self._name = name
self.addresses = addresses
self._state = None
self._unit_of_measurement = 'BTC' |
@property
def name(self):
'Return the name of the sensor.'
return self._name | 8,691,954,631,286,512,000 | Return the name of the sensor. | homeassistant/components/blockchain/sensor.py | name | CantankerousBullMoose/core | python | @property
def name(self):
return self._name |
@property
def state(self):
'Return the state of the sensor.'
return self._state | -2,324,550,726,442,955,000 | Return the state of the sensor. | homeassistant/components/blockchain/sensor.py | state | CantankerousBullMoose/core | python | @property
def state(self):
return self._state |
@property
def unit_of_measurement(self):
'Return the unit of measurement this sensor expresses itself in.'
return self._unit_of_measurement | -4,980,045,660,747,502,000 | Return the unit of measurement this sensor expresses itself in. | homeassistant/components/blockchain/sensor.py | unit_of_measurement | CantankerousBullMoose/core | python | @property
def unit_of_measurement(self):
return self._unit_of_measurement |
@property
def icon(self):
'Return the icon to use in the frontend, if any.'
return ICON | -4,249,800,035,670,332,000 | Return the icon to use in the frontend, if any. | homeassistant/components/blockchain/sensor.py | icon | CantankerousBullMoose/core | python | @property
def icon(self):
return ICON |
@property
def extra_state_attributes(self):
'Return the state attributes of the sensor.'
return {ATTR_ATTRIBUTION: ATTRIBUTION} | 2,399,391,115,687,184,400 | Return the state attributes of the sensor. | homeassistant/components/blockchain/sensor.py | extra_state_attributes | CantankerousBullMoose/core | python | @property
def extra_state_attributes(self):
return {ATTR_ATTRIBUTION: ATTRIBUTION} |
def update(self):
'Get the latest state of the sensor.'
self._state = get_balance(self.addresses) | 397,848,807,134,841,400 | Get the latest state of the sensor. | homeassistant/components/blockchain/sensor.py | update | CantankerousBullMoose/core | python | def update(self):
self._state = get_balance(self.addresses) |
def fit(self, train_dataset):
'Performs model training with standard settings'
self.train = deepcopy(train_dataset)
if ('orbit' in self.name):
self.model.fit(self.train)
elif ('nprophet' in self.name):
self.model.fit(self.train, validate_each_epoch=True, valid_p=0.2, freq=self.freq, plot_live_loss=True, epochs=100) | 1,659,354,040,461,019,400 | Performs model training with standard settings | interpolML/interpolML/model/model.py | fit | MiguelMque/eafit-numerical-analysis-project | python | def fit(self, train_dataset):
self.train = deepcopy(train_dataset)
if ('orbit' in self.name):
self.model.fit(self.train)
elif ('nprophet' in self.name):
self.model.fit(self.train, validate_each_epoch=True, valid_p=0.2, freq=self.freq, plot_live_loss=True, epochs=100) |
def predict(self, dataset: Any):
'Performs prediction'
self.test = deepcopy(dataset)
if ('orbit' in self.name):
prediction = self.model.predict(self.test)
elif ('nprophet' in self.name):
future = self.model.make_future_dataframe(self.train, periods=len(self.test))
prediction = self.model.predict(future).rename(columns={'yhat1': self.pred_col})
prediction = prediction[[self.date_col, self.pred_col]]
self.prediction = prediction
return self.prediction | -2,624,890,655,209,499,600 | Performs prediction | interpolML/interpolML/model/model.py | predict | MiguelMque/eafit-numerical-analysis-project | python | def predict(self, dataset: Any):
self.test = deepcopy(dataset)
if ('orbit' in self.name):
prediction = self.model.predict(self.test)
elif ('nprophet' in self.name):
future = self.model.make_future_dataframe(self.train, periods=len(self.test))
prediction = self.model.predict(future).rename(columns={'yhat1': self.pred_col})
prediction = prediction[[self.date_col, self.pred_col]]
self.prediction = prediction
return self.prediction |
def sol(n, w, wt, v):
'\n We do not need to create a 2d array here because all numbers are available\n always\n Try all items for weight ranging from 1 to w and check if weight\n can be picked. Take the max of the result\n '
dp = [0 for i in range((w + 1))]
for i in range(n):
for j in range((w + 1)):
if (wt[i] <= j):
dp[j] = max(dp[j], (v[i] + dp[(j - wt[i])]))
return dp[w] | -5,269,090,704,713,257,000 | We do not need to create a 2d array here because all numbers are available
always
Try all items for weight ranging from 1 to w and check if weight
can be picked. Take the max of the result | full-problems/knapsackWithDuplicates.py | sol | vikas-t/DS-Algo | python | def sol(n, w, wt, v):
'\n We do not need to create a 2d array here because all numbers are available\n always\n Try all items for weight ranging from 1 to w and check if weight\n can be picked. Take the max of the result\n '
dp = [0 for i in range((w + 1))]
for i in range(n):
for j in range((w + 1)):
if (wt[i] <= j):
dp[j] = max(dp[j], (v[i] + dp[(j - wt[i])]))
return dp[w] |
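The `sol` routine above is the unbounded (duplicates-allowed) knapsack: because `dp[j]` is relaxed with `v[i] + dp[j - wt[i]]` while capacities are swept left to right, each item may be reused any number of times. A small usage check, assuming `sol` from the row above is in scope and with weights and values picked arbitrarily for illustration:

```python
wt = [2, 3, 4]           # item weights
v = [3, 4, 6]            # item values
print(sol(3, 8, wt, v))  # 12, e.g. two weight-4 items or four weight-2 items
```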
@property
def call_positions(self):
'Alias for :func:bitshares.account.Account.callpositions.'
return self.callpositions() | -4,466,748,821,980,121,000 | Alias for :func:bitshares.account.Account.callpositions. | bitshares/account.py | call_positions | bangzi1001/python-nbs | python | @property
def call_positions(self):
return self.callpositions() |
@property
def callpositions(self):
'List call positions (collateralized positions :doc:`mpa`)'
self.ensure_full()
from .dex import Dex
dex = Dex(blockchain_instance=self.blockchain)
return dex.list_debt_positions(self) | -1,382,996,191,267,861,500 | List call positions (collateralized positions :doc:`mpa`) | bitshares/account.py | callpositions | bangzi1001/python-nbs | python | @property
def callpositions(self):
self.ensure_full()
from .dex import Dex
dex = Dex(blockchain_instance=self.blockchain)
return dex.list_debt_positions(self) |
@property
def openorders(self):
'Returns open Orders.'
from .price import Order
self.ensure_full()
return [Order(o, blockchain_instance=self.blockchain) for o in self['limit_orders']] | -122,808,266,567,635,730 | Returns open Orders. | bitshares/account.py | openorders | bangzi1001/python-nbs | python | @property
def openorders(self):
from .price import Order
self.ensure_full()
return [Order(o, blockchain_instance=self.blockchain) for o in self['limit_orders']] |
def read_metafile(path: PathType) -> dd.DataFrame:
'Read cbgen metafile containing partitioned variant info'
with bgen_metafile(path) as mf:
divisions = ([(mf.partition_size * i) for i in range(mf.npartitions)] + [(mf.nvariants - 1)])
dfs = [dask.delayed(_read_metafile_partition)(path, i) for i in range(mf.npartitions)]
meta = dd.utils.make_meta(METAFILE_DTYPE)
return dd.from_delayed(dfs, meta=meta, divisions=divisions) | 4,662,318,411,178,781,000 | Read cbgen metafile containing partitioned variant info | sgkit/io/bgen/bgen_reader.py | read_metafile | pystatgen/sgk | python | def read_metafile(path: PathType) -> dd.DataFrame:
with bgen_metafile(path) as mf:
divisions = ([(mf.partition_size * i) for i in range(mf.npartitions)] + [(mf.nvariants - 1)])
dfs = [dask.delayed(_read_metafile_partition)(path, i) for i in range(mf.npartitions)]
meta = dd.utils.make_meta(METAFILE_DTYPE)
return dd.from_delayed(dfs, meta=meta, divisions=divisions) |
def read_samples(path: PathType) -> pd.DataFrame:
'Read BGEN .sample file'
df = pd.read_csv(path, sep=' ', skiprows=[1], usecols=[0])
df.columns = ['sample_id']
return df | -2,159,063,186,080,784,600 | Read BGEN .sample file | sgkit/io/bgen/bgen_reader.py | read_samples | pystatgen/sgk | python | def read_samples(path: PathType) -> pd.DataFrame:
df = pd.read_csv(path, sep=' ', skiprows=[1], usecols=[0])
df.columns = ['sample_id']
return df |
def read_bgen(path: PathType, metafile_path: Optional[PathType]=None, sample_path: Optional[PathType]=None, chunks: Union[(str, int, Tuple[(int, int, int)])]='auto', lock: bool=False, persist: bool=True, contig_dtype: DType='str', gp_dtype: DType='float32') -> Dataset:
'Read BGEN dataset.\n\n Loads a single BGEN dataset as dask arrays within a Dataset\n from a ``.bgen`` file.\n\n Parameters\n ----------\n path\n Path to BGEN file.\n metafile_path\n Path to companion index file used to determine BGEN byte offsets.\n Defaults to ``path`` + ".metafile" if not provided.\n This file is necessary for reading BGEN genotype probabilities and it will be\n generated the first time the file is read if it does not already exist.\n If it needs to be created, it can make the first call to this function\n much slower than subsequent calls.\n sample_path\n Path to ``.sample`` file, by default None. This is used to fetch sample identifiers\n and when provided it is preferred over sample identifiers embedded in the ``.bgen`` file.\n chunks\n Chunk size for genotype probability data (3 dimensions),\n by default "auto".\n lock\n Whether or not to synchronize concurrent reads of\n file blocks, by default False. This is passed through to\n [dask.array.from_array](https://docs.dask.org/en/latest/array-api.html#dask.array.from_array).\n persist\n Whether or not to persist variant information in memory, by default True.\n This is an important performance consideration as the metadata file for this data will\n be read multiple times when False.\n contig_dtype\n Data type for contig names, by default "str".\n This may also be an integer type (e.g. "int"), but will fail if any of the contig names\n cannot be converted to integers.\n gp_dtype\n Data type for genotype probabilities, by default "float32".\n\n Warnings\n --------\n Only bi-allelic, diploid BGEN files are currently supported.\n\n Returns\n -------\n A dataset containing the following variables:\n\n - :data:`sgkit.variables.variant_id_spec` (variants)\n - :data:`sgkit.variables.variant_contig_spec` (variants)\n - :data:`sgkit.variables.variant_position_spec` (variants)\n - :data:`sgkit.variables.variant_allele_spec` (variants)\n - :data:`sgkit.variables.sample_id_spec` (samples)\n - :data:`sgkit.variables.call_dosage_spec` (variants, samples)\n - :data:`sgkit.variables.call_dosage_mask_spec` (variants, samples)\n - :data:`sgkit.variables.call_genotype_probability_spec` (variants, samples, genotypes)\n - :data:`sgkit.variables.call_genotype_probability_mask_spec` (variants, samples, genotypes)\n\n '
if (isinstance(chunks, tuple) and (len(chunks) != 3)):
raise ValueError(f'`chunks` must be tuple with 3 items, not {chunks}')
if (not np.issubdtype(gp_dtype, np.floating)):
raise ValueError(f'`gp_dtype` must be a floating point data type, not {gp_dtype}')
if ((not np.issubdtype(contig_dtype, np.integer)) and (np.dtype(contig_dtype).kind not in {'U', 'S'})):
raise ValueError(f'`contig_dtype` must be of string or int type, not {contig_dtype}')
path = Path(path)
sample_path = (Path(sample_path) if sample_path else path.with_suffix('.sample'))
if sample_path.exists():
sample_id = read_samples(sample_path).sample_id.values.astype('U')
else:
sample_id = _default_sample_ids(path)
bgen_reader = BgenReader(path, metafile_path=metafile_path, dtype=gp_dtype)
df = read_metafile(bgen_reader.metafile_path)
if persist:
df = df.persist()
arrs = dataframe_to_dict(df, METAFILE_DTYPE)
variant_id = arrs['id']
variant_contig: ArrayLike = arrs['chrom'].astype(contig_dtype)
(variant_contig, variant_contig_names) = encode_contigs(variant_contig)
variant_contig_names = list(variant_contig_names)
variant_position = arrs['pos']
variant_allele = da.hstack((arrs['a1'][:, np.newaxis], arrs['a2'][:, np.newaxis]))
call_genotype_probability = da.from_array(bgen_reader, chunks=chunks, lock=lock, fancy=False, asarray=False, name=f'{bgen_reader.name}:read_bgen:{path}')
call_dosage = _to_dosage(call_genotype_probability)
ds: Dataset = create_genotype_dosage_dataset(variant_contig_names=variant_contig_names, variant_contig=variant_contig, variant_position=variant_position, variant_allele=variant_allele, sample_id=sample_id, call_dosage=call_dosage, call_genotype_probability=call_genotype_probability, variant_id=variant_id)
return ds | 9,032,836,243,801,915,000 | Read BGEN dataset.
Loads a single BGEN dataset as dask arrays within a Dataset
from a ``.bgen`` file.
Parameters
----------
path
Path to BGEN file.
metafile_path
Path to companion index file used to determine BGEN byte offsets.
Defaults to ``path`` + ".metafile" if not provided.
This file is necessary for reading BGEN genotype probabilities and it will be
generated the first time the file is read if it does not already exist.
If it needs to be created, it can make the first call to this function
much slower than subsequent calls.
sample_path
Path to ``.sample`` file, by default None. This is used to fetch sample identifiers
and when provided it is preferred over sample identifiers embedded in the ``.bgen`` file.
chunks
Chunk size for genotype probability data (3 dimensions),
by default "auto".
lock
Whether or not to synchronize concurrent reads of
file blocks, by default False. This is passed through to
[dask.array.from_array](https://docs.dask.org/en/latest/array-api.html#dask.array.from_array).
persist
Whether or not to persist variant information in memory, by default True.
This is an important performance consideration as the metadata file for this data will
be read multiple times when False.
contig_dtype
Data type for contig names, by default "str".
This may also be an integer type (e.g. "int"), but will fail if any of the contig names
cannot be converted to integers.
gp_dtype
Data type for genotype probabilities, by default "float32".
Warnings
--------
Only bi-allelic, diploid BGEN files are currently supported.
Returns
-------
A dataset containing the following variables:
- :data:`sgkit.variables.variant_id_spec` (variants)
- :data:`sgkit.variables.variant_contig_spec` (variants)
- :data:`sgkit.variables.variant_position_spec` (variants)
- :data:`sgkit.variables.variant_allele_spec` (variants)
- :data:`sgkit.variables.sample_id_spec` (samples)
- :data:`sgkit.variables.call_dosage_spec` (variants, samples)
- :data:`sgkit.variables.call_dosage_mask_spec` (variants, samples)
- :data:`sgkit.variables.call_genotype_probability_spec` (variants, samples, genotypes)
- :data:`sgkit.variables.call_genotype_probability_mask_spec` (variants, samples, genotypes) | sgkit/io/bgen/bgen_reader.py | read_bgen | pystatgen/sgk | python | def read_bgen(path: PathType, metafile_path: Optional[PathType]=None, sample_path: Optional[PathType]=None, chunks: Union[(str, int, Tuple[(int, int, int)])]='auto', lock: bool=False, persist: bool=True, contig_dtype: DType='str', gp_dtype: DType='float32') -> Dataset:
'Read BGEN dataset.\n\n Loads a single BGEN dataset as dask arrays within a Dataset\n from a ``.bgen`` file.\n\n Parameters\n ----------\n path\n Path to BGEN file.\n metafile_path\n Path to companion index file used to determine BGEN byte offsets.\n Defaults to ``path`` + ".metafile" if not provided.\n This file is necessary for reading BGEN genotype probabilities and it will be\n generated the first time the file is read if it does not already exist.\n If it needs to be created, it can make the first call to this function\n much slower than subsequent calls.\n sample_path\n Path to ``.sample`` file, by default None. This is used to fetch sample identifiers\n and when provided it is preferred over sample identifiers embedded in the ``.bgen`` file.\n chunks\n Chunk size for genotype probability data (3 dimensions),\n by default "auto".\n lock\n Whether or not to synchronize concurrent reads of\n file blocks, by default False. This is passed through to\n [dask.array.from_array](https://docs.dask.org/en/latest/array-api.html#dask.array.from_array).\n persist\n Whether or not to persist variant information in memory, by default True.\n This is an important performance consideration as the metadata file for this data will\n be read multiple times when False.\n contig_dtype\n Data type for contig names, by default "str".\n This may also be an integer type (e.g. "int"), but will fail if any of the contig names\n cannot be converted to integers.\n gp_dtype\n Data type for genotype probabilities, by default "float32".\n\n Warnings\n --------\n Only bi-allelic, diploid BGEN files are currently supported.\n\n Returns\n -------\n A dataset containing the following variables:\n\n - :data:`sgkit.variables.variant_id_spec` (variants)\n - :data:`sgkit.variables.variant_contig_spec` (variants)\n - :data:`sgkit.variables.variant_position_spec` (variants)\n - :data:`sgkit.variables.variant_allele_spec` (variants)\n - :data:`sgkit.variables.sample_id_spec` (samples)\n - :data:`sgkit.variables.call_dosage_spec` (variants, samples)\n - :data:`sgkit.variables.call_dosage_mask_spec` (variants, samples)\n - :data:`sgkit.variables.call_genotype_probability_spec` (variants, samples, genotypes)\n - :data:`sgkit.variables.call_genotype_probability_mask_spec` (variants, samples, genotypes)\n\n '
if (isinstance(chunks, tuple) and (len(chunks) != 3)):
raise ValueError(f'`chunks` must be tuple with 3 items, not {chunks}')
if (not np.issubdtype(gp_dtype, np.floating)):
raise ValueError(f'`gp_dtype` must be a floating point data type, not {gp_dtype}')
if ((not np.issubdtype(contig_dtype, np.integer)) and (np.dtype(contig_dtype).kind not in {'U', 'S'})):
raise ValueError(f'`contig_dtype` must be of string or int type, not {contig_dtype}')
path = Path(path)
sample_path = (Path(sample_path) if sample_path else path.with_suffix('.sample'))
if sample_path.exists():
sample_id = read_samples(sample_path).sample_id.values.astype('U')
else:
sample_id = _default_sample_ids(path)
bgen_reader = BgenReader(path, metafile_path=metafile_path, dtype=gp_dtype)
df = read_metafile(bgen_reader.metafile_path)
if persist:
df = df.persist()
arrs = dataframe_to_dict(df, METAFILE_DTYPE)
variant_id = arrs['id']
variant_contig: ArrayLike = arrs['chrom'].astype(contig_dtype)
(variant_contig, variant_contig_names) = encode_contigs(variant_contig)
variant_contig_names = list(variant_contig_names)
variant_position = arrs['pos']
variant_allele = da.hstack((arrs['a1'][:, np.newaxis], arrs['a2'][:, np.newaxis]))
call_genotype_probability = da.from_array(bgen_reader, chunks=chunks, lock=lock, fancy=False, asarray=False, name=f'{bgen_reader.name}:read_bgen:{path}')
call_dosage = _to_dosage(call_genotype_probability)
ds: Dataset = create_genotype_dosage_dataset(variant_contig_names=variant_contig_names, variant_contig=variant_contig, variant_position=variant_position, variant_allele=variant_allele, sample_id=sample_id, call_dosage=call_dosage, call_genotype_probability=call_genotype_probability, variant_id=variant_id)
return ds |
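
A hedged usage sketch for the read_bgen record above, assuming sgkit's bgen extra is installed and that a file named example.bgen (hypothetical) sits next to its .sample file; the first call also writes the .metafile index:

from sgkit.io.bgen import read_bgen  # import path assumed from the record's module path

ds = read_bgen('example.bgen', chunks=(1000, -1, -1), gp_dtype='float32')
print(ds.dims)                        # {'variants': ..., 'samples': ..., 'genotypes': 3}
dosage = ds['call_dosage'].values     # forces the lazy dask arrays to actually read the BGEN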
def _default_sample_ids(path: PathType) -> ArrayLike:
'Fetch or generate sample ids'
with bgen_file(path) as bgen:
if bgen.contain_samples:
return bgen.read_samples()
else:
return np.char.add(b'sample_', np.arange(bgen.nsamples).astype('S')) | -2,921,464,483,249,048,000 | Fetch or generate sample ids | sgkit/io/bgen/bgen_reader.py | _default_sample_ids | pystatgen/sgk | python | def _default_sample_ids(path: PathType) -> ArrayLike:
with bgen_file(path) as bgen:
if bgen.contain_samples:
return bgen.read_samples()
else:
return np.char.add(b'sample_', np.arange(bgen.nsamples).astype('S')) |
def _to_dosage(probs: ArrayLike) -> ArrayLike:
'Calculate the dosage from genotype likelihoods (probabilities)'
assert (probs.shape[(- 1)] == 3), f'Expecting genotype (trailing) dimension of size 3, got array of shape {probs.shape}'
return (probs[(..., 1)] + (2 * probs[(..., 2)])) | -602,845,097,226,297,300 | Calculate the dosage from genotype likelihoods (probabilities) | sgkit/io/bgen/bgen_reader.py | _to_dosage | pystatgen/sgk | python | def _to_dosage(probs: ArrayLike) -> ArrayLike:
assert (probs.shape[(- 1)] == 3), f'Expecting genotype (trailing) dimension of size 3, got array of shape {probs.shape}'
return (probs[(..., 1)] + (2 * probs[(..., 2)])) |
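
A small worked example of the dosage formula above, dosage = P(het) + 2 * P(hom-alt), using plain numpy:

import numpy as np

# 2 variants x 2 samples x 3 genotype probabilities (hom-ref, het, hom-alt).
probs = np.array([[[0.9, 0.1, 0.0], [0.2, 0.5, 0.3]],
                  [[0.0, 0.0, 1.0], [0.1, 0.8, 0.1]]])
dosage = probs[..., 1] + 2 * probs[..., 2]
print(dosage)  # [[0.1 1.1]
               #  [2.  1. ]]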
def rechunk_bgen(ds: Dataset, output: Union[(PathType, MutableMapping[(str, bytes)])], *, chunk_length: int=10000, chunk_width: int=1000, compressor: Optional[Any]=zarr.Blosc(cname='zstd', clevel=7, shuffle=2), probability_dtype: Optional[DType]='uint8', max_mem: str='4GB', pack: bool=True, tempdir: Optional[PathType]=None) -> Dataset:
'Rechunk BGEN dataset as Zarr.\n\n This function will use the algorithm https://rechunker.readthedocs.io/en/latest/\n to rechunk certain fields in a provided Dataset for better downstream performance.\n Depending on the system memory available (and the `max_mem` setting) this\n rechunking may occur without the need of any intermediate data store. Otherwise,\n approximately as much disk space is required as was needed to store the original\n BGEN data. Experiments show that this Zarr representation is ~20% larger even\n with all available optimizations and fairly aggressive compression (i.e. the\n default `clevel` 7).\n\n Note that this function is not evaluated lazily. The rechunking algorithm\n will run inline so calls to it may be slow. The resulting Dataset is\n generated based on the final, serialized Zarr data.\n\n Parameters\n ----------\n ds\n Dataset to rechunk, typically the result from `read_bgen`.\n output\n Zarr store or path to directory in file system.\n chunk_length\n Length (number of variants) of chunks in which data are stored, by default 10_000.\n chunk_width\n Width (number of samples) to use when storing chunks in output, by default 1_000.\n compressor\n Zarr compressor, no compression is used when set as None.\n probability_dtype\n Data type used to encode genotype probabilities, must be either uint8 or uint16.\n Setting this parameter results in a loss of precision. If None, probabilities\n will not be altered when stored.\n max_mem\n The amount of memory (in bytes) that workers are allowed to use. A string\n (e.g. 100MB) can also be used.\n pack\n Whether or not to optimize variable representations by removing unnecessary\n dimensions and elements. This includes storing 2 genotypes instead of 3, omitting\n dosage and collapsing the genotype probability mask to 2 dimensions. All of\n the above are restored in the resulting Dataset at the expense of extra\n computations on read.\n tempdir\n Temporary directory where intermediate files are stored. The default None means\n use the system default temporary directory.\n\n Warnings\n --------\n This functional is only applicable to diploid, bi-allelic BGEN datasets.\n\n Returns\n -------\n Dataset\n The rechunked dataset.\n '
if isinstance(output, Path):
output = str(output)
chunk_length = min(chunk_length, ds.dims['variants'])
chunk_width = min(chunk_width, ds.dims['samples'])
if pack:
ds = pack_variables(ds)
encoding = encode_variables(ds, chunk_length=chunk_length, chunk_width=chunk_width, compressor=compressor, probability_dtype=probability_dtype)
target_chunks = {var: encoding[var]['chunks'] for var in encoding if ('chunks' in encoding[var])}
target_options = {var: {k: v for (k, v) in encoding[var].items() if (k != 'chunks')} for var in encoding}
with tempfile.TemporaryDirectory(prefix='bgen_to_zarr_', suffix='.zarr', dir=tempdir) as tmpdir:
rechunked = rechunker_api.rechunk(ds, max_mem=max_mem, target_chunks=target_chunks, target_store=output, target_options=target_options, temp_store=tmpdir, executor='dask')
rechunked.execute()
zarr.consolidate_metadata(output)
ds: Dataset = xr.open_zarr(output, concat_characters=False)
if pack:
ds = unpack_variables(ds)
return ds | -7,131,876,420,751,915,000 | Rechunk BGEN dataset as Zarr.
This function will use the algorithm https://rechunker.readthedocs.io/en/latest/
to rechunk certain fields in a provided Dataset for better downstream performance.
Depending on the system memory available (and the `max_mem` setting) this
rechunking may occur without the need of any intermediate data store. Otherwise,
approximately as much disk space is required as was needed to store the original
BGEN data. Experiments show that this Zarr representation is ~20% larger even
with all available optimizations and fairly aggressive compression (i.e. the
default `clevel` 7).
Note that this function is not evaluated lazily. The rechunking algorithm
will run inline so calls to it may be slow. The resulting Dataset is
generated based on the final, serialized Zarr data.
Parameters
----------
ds
Dataset to rechunk, typically the result from `read_bgen`.
output
Zarr store or path to directory in file system.
chunk_length
Length (number of variants) of chunks in which data are stored, by default 10_000.
chunk_width
Width (number of samples) to use when storing chunks in output, by default 1_000.
compressor
Zarr compressor, no compression is used when set as None.
probability_dtype
Data type used to encode genotype probabilities, must be either uint8 or uint16.
Setting this parameter results in a loss of precision. If None, probabilities
will not be altered when stored.
max_mem
The amount of memory (in bytes) that workers are allowed to use. A string
(e.g. 100MB) can also be used.
pack
Whether or not to optimize variable representations by removing unnecessary
dimensions and elements. This includes storing 2 genotypes instead of 3, omitting
dosage and collapsing the genotype probability mask to 2 dimensions. All of
the above are restored in the resulting Dataset at the expense of extra
computations on read.
tempdir
Temporary directory where intermediate files are stored. The default None means
use the system default temporary directory.
Warnings
--------
This function is only applicable to diploid, bi-allelic BGEN datasets.
Returns
-------
Dataset
The rechunked dataset. | sgkit/io/bgen/bgen_reader.py | rechunk_bgen | pystatgen/sgk | python | def rechunk_bgen(ds: Dataset, output: Union[(PathType, MutableMapping[(str, bytes)])], *, chunk_length: int=10000, chunk_width: int=1000, compressor: Optional[Any]=zarr.Blosc(cname='zstd', clevel=7, shuffle=2), probability_dtype: Optional[DType]='uint8', max_mem: str='4GB', pack: bool=True, tempdir: Optional[PathType]=None) -> Dataset:
'Rechunk BGEN dataset as Zarr.\n\n This function will use the algorithm https://rechunker.readthedocs.io/en/latest/\n to rechunk certain fields in a provided Dataset for better downstream performance.\n Depending on the system memory available (and the `max_mem` setting) this\n rechunking may occur without the need of any intermediate data store. Otherwise,\n approximately as much disk space is required as was needed to store the original\n BGEN data. Experiments show that this Zarr representation is ~20% larger even\n with all available optimizations and fairly aggressive compression (i.e. the\n default `clevel` 7).\n\n Note that this function is not evaluated lazily. The rechunking algorithm\n will run inline so calls to it may be slow. The resulting Dataset is\n generated based on the final, serialized Zarr data.\n\n Parameters\n ----------\n ds\n Dataset to rechunk, typically the result from `read_bgen`.\n output\n Zarr store or path to directory in file system.\n chunk_length\n Length (number of variants) of chunks in which data are stored, by default 10_000.\n chunk_width\n Width (number of samples) to use when storing chunks in output, by default 1_000.\n compressor\n Zarr compressor, no compression is used when set as None.\n probability_dtype\n Data type used to encode genotype probabilities, must be either uint8 or uint16.\n Setting this parameter results in a loss of precision. If None, probabilities\n will not be altered when stored.\n max_mem\n The amount of memory (in bytes) that workers are allowed to use. A string\n (e.g. 100MB) can also be used.\n pack\n Whether or not to optimize variable representations by removing unnecessary\n dimensions and elements. This includes storing 2 genotypes instead of 3, omitting\n dosage and collapsing the genotype probability mask to 2 dimensions. All of\n the above are restored in the resulting Dataset at the expense of extra\n computations on read.\n tempdir\n Temporary directory where intermediate files are stored. The default None means\n use the system default temporary directory.\n\n Warnings\n --------\n This functional is only applicable to diploid, bi-allelic BGEN datasets.\n\n Returns\n -------\n Dataset\n The rechunked dataset.\n '
if isinstance(output, Path):
output = str(output)
chunk_length = min(chunk_length, ds.dims['variants'])
chunk_width = min(chunk_width, ds.dims['samples'])
if pack:
ds = pack_variables(ds)
encoding = encode_variables(ds, chunk_length=chunk_length, chunk_width=chunk_width, compressor=compressor, probability_dtype=probability_dtype)
target_chunks = {var: encoding[var]['chunks'] for var in encoding if ('chunks' in encoding[var])}
target_options = {var: {k: v for (k, v) in encoding[var].items() if (k != 'chunks')} for var in encoding}
with tempfile.TemporaryDirectory(prefix='bgen_to_zarr_', suffix='.zarr', dir=tempdir) as tmpdir:
rechunked = rechunker_api.rechunk(ds, max_mem=max_mem, target_chunks=target_chunks, target_store=output, target_options=target_options, temp_store=tmpdir, executor='dask')
rechunked.execute()
zarr.consolidate_metadata(output)
ds: Dataset = xr.open_zarr(output, concat_characters=False)
if pack:
ds = unpack_variables(ds)
return ds |
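
A sketch of calling rechunk_bgen on a dataset returned by read_bgen; the output store name and chunk sizes are illustrative, and the imports are assumed from the record's module path:

from sgkit.io.bgen import read_bgen, rechunk_bgen  # assumed public imports

ds = read_bgen('example.bgen', chunks=(100, -1, -1))  # hypothetical input file
ds_zarr = rechunk_bgen(
    ds,
    'example_rechunked.zarr',        # hypothetical output directory store
    chunk_length=5000,
    chunk_width=500,
    probability_dtype='uint8',       # lossy but compact probability encoding
    max_mem='2GB',
)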
def bgen_to_zarr(input: PathType, output: Union[(PathType, MutableMapping[(str, bytes)])], region: Optional[Mapping[(Hashable, Any)]]=None, chunk_length: int=10000, chunk_width: int=1000, temp_chunk_length: int=100, compressor: Optional[Any]=zarr.Blosc(cname='zstd', clevel=7, shuffle=2), probability_dtype: Optional[DType]='uint8', max_mem: str='4GB', pack: bool=True, tempdir: Optional[PathType]=None) -> Dataset:
'Convert a BGEN file to a Zarr on-disk store.\n\n This function is a convenience for calling :func:`read_bgen` followed by\n :func:`rechunk_bgen`.\n\n Parameters\n ----------\n input\n Path to local BGEN dataset.\n output\n Zarr store or path to directory in file system.\n region\n Indexers on dataset dimensions used to define a subset of data to convert.\n Must be None or a dict with keys matching dimension names and values\n equal to integers or slice objects. This is passed directly to `Dataset.isel`\n so it has the same semantics.\n chunk_length\n Length (number of variants) of chunks in which data are stored, by default 10_000.\n chunk_width\n Width (number of samples) to use when storing chunks in output, by default 1_000.\n temp_chunk_length\n Length of chunks used in raw BGEN read, by default 100. This defines the vertical\n chunking (i.e. in the variants dimension) used when reading the raw data and because\n there is no horizontal chunking at this phase (i.e. in the samples dimension), this\n value should be much smaller than the target `chunk_length`.\n compressor\n Zarr compressor, by default Blosc + zstd with compression level 7. No compression\n is used when set as None.\n probability_dtype\n Data type used to encode genotype probabilities, must be either uint8 or uint16.\n Setting this parameter results in a loss of precision. If None, probabilities\n will not be altered when stored.\n max_mem\n The amount of memory (in bytes) that workers are allowed to use. A string\n (e.g. 100MB) can also be used.\n pack\n Whether or not to optimize variable representations by removing unnecessary\n dimensions and elements. This includes storing 2 genotypes instead of 3, omitting\n dosage and collapsing the genotype probability mask to 2 dimensions. All of\n the above are restored in the resulting Dataset at the expense of extra\n computations on read.\n tempdir\n Temporary directory where intermediate files are stored. The default None means\n use the system default temporary directory.\n\n Warnings\n --------\n This functional is only applicable to diploid, bi-allelic BGEN datasets.\n\n Returns\n -------\n Dataset\n The rechunked dataset.\n '
ds = read_bgen(input, chunks=(temp_chunk_length, (- 1), (- 1)))
if (region is not None):
ds = ds.isel(indexers=region)
return rechunk_bgen(ds, output, chunk_length=chunk_length, chunk_width=chunk_width, compressor=compressor, probability_dtype=probability_dtype, max_mem=max_mem, pack=pack, tempdir=tempdir) | 2,164,697,128,023,955,200 | Convert a BGEN file to a Zarr on-disk store.
This function is a convenience for calling :func:`read_bgen` followed by
:func:`rechunk_bgen`.
Parameters
----------
input
Path to local BGEN dataset.
output
Zarr store or path to directory in file system.
region
Indexers on dataset dimensions used to define a subset of data to convert.
Must be None or a dict with keys matching dimension names and values
equal to integers or slice objects. This is passed directly to `Dataset.isel`
so it has the same semantics.
chunk_length
Length (number of variants) of chunks in which data are stored, by default 10_000.
chunk_width
Width (number of samples) to use when storing chunks in output, by default 1_000.
temp_chunk_length
Length of chunks used in raw BGEN read, by default 100. This defines the vertical
chunking (i.e. in the variants dimension) used when reading the raw data and because
there is no horizontal chunking at this phase (i.e. in the samples dimension), this
value should be much smaller than the target `chunk_length`.
compressor
Zarr compressor, by default Blosc + zstd with compression level 7. No compression
is used when set as None.
probability_dtype
Data type used to encode genotype probabilities, must be either uint8 or uint16.
Setting this parameter results in a loss of precision. If None, probabilities
will not be altered when stored.
max_mem
The amount of memory (in bytes) that workers are allowed to use. A string
(e.g. 100MB) can also be used.
pack
Whether or not to optimize variable representations by removing unnecessary
dimensions and elements. This includes storing 2 genotypes instead of 3, omitting
dosage and collapsing the genotype probability mask to 2 dimensions. All of
the above are restored in the resulting Dataset at the expense of extra
computations on read.
tempdir
Temporary directory where intermediate files are stored. The default None means
use the system default temporary directory.
Warnings
--------
This function is only applicable to diploid, bi-allelic BGEN datasets.
Returns
-------
Dataset
The rechunked dataset. | sgkit/io/bgen/bgen_reader.py | bgen_to_zarr | pystatgen/sgk | python | def bgen_to_zarr(input: PathType, output: Union[(PathType, MutableMapping[(str, bytes)])], region: Optional[Mapping[(Hashable, Any)]]=None, chunk_length: int=10000, chunk_width: int=1000, temp_chunk_length: int=100, compressor: Optional[Any]=zarr.Blosc(cname='zstd', clevel=7, shuffle=2), probability_dtype: Optional[DType]='uint8', max_mem: str='4GB', pack: bool=True, tempdir: Optional[PathType]=None) -> Dataset:
'Convert a BGEN file to a Zarr on-disk store.\n\n This function is a convenience for calling :func:`read_bgen` followed by\n :func:`rechunk_bgen`.\n\n Parameters\n ----------\n input\n Path to local BGEN dataset.\n output\n Zarr store or path to directory in file system.\n region\n Indexers on dataset dimensions used to define a subset of data to convert.\n Must be None or a dict with keys matching dimension names and values\n equal to integers or slice objects. This is passed directly to `Dataset.isel`\n so it has the same semantics.\n chunk_length\n Length (number of variants) of chunks in which data are stored, by default 10_000.\n chunk_width\n Width (number of samples) to use when storing chunks in output, by default 1_000.\n temp_chunk_length\n Length of chunks used in raw BGEN read, by default 100. This defines the vertical\n chunking (i.e. in the variants dimension) used when reading the raw data and because\n there is no horizontal chunking at this phase (i.e. in the samples dimension), this\n value should be much smaller than the target `chunk_length`.\n compressor\n Zarr compressor, by default Blosc + zstd with compression level 7. No compression\n is used when set as None.\n probability_dtype\n Data type used to encode genotype probabilities, must be either uint8 or uint16.\n Setting this parameter results in a loss of precision. If None, probabilities\n will not be altered when stored.\n max_mem\n The amount of memory (in bytes) that workers are allowed to use. A string\n (e.g. 100MB) can also be used.\n pack\n Whether or not to optimize variable representations by removing unnecessary\n dimensions and elements. This includes storing 2 genotypes instead of 3, omitting\n dosage and collapsing the genotype probability mask to 2 dimensions. All of\n the above are restored in the resulting Dataset at the expense of extra\n computations on read.\n tempdir\n Temporary directory where intermediate files are stored. The default None means\n use the system default temporary directory.\n\n Warnings\n --------\n This functional is only applicable to diploid, bi-allelic BGEN datasets.\n\n Returns\n -------\n Dataset\n The rechunked dataset.\n '
ds = read_bgen(input, chunks=(temp_chunk_length, (- 1), (- 1)))
if (region is not None):
ds = ds.isel(indexers=region)
return rechunk_bgen(ds, output, chunk_length=chunk_length, chunk_width=chunk_width, compressor=compressor, probability_dtype=probability_dtype, max_mem=max_mem, pack=pack, tempdir=tempdir) |
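
And the one-call convenience path described above, converting a BGEN file straight to a Zarr store while keeping only the first 10,000 variants; paths and sizes are again hypothetical:

from sgkit.io.bgen import bgen_to_zarr  # assumed public import

ds = bgen_to_zarr(
    'example.bgen',
    'example.zarr',
    region={'variants': slice(0, 10000)},  # optional subset applied before rechunking
    chunk_length=5000,
    chunk_width=1000,
)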
def __init__(self, method: Callable[(..., compute.TargetInstanceAggregatedList)], request: compute.AggregatedListTargetInstancesRequest, response: compute.TargetInstanceAggregatedList, *, metadata: Sequence[Tuple[(str, str)]]=()):
'Instantiate the pager.\n\n Args:\n method (Callable): The method that was originally called, and\n which instantiated this pager.\n request (google.cloud.compute_v1.types.AggregatedListTargetInstancesRequest):\n The initial request object.\n response (google.cloud.compute_v1.types.TargetInstanceAggregatedList):\n The initial response object.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n '
self._method = method
self._request = compute.AggregatedListTargetInstancesRequest(request)
self._response = response
self._metadata = metadata | -2,642,775,035,473,236,000 | Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.AggregatedListTargetInstancesRequest):
The initial request object.
response (google.cloud.compute_v1.types.TargetInstanceAggregatedList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata. | google/cloud/compute_v1/services/target_instances/pagers.py | __init__ | Ctfbuster/python-compute | python | def __init__(self, method: Callable[(..., compute.TargetInstanceAggregatedList)], request: compute.AggregatedListTargetInstancesRequest, response: compute.TargetInstanceAggregatedList, *, metadata: Sequence[Tuple[(str, str)]]=()):
'Instantiate the pager.\n\n Args:\n method (Callable): The method that was originally called, and\n which instantiated this pager.\n request (google.cloud.compute_v1.types.AggregatedListTargetInstancesRequest):\n The initial request object.\n response (google.cloud.compute_v1.types.TargetInstanceAggregatedList):\n The initial response object.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n '
self._method = method
self._request = compute.AggregatedListTargetInstancesRequest(request)
self._response = response
self._metadata = metadata |
def __init__(self, method: Callable[(..., compute.TargetInstanceList)], request: compute.ListTargetInstancesRequest, response: compute.TargetInstanceList, *, metadata: Sequence[Tuple[(str, str)]]=()):
'Instantiate the pager.\n\n Args:\n method (Callable): The method that was originally called, and\n which instantiated this pager.\n request (google.cloud.compute_v1.types.ListTargetInstancesRequest):\n The initial request object.\n response (google.cloud.compute_v1.types.TargetInstanceList):\n The initial response object.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n '
self._method = method
self._request = compute.ListTargetInstancesRequest(request)
self._response = response
self._metadata = metadata | -2,524,944,656,419,113,000 | Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.ListTargetInstancesRequest):
The initial request object.
response (google.cloud.compute_v1.types.TargetInstanceList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata. | google/cloud/compute_v1/services/target_instances/pagers.py | __init__ | Ctfbuster/python-compute | python | def __init__(self, method: Callable[(..., compute.TargetInstanceList)], request: compute.ListTargetInstancesRequest, response: compute.TargetInstanceList, *, metadata: Sequence[Tuple[(str, str)]]=()):
'Instantiate the pager.\n\n Args:\n method (Callable): The method that was originally called, and\n which instantiated this pager.\n request (google.cloud.compute_v1.types.ListTargetInstancesRequest):\n The initial request object.\n response (google.cloud.compute_v1.types.TargetInstanceList):\n The initial response object.\n metadata (Sequence[Tuple[str, str]]): Strings which should be\n sent along with the request as metadata.\n '
self._method = method
self._request = compute.ListTargetInstancesRequest(request)
self._response = response
self._metadata = metadata |
def audio_with_sox(path, sample_rate, start_time, end_time):
'\n crop and resample the recording with sox and loads it.\n '
with NamedTemporaryFile(suffix='.wav') as tar_file:
tar_filename = tar_file.name
sox_params = 'sox "{}" -r {} -c 1 -b 16 -e si {} trim {} ={} >/dev/null 2>&1'.format(path, sample_rate, tar_filename, start_time, end_time)
os.system(sox_params)
y = load_audio(tar_filename)
return y | 1,673,118,714,672,008,000 | crop and resample the recording with sox and loads it. | data/data_loader.py | audio_with_sox | ShuanDeMorian/deepspeech.pytorch | python | def audio_with_sox(path, sample_rate, start_time, end_time):
'\n \n '
with NamedTemporaryFile(suffix='.wav') as tar_file:
tar_filename = tar_file.name
sox_params = 'sox "{}" -r {} -c 1 -b 16 -e si {} trim {} ={} >/dev/null 2>&1'.format(path, sample_rate, tar_filename, start_time, end_time)
os.system(sox_params)
y = load_audio(tar_filename)
return y |
def augment_audio_with_sox(path, sample_rate, tempo, gain):
'\n Changes tempo and gain of the recording with sox and loads it.\n '
with NamedTemporaryFile(suffix='.wav') as augmented_file:
augmented_filename = augmented_file.name
sox_augment_params = ['tempo', '{:.3f}'.format(tempo), 'gain', '{:.3f}'.format(gain)]
sox_params = 'sox "{}" -r {} -c 1 -b 16 -e si {} {} >/dev/null 2>&1'.format(path, sample_rate, augmented_filename, ' '.join(sox_augment_params))
os.system(sox_params)
y = load_audio(augmented_filename)
return y | -8,893,820,232,313,007,000 | Changes tempo and gain of the recording with sox and loads it. | data/data_loader.py | augment_audio_with_sox | ShuanDeMorian/deepspeech.pytorch | python | def augment_audio_with_sox(path, sample_rate, tempo, gain):
'\n \n '
with NamedTemporaryFile(suffix='.wav') as augmented_file:
augmented_filename = augmented_file.name
sox_augment_params = ['tempo', '{:.3f}'.format(tempo), 'gain', '{:.3f}'.format(gain)]
sox_params = 'sox "{}" -r {} -c 1 -b 16 -e si {} {} >/dev/null 2>&1'.format(path, sample_rate, augmented_filename, ' '.join(sox_augment_params))
os.system(sox_params)
y = load_audio(augmented_filename)
return y |
def load_randomly_augmented_audio(path, sample_rate=16000, tempo_range=(0.85, 1.15), gain_range=((- 6), 8)):
'\n Picks tempo and gain uniformly, applies it to the utterance by using sox utility.\n Returns the augmented utterance.\n '
(low_tempo, high_tempo) = tempo_range
tempo_value = np.random.uniform(low=low_tempo, high=high_tempo)
(low_gain, high_gain) = gain_range
gain_value = np.random.uniform(low=low_gain, high=high_gain)
audio = augment_audio_with_sox(path=path, sample_rate=sample_rate, tempo=tempo_value, gain=gain_value)
return audio | 5,580,198,202,622,402,000 | Picks tempo and gain uniformly, applies it to the utterance by using sox utility.
Returns the augmented utterance. | data/data_loader.py | load_randomly_augmented_audio | ShuanDeMorian/deepspeech.pytorch | python | def load_randomly_augmented_audio(path, sample_rate=16000, tempo_range=(0.85, 1.15), gain_range=((- 6), 8)):
'\n Picks tempo and gain uniformly, applies it to the utterance by using sox utility.\n Returns the augmented utterance.\n '
(low_tempo, high_tempo) = tempo_range
tempo_value = np.random.uniform(low=low_tempo, high=high_tempo)
(low_gain, high_gain) = gain_range
gain_value = np.random.uniform(low=low_gain, high=high_gain)
audio = augment_audio_with_sox(path=path, sample_rate=sample_rate, tempo=tempo_value, gain=gain_value)
return audio |
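
The tempo/gain sampling in the record above can be exercised without any audio: this sketch draws the random parameters and prints the sox command string that augment_audio_with_sox would run (the file names are placeholders):

import numpy as np

tempo_range, gain_range = (0.85, 1.15), (-6, 8)
tempo = np.random.uniform(*tempo_range)   # speed perturbation factor
gain = np.random.uniform(*gain_range)     # gain in dB
sox_augment_params = ['tempo', '{:.3f}'.format(tempo), 'gain', '{:.3f}'.format(gain)]
cmd = 'sox "{}" -r {} -c 1 -b 16 -e si {} {} >/dev/null 2>&1'.format(
    'in.wav', 16000, 'out.wav', ' '.join(sox_augment_params))
print(cmd)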
def parse_transcript(self, transcript_path):
'\n :param transcript_path: Path where transcript is stored from the manifest file\n :return: Transcript in training/testing format\n '
raise NotImplementedError | 5,100,548,454,453,052,000 | :param transcript_path: Path where transcript is stored from the manifest file
:return: Transcript in training/testing format | data/data_loader.py | parse_transcript | ShuanDeMorian/deepspeech.pytorch | python | def parse_transcript(self, transcript_path):
'\n :param transcript_path: Path where transcript is stored from the manifest file\n :return: Transcript in training/testing format\n '
raise NotImplementedError |
def parse_audio(self, audio_path):
'\n :param audio_path: Path where audio is stored from the manifest file\n :return: Audio in training/testing format\n '
raise NotImplementedError | -881,191,290,387,988,900 | :param audio_path: Path where audio is stored from the manifest file
:return: Audio in training/testing format | data/data_loader.py | parse_audio | ShuanDeMorian/deepspeech.pytorch | python | def parse_audio(self, audio_path):
'\n :param audio_path: Path where audio is stored from the manifest file\n :return: Audio in training/testing format\n '
raise NotImplementedError |
def __init__(self, path=None, sample_rate=16000, noise_levels=(0, 0.5)):
'\n Adds noise to an input signal with specific SNR. Higher the noise level, the more noise added.\n Modified code from https://github.com/willfrey/audio/blob/master/torchaudio/transforms.py\n '
if ((path is not None) and (not os.path.exists(path))):
print("Directory doesn't exist: {}".format(path))
raise IOError
self.paths = ((path is not None) and librosa.util.find_files(path))
self.sample_rate = sample_rate
self.noise_levels = noise_levels | -5,970,089,495,763,871,000 | Adds noise to an input signal with specific SNR. Higher the noise level, the more noise added.
Modified code from https://github.com/willfrey/audio/blob/master/torchaudio/transforms.py | data/data_loader.py | __init__ | ShuanDeMorian/deepspeech.pytorch | python | def __init__(self, path=None, sample_rate=16000, noise_levels=(0, 0.5)):
'\n Adds noise to an input signal with specific SNR. Higher the noise level, the more noise added.\n Modified code from https://github.com/willfrey/audio/blob/master/torchaudio/transforms.py\n '
if ((path is not None) and (not os.path.exists(path))):
print("Directory doesn't exist: {}".format(path))
raise IOError
self.paths = ((path is not None) and librosa.util.find_files(path))
self.sample_rate = sample_rate
self.noise_levels = noise_levels |
def __init__(self, audio_conf, normalize=False, speed_volume_perturb=False, spec_augment=False):
'\n Parses audio file into spectrogram with optional normalization and various augmentations\n :param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds\n :param normalize(default False): Apply standard mean and deviation normalization to audio tensor\n :param speed_volume_perturb(default False): Apply random tempo and gain perturbations\n :param spec_augment(default False): Apply simple spectral augmentation to mel spectograms\n '
super(SpectrogramParser, self).__init__()
self.window_stride = audio_conf['window_stride']
self.window_size = audio_conf['window_size']
self.sample_rate = audio_conf['sample_rate']
self.window = windows.get(audio_conf['window'], windows['hamming'])
self.normalize = normalize
self.speed_volume_perturb = speed_volume_perturb
self.spec_augment = spec_augment
self.noiseInjector = (NoiseInjection(audio_conf['noise_dir'], self.sample_rate, audio_conf['noise_levels']) if (audio_conf.get('noise_dir') is not None) else None)
self.noise_prob = audio_conf.get('noise_prob') | -2,378,630,192,019,733,000 | Parses audio file into spectrogram with optional normalization and various augmentations
:param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds
:param normalize(default False): Apply standard mean and deviation normalization to audio tensor
:param speed_volume_perturb(default False): Apply random tempo and gain perturbations
:param spec_augment(default False): Apply simple spectral augmentation to mel spectrograms | data/data_loader.py | __init__ | ShuanDeMorian/deepspeech.pytorch | python | def __init__(self, audio_conf, normalize=False, speed_volume_perturb=False, spec_augment=False):
'\n Parses audio file into spectrogram with optional normalization and various augmentations\n :param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds\n :param normalize(default False): Apply standard mean and deviation normalization to audio tensor\n :param speed_volume_perturb(default False): Apply random tempo and gain perturbations\n :param spec_augment(default False): Apply simple spectral augmentation to mel spectograms\n '
super(SpectrogramParser, self).__init__()
self.window_stride = audio_conf['window_stride']
self.window_size = audio_conf['window_size']
self.sample_rate = audio_conf['sample_rate']
self.window = windows.get(audio_conf['window'], windows['hamming'])
self.normalize = normalize
self.speed_volume_perturb = speed_volume_perturb
self.spec_augment = spec_augment
self.noiseInjector = (NoiseInjection(audio_conf['noise_dir'], self.sample_rate, audio_conf['noise_levels']) if (audio_conf.get('noise_dir') is not None) else None)
self.noise_prob = audio_conf.get('noise_prob') |
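
The window_size and window_stride in the record above are given in seconds; downstream spectrogram code typically converts them to STFT frame parameters. A quick sketch under the common 16 kHz / 20 ms / 10 ms assumption:

sample_rate, window_size, window_stride = 16000, 0.02, 0.01
n_fft = int(sample_rate * window_size)         # 320 samples per analysis window
hop_length = int(sample_rate * window_stride)  # 160 samples between successive windows
win_length = n_fft
print(n_fft, hop_length, win_length)           # 320 160 320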
def __init__(self, audio_conf, manifest_filepath, labels, normalize=False, speed_volume_perturb=False, spec_augment=False):
'\n Dataset that loads tensors via a csv containing file paths to audio files and transcripts separated by\n a comma. Each new line is a different sample. Example below:\n\n /path/to/audio.wav,/path/to/audio.txt\n ...\n\n :param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds\n :param manifest_filepath: Path to manifest csv as describe above\n :param labels: String containing all the possible characters to map to\n :param normalize: Apply standard mean and deviation normalization to audio tensor\n :param speed_volume_perturb(default False): Apply random tempo and gain perturbations\n :param spec_augment(default False): Apply simple spectral augmentation to mel spectograms\n '
with open(manifest_filepath) as f:
ids = f.readlines()
ids = [x.strip().split(',') for x in ids]
self.ids = ids
self.size = len(ids)
self.labels_map = dict([(labels[i], i) for i in range(len(labels))])
try:
self.use_jamo = audio_conf['use_jamo']
except:
self.use_jamo = False
super(SpectrogramDataset, self).__init__(audio_conf, normalize, speed_volume_perturb, spec_augment) | 4,335,064,648,897,661,000 | Dataset that loads tensors via a csv containing file paths to audio files and transcripts separated by
a comma. Each new line is a different sample. Example below:
/path/to/audio.wav,/path/to/audio.txt
...
:param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds
:param manifest_filepath: Path to manifest csv as described above
:param labels: String containing all the possible characters to map to
:param normalize: Apply standard mean and deviation normalization to audio tensor
:param speed_volume_perturb(default False): Apply random tempo and gain perturbations
:param spec_augment(default False): Apply simple spectral augmentation to mel spectrograms | data/data_loader.py | __init__ | ShuanDeMorian/deepspeech.pytorch | python | def __init__(self, audio_conf, manifest_filepath, labels, normalize=False, speed_volume_perturb=False, spec_augment=False):
'\n Dataset that loads tensors via a csv containing file paths to audio files and transcripts separated by\n a comma. Each new line is a different sample. Example below:\n\n /path/to/audio.wav,/path/to/audio.txt\n ...\n\n :param audio_conf: Dictionary containing the sample rate, window and the window length/stride in seconds\n :param manifest_filepath: Path to manifest csv as describe above\n :param labels: String containing all the possible characters to map to\n :param normalize: Apply standard mean and deviation normalization to audio tensor\n :param speed_volume_perturb(default False): Apply random tempo and gain perturbations\n :param spec_augment(default False): Apply simple spectral augmentation to mel spectograms\n '
with open(manifest_filepath) as f:
ids = f.readlines()
ids = [x.strip().split(',') for x in ids]
self.ids = ids
self.size = len(ids)
self.labels_map = dict([(labels[i], i) for i in range(len(labels))])
try:
self.use_jamo = audio_conf['use_jamo']
except:
self.use_jamo = False
super(SpectrogramDataset, self).__init__(audio_conf, normalize, speed_volume_perturb, spec_augment) |
def __init__(self, *args, **kwargs):
'\n Creates a data loader for AudioDatasets.\n '
super(AudioDataLoader, self).__init__(*args, **kwargs)
self.collate_fn = _collate_fn | -1,195,107,911,870,949,600 | Creates a data loader for AudioDatasets. | data/data_loader.py | __init__ | ShuanDeMorian/deepspeech.pytorch | python | def __init__(self, *args, **kwargs):
'\n \n '
super(AudioDataLoader, self).__init__(*args, **kwargs)
self.collate_fn = _collate_fn |
def __init__(self, data_source, batch_size=1):
'\n Samples batches assuming they are in order of size to batch similarly sized samples together.\n '
super(BucketingSampler, self).__init__(data_source)
self.data_source = data_source
ids = list(range(0, len(data_source)))
self.bins = [ids[i:(i + batch_size)] for i in range(0, len(ids), batch_size)] | -8,620,534,451,393,159,000 | Samples batches assuming they are in order of size to batch similarly sized samples together. | data/data_loader.py | __init__ | ShuanDeMorian/deepspeech.pytorch | python | def __init__(self, data_source, batch_size=1):
'\n \n '
super(BucketingSampler, self).__init__(data_source)
self.data_source = data_source
ids = list(range(0, len(data_source)))
self.bins = [ids[i:(i + batch_size)] for i in range(0, len(ids), batch_size)] |
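
A quick illustration of the binning done by the sampler above: indices are sliced into consecutive fixed-size batches, so if the dataset is sorted by length, each batch holds similarly sized samples:

batch_size = 3
ids = list(range(0, 10))
bins = [ids[i:(i + batch_size)] for i in range(0, len(ids), batch_size)]
print(bins)  # [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]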
def __init__(self, data_source, batch_size=1, num_replicas=None, rank=None):
'\n Samples batches assuming they are in order of size to batch similarly sized samples together.\n '
super(DistributedBucketingSampler, self).__init__(data_source)
if (num_replicas is None):
num_replicas = get_world_size()
if (rank is None):
rank = get_rank()
self.data_source = data_source
self.ids = list(range(0, len(data_source)))
self.batch_size = batch_size
self.bins = [self.ids[i:(i + batch_size)] for i in range(0, len(self.ids), batch_size)]
self.num_replicas = num_replicas
self.rank = rank
self.num_samples = int(math.ceil(((len(self.bins) * 1.0) / self.num_replicas)))
self.total_size = (self.num_samples * self.num_replicas) | -7,090,250,734,173,390,000 | Samples batches assuming they are in order of size to batch similarly sized samples together. | data/data_loader.py | __init__ | ShuanDeMorian/deepspeech.pytorch | python | def __init__(self, data_source, batch_size=1, num_replicas=None, rank=None):
'\n \n '
super(DistributedBucketingSampler, self).__init__(data_source)
if (num_replicas is None):
num_replicas = get_world_size()
if (rank is None):
rank = get_rank()
self.data_source = data_source
self.ids = list(range(0, len(data_source)))
self.batch_size = batch_size
self.bins = [self.ids[i:(i + batch_size)] for i in range(0, len(self.ids), batch_size)]
self.num_replicas = num_replicas
self.rank = rank
self.num_samples = int(math.ceil(((len(self.bins) * 1.0) / self.num_replicas)))
self.total_size = (self.num_samples * self.num_replicas) |
def filter_event(event, happening_before):
'Check if the following keys are present. These\n keys only show up when using the API. If fetching\n from the iCal, JSON, or RSS feeds it will just compare\n the dates\n '
status = True
visibility = True
actions = True
if ('status' in event):
status = (event['status'] == 'upcoming')
if ('visibility' in event):
visibility = (event['visibility'] == 'public')
if ('self' in event):
actions = ('announce' not in event['self']['actions'])
return (status and visibility and actions and (event['time'] < happening_before)) | -7,648,817,168,635,728,000 | Check if the following keys are present. These
keys only show up when using the API. If fetching
from the iCal, JSON, or RSS feeds it will just compare
the dates | app/Meetup/Filter.py | filter_event | OpenTwinCities/site_bot | python | def filter_event(event, happening_before):
'Check if the following keys are present. These\n keys only show up when using the API. If fetching\n from the iCal, JSON, or RSS feeds it will just compare\n the dates\n '
status = True
visibility = True
actions = True
if ('status' in event):
status = (event['status'] == 'upcoming')
if ('visibility' in event):
visibility = (event['visibility'] == 'public')
if ('self' in event):
actions = ('announce' not in event['self']['actions'])
return (status and visibility and actions and (event['time'] < happening_before)) |
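
A hedged usage sketch for filter_event, assuming the function from the record above is in scope; the field values are invented, and times follow Meetup's epoch-millisecond convention:

import time

happening_before = int(time.time() * 1000) + 7 * 24 * 3600 * 1000  # one week out, in ms
events = [
    {'time': happening_before - 1000, 'status': 'upcoming',
     'visibility': 'public', 'self': {'actions': []}},          # kept
    {'time': happening_before - 1000, 'status': 'cancelled'},   # dropped: not upcoming
    {'time': happening_before + 1000},                           # dropped: too far out
]
announceable = [e for e in events if filter_event(e, happening_before)]
print(len(announceable))  # 1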
def evaluate(self, x):
'\n Performs the evaluation of the objective at x.\n '
(f_evals, cost_evals) = self._eval_func(x)
return (f_evals, cost_evals) | -8,275,771,205,466,574,000 | Performs the evaluation of the objective at x. | Samples/codes/matopt_review/add_objective.py | evaluate | wilsongis/3DP_Experiments | python | def evaluate(self, x):
'\n \n '
(f_evals, cost_evals) = self._eval_func(x)
return (f_evals, cost_evals) |
def _eval_func(self, x):
'\n Performs sequential evaluations of the function at x (single location or batch). The computing time of each\n evaluation is also provided.\n '
cost_evals = []
f_evals = np.empty(shape=[0, self.n_obj])
for i in range(x.shape[0]):
st_time = time.time()
rlt = self.func(np.atleast_2d(x[i]))
f_evals = np.vstack([f_evals, rlt])
cost_evals += [(time.time() - st_time)]
return (f_evals, cost_evals) | 3,552,057,242,387,114,000 | Performs sequential evaluations of the function at x (single location or batch). The computing time of each
evaluation is also provided. | Samples/codes/matopt_review/add_objective.py | _eval_func | wilsongis/3DP_Experiments | python | def _eval_func(self, x):
'\n Performs sequential evaluations of the function at x (single location or batch). The computing time of each\n evaluation is also provided.\n '
cost_evals = []
f_evals = np.empty(shape=[0, self.n_obj])
for i in range(x.shape[0]):
st_time = time.time()
rlt = self.func(np.atleast_2d(x[i]))
f_evals = np.vstack([f_evals, rlt])
cost_evals += [(time.time() - st_time)]
return (f_evals, cost_evals) |
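
The per-point timing loop in _eval_func above, reduced to a standalone sketch with a toy objective in place of self.func:

import time
import numpy as np

def func(x):  # toy objective: row-wise sum of squares, shaped (1, 1) per call
    return np.sum(x ** 2, axis=1, keepdims=True)

X = np.random.rand(5, 2)
f_evals, cost_evals = np.empty(shape=[0, 1]), []
for i in range(X.shape[0]):
    st_time = time.time()
    f_evals = np.vstack([f_evals, func(np.atleast_2d(X[i]))])
    cost_evals += [time.time() - st_time]
print(f_evals.shape, len(cost_evals))  # (5, 1) 5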
def _validate_wildcard_uri(node, value):
'Raise if wildcards are within the domain of the uri.'
for val in value:
if (not wildcard_uri_is_valid(val)):
raise colander.Invalid(node, 'Wildcards (_ and *) are not permitted within the\n domain of wildcard_uri') | -7,200,298,268,601,455,000 | Raise if wildcards are within the domain of the uri. | h/schemas/annotation.py | _validate_wildcard_uri | bibliotechie/h | python | def _validate_wildcard_uri(node, value):
for val in value:
if (not wildcard_uri_is_valid(val)):
raise colander.Invalid(node, 'Wildcards (_ and *) are not permitted within the\n domain of wildcard_uri') |
def _document(document, claimant):
'\n Return document meta and document URI data from the given document dict.\n\n Transforms the "document" dict that the client posts into a convenient\n format for creating DocumentURI and DocumentMeta objects later.\n\n '
document = (document or {})
document_uri_dicts = document_claims.document_uris_from_data(copy.deepcopy(document), claimant=claimant)
document_meta_dicts = document_claims.document_metas_from_data(copy.deepcopy(document), claimant=claimant)
return {'document_uri_dicts': document_uri_dicts, 'document_meta_dicts': document_meta_dicts} | 7,862,136,510,796,650,000 | Return document meta and document URI data from the given document dict.
Transforms the "document" dict that the client posts into a convenient
format for creating DocumentURI and DocumentMeta objects later. | h/schemas/annotation.py | _document | bibliotechie/h | python | def _document(document, claimant):
'\n Return document meta and document URI data from the given document dict.\n\n Transforms the "document" dict that the client posts into a convenient\n format for creating DocumentURI and DocumentMeta objects later.\n\n '
document = (document or {})
document_uri_dicts = document_claims.document_uris_from_data(copy.deepcopy(document), claimant=claimant)
document_meta_dicts = document_claims.document_metas_from_data(copy.deepcopy(document), claimant=claimant)
return {'document_uri_dicts': document_uri_dicts, 'document_meta_dicts': document_meta_dicts} |
def _format_jsonschema_error(error):
'Format a :py:class:`jsonschema.ValidationError` as a string.'
if error.path:
dotted_path = '.'.join([str(c) for c in error.path])
return '{path}: {message}'.format(path=dotted_path, message=error.message)
return error.message | 2,304,976,702,490,669,300 | Format a :py:class:`jsonschema.ValidationError` as a string. | h/schemas/annotation.py | _format_jsonschema_error | bibliotechie/h | python | def _format_jsonschema_error(error):
if error.path:
dotted_path = '.'.join([str(c) for c in error.path])
return '{path}: {message}'.format(path=dotted_path, message=error.message)
return error.message |
def _shared(permissions, groupid):
"\n Return True if the given permissions object represents shared permissions.\n\n Return False otherwise.\n\n Reduces the client's complex permissions dict to a simple shared boolean.\n\n :param permissions: the permissions dict sent by the client in an\n annotation create or update request\n :type permissions: dict\n\n :param groupid: the groupid of the annotation that the permissions dict\n applies to\n :type groupid: unicode\n\n "
return (permissions['read'] == ['group:{id}'.format(id=groupid)]) | -4,910,070,461,960,579,000 | Return True if the given permissions object represents shared permissions.
Return False otherwise.
Reduces the client's complex permissions dict to a simple shared boolean.
:param permissions: the permissions dict sent by the client in an
annotation create or update request
:type permissions: dict
:param groupid: the groupid of the annotation that the permissions dict
applies to
:type groupid: unicode | h/schemas/annotation.py | _shared | bibliotechie/h | python | def _shared(permissions, groupid):
"\n Return True if the given permissions object represents shared permissions.\n\n Return False otherwise.\n\n Reduces the client's complex permissions dict to a simple shared boolean.\n\n :param permissions: the permissions dict sent by the client in an\n annotation create or update request\n :type permissions: dict\n\n :param groupid: the groupid of the annotation that the permissions dict\n applies to\n :type groupid: unicode\n\n "
return (permissions['read'] == ['group:{id}'.format(id=groupid)]) |
def _target_selectors(targets):
'\n Return the target selectors from the given target list.\n\n Transforms the target lists that the client sends in annotation create and\n update requests into our internal target_selectors format.\n\n '
if (targets and ('selector' in targets[0])):
return targets[0]['selector']
return [] | 1,615,902,591,248,185,000 | Return the target selectors from the given target list.
Transforms the target lists that the client sends in annotation create and
update requests into our internal target_selectors format. | h/schemas/annotation.py | _target_selectors | bibliotechie/h | python | def _target_selectors(targets):
'\n Return the target selectors from the given target list.\n\n Transforms the target lists that the client sends in annotation create and\n update requests into our internal target_selectors format.\n\n '
if (targets and ('selector' in targets[0])):
return targets[0]['selector']
return [] |
def _date_is_parsable(self, value):
'Return True if date is parsable and False otherwise.'
try:
if (float(value) < 9999):
raise ValueError('This is not in the form ms since the epoch.')
except ValueError:
try:
parse(value)
except ValueError:
return False
return True | 1,211,354,839,976,220,000 | Return True if date is parsable and False otherwise. | h/schemas/annotation.py | _date_is_parsable | bibliotechie/h | python | def _date_is_parsable(self, value):
try:
if (float(value) < 9999):
raise ValueError('This is not in the form ms since the epoch.')
except ValueError:
try:
parse(value)
except ValueError:
return False
return True |
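
How the parsability check above behaves on a few inputs, assuming the parse used in the source module is dateutil's parser (a reasonable but unconfirmed assumption):

from dateutil.parser import parse

for value in ['1514764800000', '2018-01-01T00:00:00', 'not a date']:
    try:
        if float(value) < 9999:
            raise ValueError('This is not in the form ms since the epoch.')
        ok = True                    # large number: treated as epoch milliseconds
    except ValueError:
        try:
            parse(value)
            ok = True                # parsable date string
        except ValueError:
            ok = False               # neither epoch ms nor a parsable date
    print(value, ok)                 # True, True, False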
def makeLineToWordsList(line, break_word=False):
u'Convert a line of text into a list of words.'
if break_word:
return [c for c in line]
lst = []
while line:
ro = g_re_first_word.match(line)
end = (1 if (not ro) else ro.end())
lst.append(line[:end])
line = line[end:]
return lst | 5,462,105,743,467,148,000 | Convert a line of text into a list of words. | hard-gists/9c4d012d6fff059ccea7/snippet.py | makeLineToWordsList | bopopescu/dockerizeme | python | def makeLineToWordsList(line, break_word=False):
u''
if break_word:
return [c for c in line]
lst = []
while line:
ro = g_re_first_word.match(line)
end = (1 if (not ro) else ro.end())
lst.append(line[:end])
line = line[end:]
return lst |
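
The gist above relies on a module-level g_re_first_word that is not included in this record; a plausible stand-in (an assumption, not the gist's actual pattern) plus a quick check of the word splitting, assuming makeLineToWordsList from the record is in scope:

# -*- coding: utf-8 -*-
import re

# Hypothetical definition: one run of ASCII word characters, so CJK text and
# punctuation fall through to the single-character branch (end = 1).
g_re_first_word = re.compile(r"[A-Za-z0-9_']+")

print(makeLineToWordsList(u'hello world'))              # ['hello', ' ', 'world']
print(makeLineToWordsList(u'hello', break_word=True))   # one element per character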
def makeLongLineToLines(long_line, start_x, start_y, width, line_height, font, cn_char_width=0):
u'Split a long line into multiple displayable short lines.'
txt = long_line
if (not txt):
return [None]
words = makeLineToWordsList(txt)
lines = []
if (not cn_char_width):
(cn_char_width, h) = font.size(u'汉')
avg_char_per_line = (width / cn_char_width)
if (avg_char_per_line <= 1):
avg_char_per_line = 1
line_x = start_x
line_y = start_y
while words:
tmp_words = words[:avg_char_per_line]
tmp_ln = ''.join(tmp_words)
(w, h) = font.size(tmp_ln)
wc = len(tmp_words)
while ((w < width) and (wc < len(words))):
wc += 1
tmp_words = words[:wc]
tmp_ln = ''.join(tmp_words)
(w, h) = font.size(tmp_ln)
while ((w > width) and (len(tmp_words) > 1)):
tmp_words = tmp_words[:(- 1)]
tmp_ln = ''.join(tmp_words)
(w, h) = font.size(tmp_ln)
if ((w > width) and (len(tmp_words) == 1)):
line_y = makeLongWordToLines(tmp_words[0], line_x, line_y, width, line_height, font, lines)
words = words[len(tmp_words):]
continue
line = {'x': line_x, 'y': line_y, 'text': tmp_ln, 'font': font}
line_y += line_height
words = words[len(tmp_words):]
lines.append(line)
if (len(lines) >= 1):
while ((len(words) > 0) and (not words[0].strip())):
words = words[1:]
return lines | 8,475,288,406,829,945,000 | Split a long line into multiple displayable short lines | hard-gists/9c4d012d6fff059ccea7/snippet.py | makeLongLineToLines | bopopescu/dockerizeme | python | def makeLongLineToLines(long_line, start_x, start_y, width, line_height, font, cn_char_width=0):
txt = long_line
if (not txt):
return [None]
words = makeLineToWordsList(txt)
lines = []
if (not cn_char_width):
(cn_char_width, h) = font.size(u'汉')
avg_char_per_line = (width / cn_char_width)
if (avg_char_per_line <= 1):
avg_char_per_line = 1
line_x = start_x
line_y = start_y
while words:
tmp_words = words[:avg_char_per_line]
tmp_ln = ''.join(tmp_words)
(w, h) = font.size(tmp_ln)
wc = len(tmp_words)
while ((w < width) and (wc < len(words))):
wc += 1
tmp_words = words[:wc]
tmp_ln = ''.join(tmp_words)
(w, h) = font.size(tmp_ln)
while ((w > width) and (len(tmp_words) > 1)):
tmp_words = tmp_words[:(- 1)]
tmp_ln = ''.join(tmp_words)
(w, h) = font.size(tmp_ln)
if ((w > width) and (len(tmp_words) == 1)):
line_y = makeLongWordToLines(tmp_words[0], line_x, line_y, width, line_height, font, lines)
words = words[len(tmp_words):]
continue
line = {'x': line_x, 'y': line_y, 'text': tmp_ln, 'font': font}
line_y += line_height
words = words[len(tmp_words):]
lines.append(line)
if (len(lines) >= 1):
while ((len(words) > 0) and (not words[0].strip())):
words = words[1:]
return lines |
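A hedged sketch of driving makeLongLineToLines. It assumes the rest of the gist (g_re_first_word, makeLineToWordsList, makeLongWordToLines) is in scope, that pygame is installed, and that it runs under Python 2, where width / cn_char_width is integer division; under Python 3 the float result would break the words[:avg_char_per_line] slice.

# -*- coding: utf-8 -*-
import pygame

pygame.font.init()
font = pygame.font.Font(None, 24)  # pygame's bundled default font; any TTF works

text = u'A fairly long paragraph of text that will not fit on a single 300px line.'
lines = makeLongLineToLines(text, start_x=10, start_y=10,
                            width=300, line_height=28, font=font)

# Each entry is a dict carrying the blit position, the wrapped text and the font.
for ln in lines:
    print('%s: %s' % (ln['y'], ln['text']))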
def drawCopyright(im, dr, cfg):
u'Draw the copyright information'
if (not cfg['copyright']):
return
font = getFontForPyGame(font_name=cfg['font-family'], font_size=12)
rtext = font.render(cfg['copyright'], cfg['font-antialiasing'], (128, 128, 128), cfg['background-color'])
sio = StringIO.StringIO()
pygame.image.save(rtext, sio)
sio.seek(0)
copyright_im = Image.open(sio)
(iw, ih) = im.size
(cw, ch) = rtext.get_size()
padding = cfg['padding']
offset_y = ((ih - 32) - padding[2])
if cfg['copyright-center']:
cx = ((iw - cw) / 2)
else:
cx = cfg['padding'][3]
cy = (offset_y + 12)
dr.line([(padding[3], offset_y), ((iw - padding[1]), offset_y)], width=1, fill=(192, 192, 192))
im.paste(copyright_im, (cx, cy)) | 7,383,745,422,005,040,000 | Draw the copyright information | hard-gists/9c4d012d6fff059ccea7/snippet.py | drawCopyright | bopopescu/dockerizeme | python | def drawCopyright(im, dr, cfg):
if (not cfg['copyright']):
return
font = getFontForPyGame(font_name=cfg['font-family'], font_size=12)
rtext = font.render(cfg['copyright'], cfg['font-antialiasing'], (128, 128, 128), cfg['background-color'])
sio = StringIO.StringIO()
pygame.image.save(rtext, sio)
sio.seek(0)
copyright_im = Image.open(sio)
(iw, ih) = im.size
(cw, ch) = rtext.get_size()
padding = cfg['padding']
offset_y = ((ih - 32) - padding[2])
if cfg['copyright-center']:
cx = ((iw - cw) / 2)
else:
cx = cfg['padding'][3]
cy = (offset_y + 12)
dr.line([(padding[3], offset_y), ((iw - padding[1]), offset_y)], width=1, fill=(192, 192, 192))
im.paste(copyright_im, (cx, cy)) |
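A sketch of the cfg keys drawCopyright reads, with invented values throughout. It assumes Python 2 and the rest of the gist (getFontForPyGame plus the pygame, PIL and StringIO imports) in scope:

from PIL import Image, ImageDraw

cfg = {
    'copyright': 'generated by the txt2im snippet',  # text drawn above the bottom rule
    'copyright-center': True,                        # centre it horizontally
    'font-family': None,                             # assumed to fall back to a default font
    'font-antialiasing': True,
    'background-color': (255, 255, 255),
    'padding': (15, 15, 15, 15),                     # inferred order: top, right, bottom, left
}

im = Image.new('RGB', (600, 400), cfg['background-color'])
dr = ImageDraw.Draw(im)
drawCopyright(im, dr, cfg)
im.save('with_copyright.png')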
def drawBorder(im, dr, cfg):
u'Draw the border'
if (not cfg['border-size']):
return
(w, h) = im.size
(x, y) = ((w - 1), (h - 1))
dr.line([(0, 0), (x, 0), (x, y), (0, y), (0, 0)], width=cfg['border-size'], fill=cfg['border-color']) | -7,522,714,186,463,741,000 | Draw the border | hard-gists/9c4d012d6fff059ccea7/snippet.py | drawBorder | bopopescu/dockerizeme | python | def drawBorder(im, dr, cfg):
if (not cfg['border-size']):
return
(w, h) = im.size
(x, y) = ((w - 1), (h - 1))
dr.line([(0, 0), (x, 0), (x, y), (0, y), (0, 0)], width=cfg['border-size'], fill=cfg['border-color']) |
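drawBorder only needs two cfg keys; a minimal PIL example with invented values and file name:

from PIL import Image, ImageDraw

cfg = {'border-size': 2, 'border-color': (192, 192, 192)}
im = Image.new('RGB', (200, 100), (255, 255, 255))
dr = ImageDraw.Draw(im)
drawBorder(im, dr, cfg)  # draws a 2px grey rectangle along the image edge
im.save('bordered.png')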
def chain_return_value(future, loop, return_value):
'Compatible way to return a value in all Pythons.\n\n PEP 479, raise StopIteration(value) from a coroutine won\'t work forever,\n but "return value" doesn\'t work in Python 2. Instead, Motor methods that\n return values resolve a Future with it, and are implemented with callbacks\n rather than a coroutine internally.\n '
chained = concurrent.Future()
def copy(_future):
if chained.done():
return
if (_future.exception() is not None):
chained.set_exception(_future.exception())
else:
chained.set_result(return_value)
future.add_done_callback(functools.partial(loop.add_callback, copy))
return chained | 2,489,905,371,473,619,500 | Compatible way to return a value in all Pythons.
PEP 479, raise StopIteration(value) from a coroutine won't work forever,
but "return value" doesn't work in Python 2. Instead, Motor methods that
return values resolve a Future with it, and are implemented with callbacks
rather than a coroutine internally. | motor/frameworks/tornado/__init__.py | chain_return_value | smurfix/motor | python | def chain_return_value(future, loop, return_value):
'Compatible way to return a value in all Pythons.\n\n PEP 479, raise StopIteration(value) from a coroutine won\'t work forever,\n but "return value" doesn\'t work in Python 2. Instead, Motor methods that\n return values resolve a Future with it, and are implemented with callbacks\n rather than a coroutine internally.\n '
chained = concurrent.Future()
def copy(_future):
if chained.done():
return
if (_future.exception() is not None):
chained.set_exception(_future.exception())
else:
chained.set_result(return_value)
future.add_done_callback(functools.partial(loop.add_callback, copy))
return chained |
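A small illustration of what chain_return_value does, assuming the function above is in scope together with its module-level imports (functools and tornado's concurrent). The chained future resolves with the fixed value once the inner future resolves, and the inner future's own result is discarded:

from tornado import concurrent, ioloop

async def main():
    loop = ioloop.IOLoop.current()
    inner = concurrent.Future()
    chained = chain_return_value(inner, loop, 42)
    inner.set_result('ignored')   # inner's own result does not matter
    print(await chained)          # -> 42

ioloop.IOLoop.current().run_sync(main)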
def pymongo_class_wrapper(f, pymongo_class):
'Executes the coroutine f and wraps its result in a Motor class.\n\n See WrapAsync.\n '
@functools.wraps(f)
async def _wrapper(self, *args, **kwargs):
result = (await f(self, *args, **kwargs))
if (result.__class__ == pymongo_class):
return self.wrap(result)
else:
return result
return _wrapper | -1,625,393,533,709,248,000 | Executes the coroutine f and wraps its result in a Motor class.
See WrapAsync. | motor/frameworks/tornado/__init__.py | pymongo_class_wrapper | smurfix/motor | python | def pymongo_class_wrapper(f, pymongo_class):
'Executes the coroutine f and wraps its result in a Motor class.\n\n See WrapAsync.\n '
@functools.wraps(f)
async def _wrapper(self, *args, **kwargs):
result = (await f(self, *args, **kwargs))
if (result.__class__ == pymongo_class):
return self.wrap(result)
else:
return result
return _wrapper |
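A toy demonstration of the wrapping mechanism with invented stand-in classes; in Motor the real pymongo_class would be an actual PyMongo type and wrap() would build the corresponding Motor object. pymongo_class_wrapper as defined above is assumed to be in scope:

import asyncio

class PlainResult:          # stands in for a PyMongo class
    pass

class Wrapped:              # stands in for the Motor-side wrapper
    def __init__(self, delegate):
        self.delegate = delegate

class Parent:               # stands in for a Motor object exposing wrap()
    def wrap(self, obj):
        return Wrapped(obj)

    async def get_result(self):
        return PlainResult()

# Rebind the coroutine so PlainResult return values come back wrapped.
Parent.get_result = pymongo_class_wrapper(Parent.get_result, PlainResult)

result = asyncio.run(Parent().get_result())
print(type(result).__name__)  # Wrapped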
def __init__(self, detail, source=None, title=None, status=None, code=None, id_=None, links=None, meta=None):
'Initialize a jsonapi exception\n\n :param dict source: the source of the error\n :param str detail: the detail of the error\n '
self.detail = detail
self.source = source
self.code = code
self.id = id_
self.links = (links or {})
self.meta = (meta or {})
if (title is not None):
self.title = title
if (status is not None):
self.status = status | -3,200,909,858,962,412,000 | Initialize a jsonapi exception
:param dict source: the source of the error
:param str detail: the detail of the error | flapison/exceptions.py | __init__ | Leechael/flapison | python | def __init__(self, detail, source=None, title=None, status=None, code=None, id_=None, links=None, meta=None):
'Initialize a jsonapi exception\n\n :param dict source: the source of the error\n :param str detail: the detail of the error\n '
self.detail = detail
self.source = source
self.code = code
self.id = id_
self.links = (links or {})
self.meta = (meta or {})
if (title is not None):
self.title = title
if (status is not None):
self.status = status |
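The constructor above belongs to flapison's JSON:API error exceptions. A hedged, self-contained sketch of how it behaves, with the class name and its default title/status attributes assumed for the demo:

class JsonApiError(Exception):
    # Assumed defaults; the real class in flapison/exceptions.py defines its own.
    title = 'Unknown error'
    status = '500'

    def __init__(self, detail, source=None, title=None, status=None,
                 code=None, id_=None, links=None, meta=None):
        # Same logic as the record above.
        self.detail = detail
        self.source = source
        self.code = code
        self.id = id_
        self.links = links or {}
        self.meta = meta or {}
        if title is not None:
            self.title = title
        if status is not None:
            self.status = status

err = JsonApiError(
    detail="'name' is a required field",
    source={'pointer': '/data/attributes/name'},
    title='Validation error',
    status='422',
)
print(err.status, err.title, err.detail)  # 422 Validation error 'name' is a required field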