Column schema for the records below (each record lists these ten fields in this order):

column                   type           min   max
body_hash                stringlengths  64    64
body                     stringlengths  23    109k
docstring                stringlengths  1     57k
path                     stringlengths  4     198
name                     stringlengths  1     115
repository_name          stringlengths  7     111
repository_stars         float64        0     191k
lang                     stringclasses  1 value
body_without_docstring   stringlengths  14    108k
unified                  stringlengths  45    133k
e311d33e2ee6dab690e5d9ac8e9714d2c754e61cf5dc0f65cff1cf0d381a8f65
def main(): 'Send the RPC command to the server and print the result.' parser = argparse.ArgumentParser('Send electrumx an RPC command') parser.add_argument('-p', '--port', metavar='port_num', type=int, help='RPC port number') parser.add_argument('command', nargs=1, default=[], help='command to send') parser.add_argument('param', nargs='*', default=[], help='params to send') args = parser.parse_args() if (args.port is None): args.port = int(environ.get('RPC_PORT', 8000)) loop = asyncio.get_event_loop() coro = loop.create_connection(RPCClient, 'localhost', args.port) try: (transport, protocol) = loop.run_until_complete(coro) coro = protocol.send_and_wait(args.command[0], args.param, timeout=15) loop.run_until_complete(coro) except OSError: print('error connecting - is ElectrumX catching up or not running?') finally: loop.close()
Send the RPC command to the server and print the result.
electrumx_rpc.py
main
Skirmant/electrumx-trump
0
python
def main(): parser = argparse.ArgumentParser('Send electrumx an RPC command') parser.add_argument('-p', '--port', metavar='port_num', type=int, help='RPC port number') parser.add_argument('command', nargs=1, default=[], help='command to send') parser.add_argument('param', nargs='*', default=[], help='params to send') args = parser.parse_args() if (args.port is None): args.port = int(environ.get('RPC_PORT', 8000)) loop = asyncio.get_event_loop() coro = loop.create_connection(RPCClient, 'localhost', args.port) try: (transport, protocol) = loop.run_until_complete(coro) coro = protocol.send_and_wait(args.command[0], args.param, timeout=15) loop.run_until_complete(coro) except OSError: print('error connecting - is ElectrumX catching up or not running?') finally: loop.close()
def main(): parser = argparse.ArgumentParser('Send electrumx an RPC command') parser.add_argument('-p', '--port', metavar='port_num', type=int, help='RPC port number') parser.add_argument('command', nargs=1, default=[], help='command to send') parser.add_argument('param', nargs='*', default=[], help='params to send') args = parser.parse_args() if (args.port is None): args.port = int(environ.get('RPC_PORT', 8000)) loop = asyncio.get_event_loop() coro = loop.create_connection(RPCClient, 'localhost', args.port) try: (transport, protocol) = loop.run_until_complete(coro) coro = protocol.send_and_wait(args.command[0], args.param, timeout=15) loop.run_until_complete(coro) except OSError: print('error connecting - is ElectrumX catching up or not running?') finally: loop.close()<|docstring|>Send the RPC command to the server and print the result.<|endoftext|>
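The main() in the record above uses the pre-3.10 event-loop idiom (get_event_loop, run_until_complete, close). A minimal sketch of a comparable connect, send, close flow with asyncio streams; the host, port and newline-delimited JSON framing are illustrative assumptions, not details taken from the record.

import asyncio
import json

async def send_rpc(command, params, port=8000):
    # open a TCP connection to a locally running server (assumed address)
    reader, writer = await asyncio.open_connection('localhost', port)
    writer.write(json.dumps({'method': command, 'params': params}).encode() + b'\n')
    await writer.drain()
    reply = await reader.readline()   # one line-delimited response (assumed framing)
    writer.close()
    await writer.wait_closed()
    return reply

# asyncio.run(send_rpc('getinfo', []))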
7acb56f3d1695444b6d99a7ba84782b730773d46e07d620b62c76bf248cf472a
def __init__(self, n_topics, vocab_size, doc_count, batch_size, batch_steps, num_collection_passes, num_documents_passes, device, dtype, phi_smooth_sparse_tau=0.0, theta_smooth_sparse_tau=0.0, vocab_stat=None, mode='v1', dump_phi_freq=None, dump_phi_path=None, log_perplexity=False, log_matrix_norms=False): '\n :param n_topics:\n :param vocab_size:\n :param doc_count:\n :param context_size:\n :param batch_size:\n :param batch_steps:\n :param num_collection_passes:\n :param num_documents_passes:\n :param device:\n :param dtype:\n :param phi_smooth_sparse_tau:\n :param theta_smooth_sparse_tau:\n :param vocab_stat: TF for phi sparse/smooth reg.\n :param mode: v1/v2; v1 - e-step for all batches, m-step after all\n v2 - em-step on each batch\n ' self.n_topics = n_topics self.vocab_size = vocab_size self.doc_count = doc_count self.num_collection_passes = num_collection_passes self.num_documents_passes = num_documents_passes self.phi_smooth_sparse_tau = phi_smooth_sparse_tau self.theta_smooth_sparse_tau = theta_smooth_sparse_tau self.batch_size = batch_size self.batch_steps = batch_steps self.device = device self.dtype = dtype self.log_perplexity = log_perplexity self.log_matrix_norms = log_matrix_norms self.__init_matrices() self.__init_aux_vars() self.vocab_stat = vocab_stat self.__init_regularizers() self.phi_log = [] self.theta_log = [] self.perplexity_log = [] if dump_phi_freq: self.dump_phi_freq = dump_phi_freq self.dump_phi_path = dump_phi_path else: self.dump_phi_freq = None self.steps_trained = 0 if (mode == 'v1'): self.run = self.run self.__rectify = self.__rectify elif (mode == 'v2'): self.run = self.run_v2 self.__rectify = self.__rectify_v2 else: raise NotImplementedError
:param n_topics: :param vocab_size: :param doc_count: :param context_size: :param batch_size: :param batch_steps: :param num_collection_passes: :param num_documents_passes: :param device: :param dtype: :param phi_smooth_sparse_tau: :param theta_smooth_sparse_tau: :param vocab_stat: TF for phi sparse/smooth reg. :param mode: v1/v2; v1 - e-step for all batches, m-step after all v2 - em-step on each batch
model_fn.py
__init__
ilyakhov/pytorch-wntm
3
python
def __init__(self, n_topics, vocab_size, doc_count, batch_size, batch_steps, num_collection_passes, num_documents_passes, device, dtype, phi_smooth_sparse_tau=0.0, theta_smooth_sparse_tau=0.0, vocab_stat=None, mode='v1', dump_phi_freq=None, dump_phi_path=None, log_perplexity=False, log_matrix_norms=False): '\n :param n_topics:\n :param vocab_size:\n :param doc_count:\n :param context_size:\n :param batch_size:\n :param batch_steps:\n :param num_collection_passes:\n :param num_documents_passes:\n :param device:\n :param dtype:\n :param phi_smooth_sparse_tau:\n :param theta_smooth_sparse_tau:\n :param vocab_stat: TF for phi sparse/smooth reg.\n :param mode: v1/v2; v1 - e-step for all batches, m-step after all\n v2 - em-step on each batch\n ' self.n_topics = n_topics self.vocab_size = vocab_size self.doc_count = doc_count self.num_collection_passes = num_collection_passes self.num_documents_passes = num_documents_passes self.phi_smooth_sparse_tau = phi_smooth_sparse_tau self.theta_smooth_sparse_tau = theta_smooth_sparse_tau self.batch_size = batch_size self.batch_steps = batch_steps self.device = device self.dtype = dtype self.log_perplexity = log_perplexity self.log_matrix_norms = log_matrix_norms self.__init_matrices() self.__init_aux_vars() self.vocab_stat = vocab_stat self.__init_regularizers() self.phi_log = [] self.theta_log = [] self.perplexity_log = [] if dump_phi_freq: self.dump_phi_freq = dump_phi_freq self.dump_phi_path = dump_phi_path else: self.dump_phi_freq = None self.steps_trained = 0 if (mode == 'v1'): self.run = self.run self.__rectify = self.__rectify elif (mode == 'v2'): self.run = self.run_v2 self.__rectify = self.__rectify_v2 else: raise NotImplementedError
def __init__(self, n_topics, vocab_size, doc_count, batch_size, batch_steps, num_collection_passes, num_documents_passes, device, dtype, phi_smooth_sparse_tau=0.0, theta_smooth_sparse_tau=0.0, vocab_stat=None, mode='v1', dump_phi_freq=None, dump_phi_path=None, log_perplexity=False, log_matrix_norms=False): '\n :param n_topics:\n :param vocab_size:\n :param doc_count:\n :param context_size:\n :param batch_size:\n :param batch_steps:\n :param num_collection_passes:\n :param num_documents_passes:\n :param device:\n :param dtype:\n :param phi_smooth_sparse_tau:\n :param theta_smooth_sparse_tau:\n :param vocab_stat: TF for phi sparse/smooth reg.\n :param mode: v1/v2; v1 - e-step for all batches, m-step after all\n v2 - em-step on each batch\n ' self.n_topics = n_topics self.vocab_size = vocab_size self.doc_count = doc_count self.num_collection_passes = num_collection_passes self.num_documents_passes = num_documents_passes self.phi_smooth_sparse_tau = phi_smooth_sparse_tau self.theta_smooth_sparse_tau = theta_smooth_sparse_tau self.batch_size = batch_size self.batch_steps = batch_steps self.device = device self.dtype = dtype self.log_perplexity = log_perplexity self.log_matrix_norms = log_matrix_norms self.__init_matrices() self.__init_aux_vars() self.vocab_stat = vocab_stat self.__init_regularizers() self.phi_log = [] self.theta_log = [] self.perplexity_log = [] if dump_phi_freq: self.dump_phi_freq = dump_phi_freq self.dump_phi_path = dump_phi_path else: self.dump_phi_freq = None self.steps_trained = 0 if (mode == 'v1'): self.run = self.run self.__rectify = self.__rectify elif (mode == 'v2'): self.run = self.run_v2 self.__rectify = self.__rectify_v2 else: raise NotImplementedError<|docstring|>:param n_topics: :param vocab_size: :param doc_count: :param context_size: :param batch_size: :param batch_steps: :param num_collection_passes: :param num_documents_passes: :param device: :param dtype: :param phi_smooth_sparse_tau: :param theta_smooth_sparse_tau: :param vocab_stat: TF for phi sparse/smooth reg. :param mode: v1/v2; v1 - e-step for all batches, m-step after all v2 - em-step on each batch<|endoftext|>
b5779dd61f7e11cfd0b5268f7a968eddaed9ed3de7d26b69dcad4cc43b563c5b
def perplexity(self): '\n Full:\n exp(-1/n_m * sum(n_dw * ln(mm(self.phi, self.theta))))\n :return:\n ' phi = self.phi.cpu() theta = self.theta.cpu() n_m = torch.sum(self.n_dw) one = torch.tensor(1, dtype=self.dtype, device='cpu:0') mm = torch.mm(phi, theta) mm = torch.where((mm == self.zero.cpu()), (0.0001 * one), mm) mm = mm.log() one = ((- 1) * one) perp = torch.exp(((one / n_m) * torch.sum((self.n_dw * mm)))) return perp.numpy()
Full: exp(-1/n_m * sum(n_dw * ln(mm(self.phi, self.theta)))) :return:
model_fn.py
perplexity
ilyakhov/pytorch-wntm
3
python
def perplexity(self): '\n Full:\n exp(-1/n_m * sum(n_dw * ln(mm(self.phi, self.theta))))\n :return:\n ' phi = self.phi.cpu() theta = self.theta.cpu() n_m = torch.sum(self.n_dw) one = torch.tensor(1, dtype=self.dtype, device='cpu:0') mm = torch.mm(phi, theta) mm = torch.where((mm == self.zero.cpu()), (0.0001 * one), mm) mm = mm.log() one = ((- 1) * one) perp = torch.exp(((one / n_m) * torch.sum((self.n_dw * mm)))) return perp.numpy()
def perplexity(self): '\n Full:\n exp(-1/n_m * sum(n_dw * ln(mm(self.phi, self.theta))))\n :return:\n ' phi = self.phi.cpu() theta = self.theta.cpu() n_m = torch.sum(self.n_dw) one = torch.tensor(1, dtype=self.dtype, device='cpu:0') mm = torch.mm(phi, theta) mm = torch.where((mm == self.zero.cpu()), (0.0001 * one), mm) mm = mm.log() one = ((- 1) * one) perp = torch.exp(((one / n_m) * torch.sum((self.n_dw * mm)))) return perp.numpy()<|docstring|>Full: exp(-1/n_m * sum(n_dw * ln(mm(self.phi, self.theta)))) :return:<|endoftext|>
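The docstring formula exp(-1/n_m * sum(n_dw * ln(phi @ theta))) can be checked on toy matrices; the sizes below are arbitrary, and the 1e-4 floor mirrors the torch.where guard in the record.

import torch

phi = torch.rand(6, 3); phi /= phi.sum(dim=0, keepdim=True)        # p(w|t), columns sum to 1
theta = torch.rand(3, 2); theta /= theta.sum(dim=0, keepdim=True)  # p(t|d), columns sum to 1
n_dw = torch.randint(0, 5, (6, 2)).float()                         # term counts per document

p_wd = torch.clamp(phi @ theta, min=1e-4)                          # p(w|d), floored as in the record
perplexity = torch.exp(-(n_dw * p_wd.log()).sum() / n_dw.sum())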
01da978c17506836ad21b2c7c41f4cc9a1e052a0263afe9d907841fc1af609bf
def e_step(self, n_dw, doc_inxs, context_batch, gather_ndw=False): "\n :param n_dw: freq of term 'w' occurrence in doc 'd'\n [[1, 1, 2, 1, 2] - for each word in a doc, ...] β€”\n [batch_size, context_size]\n :param doc_inxs: Tensor of doc inxs with shape [batch_size]\n :param context_batch: Tensor of word inxs with shape\n [batch_size, context_size]\n :return:\n " with torch.cuda.device(self.device): if (gather_ndw is True): self.n_dw[(:, doc_inxs.long())] = n_dw.t() n_dw = torch.unsqueeze(n_dw, 2).cuda(self.device) context_batch = context_batch.cuda(self.device) batch_size = context_batch.shape[0] context_size = context_batch.shape[1] phi_w = self.phi[context_batch.long()] theta_d = torch.t(self.theta)[doc_inxs.long()] theta_d = theta_d.repeat(1, 1, context_size) theta_d = theta_d.view(batch_size, context_size, (- 1)) numerator = (phi_w * theta_d) denominator = torch.sum((phi_w * theta_d), dim=2, keepdim=True) n_tdw = ((n_dw * numerator) / denominator) n_tdw[torch.isnan(n_tdw)] = self.zero n_tdw_context = n_tdw.view((- 1), self.n_topics) n_tdw_doc = torch.sum(n_tdw, dim=1, keepdim=False) n_tdw_t = n_tdw.sum(1).sum(0) n_tdw_d = n_tdw.sum(2).sum(1) context_1d_mask = context_batch.view((- 1)) wt_index = context_1d_mask.long().cuda(self.device) (n_wt_update, wt_index) = self._group_by_with_index_mapping(wt_index, n_tdw_context) self.n_wt[wt_index] += n_wt_update self.n_td[(:, doc_inxs.long())] += n_tdw_doc.t() self.n_t += n_tdw_t self.n_d[doc_inxs.long()] += n_tdw_d
:param n_dw: freq of term 'w' occurrence in doc 'd' [[1, 1, 2, 1, 2] - for each word in a doc, ...] β€” [batch_size, context_size] :param doc_inxs: Tensor of doc inxs with shape [batch_size] :param context_batch: Tensor of word inxs with shape [batch_size, context_size] :return:
model_fn.py
e_step
ilyakhov/pytorch-wntm
3
python
def e_step(self, n_dw, doc_inxs, context_batch, gather_ndw=False): "\n :param n_dw: freq of term 'w' occurrence in doc 'd'\n [[1, 1, 2, 1, 2] - for each word in a doc, ...] β€”\n [batch_size, context_size]\n :param doc_inxs: Tensor of doc inxs with shape [batch_size]\n :param context_batch: Tensor of word inxs with shape\n [batch_size, context_size]\n :return:\n " with torch.cuda.device(self.device): if (gather_ndw is True): self.n_dw[(:, doc_inxs.long())] = n_dw.t() n_dw = torch.unsqueeze(n_dw, 2).cuda(self.device) context_batch = context_batch.cuda(self.device) batch_size = context_batch.shape[0] context_size = context_batch.shape[1] phi_w = self.phi[context_batch.long()] theta_d = torch.t(self.theta)[doc_inxs.long()] theta_d = theta_d.repeat(1, 1, context_size) theta_d = theta_d.view(batch_size, context_size, (- 1)) numerator = (phi_w * theta_d) denominator = torch.sum((phi_w * theta_d), dim=2, keepdim=True) n_tdw = ((n_dw * numerator) / denominator) n_tdw[torch.isnan(n_tdw)] = self.zero n_tdw_context = n_tdw.view((- 1), self.n_topics) n_tdw_doc = torch.sum(n_tdw, dim=1, keepdim=False) n_tdw_t = n_tdw.sum(1).sum(0) n_tdw_d = n_tdw.sum(2).sum(1) context_1d_mask = context_batch.view((- 1)) wt_index = context_1d_mask.long().cuda(self.device) (n_wt_update, wt_index) = self._group_by_with_index_mapping(wt_index, n_tdw_context) self.n_wt[wt_index] += n_wt_update self.n_td[(:, doc_inxs.long())] += n_tdw_doc.t() self.n_t += n_tdw_t self.n_d[doc_inxs.long()] += n_tdw_d
def e_step(self, n_dw, doc_inxs, context_batch, gather_ndw=False): "\n :param n_dw: freq of term 'w' occurrence in doc 'd'\n [[1, 1, 2, 1, 2] - for each word in a doc, ...] β€”\n [batch_size, context_size]\n :param doc_inxs: Tensor of doc inxs with shape [batch_size]\n :param context_batch: Tensor of word inxs with shape\n [batch_size, context_size]\n :return:\n " with torch.cuda.device(self.device): if (gather_ndw is True): self.n_dw[(:, doc_inxs.long())] = n_dw.t() n_dw = torch.unsqueeze(n_dw, 2).cuda(self.device) context_batch = context_batch.cuda(self.device) batch_size = context_batch.shape[0] context_size = context_batch.shape[1] phi_w = self.phi[context_batch.long()] theta_d = torch.t(self.theta)[doc_inxs.long()] theta_d = theta_d.repeat(1, 1, context_size) theta_d = theta_d.view(batch_size, context_size, (- 1)) numerator = (phi_w * theta_d) denominator = torch.sum((phi_w * theta_d), dim=2, keepdim=True) n_tdw = ((n_dw * numerator) / denominator) n_tdw[torch.isnan(n_tdw)] = self.zero n_tdw_context = n_tdw.view((- 1), self.n_topics) n_tdw_doc = torch.sum(n_tdw, dim=1, keepdim=False) n_tdw_t = n_tdw.sum(1).sum(0) n_tdw_d = n_tdw.sum(2).sum(1) context_1d_mask = context_batch.view((- 1)) wt_index = context_1d_mask.long().cuda(self.device) (n_wt_update, wt_index) = self._group_by_with_index_mapping(wt_index, n_tdw_context) self.n_wt[wt_index] += n_wt_update self.n_td[(:, doc_inxs.long())] += n_tdw_doc.t() self.n_t += n_tdw_t self.n_d[doc_inxs.long()] += n_tdw_d<|docstring|>:param n_dw: freq of term 'w' occurrence in doc 'd' [[1, 1, 2, 1, 2] - for each word in a doc, ...] β€” [batch_size, context_size] :param doc_inxs: Tensor of doc inxs with shape [batch_size] :param context_batch: Tensor of word inxs with shape [batch_size, context_size] :return:<|endoftext|>
f5c59aa64191544cc1bd3079d8736988394739525aacbd76217f49bc91b628cf
def _group_by_with_index_mapping(self, true_labels, samples): '\n TODO: implement stuff from "Notes of reproducibility"\n :param true_labels: indices for initial embedding matrix\n [100, 100, 200, 200, 0] =>\n [0, 100, 200], [1, 1, 2, 2, 0], [1, 2, 2]\n :param samples: 2D-tensor with vectors to agg(sum)\n [[0.1, .0], [-0.1, 0.2], [...], [...], [...]]\n :return: agg(sum): [[...], [.0, 0.1], [...]],\n index: [0, 100, 200]\n ' with torch.cuda.device(self.device): (true_unique_labels, ordering_index) = true_labels.unique(dim=0, return_counts=False, return_inverse=True) ordering_labels = ordering_index.view(ordering_index.size(0), 1).expand((- 1), samples.size(1)) (ordering_unique_labels, ordering_count) = ordering_labels.unique(dim=0, return_counts=True, return_inverse=False) grouped_res = torch.zeros_like(ordering_unique_labels, dtype=torch.float, device=self.device) grouped_res = grouped_res.scatter_add_(0, ordering_labels.cuda(self.device), samples.cuda(self.device)) return (grouped_res, true_unique_labels)
TODO: implement stuff from "Notes of reproducibility" :param true_labels: indices for initial embedding matrix [100, 100, 200, 200, 0] => [0, 100, 200], [1, 1, 2, 2, 0], [1, 2, 2] :param samples: 2D-tensor with vectors to agg(sum) [[0.1, .0], [-0.1, 0.2], [...], [...], [...]] :return: agg(sum): [[...], [.0, 0.1], [...]], index: [0, 100, 200]
model_fn.py
_group_by_with_index_mapping
ilyakhov/pytorch-wntm
3
python
def _group_by_with_index_mapping(self, true_labels, samples): '\n TODO: implement stuff from "Notes of reproducibility"\n :param true_labels: indices for initial embedding matrix\n [100, 100, 200, 200, 0] =>\n [0, 100, 200], [1, 1, 2, 2, 0], [1, 2, 2]\n :param samples: 2D-tensor with vectors to agg(sum)\n [[0.1, .0], [-0.1, 0.2], [...], [...], [...]]\n :return: agg(sum): [[...], [.0, 0.1], [...]],\n index: [0, 100, 200]\n ' with torch.cuda.device(self.device): (true_unique_labels, ordering_index) = true_labels.unique(dim=0, return_counts=False, return_inverse=True) ordering_labels = ordering_index.view(ordering_index.size(0), 1).expand((- 1), samples.size(1)) (ordering_unique_labels, ordering_count) = ordering_labels.unique(dim=0, return_counts=True, return_inverse=False) grouped_res = torch.zeros_like(ordering_unique_labels, dtype=torch.float, device=self.device) grouped_res = grouped_res.scatter_add_(0, ordering_labels.cuda(self.device), samples.cuda(self.device)) return (grouped_res, true_unique_labels)
def _group_by_with_index_mapping(self, true_labels, samples): '\n TODO: implement stuff from "Notes of reproducibility"\n :param true_labels: indices for initial embedding matrix\n [100, 100, 200, 200, 0] =>\n [0, 100, 200], [1, 1, 2, 2, 0], [1, 2, 2]\n :param samples: 2D-tensor with vectors to agg(sum)\n [[0.1, .0], [-0.1, 0.2], [...], [...], [...]]\n :return: agg(sum): [[...], [.0, 0.1], [...]],\n index: [0, 100, 200]\n ' with torch.cuda.device(self.device): (true_unique_labels, ordering_index) = true_labels.unique(dim=0, return_counts=False, return_inverse=True) ordering_labels = ordering_index.view(ordering_index.size(0), 1).expand((- 1), samples.size(1)) (ordering_unique_labels, ordering_count) = ordering_labels.unique(dim=0, return_counts=True, return_inverse=False) grouped_res = torch.zeros_like(ordering_unique_labels, dtype=torch.float, device=self.device) grouped_res = grouped_res.scatter_add_(0, ordering_labels.cuda(self.device), samples.cuda(self.device)) return (grouped_res, true_unique_labels)<|docstring|>TODO: implement stuff from "Notes of reproducibility" :param true_labels: indices for initial embedding matrix [100, 100, 200, 200, 0] => [0, 100, 200], [1, 1, 2, 2, 0], [1, 2, 2] :param samples: 2D-tensor with vectors to agg(sum) [[0.1, .0], [-0.1, 0.2], [...], [...], [...]] :return: agg(sum): [[...], [.0, 0.1], [...]], index: [0, 100, 200]<|endoftext|>
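The group-by-sum in the record rests on torch.unique(return_inverse=True) followed by scatter_add_; a self-contained CPU sketch with the labels from the docstring, without the class plumbing or CUDA transfers:

import torch

labels = torch.tensor([100, 100, 200, 200, 0])
samples = torch.tensor([[0.1, 0.0], [-0.1, 0.2], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])

uniq, inverse = labels.unique(return_inverse=True)         # uniq = [0, 100, 200]
index = inverse.unsqueeze(1).expand(-1, samples.size(1))   # same shape as samples
grouped = torch.zeros(uniq.size(0), samples.size(1))
grouped.scatter_add_(0, index, samples)                    # row i sums the samples whose label is uniq[i]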
e02a2c01f27168e013e450c577e34bf6c15f8694e152682bdd7fe34eb2b1fbca
def m_step(self): '\n Rational EM. The same is "smoothed/sparsed" with reg_tau=0.0\n :return:\n ' with torch.cuda.device(self.device): new_phi = (self.n_wt / self.n_t.view((- 1), self.n_topics)) phi_norm = ((torch.sum(((self.phi - new_phi) ** 2)) ** 1) / 2) self.phi_log.append(phi_norm.cpu().numpy()) self.phi = new_phi self.theta = (self.n_td / self.n_d.view((- 1), self.doc_count))
Rational EM. The same is "smoothed/sparsed" with reg_tau=0.0 :return:
model_fn.py
m_step
ilyakhov/pytorch-wntm
3
python
def m_step(self): '\n Rational EM. The same is "smoothed/sparsed" with reg_tau=0.0\n :return:\n ' with torch.cuda.device(self.device): new_phi = (self.n_wt / self.n_t.view((- 1), self.n_topics)) phi_norm = ((torch.sum(((self.phi - new_phi) ** 2)) ** 1) / 2) self.phi_log.append(phi_norm.cpu().numpy()) self.phi = new_phi self.theta = (self.n_td / self.n_d.view((- 1), self.doc_count))
def m_step(self): '\n Rational EM. The same is "smoothed/sparsed" with reg_tau=0.0\n :return:\n ' with torch.cuda.device(self.device): new_phi = (self.n_wt / self.n_t.view((- 1), self.n_topics)) phi_norm = ((torch.sum(((self.phi - new_phi) ** 2)) ** 1) / 2) self.phi_log.append(phi_norm.cpu().numpy()) self.phi = new_phi self.theta = (self.n_td / self.n_d.view((- 1), self.doc_count))<|docstring|>Rational EM. The same is "smoothed/sparsed" with reg_tau=0.0 :return:<|endoftext|>
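The M-step is a renormalisation of the accumulated counts into conditional probabilities; a toy NumPy sketch of the same step, with made-up matrix sizes:

import numpy as np

n_wt = np.random.rand(5, 3)               # word-topic counts (toy size)
n_t = n_wt.sum(axis=0, keepdims=True)     # per-topic totals
phi = n_wt / n_t                          # each column of phi now sums to 1: p(w|t)

n_td = np.random.rand(3, 4)               # topic-document counts
n_d = n_td.sum(axis=0, keepdims=True)     # per-document totals
theta = n_td / n_d                        # each column of theta sums to 1: p(t|d)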
94c1725291d84daf6a5669344a0e8ea65e0acb603351766874b332a90a58860a
def run_v2(self, batch_generator): '\n M-step after each E-step. Not tested enough!\n :param batch_generator:\n :return:\n ' for _ in tqdm(range(self.num_collection_passes), total=self.num_collection_passes, desc='Passing through collection: '): old_phi = self.phi.cpu() for (n_dw, doc_inxs, batch, context_len) in batch_generator: for _ in range(self.num_documents_passes): self.em_step(n_dw, doc_inxs, batch, context_len) assert (not np.any((torch.sum(torch.isnan(self.phi.cpu())).numpy() > 0))) phi_norm = ((torch.sum(((self.phi.cpu().float() - old_phi.float()) ** 2)) ** 1) / 2) self.phi_log.append(phi_norm.cpu().numpy()) self.__init_aux_vars() self.steps_trained += 1 logging.info(f'Phi norm: {self.phi_log[(self.steps_trained - 1)]}; step: {self.steps_trained}') if (self.dump_phi_freq and ((self.steps_trained % self.dump_phi_freq) == 0)): self.__dump_phi(self.steps_trained)
M-step after each E-step. Not tested enough! :param batch_generator: :return:
model_fn.py
run_v2
ilyakhov/pytorch-wntm
3
python
def run_v2(self, batch_generator): '\n M-step after each E-step. Not tested enough!\n :param batch_generator:\n :return:\n ' for _ in tqdm(range(self.num_collection_passes), total=self.num_collection_passes, desc='Passing through collection: '): old_phi = self.phi.cpu() for (n_dw, doc_inxs, batch, context_len) in batch_generator: for _ in range(self.num_documents_passes): self.em_step(n_dw, doc_inxs, batch, context_len) assert (not np.any((torch.sum(torch.isnan(self.phi.cpu())).numpy() > 0))) phi_norm = ((torch.sum(((self.phi.cpu().float() - old_phi.float()) ** 2)) ** 1) / 2) self.phi_log.append(phi_norm.cpu().numpy()) self.__init_aux_vars() self.steps_trained += 1 logging.info(f'Phi norm: {self.phi_log[(self.steps_trained - 1)]}; step: {self.steps_trained}') if (self.dump_phi_freq and ((self.steps_trained % self.dump_phi_freq) == 0)): self.__dump_phi(self.steps_trained)
def run_v2(self, batch_generator): '\n M-step after each E-step. Not tested enough!\n :param batch_generator:\n :return:\n ' for _ in tqdm(range(self.num_collection_passes), total=self.num_collection_passes, desc='Passing through collection: '): old_phi = self.phi.cpu() for (n_dw, doc_inxs, batch, context_len) in batch_generator: for _ in range(self.num_documents_passes): self.em_step(n_dw, doc_inxs, batch, context_len) assert (not np.any((torch.sum(torch.isnan(self.phi.cpu())).numpy() > 0))) phi_norm = ((torch.sum(((self.phi.cpu().float() - old_phi.float()) ** 2)) ** 1) / 2) self.phi_log.append(phi_norm.cpu().numpy()) self.__init_aux_vars() self.steps_trained += 1 logging.info(f'Phi norm: {self.phi_log[(self.steps_trained - 1)]}; step: {self.steps_trained}') if (self.dump_phi_freq and ((self.steps_trained % self.dump_phi_freq) == 0)): self.__dump_phi(self.steps_trained)<|docstring|>M-step after each E-step. Not tested enough! :param batch_generator: :return:<|endoftext|>
872522fff92ce50f350fb951e543f6f2a13faa14171af9037d1d807fe23dc516
def __rectify_v2(self, t): "\n Rectification on each step is expensive operation if\n data are being copied on cpu. For train_mode='v2' no data copy\n to 'cpu'(RAM) has being used.\n :param t:\n :return:\n " t = torch.where((t < self.zero), self.zero, t) t = torch.where((t != t), self.zero, t) return t
Rectification on each step is expensive operation if data are being copied on cpu. For train_mode='v2' no data copy to 'cpu'(RAM) has being used. :param t: :return:
model_fn.py
__rectify_v2
ilyakhov/pytorch-wntm
3
python
def __rectify_v2(self, t): "\n Rectification on each step is expensive operation if\n data are being copied on cpu. For train_mode='v2' no data copy\n to 'cpu'(RAM) has being used.\n :param t:\n :return:\n " t = torch.where((t < self.zero), self.zero, t) t = torch.where((t != t), self.zero, t) return t
def __rectify_v2(self, t): "\n Rectification on each step is expensive operation if\n data are being copied on cpu. For train_mode='v2' no data copy\n to 'cpu'(RAM) has being used.\n :param t:\n :return:\n " t = torch.where((t < self.zero), self.zero, t) t = torch.where((t != t), self.zero, t) return t<|docstring|>Rectification on each step is expensive operation if data are being copied on cpu. For train_mode='v2' no data copy to 'cpu'(RAM) has being used. :param t: :return:<|endoftext|>
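Both torch.where calls in the record rely on negatives comparing below zero and on NaN != NaN; a two-line sketch on a toy tensor:

import torch

zero = torch.tensor(0.0)
t = torch.tensor([0.3, -0.1, float('nan')])
t = torch.where(t < zero, zero, t)   # clip negatives to 0
t = torch.where(t != t, zero, t)     # NaN != NaN, so NaNs also become 0
# t is now tensor([0.3, 0.0, 0.0])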
f30fd7a798c581dfc36dd85174c3569d6a36a94825b6fff467acf153834dba2e
def __init__(self, n_topics, vocab_size, doc_count, batch_size, batch_steps, num_collection_passes, num_documents_passes, device, dtype, phi_smooth_sparse_tau=0.0, theta_smooth_sparse_tau=0.0, vocab_stat=None, mode='v1', dump_phi_freq=None, dump_phi_path=None, log_perplexity=False, log_matrix_norms=False): '\n :param n_topics:\n :param vocab_size:\n :param doc_count:\n :param context_size:\n :param batch_size:\n :param batch_steps:\n :param num_collection_passes:\n :param num_documents_passes:\n :param device:\n :param dtype:\n :param phi_smooth_sparse_tau:\n :param theta_smooth_sparse_tau:\n :param vocab_stat: TF for phi sparse/smooth reg.\n :param mode: v1/v2; v1 - e-step for all batches, m-step after all\n v2 - em-step on each batch\n ' super(WNTM_pLSA, self).__init__(n_topics=n_topics, vocab_size=vocab_size, doc_count=doc_count, batch_size=batch_size, batch_steps=batch_steps, num_collection_passes=num_collection_passes, num_documents_passes=num_documents_passes, device=device, dtype=dtype, phi_smooth_sparse_tau=phi_smooth_sparse_tau, theta_smooth_sparse_tau=theta_smooth_sparse_tau, vocab_stat=vocab_stat, mode=mode, dump_phi_freq=dump_phi_freq, dump_phi_path=dump_phi_path, log_perplexity=log_perplexity, log_matrix_norms=log_matrix_norms) self.__init_const()
:param n_topics: :param vocab_size: :param doc_count: :param context_size: :param batch_size: :param batch_steps: :param num_collection_passes: :param num_documents_passes: :param device: :param dtype: :param phi_smooth_sparse_tau: :param theta_smooth_sparse_tau: :param vocab_stat: TF for phi sparse/smooth reg. :param mode: v1/v2; v1 - e-step for all batches, m-step after all v2 - em-step on each batch
model_fn.py
__init__
ilyakhov/pytorch-wntm
3
python
def __init__(self, n_topics, vocab_size, doc_count, batch_size, batch_steps, num_collection_passes, num_documents_passes, device, dtype, phi_smooth_sparse_tau=0.0, theta_smooth_sparse_tau=0.0, vocab_stat=None, mode='v1', dump_phi_freq=None, dump_phi_path=None, log_perplexity=False, log_matrix_norms=False): '\n :param n_topics:\n :param vocab_size:\n :param doc_count:\n :param context_size:\n :param batch_size:\n :param batch_steps:\n :param num_collection_passes:\n :param num_documents_passes:\n :param device:\n :param dtype:\n :param phi_smooth_sparse_tau:\n :param theta_smooth_sparse_tau:\n :param vocab_stat: TF for phi sparse/smooth reg.\n :param mode: v1/v2; v1 - e-step for all batches, m-step after all\n v2 - em-step on each batch\n ' super(WNTM_pLSA, self).__init__(n_topics=n_topics, vocab_size=vocab_size, doc_count=doc_count, batch_size=batch_size, batch_steps=batch_steps, num_collection_passes=num_collection_passes, num_documents_passes=num_documents_passes, device=device, dtype=dtype, phi_smooth_sparse_tau=phi_smooth_sparse_tau, theta_smooth_sparse_tau=theta_smooth_sparse_tau, vocab_stat=vocab_stat, mode=mode, dump_phi_freq=dump_phi_freq, dump_phi_path=dump_phi_path, log_perplexity=log_perplexity, log_matrix_norms=log_matrix_norms) self.__init_const()
def __init__(self, n_topics, vocab_size, doc_count, batch_size, batch_steps, num_collection_passes, num_documents_passes, device, dtype, phi_smooth_sparse_tau=0.0, theta_smooth_sparse_tau=0.0, vocab_stat=None, mode='v1', dump_phi_freq=None, dump_phi_path=None, log_perplexity=False, log_matrix_norms=False): '\n :param n_topics:\n :param vocab_size:\n :param doc_count:\n :param context_size:\n :param batch_size:\n :param batch_steps:\n :param num_collection_passes:\n :param num_documents_passes:\n :param device:\n :param dtype:\n :param phi_smooth_sparse_tau:\n :param theta_smooth_sparse_tau:\n :param vocab_stat: TF for phi sparse/smooth reg.\n :param mode: v1/v2; v1 - e-step for all batches, m-step after all\n v2 - em-step on each batch\n ' super(WNTM_pLSA, self).__init__(n_topics=n_topics, vocab_size=vocab_size, doc_count=doc_count, batch_size=batch_size, batch_steps=batch_steps, num_collection_passes=num_collection_passes, num_documents_passes=num_documents_passes, device=device, dtype=dtype, phi_smooth_sparse_tau=phi_smooth_sparse_tau, theta_smooth_sparse_tau=theta_smooth_sparse_tau, vocab_stat=vocab_stat, mode=mode, dump_phi_freq=dump_phi_freq, dump_phi_path=dump_phi_path, log_perplexity=log_perplexity, log_matrix_norms=log_matrix_norms) self.__init_const()<|docstring|>:param n_topics: :param vocab_size: :param doc_count: :param context_size: :param batch_size: :param batch_steps: :param num_collection_passes: :param num_documents_passes: :param device: :param dtype: :param phi_smooth_sparse_tau: :param theta_smooth_sparse_tau: :param vocab_stat: TF for phi sparse/smooth reg. :param mode: v1/v2; v1 - e-step for all batches, m-step after all v2 - em-step on each batch<|endoftext|>
6913eb75a0bbcd8aae093006ef21a9b0eccddc1433cf314193adea566edec713
def e_step(self, n_dw, doc_inxs, context_batch, gather_ndw=False): "\n :param n_dw: freq of term 'w' occurrence in doc 'd'\n [[1, 1, 2, 1, 2] - for each word in a doc, ...] β€”\n [batch_size, context_size]\n :param doc_inxs: Tensor of doc inxs with shape [batch_size]\n :param context_batch: Tensor of word inxs with shape\n [batch_size, context_size]\n :param first: 'first' iteration over documents\n to gather self.n_dw matrix\n :return:\n " with torch.cuda.device(self.device): if (gather_ndw is True): self.n_dw[(:, doc_inxs.long())] = n_dw.t() context_batch = context_batch.cuda(self.device) batch_size = context_batch.shape[0] context_size = context_batch.shape[1] phi_w = self.phi[context_batch.long()] theta_d = torch.t(self.theta)[doc_inxs.long()] mask = (context_batch == self.unk_inx) phi_w = torch.masked_fill(phi_w, mask.view(batch_size, context_size, (- 1)), self.zero) theta_d = theta_d.repeat(1, 1, context_size) theta_d = theta_d.view(batch_size, context_size, (- 1)) numerator = (phi_w * theta_d) denominator = torch.sum((phi_w * theta_d), dim=2, keepdim=True) n_tdw = (numerator / denominator) n_tdw[torch.isnan(n_tdw)] = self.zero n_tdw_context = n_tdw.view((- 1), self.n_topics) n_tdw_doc = torch.sum(n_tdw, dim=1, keepdim=False) n_tdw_t = n_tdw.sum(1).sum(0) n_tdw_d = n_tdw.sum(2).sum(1) context_1d_mask = context_batch.view((- 1)) wt_index = context_1d_mask.long().cuda(self.device) (n_wt_update, wt_index) = self._group_by_with_index_mapping(wt_index, n_tdw_context) self.n_wt[wt_index] += n_wt_update self.n_td[(:, doc_inxs.long())] += n_tdw_doc.t() self.n_t += n_tdw_t self.n_d[doc_inxs.long()] += n_tdw_d
:param n_dw: freq of term 'w' occurrence in doc 'd' [[1, 1, 2, 1, 2] - for each word in a doc, ...] β€” [batch_size, context_size] :param doc_inxs: Tensor of doc inxs with shape [batch_size] :param context_batch: Tensor of word inxs with shape [batch_size, context_size] :param first: 'first' iteration over documents to gather self.n_dw matrix :return:
model_fn.py
e_step
ilyakhov/pytorch-wntm
3
python
def e_step(self, n_dw, doc_inxs, context_batch, gather_ndw=False): "\n :param n_dw: freq of term 'w' occurrence in doc 'd'\n [[1, 1, 2, 1, 2] - for each word in a doc, ...] β€”\n [batch_size, context_size]\n :param doc_inxs: Tensor of doc inxs with shape [batch_size]\n :param context_batch: Tensor of word inxs with shape\n [batch_size, context_size]\n :param first: 'first' iteration over documents\n to gather self.n_dw matrix\n :return:\n " with torch.cuda.device(self.device): if (gather_ndw is True): self.n_dw[(:, doc_inxs.long())] = n_dw.t() context_batch = context_batch.cuda(self.device) batch_size = context_batch.shape[0] context_size = context_batch.shape[1] phi_w = self.phi[context_batch.long()] theta_d = torch.t(self.theta)[doc_inxs.long()] mask = (context_batch == self.unk_inx) phi_w = torch.masked_fill(phi_w, mask.view(batch_size, context_size, (- 1)), self.zero) theta_d = theta_d.repeat(1, 1, context_size) theta_d = theta_d.view(batch_size, context_size, (- 1)) numerator = (phi_w * theta_d) denominator = torch.sum((phi_w * theta_d), dim=2, keepdim=True) n_tdw = (numerator / denominator) n_tdw[torch.isnan(n_tdw)] = self.zero n_tdw_context = n_tdw.view((- 1), self.n_topics) n_tdw_doc = torch.sum(n_tdw, dim=1, keepdim=False) n_tdw_t = n_tdw.sum(1).sum(0) n_tdw_d = n_tdw.sum(2).sum(1) context_1d_mask = context_batch.view((- 1)) wt_index = context_1d_mask.long().cuda(self.device) (n_wt_update, wt_index) = self._group_by_with_index_mapping(wt_index, n_tdw_context) self.n_wt[wt_index] += n_wt_update self.n_td[(:, doc_inxs.long())] += n_tdw_doc.t() self.n_t += n_tdw_t self.n_d[doc_inxs.long()] += n_tdw_d
def e_step(self, n_dw, doc_inxs, context_batch, gather_ndw=False): "\n :param n_dw: freq of term 'w' occurrence in doc 'd'\n [[1, 1, 2, 1, 2] - for each word in a doc, ...] β€”\n [batch_size, context_size]\n :param doc_inxs: Tensor of doc inxs with shape [batch_size]\n :param context_batch: Tensor of word inxs with shape\n [batch_size, context_size]\n :param first: 'first' iteration over documents\n to gather self.n_dw matrix\n :return:\n " with torch.cuda.device(self.device): if (gather_ndw is True): self.n_dw[(:, doc_inxs.long())] = n_dw.t() context_batch = context_batch.cuda(self.device) batch_size = context_batch.shape[0] context_size = context_batch.shape[1] phi_w = self.phi[context_batch.long()] theta_d = torch.t(self.theta)[doc_inxs.long()] mask = (context_batch == self.unk_inx) phi_w = torch.masked_fill(phi_w, mask.view(batch_size, context_size, (- 1)), self.zero) theta_d = theta_d.repeat(1, 1, context_size) theta_d = theta_d.view(batch_size, context_size, (- 1)) numerator = (phi_w * theta_d) denominator = torch.sum((phi_w * theta_d), dim=2, keepdim=True) n_tdw = (numerator / denominator) n_tdw[torch.isnan(n_tdw)] = self.zero n_tdw_context = n_tdw.view((- 1), self.n_topics) n_tdw_doc = torch.sum(n_tdw, dim=1, keepdim=False) n_tdw_t = n_tdw.sum(1).sum(0) n_tdw_d = n_tdw.sum(2).sum(1) context_1d_mask = context_batch.view((- 1)) wt_index = context_1d_mask.long().cuda(self.device) (n_wt_update, wt_index) = self._group_by_with_index_mapping(wt_index, n_tdw_context) self.n_wt[wt_index] += n_wt_update self.n_td[(:, doc_inxs.long())] += n_tdw_doc.t() self.n_t += n_tdw_t self.n_d[doc_inxs.long()] += n_tdw_d<|docstring|>:param n_dw: freq of term 'w' occurrence in doc 'd' [[1, 1, 2, 1, 2] - for each word in a doc, ...] β€” [batch_size, context_size] :param doc_inxs: Tensor of doc inxs with shape [batch_size] :param context_batch: Tensor of word inxs with shape [batch_size, context_size] :param first: 'first' iteration over documents to gather self.n_dw matrix :return:<|endoftext|>
88c8670d83f2cde855d0d0ab6e6b666f8b2cf79bcb84852a81fa8ceb54688bec
def __init__(self, filters, p_m, get_mask, apply_mask, path_json='src/python_code/settings.json'): '\n CNN decoder layers (tensorflow 2 book)\n :param filters: list filters\n :param path_json: path settings\n ' settings = json.load(open(path_json))['Model'] hyperparameters = settings['decoder_cnn'][int(settings['size_use'])] super(DecoderCNN, self).__init__() self.get_mask = get_mask self.apply_mask = apply_mask self.conv1 = tf.keras.layers.Conv2D(filters=filters[2], kernel_size=3, strides=1, activation='relu', padding='same') self.conv2 = tf.keras.layers.Conv2D(filters=filters[1], kernel_size=3, strides=1, activation='relu', padding='same') self.conv3 = tf.keras.layers.Conv2D(filters=filters[0], kernel_size=3, strides=1, activation='relu', padding=hyperparameters['padding_last']) self.conv4 = tf.keras.layers.Conv2D(filters=int(hyperparameters['channels_last']), kernel_size=3, strides=1, activation='sigmoid', padding='same') self.upsample = tf.keras.layers.UpSampling2D((2, 2)) self.reshape2 = tf.keras.layers.Reshape((4, 4, 32)) self._layers_ = [self.conv1, self.conv2, self.conv3, self.conv4] self.p_m = p_m
CNN decoder layers (tensorflow 2 book) :param filters: list filters :param path_json: path settings
src/python_code/Models/EAE_models/DecoderCNN.py
__init__
ipmach/Thesis2021
0
python
def __init__(self, filters, p_m, get_mask, apply_mask, path_json='src/python_code/settings.json'): '\n CNN decoder layers (tensorflow 2 book)\n :param filters: list filters\n :param path_json: path settings\n ' settings = json.load(open(path_json))['Model'] hyperparameters = settings['decoder_cnn'][int(settings['size_use'])] super(DecoderCNN, self).__init__() self.get_mask = get_mask self.apply_mask = apply_mask self.conv1 = tf.keras.layers.Conv2D(filters=filters[2], kernel_size=3, strides=1, activation='relu', padding='same') self.conv2 = tf.keras.layers.Conv2D(filters=filters[1], kernel_size=3, strides=1, activation='relu', padding='same') self.conv3 = tf.keras.layers.Conv2D(filters=filters[0], kernel_size=3, strides=1, activation='relu', padding=hyperparameters['padding_last']) self.conv4 = tf.keras.layers.Conv2D(filters=int(hyperparameters['channels_last']), kernel_size=3, strides=1, activation='sigmoid', padding='same') self.upsample = tf.keras.layers.UpSampling2D((2, 2)) self.reshape2 = tf.keras.layers.Reshape((4, 4, 32)) self._layers_ = [self.conv1, self.conv2, self.conv3, self.conv4] self.p_m = p_m
def __init__(self, filters, p_m, get_mask, apply_mask, path_json='src/python_code/settings.json'): '\n CNN decoder layers (tensorflow 2 book)\n :param filters: list filters\n :param path_json: path settings\n ' settings = json.load(open(path_json))['Model'] hyperparameters = settings['decoder_cnn'][int(settings['size_use'])] super(DecoderCNN, self).__init__() self.get_mask = get_mask self.apply_mask = apply_mask self.conv1 = tf.keras.layers.Conv2D(filters=filters[2], kernel_size=3, strides=1, activation='relu', padding='same') self.conv2 = tf.keras.layers.Conv2D(filters=filters[1], kernel_size=3, strides=1, activation='relu', padding='same') self.conv3 = tf.keras.layers.Conv2D(filters=filters[0], kernel_size=3, strides=1, activation='relu', padding=hyperparameters['padding_last']) self.conv4 = tf.keras.layers.Conv2D(filters=int(hyperparameters['channels_last']), kernel_size=3, strides=1, activation='sigmoid', padding='same') self.upsample = tf.keras.layers.UpSampling2D((2, 2)) self.reshape2 = tf.keras.layers.Reshape((4, 4, 32)) self._layers_ = [self.conv1, self.conv2, self.conv3, self.conv4] self.p_m = p_m<|docstring|>CNN decoder layers (tensorflow 2 book) :param filters: list filters :param path_json: path settings<|endoftext|>
6809d902bbacf9eac6c00c94ba9882f5b7bc3526f525fb4d5d264651951d58da
def initialize_masks(self): '\n        Initialize masks for the model\n        :return:\n        ' self._masks_ = [] for i in self._layers_: self._masks_.append(self.get_mask(i.get_weights()[0].shape, p=self.p_m))
Initialize masks for the model :return:
src/python_code/Models/EAE_models/DecoderCNN.py
initialize_masks
ipmach/Thesis2021
0
python
def initialize_masks(self): '\n        Initialize masks for the model\n        :return:\n        ' self._masks_ = [] for i in self._layers_: self._masks_.append(self.get_mask(i.get_weights()[0].shape, p=self.p_m))
def initialize_masks(self): '\n        Initialize masks for the model\n        :return:\n        ' self._masks_ = [] for i in self._layers_: self._masks_.append(self.get_mask(i.get_weights()[0].shape, p=self.p_m))<|docstring|>Initialize masks for the model :return:<|endoftext|>
6939b7bdd5414e29efdb790dced6d59dadc90a6f90011df8566df9288a0bbd20
def apply_masks(self): '\n Apply masks to all layers of the model\n :return:\n ' for (l, m) in zip(self._layers_, self._masks_): new_weights = self.apply_mask(m, l.get_weights()) l.set_weights(new_weights)
Apply masks to all layers of the model :return:
src/python_code/Models/EAE_models/DecoderCNN.py
apply_masks
ipmach/Thesis2021
0
python
def apply_masks(self): '\n Apply masks to all layers of the model\n :return:\n ' for (l, m) in zip(self._layers_, self._masks_): new_weights = self.apply_mask(m, l.get_weights()) l.set_weights(new_weights)
def apply_masks(self): '\n Apply masks to all layers of the model\n :return:\n ' for (l, m) in zip(self._layers_, self._masks_): new_weights = self.apply_mask(m, l.get_weights()) l.set_weights(new_weights)<|docstring|>Apply masks to all layers of the model :return:<|endoftext|>
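The initialize_masks / apply_masks pair above assumes get_mask returns a fresh mask for a given weight shape and apply_mask combines a mask with a layer's weight list. A minimal sketch of such a pair for Keras-style [kernel, bias] weights; the helper bodies and the Bernoulli masking rule are assumptions, not taken from the repository.

import numpy as np

def get_mask(shape, p=0.5):
    # keep each kernel weight with probability 1 - p (assumed convention)
    return (np.random.rand(*shape) > p).astype('float32')

def apply_mask(mask, weights):
    kernel, bias = weights            # Keras Conv2D weights come as [kernel, bias]
    return [kernel * mask, bias]      # zero out the masked kernel entries, leave the bias alone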
e863dfd88212590acb95344865d47f979802c29a18849919053f337bdcffad5a
def binary_exp(n): 'Binary exponentiation algorithm' cur = base res = 1 if (not n): return res while True: if (n & 1): res *= cur if (n == 1): return res cur *= cur n >>= 1
Binary exponentiation algorithm
tests/perfomance/power.py
binary_exp
borzunov/cpmoptimize
121
python
def binary_exp(n): cur = base res = 1 if (not n): return res while True: if (n & 1): res *= cur if (n == 1): return res cur *= cur n >>= 1
def binary_exp(n): cur = base res = 1 if (not n): return res while True: if (n & 1): res *= cur if (n == 1): return res cur *= cur n >>= 1<|docstring|>Binary exponentiation algorithm<|endoftext|>
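The record's binary_exp reads base from an enclosing scope; the same square-and-multiply idea written as a self-contained function, with a quick check:

def binary_exp(base, n):
    # square-and-multiply: O(log n) multiplications
    result = 1
    while n:
        if n & 1:
            result *= base
        base *= base
        n >>= 1
    return result

assert binary_exp(3, 13) == 3 ** 13
assert binary_exp(7, 0) == 1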
f3abeb105731c3a677d6412c50614bc26a7a908b5482a79c8e232b2344973cbe
def train(train_generator, test_generator, criterion, model, epochs, optimizer, Batch_size): 'Function to train a pytorch model\n Args:\n train_generator: pytorch train generator instance\n test_generator: pytorch test generator instance\n criterion: pytorch criterion\n model: pytorch model\n epochs: int, number of epochs to train\n optmizer: pytorch optmizer\n Batch_size: int, batch size for forward passes\n \n Returns:\n train_losses: list with the train losses \n validation_losses: list with the validation losses\n accuracy_vect: list with the accuracy of the classification of the test\n \n ' device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu')) (train_losses, validation_losses) = ([], []) accuracy_vect = [] for e in range(epochs): print('epoch ', e) running_loss = 0 for i in range(1): (images, labels) = next(iter(train_generator)) (images, labels) = (images.to(device), labels.to(device)) optimizer.zero_grad() log_ps = model(images) loss = criterion(log_ps, labels) loss.backward() optimizer.step() running_loss += loss.item() else: accuracy = 0 validation_loss = 0 with torch.no_grad(): model.eval() for i in range(1): (images, labels) = next(iter(test_generator)) (images, labels) = (images.to(device), labels.to(device)) log_ps = model(images) validation_loss += criterion(log_ps, labels).item() ps = torch.exp(log_ps) (top_p, top_class) = ps.topk(1, dim=1) print(torch.reshape(top_class, (1, (- 1)))) print(labels) hits = (top_class == labels.view(*top_class.shape)) accuracy += torch.mean(hits.type(torch.FloatTensor)) model.train() train_losses.append((running_loss / Batch_size)) validation_losses.append((validation_loss / Batch_size)) accuracy_vect.append(accuracy.numpy().item(0)) return (train_losses, validation_losses, accuracy_vect)
Function to train a pytorch model Args: train_generator: pytorch train generator instance test_generator: pytorch test generator instance criterion: pytorch criterion model: pytorch model epochs: int, number of epochs to train optmizer: pytorch optmizer Batch_size: int, batch size for forward passes Returns: train_losses: list with the train losses validation_losses: list with the validation losses accuracy_vect: list with the accuracy of the classification of the test
60_Grainsize_project/DL_functions/DL_train.py
train
htorodriguez/grainsize_measure
0
python
def train(train_generator, test_generator, criterion, model, epochs, optimizer, Batch_size): 'Function to train a pytorch model\n Args:\n train_generator: pytorch train generator instance\n test_generator: pytorch test generator instance\n criterion: pytorch criterion\n model: pytorch model\n epochs: int, number of epochs to train\n optmizer: pytorch optmizer\n Batch_size: int, batch size for forward passes\n \n Returns:\n train_losses: list with the train losses \n validation_losses: list with the validation losses\n accuracy_vect: list with the accuracy of the classification of the test\n \n ' device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu')) (train_losses, validation_losses) = ([], []) accuracy_vect = [] for e in range(epochs): print('epoch ', e) running_loss = 0 for i in range(1): (images, labels) = next(iter(train_generator)) (images, labels) = (images.to(device), labels.to(device)) optimizer.zero_grad() log_ps = model(images) loss = criterion(log_ps, labels) loss.backward() optimizer.step() running_loss += loss.item() else: accuracy = 0 validation_loss = 0 with torch.no_grad(): model.eval() for i in range(1): (images, labels) = next(iter(test_generator)) (images, labels) = (images.to(device), labels.to(device)) log_ps = model(images) validation_loss += criterion(log_ps, labels).item() ps = torch.exp(log_ps) (top_p, top_class) = ps.topk(1, dim=1) print(torch.reshape(top_class, (1, (- 1)))) print(labels) hits = (top_class == labels.view(*top_class.shape)) accuracy += torch.mean(hits.type(torch.FloatTensor)) model.train() train_losses.append((running_loss / Batch_size)) validation_losses.append((validation_loss / Batch_size)) accuracy_vect.append(accuracy.numpy().item(0)) return (train_losses, validation_losses, accuracy_vect)
def train(train_generator, test_generator, criterion, model, epochs, optimizer, Batch_size): 'Function to train a pytorch model\n Args:\n train_generator: pytorch train generator instance\n test_generator: pytorch test generator instance\n criterion: pytorch criterion\n model: pytorch model\n epochs: int, number of epochs to train\n optmizer: pytorch optmizer\n Batch_size: int, batch size for forward passes\n \n Returns:\n train_losses: list with the train losses \n validation_losses: list with the validation losses\n accuracy_vect: list with the accuracy of the classification of the test\n \n ' device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu')) (train_losses, validation_losses) = ([], []) accuracy_vect = [] for e in range(epochs): print('epoch ', e) running_loss = 0 for i in range(1): (images, labels) = next(iter(train_generator)) (images, labels) = (images.to(device), labels.to(device)) optimizer.zero_grad() log_ps = model(images) loss = criterion(log_ps, labels) loss.backward() optimizer.step() running_loss += loss.item() else: accuracy = 0 validation_loss = 0 with torch.no_grad(): model.eval() for i in range(1): (images, labels) = next(iter(test_generator)) (images, labels) = (images.to(device), labels.to(device)) log_ps = model(images) validation_loss += criterion(log_ps, labels).item() ps = torch.exp(log_ps) (top_p, top_class) = ps.topk(1, dim=1) print(torch.reshape(top_class, (1, (- 1)))) print(labels) hits = (top_class == labels.view(*top_class.shape)) accuracy += torch.mean(hits.type(torch.FloatTensor)) model.train() train_losses.append((running_loss / Batch_size)) validation_losses.append((validation_loss / Batch_size)) accuracy_vect.append(accuracy.numpy().item(0)) return (train_losses, validation_losses, accuracy_vect)<|docstring|>Function to train a pytorch model Args: train_generator: pytorch train generator instance test_generator: pytorch test generator instance criterion: pytorch criterion model: pytorch model epochs: int, number of epochs to train optmizer: pytorch optmizer Batch_size: int, batch size for forward passes Returns: train_losses: list with the train losses validation_losses: list with the validation losses accuracy_vect: list with the accuracy of the classification of the test<|endoftext|>
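The accuracy bookkeeping in the loop above (exponentiating log-probabilities, topk, comparing against reshaped labels) can be exercised in isolation; the shapes below are arbitrary:

import torch

log_ps = torch.log_softmax(torch.randn(4, 3), dim=1)   # model output for 4 samples, 3 classes
labels = torch.tensor([0, 2, 1, 2])

ps = torch.exp(log_ps)
top_p, top_class = ps.topk(1, dim=1)                   # top_class has shape [4, 1]
hits = top_class == labels.view(*top_class.shape)
accuracy = torch.mean(hits.type(torch.FloatTensor))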
04e79e025310e9b0232e0401f757198630acefbc7e907eef0399631aa9d9ce03
@fitparam(param_name='flat_topP', param_latex='$P^{mie}_\\mathrm{top}$', default_mode='log', default_fit=False, default_bounds=[1e-20, 1]) def mieTopPressure(self): '\n Pressure at top of absorbing region in Pa\n ' return self._mie_top_pressure
Pressure at top of absorbing region in Pa
taurex/contributions/flatmie.py
mieTopPressure
ucl-exoplanets/TauREx3_public
10
python
@fitparam(param_name='flat_topP', param_latex='$P^{mie}_\\mathrm{top}$', default_mode='log', default_fit=False, default_bounds=[1e-20, 1]) def mieTopPressure(self): '\n \n ' return self._mie_top_pressure
@fitparam(param_name='flat_topP', param_latex='$P^{mie}_\\mathrm{top}$', default_mode='log', default_fit=False, default_bounds=[1e-20, 1]) def mieTopPressure(self): '\n \n ' return self._mie_top_pressure<|docstring|>Pressure at top of absorbing region in Pa<|endoftext|>
9b1a1534d195ab21458fb1320f77668ae73fce0e6fc0d659cdfe14925d54a6dd
@fitparam(param_name='flat_bottomP', param_latex='$P^{mie}_\\mathrm{bottom}$', default_mode='log', default_fit=False, default_bounds=[1e-20, 1]) def mieBottomPressure(self): '\n Pressure at bottom of absorbing region in Pa\n ' return self._mie_bottom_pressure
Pressure at bottom of absorbing region in Pa
taurex/contributions/flatmie.py
mieBottomPressure
ucl-exoplanets/TauREx3_public
10
python
@fitparam(param_name='flat_bottomP', param_latex='$P^{mie}_\\mathrm{bottom}$', default_mode='log', default_fit=False, default_bounds=[1e-20, 1]) def mieBottomPressure(self): '\n \n ' return self._mie_bottom_pressure
@fitparam(param_name='flat_bottomP', param_latex='$P^{mie}_\\mathrm{bottom}$', default_mode='log', default_fit=False, default_bounds=[1e-20, 1]) def mieBottomPressure(self): '\n \n ' return self._mie_bottom_pressure<|docstring|>Pressure at bottom of absorbing region in Pa<|endoftext|>
52f3d0e5a67b7806b3c1360f72c13b1bde0f0c1ff55b30e52eb6b874596a1295
@fitparam(param_name='flat_mix_ratio', param_latex='$\\chi_\\mathrm{mie}$', default_mode='log', default_fit=False, default_bounds=[1e-20, 1]) def mieMixing(self): '\n Opacity of absorbing region in m2\n ' return self._mie_mix
Opacity of absorbing region in m2
taurex/contributions/flatmie.py
mieMixing
ucl-exoplanets/TauREx3_public
10
python
@fitparam(param_name='flat_mix_ratio', param_latex='$\\chi_\\mathrm{mie}$', default_mode='log', default_fit=False, default_bounds=[1e-20, 1]) def mieMixing(self): '\n \n ' return self._mie_mix
@fitparam(param_name='flat_mix_ratio', param_latex='$\\chi_\\mathrm{mie}$', default_mode='log', default_fit=False, default_bounds=[1e-20, 1]) def mieMixing(self): '\n \n ' return self._mie_mix<|docstring|>Opacity of absorbing region in m2<|endoftext|>
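The three records above only show the fitparam decorator being used. A minimal sketch, not the TauREx implementation, of how such a decorator can attach fitting metadata to a getter while still exposing it as a read-only property:

def fitparam(**meta):
    def wrap(getter):
        getter.fit_meta = meta        # stash the metadata on the underlying function
        return property(getter)
    return wrap

class Toy:
    def __init__(self):
        self._mix = 1e-10

    @fitparam(param_name='flat_mix_ratio', default_bounds=[1e-20, 1])
    def mieMixing(self):
        return self._mix

t = Toy()
print(t.mieMixing)                    # 1e-10
print(Toy.mieMixing.fget.fit_meta)    # {'param_name': 'flat_mix_ratio', 'default_bounds': [1e-20, 1]}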
5bcc51c3d54fd573aef99598ef45fe097a57863f03bf89f35d734fd152d06d1d
def prepare_each(self, model, wngrid): '\n Computes and flat absorbing opacity for\n the pressure regions given\n\n Parameters\n ----------\n model: :class:`~taurex.model.model.ForwardModel`\n Forward model\n\n wngrid: :obj:`array`\n Wavenumber grid\n\n Yields\n ------\n component: :obj:`tuple` of type (str, :obj:`array`)\n ``Flat`` and the weighted mie opacity.\n\n\n ' self._nlayers = model.nLayers self._ngrid = wngrid.shape[0] pressure_levels = np.log10(model.pressure.pressure_profile_levels[::(- 1)]) bottom_pressure = self.mieBottomPressure if (bottom_pressure < 0): bottom_pressure = pressure_levels.max() top_pressure = np.log10(self.mieTopPressure) if (top_pressure < 0): top_pressure = pressure_levels.min() P_left = pressure_levels[:(- 1)] P_right = pressure_levels[1:] P_range = sorted([top_pressure, bottom_pressure]) save_start = np.searchsorted(P_right, P_range[0], side='right') save_stop = np.searchsorted(P_left[1:], P_range[1], side='right') P_min = P_left[save_start:(save_stop + 1)] P_max = P_right[save_start:(save_stop + 1)] weight = (np.minimum(P_range[(- 1)], P_max) - np.maximum(P_range[0], P_min)) weight /= weight.max() sigma_xsec = np.zeros(shape=(self._nlayers, wngrid.shape[0])) sigma_xsec[save_start:(save_stop + 1)] = (weight[(:, None)] * self.mieMixing) sigma_xsec = sigma_xsec[::(- 1)] self.sigma_xsec = sigma_xsec (yield ('Flat', sigma_xsec))
Computes and flat absorbing opacity for the pressure regions given Parameters ---------- model: :class:`~taurex.model.model.ForwardModel` Forward model wngrid: :obj:`array` Wavenumber grid Yields ------ component: :obj:`tuple` of type (str, :obj:`array`) ``Flat`` and the weighted mie opacity.
taurex/contributions/flatmie.py
prepare_each
ucl-exoplanets/TauREx3_public
10
python
def prepare_each(self, model, wngrid): '\n Computes and flat absorbing opacity for\n the pressure regions given\n\n Parameters\n ----------\n model: :class:`~taurex.model.model.ForwardModel`\n Forward model\n\n wngrid: :obj:`array`\n Wavenumber grid\n\n Yields\n ------\n component: :obj:`tuple` of type (str, :obj:`array`)\n ``Flat`` and the weighted mie opacity.\n\n\n ' self._nlayers = model.nLayers self._ngrid = wngrid.shape[0] pressure_levels = np.log10(model.pressure.pressure_profile_levels[::(- 1)]) bottom_pressure = self.mieBottomPressure if (bottom_pressure < 0): bottom_pressure = pressure_levels.max() top_pressure = np.log10(self.mieTopPressure) if (top_pressure < 0): top_pressure = pressure_levels.min() P_left = pressure_levels[:(- 1)] P_right = pressure_levels[1:] P_range = sorted([top_pressure, bottom_pressure]) save_start = np.searchsorted(P_right, P_range[0], side='right') save_stop = np.searchsorted(P_left[1:], P_range[1], side='right') P_min = P_left[save_start:(save_stop + 1)] P_max = P_right[save_start:(save_stop + 1)] weight = (np.minimum(P_range[(- 1)], P_max) - np.maximum(P_range[0], P_min)) weight /= weight.max() sigma_xsec = np.zeros(shape=(self._nlayers, wngrid.shape[0])) sigma_xsec[save_start:(save_stop + 1)] = (weight[(:, None)] * self.mieMixing) sigma_xsec = sigma_xsec[::(- 1)] self.sigma_xsec = sigma_xsec (yield ('Flat', sigma_xsec))
def prepare_each(self, model, wngrid): '\n Computes and flat absorbing opacity for\n the pressure regions given\n\n Parameters\n ----------\n model: :class:`~taurex.model.model.ForwardModel`\n Forward model\n\n wngrid: :obj:`array`\n Wavenumber grid\n\n Yields\n ------\n component: :obj:`tuple` of type (str, :obj:`array`)\n ``Flat`` and the weighted mie opacity.\n\n\n ' self._nlayers = model.nLayers self._ngrid = wngrid.shape[0] pressure_levels = np.log10(model.pressure.pressure_profile_levels[::(- 1)]) bottom_pressure = self.mieBottomPressure if (bottom_pressure < 0): bottom_pressure = pressure_levels.max() top_pressure = np.log10(self.mieTopPressure) if (top_pressure < 0): top_pressure = pressure_levels.min() P_left = pressure_levels[:(- 1)] P_right = pressure_levels[1:] P_range = sorted([top_pressure, bottom_pressure]) save_start = np.searchsorted(P_right, P_range[0], side='right') save_stop = np.searchsorted(P_left[1:], P_range[1], side='right') P_min = P_left[save_start:(save_stop + 1)] P_max = P_right[save_start:(save_stop + 1)] weight = (np.minimum(P_range[(- 1)], P_max) - np.maximum(P_range[0], P_min)) weight /= weight.max() sigma_xsec = np.zeros(shape=(self._nlayers, wngrid.shape[0])) sigma_xsec[save_start:(save_stop + 1)] = (weight[(:, None)] * self.mieMixing) sigma_xsec = sigma_xsec[::(- 1)] self.sigma_xsec = sigma_xsec (yield ('Flat', sigma_xsec))<|docstring|>Computes and flat absorbing opacity for the pressure regions given Parameters ---------- model: :class:`~taurex.model.model.ForwardModel` Forward model wngrid: :obj:`array` Wavenumber grid Yields ------ component: :obj:`tuple` of type (str, :obj:`array`) ``Flat`` and the weighted mie opacity.<|endoftext|>
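The core of prepare_each is weighting each atmospheric layer by how much of it overlaps the absorbing pressure region; the same interval-overlap weighting on toy log-pressure edges, without the searchsorted bookkeeping:

import numpy as np

edges = np.array([0.0, 1.0, 2.0, 3.0, 4.0])   # layer boundaries in log-pressure (toy values)
lo, hi = 1.5, 3.2                              # absorbing region

left, right = edges[:-1], edges[1:]
overlap = np.minimum(hi, right) - np.maximum(lo, left)
weight = np.clip(overlap, 0.0, None)           # layers outside the region get weight 0
weight /= weight.max()                         # normalised as in the record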
b9d5e19d475f55978824f8d0e9c9f4915d3a46e6905ffe0d99ad4181c8cfc155
def __init__(self, account_id=None, document_id=None, external_id=None, signer_id=None, external_signer_id=None, error=None, sign_success=None, expires=None, aborted=None, additional_properties={}): 'Constructor for the JwtPayload class' self.account_id = account_id self.document_id = document_id self.external_id = external_id self.signer_id = signer_id self.external_signer_id = external_signer_id self.error = error self.sign_success = sign_success self.expires = (APIHelper.RFC3339DateTime(expires) if expires else None) self.aborted = aborted self.additional_properties = additional_properties
Constructor for the JwtPayload class
idfy_rest_client/models/jwt_payload.py
__init__
dealflowteam/Idfy
0
python
def __init__(self, account_id=None, document_id=None, external_id=None, signer_id=None, external_signer_id=None, error=None, sign_success=None, expires=None, aborted=None, additional_properties={}): self.account_id = account_id self.document_id = document_id self.external_id = external_id self.signer_id = signer_id self.external_signer_id = external_signer_id self.error = error self.sign_success = sign_success self.expires = (APIHelper.RFC3339DateTime(expires) if expires else None) self.aborted = aborted self.additional_properties = additional_properties
def __init__(self, account_id=None, document_id=None, external_id=None, signer_id=None, external_signer_id=None, error=None, sign_success=None, expires=None, aborted=None, additional_properties={}): self.account_id = account_id self.document_id = document_id self.external_id = external_id self.signer_id = signer_id self.external_signer_id = external_signer_id self.error = error self.sign_success = sign_success self.expires = (APIHelper.RFC3339DateTime(expires) if expires else None) self.aborted = aborted self.additional_properties = additional_properties<|docstring|>Constructor for the JwtPayload class<|endoftext|>
dc3c69c195d84776dc8c0d2ee75525a35d738145a857e9e6cd3464d9280c1c3b
@classmethod def from_dictionary(cls, dictionary): "Creates an instance of this model from a dictionary\n\n Args:\n dictionary (dictionary): A dictionary representation of the object as\n obtained from the deserialization of the server's response. The keys\n MUST match property names in the API description.\n\n Returns:\n object: An instance of this structure class.\n\n " if (dictionary is None): return None account_id = dictionary.get('accountId') document_id = dictionary.get('documentId') external_id = dictionary.get('externalId') signer_id = dictionary.get('signerId') external_signer_id = dictionary.get('externalSignerId') error = (idfy_rest_client.models.signature_error.SignatureError.from_dictionary(dictionary.get('error')) if dictionary.get('error') else None) sign_success = (idfy_rest_client.models.sign_success.SignSuccess.from_dictionary(dictionary.get('signSuccess')) if dictionary.get('signSuccess') else None) expires = (APIHelper.RFC3339DateTime.from_value(dictionary.get('expires')).datetime if dictionary.get('expires') else None) aborted = dictionary.get('aborted') for key in cls._names.values(): if (key in dictionary): del dictionary[key] return cls(account_id, document_id, external_id, signer_id, external_signer_id, error, sign_success, expires, aborted, dictionary)
Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.
idfy_rest_client/models/jwt_payload.py
from_dictionary
dealflowteam/Idfy
0
python
@classmethod def from_dictionary(cls, dictionary): "Creates an instance of this model from a dictionary\n\n Args:\n dictionary (dictionary): A dictionary representation of the object as\n obtained from the deserialization of the server's response. The keys\n MUST match property names in the API description.\n\n Returns:\n object: An instance of this structure class.\n\n " if (dictionary is None): return None account_id = dictionary.get('accountId') document_id = dictionary.get('documentId') external_id = dictionary.get('externalId') signer_id = dictionary.get('signerId') external_signer_id = dictionary.get('externalSignerId') error = (idfy_rest_client.models.signature_error.SignatureError.from_dictionary(dictionary.get('error')) if dictionary.get('error') else None) sign_success = (idfy_rest_client.models.sign_success.SignSuccess.from_dictionary(dictionary.get('signSuccess')) if dictionary.get('signSuccess') else None) expires = (APIHelper.RFC3339DateTime.from_value(dictionary.get('expires')).datetime if dictionary.get('expires') else None) aborted = dictionary.get('aborted') for key in cls._names.values(): if (key in dictionary): del dictionary[key] return cls(account_id, document_id, external_id, signer_id, external_signer_id, error, sign_success, expires, aborted, dictionary)
@classmethod def from_dictionary(cls, dictionary): "Creates an instance of this model from a dictionary\n\n Args:\n dictionary (dictionary): A dictionary representation of the object as\n obtained from the deserialization of the server's response. The keys\n MUST match property names in the API description.\n\n Returns:\n object: An instance of this structure class.\n\n " if (dictionary is None): return None account_id = dictionary.get('accountId') document_id = dictionary.get('documentId') external_id = dictionary.get('externalId') signer_id = dictionary.get('signerId') external_signer_id = dictionary.get('externalSignerId') error = (idfy_rest_client.models.signature_error.SignatureError.from_dictionary(dictionary.get('error')) if dictionary.get('error') else None) sign_success = (idfy_rest_client.models.sign_success.SignSuccess.from_dictionary(dictionary.get('signSuccess')) if dictionary.get('signSuccess') else None) expires = (APIHelper.RFC3339DateTime.from_value(dictionary.get('expires')).datetime if dictionary.get('expires') else None) aborted = dictionary.get('aborted') for key in cls._names.values(): if (key in dictionary): del dictionary[key] return cls(account_id, document_id, external_id, signer_id, external_signer_id, error, sign_success, expires, aborted, dictionary)<|docstring|>Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.<|endoftext|>
6a0ebe3d8c82ef380cbf357b98235e7d3b36597612a2759116ce150525e331b0
@click.command() @click.argument('dir', default='env') @click.option('-n', '--name', metavar='NAME', help='Environment name (default is env parent directory name).') @click.option('-p', '--python', metavar='VERSION', help='Version of Python to use for the environment.') @click.option('-g', '--guild', metavar='VERSION_OR_PATH', help='Version of Guild AI to use for the environment. By default, the active version of Guild is installed. This value may alternatively be a path to a Guild wheel distribution.') @click.option('-r', '--requirement', metavar='REQ', multiple=True, help='Install required package or packages defined in a file. May be used multiple times.') @click.option('-P', '--path', metavar='DIR', multiple=True, help='Include DIR as a Python path in the environment.') @click.option('--no-reqs', is_flag=True, help="Don't install from requirements.txt or guild.yml in environment parent directory.") @click.option('--tensorflow', metavar='PACKAGE', help='Install PACKAGE for TensorFlow. By default installs the package suitable for the system based on GPU support.') @click.option('--skip-tensorflow', is_flag=True, help="Don't install TensorFlow.") @click.option('-l', '--local-resource-cache', is_flag=True, help='Use a local cache when initializing an environment.') @click.option('-y', '--yes', is_flag=True, help='Initialize a Guild environment without prompting.') @click.option('--no-progress', is_flag=True, help="Don't show progress when installing environment packages.") @click_util.use_args def init(args): 'Initialize a Guild environment.\n\n `init` initializes a Guild environment in `DIR`, which is the\n current directory by default.\n\n `init` creates a virtual environment in `DIR` using `virtualenv`.\n\n Use `--python` to specify the Python interpreter to use within the\n generated virtual environment. By default, the default Python\n interpreter for `virtualenv` is used unless `python` is explicitly\n listed as a requirement. If `no-venv` is specified, `--python` is\n ignored.\n\n ### Requirements\n\n By default, any required packages listed under packages.requires\n in `guild.yml` in the environment parent directory are installed\n into the environment. Use `--no-reqs` to suppress this behavior.\n\n Additionally, packages defined in `requirements.txt` in the\n environment parent directory will be installed. Use `--no-reqs` to\n suppress this behavior.\n\n Note that packages defined in `guild.yml` use Guild package names\n while packages defined in `requirements.txt` use PyPI package\n names.\n\n For information in requirements files, see:\n\n https://pip.readthedocs.io/en/1.1/requirements.html\n\n You may explicitly specify requirements file using `-r` or\n `--requirement`. If `-r, --requirement` is specified, Guild will\n not automatically install packages in `requirements.txt` -- that\n file must be specified explicitly in the command.\n\n ### Guild AI\n\n By default `init` installs the active version of Guild AI in the\n initialized environment. To install a different version, or to\n install a Guild wheel distribution file use the `--guild` option.\n\n ### TensorFlow\n\n TensorFlow is installed to the environment unless\n `--skip-tensorflow` is specified. The TensorFlow package to\n install can be specified using `--tensorflow`. By default, Guild\n installs the TensorFlow package suited for the system:\n ``tensorflow-gpu`` if a GPU is available, otherwise\n ``tensorflow``.\n\n ### Resource cache\n\n By default resources are cached and shared at the user level in\n `~/.guild/cache/resources` so that resources downloaded from one\n environment are available to other environments. You can modify\n this behavior to have all resources downloaded local to the\n environment by specifying `--local-resource-cache`.\n\n ' from . import init_impl init_impl.main(args)
Initialize a Guild environment. `init` initializes a Guild environment in `DIR`, which is the current directory by default. `init` creates a virtual environment in `DIR` using `virtualenv`. Use `--python` to specify the Python interpreter to use within the generated virtual environment. By default, the default Python interpreter for `virtualenv` is used unless `python` is explicitly listed as a requirement. If `no-venv` is specified, `--python` is ignored. ### Requirements By default, any required packages listed under packages.requires in `guild.yml` in the environment parent directory are installed into the environment. Use `--no-reqs` to suppress this behavior. Additionally, packages defined in `requirements.txt` in the environment parent directory will be installed. Use `--no-reqs` to suppress this behavior. Note that packages defined in `guild.yml` use Guild package names while packages defined in `requirements.txt` use PyPI package names. For information in requirements files, see: https://pip.readthedocs.io/en/1.1/requirements.html You may explicitly specify requirements file using `-r` or `--requirement`. If `-r, --requirement` is specified, Guild will not automatically install packages in `requirements.txt` -- that file must be specified explicitly in the command. ### Guild AI By default `init` installs the active version of Guild AI in the initialized environment. To install a different version, or to install a Guild wheel distribution file use the `--guild` option. ### TensorFlow TensorFlow is installed to the environment unless `--skip-tensorflow` is specified. The TensorFlow package to install can be specified using `--tensorflow`. By default, Guild installs the TensorFlow package suited for the system: ``tensorflow-gpu`` if a GPU is available, otherwise ``tensorflow``. ### Resource cache By default resources are cached and shared at the user level in `~/.guild/cache/resources` so that resources downloaded from one environment are available to other environments. You can modify this behavior to have all resources downloaded local to the environment by specifying `--local-resource-cache`.
guild/commands/init.py
init
flamato/guildai
1
python
@click.command() @click.argument('dir', default='env') @click.option('-n', '--name', metavar='NAME', help='Environment name (default is env parent directory name).') @click.option('-p', '--python', metavar='VERSION', help='Version of Python to use for the environment.') @click.option('-g', '--guild', metavar='VERSION_OR_PATH', help='Version of Guild AI to use for the environment. By default, the active version of Guild is installed. This value may alternatively be a path to a Guild wheel distribution.') @click.option('-r', '--requirement', metavar='REQ', multiple=True, help='Install required package or packages defined in a file. May be used multiple times.') @click.option('-P', '--path', metavar='DIR', multiple=True, help='Include DIR as a Python path in the environment.') @click.option('--no-reqs', is_flag=True, help="Don't install from requirements.txt or guild.yml in environment parent directory.") @click.option('--tensorflow', metavar='PACKAGE', help='Install PACKAGE for TensorFlow. By default installs the package suitable for the system based on GPU support.') @click.option('--skip-tensorflow', is_flag=True, help="Don't install TensorFlow.") @click.option('-l', '--local-resource-cache', is_flag=True, help='Use a local cache when initializing an environment.') @click.option('-y', '--yes', is_flag=True, help='Initialize a Guild environment without prompting.') @click.option('--no-progress', is_flag=True, help="Don't show progress when installing environment packages.") @click_util.use_args def init(args): 'Initialize a Guild environment.\n\n `init` initializes a Guild environment in `DIR`, which is the\n current directory by default.\n\n `init` creates a virtual environment in `DIR` using `virtualenv`.\n\n Use `--python` to specify the Python interpreter to use within the\n generated virtual environment. By default, the default Python\n interpreter for `virtualenv` is used unless `python` is explicitly\n listed as a requirement. If `no-venv` is specified, `--python` is\n ignored.\n\n ### Requirements\n\n By default, any required packages listed under packages.requires\n in `guild.yml` in the environment parent directory are installed\n into the environment. Use `--no-reqs` to suppress this behavior.\n\n Additionally, packages defined in `requirements.txt` in the\n environment parent directory will be installed. Use `--no-reqs` to\n suppress this behavior.\n\n Note that packages defined in `guild.yml` use Guild package names\n while packages defined in `requirements.txt` use PyPI package\n names.\n\n For information in requirements files, see:\n\n https://pip.readthedocs.io/en/1.1/requirements.html\n\n You may explicitly specify requirements file using `-r` or\n `--requirement`. If `-r, --requirement` is specified, Guild will\n not automatically install packages in `requirements.txt` -- that\n file must be specified explicitly in the command.\n\n ### Guild AI\n\n By default `init` installs the active version of Guild AI in the\n initialized environment. To install a different version, or to\n install a Guild wheel distribution file use the `--guild` option.\n\n ### TensorFlow\n\n TensorFlow is installed to the environment unless\n `--skip-tensorflow` is specified. The TensorFlow package to\n install can be specified using `--tensorflow`. By default, Guild\n installs the TensorFlow package suited for the system:\n ``tensorflow-gpu`` if a GPU is available, otherwise\n ``tensorflow``.\n\n ### Resource cache\n\n By default resources are cached and shared at the user level in\n `~/.guild/cache/resources` so that resources downloaded from one\n environment are available to other environments. You can modify\n this behavior to have all resources downloaded local to the\n environment by specifying `--local-resource-cache`.\n\n ' from . import init_impl init_impl.main(args)
@click.command() @click.argument('dir', default='env') @click.option('-n', '--name', metavar='NAME', help='Environment name (default is env parent directory name).') @click.option('-p', '--python', metavar='VERSION', help='Version of Python to use for the environment.') @click.option('-g', '--guild', metavar='VERSION_OR_PATH', help='Version of Guild AI to use for the environment. By default, the active version of Guild is installed. This value may alternatively be a path to a Guild wheel distribution.') @click.option('-r', '--requirement', metavar='REQ', multiple=True, help='Install required package or packages defined in a file. May be used multiple times.') @click.option('-P', '--path', metavar='DIR', multiple=True, help='Include DIR as a Python path in the environment.') @click.option('--no-reqs', is_flag=True, help="Don't install from requirements.txt or guild.yml in environment parent directory.") @click.option('--tensorflow', metavar='PACKAGE', help='Install PACKAGE for TensorFlow. By default installs the package suitable for the system based on GPU support.') @click.option('--skip-tensorflow', is_flag=True, help="Don't install TensorFlow.") @click.option('-l', '--local-resource-cache', is_flag=True, help='Use a local cache when initializing an environment.') @click.option('-y', '--yes', is_flag=True, help='Initialize a Guild environment without prompting.') @click.option('--no-progress', is_flag=True, help="Don't show progress when installing environment packages.") @click_util.use_args def init(args): 'Initialize a Guild environment.\n\n `init` initializes a Guild environment in `DIR`, which is the\n current directory by default.\n\n `init` creates a virtual environment in `DIR` using `virtualenv`.\n\n Use `--python` to specify the Python interpreter to use within the\n generated virtual environment. By default, the default Python\n interpreter for `virtualenv` is used unless `python` is explicitly\n listed as a requirement. If `no-venv` is specified, `--python` is\n ignored.\n\n ### Requirements\n\n By default, any required packages listed under packages.requires\n in `guild.yml` in the environment parent directory are installed\n into the environment. Use `--no-reqs` to suppress this behavior.\n\n Additionally, packages defined in `requirements.txt` in the\n environment parent directory will be installed. Use `--no-reqs` to\n suppress this behavior.\n\n Note that packages defined in `guild.yml` use Guild package names\n while packages defined in `requirements.txt` use PyPI package\n names.\n\n For information in requirements files, see:\n\n https://pip.readthedocs.io/en/1.1/requirements.html\n\n You may explicitly specify requirements file using `-r` or\n `--requirement`. If `-r, --requirement` is specified, Guild will\n not automatically install packages in `requirements.txt` -- that\n file must be specified explicitly in the command.\n\n ### Guild AI\n\n By default `init` installs the active version of Guild AI in the\n initialized environment. To install a different version, or to\n install a Guild wheel distribution file use the `--guild` option.\n\n ### TensorFlow\n\n TensorFlow is installed to the environment unless\n `--skip-tensorflow` is specified. The TensorFlow package to\n install can be specified using `--tensorflow`. By default, Guild\n installs the TensorFlow package suited for the system:\n ``tensorflow-gpu`` if a GPU is available, otherwise\n ``tensorflow``.\n\n ### Resource cache\n\n By default resources are cached and shared at the user level in\n `~/.guild/cache/resources` so that resources downloaded from one\n environment are available to other environments. You can modify\n this behavior to have all resources downloaded local to the\n environment by specifying `--local-resource-cache`.\n\n ' from . import init_impl init_impl.main(args)<|docstring|>Initialize a Guild environment. `init` initializes a Guild environment in `DIR`, which is the current directory by default. `init` creates a virtual environment in `DIR` using `virtualenv`. Use `--python` to specify the Python interpreter to use within the generated virtual environment. By default, the default Python interpreter for `virtualenv` is used unless `python` is explicitly listed as a requirement. If `no-venv` is specified, `--python` is ignored. ### Requirements By default, any required packages listed under packages.requires in `guild.yml` in the environment parent directory are installed into the environment. Use `--no-reqs` to suppress this behavior. Additionally, packages defined in `requirements.txt` in the environment parent directory will be installed. Use `--no-reqs` to suppress this behavior. Note that packages defined in `guild.yml` use Guild package names while packages defined in `requirements.txt` use PyPI package names. For information in requirements files, see: https://pip.readthedocs.io/en/1.1/requirements.html You may explicitly specify requirements file using `-r` or `--requirement`. If `-r, --requirement` is specified, Guild will not automatically install packages in `requirements.txt` -- that file must be specified explicitly in the command. ### Guild AI By default `init` installs the active version of Guild AI in the initialized environment. To install a different version, or to install a Guild wheel distribution file use the `--guild` option. ### TensorFlow TensorFlow is installed to the environment unless `--skip-tensorflow` is specified. The TensorFlow package to install can be specified using `--tensorflow`. By default, Guild installs the TensorFlow package suited for the system: ``tensorflow-gpu`` if a GPU is available, otherwise ``tensorflow``. ### Resource cache By default resources are cached and shared at the user level in `~/.guild/cache/resources` so that resources downloaded from one environment are available to other environments. You can modify this behavior to have all resources downloaded local to the environment by specifying `--local-resource-cache`.<|endoftext|>
11d30c192889d10b4d2a0c840d19812496fef21654e5734c2ff0eda9f97a8e3e
def test_invalid(self): 'Invalid pipeline param name and op_name.' with self.assertRaises(ValueError): p = PipelineParam(name='123_abc')
Invalid pipeline param name and op_name.
sdk/python/tests/dsl/pipeline_param_tests.py
test_invalid
awesome-archive/pipelines
2
python
def test_invalid(self): with self.assertRaises(ValueError): p = PipelineParam(name='123_abc')
def test_invalid(self): with self.assertRaises(ValueError): p = PipelineParam(name='123_abc')<|docstring|>Invalid pipeline param name and op_name.<|endoftext|>
689fa98e57057a93774e13840a518d78d646fcb9db4484c98d86d77b76f7b516
def test_str_repr(self): 'Test string representation.' p = PipelineParam(name='param1', op_name='op1') self.assertEqual('{{pipelineparam:op=op1;name=param1;value=}}', str(p)) p = PipelineParam(name='param2') self.assertEqual('{{pipelineparam:op=;name=param2;value=}}', str(p)) p = PipelineParam(name='param3', value='value3') self.assertEqual('{{pipelineparam:op=;name=param3;value=value3}}', str(p))
Test string representation.
sdk/python/tests/dsl/pipeline_param_tests.py
test_str_repr
awesome-archive/pipelines
2
python
def test_str_repr(self): p = PipelineParam(name='param1', op_name='op1') self.assertEqual('{{pipelineparam:op=op1;name=param1;value=}}', str(p)) p = PipelineParam(name='param2') self.assertEqual('{{pipelineparam:op=;name=param2;value=}}', str(p)) p = PipelineParam(name='param3', value='value3') self.assertEqual('{{pipelineparam:op=;name=param3;value=value3}}', str(p))
def test_str_repr(self): p = PipelineParam(name='param1', op_name='op1') self.assertEqual('{{pipelineparam:op=op1;name=param1;value=}}', str(p)) p = PipelineParam(name='param2') self.assertEqual('{{pipelineparam:op=;name=param2;value=}}', str(p)) p = PipelineParam(name='param3', value='value3') self.assertEqual('{{pipelineparam:op=;name=param3;value=value3}}', str(p))<|docstring|>Test string representation.<|endoftext|>
3646d646fd9d26c6faeae5901453bfb29f56cc9c9a658ce4759d6629900c295e
def load_indicators(): 'Load indicators from file.\n\n :return:\n ' ti = list() try: ti_file: Path = Path(__file__).with_name('json').joinpath('tv_indicators.json') if (ti_file.exists() and ti_file.is_file()): text = ti_file.read_text() ti = json.loads(text) except (TypeError, ValueError, IOError) as err: print(str(err)) raise err return ti
Load indicators from file. :return:
pytvc/cli.py
load_indicators
havocesp/pytvc
12
python
def load_indicators(): 'Load indicators from file.\n\n :return:\n ' ti = list() try: ti_file: Path = Path(__file__).with_name('json').joinpath('tv_indicators.json') if (ti_file.exists() and ti_file.is_file()): text = ti_file.read_text() ti = json.loads(text) except (TypeError, ValueError, IOError) as err: print(str(err)) raise err return ti
def load_indicators(): 'Load indicators from file.\n\n :return:\n ' ti = list() try: ti_file: Path = Path(__file__).with_name('json').joinpath('tv_indicators.json') if (ti_file.exists() and ti_file.is_file()): text = ti_file.read_text() ti = json.loads(text) except (TypeError, ValueError, IOError) as err: print(str(err)) raise err return ti<|docstring|>Load indicators from file. :return:<|endoftext|>
153ad8de4b913930279e729ac841089f1650d23bd1f1ae44f8a40efc38f28022
def list_indicators() -> int: 'List all supported indicators.\n\n :return: 0 if all was fine.\n ' ti = load_indicators() indicators = [f'- {v:<30}' for v in ti] indicators.sort() for i in indicators: print(i) return 0
List all supported indicators. :return: 0 if all was fine.
pytvc/cli.py
list_indicators
havocesp/pytvc
12
python
def list_indicators() -> int: 'List all supported indicators.\n\n :return: 0 if all was fine.\n ' ti = load_indicators() indicators = [f'- {v:<30}' for v in ti] indicators.sort() for i in indicators: print(i) return 0
def list_indicators() -> int: 'List all supported indicators.\n\n :return: 0 if all was fine.\n ' ti = load_indicators() indicators = [f'- {v:<30}' for v in ti] indicators.sort() for i in indicators: print(i) return 0<|docstring|>List all supported indicators. :return: 0 if all was fine.<|endoftext|>
b932f3dda037f12cfb6f41ce264ad66e553213b9a2ce318e1340f7cfa29c7248
def main(args) -> int: 'TradingView Chart parserr.\n \n :param Namespace args:\n :return:\n ' tvc = TradingViewChart() tvc.launch(**vars(args)) return 0
TradingView Chart parserr. :param Namespace args: :return:
pytvc/cli.py
main
havocesp/pytvc
12
python
def main(args) -> int: 'TradingView Chart parserr.\n \n :param Namespace args:\n :return:\n ' tvc = TradingViewChart() tvc.launch(**vars(args)) return 0
def main(args) -> int: 'TradingView Chart parserr.\n \n :param Namespace args:\n :return:\n ' tvc = TradingViewChart() tvc.launch(**vars(args)) return 0<|docstring|>TradingView Chart parserr. :param Namespace args: :return:<|endoftext|>
aacd552f635f6c6a0a64708728b14f03d00f99df080dbb827f28256341916eee
def run(): 'As CLI starting point, this function dispatch argument parsing to be supplied to main function.' base_markets = ['BTC', 'TUSD', 'USDT', 'USD', 'EUR', 'PAX', 'USDS'] exchanges = ['binance', 'hitbtc2', 'poloniex', 'kraken', 'coinbase', 'cexio'] exchanges = {e: e.strip('_12345 ') for e in exchanges} parser = argparse.ArgumentParser() parser.add_argument('-l, --list-indicators', action='store_true', dest='list_indicators', help='List all supported technical indicators') parser.add_argument('-e, --exchange', default='binance', dest='exchange', nargs='?', choices=exchanges, help='Exchange used for watch list symbols.') parser.add_argument('-q', '--quote-currency', nargs='?', default='BTC', choices=base_markets, help='Base market currency (default BTC)') parser.add_argument('-i', '--indicator', metavar='TI', nargs='*', choices=list(load_indicators().keys()), help='Technical analysis indicators to be showed within chart.') parser.add_argument('-m', '--min-volume', type=float, nargs='?', dest='min_volume', default=0.0, help='Min. 24h volume filter.') args = parser.parse_args() if (args.list_indicators is True): r_code = list_indicators() else: r_code = main(args) sys.exit(r_code)
As CLI starting point, this function dispatch argument parsing to be supplied to main function.
pytvc/cli.py
run
havocesp/pytvc
12
python
def run(): base_markets = ['BTC', 'TUSD', 'USDT', 'USD', 'EUR', 'PAX', 'USDS'] exchanges = ['binance', 'hitbtc2', 'poloniex', 'kraken', 'coinbase', 'cexio'] exchanges = {e: e.strip('_12345 ') for e in exchanges} parser = argparse.ArgumentParser() parser.add_argument('-l, --list-indicators', action='store_true', dest='list_indicators', help='List all supported technical indicators') parser.add_argument('-e, --exchange', default='binance', dest='exchange', nargs='?', choices=exchanges, help='Exchange used for watch list symbols.') parser.add_argument('-q', '--quote-currency', nargs='?', default='BTC', choices=base_markets, help='Base market currency (default BTC)') parser.add_argument('-i', '--indicator', metavar='TI', nargs='*', choices=list(load_indicators().keys()), help='Technical analysis indicators to be showed within chart.') parser.add_argument('-m', '--min-volume', type=float, nargs='?', dest='min_volume', default=0.0, help='Min. 24h volume filter.') args = parser.parse_args() if (args.list_indicators is True): r_code = list_indicators() else: r_code = main(args) sys.exit(r_code)
def run(): base_markets = ['BTC', 'TUSD', 'USDT', 'USD', 'EUR', 'PAX', 'USDS'] exchanges = ['binance', 'hitbtc2', 'poloniex', 'kraken', 'coinbase', 'cexio'] exchanges = {e: e.strip('_12345 ') for e in exchanges} parser = argparse.ArgumentParser() parser.add_argument('-l, --list-indicators', action='store_true', dest='list_indicators', help='List all supported technical indicators') parser.add_argument('-e, --exchange', default='binance', dest='exchange', nargs='?', choices=exchanges, help='Exchange used for watch list symbols.') parser.add_argument('-q', '--quote-currency', nargs='?', default='BTC', choices=base_markets, help='Base market currency (default BTC)') parser.add_argument('-i', '--indicator', metavar='TI', nargs='*', choices=list(load_indicators().keys()), help='Technical analysis indicators to be showed within chart.') parser.add_argument('-m', '--min-volume', type=float, nargs='?', dest='min_volume', default=0.0, help='Min. 24h volume filter.') args = parser.parse_args() if (args.list_indicators is True): r_code = list_indicators() else: r_code = main(args) sys.exit(r_code)<|docstring|>As CLI starting point, this function dispatch argument parsing to be supplied to main function.<|endoftext|>
360dba7075008b5182ff54dff468af1e5cd97787bff6a069d85f2f46090c21c2
def get_location(loc): '\n currently working only on my computer\n english Model\n english.muc.7class.distsim.crf.ser.gz\n german Models\n german.dewac_175m_600.crf.ser.gz\n german.hgc_175m_600.crf.ser.gz\n ' st = StanfordNERTagger('stanford-ner-2015-12-09/classifiers/english.muc.7class.distsim.crf.ser.gz', 'stanford-ner-2015-12-09/stanford-ner-3.6.0.jar') loc_ner = st.tag(loc) "\n might be faster starting from back to front\n 'LOCATION' for English\n 'I-LOC' for German\n " loc_tuples = [item[0] for item in loc_ner if ('LOCATION' in item)] try: location = loc_tuples[0] if (len(loc_tuples) > 1): for i in range(1, len(loc_tuples)): location += (' ' + loc_tuples[i]) except IndexError: return None return location
currently working only on my computer english Model english.muc.7class.distsim.crf.ser.gz german Models german.dewac_175m_600.crf.ser.gz german.hgc_175m_600.crf.ser.gz
extractor.py
get_location
phucdev/weatherbot
0
python
def get_location(loc): '\n currently working only on my computer\n english Model\n english.muc.7class.distsim.crf.ser.gz\n german Models\n german.dewac_175m_600.crf.ser.gz\n german.hgc_175m_600.crf.ser.gz\n ' st = StanfordNERTagger('stanford-ner-2015-12-09/classifiers/english.muc.7class.distsim.crf.ser.gz', 'stanford-ner-2015-12-09/stanford-ner-3.6.0.jar') loc_ner = st.tag(loc) "\n might be faster starting from back to front\n 'LOCATION' for English\n 'I-LOC' for German\n " loc_tuples = [item[0] for item in loc_ner if ('LOCATION' in item)] try: location = loc_tuples[0] if (len(loc_tuples) > 1): for i in range(1, len(loc_tuples)): location += (' ' + loc_tuples[i]) except IndexError: return None return location
def get_location(loc): '\n currently working only on my computer\n english Model\n english.muc.7class.distsim.crf.ser.gz\n german Models\n german.dewac_175m_600.crf.ser.gz\n german.hgc_175m_600.crf.ser.gz\n ' st = StanfordNERTagger('stanford-ner-2015-12-09/classifiers/english.muc.7class.distsim.crf.ser.gz', 'stanford-ner-2015-12-09/stanford-ner-3.6.0.jar') loc_ner = st.tag(loc) "\n might be faster starting from back to front\n 'LOCATION' for English\n 'I-LOC' for German\n " loc_tuples = [item[0] for item in loc_ner if ('LOCATION' in item)] try: location = loc_tuples[0] if (len(loc_tuples) > 1): for i in range(1, len(loc_tuples)): location += (' ' + loc_tuples[i]) except IndexError: return None return location<|docstring|>currently working only on my computer english Model english.muc.7class.distsim.crf.ser.gz german Models german.dewac_175m_600.crf.ser.gz german.hgc_175m_600.crf.ser.gz<|endoftext|>
6e691b4afac0e44e94ed72229fd79a3ae83be04d4f26008245f0ce566c7d973e
def flask_post_json(): 'Ah the joys of frameworks! They do so much work for you\n that they get in the way of sane operation!' if (request.json != None): return request.json elif ((request.data != None) and (request.data.decode('utf8') != u'')): return json.loads(request.data.decode('utf8')) else: return json.loads(request.form.keys()[0])
Ah the joys of frameworks! They do so much work for you that they get in the way of sane operation!
server.py
flask_post_json
dcones/CMPUT404-assignment-ajax
1
python
def flask_post_json(): 'Ah the joys of frameworks! They do so much work for you\n that they get in the way of sane operation!' if (request.json != None): return request.json elif ((request.data != None) and (request.data.decode('utf8') != u)): return json.loads(request.data.decode('utf8')) else: return json.loads(request.form.keys()[0])
def flask_post_json(): 'Ah the joys of frameworks! They do so much work for you\n that they get in the way of sane operation!' if (request.json != None): return request.json elif ((request.data != None) and (request.data.decode('utf8') != u)): return json.loads(request.data.decode('utf8')) else: return json.loads(request.form.keys()[0])<|docstring|>Ah the joys of frameworks! They do so much work for you that they get in the way of sane operation!<|endoftext|>
9dca322af1c95789df86925512386d1aa5155f41fb9ec3ab474d222c3cc149f8
def __init__(self, byte: int, line: int, index: int, args: List[Token], resolved_vars: dict={}, resolved_gotos: dict={}): 'Represents a compilable line.\n\n Args:\n byte (int): The bytecode byte of the instruction associated.\n ' self.byte = byte self.args = args self.line = line self.index = index self.vars = resolved_vars self.gotos = resolved_gotos
Represents a compilable line. Args: byte (int): The bytecode byte of the instruction associated.
assemblyish/compiler.py
__init__
vcokltfre/assemblyish
1
python
def __init__(self, byte: int, line: int, index: int, args: List[Token], resolved_vars: dict={}, resolved_gotos: dict={}): 'Represents a compilable line.\n\n Args:\n byte (int): The bytecode byte of the instruction associated.\n ' self.byte = byte self.args = args self.line = line self.index = index self.vars = resolved_vars self.gotos = resolved_gotos
def __init__(self, byte: int, line: int, index: int, args: List[Token], resolved_vars: dict={}, resolved_gotos: dict={}): 'Represents a compilable line.\n\n Args:\n byte (int): The bytecode byte of the instruction associated.\n ' self.byte = byte self.args = args self.line = line self.index = index self.vars = resolved_vars self.gotos = resolved_gotos<|docstring|>Represents a compilable line. Args: byte (int): The bytecode byte of the instruction associated.<|endoftext|>
e439d61e76b4636fc0521f928161011d7c151ea40fd8f512276ad88ef749d82f
def __init__(self, filename: str, tokens: List[Token]): "A compiler class for assemblyish.\n\n Args:\n filename (str): The filename of the file being compiled. Used for error logging.\n tokens (List[Token]): A list of Token objects representing the program's code.\n " self.filename = filename self.tokens = tokens self.variables = {} self.gotos = {} self.var_goto_id = 1 self.instrs = [] self.lines = self.getlines()
A compiler class for assemblyish. Args: filename (str): The filename of the file being compiled. Used for error logging. tokens (List[Token]): A list of Token objects representing the program's code.
assemblyish/compiler.py
__init__
vcokltfre/assemblyish
1
python
def __init__(self, filename: str, tokens: List[Token]): "A compiler class for assemblyish.\n\n Args:\n filename (str): The filename of the file being compiled. Used for error logging.\n tokens (List[Token]): A list of Token objects representing the program's code.\n " self.filename = filename self.tokens = tokens self.variables = {} self.gotos = {} self.var_goto_id = 1 self.instrs = [] self.lines = self.getlines()
def __init__(self, filename: str, tokens: List[Token]): "A compiler class for assemblyish.\n\n Args:\n filename (str): The filename of the file being compiled. Used for error logging.\n tokens (List[Token]): A list of Token objects representing the program's code.\n " self.filename = filename self.tokens = tokens self.variables = {} self.gotos = {} self.var_goto_id = 1 self.instrs = [] self.lines = self.getlines()<|docstring|>A compiler class for assemblyish. Args: filename (str): The filename of the file being compiled. Used for error logging. tokens (List[Token]): A list of Token objects representing the program's code.<|endoftext|>
d33d96144f3fc75049c4af3beafe357192a1031f88ab8e0bd8f3948b63422447
def calc_cos(self, batch_size, n_tau=32): '\n Calculating the cosinus values depending on the number of tau samples\n ' taus = th.rand(batch_size, n_tau).unsqueeze((- 1)).to(self.device) cos = th.cos((taus * self.pis.to(self.device))) assert (cos.shape == (batch_size, n_tau, self.n_cos)), 'cos shape is incorrect' return (cos, taus)
Calculating the cosinus values depending on the number of tau samples
custom_algos/d3pg/policies.py
calc_cos
vinerich/rl-baselines3-zoo
0
python
def calc_cos(self, batch_size, n_tau=32): '\n \n ' taus = th.rand(batch_size, n_tau).unsqueeze((- 1)).to(self.device) cos = th.cos((taus * self.pis.to(self.device))) assert (cos.shape == (batch_size, n_tau, self.n_cos)), 'cos shape is incorrect' return (cos, taus)
def calc_cos(self, batch_size, n_tau=32): '\n \n ' taus = th.rand(batch_size, n_tau).unsqueeze((- 1)).to(self.device) cos = th.cos((taus * self.pis.to(self.device))) assert (cos.shape == (batch_size, n_tau, self.n_cos)), 'cos shape is incorrect' return (cos, taus)<|docstring|>Calculating the cosinus values depending on the number of tau samples<|endoftext|>
37c18a736586dca84f0ca40fdb8086e4296534633e48f7a669439c6ac73bd86d
@property def id(self): 'Returns the SHA1 ID of this commitish' if (self._id is None): self._id = self.repo.revparse(self.ref) return self._id
Returns the SHA1 ID of this commitish
src/geogigpy/commitish.py
id
boundlessgeo/geogig-py
7
python
@property def id(self): if (self._id is None): self._id = self.repo.revparse(self.ref) return self._id
@property def id(self): if (self._id is None): self._id = self.repo.revparse(self.ref) return self._id<|docstring|>Returns the SHA1 ID of this commitish<|endoftext|>
558d36ca1237fd8f9b6421157a3d73a8811139acdf1d4e24d7e249a945a71301
def log(self): 'Return the history up to this commitish' return self.repo.log(self.ref)
Return the history up to this commitish
src/geogigpy/commitish.py
log
boundlessgeo/geogig-py
7
python
def log(self): return self.repo.log(self.ref)
def log(self): return self.repo.log(self.ref)<|docstring|>Return the history up to this commitish<|endoftext|>
4074d6c0af2d8f8e957fdc031e1c04d62c3215142706c5343da440cfdb17065d
@property def root(self): 'Returns a Tree that represents the root tree at this snapshot' return Tree(self.repo, self.ref)
Returns a Tree that represents the root tree at this snapshot
src/geogigpy/commitish.py
root
boundlessgeo/geogig-py
7
python
@property def root(self): return Tree(self.repo, self.ref)
@property def root(self): return Tree(self.repo, self.ref)<|docstring|>Returns a Tree that represents the root tree at this snapshot<|endoftext|>
923927d34044c67cae9fcf8598d92d502d155fe1e4dc71269fa4fc559ff2a000
def checkout(self): 'Checks out this commitish, and set it as the current HEAD' self.repo.checkout(self.ref)
Checks out this commitish, and set it as the current HEAD
src/geogigpy/commitish.py
checkout
boundlessgeo/geogig-py
7
python
def checkout(self): self.repo.checkout(self.ref)
def checkout(self): self.repo.checkout(self.ref)<|docstring|>Checks out this commitish, and set it as the current HEAD<|endoftext|>
b001b8d1de85228124517b705ad769abc10815102199ba0ffb1f14831a71b065
def diff(self): 'Returns a list of DiffEntry with all changes introduced by this commitish' if (self._diff is None): self._diff = self.repo.diff((self.ref + '~1'), self.ref) return self._diff
Returns a list of DiffEntry with all changes introduced by this commitish
src/geogigpy/commitish.py
diff
boundlessgeo/geogig-py
7
python
def diff(self): if (self._diff is None): self._diff = self.repo.diff((self.ref + '~1'), self.ref) return self._diff
def diff(self): if (self._diff is None): self._diff = self.repo.diff((self.ref + '~1'), self.ref) return self._diff<|docstring|>Returns a list of DiffEntry with all changes introduced by this commitish<|endoftext|>
92690b4ebc00a873c04a81b7440c3be5a6bc95465999d29af322c528a5727726
@property def parent(self): 'Returns a commitish that represents the parent of this one' return Commitish(self.repo, (self.ref + '~1'))
Returns a commitish that represents the parent of this one
src/geogigpy/commitish.py
parent
boundlessgeo/geogig-py
7
python
@property def parent(self): return Commitish(self.repo, (self.ref + '~1'))
@property def parent(self): return Commitish(self.repo, (self.ref + '~1'))<|docstring|>Returns a commitish that represents the parent of this one<|endoftext|>
656eb7fd477a9f3a5afe33af99fa28aee264f5b1e5f7c44953c767d8d22a4396
def humantext(self): 'Returns a nice human-readable description of the commitish' headid = self.repo.revparse(self.repo.head.ref) if (headid == self.id): return 'Current branch' return self.ref
Returns a nice human-readable description of the commitish
src/geogigpy/commitish.py
humantext
boundlessgeo/geogig-py
7
python
def humantext(self): headid = self.repo.revparse(self.repo.head.ref) if (headid == self.id): return 'Current branch' return self.ref
def humantext(self): headid = self.repo.revparse(self.repo.head.ref) if (headid == self.id): return 'Current branch' return self.ref<|docstring|>Returns a nice human-readable description of the commitish<|endoftext|>
e8f1f38099c8f224dec038fca1b03ecce00136188f7c31b2776eaed0f386bb5a
def read_triformat(directory, x_filename): '\n X - 2d numpy array with rows representing compounds and columns represnting features\n compounds - pandas DataFrame with compound information\n features - pandas DataFrame with features information\n ' features_path = os.path.join(directory, 'features.txt') features = pandas.io.parsers.read_table(features_path) compounds_path = os.path.join(directory, 'compounds.txt') compounds = pandas.io.parsers.read_table(compounds_path) x_path = os.path.join(directory, x_filename) X = numpy.loadtxt(x_path, delimiter='\t') triformat = {'X': X, 'compounds': compounds, 'features': features} return triformat
X - 2d numpy array with rows representing compounds and columns represnting features compounds - pandas DataFrame with compound information features - pandas DataFrame with features information
projects/remyelination/feature_reader.py
read_triformat
dhimmel/serg-pycode
0
python
def read_triformat(directory, x_filename): '\n X - 2d numpy array with rows representing compounds and columns represnting features\n compounds - pandas DataFrame with compound information\n features - pandas DataFrame with features information\n ' features_path = os.path.join(directory, 'features.txt') features = pandas.io.parsers.read_table(features_path) compounds_path = os.path.join(directory, 'compounds.txt') compounds = pandas.io.parsers.read_table(compounds_path) x_path = os.path.join(directory, x_filename) X = numpy.loadtxt(x_path, delimiter='\t') triformat = {'X': X, 'compounds': compounds, 'features': features} return triformat
def read_triformat(directory, x_filename): '\n X - 2d numpy array with rows representing compounds and columns represnting features\n compounds - pandas DataFrame with compound information\n features - pandas DataFrame with features information\n ' features_path = os.path.join(directory, 'features.txt') features = pandas.io.parsers.read_table(features_path) compounds_path = os.path.join(directory, 'compounds.txt') compounds = pandas.io.parsers.read_table(compounds_path) x_path = os.path.join(directory, x_filename) X = numpy.loadtxt(x_path, delimiter='\t') triformat = {'X': X, 'compounds': compounds, 'features': features} return triformat<|docstring|>X - 2d numpy array with rows representing compounds and columns represnting features compounds - pandas DataFrame with compound information features - pandas DataFrame with features information<|endoftext|>
e795c20b4fac8ca817b57c2c9d417b9a9b8111b64b2bd949aef207b998d7df8c
def whoami(string): 'Leon introduces himself' return utils.output('end', 'introduction', utils.translate('introduction'))
Leon introduces himself
packages/leon/whoami.py
whoami
jankeromnes/leon
4
python
def whoami(string): return utils.output('end', 'introduction', utils.translate('introduction'))
def whoami(string): return utils.output('end', 'introduction', utils.translate('introduction'))<|docstring|>Leon introduces himself<|endoftext|>
5e1981956e8cf250b39eff83e25d76ae200d81d2f4c2a215f394865d1aa8dad0
def __get_node_label_charcnn_embeddings(self, unique_labels_as_characters: tf.Tensor, node_labels_to_unique_labels: tf.Tensor) -> tf.Tensor: '\n Compute representation of node labels using a 2-layer character CNN.\n\n Args:\n unique_labels_as_characters: int32 tensor of shape [U, C]\n representing the unique (node) labels occurring in a\n batch, where U is the number of such labels and C the\n maximal number of characters.\n node_labels_to_unique_labels: int32 tensor of shape [V],\n mapping each node in the batch to one of the unique\n labels.\n\n Returns:\n float32 tensor of shape [V, D] representing embedded node\n label information about each node.\n ' label_embedding_size = self.params['graph_node_label_representation_size'] self.unique_label_chars_one_hot = unique_label_chars_one_hot = tf.one_hot(indices=unique_labels_as_characters, depth=len(ALPHABET), axis=(- 1)) char_conv_l1_kernel_size = 5 char_conv_l2_kernel_size = (self.params['graph_node_label_max_num_chars'] - (2 * (char_conv_l1_kernel_size - 1))) char_conv_l1 = tf.keras.layers.Conv1D(filters=16, kernel_size=char_conv_l1_kernel_size, activation=tf.nn.leaky_relu)(unique_label_chars_one_hot) char_pool_l1 = tf.keras.layers.MaxPool1D(pool_size=char_conv_l1_kernel_size, strides=1)(inputs=char_conv_l1) char_conv_l2 = tf.keras.layers.Conv1D(filters=label_embedding_size, kernel_size=char_conv_l2_kernel_size, activation=tf.nn.leaky_relu)(char_pool_l1) unique_label_representations = tf.squeeze(char_conv_l2, axis=1) node_label_representations = tf.gather(params=unique_label_representations, indices=node_labels_to_unique_labels) return node_label_representations
Compute representation of node labels using a 2-layer character CNN. Args: unique_labels_as_characters: int32 tensor of shape [U, C] representing the unique (node) labels occurring in a batch, where U is the number of such labels and C the maximal number of characters. node_labels_to_unique_labels: int32 tensor of shape [V], mapping each node in the batch to one of the unique labels. Returns: float32 tensor of shape [V, D] representing embedded node label information about each node.
tf-gnn-samples/tasks/varmisuse_task.py
__get_node_label_charcnn_embeddings
yangzhou6666/adversarial-examples
10
python
def __get_node_label_charcnn_embeddings(self, unique_labels_as_characters: tf.Tensor, node_labels_to_unique_labels: tf.Tensor) -> tf.Tensor: '\n Compute representation of node labels using a 2-layer character CNN.\n\n Args:\n unique_labels_as_characters: int32 tensor of shape [U, C]\n representing the unique (node) labels occurring in a\n batch, where U is the number of such labels and C the\n maximal number of characters.\n node_labels_to_unique_labels: int32 tensor of shape [V],\n mapping each node in the batch to one of the unique\n labels.\n\n Returns:\n float32 tensor of shape [V, D] representing embedded node\n label information about each node.\n ' label_embedding_size = self.params['graph_node_label_representation_size'] self.unique_label_chars_one_hot = unique_label_chars_one_hot = tf.one_hot(indices=unique_labels_as_characters, depth=len(ALPHABET), axis=(- 1)) char_conv_l1_kernel_size = 5 char_conv_l2_kernel_size = (self.params['graph_node_label_max_num_chars'] - (2 * (char_conv_l1_kernel_size - 1))) char_conv_l1 = tf.keras.layers.Conv1D(filters=16, kernel_size=char_conv_l1_kernel_size, activation=tf.nn.leaky_relu)(unique_label_chars_one_hot) char_pool_l1 = tf.keras.layers.MaxPool1D(pool_size=char_conv_l1_kernel_size, strides=1)(inputs=char_conv_l1) char_conv_l2 = tf.keras.layers.Conv1D(filters=label_embedding_size, kernel_size=char_conv_l2_kernel_size, activation=tf.nn.leaky_relu)(char_pool_l1) unique_label_representations = tf.squeeze(char_conv_l2, axis=1) node_label_representations = tf.gather(params=unique_label_representations, indices=node_labels_to_unique_labels) return node_label_representations
def __get_node_label_charcnn_embeddings(self, unique_labels_as_characters: tf.Tensor, node_labels_to_unique_labels: tf.Tensor) -> tf.Tensor: '\n Compute representation of node labels using a 2-layer character CNN.\n\n Args:\n unique_labels_as_characters: int32 tensor of shape [U, C]\n representing the unique (node) labels occurring in a\n batch, where U is the number of such labels and C the\n maximal number of characters.\n node_labels_to_unique_labels: int32 tensor of shape [V],\n mapping each node in the batch to one of the unique\n labels.\n\n Returns:\n float32 tensor of shape [V, D] representing embedded node\n label information about each node.\n ' label_embedding_size = self.params['graph_node_label_representation_size'] self.unique_label_chars_one_hot = unique_label_chars_one_hot = tf.one_hot(indices=unique_labels_as_characters, depth=len(ALPHABET), axis=(- 1)) char_conv_l1_kernel_size = 5 char_conv_l2_kernel_size = (self.params['graph_node_label_max_num_chars'] - (2 * (char_conv_l1_kernel_size - 1))) char_conv_l1 = tf.keras.layers.Conv1D(filters=16, kernel_size=char_conv_l1_kernel_size, activation=tf.nn.leaky_relu)(unique_label_chars_one_hot) char_pool_l1 = tf.keras.layers.MaxPool1D(pool_size=char_conv_l1_kernel_size, strides=1)(inputs=char_conv_l1) char_conv_l2 = tf.keras.layers.Conv1D(filters=label_embedding_size, kernel_size=char_conv_l2_kernel_size, activation=tf.nn.leaky_relu)(char_pool_l1) unique_label_representations = tf.squeeze(char_conv_l2, axis=1) node_label_representations = tf.gather(params=unique_label_representations, indices=node_labels_to_unique_labels) return node_label_representations<|docstring|>Compute representation of node labels using a 2-layer character CNN. Args: unique_labels_as_characters: int32 tensor of shape [U, C] representing the unique (node) labels occurring in a batch, where U is the number of such labels and C the maximal number of characters. node_labels_to_unique_labels: int32 tensor of shape [V], mapping each node in the batch to one of the unique labels. Returns: float32 tensor of shape [V, D] representing embedded node label information about each node.<|endoftext|>
897ec548dd608c28f7be3f343d6a7a053c6d5f6555328aeaeea4735fde122199
def image_entropy(im): '\n Calculate the entropy of an image. Used for "smart cropping".\n ' if (not isinstance(im, Image.Image)): return 0 hist = im.histogram() hist_size = float(sum(hist)) hist = [(h / hist_size) for h in hist] return (- sum([(p * math.log(p, 2)) for p in hist if (p != 0)]))
Calculate the entropy of an image. Used for "smart cropping".
pressurecooker/thumbscropping.py
image_entropy
kollivier/pressurecooker
14
python
def image_entropy(im): '\n \n ' if (not isinstance(im, Image.Image)): return 0 hist = im.histogram() hist_size = float(sum(hist)) hist = [(h / hist_size) for h in hist] return (- sum([(p * math.log(p, 2)) for p in hist if (p != 0)]))
def image_entropy(im): '\n \n ' if (not isinstance(im, Image.Image)): return 0 hist = im.histogram() hist_size = float(sum(hist)) hist = [(h / hist_size) for h in hist] return (- sum([(p * math.log(p, 2)) for p in hist if (p != 0)]))<|docstring|>Calculate the entropy of an image. Used for "smart cropping".<|endoftext|>
4ee07f37b37b93de7c5119bdbba3880554199858a29ab561929eace8c6180bc7
def _compare_entropy(start_slice, end_slice, slice, difference): '\n Calculate the entropy of two slices (from the start and end of an axis),\n returning a tuple containing the amount that should be added to the start\n and removed from the end of the axis.\n ' start_entropy = image_entropy(start_slice) end_entropy = image_entropy(end_slice) if (end_entropy and (abs(((start_entropy / end_entropy) - 1)) < 0.01)): if (difference >= (slice * 2)): return (slice, slice) half_slice = (slice // 2) return (half_slice, (slice - half_slice)) if (start_entropy > end_entropy): return (0, slice) else: return (slice, 0)
Calculate the entropy of two slices (from the start and end of an axis), returning a tuple containing the amount that should be added to the start and removed from the end of the axis.
pressurecooker/thumbscropping.py
_compare_entropy
kollivier/pressurecooker
14
python
def _compare_entropy(start_slice, end_slice, slice, difference): '\n Calculate the entropy of two slices (from the start and end of an axis),\n returning a tuple containing the amount that should be added to the start\n and removed from the end of the axis.\n ' start_entropy = image_entropy(start_slice) end_entropy = image_entropy(end_slice) if (end_entropy and (abs(((start_entropy / end_entropy) - 1)) < 0.01)): if (difference >= (slice * 2)): return (slice, slice) half_slice = (slice // 2) return (half_slice, (slice - half_slice)) if (start_entropy > end_entropy): return (0, slice) else: return (slice, 0)
def _compare_entropy(start_slice, end_slice, slice, difference): '\n Calculate the entropy of two slices (from the start and end of an axis),\n returning a tuple containing the amount that should be added to the start\n and removed from the end of the axis.\n ' start_entropy = image_entropy(start_slice) end_entropy = image_entropy(end_slice) if (end_entropy and (abs(((start_entropy / end_entropy) - 1)) < 0.01)): if (difference >= (slice * 2)): return (slice, slice) half_slice = (slice // 2) return (half_slice, (slice - half_slice)) if (start_entropy > end_entropy): return (0, slice) else: return (slice, 0)<|docstring|>Calculate the entropy of two slices (from the start and end of an axis), returning a tuple containing the amount that should be added to the start and removed from the end of the axis.<|endoftext|>
1172381e4328887fb2d06d20f7aaea9d2a3001418c1e71c419911a4a09b8f873
def scale_and_crop(im, size, crop=False, upscale=False, zoom=None, target=None, **kwargs): '\n Handle scaling and cropping the source image.\n Images can be scaled / cropped against a single dimension by using zero\n as the placeholder in the size. For example, ``size=(100, 0)`` will cause\n the image to be resized to 100 pixels wide, keeping the aspect ratio of\n the source image.\n crop\n Crop the source image height or width to exactly match the requested\n thumbnail size (the default is to proportionally resize the source\n image to fit within the requested thumbnail size).\n By default, the image is centered before being cropped. To crop from\n the edges, pass a comma separated string containing the ``x`` and ``y``\n percentage offsets (negative values go from the right/bottom). Some\n examples follow:\n * ``crop="0,0"`` will crop from the left and top edges.\n * ``crop="-10,-0"`` will crop from the right edge (with a 10% offset)\n and the bottom edge.\n * ``crop=",0"`` will keep the default behavior for the x axis\n (horizontally centering the image) and crop from the top edge.\n The image can also be "smart cropped" by using ``crop="smart"``. The\n image is incrementally cropped down to the requested size by removing\n slices from edges with the least entropy.\n Finally, you can use ``crop="scale"`` to simply scale the image so that\n at least one dimension fits within the size dimensions given (you may\n want to use the upscale option too).\n upscale\n Allow upscaling of the source image during scaling.\n zoom\n A percentage to zoom in on the scaled image. For example, a zoom of\n ``40`` will clip 20% off each side of the source image before\n thumbnailing.\n target\n Set the focal point as a percentage for the image if it needs to be\n cropped (defaults to ``(50, 50)``).\n For example, ``target="10,20"`` will set the focal point as 10% and 20%\n from the left and top of the image, respectively. If the image needs to\n be cropped, it will trim off the right and bottom edges until the focal\n point is centered.\n Can either be set as a two-item tuple such as ``(20, 30)`` or a comma\n separated string such as ``"20,10"``.\n A null value such as ``(20, None)`` or ``",60"`` will default to 50%.\n ' (source_x, source_y) = [float(v) for v in im.size] (target_x, target_y) = [int(v) for v in size] if (crop or (not target_x) or (not target_y)): scale = max((target_x / source_x), (target_y / source_y)) else: scale = min((target_x / source_x), (target_y / source_y)) if (not target_x): target_x = round((source_x * scale)) elif (not target_y): target_y = round((source_y * scale)) if zoom: if (not crop): target_x = round((source_x * scale)) target_y = round((source_y * scale)) crop = True scale *= ((100 + int(zoom)) / 100.0) if ((scale < 1.0) or ((scale > 1.0) and upscale)): im = im.resize((int(round((source_x * scale))), int(round((source_y * scale)))), resample=Image.ANTIALIAS) if crop: (source_x, source_y) = im.size diff_x = int((source_x - min(source_x, target_x))) diff_y = int((source_y - min(source_y, target_y))) if ((crop != 'scale') and (diff_x or diff_y)): if isinstance(target, string_types): target = re.match('(\\d+)?,(\\d+)?$', target) if target: target = target.groups() if target: focal_point = [(int(n) if (n or (n == 0)) else 50) for n in target] else: focal_point = (50, 50) (halftarget_x, halftarget_y) = (int((target_x / 2)), int((target_y / 2))) focal_point_x = int(((source_x * focal_point[0]) / 100)) focal_point_y = int(((source_y * focal_point[1]) / 100)) box = [max(0, min((source_x - target_x), (focal_point_x - halftarget_x))), max(0, min((source_y - target_y), (focal_point_y - halftarget_y)))] box.append(int(min(source_x, (box[0] + target_x)))) box.append(int(min(source_y, (box[1] + target_y)))) edge_crop = (isinstance(crop, string_types) and re.match('(?:(-?)(\\d+))?,(?:(-?)(\\d+))?$', crop)) if (edge_crop and filter(None, edge_crop.groups())): (x_right, x_crop, y_bottom, y_crop) = edge_crop.groups() if x_crop: offset = min(((int(target_x) * int(x_crop)) // 100), diff_x) if x_right: box[0] = (diff_x - offset) box[2] = (source_x - offset) else: box[0] = offset box[2] = (source_x - (diff_x - offset)) if y_crop: offset = min(((int(target_y) * int(y_crop)) // 100), diff_y) if y_bottom: box[1] = (diff_y - offset) box[3] = (source_y - offset) else: box[1] = offset box[3] = (source_y - (diff_y - offset)) elif (crop == 'smart'): left = top = 0 (right, bottom) = (source_x, source_y) while diff_x: slice = min(diff_x, max((diff_x // 5), 10)) start = im.crop((left, 0, (left + slice), source_y)) end = im.crop(((right - slice), 0, right, source_y)) (add, remove) = _compare_entropy(start, end, slice, diff_x) left += add right -= remove diff_x = ((diff_x - add) - remove) while diff_y: slice = min(diff_y, max((diff_y // 5), 10)) start = im.crop((0, top, source_x, (top + slice))) end = im.crop((0, (bottom - slice), source_x, bottom)) (add, remove) = _compare_entropy(start, end, slice, diff_y) top += add bottom -= remove diff_y = ((diff_y - add) - remove) box = (left, top, right, bottom) im = im.crop(box) return im
Handle scaling and cropping the source image. Images can be scaled / cropped against a single dimension by using zero as the placeholder in the size. For example, ``size=(100, 0)`` will cause the image to be resized to 100 pixels wide, keeping the aspect ratio of the source image. crop Crop the source image height or width to exactly match the requested thumbnail size (the default is to proportionally resize the source image to fit within the requested thumbnail size). By default, the image is centered before being cropped. To crop from the edges, pass a comma separated string containing the ``x`` and ``y`` percentage offsets (negative values go from the right/bottom). Some examples follow: * ``crop="0,0"`` will crop from the left and top edges. * ``crop="-10,-0"`` will crop from the right edge (with a 10% offset) and the bottom edge. * ``crop=",0"`` will keep the default behavior for the x axis (horizontally centering the image) and crop from the top edge. The image can also be "smart cropped" by using ``crop="smart"``. The image is incrementally cropped down to the requested size by removing slices from edges with the least entropy. Finally, you can use ``crop="scale"`` to simply scale the image so that at least one dimension fits within the size dimensions given (you may want to use the upscale option too). upscale Allow upscaling of the source image during scaling. zoom A percentage to zoom in on the scaled image. For example, a zoom of ``40`` will clip 20% off each side of the source image before thumbnailing. target Set the focal point as a percentage for the image if it needs to be cropped (defaults to ``(50, 50)``). For example, ``target="10,20"`` will set the focal point as 10% and 20% from the left and top of the image, respectively. If the image needs to be cropped, it will trim off the right and bottom edges until the focal point is centered. Can either be set as a two-item tuple such as ``(20, 30)`` or a comma separated string such as ``"20,10"``. A null value such as ``(20, None)`` or ``",60"`` will default to 50%.
pressurecooker/thumbscropping.py
scale_and_crop
kollivier/pressurecooker
14
python
def scale_and_crop(im, size, crop=False, upscale=False, zoom=None, target=None, **kwargs): '\n Handle scaling and cropping the source image.\n Images can be scaled / cropped against a single dimension by using zero\n as the placeholder in the size. For example, ``size=(100, 0)`` will cause\n the image to be resized to 100 pixels wide, keeping the aspect ratio of\n the source image.\n crop\n Crop the source image height or width to exactly match the requested\n thumbnail size (the default is to proportionally resize the source\n image to fit within the requested thumbnail size).\n By default, the image is centered before being cropped. To crop from\n the edges, pass a comma separated string containing the ``x`` and ``y``\n percentage offsets (negative values go from the right/bottom). Some\n examples follow:\n * ``crop="0,0"`` will crop from the left and top edges.\n * ``crop="-10,-0"`` will crop from the right edge (with a 10% offset)\n and the bottom edge.\n * ``crop=",0"`` will keep the default behavior for the x axis\n (horizontally centering the image) and crop from the top edge.\n The image can also be "smart cropped" by using ``crop="smart"``. The\n image is incrementally cropped down to the requested size by removing\n slices from edges with the least entropy.\n Finally, you can use ``crop="scale"`` to simply scale the image so that\n at least one dimension fits within the size dimensions given (you may\n want to use the upscale option too).\n upscale\n Allow upscaling of the source image during scaling.\n zoom\n A percentage to zoom in on the scaled image. For example, a zoom of\n ``40`` will clip 20% off each side of the source image before\n thumbnailing.\n target\n Set the focal point as a percentage for the image if it needs to be\n cropped (defaults to ``(50, 50)``).\n For example, ``target="10,20"`` will set the focal point as 10% and 20%\n from the left and top of the image, respectively. 
If the image needs to\n be cropped, it will trim off the right and bottom edges until the focal\n point is centered.\n Can either be set as a two-item tuple such as ``(20, 30)`` or a comma\n separated string such as ``"20,10"``.\n A null value such as ``(20, None)`` or ``",60"`` will default to 50%.\n ' (source_x, source_y) = [float(v) for v in im.size] (target_x, target_y) = [int(v) for v in size] if (crop or (not target_x) or (not target_y)): scale = max((target_x / source_x), (target_y / source_y)) else: scale = min((target_x / source_x), (target_y / source_y)) if (not target_x): target_x = round((source_x * scale)) elif (not target_y): target_y = round((source_y * scale)) if zoom: if (not crop): target_x = round((source_x * scale)) target_y = round((source_y * scale)) crop = True scale *= ((100 + int(zoom)) / 100.0) if ((scale < 1.0) or ((scale > 1.0) and upscale)): im = im.resize((int(round((source_x * scale))), int(round((source_y * scale)))), resample=Image.ANTIALIAS) if crop: (source_x, source_y) = im.size diff_x = int((source_x - min(source_x, target_x))) diff_y = int((source_y - min(source_y, target_y))) if ((crop != 'scale') and (diff_x or diff_y)): if isinstance(target, string_types): target = re.match('(\\d+)?,(\\d+)?$', target) if target: target = target.groups() if target: focal_point = [(int(n) if (n or (n == 0)) else 50) for n in target] else: focal_point = (50, 50) (halftarget_x, halftarget_y) = (int((target_x / 2)), int((target_y / 2))) focal_point_x = int(((source_x * focal_point[0]) / 100)) focal_point_y = int(((source_y * focal_point[1]) / 100)) box = [max(0, min((source_x - target_x), (focal_point_x - halftarget_x))), max(0, min((source_y - target_y), (focal_point_y - halftarget_y)))] box.append(int(min(source_x, (box[0] + target_x)))) box.append(int(min(source_y, (box[1] + target_y)))) edge_crop = (isinstance(crop, string_types) and re.match('(?:(-?)(\\d+))?,(?:(-?)(\\d+))?$', crop)) if (edge_crop and filter(None, edge_crop.groups())): (x_right, x_crop, y_bottom, y_crop) = edge_crop.groups() if x_crop: offset = min(((int(target_x) * int(x_crop)) // 100), diff_x) if x_right: box[0] = (diff_x - offset) box[2] = (source_x - offset) else: box[0] = offset box[2] = (source_x - (diff_x - offset)) if y_crop: offset = min(((int(target_y) * int(y_crop)) // 100), diff_y) if y_bottom: box[1] = (diff_y - offset) box[3] = (source_y - offset) else: box[1] = offset box[3] = (source_y - (diff_y - offset)) elif (crop == 'smart'): left = top = 0 (right, bottom) = (source_x, source_y) while diff_x: slice = min(diff_x, max((diff_x // 5), 10)) start = im.crop((left, 0, (left + slice), source_y)) end = im.crop(((right - slice), 0, right, source_y)) (add, remove) = _compare_entropy(start, end, slice, diff_x) left += add right -= remove diff_x = ((diff_x - add) - remove) while diff_y: slice = min(diff_y, max((diff_y // 5), 10)) start = im.crop((0, top, source_x, (top + slice))) end = im.crop((0, (bottom - slice), source_x, bottom)) (add, remove) = _compare_entropy(start, end, slice, diff_y) top += add bottom -= remove diff_y = ((diff_y - add) - remove) box = (left, top, right, bottom) im = im.crop(box) return im
def scale_and_crop(im, size, crop=False, upscale=False, zoom=None, target=None, **kwargs): '\n Handle scaling and cropping the source image.\n Images can be scaled / cropped against a single dimension by using zero\n as the placeholder in the size. For example, ``size=(100, 0)`` will cause\n the image to be resized to 100 pixels wide, keeping the aspect ratio of\n the source image.\n crop\n Crop the source image height or width to exactly match the requested\n thumbnail size (the default is to proportionally resize the source\n image to fit within the requested thumbnail size).\n By default, the image is centered before being cropped. To crop from\n the edges, pass a comma separated string containing the ``x`` and ``y``\n percentage offsets (negative values go from the right/bottom). Some\n examples follow:\n * ``crop="0,0"`` will crop from the left and top edges.\n * ``crop="-10,-0"`` will crop from the right edge (with a 10% offset)\n and the bottom edge.\n * ``crop=",0"`` will keep the default behavior for the x axis\n (horizontally centering the image) and crop from the top edge.\n The image can also be "smart cropped" by using ``crop="smart"``. The\n image is incrementally cropped down to the requested size by removing\n slices from edges with the least entropy.\n Finally, you can use ``crop="scale"`` to simply scale the image so that\n at least one dimension fits within the size dimensions given (you may\n want to use the upscale option too).\n upscale\n Allow upscaling of the source image during scaling.\n zoom\n A percentage to zoom in on the scaled image. For example, a zoom of\n ``40`` will clip 20% off each side of the source image before\n thumbnailing.\n target\n Set the focal point as a percentage for the image if it needs to be\n cropped (defaults to ``(50, 50)``).\n For example, ``target="10,20"`` will set the focal point as 10% and 20%\n from the left and top of the image, respectively. 
If the image needs to\n be cropped, it will trim off the right and bottom edges until the focal\n point is centered.\n Can either be set as a two-item tuple such as ``(20, 30)`` or a comma\n separated string such as ``"20,10"``.\n A null value such as ``(20, None)`` or ``",60"`` will default to 50%.\n ' (source_x, source_y) = [float(v) for v in im.size] (target_x, target_y) = [int(v) for v in size] if (crop or (not target_x) or (not target_y)): scale = max((target_x / source_x), (target_y / source_y)) else: scale = min((target_x / source_x), (target_y / source_y)) if (not target_x): target_x = round((source_x * scale)) elif (not target_y): target_y = round((source_y * scale)) if zoom: if (not crop): target_x = round((source_x * scale)) target_y = round((source_y * scale)) crop = True scale *= ((100 + int(zoom)) / 100.0) if ((scale < 1.0) or ((scale > 1.0) and upscale)): im = im.resize((int(round((source_x * scale))), int(round((source_y * scale)))), resample=Image.ANTIALIAS) if crop: (source_x, source_y) = im.size diff_x = int((source_x - min(source_x, target_x))) diff_y = int((source_y - min(source_y, target_y))) if ((crop != 'scale') and (diff_x or diff_y)): if isinstance(target, string_types): target = re.match('(\\d+)?,(\\d+)?$', target) if target: target = target.groups() if target: focal_point = [(int(n) if (n or (n == 0)) else 50) for n in target] else: focal_point = (50, 50) (halftarget_x, halftarget_y) = (int((target_x / 2)), int((target_y / 2))) focal_point_x = int(((source_x * focal_point[0]) / 100)) focal_point_y = int(((source_y * focal_point[1]) / 100)) box = [max(0, min((source_x - target_x), (focal_point_x - halftarget_x))), max(0, min((source_y - target_y), (focal_point_y - halftarget_y)))] box.append(int(min(source_x, (box[0] + target_x)))) box.append(int(min(source_y, (box[1] + target_y)))) edge_crop = (isinstance(crop, string_types) and re.match('(?:(-?)(\\d+))?,(?:(-?)(\\d+))?$', crop)) if (edge_crop and filter(None, edge_crop.groups())): (x_right, x_crop, y_bottom, y_crop) = edge_crop.groups() if x_crop: offset = min(((int(target_x) * int(x_crop)) // 100), diff_x) if x_right: box[0] = (diff_x - offset) box[2] = (source_x - offset) else: box[0] = offset box[2] = (source_x - (diff_x - offset)) if y_crop: offset = min(((int(target_y) * int(y_crop)) // 100), diff_y) if y_bottom: box[1] = (diff_y - offset) box[3] = (source_y - offset) else: box[1] = offset box[3] = (source_y - (diff_y - offset)) elif (crop == 'smart'): left = top = 0 (right, bottom) = (source_x, source_y) while diff_x: slice = min(diff_x, max((diff_x // 5), 10)) start = im.crop((left, 0, (left + slice), source_y)) end = im.crop(((right - slice), 0, right, source_y)) (add, remove) = _compare_entropy(start, end, slice, diff_x) left += add right -= remove diff_x = ((diff_x - add) - remove) while diff_y: slice = min(diff_y, max((diff_y // 5), 10)) start = im.crop((0, top, source_x, (top + slice))) end = im.crop((0, (bottom - slice), source_x, bottom)) (add, remove) = _compare_entropy(start, end, slice, diff_y) top += add bottom -= remove diff_y = ((diff_y - add) - remove) box = (left, top, right, bottom) im = im.crop(box) return im<|docstring|>Handle scaling and cropping the source image. Images can be scaled / cropped against a single dimension by using zero as the placeholder in the size. For example, ``size=(100, 0)`` will cause the image to be resized to 100 pixels wide, keeping the aspect ratio of the source image. 
crop Crop the source image height or width to exactly match the requested thumbnail size (the default is to proportionally resize the source image to fit within the requested thumbnail size). By default, the image is centered before being cropped. To crop from the edges, pass a comma separated string containing the ``x`` and ``y`` percentage offsets (negative values go from the right/bottom). Some examples follow: * ``crop="0,0"`` will crop from the left and top edges. * ``crop="-10,-0"`` will crop from the right edge (with a 10% offset) and the bottom edge. * ``crop=",0"`` will keep the default behavior for the x axis (horizontally centering the image) and crop from the top edge. The image can also be "smart cropped" by using ``crop="smart"``. The image is incrementally cropped down to the requested size by removing slices from edges with the least entropy. Finally, you can use ``crop="scale"`` to simply scale the image so that at least one dimension fits within the size dimensions given (you may want to use the upscale option too). upscale Allow upscaling of the source image during scaling. zoom A percentage to zoom in on the scaled image. For example, a zoom of ``40`` will clip 20% off each side of the source image before thumbnailing. target Set the focal point as a percentage for the image if it needs to be cropped (defaults to ``(50, 50)``). For example, ``target="10,20"`` will set the focal point as 10% and 20% from the left and top of the image, respectively. If the image needs to be cropped, it will trim off the right and bottom edges until the focal point is centered. Can either be set as a two-item tuple such as ``(20, 30)`` or a comma separated string such as ``"20,10"``. A null value such as ``(20, None)`` or ``",60"`` will default to 50%.<|endoftext|>
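A short usage sketch for the scale_and_crop record above, exercising a few of the crop modes its docstring describes. The import path follows the record's path field (pressurecooker/thumbscropping.py); the image size and colour are arbitrary, and a Pillow version that still provides Image.ANTIALIAS is assumed, since the function resizes with that constant.

from PIL import Image
from pressurecooker.thumbscropping import scale_and_crop

src = Image.new("RGB", (400, 300), color="gray")  # stand-in source image

thumb_fit = scale_and_crop(src, (100, 0))                    # width only, keeps aspect ratio -> 100x75
thumb_center = scale_and_crop(src, (100, 100), crop=True)    # scale, then centre-crop to exactly 100x100
thumb_smart = scale_and_crop(src, (100, 100), crop="smart")  # trim the lowest-entropy edges first
thumb_edge = scale_and_crop(src, (100, 100), crop="0,0")     # crop="0,0": anchor the crop at the left/top edges

print(thumb_fit.size, thumb_center.size, thumb_smart.size, thumb_edge.size)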
e3066797096b000bdeb5e94b93a7bb4c622e2d28cc786709e71b4a62b2588ab6
def read_conf(self): 'MODEL' self.model_root = self.conf['Model'] self.model_name = self.model_root.get('ModelName') self.model_tag = '{model_name}.model'.format(model_name=self.model_name) self.model_field_param = self.model_root.get('ModelField') self.model_scene_param = self.model_root.get('ModelScene') 'SYSTEM' self.system_root = self.conf['System'] self.memory_usage = self.system_root.get('MemoryUsage') self.model_version = self.system_root.get('Version') self.save_model = os.path.join(self.model_root_path, self.model_tag) self.save_checkpoint = os.path.join(self.model_root_path, self.checkpoint_tag) 'FIELD PARAM - IMAGE' self.field_root = self.conf['FieldParam'] self.category_param = self.field_root.get('Category') if isinstance(self.category_param, list): self.category_param_text = json.dumps(self.category_param, ensure_ascii=False) elif isinstance(self.category_param, str): self.category_param_text = self.category_param self.image_channel = self.field_root.get('ImageChannel') self.image_width = self.field_root.get('ImageWidth') self.image_height = self.field_root.get('ImageHeight') self.resize = self.field_root.get('Resize') self.max_label_num = self.field_root.get('MaxLabelNum') self.auto_padding = self.field_root.get('AutoPadding') self.output_split = self.field_root.get('OutputSplit') 'NEURAL NETWORK' self.neu_network_root = self.conf['NeuralNet'] self.neu_cnn_param = self.neu_network_root.get('CNNNetwork') self.neu_recurrent_param = self.neu_network_root.get('RecurrentNetwork') self.neu_recurrent_param = (self.neu_recurrent_param if self.neu_recurrent_param else 'NoRecurrent') self.units_num = self.neu_network_root.get('UnitsNum') self.neu_optimizer_param = self.neu_network_root.get('Optimizer') self.neu_optimizer_param = (self.neu_optimizer_param if self.neu_optimizer_param else 'RAdam') self.output_layer = self.neu_network_root.get('OutputLayer') self.loss_func_param = self.output_layer.get('LossFunction') self.decoder = self.output_layer.get('Decoder') 'LABEL' self.label_root = self.conf.get('Label') self.label_from_param = self.label_root.get('LabelFrom') self.extract_regex = self.label_root.get('ExtractRegex') self.extract_regex = (self.extract_regex if self.extract_regex else '.*?(?=_)') self.label_split = self.label_root.get('LabelSplit') 'PATH' self.trains_root = self.conf['Trains'] self.dataset_path_root = self.trains_root.get('DatasetPath') self.trains_path[DatasetType.TFRecords]: list = self.dataset_path_root.get('Training') self.validation_path[DatasetType.TFRecords]: list = self.dataset_path_root.get('Validation') self.source_path_root = self.trains_root.get('SourcePath') self.trains_path[DatasetType.Directory]: list = self.source_path_root.get('Training') self.validation_path[DatasetType.Directory]: list = self.source_path_root.get('Validation') self.validation_set_num: int = self.trains_root.get('ValidationSetNum') 'TRAINS' self.trains_save_steps = self.trains_root.get('SavedSteps') self.trains_validation_steps = self.trains_root.get('ValidationSteps') self.trains_end_acc = self.trains_root.get('EndAcc') self.trains_end_cost = self.trains_root.get('EndCost') self.trains_end_cost = (self.trains_end_cost if self.trains_end_cost else 1) self.trains_end_epochs = self.trains_root.get('EndEpochs') self.trains_end_epochs = (self.trains_end_epochs if self.trains_end_epochs else 2) self.trains_learning_rate: float = float(self.trains_root.get('LearningRate')) self.batch_size = self.trains_root.get('BatchSize') self.batch_size = (self.batch_size if self.batch_size else 64) 
self.validation_batch_size = self.trains_root.get('ValidationBatchSize') self.validation_batch_size = (self.validation_batch_size if self.validation_batch_size else 300) 'DATA AUGMENTATION' self.data_augmentation_root = self.conf['DataAugmentation'] self.da_binaryzation = self.data_augmentation_root.get('Binaryzation') self.da_median_blur = self.data_augmentation_root.get('MedianBlur') self.da_gaussian_blur = self.data_augmentation_root.get('GaussianBlur') self.da_equalize_hist = self.data_augmentation_root.get('EqualizeHist') self.da_laplace = self.data_augmentation_root.get('Laplace') self.da_rotate = self.data_augmentation_root.get('Rotate') self.da_warp_perspective = self.data_augmentation_root.get('WarpPerspective') self.da_sp_noise = self.data_augmentation_root.get('PepperNoise') self.da_brightness = self.data_augmentation_root.get('Brightness') self.da_saturation = self.data_augmentation_root.get('Saturation') self.da_hue = self.data_augmentation_root.get('Hue') self.da_gamma = self.data_augmentation_root.get('Gamma') self.da_channel_swap = self.data_augmentation_root.get('ChannelSwap') self.da_random_blank = self.data_augmentation_root.get('RandomBlank') self.da_random_transition = self.data_augmentation_root.get('RandomTransition') self.da_random_captcha = self.data_augmentation_root.get('RandomCaptcha') if (not self.da_random_captcha): self.da_random_captcha = {'Enable': False, 'FontPath': ''} 'PRETREATMENT' self.pretreatment_root = self.conf['Pretreatment'] self.pre_binaryzation = self.pretreatment_root.get('Binaryzation') self.pre_replace_transparent = self.pretreatment_root.get('ReplaceTransparent') self.pre_horizontal_stitching = self.pretreatment_root.get('HorizontalStitching') self.pre_concat_frames = self.pretreatment_root.get('ConcatFrames') self.pre_blend_frames = self.pretreatment_root.get('BlendFrames') self.pre_exec_map = self.pretreatment_root.get('ExecuteMap') self.pre_exec_map = (self.pre_exec_map if self.pre_exec_map else {}) 'COMPILE_MODEL' self.compile_model_path = os.path.join(self.output_path, 'graph') self.compile_model_path = self.compile_model_path.replace('\\', '/') self.check_field()
MODEL
config.py
read_conf
liuyang77886/captcha_trainer
2,548
python
def read_conf(self): self.model_root = self.conf['Model'] self.model_name = self.model_root.get('ModelName') self.model_tag = '{model_name}.model'.format(model_name=self.model_name) self.model_field_param = self.model_root.get('ModelField') self.model_scene_param = self.model_root.get('ModelScene') 'SYSTEM' self.system_root = self.conf['System'] self.memory_usage = self.system_root.get('MemoryUsage') self.model_version = self.system_root.get('Version') self.save_model = os.path.join(self.model_root_path, self.model_tag) self.save_checkpoint = os.path.join(self.model_root_path, self.checkpoint_tag) 'FIELD PARAM - IMAGE' self.field_root = self.conf['FieldParam'] self.category_param = self.field_root.get('Category') if isinstance(self.category_param, list): self.category_param_text = json.dumps(self.category_param, ensure_ascii=False) elif isinstance(self.category_param, str): self.category_param_text = self.category_param self.image_channel = self.field_root.get('ImageChannel') self.image_width = self.field_root.get('ImageWidth') self.image_height = self.field_root.get('ImageHeight') self.resize = self.field_root.get('Resize') self.max_label_num = self.field_root.get('MaxLabelNum') self.auto_padding = self.field_root.get('AutoPadding') self.output_split = self.field_root.get('OutputSplit') 'NEURAL NETWORK' self.neu_network_root = self.conf['NeuralNet'] self.neu_cnn_param = self.neu_network_root.get('CNNNetwork') self.neu_recurrent_param = self.neu_network_root.get('RecurrentNetwork') self.neu_recurrent_param = (self.neu_recurrent_param if self.neu_recurrent_param else 'NoRecurrent') self.units_num = self.neu_network_root.get('UnitsNum') self.neu_optimizer_param = self.neu_network_root.get('Optimizer') self.neu_optimizer_param = (self.neu_optimizer_param if self.neu_optimizer_param else 'RAdam') self.output_layer = self.neu_network_root.get('OutputLayer') self.loss_func_param = self.output_layer.get('LossFunction') self.decoder = self.output_layer.get('Decoder') 'LABEL' self.label_root = self.conf.get('Label') self.label_from_param = self.label_root.get('LabelFrom') self.extract_regex = self.label_root.get('ExtractRegex') self.extract_regex = (self.extract_regex if self.extract_regex else '.*?(?=_)') self.label_split = self.label_root.get('LabelSplit') 'PATH' self.trains_root = self.conf['Trains'] self.dataset_path_root = self.trains_root.get('DatasetPath') self.trains_path[DatasetType.TFRecords]: list = self.dataset_path_root.get('Training') self.validation_path[DatasetType.TFRecords]: list = self.dataset_path_root.get('Validation') self.source_path_root = self.trains_root.get('SourcePath') self.trains_path[DatasetType.Directory]: list = self.source_path_root.get('Training') self.validation_path[DatasetType.Directory]: list = self.source_path_root.get('Validation') self.validation_set_num: int = self.trains_root.get('ValidationSetNum') 'TRAINS' self.trains_save_steps = self.trains_root.get('SavedSteps') self.trains_validation_steps = self.trains_root.get('ValidationSteps') self.trains_end_acc = self.trains_root.get('EndAcc') self.trains_end_cost = self.trains_root.get('EndCost') self.trains_end_cost = (self.trains_end_cost if self.trains_end_cost else 1) self.trains_end_epochs = self.trains_root.get('EndEpochs') self.trains_end_epochs = (self.trains_end_epochs if self.trains_end_epochs else 2) self.trains_learning_rate: float = float(self.trains_root.get('LearningRate')) self.batch_size = self.trains_root.get('BatchSize') self.batch_size = (self.batch_size if self.batch_size else 64) 
self.validation_batch_size = self.trains_root.get('ValidationBatchSize') self.validation_batch_size = (self.validation_batch_size if self.validation_batch_size else 300) 'DATA AUGMENTATION' self.data_augmentation_root = self.conf['DataAugmentation'] self.da_binaryzation = self.data_augmentation_root.get('Binaryzation') self.da_median_blur = self.data_augmentation_root.get('MedianBlur') self.da_gaussian_blur = self.data_augmentation_root.get('GaussianBlur') self.da_equalize_hist = self.data_augmentation_root.get('EqualizeHist') self.da_laplace = self.data_augmentation_root.get('Laplace') self.da_rotate = self.data_augmentation_root.get('Rotate') self.da_warp_perspective = self.data_augmentation_root.get('WarpPerspective') self.da_sp_noise = self.data_augmentation_root.get('PepperNoise') self.da_brightness = self.data_augmentation_root.get('Brightness') self.da_saturation = self.data_augmentation_root.get('Saturation') self.da_hue = self.data_augmentation_root.get('Hue') self.da_gamma = self.data_augmentation_root.get('Gamma') self.da_channel_swap = self.data_augmentation_root.get('ChannelSwap') self.da_random_blank = self.data_augmentation_root.get('RandomBlank') self.da_random_transition = self.data_augmentation_root.get('RandomTransition') self.da_random_captcha = self.data_augmentation_root.get('RandomCaptcha') if (not self.da_random_captcha): self.da_random_captcha = {'Enable': False, 'FontPath': ''} 'PRETREATMENT' self.pretreatment_root = self.conf['Pretreatment'] self.pre_binaryzation = self.pretreatment_root.get('Binaryzation') self.pre_replace_transparent = self.pretreatment_root.get('ReplaceTransparent') self.pre_horizontal_stitching = self.pretreatment_root.get('HorizontalStitching') self.pre_concat_frames = self.pretreatment_root.get('ConcatFrames') self.pre_blend_frames = self.pretreatment_root.get('BlendFrames') self.pre_exec_map = self.pretreatment_root.get('ExecuteMap') self.pre_exec_map = (self.pre_exec_map if self.pre_exec_map else {}) 'COMPILE_MODEL' self.compile_model_path = os.path.join(self.output_path, 'graph') self.compile_model_path = self.compile_model_path.replace('\\', '/') self.check_field()
def read_conf(self): self.model_root = self.conf['Model'] self.model_name = self.model_root.get('ModelName') self.model_tag = '{model_name}.model'.format(model_name=self.model_name) self.model_field_param = self.model_root.get('ModelField') self.model_scene_param = self.model_root.get('ModelScene') 'SYSTEM' self.system_root = self.conf['System'] self.memory_usage = self.system_root.get('MemoryUsage') self.model_version = self.system_root.get('Version') self.save_model = os.path.join(self.model_root_path, self.model_tag) self.save_checkpoint = os.path.join(self.model_root_path, self.checkpoint_tag) 'FIELD PARAM - IMAGE' self.field_root = self.conf['FieldParam'] self.category_param = self.field_root.get('Category') if isinstance(self.category_param, list): self.category_param_text = json.dumps(self.category_param, ensure_ascii=False) elif isinstance(self.category_param, str): self.category_param_text = self.category_param self.image_channel = self.field_root.get('ImageChannel') self.image_width = self.field_root.get('ImageWidth') self.image_height = self.field_root.get('ImageHeight') self.resize = self.field_root.get('Resize') self.max_label_num = self.field_root.get('MaxLabelNum') self.auto_padding = self.field_root.get('AutoPadding') self.output_split = self.field_root.get('OutputSplit') 'NEURAL NETWORK' self.neu_network_root = self.conf['NeuralNet'] self.neu_cnn_param = self.neu_network_root.get('CNNNetwork') self.neu_recurrent_param = self.neu_network_root.get('RecurrentNetwork') self.neu_recurrent_param = (self.neu_recurrent_param if self.neu_recurrent_param else 'NoRecurrent') self.units_num = self.neu_network_root.get('UnitsNum') self.neu_optimizer_param = self.neu_network_root.get('Optimizer') self.neu_optimizer_param = (self.neu_optimizer_param if self.neu_optimizer_param else 'RAdam') self.output_layer = self.neu_network_root.get('OutputLayer') self.loss_func_param = self.output_layer.get('LossFunction') self.decoder = self.output_layer.get('Decoder') 'LABEL' self.label_root = self.conf.get('Label') self.label_from_param = self.label_root.get('LabelFrom') self.extract_regex = self.label_root.get('ExtractRegex') self.extract_regex = (self.extract_regex if self.extract_regex else '.*?(?=_)') self.label_split = self.label_root.get('LabelSplit') 'PATH' self.trains_root = self.conf['Trains'] self.dataset_path_root = self.trains_root.get('DatasetPath') self.trains_path[DatasetType.TFRecords]: list = self.dataset_path_root.get('Training') self.validation_path[DatasetType.TFRecords]: list = self.dataset_path_root.get('Validation') self.source_path_root = self.trains_root.get('SourcePath') self.trains_path[DatasetType.Directory]: list = self.source_path_root.get('Training') self.validation_path[DatasetType.Directory]: list = self.source_path_root.get('Validation') self.validation_set_num: int = self.trains_root.get('ValidationSetNum') 'TRAINS' self.trains_save_steps = self.trains_root.get('SavedSteps') self.trains_validation_steps = self.trains_root.get('ValidationSteps') self.trains_end_acc = self.trains_root.get('EndAcc') self.trains_end_cost = self.trains_root.get('EndCost') self.trains_end_cost = (self.trains_end_cost if self.trains_end_cost else 1) self.trains_end_epochs = self.trains_root.get('EndEpochs') self.trains_end_epochs = (self.trains_end_epochs if self.trains_end_epochs else 2) self.trains_learning_rate: float = float(self.trains_root.get('LearningRate')) self.batch_size = self.trains_root.get('BatchSize') self.batch_size = (self.batch_size if self.batch_size else 64) 
self.validation_batch_size = self.trains_root.get('ValidationBatchSize') self.validation_batch_size = (self.validation_batch_size if self.validation_batch_size else 300) 'DATA AUGMENTATION' self.data_augmentation_root = self.conf['DataAugmentation'] self.da_binaryzation = self.data_augmentation_root.get('Binaryzation') self.da_median_blur = self.data_augmentation_root.get('MedianBlur') self.da_gaussian_blur = self.data_augmentation_root.get('GaussianBlur') self.da_equalize_hist = self.data_augmentation_root.get('EqualizeHist') self.da_laplace = self.data_augmentation_root.get('Laplace') self.da_rotate = self.data_augmentation_root.get('Rotate') self.da_warp_perspective = self.data_augmentation_root.get('WarpPerspective') self.da_sp_noise = self.data_augmentation_root.get('PepperNoise') self.da_brightness = self.data_augmentation_root.get('Brightness') self.da_saturation = self.data_augmentation_root.get('Saturation') self.da_hue = self.data_augmentation_root.get('Hue') self.da_gamma = self.data_augmentation_root.get('Gamma') self.da_channel_swap = self.data_augmentation_root.get('ChannelSwap') self.da_random_blank = self.data_augmentation_root.get('RandomBlank') self.da_random_transition = self.data_augmentation_root.get('RandomTransition') self.da_random_captcha = self.data_augmentation_root.get('RandomCaptcha') if (not self.da_random_captcha): self.da_random_captcha = {'Enable': False, 'FontPath': ''} 'PRETREATMENT' self.pretreatment_root = self.conf['Pretreatment'] self.pre_binaryzation = self.pretreatment_root.get('Binaryzation') self.pre_replace_transparent = self.pretreatment_root.get('ReplaceTransparent') self.pre_horizontal_stitching = self.pretreatment_root.get('HorizontalStitching') self.pre_concat_frames = self.pretreatment_root.get('ConcatFrames') self.pre_blend_frames = self.pretreatment_root.get('BlendFrames') self.pre_exec_map = self.pretreatment_root.get('ExecuteMap') self.pre_exec_map = (self.pre_exec_map if self.pre_exec_map else {}) 'COMPILE_MODEL' self.compile_model_path = os.path.join(self.output_path, 'graph') self.compile_model_path = self.compile_model_path.replace('\\', '/') self.check_field()<|docstring|>MODEL<|endoftext|>
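The read_conf record above reads a fixed set of sections and keys out of self.conf; the sketch below shows the shape of mapping it expects. The key names come straight from the code, but every value here is a made-up placeholder, and the inner keys of sections such as DataAugmentation and Pretreatment are read with .get(), so they may be omitted.

conf = {
    'Model': {'ModelName': 'demo_model', 'ModelField': 'Image', 'ModelScene': 'Classification'},
    'System': {'MemoryUsage': 0.7, 'Version': 2},
    'FieldParam': {'Category': 'ALPHANUMERIC_LOWER', 'ImageChannel': 1, 'ImageWidth': 150,
                   'ImageHeight': 50, 'Resize': [150, 50], 'MaxLabelNum': 4,
                   'AutoPadding': True, 'OutputSplit': ''},
    'NeuralNet': {'CNNNetwork': 'CNN5', 'RecurrentNetwork': 'GRU', 'UnitsNum': 64,
                  'Optimizer': 'RAdam', 'OutputLayer': {'LossFunction': 'CTC', 'Decoder': 'CTC'}},
    'Label': {'LabelFrom': 'FileName', 'ExtractRegex': '.*?(?=_)', 'LabelSplit': ''},
    'Trains': {'DatasetPath': {'Training': ['dataset/train.0.tfrecords'], 'Validation': ['dataset/valid.0.tfrecords']},
               'SourcePath': {'Training': ['sample/train'], 'Validation': ['sample/valid']},
               'ValidationSetNum': 300, 'SavedSteps': 100, 'ValidationSteps': 500,
               'EndAcc': 0.95, 'EndCost': 0.1, 'EndEpochs': 2, 'LearningRate': 0.001,
               'BatchSize': 64, 'ValidationBatchSize': 300},
    'DataAugmentation': {},
    'Pretreatment': {},
}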
0dfe2eb01bfd37947fa900aec4827f80d1fb31b9d71ec5b70761b5e76aa0fc17
def init_loader(args): 'Initialize test DataLoader' if (args.dataset_name is not None): datasets = load_dataset(args.dataset_name, args.dataset_config_name) else: data_files = {'test': args.data_path} extension = args.data_path.split('.')[(- 1)] datasets = load_dataset(extension, data_files=data_files) test_dataset = datasets['test'] if (args.max_test_samples is not None): test_dataset = test_dataset.select(range(args.max_test_samples)) params = {'batch_size': args.test_batch_size, 'shuffle': False} test_loader = torch.utils.data.DataLoader(test_dataset, **params) return test_loader
Initialize test DataLoader
src/loaders.py
init_loader
Shreyas-21/DANCER-summ
7
python
def init_loader(args): if (args.dataset_name is not None): datasets = load_dataset(args.dataset_name, args.dataset_config_name) else: data_files = {'test': args.data_path} extension = args.data_path.split('.')[(- 1)] datasets = load_dataset(extension, data_files=data_files) test_dataset = datasets['test'] if (args.max_test_samples is not None): test_dataset = test_dataset.select(range(args.max_test_samples)) params = {'batch_size': args.test_batch_size, 'shuffle': False} test_loader = torch.utils.data.DataLoader(test_dataset, **params) return test_loader
def init_loader(args): if (args.dataset_name is not None): datasets = load_dataset(args.dataset_name, args.dataset_config_name) else: data_files = {'test': args.data_path} extension = args.data_path.split('.')[(- 1)] datasets = load_dataset(extension, data_files=data_files) test_dataset = datasets['test'] if (args.max_test_samples is not None): test_dataset = test_dataset.select(range(args.max_test_samples)) params = {'batch_size': args.test_batch_size, 'shuffle': False} test_loader = torch.utils.data.DataLoader(test_dataset, **params) return test_loader<|docstring|>Initialize test DataLoader<|endoftext|>
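A small usage sketch for the init_loader record above. The attribute names on args match what the function reads; the file path and sizes are hypothetical, and load_dataset is assumed to be HuggingFace's datasets.load_dataset, as the call signature suggests.

from types import SimpleNamespace
from src.loaders import init_loader  # module path taken from the record's path field

args = SimpleNamespace(
    dataset_name=None,           # no hub dataset, fall back to a local file
    dataset_config_name=None,
    data_path='data/test.csv',   # the extension ('csv') selects the loader
    max_test_samples=100,        # keep only the first 100 rows
    test_batch_size=8,
)

test_loader = init_loader(args)
for batch in test_loader:
    # each batch is a dict keyed by dataset column, with test_batch_size items per key
    break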
f06d1f68ba12ba0d86e5706514e7045670c5cb6ec29613f42b6eb1a253b4e8d4
def load_model(args, device): 'Load model and tokenizer' print(f'Loading tokenizer {(args.tokenizer_name if args.tokenizer_name else args.model_path)}') tokenizer = AutoTokenizer.from_pretrained((args.tokenizer_name if args.tokenizer_name else args.model_path)) print(f'Loading model from {args.model_path}') model = AutoModelForSeq2SeqLM.from_pretrained(args.model_path).to(device) return (model, tokenizer)
Load model and tokenizer
src/loaders.py
load_model
Shreyas-21/DANCER-summ
7
python
def load_model(args, device): print(f'Loading tokenizer {(args.tokenizer_name if args.tokenizer_name else args.model_path)}') tokenizer = AutoTokenizer.from_pretrained((args.tokenizer_name if args.tokenizer_name else args.model_path)) print(f'Loading model from {args.model_path}') model = AutoModelForSeq2SeqLM.from_pretrained(args.model_path).to(device) return (model, tokenizer)
def load_model(args, device): print(f'Loading tokenizer {(args.tokenizer_name if args.tokenizer_name else args.model_path)}') tokenizer = AutoTokenizer.from_pretrained((args.tokenizer_name if args.tokenizer_name else args.model_path)) print(f'Loading model from {args.model_path}') model = AutoModelForSeq2SeqLM.from_pretrained(args.model_path).to(device) return (model, tokenizer)<|docstring|>Load model and tokenizer<|endoftext|>
e734b190c6b313c30cf41bd2fdc7b2c746815cd7d296c56e947880cc7a80a5ba
def __init__(self, context, scenario): '\n Create and enter a temp directory which will be used to store xpedite application information\n If a target application is being run remotely, TargetLauncher will create the temp directory on the remote host\n ' self.targetApp = scenario.makeTargetApp(context) self.xpediteApp = scenario.makeXpediteApp(context.workspace)
Create and enter a temp directory which will be used to store xpedite application information If a target application is being run remotely, TargetLauncher will create the temp directory on the remote host
test/pytest/test_xpedite/test_profiler/app.py
__init__
mdlugajczyk/Xpedite
99
python
def __init__(self, context, scenario): '\n Create and enter a temp directory which will be used to store xpedite application information\n If a target application is being run remotely, TargetLauncher will create the temp directory on the remote host\n ' self.targetApp = scenario.makeTargetApp(context) self.xpediteApp = scenario.makeXpediteApp(context.workspace)
def __init__(self, context, scenario): '\n Create and enter a temp directory which will be used to store xpedite application information\n If a target application is being run remotely, TargetLauncher will create the temp directory on the remote host\n ' self.targetApp = scenario.makeTargetApp(context) self.xpediteApp = scenario.makeXpediteApp(context.workspace)<|docstring|>Create and enter a temp directory which will be used to store xpedite application information If a target application is being run remotely, TargetLauncher will create the temp directory on the remote host<|endoftext|>
4eed2cafe5415085ddbb6b349a4040e4890d8cc92e3128c82dac93ace78f9f2a
def __init__(self, to, subject, sender, aws_access_key, aws_secret_key, aws_region='us-east-1'): '\n :param to:\n :param subject:\n :return:\n ' self.connection = None self.to = to self.subject = subject self._html = None self._text = None self._format = 'html' self.def_sender = sender is_test = os.environ.get(UNIT_TEST_KEY, False) if (not is_test): self.connection = boto3.client('ses', aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key, region_name=aws_region)
:param to: :param subject: :return:
datacoco_cloud/ses_interaction.py
__init__
Phil-Ocone/datacoco-cloud
1
python
def __init__(self, to, subject, sender, aws_access_key, aws_secret_key, aws_region='us-east-1'): '\n :param to:\n :param subject:\n :return:\n ' self.connection = None self.to = to self.subject = subject self._html = None self._text = None self._format = 'html' self.def_sender = sender is_test = os.environ.get(UNIT_TEST_KEY, False) if (not is_test): self.connection = boto3.client('ses', aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key, region_name=aws_region)
def __init__(self, to, subject, sender, aws_access_key, aws_secret_key, aws_region='us-east-1'): '\n :param to:\n :param subject:\n :return:\n ' self.connection = None self.to = to self.subject = subject self._html = None self._text = None self._format = 'html' self.def_sender = sender is_test = os.environ.get(UNIT_TEST_KEY, False) if (not is_test): self.connection = boto3.client('ses', aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key, region_name=aws_region)<|docstring|>:param to: :param subject: :return:<|endoftext|>
9feb10e239b447f4ac2be114de26e4baea0a328cb92846118b14f4f34698505f
def html(self, html): "\n sets email html message property\n :param html:\n :return:\n " self._html = html
sets email html message property :param html: :return:
datacoco_cloud/ses_interaction.py
html
Phil-Ocone/datacoco-cloud
1
python
def html(self, html): "\n sets email html message property\n :param html:\n :return:\n " self._html = html
def html(self, html): "\n sets email html message property\n :param html:\n :return:\n " self._html = html<|docstring|>sets email html message property :param html: :return:<|endoftext|>
670e35429a41e744d4c56e508865d063e3444969d4d358782a1d999406c03416
def text(self, text): "\n sets email text message property\n :param text:\n :return:\n " self._text = text
sets email text message property :param text: :return:
datacoco_cloud/ses_interaction.py
text
Phil-Ocone/datacoco-cloud
1
python
def text(self, text): "\n sets email text message property\n :param text:\n :return:\n " self._text = text
def text(self, text): "\n sets email text message property\n :param text:\n :return:\n " self._text = text<|docstring|>sets email text message property :param text: :return:<|endoftext|>
ef43abffa2c2272459b2103d2cd349888af5ec1c9c111562ec0ee50de6fcd326
def send(self, from_addr=None): '\n sends email\n :param from_addr:\n :return:\n ' body = self._html if isinstance(self.to, basestring): self.to = [self.to] if (not from_addr): from_addr = self.def_sender if ((not self._html) and (not self._text)): raise Exception('You must provide a text or html body.') if (not self._html): self._format = 'text' body = self._text return self.connection.send_email(Source=from_addr, Destination={'ToAddresses': self.to}, Message={'Subject': {'Data': self.subject}, 'Body': {'Text': {'Data': body}, 'Html': {'Data': body}}})
sends email :param from_addr: :return:
datacoco_cloud/ses_interaction.py
send
Phil-Ocone/datacoco-cloud
1
python
def send(self, from_addr=None): '\n sends email\n :param from_addr:\n :return:\n ' body = self._html if isinstance(self.to, basestring): self.to = [self.to] if (not from_addr): from_addr = self.def_sender if ((not self._html) and (not self._text)): raise Exception('You must provide a text or html body.') if (not self._html): self._format = 'text' body = self._text return self.connection.send_email(Source=from_addr, Destination={'ToAddresses': self.to}, Message={'Subject': {'Data': self.subject}, 'Body': {'Text': {'Data': body}, 'Html': {'Data': body}}})
def send(self, from_addr=None): '\n sends email\n :param from_addr:\n :return:\n ' body = self._html if isinstance(self.to, basestring): self.to = [self.to] if (not from_addr): from_addr = self.def_sender if ((not self._html) and (not self._text)): raise Exception('You must provide a text or html body.') if (not self._html): self._format = 'text' body = self._text return self.connection.send_email(Source=from_addr, Destination={'ToAddresses': self.to}, Message={'Subject': {'Data': self.subject}, 'Body': {'Text': {'Data': body}, 'Html': {'Data': body}}})<|docstring|>sends email :param from_addr: :return:<|endoftext|>
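The four ses_interaction.py records above (__init__, html, text, send) belong to a single SES e-mail helper class whose name is not shown in this slice, so the sketch below calls it SESMailer purely as a placeholder; the credentials and addresses are likewise hypothetical, and html/text are used exactly as the plain methods shown above. Note that send() raises unless an html or text body was set first, and it sends the html body when both are present.

from datacoco_cloud.ses_interaction import SESMailer  # placeholder class name, see note above

mailer = SESMailer(
    to=['[email protected]'],
    subject='Nightly ETL report',
    sender='[email protected]',
    aws_access_key='AKIA...',   # hypothetical credentials
    aws_secret_key='...',
    aws_region='us-east-1',
)
mailer.html('<h1>Load complete</h1><p>42 tables refreshed.</p>')  # stored on self._html
mailer.send()  # no from_addr given, so the default sender passed to __init__ is used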
de7931b4e1b4280b45155f897da01d4cb815badc9ae19d4a6d893a45da34dd58
def pool_output_length(input_length, pool_size, stride, pad, ignore_border): '\n Compute the output length of a pooling operator\n along a single dimension.\n\n Parameters\n ----------\n input_length : integer\n The length of the input in the pooling dimension\n pool_size : integer\n The length of the pooling region\n stride : integer\n The stride between successive pooling regions\n pad : integer\n The number of elements to be added to the input on each side.\n ignore_border: bool\n If ``True``, partial pooling regions will be ignored.\n Must be ``True`` if ``pad != 0``.\n\n Returns\n -------\n output_length\n * None if either input is None.\n * Computed length of the pooling operator otherwise.\n\n Notes\n -----\n When ``ignore_border == True``, this is given by the number of full\n pooling regions that fit in the padded input length,\n divided by the stride (rounding down).\n\n If ``ignore_border == False``, a single partial pooling region is\n appended if at least one input element would be left uncovered otherwise.\n ' if ((input_length is None) or (pool_size is None)): return None if ignore_border: output_length = (((input_length + (2 * pad)) - pool_size) + 1) output_length = (((output_length + stride) - 1) // stride) else: assert (pad == 0) if (stride >= pool_size): output_length = (((input_length + stride) - 1) // stride) else: output_length = (max(0, ((((input_length - pool_size) + stride) - 1) // stride)) + 1) return output_length
Compute the output length of a pooling operator along a single dimension. Parameters ---------- input_length : integer The length of the input in the pooling dimension pool_size : integer The length of the pooling region stride : integer The stride between successive pooling regions pad : integer The number of elements to be added to the input on each side. ignore_border: bool If ``True``, partial pooling regions will be ignored. Must be ``True`` if ``pad != 0``. Returns ------- output_length * None if either input is None. * Computed length of the pooling operator otherwise. Notes ----- When ``ignore_border == True``, this is given by the number of full pooling regions that fit in the padded input length, divided by the stride (rounding down). If ``ignore_border == False``, a single partial pooling region is appended if at least one input element would be left uncovered otherwise.
lasagne/layers/pool.py
pool_output_length
BenjaminBossan/Lasagne
0
python
def pool_output_length(input_length, pool_size, stride, pad, ignore_border): '\n Compute the output length of a pooling operator\n along a single dimension.\n\n Parameters\n ----------\n input_length : integer\n The length of the input in the pooling dimension\n pool_size : integer\n The length of the pooling region\n stride : integer\n The stride between successive pooling regions\n pad : integer\n The number of elements to be added to the input on each side.\n ignore_border: bool\n If ``True``, partial pooling regions will be ignored.\n Must be ``True`` if ``pad != 0``.\n\n Returns\n -------\n output_length\n * None if either input is None.\n * Computed length of the pooling operator otherwise.\n\n Notes\n -----\n When ``ignore_border == True``, this is given by the number of full\n pooling regions that fit in the padded input length,\n divided by the stride (rounding down).\n\n If ``ignore_border == False``, a single partial pooling region is\n appended if at least one input element would be left uncovered otherwise.\n ' if ((input_length is None) or (pool_size is None)): return None if ignore_border: output_length = (((input_length + (2 * pad)) - pool_size) + 1) output_length = (((output_length + stride) - 1) // stride) else: assert (pad == 0) if (stride >= pool_size): output_length = (((input_length + stride) - 1) // stride) else: output_length = (max(0, ((((input_length - pool_size) + stride) - 1) // stride)) + 1) return output_length
def pool_output_length(input_length, pool_size, stride, pad, ignore_border): '\n Compute the output length of a pooling operator\n along a single dimension.\n\n Parameters\n ----------\n input_length : integer\n The length of the input in the pooling dimension\n pool_size : integer\n The length of the pooling region\n stride : integer\n The stride between successive pooling regions\n pad : integer\n The number of elements to be added to the input on each side.\n ignore_border: bool\n If ``True``, partial pooling regions will be ignored.\n Must be ``True`` if ``pad != 0``.\n\n Returns\n -------\n output_length\n * None if either input is None.\n * Computed length of the pooling operator otherwise.\n\n Notes\n -----\n When ``ignore_border == True``, this is given by the number of full\n pooling regions that fit in the padded input length,\n divided by the stride (rounding down).\n\n If ``ignore_border == False``, a single partial pooling region is\n appended if at least one input element would be left uncovered otherwise.\n ' if ((input_length is None) or (pool_size is None)): return None if ignore_border: output_length = (((input_length + (2 * pad)) - pool_size) + 1) output_length = (((output_length + stride) - 1) // stride) else: assert (pad == 0) if (stride >= pool_size): output_length = (((input_length + stride) - 1) // stride) else: output_length = (max(0, ((((input_length - pool_size) + stride) - 1) // stride)) + 1) return output_length<|docstring|>Compute the output length of a pooling operator along a single dimension. Parameters ---------- input_length : integer The length of the input in the pooling dimension pool_size : integer The length of the pooling region stride : integer The stride between successive pooling regions pad : integer The number of elements to be added to the input on each side. ignore_border: bool If ``True``, partial pooling regions will be ignored. Must be ``True`` if ``pad != 0``. Returns ------- output_length * None if either input is None. * Computed length of the pooling operator otherwise. Notes ----- When ``ignore_border == True``, this is given by the number of full pooling regions that fit in the padded input length, divided by the stride (rounding down). If ``ignore_border == False``, a single partial pooling region is appended if at least one input element would be left uncovered otherwise.<|endoftext|>
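A few worked values for the pool_output_length record above, checking the two notes in its docstring: with ignore_border=True only full pooling regions inside the (padded) input count, while ignore_border=False appends one partial region if anything would otherwise be left uncovered.

# 10 inputs, window 3, stride 2, no padding: full regions start at 0, 2, 4, 6 -> 4 outputs
assert pool_output_length(10, pool_size=3, stride=2, pad=0, ignore_border=True) == 4
# same setup, but the trailing partial region (starting at 8) is kept -> 5 outputs
assert pool_output_length(10, pool_size=3, stride=2, pad=0, ignore_border=False) == 5
# padding enlarges the input to 10 + 2*1 = 12: ((12 - 3 + 1) + 2 - 1) // 2 == 5
assert pool_output_length(10, pool_size=3, stride=2, pad=1, ignore_border=True) == 5
# unknown input length propagates as None
assert pool_output_length(None, pool_size=3, stride=2, pad=0, ignore_border=True) is None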
a5df83ad447423a16cd91cf2cc2bb3a32a284fbc058de77f6f8f460ec435465b
def pool_2d(input, **kwargs): '\n Wrapper function that calls :func:`theano.tensor.signal.pool_2d` either\n with the new or old keyword argument names expected by Theano.\n ' try: return T.signal.pool.pool_2d(input, **kwargs) except TypeError: kwargs['ds'] = kwargs.pop('ws') kwargs['st'] = kwargs.pop('stride') kwargs['padding'] = kwargs.pop('pad') return T.signal.pool.pool_2d(input, **kwargs)
Wrapper function that calls :func:`theano.tensor.signal.pool_2d` either with the new or old keyword argument names expected by Theano.
lasagne/layers/pool.py
pool_2d
BenjaminBossan/Lasagne
0
python
def pool_2d(input, **kwargs): '\n Wrapper function that calls :func:`theano.tensor.signal.pool_2d` either\n with the new or old keyword argument names expected by Theano.\n ' try: return T.signal.pool.pool_2d(input, **kwargs) except TypeError: kwargs['ds'] = kwargs.pop('ws') kwargs['st'] = kwargs.pop('stride') kwargs['padding'] = kwargs.pop('pad') return T.signal.pool.pool_2d(input, **kwargs)
def pool_2d(input, **kwargs): '\n Wrapper function that calls :func:`theano.tensor.signal.pool_2d` either\n with the new or old keyword argument names expected by Theano.\n ' try: return T.signal.pool.pool_2d(input, **kwargs) except TypeError: kwargs['ds'] = kwargs.pop('ws') kwargs['st'] = kwargs.pop('stride') kwargs['padding'] = kwargs.pop('pad') return T.signal.pool.pool_2d(input, **kwargs)<|docstring|>Wrapper function that calls :func:`theano.tensor.signal.pool_2d` either with the new or old keyword argument names expected by Theano.<|endoftext|>
42e9f2b53049f8378c70f7e4d1763e19d4986a779157f8d7e0511934da33cac2
@progress.setter def progress(self, progress): 'Should be overridden by different Task types.' self._progress = progress
Should be overridden by different Task types.
crispy/tasks.py
progress
StephenHermes/crispy
0
python
@progress.setter def progress(self, progress): self._progress = progress
@progress.setter def progress(self, progress): self._progress = progress<|docstring|>Should be overridden by different Task types.<|endoftext|>
0e7b100326078844ee1b07c18d05cb0d5bf062ed8f2b745b4005f9867e76e6df
def test_index(client): 'Check the index page loads.' response = client.get('/') assert (response.status_code == 200)
Check the index page loads.
test/test_app.py
test_index
j-penson/image-distortion
0
python
def test_index(client): response = client.get('/') assert (response.status_code == 200)
def test_index(client): response = client.get('/') assert (response.status_code == 200)<|docstring|>Check the index page loads.<|endoftext|>
ccb176e3535e7429040a53a0fd4c7dd04937e25921183175501c08fcce3893a9
def test_endpoint(client): 'Check the index page loads.' response = client.get('/v1/image') assert (response.status_code == 200)
Check the index page loads.
test/test_app.py
test_endpoint
j-penson/image-distortion
0
python
def test_endpoint(client): response = client.get('/v1/image') assert (response.status_code == 200)
def test_endpoint(client): response = client.get('/v1/image') assert (response.status_code == 200)<|docstring|>Check the index page loads.<|endoftext|>
ef201c4579fe609d59eb4378906fc6f8ce8c2c622b0cae2848d704801cd05ece
@manager.option('suite', default='all', nargs='?', choices=suites.keys(), help='Specify test suite to run (default all)') @manager.option('--spec', action='store_true', help='Output in spec style') def test(spec, suite): 'Runs tests' args = [] if spec: args.extend(['--spec']) if (not suite): suite = 'all' args.extend(suites[suite]) return pytest.main(args)
Runs tests
manage.py
test
crossgovernmentservices/csd_notes
0
python
@manager.option('suite', default='all', nargs='?', choices=suites.keys(), help='Specify test suite to run (default all)') @manager.option('--spec', action='store_true', help='Output in spec style') def test(spec, suite): args = [] if spec: args.extend(['--spec']) if (not suite): suite = 'all' args.extend(suites[suite]) return pytest.main(args)
@manager.option('suite', default='all', nargs='?', choices=suites.keys(), help='Specify test suite to run (default all)') @manager.option('--spec', action='store_true', help='Output in spec style') def test(spec, suite): args = [] if spec: args.extend(['--spec']) if (not suite): suite = 'all' args.extend(suites[suite]) return pytest.main(args)<|docstring|>Runs tests<|endoftext|>
e812f46d995e29059073e7e24f0211bdb3ddd403b1a96b75fd7002cb2787dbb6
def get_system_info(): '\n Get information about the system to be inserted into the User-Agent header.\n ' return 'lang={0}; arch={1}; os={2}; python.version={3}'.format('python', platform.machine(), platform.system(), platform.python_version())
Get information about the system to be inserted into the User-Agent header.
eventstreams_sdk/common.py
get_system_info
IBM/eventstreams-python-sdk
2
python
def get_system_info(): '\n \n ' return 'lang={0}; arch={1}; os={2}; python.version={3}'.format('python', platform.machine(), platform.system(), platform.python_version())
def get_system_info(): '\n \n ' return 'lang={0}; arch={1}; os={2}; python.version={3}'.format('python', platform.machine(), platform.system(), platform.python_version())<|docstring|>Get information about the system to be inserted into the User-Agent header.<|endoftext|>
f842c6e85de8afbf9f5613362baa495f8ac084fcc9202a1fbe7e459949db72b7
def get_user_agent(): '\n Get the value to be sent in the User-Agent header.\n ' return USER_AGENT
Get the value to be sent in the User-Agent header.
eventstreams_sdk/common.py
get_user_agent
IBM/eventstreams-python-sdk
2
python
def get_user_agent(): '\n \n ' return USER_AGENT
def get_user_agent(): '\n \n ' return USER_AGENT<|docstring|>Get the value to be sent in the User-Agent header.<|endoftext|>
35f0e7ba46352d79dce1561323a3e80f67a2078519b2a22c3a8e678d751fd30e
def get_sdk_headers(service_name, service_version, operation_id): '\n Get the request headers to be sent in requests by the SDK.\n \n If you plan to gather metrics for your SDK, the User-Agent header value must\n be a string similar to the following:\n eventstreams-python-sdk/0.0.1 (lang=python; arch=x86_64; os=Linux; python.version=3.7.4)\n\n In the example above, the analytics tool will parse the user-agent header and\n use the following properties:\n "eventstreams-python-sdk" - the name of your sdk\n "0.0.1"- the version of your sdk\n "lang=python" - the language of the current sdk\n "arch=x86_64; os=Linux; python.version=3.7.4" - system information\n\n Note: It is very important that the sdk name ends with the string `-sdk`,\n as the analytics data collector uses this to gather usage data.\n ' headers = {} headers[HEADER_NAME_USER_AGENT] = get_user_agent() return headers
Get the request headers to be sent in requests by the SDK. If you plan to gather metrics for your SDK, the User-Agent header value must be a string similar to the following: eventstreams-python-sdk/0.0.1 (lang=python; arch=x86_64; os=Linux; python.version=3.7.4) In the example above, the analytics tool will parse the user-agent header and use the following properties: "eventstreams-python-sdk" - the name of your sdk "0.0.1"- the version of your sdk "lang=python" - the language of the current sdk "arch=x86_64; os=Linux; python.version=3.7.4" - system information Note: It is very important that the sdk name ends with the string `-sdk`, as the analytics data collector uses this to gather usage data.
eventstreams_sdk/common.py
get_sdk_headers
IBM/eventstreams-python-sdk
2
python
def get_sdk_headers(service_name, service_version, operation_id): '\n Get the request headers to be sent in requests by the SDK.\n \n If you plan to gather metrics for your SDK, the User-Agent header value must\n be a string similar to the following:\n eventstreams-python-sdk/0.0.1 (lang=python; arch=x86_64; os=Linux; python.version=3.7.4)\n\n In the example above, the analytics tool will parse the user-agent header and\n use the following properties:\n "eventstreams-python-sdk" - the name of your sdk\n "0.0.1"- the version of your sdk\n "lang=python" - the language of the current sdk\n "arch=x86_64; os=Linux; python.version=3.7.4" - system information\n\n Note: It is very important that the sdk name ends with the string `-sdk`,\n as the analytics data collector uses this to gather usage data.\n ' headers = {} headers[HEADER_NAME_USER_AGENT] = get_user_agent() return headers
def get_sdk_headers(service_name, service_version, operation_id): '\n Get the request headers to be sent in requests by the SDK.\n \n If you plan to gather metrics for your SDK, the User-Agent header value must\n be a string similar to the following:\n eventstreams-python-sdk/0.0.1 (lang=python; arch=x86_64; os=Linux; python.version=3.7.4)\n\n In the example above, the analytics tool will parse the user-agent header and\n use the following properties:\n "eventstreams-python-sdk" - the name of your sdk\n "0.0.1"- the version of your sdk\n "lang=python" - the language of the current sdk\n "arch=x86_64; os=Linux; python.version=3.7.4" - system information\n\n Note: It is very important that the sdk name ends with the string `-sdk`,\n as the analytics data collector uses this to gather usage data.\n ' headers = {} headers[HEADER_NAME_USER_AGENT] = get_user_agent() return headers<|docstring|>Get the request headers to be sent in requests by the SDK. If you plan to gather metrics for your SDK, the User-Agent header value must be a string similar to the following: eventstreams-python-sdk/0.0.1 (lang=python; arch=x86_64; os=Linux; python.version=3.7.4) In the example above, the analytics tool will parse the user-agent header and use the following properties: "eventstreams-python-sdk" - the name of your sdk "0.0.1"- the version of your sdk "lang=python" - the language of the current sdk "arch=x86_64; os=Linux; python.version=3.7.4" - system information Note: It is very important that the sdk name ends with the string `-sdk`, as the analytics data collector uses this to gather usage data.<|endoftext|>
486f4a0e64deaaca514b569fb6cdb4701dc8784f56a046d0e29ce315bfab14bd
@property def base_api_url(self) -> str: 'The provider base REST API URL' return self._config.api_base_url
The provider base REST API URL
jupyterlab_pullrequests/managers/manager.py
base_api_url
fcollonval/pull-requests
32
python
@property def base_api_url(self) -> str: return self._config.api_base_url
@property def base_api_url(self) -> str: return self._config.api_base_url<|docstring|>The provider base REST API URL<|endoftext|>
5d4b7b93f46e3a20b0306a9dd7022ebfef5db4fb4c38774b7c1fd4f0e45156a7
@property def per_page_argument(self) -> Optional[Tuple[(str, int)]]: 'Returns query argument to set number of items per page.\n\n Returns\n [str, int]: (query argument name, value)\n None: the provider does not support pagination\n ' return None
Returns query argument to set number of items per page. Returns [str, int]: (query argument name, value) None: the provider does not support pagination
jupyterlab_pullrequests/managers/manager.py
per_page_argument
fcollonval/pull-requests
32
python
@property def per_page_argument(self) -> Optional[Tuple[(str, int)]]: 'Returns query argument to set number of items per page.\n\n Returns\n [str, int]: (query argument name, value)\n None: the provider does not support pagination\n ' return None
@property def per_page_argument(self) -> Optional[Tuple[(str, int)]]: 'Returns query argument to set number of items per page.\n\n Returns\n [str, int]: (query argument name, value)\n None: the provider does not support pagination\n ' return None<|docstring|>Returns query argument to set number of items per page. Returns [str, int]: (query argument name, value) None: the provider does not support pagination<|endoftext|>
f1292826fb9ce348daa666b79df70a95992a762a6b48be559e7ff57aecef998e
@abc.abstractmethod async def get_current_user(self) -> str: 'Get the current user ID.' raise NotImplementedError()
Get the current user ID.
jupyterlab_pullrequests/managers/manager.py
get_current_user
fcollonval/pull-requests
32
python
@abc.abstractmethod async def get_current_user(self) -> str: raise NotImplementedError()
@abc.abstractmethod async def get_current_user(self) -> str: raise NotImplementedError()<|docstring|>Get the current user ID.<|endoftext|>
81f8cc9cd628a1d16f5d5641bbc3f0eb0d8cd8e52563f5168ac9765fe1e419db
@abc.abstractmethod async def get_file_diff(self, pr_id: str, filename: str) -> dict: 'Get the file diff for the pull request.\n\n Args:\n pr_id: pull request ID endpoint\n filename: The file name\n Returns:\n The file diff description\n ' raise NotImplementedError()
Get the file diff for the pull request. Args: pr_id: pull request ID endpoint filename: The file name Returns: The file diff description
jupyterlab_pullrequests/managers/manager.py
get_file_diff
fcollonval/pull-requests
32
python
@abc.abstractmethod async def get_file_diff(self, pr_id: str, filename: str) -> dict: 'Get the file diff for the pull request.\n\n Args:\n pr_id: pull request ID endpoint\n filename: The file name\n Returns:\n The file diff description\n ' raise NotImplementedError()
@abc.abstractmethod async def get_file_diff(self, pr_id: str, filename: str) -> dict: 'Get the file diff for the pull request.\n\n Args:\n pr_id: pull request ID endpoint\n filename: The file name\n Returns:\n The file diff description\n ' raise NotImplementedError()<|docstring|>Get the file diff for the pull request. Args: pr_id: pull request ID endpoint filename: The file name Returns: The file diff description<|endoftext|>
87a8dd6bf03582b94cba7b5e3b5295ad0dc997361a9e89b6fa919f759320b5c2
@abc.abstractmethod async def get_threads(self, pr_id: str, filename: Optional[str]=None) -> List[dict]: 'Get the discussions on a file or the pull request.\n\n Args:\n pr_id: pull request ID endpoint\n filename: The file name; None to get the discussion on the pull requests\n Returns:\n The discussions\n ' raise NotImplementedError()
Get the discussions on a file or the pull request. Args: pr_id: pull request ID endpoint filename: The file name; None to get the discussion on the pull requests Returns: The discussions
jupyterlab_pullrequests/managers/manager.py
get_threads
fcollonval/pull-requests
32
python
@abc.abstractmethod async def get_threads(self, pr_id: str, filename: Optional[str]=None) -> List[dict]: 'Get the discussions on a file or the pull request.\n\n Args:\n pr_id: pull request ID endpoint\n filename: The file name; None to get the discussion on the pull requests\n Returns:\n The discussions\n ' raise NotImplementedError()
@abc.abstractmethod async def get_threads(self, pr_id: str, filename: Optional[str]=None) -> List[dict]: 'Get the discussions on a file or the pull request.\n\n Args:\n pr_id: pull request ID endpoint\n filename: The file name; None to get the discussion on the pull requests\n Returns:\n The discussions\n ' raise NotImplementedError()<|docstring|>Get the discussions on a file or the pull request. Args: pr_id: pull request ID endpoint filename: The file name; None to get the discussion on the pull requests Returns: The discussions<|endoftext|>
5e9a1b1c94335f57a67432313080d799e9e57c20f3c7153b21c3296d9f3de08e
@abc.abstractmethod async def list_files(self, pr_id: str) -> list: 'Get the list of modified files for a pull request.\n\n Args:\n pr_id: pull request ID endpoint\n Returns:\n The list of modified files\n ' raise NotImplementedError()
Get the list of modified files for a pull request. Args: pr_id: pull request ID endpoint Returns: The list of modified files
jupyterlab_pullrequests/managers/manager.py
list_files
fcollonval/pull-requests
32
python
@abc.abstractmethod async def list_files(self, pr_id: str) -> list: 'Get the list of modified files for a pull request.\n\n Args:\n pr_id: pull request ID endpoint\n Returns:\n The list of modified files\n ' raise NotImplementedError()
@abc.abstractmethod async def list_files(self, pr_id: str) -> list: 'Get the list of modified files for a pull request.\n\n Args:\n pr_id: pull request ID endpoint\n Returns:\n The list of modified files\n ' raise NotImplementedError()<|docstring|>Get the list of modified files for a pull request. Args: pr_id: pull request ID endpoint Returns: The list of modified files<|endoftext|>
2c714dfc339b4b2c472009ce71041e4ec5d7ab40d00f0ba15e61d71c417d64a6
@abc.abstractmethod async def list_prs(self, username: str, pr_filter: str) -> list: 'Returns the list of pull requests for the given user.\n\n Args:\n username: User ID for the versioning service\n pr_filter: Filter to add to the pull requests requests\n Returns:\n The list of pull requests\n ' raise NotImplementedError()
Returns the list of pull requests for the given user. Args: username: User ID for the versioning service pr_filter: Filter to add to the pull requests requests Returns: The list of pull requests
jupyterlab_pullrequests/managers/manager.py
list_prs
fcollonval/pull-requests
32
python
@abc.abstractmethod async def list_prs(self, username: str, pr_filter: str) -> list: 'Returns the list of pull requests for the given user.\n\n Args:\n username: User ID for the versioning service\n pr_filter: Filter to add to the pull requests requests\n Returns:\n The list of pull requests\n ' raise NotImplementedError()
@abc.abstractmethod async def list_prs(self, username: str, pr_filter: str) -> list: 'Returns the list of pull requests for the given user.\n\n Args:\n username: User ID for the versioning service\n pr_filter: Filter to add to the pull requests requests\n Returns:\n The list of pull requests\n ' raise NotImplementedError()<|docstring|>Returns the list of pull requests for the given user. Args: username: User ID for the versioning service pr_filter: Filter to add to the pull requests requests Returns: The list of pull requests<|endoftext|>
5930610c6ea94fd87177154aa0abee12b364832fd6ec6fde5991e371d0949789
@abc.abstractmethod async def post_comment(self, pr_id: str, filename: str, body: str) -> Dict[(str, str)]: 'Create a new comment on a file or a the pull request.\n\n Args:\n pr_id: pull request ID endpoint\n filename: The file name; None to comment on the pull request\n body: Comment body\n Returns:\n The created comment\n ' raise NotImplementedError()
Create a new comment on a file or a the pull request. Args: pr_id: pull request ID endpoint filename: The file name; None to comment on the pull request body: Comment body Returns: The created comment
jupyterlab_pullrequests/managers/manager.py
post_comment
fcollonval/pull-requests
32
python
@abc.abstractmethod async def post_comment(self, pr_id: str, filename: str, body: str) -> Dict[(str, str)]: 'Create a new comment on a file or a the pull request.\n\n Args:\n pr_id: pull request ID endpoint\n filename: The file name; None to comment on the pull request\n body: Comment body\n Returns:\n The created comment\n ' raise NotImplementedError()
@abc.abstractmethod async def post_comment(self, pr_id: str, filename: str, body: str) -> Dict[(str, str)]: 'Create a new comment on a file or a the pull request.\n\n Args:\n pr_id: pull request ID endpoint\n filename: The file name; None to comment on the pull request\n body: Comment body\n Returns:\n The created comment\n ' raise NotImplementedError()<|docstring|>Create a new comment on a file or a the pull request. Args: pr_id: pull request ID endpoint filename: The file name; None to comment on the pull request body: Comment body Returns: The created comment<|endoftext|>
1363baf79b4e752bea64d31b7147507cbcb4625c55a3ad58caba3d5958676e13
async def _call_provider(self, url: str, load_json: bool=True, method: str='GET', body: Optional[dict]=None, params: Optional[Dict[(str, str)]]=None, headers: Optional[Dict[(str, str)]]=None, has_pagination: bool=True) -> Union[(dict, str)]: 'Call the third party service\n\n The request is presumed to support pagination by default if\n - The method is GET\n - load_json is True\n - The provider returns not None per_page_argument property\n\n Args:\n url: Endpoint to request\n load_json: Is the response of JSON type\n method: HTTP method\n body: Request body; None if no body\n params: Query arguments as dictionary; None if no arguments\n headers: Request headers as dictionary; None if no headers\n has_pagination: Whether the pagination query arguments should be appended\n Returns:\n List or Dict: Create from JSON response body if load_json is True\n str: Raw response body if load_json is False\n ' if (not self._config.access_token): raise tornado.web.HTTPError(status_code=http.HTTPStatus.BAD_REQUEST, reason='No access token specified. Please set PRConfig.access_token in your user jupyter_server_config file.') if (body is not None): if (headers is None): headers = {} headers['Content-Type'] = 'application/json' body = tornado.escape.json_encode(body) if ((not url.startswith(self.base_api_url)) and (not re.search('^https?:', url))): url = url_path_join(self.base_api_url, url) with_pagination = False if (load_json and has_pagination and (method.lower() == 'get') and (self.per_page_argument is not None)): with_pagination = True params = (params or {}) params.update([self.per_page_argument]) if (params is not None): url = tornado.httputil.url_concat(url, params) request = tornado.httpclient.HTTPRequest(url, user_agent=f'JupyterLab Pull Requests v{__version__}', method=method.upper(), body=body, headers=headers) self.log.debug(f'{method.upper()} {url}') try: response = (await self._client.fetch(request)) result = response.body.decode('utf-8') if load_json: link = response.headers.get('Link') next_url = None if (link is not None): for e in link.split(','): args = e.strip().split(';') data = args[0] metadata = {k.strip(): v.strip().strip('"') for (k, v) in map((lambda s: s.strip().split('=')), args[1:])} if (metadata.get('rel', '') == 'next'): next_url = data[1:(- 1)] break new_ = json.loads(result) if (next_url is not None): next_ = (await self._call_provider(next_url, load_json=load_json, method=method, body=body, headers=headers, has_pagination=False)) if (not isinstance(new_, list)): new_ = [new_] if (not isinstance(next_, list)): next_ = [next_] return (new_ + next_) elif (with_pagination and (not isinstance(new_, list))): return [new_] else: return new_ else: return result except tornado.httpclient.HTTPClientError as e: self.log.debug(f'Failed to fetch {request.method} {request.url}', exc_info=e) error_body = ((e.response.body or b'{}').decode('utf-8') if (e.response is not None) else '{}') self.log.debug(error_body) try: message = json.loads(error_body).get('message', str(e)) except json.JSONDecodeError: message = str(e) raise tornado.web.HTTPError(status_code=e.code, reason=f"Invalid response in '{url}': {message}") from e except (json.JSONDecodeError, UnicodeDecodeError) as e: self.log.error('Failed to decode the response', exc_info=e) raise tornado.web.HTTPError(status_code=http.HTTPStatus.BAD_REQUEST, reason=f"Invalid response in '{url}': {e}") from e except Exception as e: self.log.error('Failed to fetch http request', exc_info=e) raise tornado.web.HTTPError(status_code=http.HTTPStatus.INTERNAL_SERVER_ERROR, reason=f"Unknown error in '{url}': {e}") from e
Call the third party service The request is presumed to support pagination by default if - The method is GET - load_json is True - The provider returns not None per_page_argument property Args: url: Endpoint to request load_json: Is the response of JSON type method: HTTP method body: Request body; None if no body params: Query arguments as dictionary; None if no arguments headers: Request headers as dictionary; None if no headers has_pagination: Whether the pagination query arguments should be appended Returns: List or Dict: Create from JSON response body if load_json is True str: Raw response body if load_json is False
jupyterlab_pullrequests/managers/manager.py
_call_provider
fcollonval/pull-requests
32
python
async def _call_provider(self, url: str, load_json: bool=True, method: str='GET', body: Optional[dict]=None, params: Optional[Dict[(str, str)]]=None, headers: Optional[Dict[(str, str)]]=None, has_pagination: bool=True) -> Union[(dict, str)]: 'Call the third party service\n\n The request is presumed to support pagination by default if\n - The method is GET\n - load_json is True\n - The provider returns not None per_page_argument property\n\n Args:\n url: Endpoint to request\n load_json: Is the response of JSON type\n method: HTTP method\n body: Request body; None if no body\n params: Query arguments as dictionary; None if no arguments\n headers: Request headers as dictionary; None if no headers\n has_pagination: Whether the pagination query arguments should be appended\n Returns:\n List or Dict: Create from JSON response body if load_json is True\n str: Raw response body if load_json is False\n ' if (not self._config.access_token): raise tornado.web.HTTPError(status_code=http.HTTPStatus.BAD_REQUEST, reason='No access token specified. Please set PRConfig.access_token in your user jupyter_server_config file.') if (body is not None): if (headers is None): headers = {} headers['Content-Type'] = 'application/json' body = tornado.escape.json_encode(body) if ((not url.startswith(self.base_api_url)) and (not re.search('^https?:', url))): url = url_path_join(self.base_api_url, url) with_pagination = False if (load_json and has_pagination and (method.lower() == 'get') and (self.per_page_argument is not None)): with_pagination = True params = (params or {}) params.update([self.per_page_argument]) if (params is not None): url = tornado.httputil.url_concat(url, params) request = tornado.httpclient.HTTPRequest(url, user_agent=f'JupyterLab Pull Requests v{__version__}', method=method.upper(), body=body, headers=headers) self.log.debug(f'{method.upper()} {url}') try: response = (await self._client.fetch(request)) result = response.body.decode('utf-8') if load_json: link = response.headers.get('Link') next_url = None if (link is not None): for e in link.split(','): args = e.strip().split(';') data = args[0] metadata = {k.strip(): v.strip().strip('"') for (k, v) in map((lambda s: s.strip().split('=')), args[1:])} if (metadata.get('rel', '') == 'next'): next_url = data[1:(- 1)] break new_ = json.loads(result) if (next_url is not None): next_ = (await self._call_provider(next_url, load_json=load_json, method=method, body=body, headers=headers, has_pagination=False)) if (not isinstance(new_, list)): new_ = [new_] if (not isinstance(next_, list)): next_ = [next_] return (new_ + next_) elif (with_pagination and (not isinstance(new_, list))): return [new_] else: return new_ else: return result except tornado.httpclient.HTTPClientError as e: self.log.debug(f'Failed to fetch {request.method} {request.url}', exc_info=e) error_body = ((e.response.body or b'{}').decode('utf-8') if (e.response is not None) else '{}') self.log.debug(error_body) try: message = json.loads(error_body).get('message', str(e)) except json.JSONDecodeError: message = str(e) raise tornado.web.HTTPError(status_code=e.code, reason=f"Invalid response in '{url}': {message}") from e except (json.JSONDecodeError, UnicodeDecodeError) as e: self.log.error('Failed to decode the response', exc_info=e) raise tornado.web.HTTPError(status_code=http.HTTPStatus.BAD_REQUEST, reason=f"Invalid response in '{url}': {e}") from e except Exception as e: self.log.error('Failed to fetch http request', exc_info=e) raise tornado.web.HTTPError(status_code=http.HTTPStatus.INTERNAL_SERVER_ERROR, reason=f"Unknown error in '{url}': {e}") from e
async def _call_provider(self, url: str, load_json: bool=True, method: str='GET', body: Optional[dict]=None, params: Optional[Dict[(str, str)]]=None, headers: Optional[Dict[(str, str)]]=None, has_pagination: bool=True) -> Union[(dict, str)]: 'Call the third party service\n\n The request is presumed to support pagination by default if\n - The method is GET\n - load_json is True\n - The provider returns not None per_page_argument property\n\n Args:\n url: Endpoint to request\n load_json: Is the response of JSON type\n method: HTTP method\n body: Request body; None if no body\n params: Query arguments as dictionary; None if no arguments\n headers: Request headers as dictionary; None if no headers\n has_pagination: Whether the pagination query arguments should be appended\n Returns:\n List or Dict: Create from JSON response body if load_json is True\n str: Raw response body if load_json is False\n ' if (not self._config.access_token): raise tornado.web.HTTPError(status_code=http.HTTPStatus.BAD_REQUEST, reason='No access token specified. Please set PRConfig.access_token in your user jupyter_server_config file.') if (body is not None): if (headers is None): headers = {} headers['Content-Type'] = 'application/json' body = tornado.escape.json_encode(body) if ((not url.startswith(self.base_api_url)) and (not re.search('^https?:', url))): url = url_path_join(self.base_api_url, url) with_pagination = False if (load_json and has_pagination and (method.lower() == 'get') and (self.per_page_argument is not None)): with_pagination = True params = (params or {}) params.update([self.per_page_argument]) if (params is not None): url = tornado.httputil.url_concat(url, params) request = tornado.httpclient.HTTPRequest(url, user_agent=f'JupyterLab Pull Requests v{__version__}', method=method.upper(), body=body, headers=headers) self.log.debug(f'{method.upper()} {url}') try: response = (await self._client.fetch(request)) result = response.body.decode('utf-8') if load_json: link = response.headers.get('Link') next_url = None if (link is not None): for e in link.split(','): args = e.strip().split(';') data = args[0] metadata = {k.strip(): v.strip().strip('"') for (k, v) in map((lambda s: s.strip().split('=')), args[1:])} if (metadata.get('rel', '') == 'next'): next_url = data[1:(- 1)] break new_ = json.loads(result) if (next_url is not None): next_ = (await self._call_provider(next_url, load_json=load_json, method=method, body=body, headers=headers, has_pagination=False)) if (not isinstance(new_, list)): new_ = [new_] if (not isinstance(next_, list)): next_ = [next_] return (new_ + next_) elif (with_pagination and (not isinstance(new_, list))): return [new_] else: return new_ else: return result except tornado.httpclient.HTTPClientError as e: self.log.debug(f'Failed to fetch {request.method} {request.url}', exc_info=e) error_body = ((e.response.body or b'{}').decode('utf-8') if (e.response is not None) else '{}') self.log.debug(error_body) try: message = json.loads(error_body).get('message', str(e)) except json.JSONDecodeError: message = str(e) raise tornado.web.HTTPError(status_code=e.code, reason=f"Invalid response in '{url}': {message}") from e except (json.JSONDecodeError, UnicodeDecodeError) as e: self.log.error('Failed to decode the response', exc_info=e) raise tornado.web.HTTPError(status_code=http.HTTPStatus.BAD_REQUEST, reason=f"Invalid response in '{url}': {e}") from e except Exception as e: self.log.error('Failed to fetch http request', exc_info=e) raise tornado.web.HTTPError(status_code=http.HTTPStatus.INTERNAL_SERVER_ERROR, reason=f"Unknown error in '{url}': {e}") from e<|docstring|>Call the third party service The request is presumed to support pagination by default if - The method is GET - load_json is True - The provider returns not None per_page_argument property Args: url: Endpoint to request load_json: Is the response of JSON type method: HTTP method body: Request body; None if no body params: Query arguments as dictionary; None if no arguments headers: Request headers as dictionary; None if no headers has_pagination: Whether the pagination query arguments should be appended Returns: List or Dict: Create from JSON response body if load_json is True str: Raw response body if load_json is False<|endoftext|>
dc2dd5fc81968b55d78c1d52b972bc0834dc3c8535e3c8b8b9e8808eb9780a7f
def __init__(self, rouge_dir=None, rouge_args=None, verbose=False): '\n ROUGE metric\n Makes use of pyrouge: https://github.com/bheinzerling/pyrouge\n\n Args:\n :param rouge_dir: directory of ROUGE-1.5.5/, by default uses environment\'s ROUGE_HOME variable\n :param rouge_args: arguments for ROUGE calculation; if None, defaults to "-c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 -a -m"; a string of parameters. Please see ROUGE-1.5.5 README (e.g. https://github.com/andersjo/pyrouge/tree/master/tools/ROUGE-1.5.5) for a list of possible parameters\n :param verbose: whether to log data preparation or just output\n\n ' log_level = (logging.ERROR if (not verbose) else None) from set_path import set_path set_path() if (not ROUGE_HOME): rouge_dir = os.environ['ROUGE_HOME'] self.r = Rouge155(rouge_dir=rouge_dir, rouge_args=rouge_args) self.rouge_args = rouge_args
ROUGE metric Makes use of pyrouge: https://github.com/bheinzerling/pyrouge Args: :param rouge_dir: directory of ROUGE-1.5.5/, by default uses environment's ROUGE_HOME variable :param rouge_args: arguments for ROUGE calculation; if None, defaults to "-c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 -a -m"; a string of parameters. Please see ROUGE-1.5.5 README (e.g. https://github.com/andersjo/pyrouge/tree/master/tools/ROUGE-1.5.5) for a list of possible parameters :param verbose: whether to log data preparation or just output
cal_scores/SummEval/evaluation/summ_eval/rouge_metric.py
__init__
bzhao2718/ReliableSummEvalReg
0
python
def __init__(self, rouge_dir=None, rouge_args=None, verbose=False): '\n ROUGE metric\n Makes use of pyrouge: https://github.com/bheinzerling/pyrouge\n\n Args:\n :param rouge_dir: directory of ROUGE-1.5.5/, by default uses environment\'s ROUGE_HOME variable\n :param rouge_args: arguments for ROUGE calculation; if None, defaults to "-c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 -a -m"; a string of parameters. Please see ROUGE-1.5.5 README (e.g. https://github.com/andersjo/pyrouge/tree/master/tools/ROUGE-1.5.5) for a list of possible parameters\n :param verbose: whether to log data preparation or just output\n\n ' log_level = (logging.ERROR if (not verbose) else None) from set_path import set_path set_path() if (not ROUGE_HOME): rouge_dir = os.environ['ROUGE_HOME'] self.r = Rouge155(rouge_dir=rouge_dir, rouge_args=rouge_args) self.rouge_args = rouge_args
def __init__(self, rouge_dir=None, rouge_args=None, verbose=False): '\n ROUGE metric\n Makes use of pyrouge: https://github.com/bheinzerling/pyrouge\n\n Args:\n :param rouge_dir: directory of ROUGE-1.5.5/, by default uses environment\'s ROUGE_HOME variable\n :param rouge_args: arguments for ROUGE calculation; if None, defaults to "-c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 -a -m"; a string of parameters. Please see ROUGE-1.5.5 README (e.g. https://github.com/andersjo/pyrouge/tree/master/tools/ROUGE-1.5.5) for a list of possible parameters\n :param verbose: whether to log data preparation or just output\n\n ' log_level = (logging.ERROR if (not verbose) else None) from set_path import set_path set_path() if (not ROUGE_HOME): rouge_dir = os.environ['ROUGE_HOME'] self.r = Rouge155(rouge_dir=rouge_dir, rouge_args=rouge_args) self.rouge_args = rouge_args<|docstring|>ROUGE metric Makes use of pyrouge: https://github.com/bheinzerling/pyrouge Args: :param rouge_dir: directory of ROUGE-1.5.5/, by default uses environment's ROUGE_HOME variable :param rouge_args: arguments for ROUGE calculation; if None, defaults to "-c 95 -2 -1 -U -r 1000 -n 4 -w 1.2 -a -m"; a string of parameters. Please see ROUGE-1.5.5 README (e.g. https://github.com/andersjo/pyrouge/tree/master/tools/ROUGE-1.5.5) for a list of possible parameters :param verbose: whether to log data preparation or just output<|endoftext|>
2b34bda5f373e43077868ae89ff07d978c43822401a666818eb5c4895dada364
def get_validator() -> Type[AzPlatformValidator]: 'Returns the validator class for this module' return AzPlatformValidator
Returns the validator class for this module
scripts/commit_validation/commit_validation/validators/az_platform_validator.py
get_validator
eerock/o3de
11
python
def get_validator() -> Type[AzPlatformValidator]: return AzPlatformValidator
def get_validator() -> Type[AzPlatformValidator]: return AzPlatformValidator<|docstring|>Returns the validator class for this module<|endoftext|>