|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/bert_embedding.py","language":"python","identifier":"generate_embedding","parameters":"(bert_model, labels)","argument_list":"","return_statement":"return embedding","docstring":"Generate bert's embedding from fine-tuned model.","docstring_summary":"Generate bert's embedding from fine-tuned model.","docstring_tokens":["Generate","bert","s","embedding","from","fine","-","tuned","model","."],"function":"def generate_embedding(bert_model, labels):\n \"\"\"Generate bert's embedding from fine-tuned model.\"\"\"\n batch_size, time = labels.shape\n\n cls_ids = torch.full(\n (batch_size, 1), bert_model.bert_text_encoder.cls_idx, dtype=labels.dtype, device=labels.device)\n bert_labels = torch.cat([cls_ids, labels], 1)\n # replace eos with sep\n eos_idx = bert_model.bert_text_encoder.eos_idx\n sep_idx = bert_model.bert_text_encoder.sep_idx\n bert_labels[bert_labels == eos_idx] = sep_idx\n\n embedding, _ = bert_model.bert(bert_labels, output_all_encoded_layers=True)\n # sum over all layers embedding\n embedding = torch.stack(embedding).sum(0)\n # get rid of cls\n embedding = embedding[:, 1:]\n\n assert labels.shape == embedding.shape[:-1]\n\n return embedding","function_tokens":["def","generate_embedding","(","bert_model",",","labels",")",":","batch_size",",","time","=","labels",".","shape","cls_ids","=","torch",".","full","(","(","batch_size",",","1",")",",","bert_model",".","bert_text_encoder",".","cls_idx",",","dtype","=","labels",".","dtype",",","device","=","labels",".","device",")","bert_labels","=","torch",".","cat","(","[","cls_ids",",","labels","]",",","1",")","# replace eos with sep","eos_idx","=","bert_model",".","bert_text_encoder",".","eos_idx","sep_idx","=","bert_model",".","bert_text_encoder",".","sep_idx","bert_labels","[","bert_labels","==","eos_idx","]","=","sep_idx","embedding",",","_","=","bert_model",".","bert","(","bert_labels",",","output_all_encoded_layers","=","True",")","# sum over all layers embedding","embedding","=","torch",".","stack","(","embedding",")",".","sum","(","0",")","# get rid of cls","embedding","=","embedding","[",":",",","1",":","]","assert","labels",".","shape","==","embedding",".","shape","[",":","-","1","]","return","embedding"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/bert_embedding.py#L38-L58"} |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/bert_embedding.py","language":"python","identifier":"load_fine_tuned_model","parameters":"(bert_model, text_encoder, path)","argument_list":"","return_statement":"return model","docstring":"Load fine-tuned bert model given text encoder and checkpoint path.","docstring_summary":"Load fine-tuned bert model given text encoder and checkpoint path.","docstring_tokens":["Load","fine","-","tuned","bert","model","given","text","encoder","and","checkpoint","path","."],"function":"def load_fine_tuned_model(bert_model, text_encoder, path):\n \"\"\"Load fine-tuned bert model given text encoder and checkpoint path.\"\"\"\n bert_text_encoder = BertLikeSentencePieceTextEncoder(text_encoder)\n\n model = BertForMaskedLM.from_pretrained(bert_model)\n model.bert_text_encoder = bert_text_encoder\n model.bert.embeddings.word_embeddings = nn.Embedding(\n bert_text_encoder.vocab_size, model.bert.embeddings.word_embeddings.weight.shape[1])\n model.config.vocab_size = bert_text_encoder.vocab_size\n model.cls = BertOnlyMLMHead(\n model.config, model.bert.embeddings.word_embeddings.weight)\n\n model.load_state_dict(torch.load(path))\n\n return model","function_tokens":["def","load_fine_tuned_model","(","bert_model",",","text_encoder",",","path",")",":","bert_text_encoder","=","BertLikeSentencePieceTextEncoder","(","text_encoder",")","model","=","BertForMaskedLM",".","from_pretrained","(","bert_model",")","model",".","bert_text_encoder","=","bert_text_encoder","model",".","bert",".","embeddings",".","word_embeddings","=","nn",".","Embedding","(","bert_text_encoder",".","vocab_size",",","model",".","bert",".","embeddings",".","word_embeddings",".","weight",".","shape","[","1","]",")","model",".","config",".","vocab_size","=","bert_text_encoder",".","vocab_size","model",".","cls","=","BertOnlyMLMHead","(","model",".","config",",","model",".","bert",".","embeddings",".","word_embeddings",".","weight",")","model",".","load_state_dict","(","torch",".","load","(","path",")",")","return","model"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/bert_embedding.py#L61-L75"} |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/solver.py","language":"python","identifier":"BaseSolver.backward","parameters":"(self, loss)","argument_list":"","return_statement":"return grad_norm","docstring":"Standard backward step with self.timer and debugger\n Arguments\n loss - the loss to perform loss.backward()","docstring_summary":"Standard backward step with self.timer and debugger\n Arguments\n loss - the loss to perform loss.backward()","docstring_tokens":["Standard","backward","step","with","self",".","timer","and","debugger","Arguments","loss","-","the","loss","to","perform","loss",".","backward","()"],"function":"def backward(self, loss):\n '''\n Standard backward step with self.timer and debugger\n Arguments\n loss - the loss to perform loss.backward()\n '''\n self.timer.set()\n loss.backward()\n grad_norm = torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.GRAD_CLIP)\n if math.isnan(grad_norm):\n self.verbose('Error : grad norm is NaN @ step '+str(self.step))\n else:\n self.optimizer.step()\n self.timer.cnt('bw')\n return grad_norm","function_tokens":["def","backward","(","self",",","loss",")",":","self",".","timer",".","set","(",")","loss",".","backward","(",")","grad_norm","=","torch",".","nn",".","utils",".","clip_grad_norm_","(","self",".","model",".","parameters","(",")",",","self",".","GRAD_CLIP",")","if","math",".","isnan","(","grad_norm",")",":","self",".","verbose","(","'Error : grad norm is NaN @ step '","+","str","(","self",".","step",")",")","else",":","self",".","optimizer",".","step","(",")","self",".","timer",".","cnt","(","'bw'",")","return","grad_norm"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/solver.py#L76-L91"} |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/solver.py","language":"python","identifier":"BaseSolver.load_ckpt","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Load ckpt if --load option is specified","docstring_summary":"Load ckpt if --load option is specified","docstring_tokens":["Load","ckpt","if","--","load","option","is","specified"],"function":"def load_ckpt(self):\n ''' Load ckpt if --load option is specified '''\n if self.paras.load:\n # Load weights\n ckpt = torch.load(\n self.paras.load, map_location=self.device if self.mode == 'train' else 'cpu')\n self.model.load_state_dict(ckpt['model'])\n if self.emb_decoder is not None:\n self.emb_decoder.load_state_dict(ckpt['emb_decoder'])\n # if self.amp:\n # amp.load_state_dict(ckpt['amp'])\n # Load task-dependent items\n metric = \"None\"\n score = 0.0\n for k, v in ckpt.items():\n if type(v) is float:\n metric, score = k, v\n if self.mode == 'train':\n self.step = ckpt['global_step']\n self.optimizer.load_opt_state_dict(ckpt['optimizer'])\n self.verbose('Load ckpt from {}, restarting at step {} (recorded {} = {:.2f} %)'.format(\n self.paras.load, self.step, metric, score))\n else:\n self.model.eval()\n if self.emb_decoder is not None:\n self.emb_decoder.eval()\n self.verbose('Evaluation target = {} (recorded {} = {:.2f} %)'.format(self.paras.load, metric, score))","function_tokens":["def","load_ckpt","(","self",")",":","if","self",".","paras",".","load",":","# Load weights","ckpt","=","torch",".","load","(","self",".","paras",".","load",",","map_location","=","self",".","device","if","self",".","mode","==","'train'","else","'cpu'",")","self",".","model",".","load_state_dict","(","ckpt","[","'model'","]",")","if","self",".","emb_decoder","is","not","None",":","self",".","emb_decoder",".","load_state_dict","(","ckpt","[","'emb_decoder'","]",")","# if self.amp:","# amp.load_state_dict(ckpt['amp'])","# Load task-dependent items","metric","=","\"None\"","score","=","0.0","for","k",",","v","in","ckpt",".","items","(",")",":","if","type","(","v",")","is","float",":","metric",",","score","=","k",",","v","if","self",".","mode","==","'train'",":","self",".","step","=","ckpt","[","'global_step'","]","self",".","optimizer",".","load_opt_state_dict","(","ckpt","[","'optimizer'","]",")","self",".","verbose","(","'Load ckpt from {}, restarting at step {} (recorded {} = {:.2f} %)'",".","format","(","self",".","paras",".","load",",","self",".","step",",","metric",",","score",")",")","else",":","self",".","model",".","eval","(",")","if","self",".","emb_decoder","is","not","None",":","self",".","emb_decoder",".","eval","(",")","self",".","verbose","(","'Evaluation target = {} (recorded {} = {:.2f} %)'",".","format","(","self",".","paras",".","load",",","metric",",","score",")",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/solver.py#L93-L119"} |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/solver.py","language":"python","identifier":"BaseSolver.verbose","parameters":"(self, msg)","argument_list":"","return_statement":"","docstring":"Verbose function for print information to stdout","docstring_summary":"Verbose function for print information to stdout","docstring_tokens":["Verbose","function","for","print","information","to","stdout"],"function":"def verbose(self, msg):\n ''' Verbose function for print information to stdout'''\n if self.paras.verbose:\n if type(msg) == list:\n for m in msg:\n print('[INFO]', m.ljust(100))\n else:\n print('[INFO]', msg.ljust(100))","function_tokens":["def","verbose","(","self",",","msg",")",":","if","self",".","paras",".","verbose",":","if","type","(","msg",")","==","list",":","for","m","in","msg",":","print","(","'[INFO]'",",","m",".","ljust","(","100",")",")","else",":","print","(","'[INFO]'",",","msg",".","ljust","(","100",")",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/solver.py#L121-L128"} |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/solver.py","language":"python","identifier":"BaseSolver.progress","parameters":"(self, msg)","argument_list":"","return_statement":"","docstring":"Verbose function for updating progress on stdout (do not include newline)","docstring_summary":"Verbose function for updating progress on stdout (do not include newline)","docstring_tokens":["Verbose","function","for","updating","progress","on","stdout","(","do","not","include","newline",")"],"function":"def progress(self, msg):\n ''' Verbose function for updating progress on stdout (do not include newline) '''\n if self.paras.verbose:\n sys.stdout.write(\"\\033[K\") # Clear line\n print('[{}] {}'.format(human_format(self.step), msg), end='\\r')","function_tokens":["def","progress","(","self",",","msg",")",":","if","self",".","paras",".","verbose",":","sys",".","stdout",".","write","(","\"\\033[K\"",")","# Clear line","print","(","'[{}] {}'",".","format","(","human_format","(","self",".","step",")",",","msg",")",",","end","=","'\\r'",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/solver.py#L130-L134"} |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/solver.py","language":"python","identifier":"BaseSolver.write_log","parameters":"(self, log_name, log_dict)","argument_list":"","return_statement":"","docstring":"Write log to TensorBoard\n log_name - <str> Name of tensorboard variable \n log_value - <dict>\/<array> Value of variable (e.g. dict of losses), passed if value = None","docstring_summary":"Write log to TensorBoard\n log_name - <str> Name of tensorboard variable \n log_value - <dict>\/<array> Value of variable (e.g. dict of losses), passed if value = None","docstring_tokens":["Write","log","to","TensorBoard","log_name","-","<str",">","Name","of","tensorboard","variable","log_value","-","<dict",">","\/","<array",">","Value","of","variable","(","e",".","g",".","dict","of","losses",")","passed","if","value","=","None"],"function":"def write_log(self, log_name, log_dict):\n '''\n Write log to TensorBoard\n log_name - <str> Name of tensorboard variable \n log_value - <dict>\/<array> Value of variable (e.g. dict of losses), passed if value = None\n '''\n if type(log_dict) is dict:\n log_dict = {key: val for key, val in log_dict.items() if (\n val is not None and not math.isnan(val))}\n if log_dict is None:\n pass\n elif len(log_dict) > 0:\n if 'align' in log_name or 'spec' in log_name:\n img, form = log_dict\n self.log.add_image(\n log_name, img, global_step=self.step, dataformats=form)\n elif 'text' in log_name or 'hyp' in log_name:\n self.log.add_text(log_name, log_dict, self.step)\n else:\n self.log.add_scalars(log_name, log_dict, self.step)","function_tokens":["def","write_log","(","self",",","log_name",",","log_dict",")",":","if","type","(","log_dict",")","is","dict",":","log_dict","=","{","key",":","val","for","key",",","val","in","log_dict",".","items","(",")","if","(","val","is","not","None","and","not","math",".","isnan","(","val",")",")","}","if","log_dict","is","None",":","pass","elif","len","(","log_dict",")",">","0",":","if","'align'","in","log_name","or","'spec'","in","log_name",":","img",",","form","=","log_dict","self",".","log",".","add_image","(","log_name",",","img",",","global_step","=","self",".","step",",","dataformats","=","form",")","elif","'text'","in","log_name","or","'hyp'","in","log_name",":","self",".","log",".","add_text","(","log_name",",","log_dict",",","self",".","step",")","else",":","self",".","log",".","add_scalars","(","log_name",",","log_dict",",","self",".","step",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/solver.py#L136-L155"} |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/solver.py","language":"python","identifier":"BaseSolver.save_checkpoint","parameters":"(self, f_name, metric, score, show_msg=True)","argument_list":"","return_statement":"","docstring":"Ckpt saver\n f_name - <str> the name phnof ckpt file (w\/o prefix) to store, overwrite if existed\n score - <float> The value of metric used to evaluate model","docstring_summary":"Ckpt saver\n f_name - <str> the name phnof ckpt file (w\/o prefix) to store, overwrite if existed\n score - <float> The value of metric used to evaluate model","docstring_tokens":["Ckpt","saver","f_name","-","<str",">","the","name","phnof","ckpt","file","(","w","\/","o","prefix",")","to","store","overwrite","if","existed","score","-","<float",">","The","value","of","metric","used","to","evaluate","model"],"function":"def save_checkpoint(self, f_name, metric, score, show_msg=True):\n '''' \n Ckpt saver\n f_name - <str> the name phnof ckpt file (w\/o prefix) to store, overwrite if existed\n score - <float> The value of metric used to evaluate model\n '''\n ckpt_path = os.path.join(self.ckpdir, f_name)\n full_dict = {\n \: self.model.state_dict(),\n \: self.optimizer.get_opt_state_dict(),\n \: self.step,\n metric: score\n }\n # Additional modules to save\n # if self.amp:\n # full_dict['amp'] = self.amp_lib.state_dict()\n if self.emb_decoder is not None:\n full_dict['emb_decoder'] = self.emb_decoder.state_dict()\n\n torch.save(full_dict, ckpt_path)\n if show_msg:\n self.verbose(\.\n format(human_format(self.step), metric, score, ckpt_path))function_tokensdefsave_checkpoint(self,f_name,metric,score,show_msg=True):ckpt_path=os.path.join(self.ckpdir,f_name)full_dict={\:self.model.state_dict(),\:self.optimizer.get_opt_state_dict(),\:self.step,metric:score}# Additional modules to save# if self.amp:# full_dict['amp'] = self.amp_lib.state_dict()ifself.emb_decoderisnotNone:full_dict['emb_decoder']=self.emb_decoder.state_dict()torch.save(full_dict,ckpt_path)ifshow_msg:self.verbose(\.format(human_format(self.step),metric,score,ckpt_path))urlhttps:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/solver.py#L157-L179 |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/solver.py","language":"python","identifier":"BaseSolver.load_data","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Called by main to load all data\n After this call, data related attributes should be setup (e.g. self.tr_set, self.dev_set)\n No return value","docstring_summary":"Called by main to load all data\n After this call, data related attributes should be setup (e.g. self.tr_set, self.dev_set)\n No return value","docstring_tokens":["Called","by","main","to","load","all","data","After","this","call","data","related","attributes","should","be","setup","(","e",".","g",".","self",".","tr_set","self",".","dev_set",")","No","return","value"],"function":"def load_data(self):\n '''\n Called by main to load all data\n After this call, data related attributes should be setup (e.g. self.tr_set, self.dev_set)\n No return value\n '''\n raise NotImplementedError","function_tokens":["def","load_data","(","self",")",":","raise","NotImplementedError"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/solver.py#L193-L199"} |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/solver.py","language":"python","identifier":"BaseSolver.set_model","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Called by main to set models\n After this call, model related attributes should be setup (e.g. self.l2_loss)\n The followings MUST be setup\n - self.model (torch.nn.Module)\n - self.optimizer (src.Optimizer),\n init. w\/ self.optimizer = src.Optimizer(self.model.parameters(),**self.config['hparas'])\n Loading pre-trained model should also be performed here \n No return value","docstring_summary":"Called by main to set models\n After this call, model related attributes should be setup (e.g. self.l2_loss)\n The followings MUST be setup\n - self.model (torch.nn.Module)\n - self.optimizer (src.Optimizer),\n init. w\/ self.optimizer = src.Optimizer(self.model.parameters(),**self.config['hparas'])\n Loading pre-trained model should also be performed here \n No return value","docstring_tokens":["Called","by","main","to","set","models","After","this","call","model","related","attributes","should","be","setup","(","e",".","g",".","self",".","l2_loss",")","The","followings","MUST","be","setup","-","self",".","model","(","torch",".","nn",".","Module",")","-","self",".","optimizer","(","src",".","Optimizer",")","init",".","w","\/","self",".","optimizer","=","src",".","Optimizer","(","self",".","model",".","parameters","(",")","**","self",".","config","[","hparas","]",")","Loading","pre","-","trained","model","should","also","be","performed","here","No","return","value"],"function":"def set_model(self):\n '''\n Called by main to set models\n After this call, model related attributes should be setup (e.g. self.l2_loss)\n The followings MUST be setup\n - self.model (torch.nn.Module)\n - self.optimizer (src.Optimizer),\n init. w\/ self.optimizer = src.Optimizer(self.model.parameters(),**self.config['hparas'])\n Loading pre-trained model should also be performed here \n No return value\n '''\n raise NotImplementedError","function_tokens":["def","set_model","(","self",")",":","raise","NotImplementedError"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/solver.py#L202-L213"} |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/solver.py","language":"python","identifier":"BaseSolver.exec","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Called by main to execute training\/inference","docstring_summary":"Called by main to execute training\/inference","docstring_tokens":["Called","by","main","to","execute","training","\/","inference"],"function":"def exec(self):\n '''\n Called by main to execute training\/inference\n '''\n raise NotImplementedError","function_tokens":["def","exec","(","self",")",":","raise","NotImplementedError"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/solver.py#L216-L220"} |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/decode.py","language":"python","identifier":"Hypothesis.avgScore","parameters":"(self)","argument_list":"","return_statement":"return sum(self.output_scores) \/ len(self.output_scores)","docstring":"Return the averaged log probability of hypothesis","docstring_summary":"Return the averaged log probability of hypothesis","docstring_tokens":["Return","the","averaged","log","probability","of","hypothesis"],"function":"def avgScore(self):\n '''Return the averaged log probability of hypothesis'''\n assert len(self.output_scores) != 0\n return sum(self.output_scores) \/ len(self.output_scores)","function_tokens":["def","avgScore","(","self",")",":","assert","len","(","self",".","output_scores",")","!=","0","return","sum","(","self",".","output_scores",")","\/","len","(","self",".","output_scores",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/decode.py#L204-L207"} |
|
nwoAlexander-H-Liu\/End-to-end-ASR-Pytorchsha1103d144423e8e692f1d18cd9db27a96cb49fb9dpathsrc\/decode.pylanguagepythonidentifierHypothesis.addTopkparameters(self, topi, topv, decoder_state, att_map=None,\n lm_state=None, ctc_state=None, ctc_prob=0.0, ctc_candidates=[])argument_listreturn_statementreturn None, new_hypothesisdocstringExpand current hypothesis with a given beam sizedocstring_summaryExpand current hypothesis with a given beam sizedocstring_tokensExpandcurrenthypothesiswithagivenbeamsizefunctiondef addTopk(self, topi, topv, decoder_state, att_map=None,\n lm_state=None, ctc_state=None, ctc_prob=0.0, ctc_candidates=[]):\n '''Expand current hypothesis with a given beam size'''\n new_hypothesis = []\n term_score = None\n ctc_s, ctc_p = None, None\n beam_size = topi.shape[-1]\n\n for i in range(beam_size):\n # Detect <eos>\n if topi[i].item() == 1:\n term_score = topv[i].cpu()\n continue\n\n idxes = self.output_seq[:] # pass by value\n scores = self.output_scores[:] # pass by value\n idxes.append(topi[i].cpu())\n scores.append(topv[i].cpu())\n if ctc_state is not None:\n # ToDo: Handle out-of-candidate case.\n idx = ctc_candidates.index(topi[i].item())\n ctc_s = ctc_state[idx, :, :]\n ctc_p = ctc_prob[idx]\n new_hypothesis.append(Hypothesis(decoder_state,\n output_seq=idxes, output_scores=scores, lm_state=lm_state,\n ctc_state=ctc_s, ctc_prob=ctc_p, att_map=att_map))\n if term_score is not None:\n self.output_seq.append(torch.tensor(1))\n self.output_scores.append(term_score)\n return self, new_hypothesis\n return None, new_hypothesisfunction_tokensdefaddTopk(self,topi,topv,decoder_state,att_map=None,lm_state=None,ctc_state=None,ctc_prob=0.0,ctc_candidates=[]):new_hypothesis=[]term_score=Nonectc_s,ctc_p=None,Nonebeam_size=topi.shape[-1]foriinrange(beam_size):# Detect <eos>iftopi[i].item()==1:term_score=topv[i].cpu()continueidxes=self.output_seq[:]# pass by valuescores=self.output_scores[:]# pass by valueidxes.append(topi[i].cpu())scores.append(topv[i].cpu())ifctc_stateisnotNone:# ToDo: Handle out-of-candidate case.idx=ctc_candidates.index(topi[i].item())ctc_s=ctc_state[idx,:,:]ctc_p=ctc_prob[idx]new_hypothesis.append(Hypothesis(decoder_state,output_seq=idxes,output_scores=scores,lm_state=lm_state,ctc_state=ctc_s,ctc_prob=ctc_p,att_map=att_map))ifterm_scoreisnotNone:self.output_seq.append(torch.tensor(1))self.output_scores.append(term_score)returnself,new_hypothesisreturnNone,new_hypothesisurlhttps:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/decode.py#L209-L239 |
|
nwoAlexander-H-Liu\/End-to-end-ASR-Pytorchsha1103d144423e8e692f1d18cd9db27a96cb49fb9dpathsrc\/plugin.pylanguagepythonidentifierEmbeddingRegularizer.fuse_probparameters(self, x_emb, dec_logit)argument_listreturn_statementreturn log_fused_probdocstringTakes context and decoder logit to perform word embedding fusiondocstring_summaryTakes context and decoder logit to perform word embedding fusiondocstring_tokensTakescontextanddecoderlogittoperformwordembeddingfusionfunctiondef fuse_prob(self, x_emb, dec_logit):\n ''' Takes context and decoder logit to perform word embedding fusion '''\n # Compute distribution for dec\/emb\n if self.fuse_normalize:\n emb_logit = nn.functional.linear(nn.functional.normalize(x_emb, dim=-1),\n nn.functional.normalize(self.emb_table.weight, dim=-1))\n else:\n emb_logit = nn.functional.linear(x_emb, self.emb_table.weight)\n emb_prob = (nn.functional.relu(self.temp)*emb_logit).softmax(dim=-1)\n dec_prob = dec_logit.softmax(dim=-1)\n # Mix distribution\n if self.fuse_learnable:\n fused_prob = (1-torch.sigmoid(self.fuse_lambda))*dec_prob +\\\n torch.sigmoid(self.fuse_lambda)*emb_prob\n else:\n fused_prob = (1-self.fuse_lambda)*dec_prob + \\\n self.fuse_lambda*emb_prob\n # Log-prob\n log_fused_prob = (fused_prob+self.eps).log()\n\n return log_fused_probfunction_tokensdeffuse_prob(self,x_emb,dec_logit):# Compute distribution for dec\/embifself.fuse_normalize:emb_logit=nn.functional.linear(nn.functional.normalize(x_emb,dim=-1),nn.functional.normalize(self.emb_table.weight,dim=-1))else:emb_logit=nn.functional.linear(x_emb,self.emb_table.weight)emb_prob=(nn.functional.relu(self.temp)*emb_logit).softmax(dim=-1)dec_prob=dec_logit.softmax(dim=-1)# Mix distributionifself.fuse_learnable:fused_prob=(1-torch.sigmoid(self.fuse_lambda))*dec_prob+torch.sigmoid(self.fuse_lambda)*emb_probelse:fused_prob=(1-self.fuse_lambda)*dec_prob+self.fuse_lambda*emb_prob# Log-problog_fused_prob=(fused_prob+self.eps).log()returnlog_fused_proburlhttps:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/plugin.py#L103-L123 |
|
nwoAlexander-H-Liu\/End-to-end-ASR-Pytorchsha1103d144423e8e692f1d18cd9db27a96cb49fb9dpathsrc\/data.pylanguagepythonidentifiercollect_audio_batchparameters(batch, audio_transform, mode)argument_listreturn_statementreturn file, audio_feat, audio_len, textdocstringCollects a batch, should be list of tuples (audio_path <str>, list of int token <list>) \n e.g. [(file1,txt1),(file2,txt2),...]docstring_summaryCollects a batch, should be list of tuples (audio_path <str>, list of int token <list>) \n e.g. [(file1,txt1),(file2,txt2),...]docstring_tokensCollectsabatchshouldbelistoftuples(audio_path<str>listofinttoken<list>)e.g.[(file1txt1)(file2txt2)...]functiondef collect_audio_batch(batch, audio_transform, mode):\n '''Collects a batch, should be list of tuples (audio_path <str>, list of int token <list>) \n e.g. [(file1,txt1),(file2,txt2),...] '''\n\n # Bucketed batch should be [[(file1,txt1),(file2,txt2),...]]\n if type(batch[0]) is not tuple:\n batch = batch[0]\n # Make sure that batch size is reasonable\n first_len = audio_transform(str(batch[0][0])).shape[0]\n if first_len > HALF_BATCHSIZE_AUDIO_LEN and mode == 'train':\n batch = batch[:len(batch)\/\/2]\n\n # Read batch\n file, audio_feat, audio_len, text = [], [], [], []\n with torch.no_grad():\n for b in batch:\n file.append(str(b[0]).split('\/')[-1].split('.')[0])\n feat = audio_transform(str(b[0]))\n audio_feat.append(feat)\n audio_len.append(len(feat))\n text.append(torch.LongTensor(b[1]))\n # Descending audio length within each batch\n audio_len, file, audio_feat, text = zip(*[(feat_len, f_name, feat, txt)\n for feat_len, f_name, feat, txt in sorted(zip(audio_len, file, audio_feat, text), reverse=True, key=lambda x:x[0])])\n # Zero-padding\n audio_feat = pad_sequence(audio_feat, batch_first=True)\n text = pad_sequence(text, batch_first=True)\n audio_len = torch.LongTensor(audio_len)\n\n return file, audio_feat, audio_len, textfunction_tokensdefcollect_audio_batch(batch,audio_transform,mode):# Bucketed batch should be [[(file1,txt1),(file2,txt2),...]]iftype(batch[0])isnottuple:batch=batch[0]# Make sure that batch size is reasonablefirst_len=audio_transform(str(batch[0][0])).shape[0]iffirst_len>HALF_BATCHSIZE_AUDIO_LENandmode=='train':batch=batch[:len(batch)\/\/2]# Read batchfile,audio_feat,audio_len,text=[],[],[],[]withtorch.no_grad():forbinbatch:file.append(str(b[0]).split('\/')[-1].split('.')[0])feat=audio_transform(str(b[0]))audio_feat.append(feat)audio_len.append(len(feat))text.append(torch.LongTensor(b[1]))# Descending audio length within each batchaudio_len,file,audio_feat,text=zip(*[(feat_len,f_name,feat,txt)forfeat_len,f_name,feat,txtinsorted(zip(audio_len,file,audio_feat,text),reverse=True,key=lambdax:x[0])])# Zero-paddingaudio_feat=pad_sequence(audio_feat,batch_first=True)text=pad_sequence(text,batch_first=True)audio_len=torch.LongTensor(audio_len)returnfile,audio_feat,audio_len,texturlhttps:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/data.py#L14-L43 |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/data.py","language":"python","identifier":"collect_text_batch","parameters":"(batch, mode)","argument_list":"","return_statement":"return text","docstring":"Collects a batch of text, should be list of list of int token \n e.g. [txt1 <list>,txt2 <list>,...]","docstring_summary":"Collects a batch of text, should be list of list of int token \n e.g. [txt1 <list>,txt2 <list>,...]","docstring_tokens":["Collects","a","batch","of","text","should","be","list","of","list","of","int","token","e",".","g",".","[","txt1","<list",">","txt2","<list",">","...","]"],"function":"def collect_text_batch(batch, mode):\n '''Collects a batch of text, should be list of list of int token \n e.g. [txt1 <list>,txt2 <list>,...] '''\n\n # Bucketed batch should be [[txt1, txt2,...]]\n if type(batch[0][0]) is list:\n batch = batch[0]\n # Half batch size if input to long\n if len(batch[0]) > HALF_BATCHSIZE_TEXT_LEN and mode == 'train':\n batch = batch[:len(batch)\/\/2]\n # Read batch\n text = [torch.LongTensor(b) for b in batch]\n # Zero-padding\n text = pad_sequence(text, batch_first=True)\n\n return text","function_tokens":["def","collect_text_batch","(","batch",",","mode",")",":","# Bucketed batch should be [[txt1, txt2,...]]","if","type","(","batch","[","0","]","[","0","]",")","is","list",":","batch","=","batch","[","0","]","# Half batch size if input to long","if","len","(","batch","[","0","]",")",">","HALF_BATCHSIZE_TEXT_LEN","and","mode","==","'train'",":","batch","=","batch","[",":","len","(","batch",")","\/\/","2","]","# Read batch","text","=","[","torch",".","LongTensor","(","b",")","for","b","in","batch","]","# Zero-padding","text","=","pad_sequence","(","text",",","batch_first","=","True",")","return","text"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/data.py#L46-L61"} |
|
nwoAlexander-H-Liu\/End-to-end-ASR-Pytorchsha1103d144423e8e692f1d18cd9db27a96cb49fb9dpathsrc\/data.pylanguagepythonidentifiercreate_datasetparameters(tokenizer, ascending, name, path, bucketing, batch_size,\n train_split=None, dev_split=None, test_split=None)argument_listreturn_statementdocstringInterface for creating all kinds of datasetdocstring_summaryInterface for creating all kinds of datasetdocstring_tokensInterfaceforcreatingallkindsofdatasetfunctiondef create_dataset(tokenizer, ascending, name, path, bucketing, batch_size,\n train_split=None, dev_split=None, test_split=None):\n ''' Interface for creating all kinds of dataset'''\n\n # Recognize corpus\n if name.lower() == \:\n from corpus.librispeech import LibriDataset as Dataset\n else:\n raise NotImplementedError\n\n # Create dataset\n if train_split is not None:\n # Training mode\n mode = 'train'\n tr_loader_bs = 1 if bucketing and (not ascending) else batch_size\n bucket_size = batch_size if bucketing and (\n not ascending) else 1 # Ascending without bucketing\n # Do not use bucketing for dev set\n dv_set = Dataset(path, dev_split, tokenizer, 1)\n tr_set = Dataset(path, train_split, tokenizer,\n bucket_size, ascending=ascending)\n # Messages to show\n msg_list = _data_msg(name, path, train_split.__str__(), len(tr_set),\n dev_split.__str__(), len(dv_set), batch_size, bucketing)\n\n return tr_set, dv_set, tr_loader_bs, batch_size, mode, msg_list\n else:\n # Testing model\n mode = 'test'\n # Do not use bucketing for dev set\n dv_set = Dataset(path, dev_split, tokenizer, 1)\n # Do not use bucketing for test set\n tt_set = Dataset(path, test_split, tokenizer, 1)\n # Messages to show\n msg_list = _data_msg(name, path, dev_split.__str__(), len(dv_set),\n test_split.__str__(), len(tt_set), batch_size, False)\n msg_list = [m.replace('Dev', 'Test').replace(\n 'Train', 'Dev') for m in msg_list]\n return dv_set, tt_set, batch_size, batch_size, mode, msg_listfunction_tokensdefcreate_dataset(tokenizer,ascending,name,path,bucketing,batch_size,train_split=None,dev_split=None,test_split=None):# Recognize corpusifname.lower()==\:fromcorpus.librispeechimportLibriDatasetasDatasetelse:raiseNotImplementedError# Create datasetiftrain_splitisnotNone:# Training modemode='train'tr_loader_bs=1ifbucketingand(notascending)elsebatch_sizebucket_size=batch_sizeifbucketingand(notascending)else1# Ascending without bucketing# Do not use bucketing for dev setdv_set=Dataset(path,dev_split,tokenizer,1)tr_set=Dataset(path,train_split,tokenizer,bucket_size,ascending=ascending)# Messages to showmsg_list=_data_msg(name,path,train_split.__str__(),len(tr_set),dev_split.__str__(),len(dv_set),batch_size,bucketing)returntr_set,dv_set,tr_loader_bs,batch_size,mode,msg_listelse:# Testing modelmode='test'# Do not use bucketing for dev setdv_set=Dataset(path,dev_split,tokenizer,1)# Do not use bucketing for test settt_set=Dataset(path,test_split,tokenizer,1)# Messages to showmsg_list=_data_msg(name,path,dev_split.__str__(),len(dv_set),test_split.__str__(),len(tt_set),batch_size,False)msg_list=[m.replace('Dev','Test').replace('Train','Dev')forminmsg_list]returndv_set,tt_set,batch_size,batch_size,mode,msg_listurlhttps:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/data.py#L64-L102 |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/data.py","language":"python","identifier":"create_textset","parameters":"(tokenizer, train_split, dev_split, name, path, bucketing, batch_size)","argument_list":"","return_statement":"return tr_set, dv_set, tr_loader_bs, batch_size, msg_list","docstring":"Interface for creating all kinds of text dataset","docstring_summary":"Interface for creating all kinds of text dataset","docstring_tokens":["Interface","for","creating","all","kinds","of","text","dataset"],"function":"def create_textset(tokenizer, train_split, dev_split, name, path, bucketing, batch_size):\n ''' Interface for creating all kinds of text dataset'''\n msg_list = []\n\n # Recognize corpus\n if name.lower() == \"librispeech\":\n from corpus.librispeech import LibriTextDataset as Dataset\n else:\n raise NotImplementedError\n\n # Create dataset\n bucket_size = batch_size if bucketing else 1\n tr_loader_bs = 1 if bucketing else batch_size\n # Do not use bucketing for dev set\n dv_set = Dataset(path, dev_split, tokenizer, 1)\n tr_set = Dataset(path, train_split, tokenizer, bucket_size)\n\n # Messages to show\n msg_list = _data_msg(name, path, train_split.__str__(), len(tr_set),\n dev_split.__str__(), len(dv_set), batch_size, bucketing)\n\n return tr_set, dv_set, tr_loader_bs, batch_size, msg_list","function_tokens":["def","create_textset","(","tokenizer",",","train_split",",","dev_split",",","name",",","path",",","bucketing",",","batch_size",")",":","msg_list","=","[","]","# Recognize corpus","if","name",".","lower","(",")","==","\"librispeech\"",":","from","corpus",".","librispeech","import","LibriTextDataset","as","Dataset","else",":","raise","NotImplementedError","# Create dataset","bucket_size","=","batch_size","if","bucketing","else","1","tr_loader_bs","=","1","if","bucketing","else","batch_size","# Do not use bucketing for dev set","dv_set","=","Dataset","(","path",",","dev_split",",","tokenizer",",","1",")","tr_set","=","Dataset","(","path",",","train_split",",","tokenizer",",","bucket_size",")","# Messages to show","msg_list","=","_data_msg","(","name",",","path",",","train_split",".","__str__","(",")",",","len","(","tr_set",")",",","dev_split",".","__str__","(",")",",","len","(","dv_set",")",",","batch_size",",","bucketing",")","return","tr_set",",","dv_set",",","tr_loader_bs",",","batch_size",",","msg_list"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/data.py#L105-L126"} |
|
nwoAlexander-H-Liu\/End-to-end-ASR-Pytorchsha1103d144423e8e692f1d18cd9db27a96cb49fb9dpathsrc\/data.pylanguagepythonidentifierload_datasetparameters(n_jobs, use_gpu, pin_memory, ascending, corpus, audio, text)argument_listreturn_statementreturn tr_set, dv_set, feat_dim, tokenizer.vocab_size, tokenizer, data_msgdocstringPrepare dataloader for training\/validationdocstring_summaryPrepare dataloader for training\/validationdocstring_tokensPreparedataloaderfortraining\/validationfunctiondef load_dataset(n_jobs, use_gpu, pin_memory, ascending, corpus, audio, text):\n ''' Prepare dataloader for training\/validation'''\n\n # Audio feature extractor\n audio_transform, feat_dim = create_transform(audio.copy())\n # Text tokenizer\n tokenizer = load_text_encoder(**text)\n # Dataset (in testing mode, tr_set=dv_set, dv_set=tt_set)\n tr_set, dv_set, tr_loader_bs, dv_loader_bs, mode, data_msg = create_dataset(\n tokenizer, ascending, **corpus)\n # Collect function\n collect_tr = partial(collect_audio_batch,\n audio_transform=audio_transform, mode=mode)\n collect_dv = partial(collect_audio_batch,\n audio_transform=audio_transform, mode='test')\n # Shuffle\/drop applied to training set only\n shuffle = (mode == 'train' and not ascending)\n drop_last = shuffle\n # Create data loader\n tr_set = DataLoader(tr_set, batch_size=tr_loader_bs, shuffle=shuffle, drop_last=drop_last, collate_fn=collect_tr,\n num_workers=n_jobs, pin_memory=use_gpu)\n dv_set = DataLoader(dv_set, batch_size=dv_loader_bs, shuffle=False, drop_last=False, collate_fn=collect_dv,\n num_workers=n_jobs, pin_memory=pin_memory)\n # Messages to show\n data_msg.append('I\/O spec. | Audio feature = {}\\t| feature dim = {}\\t| Token type = {}\\t| Vocab size = {}'\n .format(audio['feat_type'], feat_dim, tokenizer.token_type, tokenizer.vocab_size))\n\n return tr_set, dv_set, feat_dim, tokenizer.vocab_size, tokenizer, data_msgfunction_tokensdefload_dataset(n_jobs,use_gpu,pin_memory,ascending,corpus,audio,text):# Audio feature extractoraudio_transform,feat_dim=create_transform(audio.copy())# Text tokenizertokenizer=load_text_encoder(**text)# Dataset (in testing mode, tr_set=dv_set, dv_set=tt_set)tr_set,dv_set,tr_loader_bs,dv_loader_bs,mode,data_msg=create_dataset(tokenizer,ascending,**corpus)# Collect functioncollect_tr=partial(collect_audio_batch,audio_transform=audio_transform,mode=mode)collect_dv=partial(collect_audio_batch,audio_transform=audio_transform,mode='test')# Shuffle\/drop applied to training set onlyshuffle=(mode=='train'andnotascending)drop_last=shuffle# Create data loadertr_set=DataLoader(tr_set,batch_size=tr_loader_bs,shuffle=shuffle,drop_last=drop_last,collate_fn=collect_tr,num_workers=n_jobs,pin_memory=use_gpu)dv_set=DataLoader(dv_set,batch_size=dv_loader_bs,shuffle=False,drop_last=False,collate_fn=collect_dv,num_workers=n_jobs,pin_memory=pin_memory)# Messages to showdata_msg.append('I\/O spec. | Audio feature = {}\\t| feature dim = {}\\t| Token type = {}\\t| Vocab size = {}'.format(audio['feat_type'],feat_dim,tokenizer.token_type,tokenizer.vocab_size))returntr_set,dv_set,feat_dim,tokenizer.vocab_size,tokenizer,data_msgurlhttps:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/data.py#L129-L156 |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/data.py","language":"python","identifier":"_data_msg","parameters":"(name, path, train_split, tr_set, dev_split, dv_set, batch_size, bucketing)","argument_list":"","return_statement":"return msg_list","docstring":"List msg for verbose function","docstring_summary":"List msg for verbose function","docstring_tokens":["List","msg","for","verbose","function"],"function":"def _data_msg(name, path, train_split, tr_set, dev_split, dv_set, batch_size, bucketing):\n ''' List msg for verbose function '''\n msg_list = []\n msg_list.append('Data spec. | Corpus = {} (from {})'.format(name, path))\n msg_list.append(' | Train sets = {}\\t| Number of utts = {}'.format(\n train_split, tr_set))\n msg_list.append(\n ' | Dev sets = {}\\t| Number of utts = {}'.format(dev_split, dv_set))\n msg_list.append(' | Batch size = {}\\t\\t| Bucketing = {}'.format(\n batch_size, bucketing))\n return msg_list","function_tokens":["def","_data_msg","(","name",",","path",",","train_split",",","tr_set",",","dev_split",",","dv_set",",","batch_size",",","bucketing",")",":","msg_list","=","[","]","msg_list",".","append","(","'Data spec. | Corpus = {} (from {})'",".","format","(","name",",","path",")",")","msg_list",".","append","(","' | Train sets = {}\\t| Number of utts = {}'",".","format","(","train_split",",","tr_set",")",")","msg_list",".","append","(","' | Dev sets = {}\\t| Number of utts = {}'",".","format","(","dev_split",",","dv_set",")",")","msg_list",".","append","(","' | Batch size = {}\\t\\t| Bucketing = {}'",".","format","(","batch_size",",","bucketing",")",")","return","msg_list"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/data.py#L181-L191"} |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/asr.py","language":"python","identifier":"ASR.set_state","parameters":"(self, prev_state, prev_attn)","argument_list":"","return_statement":"","docstring":"Setting up all memory states for beam decoding","docstring_summary":"Setting up all memory states for beam decoding","docstring_tokens":["Setting","up","all","memory","states","for","beam","decoding"],"function":"def set_state(self, prev_state, prev_attn):\n ''' Setting up all memory states for beam decoding'''\n self.decoder.set_state(prev_state)\n self.attention.set_mem(prev_attn)","function_tokens":["def","set_state","(","self",",","prev_state",",","prev_attn",")",":","self",".","decoder",".","set_state","(","prev_state",")","self",".","attention",".","set_mem","(","prev_attn",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/asr.py#L48-L51"} |
|
nwoAlexander-H-Liu\/End-to-end-ASR-Pytorchsha1103d144423e8e692f1d18cd9db27a96cb49fb9dpathsrc\/asr.pylanguagepythonidentifierASR.forwardparameters(self, audio_feature, feature_len, decode_step, tf_rate=0.0, teacher=None,\n emb_decoder=None, get_dec_state=False)argument_listreturn_statementreturn ctc_output, encode_len, att_output, att_seq, dec_statedocstringArguments\n audio_feature - [BxTxD] Acoustic feature with shape \n feature_len - [B] Length of each sample in a batch\n decode_step - [int] The maximum number of attention decoder steps \n tf_rate - [0,1] The probability to perform teacher forcing for each step\n teacher - [BxL] Ground truth for teacher forcing with sentence length L\n emb_decoder - [obj] Introduces the word embedding decoder, different behavior for training\/inference\n At training stage, this ONLY affects self-sampling (output remains the same)\n At inference stage, this affects output to become log prob. with distribution fusion\n get_dec_state - [bool] If true, return decoder state [BxLxD] for other purposedocstring_summaryArguments\n audio_feature - [BxTxD] Acoustic feature with shape \n feature_len - [B] Length of each sample in a batch\n decode_step - [int] The maximum number of attention decoder steps \n tf_rate - [0,1] The probability to perform teacher forcing for each step\n teacher - [BxL] Ground truth for teacher forcing with sentence length L\n emb_decoder - [obj] Introduces the word embedding decoder, different behavior for training\/inference\n At training stage, this ONLY affects self-sampling (output remains the same)\n At inference stage, this affects output to become log prob. with distribution fusion\n get_dec_state - [bool] If true, return decoder state [BxLxD] for other purposedocstring_tokensArgumentsaudio_feature-[BxTxD]Acousticfeaturewithshapefeature_len-[B]Lengthofeachsampleinabatchdecode_step-[int]Themaximumnumberofattentiondecoderstepstf_rate-[01]Theprobabilitytoperformteacherforcingforeachstepteacher-[BxL]GroundtruthforteacherforcingwithsentencelengthLemb_decoder-[obj]Introducesthewordembeddingdecoderdifferentbehaviorfortraining\/inferenceAttrainingstagethisONLYaffectsself-sampling(outputremainsthesame)Atinferencestagethisaffectsoutputtobecomelogprob.withdistributionfusionget_dec_state-[bool]Iftruereturndecoderstate[BxLxD]forotherpurposefunctiondef forward(self, audio_feature, feature_len, decode_step, tf_rate=0.0, teacher=None,\n emb_decoder=None, get_dec_state=False):\n '''\n Arguments\n audio_feature - [BxTxD] Acoustic feature with shape \n feature_len - [B] Length of each sample in a batch\n decode_step - [int] The maximum number of attention decoder steps \n tf_rate - [0,1] The probability to perform teacher forcing for each step\n teacher - [BxL] Ground truth for teacher forcing with sentence length L\n emb_decoder - [obj] Introduces the word embedding decoder, different behavior for training\/inference\n At training stage, this ONLY affects self-sampling (output remains the same)\n At inference stage, this affects output to become log prob. 
with distribution fusion\n get_dec_state - [bool] If true, return decoder state [BxLxD] for other purpose\n '''\n # Init\n bs = audio_feature.shape[0]\n ctc_output, att_output, att_seq = None, None, None\n dec_state = [] if get_dec_state else None\n\n # Encode\n encode_feature, encode_len = self.encoder(audio_feature, feature_len)\n\n # CTC based decoding\n if self.enable_ctc:\n ctc_output = F.log_softmax(self.ctc_layer(encode_feature), dim=-1)\n\n # Attention based decoding\n if self.enable_att:\n # Init (init char = <SOS>, reset all rnn state and cell)\n self.decoder.init_state(bs)\n self.attention.reset_mem()\n last_char = self.pre_embed(torch.zeros(\n (bs), dtype=torch.long, device=encode_feature.device))\n att_seq, output_seq = [], []\n\n # Preprocess data for teacher forcing\n if teacher is not None:\n teacher = self.embed_drop(self.pre_embed(teacher))\n\n # Decode\n for t in range(decode_step):\n # Attend (inputs current state of first layer, encoded features)\n attn, context = self.attention(\n self.decoder.get_query(), encode_feature, encode_len)\n # Decode (inputs context + embedded last character)\n decoder_input = torch.cat([last_char, context], dim=-1)\n cur_char, d_state = self.decoder(decoder_input)\n # Prepare output as input of next step\n if (teacher is not None):\n # Training stage\n if (tf_rate == 1) or (torch.rand(1).item() <= tf_rate):\n # teacher forcing\n last_char = teacher[:, t, :]\n else:\n # self-sampling (replace by argmax may be another choice)\n with torch.no_grad():\n if (emb_decoder is not None) and emb_decoder.apply_fuse:\n _, cur_prob = emb_decoder(\n d_state, cur_char, return_loss=False)\n else:\n cur_prob = cur_char.softmax(dim=-1)\n sampled_char = Categorical(cur_prob).sample()\n last_char = self.embed_drop(\n self.pre_embed(sampled_char))\n else:\n # Inference stage\n if (emb_decoder is not None) and emb_decoder.apply_fuse:\n _, cur_char = emb_decoder(\n d_state, cur_char, return_loss=False)\n # argmax for inference\n last_char = self.pre_embed(torch.argmax(cur_char, dim=-1))\n\n # save output of each step\n output_seq.append(cur_char)\n att_seq.append(attn)\n if get_dec_state:\n dec_state.append(d_state)\n\n att_output = torch.stack(output_seq, dim=1) # BxTxV\n att_seq = torch.stack(att_seq, dim=2) # BxNxDtxT\n if get_dec_state:\n dec_state = torch.stack(dec_state, dim=1)\n\n return ctc_output, encode_len, att_output, att_seq, dec_statefunction_tokensdefforward(self,audio_feature,feature_len,decode_step,tf_rate=0.0,teacher=None,emb_decoder=None,get_dec_state=False):# Initbs=audio_feature.shape[0]ctc_output,att_output,att_seq=None,None,Nonedec_state=[]ifget_dec_stateelseNone# Encodeencode_feature,encode_len=self.encoder(audio_feature,feature_len)# CTC based decodingifself.enable_ctc:ctc_output=F.log_softmax(self.ctc_layer(encode_feature),dim=-1)# Attention based decodingifself.enable_att:# Init (init char = <SOS>, reset all rnn state and cell)self.decoder.init_state(bs)self.attention.reset_mem()last_char=self.pre_embed(torch.zeros((bs),dtype=torch.long,device=encode_feature.device))att_seq,output_seq=[],[]# Preprocess data for teacher forcingifteacherisnotNone:teacher=self.embed_drop(self.pre_embed(teacher))# Decodefortinrange(decode_step):# Attend (inputs current state of first layer, encoded features)attn,context=self.attention(self.decoder.get_query(),encode_feature,encode_len)# Decode (inputs context + embedded last character)decoder_input=torch.cat([last_char,context],dim=-1)cur_char,d_state=self.decoder(decoder_input)# Prepare output as input of 
next stepif(teacherisnotNone):# Training stageif(tf_rate==1)or(torch.rand(1).item()<=tf_rate):# teacher forcinglast_char=teacher[:,t,:]else:# self-sampling (replace by argmax may be another choice)withtorch.no_grad():if(emb_decoderisnotNone)andemb_decoder.apply_fuse:_,cur_prob=emb_decoder(d_state,cur_char,return_loss=False)else:cur_prob=cur_char.softmax(dim=-1)sampled_char=Categorical(cur_prob).sample()last_char=self.embed_drop(self.pre_embed(sampled_char))else:# Inference stageif(emb_decoderisnotNone)andemb_decoder.apply_fuse:_,cur_char=emb_decoder(d_state,cur_char,return_loss=False)# argmax for inferencelast_char=self.pre_embed(torch.argmax(cur_char,dim=-1))# save output of each stepoutput_seq.append(cur_char)att_seq.append(attn)ifget_dec_state:dec_state.append(d_state)att_output=torch.stack(output_seq,dim=1)# BxTxVatt_seq=torch.stack(att_seq,dim=2)# BxNxDtxTifget_dec_state:dec_state=torch.stack(dec_state,dim=1)returnctc_output,encode_len,att_output,att_seq,dec_stateurlhttps:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/asr.py#L72-L155 |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/asr.py","language":"python","identifier":"Decoder.init_state","parameters":"(self, bs)","argument_list":"","return_statement":"return self.get_state()","docstring":"Set all hidden states to zeros","docstring_summary":"Set all hidden states to zeros","docstring_tokens":["Set","all","hidden","states","to","zeros"],"function":"def init_state(self, bs):\n ''' Set all hidden states to zeros '''\n device = next(self.parameters()).device\n if self.enable_cell:\n self.hidden_state = (torch.zeros((self.layer, bs, self.dim), device=device),\n torch.zeros((self.layer, bs, self.dim), device=device))\n else:\n self.hidden_state = torch.zeros(\n (self.layer, bs, self.dim), device=device)\n return self.get_state()","function_tokens":["def","init_state","(","self",",","bs",")",":","device","=","next","(","self",".","parameters","(",")",")",".","device","if","self",".","enable_cell",":","self",".","hidden_state","=","(","torch",".","zeros","(","(","self",".","layer",",","bs",",","self",".","dim",")",",","device","=","device",")",",","torch",".","zeros","(","(","self",".","layer",",","bs",",","self",".","dim",")",",","device","=","device",")",")","else",":","self",".","hidden_state","=","torch",".","zeros","(","(","self",".","layer",",","bs",",","self",".","dim",")",",","device","=","device",")","return","self",".","get_state","(",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/asr.py#L180-L189"} |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/asr.py","language":"python","identifier":"Decoder.set_state","parameters":"(self, hidden_state)","argument_list":"","return_statement":"","docstring":"Set all hidden states\/cells, for decoding purpose","docstring_summary":"Set all hidden states\/cells, for decoding purpose","docstring_tokens":["Set","all","hidden","states","\/","cells","for","decoding","purpose"],"function":"def set_state(self, hidden_state):\n ''' Set all hidden states\/cells, for decoding purpose'''\n device = next(self.parameters()).device\n if self.enable_cell:\n self.hidden_state = (hidden_state[0].to(\n device), hidden_state[1].to(device))\n else:\n self.hidden_state = hidden_state.to(device)","function_tokens":["def","set_state","(","self",",","hidden_state",")",":","device","=","next","(","self",".","parameters","(",")",")",".","device","if","self",".","enable_cell",":","self",".","hidden_state","=","(","hidden_state","[","0","]",".","to","(","device",")",",","hidden_state","[","1","]",".","to","(","device",")",")","else",":","self",".","hidden_state","=","hidden_state",".","to","(","device",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/asr.py#L191-L198"} |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/asr.py","language":"python","identifier":"Decoder.get_state","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Return all hidden states\/cells, for decoding purpose","docstring_summary":"Return all hidden states\/cells, for decoding purpose","docstring_tokens":["Return","all","hidden","states","\/","cells","for","decoding","purpose"],"function":"def get_state(self):\n ''' Return all hidden states\/cells, for decoding purpose'''\n if self.enable_cell:\n return (self.hidden_state[0].cpu(), self.hidden_state[1].cpu())\n else:\n return self.hidden_state.cpu()","function_tokens":["def","get_state","(","self",")",":","if","self",".","enable_cell",":","return","(","self",".","hidden_state","[","0","]",".","cpu","(",")",",","self",".","hidden_state","[","1","]",".","cpu","(",")",")","else",":","return","self",".","hidden_state",".","cpu","(",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/asr.py#L200-L205"} |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/asr.py","language":"python","identifier":"Decoder.get_query","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Return state of all layers as query for attention","docstring_summary":"Return state of all layers as query for attention","docstring_tokens":["Return","state","of","all","layers","as","query","for","attention"],"function":"def get_query(self):\n ''' Return state of all layers as query for attention '''\n if self.enable_cell:\n return self.hidden_state[0].transpose(0, 1).reshape(-1, self.dim*self.layer)\n else:\n return self.hidden_state.transpose(0, 1).reshape(-1, self.dim*self.layer)","function_tokens":["def","get_query","(","self",")",":","if","self",".","enable_cell",":","return","self",".","hidden_state","[","0","]",".","transpose","(","0",",","1",")",".","reshape","(","-","1",",","self",".","dim","*","self",".","layer",")","else",":","return","self",".","hidden_state",".","transpose","(","0",",","1",")",".","reshape","(","-","1",",","self",".","dim","*","self",".","layer",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/asr.py#L207-L212"} |
|
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/asr.py","language":"python","identifier":"Decoder.forward","parameters":"(self, x)","argument_list":"","return_statement":"return char, x","docstring":"Decode and transform into vocab","docstring_summary":"Decode and transform into vocab","docstring_tokens":["Decode","and","transform","into","vocab"],"function":"def forward(self, x):\n ''' Decode and transform into vocab '''\n if not self.training:\n self.layers.flatten_parameters()\n x, self.hidden_state = self.layers(x.unsqueeze(1), self.hidden_state)\n x = x.squeeze(1)\n char = self.char_trans(self.final_dropout(x))\n return char, x","function_tokens":["def","forward","(","self",",","x",")",":","if","not","self",".","training",":","self",".","layers",".","flatten_parameters","(",")","x",",","self",".","hidden_state","=","self",".","layers","(","x",".","unsqueeze","(","1",")",",","self",".","hidden_state",")","x","=","x",".","squeeze","(","1",")","char","=","self",".","char_trans","(","self",".","final_dropout","(","x",")",")","return","char",",","x"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/asr.py#L214-L221"} |
|
nwoAlexander-H-Liu\/End-to-end-ASR-Pytorchsha1103d144423e8e692f1d18cd9db27a96cb49fb9dpathsrc\/ctc.pylanguagepythonidentifierCTCPrefixScore.full_computeparameters(self, g, r_prev)argument_listreturn_statementreturn psi, np.rollaxis(r, 2)docstringGiven prefix g, return the probability of all possible sequence y (where y = concat(g,c))\n This function computes all possible tokens for c (memory inefficient)docstring_summaryGiven prefix g, return the probability of all possible sequence y (where y = concat(g,c))\n This function computes all possible tokens for c (memory inefficient)docstring_tokensGivenprefixgreturntheprobabilityofallpossiblesequencey(wherey=concat(gc))Thisfunctioncomputesallpossibletokensforc(memoryinefficient)functiondef full_compute(self, g, r_prev):\n '''Given prefix g, return the probability of all possible sequence y (where y = concat(g,c))\n This function computes all possible tokens for c (memory inefficient)'''\n prefix_length = len(g)\n last_char = g[-1] if prefix_length > 0 else 0\n\n # init. r\n r = np.full((self.input_length, 2, self.odim),\n self.logzero, dtype=np.float32)\n\n # start from len(g) because is impossible for CTC to generate |y|>|X|\n start = max(1, prefix_length)\n\n if prefix_length == 0:\n r[0, 0, :] = self.x[0, :] # if g = <sos>\n\n psi = r[start-1, 0, :]\n\n phi = np.logaddexp(r_prev[:, 0], r_prev[:, 1])\n\n for t in range(start, self.input_length):\n # prev_blank\n prev_blank = np.full((self.odim), r_prev[t-1, 1], dtype=np.float32)\n # prev_nonblank\n prev_nonblank = np.full(\n (self.odim), r_prev[t-1, 0], dtype=np.float32)\n prev_nonblank[last_char] = self.logzero\n\n phi = np.logaddexp(prev_nonblank, prev_blank)\n # P(h|current step is non-blank) = [ P(prev. step = y) + P()]*P(c)\n r[t, 0, :] = np.logaddexp(r[t-1, 0, :], phi) + self.x[t, :]\n # P(h|current step is blank) = [P(prev. step is blank) + P(prev. step is non-blank)]*P(now=blank)\n r[t, 1, :] = np.logaddexp(\n r[t-1, 1, :], r[t-1, 0, :]) + self.x[t, self.blank]\n psi = np.logaddexp(psi, phi+self.x[t, :])\n\n #psi[self.eos] = np.logaddexp(r_prev[-1,0], r_prev[-1,1])\n return psi, np.rollaxis(r, 2)function_tokensdeffull_compute(self,g,r_prev):prefix_length=len(g)last_char=g[-1]ifprefix_length>0else0# init. rr=np.full((self.input_length,2,self.odim),self.logzero,dtype=np.float32)# start from len(g) because is impossible for CTC to generate |y|>|X|start=max(1,prefix_length)ifprefix_length==0:r[0,0,:]=self.x[0,:]# if g = <sos>psi=r[start-1,0,:]phi=np.logaddexp(r_prev[:,0],r_prev[:,1])fortinrange(start,self.input_length):# prev_blankprev_blank=np.full((self.odim),r_prev[t-1,1],dtype=np.float32)# prev_nonblankprev_nonblank=np.full((self.odim),r_prev[t-1,0],dtype=np.float32)prev_nonblank[last_char]=self.logzerophi=np.logaddexp(prev_nonblank,prev_blank)# P(h|current step is non-blank) = [ P(prev. step = y) + P()]*P(c)r[t,0,:]=np.logaddexp(r[t-1,0,:],phi)+self.x[t,:]# P(h|current step is blank) = [P(prev. step is blank) + P(prev. step is non-blank)]*P(now=blank)r[t,1,:]=np.logaddexp(r[t-1,1,:],r[t-1,0,:])+self.x[t,self.blank]psi=np.logaddexp(psi,phi+self.x[t,:])#psi[self.eos] = np.logaddexp(r_prev[-1,0], r_prev[-1,1])returnpsi,np.rollaxis(r,2)urlhttps:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/ctc.py#L37-L74 |
|
nwoAlexander-H-Liu\/End-to-end-ASR-Pytorchsha1103d144423e8e692f1d18cd9db27a96cb49fb9dpathsrc\/ctc.pylanguagepythonidentifierCTCPrefixScore.cheap_computeparameters(self, g, r_prev, candidates)argument_listreturn_statementreturn psi, np.rollaxis(r, 2)docstringGiven prefix g, return the probability of all possible sequence y (where y = concat(g,c))\n This function considers only those tokens in candidates for c (memory efficient)docstring_summaryGiven prefix g, return the probability of all possible sequence y (where y = concat(g,c))\n This function considers only those tokens in candidates for c (memory efficient)docstring_tokensGivenprefixgreturntheprobabilityofallpossiblesequencey(wherey=concat(gc))Thisfunctionconsidersonlythosetokensincandidatesforc(memoryefficient)functiondef cheap_compute(self, g, r_prev, candidates):\n '''Given prefix g, return the probability of all possible sequence y (where y = concat(g,c))\n This function considers only those tokens in candidates for c (memory efficient)'''\n prefix_length = len(g)\n odim = len(candidates)\n last_char = g[-1] if prefix_length > 0 else 0\n\n # init. r\n r = np.full((self.input_length, 2, len(candidates)),\n self.logzero, dtype=np.float32)\n\n # start from len(g) because is impossible for CTC to generate |y|>|X|\n start = max(1, prefix_length)\n\n if prefix_length == 0:\n r[0, 0, :] = self.x[0, candidates] # if g = <sos>\n\n psi = r[start-1, 0, :]\n # Phi = (prev_nonblank,prev_blank)\n sum_prev = np.logaddexp(r_prev[:, 0], r_prev[:, 1])\n phi = np.repeat(sum_prev[..., None],odim,axis=-1)\n # Handle edge case : last tok of prefix in candidates\n if prefix_length>0 and last_char in candidates:\n phi[:,candidates.index(last_char)] = r_prev[:,1]\n\n for t in range(start, self.input_length):\n # prev_blank\n # prev_blank = np.full((odim), r_prev[t-1, 1], dtype=np.float32)\n # prev_nonblank\n # prev_nonblank = np.full((odim), r_prev[t-1, 0], dtype=np.float32)\n # phi = np.logaddexp(prev_nonblank, prev_blank)\n # P(h|current step is non-blank) = P(prev. step = y)*P(c)\n r[t, 0, :] = np.logaddexp( r[t-1, 0, :], phi[t-1]) + self.x[t, candidates]\n # P(h|current step is blank) = [P(prev. step is blank) + P(prev. step is non-blank)]*P(now=blank)\n r[t, 1, :] = np.logaddexp( r[t-1, 1, :], r[t-1, 0, :]) + self.x[t, self.blank]\n psi = np.logaddexp(psi, phi[t-1,]+self.x[t, candidates])\n\n # P(end of sentence) = P(g)\n if self.eos in candidates:\n psi[candidates.index(self.eos)] = sum_prev[-1]\n return psi, np.rollaxis(r, 2)function_tokensdefcheap_compute(self,g,r_prev,candidates):prefix_length=len(g)odim=len(candidates)last_char=g[-1]ifprefix_length>0else0# init. rr=np.full((self.input_length,2,len(candidates)),self.logzero,dtype=np.float32)# start from len(g) because is impossible for CTC to generate |y|>|X|start=max(1,prefix_length)ifprefix_length==0:r[0,0,:]=self.x[0,candidates]# if g = <sos>psi=r[start-1,0,:]# Phi = (prev_nonblank,prev_blank)sum_prev=np.logaddexp(r_prev[:,0],r_prev[:,1])phi=np.repeat(sum_prev[...,None],odim,axis=-1)# Handle edge case : last tok of prefix in candidatesifprefix_length>0andlast_charincandidates:phi[:,candidates.index(last_char)]=r_prev[:,1]fortinrange(start,self.input_length):# prev_blank# prev_blank = np.full((odim), r_prev[t-1, 1], dtype=np.float32)# prev_nonblank# prev_nonblank = np.full((odim), r_prev[t-1, 0], dtype=np.float32)# phi = np.logaddexp(prev_nonblank, prev_blank)# P(h|current step is non-blank) = P(prev. 
step = y)*P(c)r[t,0,:]=np.logaddexp(r[t-1,0,:],phi[t-1])+self.x[t,candidates]# P(h|current step is blank) = [P(prev. step is blank) + P(prev. step is non-blank)]*P(now=blank)r[t,1,:]=np.logaddexp(r[t-1,1,:],r[t-1,0,:])+self.x[t,self.blank]psi=np.logaddexp(psi,phi[t-1,]+self.x[t,candidates])# P(end of sentence) = P(g)ifself.eosincandidates:psi[candidates.index(self.eos)]=sum_prev[-1]returnpsi,np.rollaxis(r,2)urlhttps:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/ctc.py#L76-L116 |