sia_tp_sample / Alexander-H-Liu__End-to-end-ASR-Pytorch.jsonl
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"bin\/train_asr.py","language":"python","identifier":"Solver.fetch_data","parameters":"(self, data)","argument_list":"","return_statement":"return feat, feat_len, txt, txt_len","docstring":"Move data to device and compute text seq. length","docstring_summary":"Move data to device and compute text seq. length","docstring_tokens":["Move","data","to","device","and","compute","text","seq",".","length"],"function":"def fetch_data(self, data):\n ''' Move data to device and compute text seq. length'''\n _, feat, feat_len, txt = data\n feat = feat.to(self.device)\n feat_len = feat_len.to(self.device)\n txt = txt.to(self.device)\n txt_len = torch.sum(txt != 0, dim=-1)\n\n return feat, feat_len, txt, txt_len","function_tokens":["def","fetch_data","(","self",",","data",")",":","_",",","feat",",","feat_len",",","txt","=","data","feat","=","feat",".","to","(","self",".","device",")","feat_len","=","feat_len",".","to","(","self",".","device",")","txt","=","txt",".","to","(","self",".","device",")","txt_len","=","torch",".","sum","(","txt","!=","0",",","dim","=","-","1",")","return","feat",",","feat_len",",","txt",",","txt_len"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/bin\/train_asr.py#L20-L28"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"bin\/train_asr.py","language":"python","identifier":"Solver.load_data","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Load data for training\/validation, store tokenizer and input\/output shape","docstring_summary":"Load data for training\/validation, store tokenizer and input\/output shape","docstring_tokens":["Load","data","for","training","\/","validation","store","tokenizer","and","input","\/","output","shape"],"function":"def load_data(self):\n ''' Load data for training\/validation, store tokenizer and input\/output shape'''\n self.tr_set, self.dv_set, self.feat_dim, self.vocab_size, self.tokenizer, msg = \\\n load_dataset(self.paras.njobs, self.paras.gpu, self.paras.pin_memory,\n self.curriculum > 0, **self.config['data'])\n self.verbose(msg)","function_tokens":["def","load_data","(","self",")",":","self",".","tr_set",",","self",".","dv_set",",","self",".","feat_dim",",","self",".","vocab_size",",","self",".","tokenizer",",","msg","=","load_dataset","(","self",".","paras",".","njobs",",","self",".","paras",".","gpu",",","self",".","paras",".","pin_memory",",","self",".","curriculum",">","0",",","*","*","self",".","config","[","'data'","]",")","self",".","verbose","(","msg",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/bin\/train_asr.py#L30-L35"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"bin\/train_asr.py","language":"python","identifier":"Solver.set_model","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Setup ASR model and optimizer","docstring_summary":"Setup ASR model and optimizer","docstring_tokens":["Setup","ASR","model","and","optimizer"],"function":"def set_model(self):\n ''' Setup ASR model and optimizer '''\n # Model\n init_adadelta = self.config['hparas']['optimizer'] == 'Adadelta'\n self.model = ASR(self.feat_dim, self.vocab_size, init_adadelta, **\n self.config['model']).to(self.device)\n self.verbose(self.model.create_msg())\n model_paras = [{'params': self.model.parameters()}]\n\n # Losses\n self.seq_loss = torch.nn.CrossEntropyLoss(ignore_index=0)\n # Note: zero_infinity=False is unstable?\n self.ctc_loss = torch.nn.CTCLoss(blank=0, zero_infinity=False)\n\n # Plug-ins\n self.emb_fuse = False\n self.emb_reg = ('emb' in self.config) and (\n self.config['emb']['enable'])\n if self.emb_reg:\n from src.plugin import EmbeddingRegularizer\n self.emb_decoder = EmbeddingRegularizer(\n self.tokenizer, self.model.dec_dim, **self.config['emb']).to(self.device)\n model_paras.append({'params': self.emb_decoder.parameters()})\n self.emb_fuse = self.emb_decoder.apply_fuse\n if self.emb_fuse:\n self.seq_loss = torch.nn.NLLLoss(ignore_index=0)\n self.verbose(self.emb_decoder.create_msg())\n\n # Optimizer\n self.optimizer = Optimizer(model_paras, **self.config['hparas'])\n self.verbose(self.optimizer.create_msg())\n\n # Enable AMP if needed\n self.enable_apex()\n\n # Automatically load pre-trained model if self.paras.load is given\n self.load_ckpt()","function_tokens":["def","set_model","(","self",")",":","# Model","init_adadelta","=","self",".","config","[","'hparas'","]","[","'optimizer'","]","==","'Adadelta'","self",".","model","=","ASR","(","self",".","feat_dim",",","self",".","vocab_size",",","init_adadelta",",","*","*","self",".","config","[","'model'","]",")",".","to","(","self",".","device",")","self",".","verbose","(","self",".","model",".","create_msg","(",")",")","model_paras","=","[","{","'params'",":","self",".","model",".","parameters","(",")","}","]","# Losses","self",".","seq_loss","=","torch",".","nn",".","CrossEntropyLoss","(","ignore_index","=","0",")","# Note: zero_infinity=False is unstable?","self",".","ctc_loss","=","torch",".","nn",".","CTCLoss","(","blank","=","0",",","zero_infinity","=","False",")","# Plug-ins","self",".","emb_fuse","=","False","self",".","emb_reg","=","(","'emb'","in","self",".","config",")","and","(","self",".","config","[","'emb'","]","[","'enable'","]",")","if","self",".","emb_reg",":","from","src",".","plugin","import","EmbeddingRegularizer","self",".","emb_decoder","=","EmbeddingRegularizer","(","self",".","tokenizer",",","self",".","model",".","dec_dim",",","*","*","self",".","config","[","'emb'","]",")",".","to","(","self",".","device",")","model_paras",".","append","(","{","'params'",":","self",".","emb_decoder",".","parameters","(",")","}",")","self",".","emb_fuse","=","self",".","emb_decoder",".","apply_fuse","if","self",".","emb_fuse",":","self",".","seq_loss","=","torch",".","nn",".","NLLLoss","(","ignore_index","=","0",")","self",".","verbose","(","self",".","emb_decoder",".","create_msg","(",")",")","# 
Optimizer","self",".","optimizer","=","Optimizer","(","model_paras",",","*","*","self",".","config","[","'hparas'","]",")","self",".","verbose","(","self",".","optimizer",".","create_msg","(",")",")","# Enable AMP if needed","self",".","enable_apex","(",")","# Automatically load pre-trained model if self.paras.load is given","self",".","load_ckpt","(",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/bin\/train_asr.py#L37-L73"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"bin\/train_asr.py","language":"python","identifier":"Solver.exec","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Training End-to-end ASR system","docstring_summary":"Training End-to-end ASR system","docstring_tokens":["Training","End","-","to","-","end","ASR","system"],"function":"def exec(self):\n ''' Training End-to-end ASR system '''\n self.verbose('Total training steps {}.'.format(\n human_format(self.max_step)))\n ctc_loss, att_loss, emb_loss = None, None, None\n n_epochs = 0\n self.timer.set()\n\n while self.step < self.max_step:\n # Renew dataloader to enable random sampling\n if self.curriculum > 0 and n_epochs == self.curriculum:\n self.verbose(\n 'Curriculum learning ends after {} epochs, starting random sampling.'.format(n_epochs))\n self.tr_set, _, _, _, _, _ = \\\n load_dataset(self.paras.njobs, self.paras.gpu, self.paras.pin_memory,\n False, **self.config['data'])\n for data in self.tr_set:\n # Pre-step : update tf_rate\/lr_rate and do zero_grad\n tf_rate = self.optimizer.pre_step(self.step)\n total_loss = 0\n\n # Fetch data\n feat, feat_len, txt, txt_len = self.fetch_data(data)\n self.timer.cnt('rd')\n\n # Forward model\n # Note: txt should NOT start w\/ <sos>\n ctc_output, encode_len, att_output, att_align, dec_state = \\\n self.model(feat, feat_len, max(txt_len), tf_rate=tf_rate,\n teacher=txt, get_dec_state=self.emb_reg)\n\n # Plugins\n if self.emb_reg:\n emb_loss, fuse_output = self.emb_decoder(\n dec_state, att_output, label=txt)\n total_loss += self.emb_decoder.weight*emb_loss\n\n # Compute all objectives\n if ctc_output is not None:\n if self.paras.cudnn_ctc:\n ctc_loss = self.ctc_loss(ctc_output.transpose(0, 1),\n txt.to_sparse().values().to(device='cpu', dtype=torch.int32),\n [ctc_output.shape[1]] *\n len(ctc_output),\n txt_len.cpu().tolist())\n else:\n ctc_loss = self.ctc_loss(ctc_output.transpose(\n 0, 1), txt, encode_len, txt_len)\n total_loss += ctc_loss*self.model.ctc_weight\n\n if att_output is not None:\n b, t, _ = att_output.shape\n att_output = fuse_output if self.emb_fuse else att_output\n att_loss = self.seq_loss(\n att_output.view(b*t, -1), txt.view(-1))\n total_loss += att_loss*(1-self.model.ctc_weight)\n\n self.timer.cnt('fw')\n\n # Backprop\n grad_norm = self.backward(total_loss)\n self.step += 1\n\n # Logger\n if (self.step == 1) or (self.step % self.PROGRESS_STEP == 0):\n self.progress('Tr stat | Loss - {:.2f} | Grad. 
Norm - {:.2f} | {}'\n .format(total_loss.cpu().item(), grad_norm, self.timer.show()))\n self.write_log(\n 'loss', {'tr_ctc': ctc_loss, 'tr_att': att_loss})\n self.write_log('emb_loss', {'tr': emb_loss})\n self.write_log('wer', {'tr_att': cal_er(self.tokenizer, att_output, txt),\n 'tr_ctc': cal_er(self.tokenizer, ctc_output, txt, ctc=True)})\n if self.emb_fuse:\n if self.emb_decoder.fuse_learnable:\n self.write_log('fuse_lambda', {\n 'emb': self.emb_decoder.get_weight()})\n self.write_log(\n 'fuse_temp', {'temp': self.emb_decoder.get_temp()})\n\n # Validation\n if (self.step == 1) or (self.step % self.valid_step == 0):\n self.validate()\n\n # End of step\n # https:\/\/github.com\/pytorch\/pytorch\/issues\/13246#issuecomment-529185354\n torch.cuda.empty_cache()\n self.timer.set()\n if self.step > self.max_step:\n break\n n_epochs += 1\n self.log.close()","function_tokens":["def","exec","(","self",")",":","self",".","verbose","(","'Total training steps {}.'",".","format","(","human_format","(","self",".","max_step",")",")",")","ctc_loss",",","att_loss",",","emb_loss","=","None",",","None",",","None","n_epochs","=","0","self",".","timer",".","set","(",")","while","self",".","step","<","self",".","max_step",":","# Renew dataloader to enable random sampling","if","self",".","curriculum",">","0","and","n_epochs","==","self",".","curriculum",":","self",".","verbose","(","'Curriculum learning ends after {} epochs, starting random sampling.'",".","format","(","n_epochs",")",")","self",".","tr_set",",","_",",","_",",","_",",","_",",","_","=","load_dataset","(","self",".","paras",".","njobs",",","self",".","paras",".","gpu",",","self",".","paras",".","pin_memory",",","False",",","*","*","self",".","config","[","'data'","]",")","for","data","in","self",".","tr_set",":","# Pre-step : update tf_rate\/lr_rate and do zero_grad","tf_rate","=","self",".","optimizer",".","pre_step","(","self",".","step",")","total_loss","=","0","# Fetch data","feat",",","feat_len",",","txt",",","txt_len","=","self",".","fetch_data","(","data",")","self",".","timer",".","cnt","(","'rd'",")","# Forward model","# Note: txt should NOT start w\/ <sos>","ctc_output",",","encode_len",",","att_output",",","att_align",",","dec_state","=","self",".","model","(","feat",",","feat_len",",","max","(","txt_len",")",",","tf_rate","=","tf_rate",",","teacher","=","txt",",","get_dec_state","=","self",".","emb_reg",")","# Plugins","if","self",".","emb_reg",":","emb_loss",",","fuse_output","=","self",".","emb_decoder","(","dec_state",",","att_output",",","label","=","txt",")","total_loss","+=","self",".","emb_decoder",".","weight","*","emb_loss","# Compute all 
objectives","if","ctc_output","is","not","None",":","if","self",".","paras",".","cudnn_ctc",":","ctc_loss","=","self",".","ctc_loss","(","ctc_output",".","transpose","(","0",",","1",")",",","txt",".","to_sparse","(",")",".","values","(",")",".","to","(","device","=","'cpu'",",","dtype","=","torch",".","int32",")",",","[","ctc_output",".","shape","[","1","]","]","*","len","(","ctc_output",")",",","txt_len",".","cpu","(",")",".","tolist","(",")",")","else",":","ctc_loss","=","self",".","ctc_loss","(","ctc_output",".","transpose","(","0",",","1",")",",","txt",",","encode_len",",","txt_len",")","total_loss","+=","ctc_loss","*","self",".","model",".","ctc_weight","if","att_output","is","not","None",":","b",",","t",",","_","=","att_output",".","shape","att_output","=","fuse_output","if","self",".","emb_fuse","else","att_output","att_loss","=","self",".","seq_loss","(","att_output",".","view","(","b","*","t",",","-","1",")",",","txt",".","view","(","-","1",")",")","total_loss","+=","att_loss","*","(","1","-","self",".","model",".","ctc_weight",")","self",".","timer",".","cnt","(","'fw'",")","# Backprop","grad_norm","=","self",".","backward","(","total_loss",")","self",".","step","+=","1","# Logger","if","(","self",".","step","==","1",")","or","(","self",".","step","%","self",".","PROGRESS_STEP","==","0",")",":","self",".","progress","(","'Tr stat | Loss - {:.2f} | Grad. Norm - {:.2f} | {}'",".","format","(","total_loss",".","cpu","(",")",".","item","(",")",",","grad_norm",",","self",".","timer",".","show","(",")",")",")","self",".","write_log","(","'loss'",",","{","'tr_ctc'",":","ctc_loss",",","'tr_att'",":","att_loss","}",")","self",".","write_log","(","'emb_loss'",",","{","'tr'",":","emb_loss","}",")","self",".","write_log","(","'wer'",",","{","'tr_att'",":","cal_er","(","self",".","tokenizer",",","att_output",",","txt",")",",","'tr_ctc'",":","cal_er","(","self",".","tokenizer",",","ctc_output",",","txt",",","ctc","=","True",")","}",")","if","self",".","emb_fuse",":","if","self",".","emb_decoder",".","fuse_learnable",":","self",".","write_log","(","'fuse_lambda'",",","{","'emb'",":","self",".","emb_decoder",".","get_weight","(",")","}",")","self",".","write_log","(","'fuse_temp'",",","{","'temp'",":","self",".","emb_decoder",".","get_temp","(",")","}",")","# Validation","if","(","self",".","step","==","1",")","or","(","self",".","step","%","self",".","valid_step","==","0",")",":","self",".","validate","(",")","# End of step","# https:\/\/github.com\/pytorch\/pytorch\/issues\/13246#issuecomment-529185354","torch",".","cuda",".","empty_cache","(",")","self",".","timer",".","set","(",")","if","self",".","step",">","self",".","max_step",":","break","n_epochs","+=","1","self",".","log",".","close","(",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/bin\/train_asr.py#L77-L167"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"bin\/train_lm.py","language":"python","identifier":"Solver.fetch_data","parameters":"(self, data)","argument_list":"","return_statement":"return txt, txt_len","docstring":"Move data to device, insert <sos> and compute text seq. length","docstring_summary":"Move data to device, insert <sos> and compute text seq. length","docstring_tokens":["Move","data","to","device","insert","<sos",">","and","compute","text","seq",".","length"],"function":"def fetch_data(self, data):\n ''' Move data to device, insert <sos> and compute text seq. length'''\n txt = torch.cat(\n (torch.zeros((data.shape[0], 1), dtype=torch.long), data), dim=1).to(self.device)\n txt_len = torch.sum(data != 0, dim=-1)\n return txt, txt_len","function_tokens":["def","fetch_data","(","self",",","data",")",":","txt","=","torch",".","cat","(","(","torch",".","zeros","(","(","data",".","shape","[","0","]",",","1",")",",","dtype","=","torch",".","long",")",",","data",")",",","dim","=","1",")",".","to","(","self",".","device",")","txt_len","=","torch",".","sum","(","data","!=","0",",","dim","=","-","1",")","return","txt",",","txt_len"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/bin\/train_lm.py#L18-L23"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"bin\/train_lm.py","language":"python","identifier":"Solver.load_data","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Load data for training\/validation, store tokenizer and input\/output shape","docstring_summary":"Load data for training\/validation, store tokenizer and input\/output shape","docstring_tokens":["Load","data","for","training","\/","validation","store","tokenizer","and","input","\/","output","shape"],"function":"def load_data(self):\n ''' Load data for training\/validation, store tokenizer and input\/output shape'''\n self.tr_set, self.dv_set, self.vocab_size, self.tokenizer, msg = \\\n load_textset(self.paras.njobs, self.paras.gpu,\n self.paras.pin_memory, **self.config['data'])\n self.verbose(msg)","function_tokens":["def","load_data","(","self",")",":","self",".","tr_set",",","self",".","dv_set",",","self",".","vocab_size",",","self",".","tokenizer",",","msg","=","load_textset","(","self",".","paras",".","njobs",",","self",".","paras",".","gpu",",","self",".","paras",".","pin_memory",",","*","*","self",".","config","[","'data'","]",")","self",".","verbose","(","msg",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/bin\/train_lm.py#L25-L30"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"bin\/train_lm.py","language":"python","identifier":"Solver.set_model","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Setup ASR model and optimizer","docstring_summary":"Setup ASR model and optimizer","docstring_tokens":["Setup","ASR","model","and","optimizer"],"function":"def set_model(self):\n ''' Setup ASR model and optimizer '''\n\n # Model\n self.model = RNNLM(self.vocab_size, **\n self.config['model']).to(self.device)\n self.verbose(self.model.create_msg())\n # Losses\n self.seq_loss = torch.nn.CrossEntropyLoss(ignore_index=0)\n # Optimizer\n self.optimizer = Optimizer(\n self.model.parameters(), **self.config['hparas'])\n # Enable AMP if needed\n self.enable_apex()\n # load pre-trained model\n if self.paras.load:\n self.load_ckpt()\n ckpt = torch.load(self.paras.load, map_location=self.device)\n self.model.load_state_dict(ckpt['model'])\n self.optimizer.load_opt_state_dict(ckpt['optimizer'])\n self.step = ckpt['global_step']\n self.verbose('Load ckpt from {}, restarting at step {}'.format(\n self.paras.load, self.step))","function_tokens":["def","set_model","(","self",")",":","# Model","self",".","model","=","RNNLM","(","self",".","vocab_size",",","*","*","self",".","config","[","'model'","]",")",".","to","(","self",".","device",")","self",".","verbose","(","self",".","model",".","create_msg","(",")",")","# Losses","self",".","seq_loss","=","torch",".","nn",".","CrossEntropyLoss","(","ignore_index","=","0",")","# Optimizer","self",".","optimizer","=","Optimizer","(","self",".","model",".","parameters","(",")",",","*","*","self",".","config","[","'hparas'","]",")","# Enable AMP if needed","self",".","enable_apex","(",")","# load pre-trained model","if","self",".","paras",".","load",":","self",".","load_ckpt","(",")","ckpt","=","torch",".","load","(","self",".","paras",".","load",",","map_location","=","self",".","device",")","self",".","model",".","load_state_dict","(","ckpt","[","'model'","]",")","self",".","optimizer",".","load_opt_state_dict","(","ckpt","[","'optimizer'","]",")","self",".","step","=","ckpt","[","'global_step'","]","self",".","verbose","(","'Load ckpt from {}, restarting at step {}'",".","format","(","self",".","paras",".","load",",","self",".","step",")",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/bin\/train_lm.py#L32-L54"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"bin\/train_lm.py","language":"python","identifier":"Solver.exec","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Training End-to-end ASR system","docstring_summary":"Training End-to-end ASR system","docstring_tokens":["Training","End","-","to","-","end","ASR","system"],"function":"def exec(self):\n ''' Training End-to-end ASR system '''\n self.verbose('Total training steps {}.'.format(\n human_format(self.max_step)))\n self.timer.set()\n\n while self.step < self.max_step:\n for data in self.tr_set:\n # Pre-step : update tf_rate\/lr_rate and do zero_grad\n self.optimizer.pre_step(self.step)\n\n # Fetch data\n txt, txt_len = self.fetch_data(data)\n self.timer.cnt('rd')\n\n # Forward model\n pred, _ = self.model(txt[:, :-1], txt_len)\n\n # Compute all objectives\n lm_loss = self.seq_loss(\n pred.view(-1, self.vocab_size), txt[:, 1:].reshape(-1))\n self.timer.cnt('fw')\n\n # Backprop\n grad_norm = self.backward(lm_loss)\n self.step += 1\n\n # Logger\n if self.step % self.PROGRESS_STEP == 0:\n self.progress('Tr stat | Loss - {:.2f} | Grad. Norm - {:.2f} | {}'\n .format(lm_loss.cpu().item(), grad_norm, self.timer.show()))\n self.write_log('entropy', {'tr': lm_loss})\n self.write_log(\n 'perplexity', {'tr': torch.exp(lm_loss).cpu().item()})\n\n # Validation\n if (self.step == 1) or (self.step % self.valid_step == 0):\n self.validate()\n\n # End of step\n self.timer.set()\n if self.step > self.max_step:\n break\n self.log.close()","function_tokens":["def","exec","(","self",")",":","self",".","verbose","(","'Total training steps {}.'",".","format","(","human_format","(","self",".","max_step",")",")",")","self",".","timer",".","set","(",")","while","self",".","step","<","self",".","max_step",":","for","data","in","self",".","tr_set",":","# Pre-step : update tf_rate\/lr_rate and do zero_grad","self",".","optimizer",".","pre_step","(","self",".","step",")","# Fetch data","txt",",","txt_len","=","self",".","fetch_data","(","data",")","self",".","timer",".","cnt","(","'rd'",")","# Forward model","pred",",","_","=","self",".","model","(","txt","[",":",",",":","-","1","]",",","txt_len",")","# Compute all objectives","lm_loss","=","self",".","seq_loss","(","pred",".","view","(","-","1",",","self",".","vocab_size",")",",","txt","[",":",",","1",":","]",".","reshape","(","-","1",")",")","self",".","timer",".","cnt","(","'fw'",")","# Backprop","grad_norm","=","self",".","backward","(","lm_loss",")","self",".","step","+=","1","# Logger","if","self",".","step","%","self",".","PROGRESS_STEP","==","0",":","self",".","progress","(","'Tr stat | Loss - {:.2f} | Grad. Norm - {:.2f} | {}'",".","format","(","lm_loss",".","cpu","(",")",".","item","(",")",",","grad_norm",",","self",".","timer",".","show","(",")",")",")","self",".","write_log","(","'entropy'",",","{","'tr'",":","lm_loss","}",")","self",".","write_log","(","'perplexity'",",","{","'tr'",":","torch",".","exp","(","lm_loss",")",".","cpu","(",")",".","item","(",")","}",")","# Validation","if","(","self",".","step","==","1",")","or","(","self",".","step","%","self",".","valid_step","==","0",")",":","self",".","validate","(",")","# End of step","self",".","timer",".","set","(",")","if","self",".","step",">","self",".","max_step",":","break","self",".","log",".","close","(",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/bin\/train_lm.py#L56-L99"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"corpus\/librispeech.py","language":"python","identifier":"read_text","parameters":"(file)","argument_list":"","return_statement":"","docstring":"Get transcription of target wave file, \n it's somewhat redundant for accessing each txt multiplt times,\n but it works fine with multi-thread","docstring_summary":"Get transcription of target wave file, \n it's somewhat redundant for accessing each txt multiplt times,\n but it works fine with multi-thread","docstring_tokens":["Get","transcription","of","target","wave","file","it","s","somewhat","redundant","for","accessing","each","txt","multiplt","times","but","it","works","fine","with","multi","-","thread"],"function":"def read_text(file):\n '''Get transcription of target wave file, \n it's somewhat redundant for accessing each txt multiplt times,\n but it works fine with multi-thread'''\n src_file = '-'.join(file.split('-')[:-1])+'.trans.txt'\n idx = file.split('\/')[-1].split('.')[0]\n\n with open(src_file, 'r') as fp:\n for line in fp:\n if idx == line.split(' ')[0]:\n return line[:-1].split(' ', 1)[1]","function_tokens":["def","read_text","(","file",")",":","src_file","=","'-'",".","join","(","file",".","split","(","'-'",")","[",":","-","1","]",")","+","'.trans.txt'","idx","=","file",".","split","(","'\/'",")","[","-","1","]",".","split","(","'.'",")","[","0","]","with","open","(","src_file",",","'r'",")","as","fp",":","for","line","in","fp",":","if","idx","==","line",".","split","(","' '",")","[","0","]",":","return","line","[",":","-","1","]",".","split","(","' '",",","1",")","[","1","]"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/corpus\/librispeech.py#L15-L25"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/bert_embedding.py","language":"python","identifier":"generate_embedding","parameters":"(bert_model, labels)","argument_list":"","return_statement":"return embedding","docstring":"Generate bert's embedding from fine-tuned model.","docstring_summary":"Generate bert's embedding from fine-tuned model.","docstring_tokens":["Generate","bert","s","embedding","from","fine","-","tuned","model","."],"function":"def generate_embedding(bert_model, labels):\n \"\"\"Generate bert's embedding from fine-tuned model.\"\"\"\n batch_size, time = labels.shape\n\n cls_ids = torch.full(\n (batch_size, 1), bert_model.bert_text_encoder.cls_idx, dtype=labels.dtype, device=labels.device)\n bert_labels = torch.cat([cls_ids, labels], 1)\n # replace eos with sep\n eos_idx = bert_model.bert_text_encoder.eos_idx\n sep_idx = bert_model.bert_text_encoder.sep_idx\n bert_labels[bert_labels == eos_idx] = sep_idx\n\n embedding, _ = bert_model.bert(bert_labels, output_all_encoded_layers=True)\n # sum over all layers embedding\n embedding = torch.stack(embedding).sum(0)\n # get rid of cls\n embedding = embedding[:, 1:]\n\n assert labels.shape == embedding.shape[:-1]\n\n return embedding","function_tokens":["def","generate_embedding","(","bert_model",",","labels",")",":","batch_size",",","time","=","labels",".","shape","cls_ids","=","torch",".","full","(","(","batch_size",",","1",")",",","bert_model",".","bert_text_encoder",".","cls_idx",",","dtype","=","labels",".","dtype",",","device","=","labels",".","device",")","bert_labels","=","torch",".","cat","(","[","cls_ids",",","labels","]",",","1",")","# replace eos with sep","eos_idx","=","bert_model",".","bert_text_encoder",".","eos_idx","sep_idx","=","bert_model",".","bert_text_encoder",".","sep_idx","bert_labels","[","bert_labels","==","eos_idx","]","=","sep_idx","embedding",",","_","=","bert_model",".","bert","(","bert_labels",",","output_all_encoded_layers","=","True",")","# sum over all layers embedding","embedding","=","torch",".","stack","(","embedding",")",".","sum","(","0",")","# get rid of cls","embedding","=","embedding","[",":",",","1",":","]","assert","labels",".","shape","==","embedding",".","shape","[",":","-","1","]","return","embedding"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/bert_embedding.py#L38-L58"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/bert_embedding.py","language":"python","identifier":"load_fine_tuned_model","parameters":"(bert_model, text_encoder, path)","argument_list":"","return_statement":"return model","docstring":"Load fine-tuned bert model given text encoder and checkpoint path.","docstring_summary":"Load fine-tuned bert model given text encoder and checkpoint path.","docstring_tokens":["Load","fine","-","tuned","bert","model","given","text","encoder","and","checkpoint","path","."],"function":"def load_fine_tuned_model(bert_model, text_encoder, path):\n \"\"\"Load fine-tuned bert model given text encoder and checkpoint path.\"\"\"\n bert_text_encoder = BertLikeSentencePieceTextEncoder(text_encoder)\n\n model = BertForMaskedLM.from_pretrained(bert_model)\n model.bert_text_encoder = bert_text_encoder\n model.bert.embeddings.word_embeddings = nn.Embedding(\n bert_text_encoder.vocab_size, model.bert.embeddings.word_embeddings.weight.shape[1])\n model.config.vocab_size = bert_text_encoder.vocab_size\n model.cls = BertOnlyMLMHead(\n model.config, model.bert.embeddings.word_embeddings.weight)\n\n model.load_state_dict(torch.load(path))\n\n return model","function_tokens":["def","load_fine_tuned_model","(","bert_model",",","text_encoder",",","path",")",":","bert_text_encoder","=","BertLikeSentencePieceTextEncoder","(","text_encoder",")","model","=","BertForMaskedLM",".","from_pretrained","(","bert_model",")","model",".","bert_text_encoder","=","bert_text_encoder","model",".","bert",".","embeddings",".","word_embeddings","=","nn",".","Embedding","(","bert_text_encoder",".","vocab_size",",","model",".","bert",".","embeddings",".","word_embeddings",".","weight",".","shape","[","1","]",")","model",".","config",".","vocab_size","=","bert_text_encoder",".","vocab_size","model",".","cls","=","BertOnlyMLMHead","(","model",".","config",",","model",".","bert",".","embeddings",".","word_embeddings",".","weight",")","model",".","load_state_dict","(","torch",".","load","(","path",")",")","return","model"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/bert_embedding.py#L61-L75"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/solver.py","language":"python","identifier":"BaseSolver.backward","parameters":"(self, loss)","argument_list":"","return_statement":"return grad_norm","docstring":"Standard backward step with self.timer and debugger\n Arguments\n loss - the loss to perform loss.backward()","docstring_summary":"Standard backward step with self.timer and debugger\n Arguments\n loss - the loss to perform loss.backward()","docstring_tokens":["Standard","backward","step","with","self",".","timer","and","debugger","Arguments","loss","-","the","loss","to","perform","loss",".","backward","()"],"function":"def backward(self, loss):\n '''\n Standard backward step with self.timer and debugger\n Arguments\n loss - the loss to perform loss.backward()\n '''\n self.timer.set()\n loss.backward()\n grad_norm = torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.GRAD_CLIP)\n if math.isnan(grad_norm):\n self.verbose('Error : grad norm is NaN @ step '+str(self.step))\n else:\n self.optimizer.step()\n self.timer.cnt('bw')\n return grad_norm","function_tokens":["def","backward","(","self",",","loss",")",":","self",".","timer",".","set","(",")","loss",".","backward","(",")","grad_norm","=","torch",".","nn",".","utils",".","clip_grad_norm_","(","self",".","model",".","parameters","(",")",",","self",".","GRAD_CLIP",")","if","math",".","isnan","(","grad_norm",")",":","self",".","verbose","(","'Error : grad norm is NaN @ step '","+","str","(","self",".","step",")",")","else",":","self",".","optimizer",".","step","(",")","self",".","timer",".","cnt","(","'bw'",")","return","grad_norm"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/solver.py#L76-L91"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/solver.py","language":"python","identifier":"BaseSolver.load_ckpt","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Load ckpt if --load option is specified","docstring_summary":"Load ckpt if --load option is specified","docstring_tokens":["Load","ckpt","if","--","load","option","is","specified"],"function":"def load_ckpt(self):\n ''' Load ckpt if --load option is specified '''\n if self.paras.load:\n # Load weights\n ckpt = torch.load(\n self.paras.load, map_location=self.device if self.mode == 'train' else 'cpu')\n self.model.load_state_dict(ckpt['model'])\n if self.emb_decoder is not None:\n self.emb_decoder.load_state_dict(ckpt['emb_decoder'])\n # if self.amp:\n # amp.load_state_dict(ckpt['amp'])\n # Load task-dependent items\n metric = \"None\"\n score = 0.0\n for k, v in ckpt.items():\n if type(v) is float:\n metric, score = k, v\n if self.mode == 'train':\n self.step = ckpt['global_step']\n self.optimizer.load_opt_state_dict(ckpt['optimizer'])\n self.verbose('Load ckpt from {}, restarting at step {} (recorded {} = {:.2f} %)'.format(\n self.paras.load, self.step, metric, score))\n else:\n self.model.eval()\n if self.emb_decoder is not None:\n self.emb_decoder.eval()\n self.verbose('Evaluation target = {} (recorded {} = {:.2f} %)'.format(self.paras.load, metric, score))","function_tokens":["def","load_ckpt","(","self",")",":","if","self",".","paras",".","load",":","# Load weights","ckpt","=","torch",".","load","(","self",".","paras",".","load",",","map_location","=","self",".","device","if","self",".","mode","==","'train'","else","'cpu'",")","self",".","model",".","load_state_dict","(","ckpt","[","'model'","]",")","if","self",".","emb_decoder","is","not","None",":","self",".","emb_decoder",".","load_state_dict","(","ckpt","[","'emb_decoder'","]",")","# if self.amp:","# amp.load_state_dict(ckpt['amp'])","# Load task-dependent items","metric","=","\"None\"","score","=","0.0","for","k",",","v","in","ckpt",".","items","(",")",":","if","type","(","v",")","is","float",":","metric",",","score","=","k",",","v","if","self",".","mode","==","'train'",":","self",".","step","=","ckpt","[","'global_step'","]","self",".","optimizer",".","load_opt_state_dict","(","ckpt","[","'optimizer'","]",")","self",".","verbose","(","'Load ckpt from {}, restarting at step {} (recorded {} = {:.2f} %)'",".","format","(","self",".","paras",".","load",",","self",".","step",",","metric",",","score",")",")","else",":","self",".","model",".","eval","(",")","if","self",".","emb_decoder","is","not","None",":","self",".","emb_decoder",".","eval","(",")","self",".","verbose","(","'Evaluation target = {} (recorded {} = {:.2f} %)'",".","format","(","self",".","paras",".","load",",","metric",",","score",")",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/solver.py#L93-L119"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/solver.py","language":"python","identifier":"BaseSolver.verbose","parameters":"(self, msg)","argument_list":"","return_statement":"","docstring":"Verbose function for print information to stdout","docstring_summary":"Verbose function for print information to stdout","docstring_tokens":["Verbose","function","for","print","information","to","stdout"],"function":"def verbose(self, msg):\n ''' Verbose function for print information to stdout'''\n if self.paras.verbose:\n if type(msg) == list:\n for m in msg:\n print('[INFO]', m.ljust(100))\n else:\n print('[INFO]', msg.ljust(100))","function_tokens":["def","verbose","(","self",",","msg",")",":","if","self",".","paras",".","verbose",":","if","type","(","msg",")","==","list",":","for","m","in","msg",":","print","(","'[INFO]'",",","m",".","ljust","(","100",")",")","else",":","print","(","'[INFO]'",",","msg",".","ljust","(","100",")",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/solver.py#L121-L128"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/solver.py","language":"python","identifier":"BaseSolver.progress","parameters":"(self, msg)","argument_list":"","return_statement":"","docstring":"Verbose function for updating progress on stdout (do not include newline)","docstring_summary":"Verbose function for updating progress on stdout (do not include newline)","docstring_tokens":["Verbose","function","for","updating","progress","on","stdout","(","do","not","include","newline",")"],"function":"def progress(self, msg):\n ''' Verbose function for updating progress on stdout (do not include newline) '''\n if self.paras.verbose:\n sys.stdout.write(\"\\033[K\") # Clear line\n print('[{}] {}'.format(human_format(self.step), msg), end='\\r')","function_tokens":["def","progress","(","self",",","msg",")",":","if","self",".","paras",".","verbose",":","sys",".","stdout",".","write","(","\"\\033[K\"",")","# Clear line","print","(","'[{}] {}'",".","format","(","human_format","(","self",".","step",")",",","msg",")",",","end","=","'\\r'",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/solver.py#L130-L134"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/solver.py","language":"python","identifier":"BaseSolver.write_log","parameters":"(self, log_name, log_dict)","argument_list":"","return_statement":"","docstring":"Write log to TensorBoard\n log_name - <str> Name of tensorboard variable \n log_value - <dict>\/<array> Value of variable (e.g. dict of losses), passed if value = None","docstring_summary":"Write log to TensorBoard\n log_name - <str> Name of tensorboard variable \n log_value - <dict>\/<array> Value of variable (e.g. dict of losses), passed if value = None","docstring_tokens":["Write","log","to","TensorBoard","log_name","-","<str",">","Name","of","tensorboard","variable","log_value","-","<dict",">","\/","<array",">","Value","of","variable","(","e",".","g",".","dict","of","losses",")","passed","if","value","=","None"],"function":"def write_log(self, log_name, log_dict):\n '''\n Write log to TensorBoard\n log_name - <str> Name of tensorboard variable \n log_value - <dict>\/<array> Value of variable (e.g. dict of losses), passed if value = None\n '''\n if type(log_dict) is dict:\n log_dict = {key: val for key, val in log_dict.items() if (\n val is not None and not math.isnan(val))}\n if log_dict is None:\n pass\n elif len(log_dict) > 0:\n if 'align' in log_name or 'spec' in log_name:\n img, form = log_dict\n self.log.add_image(\n log_name, img, global_step=self.step, dataformats=form)\n elif 'text' in log_name or 'hyp' in log_name:\n self.log.add_text(log_name, log_dict, self.step)\n else:\n self.log.add_scalars(log_name, log_dict, self.step)","function_tokens":["def","write_log","(","self",",","log_name",",","log_dict",")",":","if","type","(","log_dict",")","is","dict",":","log_dict","=","{","key",":","val","for","key",",","val","in","log_dict",".","items","(",")","if","(","val","is","not","None","and","not","math",".","isnan","(","val",")",")","}","if","log_dict","is","None",":","pass","elif","len","(","log_dict",")",">","0",":","if","'align'","in","log_name","or","'spec'","in","log_name",":","img",",","form","=","log_dict","self",".","log",".","add_image","(","log_name",",","img",",","global_step","=","self",".","step",",","dataformats","=","form",")","elif","'text'","in","log_name","or","'hyp'","in","log_name",":","self",".","log",".","add_text","(","log_name",",","log_dict",",","self",".","step",")","else",":","self",".","log",".","add_scalars","(","log_name",",","log_dict",",","self",".","step",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/solver.py#L136-L155"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/solver.py","language":"python","identifier":"BaseSolver.save_checkpoint","parameters":"(self, f_name, metric, score, show_msg=True)","argument_list":"","return_statement":"","docstring":"Ckpt saver\n f_name - <str> the name phnof ckpt file (w\/o prefix) to store, overwrite if existed\n score - <float> The value of metric used to evaluate model","docstring_summary":"Ckpt saver\n f_name - <str> the name phnof ckpt file (w\/o prefix) to store, overwrite if existed\n score - <float> The value of metric used to evaluate model","docstring_tokens":["Ckpt","saver","f_name","-","<str",">","the","name","phnof","ckpt","file","(","w","\/","o","prefix",")","to","store","overwrite","if","existed","score","-","<float",">","The","value","of","metric","used","to","evaluate","model"],"function":"def save_checkpoint(self, f_name, metric, score, show_msg=True):\n '''' \n Ckpt saver\n f_name - <str> the name phnof ckpt file (w\/o prefix) to store, overwrite if existed\n score - <float> The value of metric used to evaluate model\n '''\n ckpt_path = os.path.join(self.ckpdir, f_name)\n full_dict = {\n \"model\": self.model.state_dict(),\n \"optimizer\": self.optimizer.get_opt_state_dict(),\n \"global_step\": self.step,\n metric: score\n }\n # Additional modules to save\n # if self.amp:\n # full_dict['amp'] = self.amp_lib.state_dict()\n if self.emb_decoder is not None:\n full_dict['emb_decoder'] = self.emb_decoder.state_dict()\n\n torch.save(full_dict, ckpt_path)\n if show_msg:\n self.verbose(\"Saved checkpoint (step = {}, {} = {:.2f}) and status @ {}\".\n format(human_format(self.step), metric, score, ckpt_path))","function_tokens":["def","save_checkpoint","(","self",",","f_name",",","metric",",","score",",","show_msg","=","True",")",":","ckpt_path","=","os",".","path",".","join","(","self",".","ckpdir",",","f_name",")","full_dict","=","{","\"model\"",":","self",".","model",".","state_dict","(",")",",","\"optimizer\"",":","self",".","optimizer",".","get_opt_state_dict","(",")",",","\"global_step\"",":","self",".","step",",","metric",":","score","}","# Additional modules to save","# if self.amp:","# full_dict['amp'] = self.amp_lib.state_dict()","if","self",".","emb_decoder","is","not","None",":","full_dict","[","'emb_decoder'","]","=","self",".","emb_decoder",".","state_dict","(",")","torch",".","save","(","full_dict",",","ckpt_path",")","if","show_msg",":","self",".","verbose","(","\"Saved checkpoint (step = {}, {} = {:.2f}) and status @ {}\"",".","format","(","human_format","(","self",".","step",")",",","metric",",","score",",","ckpt_path",")",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/solver.py#L157-L179"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/solver.py","language":"python","identifier":"BaseSolver.load_data","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Called by main to load all data\n After this call, data related attributes should be setup (e.g. self.tr_set, self.dev_set)\n No return value","docstring_summary":"Called by main to load all data\n After this call, data related attributes should be setup (e.g. self.tr_set, self.dev_set)\n No return value","docstring_tokens":["Called","by","main","to","load","all","data","After","this","call","data","related","attributes","should","be","setup","(","e",".","g",".","self",".","tr_set","self",".","dev_set",")","No","return","value"],"function":"def load_data(self):\n '''\n Called by main to load all data\n After this call, data related attributes should be setup (e.g. self.tr_set, self.dev_set)\n No return value\n '''\n raise NotImplementedError","function_tokens":["def","load_data","(","self",")",":","raise","NotImplementedError"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/solver.py#L193-L199"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/solver.py","language":"python","identifier":"BaseSolver.set_model","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Called by main to set models\n After this call, model related attributes should be setup (e.g. self.l2_loss)\n The followings MUST be setup\n - self.model (torch.nn.Module)\n - self.optimizer (src.Optimizer),\n init. w\/ self.optimizer = src.Optimizer(self.model.parameters(),**self.config['hparas'])\n Loading pre-trained model should also be performed here \n No return value","docstring_summary":"Called by main to set models\n After this call, model related attributes should be setup (e.g. self.l2_loss)\n The followings MUST be setup\n - self.model (torch.nn.Module)\n - self.optimizer (src.Optimizer),\n init. w\/ self.optimizer = src.Optimizer(self.model.parameters(),**self.config['hparas'])\n Loading pre-trained model should also be performed here \n No return value","docstring_tokens":["Called","by","main","to","set","models","After","this","call","model","related","attributes","should","be","setup","(","e",".","g",".","self",".","l2_loss",")","The","followings","MUST","be","setup","-","self",".","model","(","torch",".","nn",".","Module",")","-","self",".","optimizer","(","src",".","Optimizer",")","init",".","w","\/","self",".","optimizer","=","src",".","Optimizer","(","self",".","model",".","parameters","()","**","self",".","config","[","hparas","]",")","Loading","pre","-","trained","model","should","also","be","performed","here","No","return","value"],"function":"def set_model(self):\n '''\n Called by main to set models\n After this call, model related attributes should be setup (e.g. self.l2_loss)\n The followings MUST be setup\n - self.model (torch.nn.Module)\n - self.optimizer (src.Optimizer),\n init. w\/ self.optimizer = src.Optimizer(self.model.parameters(),**self.config['hparas'])\n Loading pre-trained model should also be performed here \n No return value\n '''\n raise NotImplementedError","function_tokens":["def","set_model","(","self",")",":","raise","NotImplementedError"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/solver.py#L202-L213"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/solver.py","language":"python","identifier":"BaseSolver.exec","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Called by main to execute training\/inference","docstring_summary":"Called by main to execute training\/inference","docstring_tokens":["Called","by","main","to","execute","training","\/","inference"],"function":"def exec(self):\n '''\n Called by main to execute training\/inference\n '''\n raise NotImplementedError","function_tokens":["def","exec","(","self",")",":","raise","NotImplementedError"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/solver.py#L216-L220"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/decode.py","language":"python","identifier":"Hypothesis.avgScore","parameters":"(self)","argument_list":"","return_statement":"return sum(self.output_scores) \/ len(self.output_scores)","docstring":"Return the averaged log probability of hypothesis","docstring_summary":"Return the averaged log probability of hypothesis","docstring_tokens":["Return","the","averaged","log","probability","of","hypothesis"],"function":"def avgScore(self):\n '''Return the averaged log probability of hypothesis'''\n assert len(self.output_scores) != 0\n return sum(self.output_scores) \/ len(self.output_scores)","function_tokens":["def","avgScore","(","self",")",":","assert","len","(","self",".","output_scores",")","!=","0","return","sum","(","self",".","output_scores",")","\/","len","(","self",".","output_scores",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/decode.py#L204-L207"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/decode.py","language":"python","identifier":"Hypothesis.addTopk","parameters":"(self, topi, topv, decoder_state, att_map=None,\n lm_state=None, ctc_state=None, ctc_prob=0.0, ctc_candidates=[])","argument_list":"","return_statement":"return None, new_hypothesis","docstring":"Expand current hypothesis with a given beam size","docstring_summary":"Expand current hypothesis with a given beam size","docstring_tokens":["Expand","current","hypothesis","with","a","given","beam","size"],"function":"def addTopk(self, topi, topv, decoder_state, att_map=None,\n lm_state=None, ctc_state=None, ctc_prob=0.0, ctc_candidates=[]):\n '''Expand current hypothesis with a given beam size'''\n new_hypothesis = []\n term_score = None\n ctc_s, ctc_p = None, None\n beam_size = topi.shape[-1]\n\n for i in range(beam_size):\n # Detect <eos>\n if topi[i].item() == 1:\n term_score = topv[i].cpu()\n continue\n\n idxes = self.output_seq[:] # pass by value\n scores = self.output_scores[:] # pass by value\n idxes.append(topi[i].cpu())\n scores.append(topv[i].cpu())\n if ctc_state is not None:\n # ToDo: Handle out-of-candidate case.\n idx = ctc_candidates.index(topi[i].item())\n ctc_s = ctc_state[idx, :, :]\n ctc_p = ctc_prob[idx]\n new_hypothesis.append(Hypothesis(decoder_state,\n output_seq=idxes, output_scores=scores, lm_state=lm_state,\n ctc_state=ctc_s, ctc_prob=ctc_p, att_map=att_map))\n if term_score is not None:\n self.output_seq.append(torch.tensor(1))\n self.output_scores.append(term_score)\n return self, new_hypothesis\n return None, new_hypothesis","function_tokens":["def","addTopk","(","self",",","topi",",","topv",",","decoder_state",",","att_map","=","None",",","lm_state","=","None",",","ctc_state","=","None",",","ctc_prob","=","0.0",",","ctc_candidates","=","[","]",")",":","new_hypothesis","=","[","]","term_score","=","None","ctc_s",",","ctc_p","=","None",",","None","beam_size","=","topi",".","shape","[","-","1","]","for","i","in","range","(","beam_size",")",":","# Detect <eos>","if","topi","[","i","]",".","item","(",")","==","1",":","term_score","=","topv","[","i","]",".","cpu","(",")","continue","idxes","=","self",".","output_seq","[",":","]","# pass by value","scores","=","self",".","output_scores","[",":","]","# pass by value","idxes",".","append","(","topi","[","i","]",".","cpu","(",")",")","scores",".","append","(","topv","[","i","]",".","cpu","(",")",")","if","ctc_state","is","not","None",":","# ToDo: Handle out-of-candidate case.","idx","=","ctc_candidates",".","index","(","topi","[","i","]",".","item","(",")",")","ctc_s","=","ctc_state","[","idx",",",":",",",":","]","ctc_p","=","ctc_prob","[","idx","]","new_hypothesis",".","append","(","Hypothesis","(","decoder_state",",","output_seq","=","idxes",",","output_scores","=","scores",",","lm_state","=","lm_state",",","ctc_state","=","ctc_s",",","ctc_prob","=","ctc_p",",","att_map","=","att_map",")",")","if","term_score","is","not","None",":","self",".","output_seq",".","append","(","torch",".","tensor","(","1",")",")","self",".","output_scores",".","append","(","term_score",")","return","self",",","new_hypothesis","return","None",",","new_hypothesis"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/decode.py#L209-L239"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/plugin.py","language":"python","identifier":"EmbeddingRegularizer.fuse_prob","parameters":"(self, x_emb, dec_logit)","argument_list":"","return_statement":"return log_fused_prob","docstring":"Takes context and decoder logit to perform word embedding fusion","docstring_summary":"Takes context and decoder logit to perform word embedding fusion","docstring_tokens":["Takes","context","and","decoder","logit","to","perform","word","embedding","fusion"],"function":"def fuse_prob(self, x_emb, dec_logit):\n ''' Takes context and decoder logit to perform word embedding fusion '''\n # Compute distribution for dec\/emb\n if self.fuse_normalize:\n emb_logit = nn.functional.linear(nn.functional.normalize(x_emb, dim=-1),\n nn.functional.normalize(self.emb_table.weight, dim=-1))\n else:\n emb_logit = nn.functional.linear(x_emb, self.emb_table.weight)\n emb_prob = (nn.functional.relu(self.temp)*emb_logit).softmax(dim=-1)\n dec_prob = dec_logit.softmax(dim=-1)\n # Mix distribution\n if self.fuse_learnable:\n fused_prob = (1-torch.sigmoid(self.fuse_lambda))*dec_prob +\\\n torch.sigmoid(self.fuse_lambda)*emb_prob\n else:\n fused_prob = (1-self.fuse_lambda)*dec_prob + \\\n self.fuse_lambda*emb_prob\n # Log-prob\n log_fused_prob = (fused_prob+self.eps).log()\n\n return log_fused_prob","function_tokens":["def","fuse_prob","(","self",",","x_emb",",","dec_logit",")",":","# Compute distribution for dec\/emb","if","self",".","fuse_normalize",":","emb_logit","=","nn",".","functional",".","linear","(","nn",".","functional",".","normalize","(","x_emb",",","dim","=","-","1",")",",","nn",".","functional",".","normalize","(","self",".","emb_table",".","weight",",","dim","=","-","1",")",")","else",":","emb_logit","=","nn",".","functional",".","linear","(","x_emb",",","self",".","emb_table",".","weight",")","emb_prob","=","(","nn",".","functional",".","relu","(","self",".","temp",")","*","emb_logit",")",".","softmax","(","dim","=","-","1",")","dec_prob","=","dec_logit",".","softmax","(","dim","=","-","1",")","# Mix distribution","if","self",".","fuse_learnable",":","fused_prob","=","(","1","-","torch",".","sigmoid","(","self",".","fuse_lambda",")",")","*","dec_prob","+","torch",".","sigmoid","(","self",".","fuse_lambda",")","*","emb_prob","else",":","fused_prob","=","(","1","-","self",".","fuse_lambda",")","*","dec_prob","+","self",".","fuse_lambda","*","emb_prob","# Log-prob","log_fused_prob","=","(","fused_prob","+","self",".","eps",")",".","log","(",")","return","log_fused_prob"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/plugin.py#L103-L123"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/data.py","language":"python","identifier":"collect_audio_batch","parameters":"(batch, audio_transform, mode)","argument_list":"","return_statement":"return file, audio_feat, audio_len, text","docstring":"Collects a batch, should be list of tuples (audio_path <str>, list of int token <list>) \n e.g. [(file1,txt1),(file2,txt2),...]","docstring_summary":"Collects a batch, should be list of tuples (audio_path <str>, list of int token <list>) \n e.g. [(file1,txt1),(file2,txt2),...]","docstring_tokens":["Collects","a","batch","should","be","list","of","tuples","(","audio_path","<str",">","list","of","int","token","<list",">",")","e",".","g",".","[","(","file1","txt1",")","(","file2","txt2",")","...","]"],"function":"def collect_audio_batch(batch, audio_transform, mode):\n '''Collects a batch, should be list of tuples (audio_path <str>, list of int token <list>) \n e.g. [(file1,txt1),(file2,txt2),...] '''\n\n # Bucketed batch should be [[(file1,txt1),(file2,txt2),...]]\n if type(batch[0]) is not tuple:\n batch = batch[0]\n # Make sure that batch size is reasonable\n first_len = audio_transform(str(batch[0][0])).shape[0]\n if first_len > HALF_BATCHSIZE_AUDIO_LEN and mode == 'train':\n batch = batch[:len(batch)\/\/2]\n\n # Read batch\n file, audio_feat, audio_len, text = [], [], [], []\n with torch.no_grad():\n for b in batch:\n file.append(str(b[0]).split('\/')[-1].split('.')[0])\n feat = audio_transform(str(b[0]))\n audio_feat.append(feat)\n audio_len.append(len(feat))\n text.append(torch.LongTensor(b[1]))\n # Descending audio length within each batch\n audio_len, file, audio_feat, text = zip(*[(feat_len, f_name, feat, txt)\n for feat_len, f_name, feat, txt in sorted(zip(audio_len, file, audio_feat, text), reverse=True, key=lambda x:x[0])])\n # Zero-padding\n audio_feat = pad_sequence(audio_feat, batch_first=True)\n text = pad_sequence(text, batch_first=True)\n audio_len = torch.LongTensor(audio_len)\n\n return file, audio_feat, audio_len, text","function_tokens":["def","collect_audio_batch","(","batch",",","audio_transform",",","mode",")",":","# Bucketed batch should be [[(file1,txt1),(file2,txt2),...]]","if","type","(","batch","[","0","]",")","is","not","tuple",":","batch","=","batch","[","0","]","# Make sure that batch size is reasonable","first_len","=","audio_transform","(","str","(","batch","[","0","]","[","0","]",")",")",".","shape","[","0","]","if","first_len",">","HALF_BATCHSIZE_AUDIO_LEN","and","mode","==","'train'",":","batch","=","batch","[",":","len","(","batch",")","\/\/","2","]","# Read batch","file",",","audio_feat",",","audio_len",",","text","=","[","]",",","[","]",",","[","]",",","[","]","with","torch",".","no_grad","(",")",":","for","b","in","batch",":","file",".","append","(","str","(","b","[","0","]",")",".","split","(","'\/'",")","[","-","1","]",".","split","(","'.'",")","[","0","]",")","feat","=","audio_transform","(","str","(","b","[","0","]",")",")","audio_feat",".","append","(","feat",")","audio_len",".","append","(","len","(","feat",")",")","text",".","append","(","torch",".","LongTensor","(","b","[","1","]",")",")","# Descending audio length within each 
batch","audio_len",",","file",",","audio_feat",",","text","=","zip","(","*","[","(","feat_len",",","f_name",",","feat",",","txt",")","for","feat_len",",","f_name",",","feat",",","txt","in","sorted","(","zip","(","audio_len",",","file",",","audio_feat",",","text",")",",","reverse","=","True",",","key","=","lambda","x",":","x","[","0","]",")","]",")","# Zero-padding","audio_feat","=","pad_sequence","(","audio_feat",",","batch_first","=","True",")","text","=","pad_sequence","(","text",",","batch_first","=","True",")","audio_len","=","torch",".","LongTensor","(","audio_len",")","return","file",",","audio_feat",",","audio_len",",","text"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/data.py#L14-L43"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/data.py","language":"python","identifier":"collect_text_batch","parameters":"(batch, mode)","argument_list":"","return_statement":"return text","docstring":"Collects a batch of text, should be list of list of int token \n e.g. [txt1 <list>,txt2 <list>,...]","docstring_summary":"Collects a batch of text, should be list of list of int token \n e.g. [txt1 <list>,txt2 <list>,...]","docstring_tokens":["Collects","a","batch","of","text","should","be","list","of","list","of","int","token","e",".","g",".","[","txt1","<list",">","txt2","<list",">","...","]"],"function":"def collect_text_batch(batch, mode):\n '''Collects a batch of text, should be list of list of int token \n e.g. [txt1 <list>,txt2 <list>,...] '''\n\n # Bucketed batch should be [[txt1, txt2,...]]\n if type(batch[0][0]) is list:\n batch = batch[0]\n # Half batch size if input to long\n if len(batch[0]) > HALF_BATCHSIZE_TEXT_LEN and mode == 'train':\n batch = batch[:len(batch)\/\/2]\n # Read batch\n text = [torch.LongTensor(b) for b in batch]\n # Zero-padding\n text = pad_sequence(text, batch_first=True)\n\n return text","function_tokens":["def","collect_text_batch","(","batch",",","mode",")",":","# Bucketed batch should be [[txt1, txt2,...]]","if","type","(","batch","[","0","]","[","0","]",")","is","list",":","batch","=","batch","[","0","]","# Half batch size if input to long","if","len","(","batch","[","0","]",")",">","HALF_BATCHSIZE_TEXT_LEN","and","mode","==","'train'",":","batch","=","batch","[",":","len","(","batch",")","\/\/","2","]","# Read batch","text","=","[","torch",".","LongTensor","(","b",")","for","b","in","batch","]","# Zero-padding","text","=","pad_sequence","(","text",",","batch_first","=","True",")","return","text"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/data.py#L46-L61"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/data.py","language":"python","identifier":"create_dataset","parameters":"(tokenizer, ascending, name, path, bucketing, batch_size,\n train_split=None, dev_split=None, test_split=None)","argument_list":"","return_statement":"","docstring":"Interface for creating all kinds of dataset","docstring_summary":"Interface for creating all kinds of dataset","docstring_tokens":["Interface","for","creating","all","kinds","of","dataset"],"function":"def create_dataset(tokenizer, ascending, name, path, bucketing, batch_size,\n train_split=None, dev_split=None, test_split=None):\n ''' Interface for creating all kinds of dataset'''\n\n # Recognize corpus\n if name.lower() == \"librispeech\":\n from corpus.librispeech import LibriDataset as Dataset\n else:\n raise NotImplementedError\n\n # Create dataset\n if train_split is not None:\n # Training mode\n mode = 'train'\n tr_loader_bs = 1 if bucketing and (not ascending) else batch_size\n bucket_size = batch_size if bucketing and (\n not ascending) else 1 # Ascending without bucketing\n # Do not use bucketing for dev set\n dv_set = Dataset(path, dev_split, tokenizer, 1)\n tr_set = Dataset(path, train_split, tokenizer,\n bucket_size, ascending=ascending)\n # Messages to show\n msg_list = _data_msg(name, path, train_split.__str__(), len(tr_set),\n dev_split.__str__(), len(dv_set), batch_size, bucketing)\n\n return tr_set, dv_set, tr_loader_bs, batch_size, mode, msg_list\n else:\n # Testing model\n mode = 'test'\n # Do not use bucketing for dev set\n dv_set = Dataset(path, dev_split, tokenizer, 1)\n # Do not use bucketing for test set\n tt_set = Dataset(path, test_split, tokenizer, 1)\n # Messages to show\n msg_list = _data_msg(name, path, dev_split.__str__(), len(dv_set),\n test_split.__str__(), len(tt_set), batch_size, False)\n msg_list = [m.replace('Dev', 'Test').replace(\n 'Train', 'Dev') for m in msg_list]\n return dv_set, tt_set, batch_size, batch_size, mode, msg_list","function_tokens":["def","create_dataset","(","tokenizer",",","ascending",",","name",",","path",",","bucketing",",","batch_size",",","train_split","=","None",",","dev_split","=","None",",","test_split","=","None",")",":","# Recognize corpus","if","name",".","lower","(",")","==","\"librispeech\"",":","from","corpus",".","librispeech","import","LibriDataset","as","Dataset","else",":","raise","NotImplementedError","# Create dataset","if","train_split","is","not","None",":","# Training mode","mode","=","'train'","tr_loader_bs","=","1","if","bucketing","and","(","not","ascending",")","else","batch_size","bucket_size","=","batch_size","if","bucketing","and","(","not","ascending",")","else","1","# Ascending without bucketing","# Do not use bucketing for dev set","dv_set","=","Dataset","(","path",",","dev_split",",","tokenizer",",","1",")","tr_set","=","Dataset","(","path",",","train_split",",","tokenizer",",","bucket_size",",","ascending","=","ascending",")","# Messages to show","msg_list","=","_data_msg","(","name",",","path",",","train_split",".","__str__","(",")",",","len","(","tr_set",")",",","dev_split",".","__str__","(",")",",","len","(","dv_set",")",",","batch_size",",","bucketing",")","return","tr_set",",","dv_set",",","tr_loader_bs",",","batch_size",",","mode",",","msg_list","else",":","# Testing model","mode","=","'test'","# Do not use bucketing for dev set","dv_set","=","Dataset","(","path",",","dev_split",",","tokenizer",",","1",")","# Do not use bucketing for test 
set","tt_set","=","Dataset","(","path",",","test_split",",","tokenizer",",","1",")","# Messages to show","msg_list","=","_data_msg","(","name",",","path",",","dev_split",".","__str__","(",")",",","len","(","dv_set",")",",","test_split",".","__str__","(",")",",","len","(","tt_set",")",",","batch_size",",","False",")","msg_list","=","[","m",".","replace","(","'Dev'",",","'Test'",")",".","replace","(","'Train'",",","'Dev'",")","for","m","in","msg_list","]","return","dv_set",",","tt_set",",","batch_size",",","batch_size",",","mode",",","msg_list"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/data.py#L64-L102"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/data.py","language":"python","identifier":"create_textset","parameters":"(tokenizer, train_split, dev_split, name, path, bucketing, batch_size)","argument_list":"","return_statement":"return tr_set, dv_set, tr_loader_bs, batch_size, msg_list","docstring":"Interface for creating all kinds of text dataset","docstring_summary":"Interface for creating all kinds of text dataset","docstring_tokens":["Interface","for","creating","all","kinds","of","text","dataset"],"function":"def create_textset(tokenizer, train_split, dev_split, name, path, bucketing, batch_size):\n ''' Interface for creating all kinds of text dataset'''\n msg_list = []\n\n # Recognize corpus\n if name.lower() == \"librispeech\":\n from corpus.librispeech import LibriTextDataset as Dataset\n else:\n raise NotImplementedError\n\n # Create dataset\n bucket_size = batch_size if bucketing else 1\n tr_loader_bs = 1 if bucketing else batch_size\n # Do not use bucketing for dev set\n dv_set = Dataset(path, dev_split, tokenizer, 1)\n tr_set = Dataset(path, train_split, tokenizer, bucket_size)\n\n # Messages to show\n msg_list = _data_msg(name, path, train_split.__str__(), len(tr_set),\n dev_split.__str__(), len(dv_set), batch_size, bucketing)\n\n return tr_set, dv_set, tr_loader_bs, batch_size, msg_list","function_tokens":["def","create_textset","(","tokenizer",",","train_split",",","dev_split",",","name",",","path",",","bucketing",",","batch_size",")",":","msg_list","=","[","]","# Recognize corpus","if","name",".","lower","(",")","==","\"librispeech\"",":","from","corpus",".","librispeech","import","LibriTextDataset","as","Dataset","else",":","raise","NotImplementedError","# Create dataset","bucket_size","=","batch_size","if","bucketing","else","1","tr_loader_bs","=","1","if","bucketing","else","batch_size","# Do not use bucketing for dev set","dv_set","=","Dataset","(","path",",","dev_split",",","tokenizer",",","1",")","tr_set","=","Dataset","(","path",",","train_split",",","tokenizer",",","bucket_size",")","# Messages to show","msg_list","=","_data_msg","(","name",",","path",",","train_split",".","__str__","(",")",",","len","(","tr_set",")",",","dev_split",".","__str__","(",")",",","len","(","dv_set",")",",","batch_size",",","bucketing",")","return","tr_set",",","dv_set",",","tr_loader_bs",",","batch_size",",","msg_list"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/data.py#L105-L126"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/data.py","language":"python","identifier":"load_dataset","parameters":"(n_jobs, use_gpu, pin_memory, ascending, corpus, audio, text)","argument_list":"","return_statement":"return tr_set, dv_set, feat_dim, tokenizer.vocab_size, tokenizer, data_msg","docstring":"Prepare dataloader for training\/validation","docstring_summary":"Prepare dataloader for training\/validation","docstring_tokens":["Prepare","dataloader","for","training","\/","validation"],"function":"def load_dataset(n_jobs, use_gpu, pin_memory, ascending, corpus, audio, text):\n ''' Prepare dataloader for training\/validation'''\n\n # Audio feature extractor\n audio_transform, feat_dim = create_transform(audio.copy())\n # Text tokenizer\n tokenizer = load_text_encoder(**text)\n # Dataset (in testing mode, tr_set=dv_set, dv_set=tt_set)\n tr_set, dv_set, tr_loader_bs, dv_loader_bs, mode, data_msg = create_dataset(\n tokenizer, ascending, **corpus)\n # Collect function\n collect_tr = partial(collect_audio_batch,\n audio_transform=audio_transform, mode=mode)\n collect_dv = partial(collect_audio_batch,\n audio_transform=audio_transform, mode='test')\n # Shuffle\/drop applied to training set only\n shuffle = (mode == 'train' and not ascending)\n drop_last = shuffle\n # Create data loader\n tr_set = DataLoader(tr_set, batch_size=tr_loader_bs, shuffle=shuffle, drop_last=drop_last, collate_fn=collect_tr,\n num_workers=n_jobs, pin_memory=use_gpu)\n dv_set = DataLoader(dv_set, batch_size=dv_loader_bs, shuffle=False, drop_last=False, collate_fn=collect_dv,\n num_workers=n_jobs, pin_memory=pin_memory)\n # Messages to show\n data_msg.append('I\/O spec. | Audio feature = {}\\t| feature dim = {}\\t| Token type = {}\\t| Vocab size = {}'\n .format(audio['feat_type'], feat_dim, tokenizer.token_type, tokenizer.vocab_size))\n\n return tr_set, dv_set, feat_dim, tokenizer.vocab_size, tokenizer, data_msg","function_tokens":["def","load_dataset","(","n_jobs",",","use_gpu",",","pin_memory",",","ascending",",","corpus",",","audio",",","text",")",":","# Audio feature extractor","audio_transform",",","feat_dim","=","create_transform","(","audio",".","copy","(",")",")","# Text tokenizer","tokenizer","=","load_text_encoder","(","*","*","text",")","# Dataset (in testing mode, tr_set=dv_set, dv_set=tt_set)","tr_set",",","dv_set",",","tr_loader_bs",",","dv_loader_bs",",","mode",",","data_msg","=","create_dataset","(","tokenizer",",","ascending",",","*","*","corpus",")","# Collect function","collect_tr","=","partial","(","collect_audio_batch",",","audio_transform","=","audio_transform",",","mode","=","mode",")","collect_dv","=","partial","(","collect_audio_batch",",","audio_transform","=","audio_transform",",","mode","=","'test'",")","# Shuffle\/drop applied to training set only","shuffle","=","(","mode","==","'train'","and","not","ascending",")","drop_last","=","shuffle","# Create data loader","tr_set","=","DataLoader","(","tr_set",",","batch_size","=","tr_loader_bs",",","shuffle","=","shuffle",",","drop_last","=","drop_last",",","collate_fn","=","collect_tr",",","num_workers","=","n_jobs",",","pin_memory","=","use_gpu",")","dv_set","=","DataLoader","(","dv_set",",","batch_size","=","dv_loader_bs",",","shuffle","=","False",",","drop_last","=","False",",","collate_fn","=","collect_dv",",","num_workers","=","n_jobs",",","pin_memory","=","pin_memory",")","# Messages to show","data_msg",".","append","(","'I\/O spec. 
| Audio feature = {}\\t| feature dim = {}\\t| Token type = {}\\t| Vocab size = {}'",".","format","(","audio","[","'feat_type'","]",",","feat_dim",",","tokenizer",".","token_type",",","tokenizer",".","vocab_size",")",")","return","tr_set",",","dv_set",",","feat_dim",",","tokenizer",".","vocab_size",",","tokenizer",",","data_msg"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/data.py#L129-L156"}
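Putting the pieces together, a hedged sketch of driving load_dataset from a YAML config (the config filename is hypothetical; its 'data' section supplies the corpus/audio/text dicts) and iterating the resulting loader, whose batches are the 4-tuples produced by collect_audio_batch:

import yaml
from src.data import load_dataset  # assumes the repo root is on PYTHONPATH

with open('config/libri/asr_example.yaml') as f:   # hypothetical config file
    cfg = yaml.safe_load(f)
tr_set, dv_set, feat_dim, vocab_size, tokenizer, data_msg = load_dataset(
    n_jobs=4, use_gpu=True, pin_memory=True, ascending=False, **cfg['data'])

for file, feat, feat_len, txt in tr_set:   # 4-tuples from collect_audio_batch
    break                                  # feat: B x T x feat_dim, txt: B x L (0 = pad)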
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/data.py","language":"python","identifier":"_data_msg","parameters":"(name, path, train_split, tr_set, dev_split, dv_set, batch_size, bucketing)","argument_list":"","return_statement":"return msg_list","docstring":"List msg for verbose function","docstring_summary":"List msg for verbose function","docstring_tokens":["List","msg","for","verbose","function"],"function":"def _data_msg(name, path, train_split, tr_set, dev_split, dv_set, batch_size, bucketing):\n ''' List msg for verbose function '''\n msg_list = []\n msg_list.append('Data spec. | Corpus = {} (from {})'.format(name, path))\n msg_list.append(' | Train sets = {}\\t| Number of utts = {}'.format(\n train_split, tr_set))\n msg_list.append(\n ' | Dev sets = {}\\t| Number of utts = {}'.format(dev_split, dv_set))\n msg_list.append(' | Batch size = {}\\t\\t| Bucketing = {}'.format(\n batch_size, bucketing))\n return msg_list","function_tokens":["def","_data_msg","(","name",",","path",",","train_split",",","tr_set",",","dev_split",",","dv_set",",","batch_size",",","bucketing",")",":","msg_list","=","[","]","msg_list",".","append","(","'Data spec. | Corpus = {} (from {})'",".","format","(","name",",","path",")",")","msg_list",".","append","(","' | Train sets = {}\\t| Number of utts = {}'",".","format","(","train_split",",","tr_set",")",")","msg_list",".","append","(","' | Dev sets = {}\\t| Number of utts = {}'",".","format","(","dev_split",",","dv_set",")",")","msg_list",".","append","(","' | Batch size = {}\\t\\t| Bucketing = {}'",".","format","(","batch_size",",","bucketing",")",")","return","msg_list"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/data.py#L181-L191"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/asr.py","language":"python","identifier":"ASR.set_state","parameters":"(self, prev_state, prev_attn)","argument_list":"","return_statement":"","docstring":"Setting up all memory states for beam decoding","docstring_summary":"Setting up all memory states for beam decoding","docstring_tokens":["Setting","up","all","memory","states","for","beam","decoding"],"function":"def set_state(self, prev_state, prev_attn):\n ''' Setting up all memory states for beam decoding'''\n self.decoder.set_state(prev_state)\n self.attention.set_mem(prev_attn)","function_tokens":["def","set_state","(","self",",","prev_state",",","prev_attn",")",":","self",".","decoder",".","set_state","(","prev_state",")","self",".","attention",".","set_mem","(","prev_attn",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/asr.py#L48-L51"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/asr.py","language":"python","identifier":"ASR.forward","parameters":"(self, audio_feature, feature_len, decode_step, tf_rate=0.0, teacher=None,\n emb_decoder=None, get_dec_state=False)","argument_list":"","return_statement":"return ctc_output, encode_len, att_output, att_seq, dec_state","docstring":"Arguments\n audio_feature - [BxTxD] Acoustic feature with shape \n feature_len - [B] Length of each sample in a batch\n decode_step - [int] The maximum number of attention decoder steps \n tf_rate - [0,1] The probability to perform teacher forcing for each step\n teacher - [BxL] Ground truth for teacher forcing with sentence length L\n emb_decoder - [obj] Introduces the word embedding decoder, different behavior for training\/inference\n At training stage, this ONLY affects self-sampling (output remains the same)\n At inference stage, this affects output to become log prob. with distribution fusion\n get_dec_state - [bool] If true, return decoder state [BxLxD] for other purpose","docstring_summary":"Arguments\n audio_feature - [BxTxD] Acoustic feature with shape \n feature_len - [B] Length of each sample in a batch\n decode_step - [int] The maximum number of attention decoder steps \n tf_rate - [0,1] The probability to perform teacher forcing for each step\n teacher - [BxL] Ground truth for teacher forcing with sentence length L\n emb_decoder - [obj] Introduces the word embedding decoder, different behavior for training\/inference\n At training stage, this ONLY affects self-sampling (output remains the same)\n At inference stage, this affects output to become log prob. with distribution fusion\n get_dec_state - [bool] If true, return decoder state [BxLxD] for other purpose","docstring_tokens":["Arguments","audio_feature","-","[","BxTxD","]","Acoustic","feature","with","shape","feature_len","-","[","B","]","Length","of","each","sample","in","a","batch","decode_step","-","[","int","]","The","maximum","number","of","attention","decoder","steps","tf_rate","-","[","0","1","]","The","probability","to","perform","teacher","forcing","for","each","step","teacher","-","[","BxL","]","Ground","truth","for","teacher","forcing","with","sentence","length","L","emb_decoder","-","[","obj","]","Introduces","the","word","embedding","decoder","different","behavior","for","training","\/","inference","At","training","stage","this","ONLY","affects","self","-","sampling","(","output","remains","the","same",")","At","inference","stage","this","affects","output","to","become","log","prob",".","with","distribution","fusion","get_dec_state","-","[","bool","]","If","true","return","decoder","state","[","BxLxD","]","for","other","purpose"],"function":"def forward(self, audio_feature, feature_len, decode_step, tf_rate=0.0, teacher=None,\n emb_decoder=None, get_dec_state=False):\n '''\n Arguments\n audio_feature - [BxTxD] Acoustic feature with shape \n feature_len - [B] Length of each sample in a batch\n decode_step - [int] The maximum number of attention decoder steps \n tf_rate - [0,1] The probability to perform teacher forcing for each step\n teacher - [BxL] Ground truth for teacher forcing with sentence length L\n emb_decoder - [obj] Introduces the word embedding decoder, different behavior for training\/inference\n At training stage, this ONLY affects self-sampling (output remains the same)\n At inference stage, this affects output to become log prob. 
with distribution fusion\n get_dec_state - [bool] If true, return decoder state [BxLxD] for other purpose\n '''\n # Init\n bs = audio_feature.shape[0]\n ctc_output, att_output, att_seq = None, None, None\n dec_state = [] if get_dec_state else None\n\n # Encode\n encode_feature, encode_len = self.encoder(audio_feature, feature_len)\n\n # CTC based decoding\n if self.enable_ctc:\n ctc_output = F.log_softmax(self.ctc_layer(encode_feature), dim=-1)\n\n # Attention based decoding\n if self.enable_att:\n # Init (init char = <SOS>, reset all rnn state and cell)\n self.decoder.init_state(bs)\n self.attention.reset_mem()\n last_char = self.pre_embed(torch.zeros(\n (bs), dtype=torch.long, device=encode_feature.device))\n att_seq, output_seq = [], []\n\n # Preprocess data for teacher forcing\n if teacher is not None:\n teacher = self.embed_drop(self.pre_embed(teacher))\n\n # Decode\n for t in range(decode_step):\n # Attend (inputs current state of first layer, encoded features)\n attn, context = self.attention(\n self.decoder.get_query(), encode_feature, encode_len)\n # Decode (inputs context + embedded last character)\n decoder_input = torch.cat([last_char, context], dim=-1)\n cur_char, d_state = self.decoder(decoder_input)\n # Prepare output as input of next step\n if (teacher is not None):\n # Training stage\n if (tf_rate == 1) or (torch.rand(1).item() <= tf_rate):\n # teacher forcing\n last_char = teacher[:, t, :]\n else:\n # self-sampling (replace by argmax may be another choice)\n with torch.no_grad():\n if (emb_decoder is not None) and emb_decoder.apply_fuse:\n _, cur_prob = emb_decoder(\n d_state, cur_char, return_loss=False)\n else:\n cur_prob = cur_char.softmax(dim=-1)\n sampled_char = Categorical(cur_prob).sample()\n last_char = self.embed_drop(\n self.pre_embed(sampled_char))\n else:\n # Inference stage\n if (emb_decoder is not None) and emb_decoder.apply_fuse:\n _, cur_char = emb_decoder(\n d_state, cur_char, return_loss=False)\n # argmax for inference\n last_char = self.pre_embed(torch.argmax(cur_char, dim=-1))\n\n # save output of each step\n output_seq.append(cur_char)\n att_seq.append(attn)\n if get_dec_state:\n dec_state.append(d_state)\n\n att_output = torch.stack(output_seq, dim=1) # BxTxV\n att_seq = torch.stack(att_seq, dim=2) # BxNxDtxT\n if get_dec_state:\n dec_state = torch.stack(dec_state, dim=1)\n\n return ctc_output, encode_len, att_output, att_seq, dec_state","function_tokens":["def","forward","(","self",",","audio_feature",",","feature_len",",","decode_step",",","tf_rate","=","0.0",",","teacher","=","None",",","emb_decoder","=","None",",","get_dec_state","=","False",")",":","# Init","bs","=","audio_feature",".","shape","[","0","]","ctc_output",",","att_output",",","att_seq","=","None",",","None",",","None","dec_state","=","[","]","if","get_dec_state","else","None","# Encode","encode_feature",",","encode_len","=","self",".","encoder","(","audio_feature",",","feature_len",")","# CTC based decoding","if","self",".","enable_ctc",":","ctc_output","=","F",".","log_softmax","(","self",".","ctc_layer","(","encode_feature",")",",","dim","=","-","1",")","# Attention based decoding","if","self",".","enable_att",":","# Init (init char = <SOS>, reset all rnn state and 
cell)","self",".","decoder",".","init_state","(","bs",")","self",".","attention",".","reset_mem","(",")","last_char","=","self",".","pre_embed","(","torch",".","zeros","(","(","bs",")",",","dtype","=","torch",".","long",",","device","=","encode_feature",".","device",")",")","att_seq",",","output_seq","=","[","]",",","[","]","# Preprocess data for teacher forcing","if","teacher","is","not","None",":","teacher","=","self",".","embed_drop","(","self",".","pre_embed","(","teacher",")",")","# Decode","for","t","in","range","(","decode_step",")",":","# Attend (inputs current state of first layer, encoded features)","attn",",","context","=","self",".","attention","(","self",".","decoder",".","get_query","(",")",",","encode_feature",",","encode_len",")","# Decode (inputs context + embedded last character)","decoder_input","=","torch",".","cat","(","[","last_char",",","context","]",",","dim","=","-","1",")","cur_char",",","d_state","=","self",".","decoder","(","decoder_input",")","# Prepare output as input of next step","if","(","teacher","is","not","None",")",":","# Training stage","if","(","tf_rate","==","1",")","or","(","torch",".","rand","(","1",")",".","item","(",")","<=","tf_rate",")",":","# teacher forcing","last_char","=","teacher","[",":",",","t",",",":","]","else",":","# self-sampling (replace by argmax may be another choice)","with","torch",".","no_grad","(",")",":","if","(","emb_decoder","is","not","None",")","and","emb_decoder",".","apply_fuse",":","_",",","cur_prob","=","emb_decoder","(","d_state",",","cur_char",",","return_loss","=","False",")","else",":","cur_prob","=","cur_char",".","softmax","(","dim","=","-","1",")","sampled_char","=","Categorical","(","cur_prob",")",".","sample","(",")","last_char","=","self",".","embed_drop","(","self",".","pre_embed","(","sampled_char",")",")","else",":","# Inference stage","if","(","emb_decoder","is","not","None",")","and","emb_decoder",".","apply_fuse",":","_",",","cur_char","=","emb_decoder","(","d_state",",","cur_char",",","return_loss","=","False",")","# argmax for inference","last_char","=","self",".","pre_embed","(","torch",".","argmax","(","cur_char",",","dim","=","-","1",")",")","# save output of each step","output_seq",".","append","(","cur_char",")","att_seq",".","append","(","attn",")","if","get_dec_state",":","dec_state",".","append","(","d_state",")","att_output","=","torch",".","stack","(","output_seq",",","dim","=","1",")","# BxTxV","att_seq","=","torch",".","stack","(","att_seq",",","dim","=","2",")","# BxNxDtxT","if","get_dec_state",":","dec_state","=","torch",".","stack","(","dec_state",",","dim","=","1",")","return","ctc_output",",","encode_len",",","att_output",",","att_seq",",","dec_state"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/asr.py#L72-L155"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/asr.py","language":"python","identifier":"Decoder.init_state","parameters":"(self, bs)","argument_list":"","return_statement":"return self.get_state()","docstring":"Set all hidden states to zeros","docstring_summary":"Set all hidden states to zeros","docstring_tokens":["Set","all","hidden","states","to","zeros"],"function":"def init_state(self, bs):\n ''' Set all hidden states to zeros '''\n device = next(self.parameters()).device\n if self.enable_cell:\n self.hidden_state = (torch.zeros((self.layer, bs, self.dim), device=device),\n torch.zeros((self.layer, bs, self.dim), device=device))\n else:\n self.hidden_state = torch.zeros(\n (self.layer, bs, self.dim), device=device)\n return self.get_state()","function_tokens":["def","init_state","(","self",",","bs",")",":","device","=","next","(","self",".","parameters","(",")",")",".","device","if","self",".","enable_cell",":","self",".","hidden_state","=","(","torch",".","zeros","(","(","self",".","layer",",","bs",",","self",".","dim",")",",","device","=","device",")",",","torch",".","zeros","(","(","self",".","layer",",","bs",",","self",".","dim",")",",","device","=","device",")",")","else",":","self",".","hidden_state","=","torch",".","zeros","(","(","self",".","layer",",","bs",",","self",".","dim",")",",","device","=","device",")","return","self",".","get_state","(",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/asr.py#L180-L189"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/asr.py","language":"python","identifier":"Decoder.set_state","parameters":"(self, hidden_state)","argument_list":"","return_statement":"","docstring":"Set all hidden states\/cells, for decoding purpose","docstring_summary":"Set all hidden states\/cells, for decoding purpose","docstring_tokens":["Set","all","hidden","states","\/","cells","for","decoding","purpose"],"function":"def set_state(self, hidden_state):\n ''' Set all hidden states\/cells, for decoding purpose'''\n device = next(self.parameters()).device\n if self.enable_cell:\n self.hidden_state = (hidden_state[0].to(\n device), hidden_state[1].to(device))\n else:\n self.hidden_state = hidden_state.to(device)","function_tokens":["def","set_state","(","self",",","hidden_state",")",":","device","=","next","(","self",".","parameters","(",")",")",".","device","if","self",".","enable_cell",":","self",".","hidden_state","=","(","hidden_state","[","0","]",".","to","(","device",")",",","hidden_state","[","1","]",".","to","(","device",")",")","else",":","self",".","hidden_state","=","hidden_state",".","to","(","device",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/asr.py#L191-L198"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/asr.py","language":"python","identifier":"Decoder.get_state","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Return all hidden states\/cells, for decoding purpose","docstring_summary":"Return all hidden states\/cells, for decoding purpose","docstring_tokens":["Return","all","hidden","states","\/","cells","for","decoding","purpose"],"function":"def get_state(self):\n ''' Return all hidden states\/cells, for decoding purpose'''\n if self.enable_cell:\n return (self.hidden_state[0].cpu(), self.hidden_state[1].cpu())\n else:\n return self.hidden_state.cpu()","function_tokens":["def","get_state","(","self",")",":","if","self",".","enable_cell",":","return","(","self",".","hidden_state","[","0","]",".","cpu","(",")",",","self",".","hidden_state","[","1","]",".","cpu","(",")",")","else",":","return","self",".","hidden_state",".","cpu","(",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/asr.py#L200-L205"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/asr.py","language":"python","identifier":"Decoder.get_query","parameters":"(self)","argument_list":"","return_statement":"","docstring":"Return state of all layers as query for attention","docstring_summary":"Return state of all layers as query for attention","docstring_tokens":["Return","state","of","all","layers","as","query","for","attention"],"function":"def get_query(self):\n ''' Return state of all layers as query for attention '''\n if self.enable_cell:\n return self.hidden_state[0].transpose(0, 1).reshape(-1, self.dim*self.layer)\n else:\n return self.hidden_state.transpose(0, 1).reshape(-1, self.dim*self.layer)","function_tokens":["def","get_query","(","self",")",":","if","self",".","enable_cell",":","return","self",".","hidden_state","[","0","]",".","transpose","(","0",",","1",")",".","reshape","(","-","1",",","self",".","dim","*","self",".","layer",")","else",":","return","self",".","hidden_state",".","transpose","(","0",",","1",")",".","reshape","(","-","1",",","self",".","dim","*","self",".","layer",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/asr.py#L207-L212"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/asr.py","language":"python","identifier":"Decoder.forward","parameters":"(self, x)","argument_list":"","return_statement":"return char, x","docstring":"Decode and transform into vocab","docstring_summary":"Decode and transform into vocab","docstring_tokens":["Decode","and","transform","into","vocab"],"function":"def forward(self, x):\n ''' Decode and transform into vocab '''\n if not self.training:\n self.layers.flatten_parameters()\n x, self.hidden_state = self.layers(x.unsqueeze(1), self.hidden_state)\n x = x.squeeze(1)\n char = self.char_trans(self.final_dropout(x))\n return char, x","function_tokens":["def","forward","(","self",",","x",")",":","if","not","self",".","training",":","self",".","layers",".","flatten_parameters","(",")","x",",","self",".","hidden_state","=","self",".","layers","(","x",".","unsqueeze","(","1",")",",","self",".","hidden_state",")","x","=","x",".","squeeze","(","1",")","char","=","self",".","char_trans","(","self",".","final_dropout","(","x",")",")","return","char",",","x"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/asr.py#L214-L221"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/ctc.py","language":"python","identifier":"CTCPrefixScore.full_compute","parameters":"(self, g, r_prev)","argument_list":"","return_statement":"return psi, np.rollaxis(r, 2)","docstring":"Given prefix g, return the probability of all possible sequence y (where y = concat(g,c))\n This function computes all possible tokens for c (memory inefficient)","docstring_summary":"Given prefix g, return the probability of all possible sequence y (where y = concat(g,c))\n This function computes all possible tokens for c (memory inefficient)","docstring_tokens":["Given","prefix","g","return","the","probability","of","all","possible","sequence","y","(","where","y","=","concat","(","g","c","))","This","function","computes","all","possible","tokens","for","c","(","memory","inefficient",")"],"function":"def full_compute(self, g, r_prev):\n '''Given prefix g, return the probability of all possible sequence y (where y = concat(g,c))\n This function computes all possible tokens for c (memory inefficient)'''\n prefix_length = len(g)\n last_char = g[-1] if prefix_length > 0 else 0\n\n # init. r\n r = np.full((self.input_length, 2, self.odim),\n self.logzero, dtype=np.float32)\n\n # start from len(g) because is impossible for CTC to generate |y|>|X|\n start = max(1, prefix_length)\n\n if prefix_length == 0:\n r[0, 0, :] = self.x[0, :] # if g = <sos>\n\n psi = r[start-1, 0, :]\n\n phi = np.logaddexp(r_prev[:, 0], r_prev[:, 1])\n\n for t in range(start, self.input_length):\n # prev_blank\n prev_blank = np.full((self.odim), r_prev[t-1, 1], dtype=np.float32)\n # prev_nonblank\n prev_nonblank = np.full(\n (self.odim), r_prev[t-1, 0], dtype=np.float32)\n prev_nonblank[last_char] = self.logzero\n\n phi = np.logaddexp(prev_nonblank, prev_blank)\n # P(h|current step is non-blank) = [ P(prev. step = y) + P()]*P(c)\n r[t, 0, :] = np.logaddexp(r[t-1, 0, :], phi) + self.x[t, :]\n # P(h|current step is blank) = [P(prev. step is blank) + P(prev. step is non-blank)]*P(now=blank)\n r[t, 1, :] = np.logaddexp(\n r[t-1, 1, :], r[t-1, 0, :]) + self.x[t, self.blank]\n psi = np.logaddexp(psi, phi+self.x[t, :])\n\n #psi[self.eos] = np.logaddexp(r_prev[-1,0], r_prev[-1,1])\n return psi, np.rollaxis(r, 2)","function_tokens":["def","full_compute","(","self",",","g",",","r_prev",")",":","prefix_length","=","len","(","g",")","last_char","=","g","[","-","1","]","if","prefix_length",">","0","else","0","# init. 
r","r","=","np",".","full","(","(","self",".","input_length",",","2",",","self",".","odim",")",",","self",".","logzero",",","dtype","=","np",".","float32",")","# start from len(g) because is impossible for CTC to generate |y|>|X|","start","=","max","(","1",",","prefix_length",")","if","prefix_length","==","0",":","r","[","0",",","0",",",":","]","=","self",".","x","[","0",",",":","]","# if g = <sos>","psi","=","r","[","start","-","1",",","0",",",":","]","phi","=","np",".","logaddexp","(","r_prev","[",":",",","0","]",",","r_prev","[",":",",","1","]",")","for","t","in","range","(","start",",","self",".","input_length",")",":","# prev_blank","prev_blank","=","np",".","full","(","(","self",".","odim",")",",","r_prev","[","t","-","1",",","1","]",",","dtype","=","np",".","float32",")","# prev_nonblank","prev_nonblank","=","np",".","full","(","(","self",".","odim",")",",","r_prev","[","t","-","1",",","0","]",",","dtype","=","np",".","float32",")","prev_nonblank","[","last_char","]","=","self",".","logzero","phi","=","np",".","logaddexp","(","prev_nonblank",",","prev_blank",")","# P(h|current step is non-blank) = [ P(prev. step = y) + P()]*P(c)","r","[","t",",","0",",",":","]","=","np",".","logaddexp","(","r","[","t","-","1",",","0",",",":","]",",","phi",")","+","self",".","x","[","t",",",":","]","# P(h|current step is blank) = [P(prev. step is blank) + P(prev. step is non-blank)]*P(now=blank)","r","[","t",",","1",",",":","]","=","np",".","logaddexp","(","r","[","t","-","1",",","1",",",":","]",",","r","[","t","-","1",",","0",",",":","]",")","+","self",".","x","[","t",",","self",".","blank","]","psi","=","np",".","logaddexp","(","psi",",","phi","+","self",".","x","[","t",",",":","]",")","#psi[self.eos] = np.logaddexp(r_prev[-1,0], r_prev[-1,1])","return","psi",",","np",".","rollaxis","(","r",",","2",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/ctc.py#L37-L74"}
{"nwo":"Alexander-H-Liu\/End-to-end-ASR-Pytorch","sha":"1103d144423e8e692f1d18cd9db27a96cb49fb9d","path":"src\/ctc.py","language":"python","identifier":"CTCPrefixScore.cheap_compute","parameters":"(self, g, r_prev, candidates)","argument_list":"","return_statement":"return psi, np.rollaxis(r, 2)","docstring":"Given prefix g, return the probability of all possible sequence y (where y = concat(g,c))\n This function considers only those tokens in candidates for c (memory efficient)","docstring_summary":"Given prefix g, return the probability of all possible sequence y (where y = concat(g,c))\n This function considers only those tokens in candidates for c (memory efficient)","docstring_tokens":["Given","prefix","g","return","the","probability","of","all","possible","sequence","y","(","where","y","=","concat","(","g","c","))","This","function","considers","only","those","tokens","in","candidates","for","c","(","memory","efficient",")"],"function":"def cheap_compute(self, g, r_prev, candidates):\n '''Given prefix g, return the probability of all possible sequence y (where y = concat(g,c))\n This function considers only those tokens in candidates for c (memory efficient)'''\n prefix_length = len(g)\n odim = len(candidates)\n last_char = g[-1] if prefix_length > 0 else 0\n\n # init. r\n r = np.full((self.input_length, 2, len(candidates)),\n self.logzero, dtype=np.float32)\n\n # start from len(g) because is impossible for CTC to generate |y|>|X|\n start = max(1, prefix_length)\n\n if prefix_length == 0:\n r[0, 0, :] = self.x[0, candidates] # if g = <sos>\n\n psi = r[start-1, 0, :]\n # Phi = (prev_nonblank,prev_blank)\n sum_prev = np.logaddexp(r_prev[:, 0], r_prev[:, 1])\n phi = np.repeat(sum_prev[..., None],odim,axis=-1)\n # Handle edge case : last tok of prefix in candidates\n if prefix_length>0 and last_char in candidates:\n phi[:,candidates.index(last_char)] = r_prev[:,1]\n\n for t in range(start, self.input_length):\n # prev_blank\n # prev_blank = np.full((odim), r_prev[t-1, 1], dtype=np.float32)\n # prev_nonblank\n # prev_nonblank = np.full((odim), r_prev[t-1, 0], dtype=np.float32)\n # phi = np.logaddexp(prev_nonblank, prev_blank)\n # P(h|current step is non-blank) = P(prev. step = y)*P(c)\n r[t, 0, :] = np.logaddexp( r[t-1, 0, :], phi[t-1]) + self.x[t, candidates]\n # P(h|current step is blank) = [P(prev. step is blank) + P(prev. step is non-blank)]*P(now=blank)\n r[t, 1, :] = np.logaddexp( r[t-1, 1, :], r[t-1, 0, :]) + self.x[t, self.blank]\n psi = np.logaddexp(psi, phi[t-1,]+self.x[t, candidates])\n\n # P(end of sentence) = P(g)\n if self.eos in candidates:\n psi[candidates.index(self.eos)] = sum_prev[-1]\n return psi, np.rollaxis(r, 2)","function_tokens":["def","cheap_compute","(","self",",","g",",","r_prev",",","candidates",")",":","prefix_length","=","len","(","g",")","odim","=","len","(","candidates",")","last_char","=","g","[","-","1","]","if","prefix_length",">","0","else","0","# init. 
r","r","=","np",".","full","(","(","self",".","input_length",",","2",",","len","(","candidates",")",")",",","self",".","logzero",",","dtype","=","np",".","float32",")","# start from len(g) because is impossible for CTC to generate |y|>|X|","start","=","max","(","1",",","prefix_length",")","if","prefix_length","==","0",":","r","[","0",",","0",",",":","]","=","self",".","x","[","0",",","candidates","]","# if g = <sos>","psi","=","r","[","start","-","1",",","0",",",":","]","# Phi = (prev_nonblank,prev_blank)","sum_prev","=","np",".","logaddexp","(","r_prev","[",":",",","0","]",",","r_prev","[",":",",","1","]",")","phi","=","np",".","repeat","(","sum_prev","[","...",",","None","]",",","odim",",","axis","=","-","1",")","# Handle edge case : last tok of prefix in candidates","if","prefix_length",">","0","and","last_char","in","candidates",":","phi","[",":",",","candidates",".","index","(","last_char",")","]","=","r_prev","[",":",",","1","]","for","t","in","range","(","start",",","self",".","input_length",")",":","# prev_blank","# prev_blank = np.full((odim), r_prev[t-1, 1], dtype=np.float32)","# prev_nonblank","# prev_nonblank = np.full((odim), r_prev[t-1, 0], dtype=np.float32)","# phi = np.logaddexp(prev_nonblank, prev_blank)","# P(h|current step is non-blank) = P(prev. step = y)*P(c)","r","[","t",",","0",",",":","]","=","np",".","logaddexp","(","r","[","t","-","1",",","0",",",":","]",",","phi","[","t","-","1","]",")","+","self",".","x","[","t",",","candidates","]","# P(h|current step is blank) = [P(prev. step is blank) + P(prev. step is non-blank)]*P(now=blank)","r","[","t",",","1",",",":","]","=","np",".","logaddexp","(","r","[","t","-","1",",","1",",",":","]",",","r","[","t","-","1",",","0",",",":","]",")","+","self",".","x","[","t",",","self",".","blank","]","psi","=","np",".","logaddexp","(","psi",",","phi","[","t","-","1",",","]","+","self",".","x","[","t",",","candidates","]",")","# P(end of sentence) = P(g)","if","self",".","eos","in","candidates",":","psi","[","candidates",".","index","(","self",".","eos",")","]","=","sum_prev","[","-","1","]","return","psi",",","np",".","rollaxis","(","r",",","2",")"],"url":"https:\/\/github.com\/Alexander-H-Liu\/End-to-end-ASR-Pytorch\/blob\/1103d144423e8e692f1d18cd9db27a96cb49fb9d\/src\/ctc.py#L76-L116"}