import copy
import random
import re
import sys
from pathlib import Path

import torch
import whisper
from evaluate import load
from num2words import num2words
from sentence_transformers import SentenceTransformer
from whisper.normalizers import EnglishTextNormalizer

from lit_gpt.tokenizer import Tokenizer

sys.path.append('/home3/huyuchen/pytorch_workplace/my_jiwer')
from my_jiwer import wer_embdiff
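
# Builds LLM fine-tuning data for ASR "generative error correction": decode a
# Whisper 5-best list for each noisy utterance, compute SBERT embedding
# differences between the hypotheses, wrap the n-best list in an instruction
# prompt, and save everything (plus Whisper encoder features of the noisy
# and the parallel clean audio) into a single .pt file.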

eval_wer = load("wer")
normalizer = EnglishTextNormalizer()

checkpoint_dir = Path('/home3/huyuchen/pytorch_workplace/wgpt/checkpoints/Llama-2-7b-hf')
tokenizer = Tokenizer(checkpoint_dir)

# all-MiniLM-L6-v2 produces 384-dimensional embeddings; word_emb_diff below
# relies on this size for its zero vectors.
sbert_model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')


def calculate_wer(all_hypo, all_refer):
    return eval_wer.compute(predictions=all_hypo, references=all_refer)
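
# Example: calculate_wer(['hello world'], ['hello word']) == 0.5
# (one substitution over a two-word reference).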


def word_emb_diff(reference, hypothesis):
    """Mean SBERT embedding difference over the word-level edit operations."""
    output, edit_ops = wer_embdiff(reference, hypothesis)
    ref_words, hypo_words = output.references[0], output.hypotheses[0]

    emb_diffs = []
    for op in edit_ops:
        if op.tag == 'replace':
            ref_word, hypo_word = ref_words[op.src_pos], hypo_words[op.dest_pos]
        elif op.tag == 'delete':
            ref_word, hypo_word = ref_words[op.src_pos], None
        elif op.tag == 'insert':
            ref_word, hypo_word = None, hypo_words[op.dest_pos]
        else:  # 'equal' operations contribute no difference
            continue

        # A zero vector (384 = all-MiniLM-L6-v2 embedding size) stands in for
        # the missing side of an insertion or deletion.
        ref_emb = torch.from_numpy(sbert_model.encode([ref_word])[0]) if ref_word else torch.zeros([384])
        hypo_emb = torch.from_numpy(sbert_model.encode([hypo_word])[0]) if hypo_word else torch.zeros([384])
        emb_diffs.append(ref_emb - hypo_emb)

    if len(emb_diffs) == 0:
        return torch.zeros([384])
    return torch.stack(emb_diffs, dim=0).mean(dim=0)


def sent_emb_diff(reference, hypothesis):
    """SBERT sentence-embedding difference between two strings."""
    embeddings = sbert_model.encode([reference, hypothesis])
    ref_emb, hypo_emb = torch.from_numpy(embeddings[0]), torch.from_numpy(embeddings[1])
    return ref_emb - hypo_emb
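
# In the main loop, word- and sentence-level diffs are computed for all
# C(5,2) = 10 hypothesis pairs and stacked into a [20, 384] tensor saved as
# 'emb_diff' in the output .pt file.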


def generate_prompt(input1, input2):
    # NOTE: the prompt wording (including its slightly odd grammar) is kept
    # verbatim so that it matches what the downstream model is trained with.
    return (
        "Below is the best-hypotheses transcribed from speech recognition system. "
        "Please try to revise it using the words which are only included into other-hypothesis, "
        "and write the response for the true transcription."
        f"\n\n### Best-hypothesis:\n{input1}\n\n### Other-hypothesis:\n{input2}\n\n### Response:\n"
    )
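
# Example (hypothetical inputs):
#   generate_prompt('i scream.', 'ice cream. eye scream.')
# renders the instruction text followed by
#   '### Best-hypothesis:\ni scream.\n\n### Other-hypothesis:\nice cream. eye scream.\n\n### Response:\n'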


DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
model = whisper.load_model('large-v2', device=DEVICE)
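
# NOTE: whisper.decode_score() used below is not part of stock openai-whisper;
# this script assumes a locally patched Whisper that returns all n-best texts
# together with their confidences (stock whisper.decode returns only the best
# hypothesis).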

f_noisy_wav = open('noisy_wav.scp', 'r')
f_clean_wav = open('clean_wav.scp', 'r')
f_text = open('text', 'r')
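# Expected line formats (Kaldi-style, parallel across the three files):
#   noisy_wav.scp / clean_wav.scp : "<utt_id> <wav_path>"
#   text                          : "<utt_id> <transcript words ...>"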

id = 0
pt_file = []
all_hypo, all_refer = [], []
for line in f_noisy_wav.readlines():
    utt_id, audio_path = line.strip().split()[:2]
    clean_line = f_clean_wav.readline()
    clean_utt_id, clean_audio_path = clean_line.strip().split()[:2]
    assert clean_utt_id == utt_id, (line, clean_line)
    gt = ' '.join(f_text.readline().strip().split()[1:])

    # Load and pad/trim to 30 s, as Whisper's fixed-size encoder input expects.
    audio = whisper.load_audio(audio_path)
    audio = whisper.pad_or_trim(audio)

    # Beam-search decode the noisy audio, keeping n-best texts + confidences.
    mel = whisper.log_mel_spectrogram(audio).to(model.device)
    options = whisper.DecodingOptions(language='en', beam_size=50)
    texts, confidences = whisper.decode_score(model, mel, options)

    # Whisper encoder features for the noisy and the parallel clean audio.
    noisy_audio_features = model.encoder(mel.unsqueeze(0))[0]

    clean_audio = whisper.pad_or_trim(whisper.load_audio(clean_audio_path))
    clean_mel = whisper.log_mel_spectrogram(clean_audio).to(model.device)
    clean_audio_features = model.encoder(clean_mel.unsqueeze(0))[0]

    # Keep up to 5 unique, non-empty hypotheses from the beam, best first.
    input, score = [], []
    for text, confidence in zip(texts, confidences):
        if len(input) < 5 and len(text) > 0 and text not in input:
            input.append(text)
            score.append(confidence)

    # Fewer than 5 unique hypotheses: sample extras at high temperature and
    # insert them in descending confidence order.
    if len(input) < 5:
        options = whisper.DecodingOptions(language='en', temperature=1.2)
        for _ in range(5 - len(input)):
            result = whisper.decode(model, mel, options)
            text, confidence = result.text, result.avg_logprob
            if text in input:
                continue
            inserted = False
            for i in range(len(input)):
                if confidence > score[i]:
                    input.insert(i, text)
                    score.insert(i, confidence)
                    inserted = True
                    break
            if not inserted:
                input.append(text)
                score.append(confidence)

    # Still short of 5: pad the list by duplicating randomly chosen entries.
    if len(input) < 5:
        num_to_add = 5 - len(input)
        for _ in range(num_to_add):
            rand_id = random.randint(0, len(input) - 1)
            rep_input, rep_score = copy.deepcopy(input[rand_id]), copy.deepcopy(score[rand_id])
            input.insert(rand_id + 1, rep_input)
            score.insert(rand_id + 1, rep_score)

    # Normalise each hypothesis and spell out digits; if num2words rejects a
    # match, fall back to the plain normalised text.
    for i in range(len(input)):
        try:
            text = normalizer(input[i])
            text = re.sub(r"[-+]?\d*\.?\d+|\d+%?", lambda m: num2words(m.group()), text).replace('%', ' percent')
        except Exception:
            text = normalizer(input[i])
            print(f'input exception: {text}')
        input[i] = text if len(text) > 0 else '<UNK>'

    # Same normalisation for the ground-truth transcript.
    try:
        output = normalizer(gt)
        output = re.sub(r"[-+]?\d*\.?\d+|\d+%?", lambda m: num2words(m.group()), output).replace('%', ' percent')
    except Exception:
        output = normalizer(gt)
        print(f'output exception: {output}')
    output = output if len(output) > 0 else '<UNK>'

    cur_wer = calculate_wer([input[0]], [output])

    # Pairwise embedding differences over all C(5,2) = 10 hypothesis pairs.
    we_diffs, se_diffs = [], []
    for i in range(5):
        for j in range(i + 1, 5):
            we_diffs.append(word_emb_diff(input[i], input[j]))
            se_diffs.append(sent_emb_diff(input[i], input[j]))

    we_diff = torch.stack(we_diffs, dim=0)           # [10, 384]
    se_diff = torch.stack(se_diffs, dim=0)           # [10, 384]
    emb_diff = torch.cat([we_diff, se_diff], dim=0)  # [20, 384]

    # Best hypothesis vs. the remaining four, joined into the prompt.
    input1 = input[0] + '.'
    input2 = '. '.join(input[1:]) + '.'

    full_prompt = generate_prompt(input1, input2)
    full_prompt_and_response = full_prompt + output
    encoded_full_prompt = tokenizer.encode(full_prompt, max_length=1024)
    encoded_full_prompt_and_response = tokenizer.encode(full_prompt_and_response, eos=True, max_length=1024)

    # Mask the prompt tokens so the training loss covers only the response.
    labels = encoded_full_prompt_and_response.clone()
    labels[: len(encoded_full_prompt)] = -1

    data = {"id": utt_id, "input_ids": encoded_full_prompt_and_response,
            "input_ids_no_response": encoded_full_prompt, "labels": labels,
            "input": input, "ground_truth": output, "am_score": score,
            "emb_diff": emb_diff, "audio_features": noisy_audio_features,
            "clean_audio_features": clean_audio_features}
    pt_file.append(data)

    id += 1
    print(f'utterance {id}: wer = {cur_wer}, confidence = {score[0]}')
    all_hypo.append(input[0])
    all_refer.append(output)

torch.save(pt_file, '/home3/huyuchen/pytorch_workplace/wllama/hypo_paradise_v2/train_rats.pt')

f_noisy_wav.close()
f_clean_wav.close()
f_text.close()

all_wer = calculate_wer(all_hypo, all_refer)
print(f'all wer = {all_wer}')