# -*- coding: utf-8 -*- """demo 2/3.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1QeNS57tZzvJudeNjQczKJ-PbN0l1tK6V # Import library """ import os import librosa import gradio as gr import noisereduce as nr from scipy.io import wavfile from transformers import WhisperProcessor, WhisperForConditionalGeneration """# Load model""" from google.colab import drive import os drive.mount('/content/gdrive') # load model and processor" processor = WhisperProcessor.from_pretrained("/content/gdrive/MyDrive/ColabNotebookShared/Speech2TextHuyenNguyen/Model/FPTVinTest2") model = WhisperForConditionalGeneration.from_pretrained("/content/gdrive/MyDrive/ColabNotebookShared/Speech2TextHuyenNguyen/Model/FPTVinTest2/checkpoint-1332").to("cuda") model.config.forced_decoder_ids = processor.get_decoder_prompt_ids(task = "transcribe") """# Slipt audio""" from pydub import AudioSegment def preprocessing(path): # CONVERT MP3 -> WAV type_file = path.split(".")[-1] sound = AudioSegment.from_file(path, type_file) path_list = [] # SPLIT AUDIO time_audio = int(sound.duration_seconds / 20) + 1 for i in range(time_audio): t1 = i * 20 * 1000 t2 = (i+1) * 20 * 1000 if i == (time_audio-1): newAudio = sound[t1:] else: newAudio = sound[t1:t2] newAudio = newAudio.split_to_mono()[0] newAudio = newAudio.set_frame_rate(16000) # convert frequency : mọi freq --> 16000kHz audio_path = '/content/new_audio' + str(i) + '.wav' newAudio.export(audio_path, format="wav") path_list.append(audio_path) return path_list """# Capitalization""" !git lfs install !git clone https://github.com/huyenxam/Vicap.git # Commented out IPython magic to ensure Python compatibility. # %cd {"/content/Vicap"} import os from gec_model import GecBERTModel cache_dir = "./" model_cap = GecBERTModel( vocab_path=os.path.join(cache_dir, "vocabulary"), model_paths="dragonSwing/vibert-capu", split_chunk=True ) """# Spelling Correction""" from transformers import AutoTokenizer, AutoModelForSeq2SeqLM tokenizer_spell = AutoTokenizer.from_pretrained("VietAI/vit5-base") model_spell = AutoModelForSeq2SeqLM.from_pretrained("HuyenNguyen/Vi-test1") model_spell.cuda() def spelling_text(text): encoding = tokenizer_spell(text, return_tensors="pt") input_ids, attention_masks = encoding["input_ids"].to("cuda"), encoding["attention_mask"].to("cuda") outputs = model_spell.generate( input_ids=input_ids, attention_mask=attention_masks, max_length=30, ) for output in outputs: line = tokenizer_spell.decode(output, skip_special_tokens=True, clean_up_tokenization_spaces=True) return line def spelling(transcription): sentences = transcription.split(" ") len_sen = int(len(sentences) / 25) + 1 result = "" for i in range(len_sen): t1 = i * 24 t2 = (i+1) * 24 if i == (len_sen - 1): text = " ".join(sentences[t1:]) else: text = " ".join(sentences[t1:t2]) result = result + " " + spelling_text(text) return result """# Speech To Text""" import torch import numpy as np import gradio as gr from scipy.io.wavfile import write import pytube as pt from transformers import pipeline from huggingface_hub import model_info def transcribe(microphone, file_upload): warn_output = "" if (microphone is not None) and (file_upload is not None): warn_output = ( "WARNING: You've uploaded an audio file and used the microphone. " "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n" ) elif (microphone is None) and (file_upload is None): return "ERROR: You have to either use the microphone or upload an audio file" path = microphone if microphone is not None else file_upload X_new, sr_new = librosa.load(path) dst = "/content/audio.wav" write(dst, sr_new, X_new) # Split audio transcription = "" path_list = preprocessing(dst) for audio_path in path_list: # X, sr = noise(audio_path) X, sr = librosa.load(audio_path, sr=16000) input_features = processor(X.astype('float16'), return_tensors="pt").input_features # predicted_ids = model.generate(input_features.to("cuda"), temperature=1.0) predicted_ids = model.generate(input_features.to("cuda")) text = processor.batch_decode(predicted_ids, skip_special_tokens = True)[0] transcription = transcription + " " + text transcription_spell = spelling(transcription) transcription_cap = model_cap(transcription_spell)[0] # sentence_result = "Câu gốc: " + transcription + "\n" + "Câu sửa lỗi chính tả: " + transcription_spell + "\n" + "Thêm dấu: " + transcription_cap return transcription_cap def _return_yt_html_embed(yt_url): video_id = yt_url.split("?v=")[-1] HTML_str = ( f'
' "
" ) return HTML_str def yt_transcribe(yt_url): # yt = pt.YouTube(yt_url) # html_embed_str = _return_yt_html_embed(yt_url) # stream = yt.streams.filter(only_audio=True)[0] # src = "/content/audio.mp3" # dst = "/content/audio.wav" # stream.download(filename=src) # X_new, sr_new = librosa.load(src) # write(dst, sr_new, X_new) # # X_new, sr_new = librosa.load(src) # path_list = preprocessing(dst) # transcription = " " # for audio_path in path_list: # # X, sr = noise(audio_path) # X, sr = librosa.load(audio_path, sr=16000) # input_features = processor(X.astype('float16'), return_tensors="pt").input_features # predicted_ids = model.generate(input_features.to("cuda")) # text = processor.batch_decode(predicted_ids, skip_special_tokens = True)[0] # transcription = transcription + " " + text # transcription = spelling(transcription) # transcription = model_cap(transcription)[0] return "ouput", 'This feature is temporarily locked' demo = gr.Blocks() mf_transcribe = gr.Interface( fn=transcribe, inputs=[ gr.inputs.Audio(source="microphone", type="filepath", optional=True), gr.inputs.Audio(source="upload", type="filepath", optional=True), ], outputs="text", layout="horizontal", theme="huggingface", title="PYLAB Demo: Transcribe Audio", allow_flagging="never", ) yt_transcribe = gr.Interface( fn=yt_transcribe, inputs=[gr.inputs.Textbox(lines=1, placeholder="Paste the URL to a YouTube video here", label="YouTube URL")], outputs=["html", "text"], layout="horizontal", theme="huggingface", title="PYLAB Demo: Transcribe YouTube", allow_flagging="never", ) with demo: gr.TabbedInterface([mf_transcribe, yt_transcribe], ["Transcribe Audio", "Transcribe YouTube"]) demo.launch(enable_queue=True)