# app.py — Gradio demo: Whisper speech-to-text with Hugging Face summarization
# (Hugging Face Space by AlexMo, revision 9ceaecf, 3.25 kB)
import whisper
from pytube import YouTube
from transformers import pipeline
import gradio as gr
import os
import re
# Whisper "base" checkpoint: speech-to-text model used for all transcription.
model = whisper.load_model("base")
# model = pipeline(model="AlexMo/FIFA_WC22_WINNER_LANGUAGE_MODEL")
# Default Hugging Face summarization pipeline; condenses transcripts to a summary.
summarizer = pipeline("summarization")
def transcribe(microphone, file_upload):
    """Transcribe audio from the microphone or an uploaded file with Whisper.

    Parameters
    ----------
    microphone : str or None
        Filepath of the recorded microphone audio (gradio ``type="filepath"``).
    file_upload : str or None
        Filepath of the uploaded audio file.

    Returns
    -------
    str
        The transcription, prefixed with a warning when both inputs were
        supplied, or an error message when neither was.
    """
    warn_output = ""
    if (microphone is not None) and (file_upload is not None):
        warn_output = (
            "WARNING: You've uploaded an audio file and used the microphone. "
            "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n"
        )
    elif (microphone is None) and (file_upload is None):
        return "ERROR: You have to either use the microphone or upload an audio file"

    # Microphone recording wins when both inputs are present (warned above).
    file = microphone if microphone is not None else file_upload
    # BUG FIX: the original called summarizer(file)["text"]. The summarization
    # pipeline neither accepts an audio filepath nor returns a dict with a
    # "text" key (it returns [{"summary_text": ...}]), so this path always
    # failed. The audio must be transcribed with the Whisper model, which does
    # return {"text": ...}.
    text = model.transcribe(file)["text"]
    return warn_output + text
def getAudio(url):
    """Download a YouTube video's audio track and rename it to an .mp3 file.

    Parameters
    ----------
    url : str
        URL of the YouTube video.

    Returns
    -------
    str
        Path of the downloaded file after its extension is changed to .mp3.
    """
    # Grab the first audio-only stream pytube offers for this video.
    audio_stream = YouTube(url).streams.filter(only_audio=True).first()
    downloaded = audio_stream.download(output_path=".")
    # pytube saves with the stream's native extension; swap it for .mp3.
    root, _ = os.path.splitext(downloaded)
    mp3_path = root + '.mp3'
    os.rename(downloaded, mp3_path)
    return mp3_path
def getText(url):
    """Transcribe the audio of a YouTube video with Whisper.

    Parameters
    ----------
    url : str
        YouTube video URL; may be empty (nothing to transcribe).

    Returns
    -------
    str
        The whitespace-stripped transcript, or '' when no URL was given.
    """
    # FIX: the original fell off the end (implicitly returning None) for an
    # empty URL, and defined an unused local `output_text_transcribe` that
    # shadowed the module-level gradio Textbox of the same name.
    if url == '':
        return ''
    res = model.transcribe(getAudio(url))
    return res['text'].strip()
def getSummary(article):
    """Summarize the opening of a transcript with the summarization pipeline.

    Only the first five sentence-like segments (split after '.', ':' or ';'
    followed by whitespace) are fed to the model, keeping the input short.

    Parameters
    ----------
    article : str
        The transcript text to summarize.

    Returns
    -------
    str
        The cleaned summary text.
    """
    segments = re.split(r'(?<=[.:;])\s', article)
    header = ' '.join(segments[:5])
    result = summarizer(header, min_length=15, max_length=120, do_sample=False)
    summary = result[0]['summary_text']
    # The model sometimes emits " ." before sentence ends; tidy that up.
    return summary.replace(' .', '.').strip()
# Standalone Interface for the mic / file-upload tab; it is embedded inside
# the Blocks layout defined below. NOTE(review): uses the legacy `gr.inputs`
# API and `layout`/`theme` kwargs — assumes an older pinned Gradio version;
# confirm against requirements before upgrading Gradio.
mf_transcribe = gr.Interface(
    fn=transcribe,
    inputs=[
        # Both inputs are optional filepaths; `transcribe` decides which one
        # to use and warns when both are supplied.
        gr.inputs.Audio(source="microphone", type="filepath", optional=True),
        gr.inputs.Audio(source="upload", type="filepath", optional=True),
    ],
    outputs="text",
    layout="horizontal",
    theme="huggingface",
    title="Dutch whisper summarizer",
    description=(
        "Get a transcription and summary of some Dutch audio input!."
    ),
    allow_flagging="never",
)
# Top-level UI: a header banner plus two tabs (mic/file transcription and
# YouTube transcription + summarization), then launch the app.
with gr.Blocks() as demo:
    gr.HTML(
        """
<div style="text-align: center; max-width: 500px; margin: 0 auto;">
<div>
<h1>Dutch whisperer</h1>
</div>
<p style="margin-bottom: 10px; font-size: 94%">
Summarize audio files, mic input or Youtube videos using OpenAI's Whisper
</p>
</div>
"""
    )
    with gr.Tab('Get a summary from your own mic or audio file'):
        # BUG FIX: the original line was the bare expression `mf_transcribe`,
        # which evaluates the Interface object without adding its components
        # to this Blocks layout, leaving the tab empty. render() embeds it.
        mf_transcribe.render()
    with gr.Tab('Summary of Youtube video'):
        input_text_url = gr.Textbox(placeholder='Youtube video URL', label='URL')
        result_button_transcribe = gr.Button('1. Transcribe')
        output_text_transcribe = gr.Textbox(placeholder='Transcript of the YouTube video.', label='Transcript')
        result_button_summary = gr.Button('2. Create Summary')
        output_text_summary = gr.Textbox(placeholder='Summary of the YouTube video transcript.', label='Summary')
        # Wire the two-step flow: URL -> transcript -> summary.
        result_button_transcribe.click(getText, inputs=input_text_url, outputs=output_text_transcribe)
        result_button_summary.click(getSummary, inputs=output_text_transcribe, outputs=output_text_summary)

demo.launch(debug=True)