yellowcandle committed
Commit 344a72e · 1 Parent(s): f8b77d4
fix error
app.py CHANGED

@@ -4,7 +4,7 @@ import gradio as gr
 import torch
 from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline, AutoModelForCausalLM, AutoTokenizer
 
-@spaces.GPU(duration=
+@spaces.GPU(duration=60)
 def transcribe_audio(audio, model_id):
     if audio is None:
         return "Please upload an audio file."
@@ -36,7 +36,7 @@ def transcribe_audio(audio, model_id):
     result = pipe(audio)
     return result["text"]
 
-@spaces.GPU(duration=180)
+# @spaces.GPU(duration=180)
 def proofread(text):
     if text is None:
         return "Please provide the transcribed text for proofreading."
@@ -70,7 +70,7 @@ with gr.Blocks() as demo:
     with gr.Row():
         with gr.Column():
            audio = gr.Audio(sources="upload", type="filepath")
-            video = gr.Video(sources="upload"
+            video = gr.Video(sources="upload")
            model_dropdown = gr.Dropdown(choices=["openai/whisper-large-v3", "alvanlii/whisper-small-cantonese"], value="openai/whisper-large-v3")
 
            transcribe_button = gr.Button("Transcribe")
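For reference, a minimal sketch of how the pieces visible in these hunks typically fit together on a ZeroGPU Space: the @spaces.GPU(duration=60) decorator reserves a GPU for each call, and a transformers ASR pipeline produces the result["text"] returned on lines 36-37. Only the decorator, the imports, the function signature, and lines 36-37 actually appear in the diff; the model/processor setup in between is an assumption based on standard transformers usage, not the Space's real code.

import spaces
import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline

@spaces.GPU(duration=60)  # reserve a ZeroGPU slot for up to 60 seconds per call
def transcribe_audio(audio, model_id):
    if audio is None:
        return "Please upload an audio file."

    # Assumed setup: load the selected Whisper checkpoint and build an ASR pipeline.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32
    model = AutoModelForSpeechSeq2Seq.from_pretrained(model_id, torch_dtype=dtype)
    processor = AutoProcessor.from_pretrained(model_id)
    pipe = pipeline(
        "automatic-speech-recognition",
        model=model,
        tokenizer=processor.tokenizer,
        feature_extractor=processor.feature_extractor,
        torch_dtype=dtype,
        device=device,
    )

    result = pipe(audio)      # line 36 of app.py
    return result["text"]     # line 37 of app.py

If the Space runs on ZeroGPU hardware, commenting out @spaces.GPU(duration=180) on proofread means that function no longer reserves a GPU for its calls, while transcribe_audio keeps a shorter 60-second reservation.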