Update app.py
app.py CHANGED
@@ -8,7 +8,7 @@ from transformers.pipelines.audio_utils import ffmpeg_read
 import tempfile
 import os

-MODEL_NAME = "dataprizma/whisper-
+MODEL_NAME = "dataprizma/whisper-large-v3-turbo"
 BATCH_SIZE = 8
 FILE_LIMIT_MB = 1000
 YT_LENGTH_LIMIT_S = 3600 # limit to 1 hour YouTube files
@@ -18,14 +18,14 @@ device = 0 if torch.cuda.is_available() else "cpu"
 pipe = pipeline(
     task="automatic-speech-recognition",
     model=MODEL_NAME,
-    chunk_length_s=
+    chunk_length_s=15,
     device=device,
 )


 def transcribe(inputs, task):
     if inputs is None:
-        raise gr.Error("
+        raise gr.Error("Fayl tanlanmadi yoki yuklashad xatolik! Iltimos qaytadan urinib ko'ring.")

     text = pipe(inputs, batch_size=BATCH_SIZE, generate_kwargs={"task": task}, return_timestamps=True)["text"]
     return text
@@ -99,7 +99,7 @@ mf_transcribe = gr.Interface(
     outputs="text",
     layout="horizontal",
     theme="huggingface",
-    title="Whisper
+    title="Whisper Uzbek: Transcribe Audio",
     description=(
         "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the OpenAI Whisper"
         f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
@@ -117,7 +117,7 @@ file_transcribe = gr.Interface(
     outputs="text",
     layout="horizontal",
     theme="huggingface",
-    title="Whisper
+    title="Whisper Uzbek: Transcribe Audio",
     description=(
         "Transcribe long-form microphone or audio inputs with the click of a button! Demo uses the OpenAI Whisper"
         f" checkpoint [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe audio files"
@@ -135,7 +135,7 @@ yt_transcribe = gr.Interface(
     outputs=["html", "text"],
     layout="horizontal",
     theme="huggingface",
-    title="Whisper
+    title="Whisper Uzbek: Transcribe YouTube",
     description=(
         "Transcribe long-form YouTube videos with the click of a button! Demo uses the OpenAI Whisper checkpoint"
         f" [{MODEL_NAME}](https://huggingface.co/{MODEL_NAME}) and 🤗 Transformers to transcribe video files of"
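For reference, a minimal sketch of how the settings touched by this commit (the dataprizma/whisper-large-v3-turbo checkpoint and chunk_length_s=15) come together when the transformers pipeline is called directly, outside the Gradio app. This is not part of the commit itself: the "sample.wav" path is a placeholder, and it assumes torch and transformers are installed and the checkpoint is reachable.

    # Sketch only, not from the commit: exercise the updated pipeline settings.
    import torch
    from transformers import pipeline

    MODEL_NAME = "dataprizma/whisper-large-v3-turbo"
    BATCH_SIZE = 8

    device = 0 if torch.cuda.is_available() else "cpu"

    pipe = pipeline(
        task="automatic-speech-recognition",
        model=MODEL_NAME,
        chunk_length_s=15,  # chunk size set by this commit
        device=device,
    )

    # Long-form audio is split into 15 s chunks, batched, and transcribed
    # with timestamps, mirroring the transcribe() call in app.py.
    result = pipe(
        "sample.wav",  # placeholder input file
        batch_size=BATCH_SIZE,
        generate_kwargs={"task": "transcribe"},
        return_timestamps=True,
    )
    print(result["text"])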