Spaces: paulbauriegel (Sleeping)
Commit 3f1afb1 • 1 parent: 8c58041
Create app.py
app.py
ADDED
@@ -0,0 +1,76 @@
from faster_whisper import WhisperModel
import pandas as pd
import gradio as gr
import psutil
import time  # needed for the processing-time measurement below

# The model name was not defined in this commit; "large-v2" is assumed from the
# demo description further down.
whisper_model = "large-v2"
model = WhisperModel(whisper_model, device="cpu", compute_type="int8")


def speech_to_text_translate(mic=None, file=None, lang=None):
    if mic is not None:
        audio = mic
    elif file is not None:
        audio = file
    else:
        raise gr.Error("You must either provide a mic recording or a file")
    if lang is None:
        raise gr.Error("Select a transcribe language")

    time_start = time.time()
    segments, info = model.transcribe(audio, task='translate', beam_size=5)
    print("Detected language '%s' with probability %f" % (info.language, info.language_probability))

    # Decode audio to text (consuming the generator runs the model)
    objects = [s._asdict() for s in segments]
    time_end = time.time()
    time_diff = time_end - time_start
    memory = psutil.virtual_memory()
    system_info = f"""
    *Memory: {memory.total / (1024 * 1024 * 1024):.2f}GB, used: {memory.percent}%, available: {memory.available / (1024 * 1024 * 1024):.2f}GB.*
    *Processing time: {time_diff:.5} seconds.*
    """
    df_results = pd.DataFrame(objects)
    df_results = df_results.drop(columns=['seek', 'tokens', 'avg_logprob'])
    return df_results, system_info


def speech_to_text_simple(mic=None, file=None):
    # Not part of this commit, but referenced by the "Transcribe audio" button
    # below; assumed to mirror the translate function with the default
    # transcription task so the app starts without a NameError.
    if mic is not None:
        audio = mic
    elif file is not None:
        audio = file
    else:
        raise gr.Error("You must either provide a mic recording or a file")

    time_start = time.time()
    segments, info = model.transcribe(audio, beam_size=5)
    objects = [s._asdict() for s in segments]
    time_diff = time.time() - time_start
    memory = psutil.virtual_memory()
    system_info = f"""
    *Memory: {memory.total / (1024 * 1024 * 1024):.2f}GB, used: {memory.percent}%, available: {memory.available / (1024 * 1024 * 1024):.2f}GB.*
    *Processing time: {time_diff:.5} seconds.*
    """
    df_results = pd.DataFrame(objects).drop(columns=['seek', 'tokens', 'avg_logprob'])
    return df_results, system_info


theme = gr.themes.Default().set(
    color_accent="#e20074",

    # Buttons
    button_primary_text_color='white',
    button_primary_text_color_hover='black',
    button_primary_background_fill="#e20074",
    button_primary_background_fill_hover='#c00063',  # --telekom-color-primary-hovered
    button_primary_border_color="#e20074",
    button_primary_border_color_hover="#c00063",
    stat_background_fill="#e20074",

    # Dark mode
    button_primary_background_fill_dark="#e20074",
    button_primary_background_fill_hover_dark='#c00063',  # --telekom-color-primary-hovered
    button_primary_border_color_dark="#e20074",
    button_primary_border_color_hover_dark="#c00063",
    stat_background_fill_dark="#e20074",
)

# Read memory once at start-up so the initial system-info line can be rendered
# (the original referenced `memory` here without defining it first).
memory = psutil.virtual_memory()

with gr.Blocks(title='Whisper Demo', theme=theme) as demo:
    gr.Markdown('''
    <div>
    <h1 style='text-align: center'>Simple Whisper Demo</h1>
    A simple Whisper demo using local CPU inference of the large-v2 model
    </div>
    ''')
    audio_in = gr.Audio(label="Record", source='microphone', type="filepath")
    file_in = gr.Audio(label="Upload", source='upload', type="filepath")
    drop_down = gr.Dropdown(["de", "en", "es", "fr", "ru"], value="en")
    transcribe_btn = gr.Button("Transcribe audio", variant="primary")
    translate_btn = gr.Button("Translate audio")
    trans_df = gr.DataFrame(label="Transcription dataframe", row_count=(0, "dynamic"), max_rows=10, wrap=True, overflow_row_behaviour='paginate')
    sys_info = gr.Markdown(f"*Memory: {memory.total / (1024 * 1024 * 1024):.2f}GB, used: {memory.percent}%, available: {memory.available / (1024 * 1024 * 1024):.2f}GB*")
    transcribe_btn.click(speech_to_text_simple,
                         [audio_in, file_in],
                         [trans_df, sys_info]
                         )
    translate_btn.click(speech_to_text_translate,
                        [audio_in, file_in, drop_down],
                        [trans_df, sys_info]
                        )

# Not in this commit: launching is assumed here so the Blocks app is actually
# served when the script is executed.
demo.launch()
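One detail worth flagging in the committed translate function: the `lang` value from the dropdown is validated but never forwarded to the model, so the language selection has no effect on decoding. If the intent was to pin the source language, faster-whisper's `transcribe()` accepts a `language` argument. A minimal sketch of that wiring (the audio path "audio.wav" and the hard-coded "de" are placeholders, not from the commit):

    # Sketch only, not the committed code: forward the selected language to faster-whisper.
    from faster_whisper import WhisperModel

    model = WhisperModel("large-v2", device="cpu", compute_type="int8")
    segments, info = model.transcribe("audio.wav", task="translate", language="de", beam_size=5)
    for segment in segments:
        # Segment objects expose start/end timestamps and the decoded text
        print(f"[{segment.start:.2f}s -> {segment.end:.2f}s] {segment.text}")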