from faster_whisper import WhisperModel
import pandas as pd
import gradio as gr
import psutil
import time

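# Load the Whisper large-v2 model once at startup. int8 quantization keeps
# the memory footprint low enough for CPU-only inference.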
model = WhisperModel('large-v2', device="cpu", compute_type="int8")

def speech_to_text(mic=None, file=None, lang=None, task='transcribe'):
    """Transcribe (or, with task='translate', translate to English) a mic
    recording or an uploaded audio file."""
    if mic is not None:
        audio = mic
    elif file is not None:
        audio = file
    else:
        raise gr.Error("You must either provide a mic recording or a file")

    # An empty dropdown selection means: let Whisper auto-detect the language.
    if not lang:
        lang = None
    
    time_start = time.time()
    segments, info = model.transcribe(audio, task=task, language=lang, beam_size=5)
    print("Detected language '%s' with probability %f" % (info.language, info.language_probability))

    # faster-whisper returns segments lazily; iterating the generator is what
    # actually decodes the audio to text.
    objects = [s._asdict() for s in segments]
    time_end = time.time()
    time_diff = time_end - time_start

    memory = psutil.virtual_memory()
    system_info = (
        f"*Processing time: {time_diff:.2f} seconds. "
        f"Memory: {memory.total / (1024 ** 3):.2f} GB, "
        f"used: {memory.percent}%, "
        f"available: {memory.available / (1024 ** 3):.2f} GB.*"
    )

    df_results = pd.DataFrame(objects)
    # Drop internal decoding details that are not useful in the UI.
    df_results = df_results.drop(columns=['seek', 'tokens', 'avg_logprob'])
    return df_results, system_info

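# Gradio theme overrides in Telekom brand magenta (#e20074); the hover shade
# matches --telekom-color-primary-hovered, as noted below.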
theme = gr.themes.Default().set(
    color_accent="#e20074",

    # Buttons
    button_primary_text_color='white',
    button_primary_text_color_hover='black',
    button_primary_background_fill="#e20074",
    button_primary_background_fill_hover='#c00063', # --telekom-color-primary-hovered
    button_primary_border_color="#e20074",
    button_primary_border_color_hover="#c00063",
    stat_background_fill="#e20074",

    # Dark Mode
    button_primary_background_fill_dark="#e20074",
    button_primary_background_fill_hover_dark='#c00063', # --telekom-color-primary-hovered
    button_primary_border_color_dark="#e20074",
    button_primary_border_color_hover_dark="#c00063",
    stat_background_fill_dark="#e20074",
)

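# UI layout: mic/file inputs, an optional source-language dropdown, and
# separate transcribe/translate buttons feeding the same results table.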
with gr.Blocks(title='Whisper Demo', theme=theme) as demo:
    gr.Markdown('''
        <div>
        <h1 style='text-align: center'>Simple Whisper Demo</h1>
        A simple Whisper demo using local CPU inference of the large-v2 model
        </div>
    ''')
    audio_in = gr.Audio(label="Record", source='microphone', type="filepath")
    file_in = gr.Audio(label="Upload", source='upload', type="filepath")
    drop_down = gr.Dropdown(["de", "en", "es", "fr", "ru", None], value=None, label="Language (leave empty for auto-detection)")
    transcribe_btn = gr.Button("Transcribe audio", variant="primary")
    translate_btn = gr.Button("Translate audio")
    trans_df = gr.DataFrame(label="Transcription dataframe", row_count=(0, "dynamic"), max_rows=10, wrap=True, overflow_row_behaviour='paginate')
    sys_info = gr.Markdown("")
    # Each button passes its own task, so the two buttons actually differ;
    # previously both ran the same function and the task was inferred from
    # whether a language was selected.
    transcribe_btn.click(
        lambda mic, file, lang: speech_to_text(mic, file, lang, task='transcribe'),
        [audio_in, file_in, drop_down],
        [trans_df, sys_info],
    )
    translate_btn.click(
        lambda mic, file, lang: speech_to_text(mic, file, lang, task='translate'),
        [audio_in, file_in, drop_down],
        [trans_df, sys_info],
    )

demo.launch()
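# launch() serves the app locally; demo.launch(share=True) would additionally
# create a temporary public Gradio link if that is needed.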