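"""Gradio demo for Computer-Assisted Pronunciation Training (CAPT).

The user selects a prompt, records themselves reading it, and the assessment
model in `wav2vec_aligen` returns pronunciation accuracy, fluency, content,
and total scores, which are shown as a Markdown table.
"""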
import gradio as gr
from scipy.io import wavfile
from wav2vec_aligen import speaker_pronunciation_assesment


def analyze_audio(audio):
    # The Audio component passes a (sample_rate, samples) tuple, or None if nothing was recorded.
    if audio is None:
        return 'The audio is missing.'

    # Write the recording to a temporary WAV file for the assessment model.
    temp_filename = 'temp_audio.wav'
    wavfile.write(temp_filename, audio[0], audio[1])

    result = speaker_pronunciation_assesment(temp_filename)
    accuracy_score = result['pronunciation_accuracy']
    fluency_score = result['fluency_score']
    total_score = result['total_score']
    content_scores = result['content_scores']

    result_markdown = f"""| Language Aspect | Score |
|---|---|
| Pronunciation Accuracy | {accuracy_score} |
| Fluency | {fluency_score} |
| Total Score | {total_score} |
| Content Score | {content_scores} |
"""
    return result_markdown


CHOICES = ['Daibers', 'Carbon', 'Reptiles']


def get_paired_text(value):
    # Show the selected prompt as a Markdown heading for the user to read aloud.
    text = f'## {value}'
    return text

with gr.Blocks() as demo:
    with gr.Row():
        # Left column: prompt selection, recording, and submission.
        with gr.Column():
            with gr.Row():
                drp_down = gr.Dropdown(choices=CHOICES, scale=2)
                show_text_btn = gr.Button("Select", scale=1)
            read_text = gr.Markdown(label='Listen to speech')
            show_text_btn.click(get_paired_text, inputs=drp_down, outputs=read_text)
            audio_area = gr.Audio(label='Repeat the sentence')
            analyze_audio_btn = gr.Button("Submit", scale=1)
        # Right column: assessment results.
        with gr.Column():
            capt_area = gr.Markdown(label='CAPT Scores')
            analyze_audio_btn.click(analyze_audio, inputs=audio_area, outputs=capt_area)
demo.launch()