import os

import gradio as gr
from PIL import Image

# Hugging Face access token, passed when loading the one-shot-talking-face Space below
token = os.environ.get('HF_TOKEN')

# Load the remote Spaces so they can be called like local functions
whisper_to_gpt = gr.Blocks.load(name="spaces/fffiloni/whisper-to-chatGPT")
tts = gr.Interface.load(name="spaces/Flux9665/IMS-Toucan")
talking_face = gr.Blocks.load(name="spaces/fffiloni/one-shot-talking-face", api_key=token)


def infer(audio):
    # Transcribe the microphone recording with Whisper and get a ChatGPT reply;
    # the text reply is the second output of that Space
    gpt_response = whisper_to_gpt(audio, "translate", fn_index=0)

    # Synthesize the reply into English speech with the IMS-Toucan TTS Space
    audio_response = tts(gpt_response[1], "English Text", "English Accent",
                         "English Speaker's Voice", fn_index=0)

    return audio_response
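# The talking_face Space is loaded above but not called in infer() yet. A possible
# next step would be to turn the TTS audio into a talking-head video from a portrait
# image. This is only a sketch: the argument order and fn_index of the
# one-shot-talking-face endpoint are assumptions, not taken from this file.
#
# def infer_video(audio, portrait_image):
#     gpt_response = whisper_to_gpt(audio, "translate", fn_index=0)
#     audio_response = tts(gpt_response[1], "English Text", "English Accent",
#                          "English Speaker's Voice", fn_index=0)
#     video_response = talking_face(portrait_image, audio_response, fn_index=0)
#     return video_response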


# Microphone input reaches infer() as a file path; the reply is returned as audio
inputs = gr.Audio(source="microphone", type="filepath")
outputs = [gr.Audio()]

demo = gr.Interface(fn=infer, inputs=inputs, outputs=outputs)
demo.launch()