import gradio as gr
from PIL import Image
import os

# Note: this script uses the Gradio 3.x Spaces-loading API
# (gr.Blocks.load / gr.Interface.load and the `source=` audio argument).

# Hugging Face token, read from the environment, used to authenticate
# against the talking-face Space loaded below
token = os.environ.get('HF_TOKEN')

# Load the three Spaces that make up the pipeline:
# speech -> ChatGPT reply, reply -> speech, speech + portrait -> talking-head video
whisper_to_gpt = gr.Blocks.load(name="spaces/fffiloni/whisper-to-chatGPT")
tts = gr.Interface.load(name="spaces/StevenLimcorn/fastspeech2-TTS")
talking_face = gr.Interface.load(name="spaces/fffiloni/one-shot-talking-face", api_key=token)

def infer(audio):
    # Send the recording through Whisper + ChatGPT
    gpt_response = whisper_to_gpt(audio, "translate", fn_index=0)
    # print(gpt_response)
    # Synthesize the text reply (second output of the Space) as speech
    audio_response = tts(gpt_response[1], "Fastspeech2 + Melgan", fn_index=0)
    # Animate the portrait image with the synthesized speech
    image = Image.open(r"wise_woman_portrait.png")
    portrait = talking_face(image, audio_response)
    return audio_response, portrait

inputs = gr.Audio(source="microphone", type="filepath")
outputs = [gr.Audio(), gr.Video()]

demo = gr.Interface(fn=infer, inputs=inputs, outputs=outputs)
demo.launch()
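
# --- Optional smoke test (a sketch, not part of the original demo) ----------
# Assumes a local recording named "sample.wav" and a valid HF_TOKEN so the
# three Spaces above are reachable. demo.launch() above blocks, so run these
# lines instead of launch() if you only want to exercise the chained calls:
#
# audio_path, video_path = infer("sample.wav")
# print("TTS audio:", audio_path)
# print("Talking-face video:", video_path)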