fffiloni committed on
Commit 60ace2e
1 Parent(s): 5cd1297

Update app.py

Files changed (1):
  app.py  +5 -5
app.py CHANGED
@@ -3,19 +3,19 @@ from PIL import Image
 import os
 token = os.environ.get('HF_TOKEN')
 whisper_to_gpt = gr.Blocks.load(name="spaces/fffiloni/whisper-to-chatGPT")
-tts = gr.Interface.load(name="spaces/StevenLimcorn/fastspeech2-TTS")
+tts = gr.Interface.load(name="spaces/Flux9665/IMS-Toucan")
 talking_face = gr.Blocks.load(name="spaces/fffiloni/one-shot-talking-face", api_key=token)
 
 def infer(audio):
     gpt_response = whisper_to_gpt(audio, "translate", fn_index=0)
     #print(gpt_response)
-    audio_response = tts(gpt_response[1], "Fastspeech2 + Melgan", fn_index=0)
+    audio_response = tts(gpt_response[1], "English Text", "English Accent", "English Speaker's Voice", fn_index=0)
     #image = Image.open(r"wise_woman_portrait.png")
-    portrait = talking_face("wise_woman_portrait.png", audio_response, fn_index=0)
-    return audio_response, portrait
+    #portrait = talking_face("wise_woman_portrait.png", audio_response, fn_index=0)
+    return audio_response
 
 inputs = gr.Audio(source="microphone",type="filepath")
-outputs = [gr.Audio(), gr.Video()]
+outputs = [gr.Audio()]
 
 demo = gr.Interface(fn=infer, inputs=inputs, outputs=outputs)
 demo.launch()
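
For reference, a minimal standalone sketch of the new TTS call introduced by this commit, assuming the IMS-Toucan Space keeps the four positional inputs used in app.py above (text, text language, accent, speaker voice) and the gradio 3.x loading API this Space already relies on; the sample sentence is a placeholder.

# Minimal sketch (assumption: gradio 3.x, same Space-loading API as app.py above).
import gradio as gr

# Load the IMS-Toucan Space as a callable, exactly as the updated app.py does.
tts = gr.Interface.load(name="spaces/Flux9665/IMS-Toucan")

# Call the first exposed endpoint with the same positional arguments the diff
# introduces: (text, text language, accent, speaker voice). The call returns a
# filepath to the synthesized audio.
audio_path = tts(
    "Hello, this is a test of the new TTS backend.",  # placeholder text
    "English Text",
    "English Accent",
    "English Speaker's Voice",
    fn_index=0,
)
print(audio_path)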