0xSynapse commited on
Commit
4c6fdbd
1 Parent(s): 3810bf3

Upload 3 files

Browse files
Files changed (3) hide show
  1. app.py +21 -0
  2. output10.wav +0 -0
  3. requirements.txt +1 -0
app.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# --- Runtime environment setup (Hugging Face Space startup) ---
# NOTE(review): installing dependencies via os.system at import time is
# fragile (no error checking, slow cold starts); prefer requirements.txt
# where the platform supports it.
import os
# Pin gradio to the old 2.x API — the gr.inputs / gr.outputs names used
# below were removed in gradio 3.x.
os.system("pip install gradio==2.7.5.2")
# PaddlePaddle GPU wheel from the official PaddlePaddle wheel index.
os.system("python -m pip install paddlepaddle-gpu==2.2.1.post112 -f https://www.paddlepaddle.org.cn/whl/linux/mkl/avx/stable.html")
# Install the wav2lip PaddleHub module that inference() calls.
os.system("hub install wav2lip==1.0.0")
import gradio as gr
import paddlehub as hub

# Load the pretrained wav2lip lip-syncing module once at startup so each
# request only pays for the transfer, not model loading.
module = hub.Module(name="wav2lip")
10
def inference(image, audio):
    """Lip-sync the uploaded face video to the given audio clip.

    Args:
        image: Filepath to the face input (the gradio Video component
            supplies a filepath; despite the name, this is a video).
        audio: Filepath to the driving audio clip.

    Returns:
        str: Path to the generated video — "result.mp4" in the working
        directory (presumably the module's default output name under
        output_dir='.'; TODO confirm against the wav2lip module docs).
    """
    # Bug fix: the original passed face=video, but `video` is undefined
    # anywhere in the file — the first call would raise NameError. The
    # face input is this function's first parameter, `image`.
    module.wav2lip_transfer(face=image, audio=audio, output_dir='.',
                            use_gpu=False, face_enhancement=True)
    return "result.mp4"
13
+
14
# --- Gradio UI wiring (gradio 2.x API: gr.inputs / gr.outputs) ---
title = "Wav2lip"
description = "Gradio demo for Wav2lip: Accurately Lip-syncing Videos In The Wild. To use it, simply upload your image and audio file, or click one of the examples to load them. Read more at the links below. Please trim audio file to maximum of 3-4 seconds"

# Footer links to the paper and the PaddleGAN tutorial for this model.
article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2008.10010' target='_blank'>A Lip Sync Expert Is All You Need for Speech to Lip Generation In The Wild</a> | <a href='https://github.com/PaddlePaddle/PaddleGAN/blob/develop/docs/en_US/tutorials/wav2lip.md' target='_blank'>Github Repo</a></p>"
# NOTE(review): the first example is a .jpeg fed to a Video input — verify
# the gradio 2.x Video component accepts a still image here.
examples=[['monatest.jpeg',"game.wav"]]
# Two inputs (face video / audio clip), one video output; inference() is
# called with the two filepaths in order.
iface = gr.Interface(inference, [gr.inputs.Video(type=("filepath","webcam")),gr.inputs.Audio(source=("upload","microphone"), type="filepath")],
                     outputs=gr.outputs.Video(label="Output Video"),examples=examples,title=title,article=article,description=description)
# Queueing smooths long-running GPU-less inference; examples are cached.
iface.launch(cache_examples=True,enable_queue=True)
output10.wav ADDED
Binary file (808 kB). View file
 
requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ paddlehub==2.2.1