Update modules/sadtalker_test.py
modules/sadtalker_test.py  +17 -3
@@ -8,6 +8,12 @@ from src.facerender.animate import AnimateFromCoeff
 from src.generate_batch import get_data
 from src.generate_facerender_batch import get_facerender_data
 
+from pydub import AudioSegment
+
+def mp3_to_wav(mp3_filename,wav_filename,frame_rate):
+    mp3_file = AudioSegment.from_file(file=mp3_filename)
+    mp3_file.set_frame_rate(frame_rate).export(wav_filename,format="wav")
+
 from modules.text2speech import text2speech
 
 class SadTalker():
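The new helper leans on pydub's AudioSegment, which relies on an ffmpeg/avlib install for mp3 decoding. A minimal standalone sketch of the same conversion, assuming ffmpeg is on PATH and using hypothetical file names:

    from pydub import AudioSegment

    def mp3_to_wav(mp3_filename, wav_filename, frame_rate):
        # decode the mp3, resample to the requested rate, and write a wav
        audio = AudioSegment.from_file(file=mp3_filename)
        audio.set_frame_rate(frame_rate).export(wav_filename, format="wav")

    # hypothetical paths; 16000 Hz matches the rate the commit passes below
    mp3_to_wav("speech.mp3", "speech.wav", 16000)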
@@ -68,7 +74,13 @@ class SadTalker():
 
         if os.path.isfile(driven_audio):
             audio_path = os.path.join(input_dir, os.path.basename(driven_audio))
-
+
+            #### mp3 to wav
+            if '.mp3' in audio_path:
+                mp3_to_wav(driven_audio, audio_path.replace('.mp3', '.wav'), 16000)
+                audio_path = audio_path.replace('.mp3', '.wav')
+            else:
+                shutil.move(driven_audio, input_dir)
         else:
             text2speech
 
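This branch normalizes the uploaded audio: mp3 files are converted to a 16 kHz wav inside input_dir and audio_path is repointed at the result, while other formats are moved in unchanged (the shutil.move call assumes shutil is already imported at the top of the module). An isolated sketch of the same path handling, with a hypothetical resolve_audio helper:

    import os
    import shutil

    def resolve_audio(driven_audio, input_dir, to_wav):
        # hypothetical wrapper mirroring the committed branch
        audio_path = os.path.join(input_dir, os.path.basename(driven_audio))
        if '.mp3' in audio_path:
            # convert and point downstream code at the wav
            to_wav(driven_audio, audio_path.replace('.mp3', '.wav'), 16000)
            audio_path = audio_path.replace('.mp3', '.wav')
        else:
            # non-mp3 uploads are simply moved into the working directory
            shutil.move(driven_audio, input_dir)
        return audio_path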
@@ -86,12 +98,12 @@ class SadTalker():
         batch = get_data(first_coeff_path, audio_path, self.device)
         coeff_path = self.audio_to_coeff.generate(batch, save_dir, pose_style)
         #coeff2video
-        batch_size =
+        batch_size = 4
         data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path, batch_size, still_mode=still_mode)
         self.animate_from_coeff.generate(data, save_dir, enhancer='gfpgan' if use_enhancer else None)
         video_name = data['video_name']
         print(f'The generated video is named {video_name} in {save_dir}')
-
+
         torch.cuda.empty_cache()
         torch.cuda.synchronize()
 
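This hunk hard-codes batch_size = 4 for get_facerender_data and keeps the explicit torch.cuda.empty_cache() / torch.cuda.synchronize() pair that frees cached GPU memory after a video is generated. A guarded version of that cleanup, as a hypothetical helper so it is a no-op on CPU-only machines:

    import torch

    def free_gpu():
        # hypothetical helper; the commit calls the two torch.cuda functions inline
        if torch.cuda.is_available():
            torch.cuda.empty_cache()   # return cached allocator blocks to the driver
            torch.cuda.synchronize()   # wait for any in-flight kernels to finish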
@@ -100,4 +112,6 @@ class SadTalker():
 
         else:
             return os.path.join(save_dir, video_name+'.mp4'), os.path.join(save_dir, video_name+'.mp4')
+
+
 