Plachta committed on
Commit 1174a8a • 1 Parent(s): bdf3420

Update app.py

Files changed (1)
  1. app.py +77 -21
app.py CHANGED
@@ -82,28 +82,43 @@ from modules.audio import mel_spectrogram
 
 to_mel = lambda x: mel_spectrogram(x, **mel_fn_args)
 
+# f0 conditioned model
+dit_checkpoint_path, dit_config_path = load_custom_model_from_hf("Plachta/Seed-VC",
+                                                                 "DiT_step_404000_seed_v2_uvit_facodec_small_wavenet_f0_pruned.pth",
+                                                                 "config_dit_mel_seed_facodec_small_wavenet_f0.yml")
+
+config = yaml.safe_load(open(dit_config_path, 'r'))
+model_params = recursive_munch(config['model_params'])
+model_f0 = build_model(model_params, stage='DiT')
+hop_length = config['preprocess_params']['spect_params']['hop_length']
+sr = config['preprocess_params']['sr']
+
+# Load checkpoints
+model_f0, _, _, _ = load_checkpoint(model_f0, None, dit_checkpoint_path,
+                                    load_only_params=True, ignore_modules=[], is_distributed=False)
+for key in model_f0:
+    model_f0[key].eval()
+    model_f0[key].to(device)
+model_f0.cfm.estimator.setup_caches(max_batch_size=1, max_seq_length=8192)
+
+# f0 extractor
+from modules.rmvpe import RMVPE
+
+model_path = load_custom_model_from_hf("lj1995/VoiceConversionWebUI", "rmvpe.pt", None)
+rmvpe = RMVPE(model_path, is_half=False, device=device)
+
+def adjust_f0_semitones(f0_sequence, n_semitones):
+    factor = 2 ** (n_semitones / 12)
+    return f0_sequence * factor
+
 @spaces.GPU
 @torch.no_grad()
 @torch.inference_mode()
 def voice_conversion(source, target, diffusion_steps, length_adjust, inference_cfg_rate, n_quantizers):
+    inference_module = model if not f0_condition else model_f0
     # Load audio
     source_audio = librosa.load(source, sr=sr)[0]
     ref_audio = librosa.load(target, sr=sr)[0]
-    # source_sr, source_audio = source
-    # ref_sr, ref_audio = target
-    # # if any of the inputs has 2 channels, take the first only
-    # if source_audio.ndim == 2:
-    #     source_audio = source_audio[:, 0]
-    # if ref_audio.ndim == 2:
-    #     ref_audio = ref_audio[:, 0]
-    #
-    # source_audio, ref_audio = source_audio / 32768.0, ref_audio / 32768.0
-    #
-    # # if source or audio sr not equal to default sr, resample
-    # if source_sr != sr:
-    #     source_audio = librosa.resample(source_audio, source_sr, sr)
-    # if ref_sr != sr:
-    #     ref_audio = librosa.resample(ref_audio, ref_sr, sr)
 
     # Process audio
     source_audio = torch.tensor(source_audio[:sr * 30]).unsqueeze(0).float().to(device)
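
For reference, the adjust_f0_semitones helper added in this hunk maps a semitone shift to a multiplicative factor on F0: factor = 2 ** (n_semitones / 12), so +12 semitones doubles the pitch and -12 halves it. A minimal standalone sketch of that behaviour (NumPy only; the contour values below are made up for illustration and are not taken from the Space):

import numpy as np

def adjust_f0_semitones(f0_sequence, n_semitones):
    # one semitone corresponds to a factor of 2 ** (1 / 12) on frequency
    factor = 2 ** (n_semitones / 12)
    return f0_sequence * factor

# illustrative F0 contour in Hz; 0.0 marks unvoiced frames
f0 = np.array([0.0, 220.0, 233.1, 246.9, 0.0])
print(adjust_f0_semitones(f0, 12))   # voiced frames double: ~[0, 440.0, 466.2, 493.8, 0]
print(adjust_f0_semitones(f0, -12))  # voiced frames halve:  ~[0, 110.0, 116.55, 123.45, 0]
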
@@ -157,24 +172,61 @@ def voice_conversion(source, target, diffusion_steps, length_adjust, inference_cfg_rate, n_quantizers):
     feat2 = feat2 - feat2.mean(dim=0, keepdim=True)
     style2 = campplus_model(feat2.unsqueeze(0))
 
+    if f0_condition:
+        waves_16k = torchaudio.functional.resample(waves_24k, sr, 16000)
+        converted_waves_16k = torchaudio.functional.resample(converted_waves_24k, sr, 16000)
+        F0_ori = rmvpe.infer_from_audio(waves_16k[0], thred=0.03)
+        F0_alt = rmvpe.infer_from_audio(converted_waves_16k[0], thred=0.03)
+
+        F0_ori = torch.from_numpy(F0_ori).to(device)[None]
+        F0_alt = torch.from_numpy(F0_alt).to(device)[None]
+
+        voiced_F0_ori = F0_ori[F0_ori > 1]
+        voiced_F0_alt = F0_alt[F0_alt > 1]
+
+        log_f0_alt = torch.log(F0_alt + 1e-5)
+        voiced_log_f0_ori = torch.log(voiced_F0_ori + 1e-5)
+        voiced_log_f0_alt = torch.log(voiced_F0_alt + 1e-5)
+        median_log_f0_ori = torch.median(voiced_log_f0_ori)
+        median_log_f0_alt = torch.median(voiced_log_f0_alt)
+        # mean_log_f0_ori = torch.mean(voiced_log_f0_ori)
+        # mean_log_f0_alt = torch.mean(voiced_log_f0_alt)
+
+        # shift alt log f0 level to ori log f0 level
+        shifted_log_f0_alt = log_f0_alt.clone()
+        if auto_f0_adjust:
+            shifted_log_f0_alt[F0_alt > 1] = log_f0_alt[F0_alt > 1] - median_log_f0_alt + median_log_f0_ori
+        shifted_f0_alt = torch.exp(shifted_log_f0_alt)
+        if pitch_shift != 0:
+            shifted_f0_alt[F0_alt > 1] = adjust_f0_semitones(shifted_f0_alt[F0_alt > 1], pitch_shift)
+    else:
+        F0_ori = None
+        F0_alt = None
+        shifted_f0_alt = None
+
     # Length regulation
-    cond = model.length_regulator(S_alt, ylens=target_lengths, n_quantizers=int(n_quantizers))[0]
-    prompt_condition = model.length_regulator(S_ori, ylens=target2_lengths, n_quantizers=int(n_quantizers))[0]
+    cond = inference_module.length_regulator(S_alt, ylens=target_lengths, n_quantizers=int(n_quantizers), f0=shifted_f0_alt)[0]
+    prompt_condition = inference_module.length_regulator(S_ori, ylens=target2_lengths, n_quantizers=int(n_quantizers), f0=F0_ori)[0]
     cat_condition = torch.cat([prompt_condition, cond], dim=1)
 
     # Voice Conversion
-    vc_target = model.cfm.inference(cat_condition, torch.LongTensor([cat_condition.size(1)]).to(mel2.device),
+    vc_target = inference_module.cfm.inference(cat_condition, torch.LongTensor([cat_condition.size(1)]).to(mel2.device),
                                     mel2, style2, None, diffusion_steps, inference_cfg_rate=inference_cfg_rate)
     vc_target = vc_target[:, :, mel2.size(-1):]
 
     # Convert to waveform
-    vc_wave = hift_gen.inference(vc_target)
+    # if f0_condition:
+    #     f04vocoder = torch.nn.functional.interpolate(shifted_f0_alt.unsqueeze(1), size=vc_target.size(-1),
+    #                                                   mode='nearest').squeeze(1)
+    # else:
+    f04vocoder = None
+    vc_wave = hift_gen.inference(vc_target, f0=f04vocoder)
 
     return sr, vc_wave.squeeze(0).cpu().numpy()
 
 
 if __name__ == "__main__":
-    description = "Zero-shot voice conversion with in-context learning. Check out our [GitHub repository](https://github.com/Plachtaa/seed-vc) for details and updates."
+    description = "Zero-shot voice conversion with in-context learning. Check out our [GitHub repository](https://github.com/Plachtaa/seed-vc) for details and updates."
     inputs = [
         gr.Audio(type="filepath", label="Source Audio"),
         gr.Audio(type="filepath", label="Reference Audio"),
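
The auto F0 adjust branch in the hunk above moves the source pitch contour into the reference speaker's register by matching medians in the log-F0 domain over voiced frames (F0 > 1 Hz): shifted_log_f0_alt = log_f0_alt - median_log_f0_alt + median_log_f0_ori, then exponentiated back to Hz. A minimal torch-only sketch of that single step, isolated from the rest of the pipeline (the function name and the contours below are made up for illustration):

import torch

def match_f0_register(f0_alt, f0_ori, eps=1e-5):
    # shift the source (alt) contour so its median log-F0 over voiced frames
    # matches the reference (ori); frames at or below 1 Hz are treated as unvoiced
    voiced_alt = f0_alt[f0_alt > 1]
    voiced_ori = f0_ori[f0_ori > 1]
    shift = torch.median(torch.log(voiced_ori + eps)) - torch.median(torch.log(voiced_alt + eps))
    shifted = f0_alt.clone()
    shifted[f0_alt > 1] = torch.exp(torch.log(f0_alt[f0_alt > 1] + eps) + shift)
    return shifted

# made-up contours: source sits around 220 Hz, reference around 110 Hz
f0_alt = torch.tensor([0.0, 200.0, 220.0, 240.0, 0.0])
f0_ori = torch.tensor([0.0, 100.0, 110.0, 120.0, 0.0])
print(match_f0_register(f0_alt, f0_ori))  # voiced frames roughly halved; unvoiced zeros preserved
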
@@ -182,9 +234,13 @@ if __name__ == "__main__":
         gr.Slider(minimum=0.5, maximum=2.0, step=0.1, value=1.0, label="Length Adjust", info="<1.0 for speed-up speech, >1.0 for slow-down speech"),
         gr.Slider(minimum=0.0, maximum=1.0, step=0.1, value=0.7, label="Inference CFG Rate", info="has subtle influence"),
         gr.Slider(minimum=1, maximum=3, step=1, value=3, label="N Quantizers", info="the less quantizer used, the less prosody of source audio is preserved"),
+        gr.Checkbox(label="Use F0 conditioned model", value=False, info="Must set to true for singing voice conversion"),
+        gr.Checkbox(label="Auto F0 adjust", value=True,
+                    info="Roughly adjust F0 to match target voice. Only works when F0 conditioned model is used."),
+        gr.Slider(label='Pitch shift', minimum=-24, maximum=24, step=1, value=0, info='Pitch shift in semitones, only works when F0 conditioned model is used'),
     ]
 
-    examples = [["examples/source/yae_0.wav", "examples/reference/dingzhen_0.wav", 50, 1.0, 0.7, 1]]
+    examples = [["examples/source/yae_0.wav", "examples/reference/dingzhen_0.wav", 50, 1.0, 0.7, 1, False, True, 0],]
 
     outputs = gr.Audio(label="Output Audio")
 
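
In the conversion hunk above, the F0 track is not yet passed to the vocoder: the f04vocoder interpolation stays commented out and hift_gen.inference receives f0=None. For illustration, a self-contained sketch of what those commented lines would compute, namely resampling a frame-rate F0 contour to the mel-frame length with nearest-neighbour interpolation (the tensor shapes below are made up):

import torch

shifted_f0_alt = torch.full((1, 250), 220.0)  # made-up (batch, n_f0_frames) contour in Hz
vc_target = torch.randn(1, 80, 400)           # made-up (batch, n_mels, n_mel_frames) mel

f04vocoder = torch.nn.functional.interpolate(shifted_f0_alt.unsqueeze(1),
                                             size=vc_target.size(-1),
                                             mode='nearest').squeeze(1)
print(f04vocoder.shape)  # torch.Size([1, 400]): one F0 value per mel frame
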