mrfakename committed
Commit a9a195b (1 parent: e2646fd)

Sync from GitHub repo


This Space is synced from the GitHub repo: https://github.com/SWivid/F5-TTS. Please submit contributions there.

Files changed (1)
  1. app.py +6 -3
app.py CHANGED
@@ -209,7 +209,8 @@ def split_text_into_batches(text, max_chars=200, split_words=SPLIT_WORDS):
         batches.append(current_batch)
 
     return batches
-@spaces.GPU
+
+@gpu_decorator
 def infer_batch(ref_audio, ref_text, gen_text_batches, exp_name, remove_silence, progress=gr.Progress()):
     if exp_name == "F5-TTS":
         ema_model = F5TTS_ema_model
@@ -294,6 +295,7 @@ def infer_batch(ref_audio, ref_text, gen_text_batches, exp_name, remove_silence,
 
     return (target_sample_rate, final_wave), spectrogram_path
 
+@gpu_decorator
 def infer(ref_audio_orig, ref_text, gen_text, exp_name, remove_silence, custom_split_words=''):
     if not custom_split_words.strip():
         custom_words = [word.strip() for word in custom_split_words.split(',')]
@@ -342,7 +344,8 @@ def infer(ref_audio_orig, ref_text, gen_text, exp_name, remove_silence, custom_s
 
     gr.Info(f"Generating audio using {exp_name} in {len(gen_text_batches)} batches")
     return infer_batch((audio, sr), ref_text, gen_text_batches, exp_name, remove_silence)
-
+
+@gpu_decorator
 def generate_podcast(script, speaker1_name, ref_audio1, ref_text1, speaker2_name, ref_audio2, ref_text2, exp_name, remove_silence):
     # Split the script into speaker blocks
     speaker_pattern = re.compile(f"^({re.escape(speaker1_name)}|{re.escape(speaker2_name)}):", re.MULTILINE)
@@ -678,7 +681,7 @@ with gr.Blocks() as app_emotional:
 
     # Output audio
     audio_output_emotional = gr.Audio(label="Synthesized Audio")
-
+    @gpu_decorator
    def generate_emotional_speech(
        regular_audio,
        regular_ref_text,
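
The net effect of the diff is to drop the hard-coded @spaces.GPU decorator and instead apply a project-level @gpu_decorator to the four inference entry points (infer_batch, infer, generate_podcast, generate_emotional_speech). The decorator's definition is not part of this diff; the following is only a minimal sketch of how such a wrapper could look, assuming it falls back to a no-op when the spaces package (Hugging Face ZeroGPU) is not installed:

# Hypothetical sketch, not taken from this diff: apply spaces.GPU only when
# running on Hugging Face Spaces with ZeroGPU, and behave as a no-op locally,
# so the same app.py works with or without the `spaces` package.
try:
    import spaces  # available inside HF Spaces

    USING_SPACES = True
except ImportError:
    USING_SPACES = False


def gpu_decorator(func):
    # On Spaces, let ZeroGPU schedule the call on a GPU worker;
    # elsewhere, return the function unchanged.
    if USING_SPACES:
        return spaces.GPU(func)
    return func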