import gradio as gr
import torch
from parler_tts import ParlerTTSForConditionalGeneration
from transformers import AutoTokenizer, AutoFeatureExtractor, set_seed

device = "cuda:0" if torch.cuda.is_available() else "cpu"

repo_id = "parler-tts/parler_tts_mini_v0.1"

model = ParlerTTSForConditionalGeneration.from_pretrained(repo_id).to(device)
tokenizer = AutoTokenizer.from_pretrained(repo_id)
feature_extractor = AutoFeatureExtractor.from_pretrained(repo_id)

SAMPLE_RATE = feature_extractor.sampling_rate
SEED = 42


def gen_tts(text, description):
    # The description conditions the voice; the text is the prompt to be spoken.
    inputs = tokenizer(description, return_tensors="pt").to(device)
    prompt = tokenizer(text, return_tensors="pt").to(device)

    set_seed(SEED)
    generation = model.generate(
        input_ids=inputs.input_ids,
        prompt_input_ids=prompt.input_ids,
        do_sample=True,
        temperature=1.0,
    )
    audio_arr = generation.cpu().numpy().squeeze()

    return SAMPLE_RATE, audio_arr


with gr.Blocks() as block:
    gr.HTML("""
        <p>This space is a headless component of the cloud rendering engine used by AiTube.</p>
        <p>It is not available for public use, but you can use the original space.</p>
""") with gr.Row(): input_text = gr.Textbox(label="Input Text") description = gr.Textbox(label="Description") run_button = gr.Button("Generate Audio") audio_out = gr.Textbox() inputs = [input_text, description] outputs = [audio_out] run_button.click(fn=gen_tts, inputs=inputs, outputs=outputs, queue=True) block.queue() block.launch(share=True)