# -*- coding: utf-8 -*-
"""app

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1Ri9kvaz9F7Te2-5HZNUzHm-O6vgoSzhc
"""

import os
import tempfile
import gradio as gr
from TTS.utils.synthesizer import Synthesizer
from huggingface_hub import hf_hub_download

# Define constants
MODEL_INFO = [
    ["vits-male-azure", "best_model_15934.pth", "config.json", "saillab/persian-tts-azure-grapheme-60K/resolve/main/"],
    ["common_voice_reduce", "checkpoint_26000.pth", "config.json", "https://huggingface.co/saillab/persian-tts-cv15-reduct-grapheme-multispeaker/resolve/main/"],
    ["vits_arman_ebook", "best_model_66651.pth", "config.json", "https://huggingface.co/saillab/persian-tts-grapheme-arm24-finetuned-on1/resolve/main/"],
]

# Display names shown in the UI, in the same order as MODEL_INFO.
# (They could also be derived with: MODEL_NAMES = [info[0] for info in MODEL_INFO])
MODEL_NAMES = [
    "vits male azure (best)",
    "common voice reduce",
    "vits arman ebook",
    # "persian-tts-grapheme-arm24-finetuned-on1",
    # "glowtts-male",
    # "glowtts-female",
    # "female tacotron2",
]

MAX_TXT_LEN = 400
TOKEN = os.getenv('HUGGING_FACE_HUB_TOKEN')

# # Download models
# for model_name, model_file, config_file, repo_name in MODEL_INFO:
#     os.makedirs(model_name, exist_ok=True)
#     print(f"|> Downloading: {model_name}")

#     # Use hf_hub_download to download models from private Hugging Face repositories
#     hf_hub_download(repo_id=repo_name, filename=model_file, use_auth_token=TOKEN)
#     hf_hub_download(repo_id=repo_name, filename=config_file, use_auth_token=TOKEN)
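# NOTE: hf_hub_download expects a bare repo ID such as
# "saillab/persian-tts-azure-grapheme-60K"; the ".../resolve/main/" URL
# suffixes stored in MODEL_INFO would need to be stripped before this
# loop could be re-enabled.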

# Only the Azure-voice checkpoint is downloaded and loaded below; the other
# MODEL_INFO entries are kept for reference but are not used yet.
repo_name = "saillab/persian-tts-azure-grapheme-60K"
filename = "checkpoint_61800.pth"

model_file = hf_hub_download(repo_name, filename, use_auth_token=TOKEN)
config_file = hf_hub_download(repo_name, "config.json", use_auth_token=TOKEN)


# Load the synthesizer once at startup; rebuilding it on every request would
# add noticeable latency to each call.
synthesizer = Synthesizer(model_file, config_file)


def synthesize(text: str, model_name: str) -> str:
    """Synthesize speech from `text`.

    `model_name` is the radio-button choice from the UI; all choices currently
    map to the single checkpoint downloaded above.
    """
    if len(text) > MAX_TXT_LEN:
        text = text[:MAX_TXT_LEN]
        print(f"Input text was cut off as it exceeded the {MAX_TXT_LEN} character limit.")

    wavs = synthesizer.tts(text)

    # Write the waveform to a temporary file and return its path for Gradio.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
        synthesizer.save_wav(wavs, fp)
        return fp.name
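
# Possible extension (sketch only, not wired into the UI below): build one
# synthesizer per MODEL_INFO entry and select it by display name, so the
# radio button actually switches models. The repo-ID cleanup assumes the
# URL formats stored in MODEL_INFO above.
#
# from functools import lru_cache
#
# @lru_cache(maxsize=None)
# def get_synthesizer(model_name: str) -> Synthesizer:
#     idx = MODEL_NAMES.index(model_name)
#     _, ckpt_file, cfg_file, repo = MODEL_INFO[idx]
#     repo_id = repo.replace("https://huggingface.co/", "").split("/resolve/")[0]
#     model_path = hf_hub_download(repo_id, ckpt_file, use_auth_token=TOKEN)
#     config_path = hf_hub_download(repo_id, cfg_file, use_auth_token=TOKEN)
#     return Synthesizer(model_path, config_path)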


iface = gr.Interface(
    fn=synthesize,
    inputs=[
        gr.Textbox(label="Enter Text to Synthesize:", value="زین همرهان سست عناصر، دلم گرفت."),
        gr.Radio(label="Pick a Model", choices=MODEL_NAMES, value=MODEL_NAMES[0]),
    ],
    outputs=gr.Audio(label="Output", type='filepath'),
    examples=[["زین همرهان سست عناصر، دلم گرفت.", MODEL_NAMES[0]]],
    title='Persian TTS Playground',
    description="Persian text-to-speech model demo",
    article="",
    live=False
)

iface.launch()