from enum import Enum
from io import BytesIO
from os.path import exists, join

import requests
from torch import no_grad
from TTS.utils.synthesizer import Synthesizer

from crh_preprocessor.preprocessor import preprocess


class Voices(Enum):
    """List of available voices for the model."""

    # Arslan = "arslan"
    Sevil = "sevil"
    Eskander = "eskander"
    # Abibulla = "abibulla"


class TTS:
    """Text-to-speech engine for Crimean Tatar.

    Downloads the model files on first use (or reuses previously downloaded
    ones) and builds a Coqui TTS `Synthesizer` for inference.
    """

    def __init__(self, use_cuda=False) -> None:
        """Set up the engine; model files are cached in the current directory."""
        self.__setup_cache(use_cuda=use_cuda)

    def tts(self, text: str, voice: str, output_fp=None):
        """
        Run the text-to-speech engine and write the result to `output_fp`.
        - `text` - input text for the model.
        - `voice` - one of the predefined voices from the `Voices` enum.
        - `output_fp` - file-like object to write the WAV data to. A new
          in-memory `BytesIO` is created by default.
        """
        # Avoid the mutable-default-argument pitfall: a shared BytesIO created
        # at definition time would accumulate audio across calls.
        if output_fp is None:
            output_fp = BytesIO()

        if voice not in [option.value for option in Voices]:
            raise ValueError(
                f"Invalid voice selected! Please use one of the following values: {', '.join(option.value for option in Voices)}."
            )

        text = preprocess(text)

        with no_grad():
            wavs = self.synthesizer.tts(text, speaker_name=voice)
            self.synthesizer.save_wav(wavs, output_fp)

        output_fp.seek(0)

        return output_fp, text

    def __setup_cache(self, use_cuda=False):
        """Downloads models and stores them into `cache_folder`. By default stores in current directory."""
        print("downloading uk/crh/vits-tts")
        release_number = "v1.0.0"
        model_link = f"https://github.com/robinhad/qirimtatar-tts/releases/download/{release_number}/model.pth"
        config_link = f"https://github.com/robinhad/qirimtatar-tts/releases/download/{release_number}/config.json"
        speakers_link = f"https://github.com/robinhad/qirimtatar-tts/releases/download/{release_number}/speakers.pth"

        cache_folder = "."

        model_path = join(cache_folder, "model.pth")
        config_path = join(cache_folder, "config.json")
        speakers_path = join(cache_folder, "speakers.pth")

        self.__download(model_link, model_path)
        self.__download(config_link, config_path)
        self.__download(speakers_link, speakers_path)

        self.synthesizer = Synthesizer(
            model_path, config_path, speakers_path, None, None, use_cuda=use_cuda
        )

        if self.synthesizer is None:
            raise RuntimeError("Model not found")

    def __download(self, url, file_name):
        """Downloads file from `url` into local `file_name` file."""
        if not exists(file_name):
            print(f"Downloading {file_name}")
            r = requests.get(url, allow_redirects=True)
            with open(file_name, "wb") as file:
                file.write(r.content)
        else:
            print(f"Found {file_name}. Skipping download...")