""" from https://github.com/keithito/tacotron """ | |
''' | |
Cleaners are transformations that run over the input text at both training and eval time. | |
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" | |
hyperparameter. Some cleaners are English-specific. You'll typically want to use: | |
1. "english_cleaners" for English text | |
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using | |
the Unidecode library (https://pypi.python.org/pypi/Unidecode) | |
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update | |
the symbols in symbols.py to match your data). | |
''' | |

import re

import cn2an
import jieba
import pyopenjtalk
from jamo import h2j, j2hcj
from pypinyin import lazy_pinyin, BOPOMOFO
from unidecode import unidecode

# Eagerly load the jieba dictionary and the OpenJTalk resources so the first
# call to a cleaner does not pay the one-time initialization cost.
jieba.initialize()
pyopenjtalk._lazy_init()

# This is a list of Korean classifiers preceded by pure Korean numerals.
_korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'

# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')

# Regular expression matching Japanese characters, excluding punctuation marks:
_japanese_characters = re.compile(
    r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')

# Regular expression matching non-Japanese characters or punctuation marks:
_japanese_marks = re.compile(
    r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')

# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile(r'\b%s\.' % x[0], re.IGNORECASE), x[1]) for x in [
    ('mrs', 'misess'),
    ('mr', 'mister'),
    ('dr', 'doctor'),
    ('st', 'saint'),
    ('co', 'company'),
    ('jr', 'junior'),
    ('maj', 'major'),
    ('gen', 'general'),
    ('drs', 'doctors'),
    ('rev', 'reverend'),
    ('lt', 'lieutenant'),
    ('hon', 'honorable'),
    ('sgt', 'sergeant'),
    ('capt', 'captain'),
    ('esq', 'esquire'),
    ('ltd', 'limited'),
    ('col', 'colonel'),
    ('ft', 'fort'),
]]

# List of (symbol, Japanese) pairs for marks:
_symbols_to_japanese = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
    ('％', 'パーセント')
]]

# List of (hangul, hangul divided) pairs:
_hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
    ('ㄳ', 'ㄱㅅ'),
    ('ㄵ', 'ㄴㅈ'),
    ('ㄶ', 'ㄴㅎ'),
    ('ㄺ', 'ㄹㄱ'),
    ('ㄻ', 'ㄹㅁ'),
    ('ㄼ', 'ㄹㅂ'),
    ('ㄽ', 'ㄹㅅ'),
    ('ㄾ', 'ㄹㅌ'),
    ('ㄿ', 'ㄹㅍ'),
    ('ㅀ', 'ㄹㅎ'),
    ('ㅄ', 'ㅂㅅ'),
    ('ㅘ', 'ㅗㅏ'),
    ('ㅙ', 'ㅗㅐ'),
    ('ㅚ', 'ㅗㅣ'),
    ('ㅝ', 'ㅜㅓ'),
    ('ㅞ', 'ㅜㅔ'),
    ('ㅟ', 'ㅜㅣ'),
    ('ㅢ', 'ㅡㅣ'),
    ('ㅑ', 'ㅣㅏ'),
    ('ㅒ', 'ㅣㅐ'),
    ('ㅕ', 'ㅣㅓ'),
    ('ㅖ', 'ㅣㅔ'),
    ('ㅛ', 'ㅣㅗ'),
    ('ㅠ', 'ㅣㅜ')
]]

# List of (Latin alphabet, hangul) pairs:
_latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
    ('a', '에이'),
    ('b', '비'),
    ('c', '시'),
    ('d', '디'),
    ('e', '이'),
    ('f', '에프'),
    ('g', '지'),
    ('h', '에이치'),
    ('i', '아이'),
    ('j', '제이'),
    ('k', '케이'),
    ('l', '엘'),
    ('m', '엠'),
    ('n', '엔'),
    ('o', '오'),
    ('p', '피'),
    ('q', '큐'),
    ('r', '아르'),
    ('s', '에스'),
    ('t', '티'),
    ('u', '유'),
    ('v', '브이'),
    ('w', '더블유'),
    ('x', '엑스'),
    ('y', '와이'),
    ('z', '제트')
]]

# List of (Latin alphabet, bopomofo) pairs:
_latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
    ('a', 'ㄟˉ'),
    ('b', 'ㄅㄧˋ'),
    ('c', 'ㄙㄧˉ'),
    ('d', 'ㄉㄧˋ'),
    ('e', 'ㄧˋ'),
    ('f', 'ㄝˊㄈㄨˋ'),
    ('g', 'ㄐㄧˋ'),
    ('h', 'ㄝˇㄑㄩˋ'),
    ('i', 'ㄞˋ'),
    ('j', 'ㄐㄟˋ'),
    ('k', 'ㄎㄟˋ'),
    ('l', 'ㄝˊㄛˋ'),
    ('m', 'ㄝˊㄇㄨˋ'),
    ('n', 'ㄣˉ'),
    ('o', 'ㄡˉ'),
    ('p', 'ㄆㄧˉ'),
    ('q', 'ㄎㄧㄡˉ'),
    ('r', 'ㄚˋ'),
    ('s', 'ㄝˊㄙˋ'),
    ('t', 'ㄊㄧˋ'),
    ('u', 'ㄧㄡˉ'),
    ('v', 'ㄨㄧˉ'),
    ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
    ('x', 'ㄝˉㄎㄨˋㄙˉ'),
    ('y', 'ㄨㄞˋ'),
    ('z', 'ㄗㄟˋ')
]]

# List of (bopomofo, romaji) pairs:
_bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
    ('ㄅㄛ', 'p⁼wo'),
    ('ㄆㄛ', 'pʰwo'),
    ('ㄇㄛ', 'mwo'),
    ('ㄈㄛ', 'fwo'),
    ('ㄅ', 'p⁼'),
    ('ㄆ', 'pʰ'),
    ('ㄇ', 'm'),
    ('ㄈ', 'f'),
    ('ㄉ', 't⁼'),
    ('ㄊ', 'tʰ'),
    ('ㄋ', 'n'),
    ('ㄌ', 'l'),
    ('ㄍ', 'k⁼'),
    ('ㄎ', 'kʰ'),
    ('ㄏ', 'h'),
    ('ㄐ', 'ʧ⁼'),
    ('ㄑ', 'ʧʰ'),
    ('ㄒ', 'ʃ'),
    ('ㄓ', 'ʦ`⁼'),
    ('ㄔ', 'ʦ`ʰ'),
    ('ㄕ', 's`'),
    ('ㄖ', 'ɹ`'),
    ('ㄗ', 'ʦ⁼'),
    ('ㄘ', 'ʦʰ'),
    ('ㄙ', 's'),
    ('ㄚ', 'a'),
    ('ㄛ', 'o'),
    ('ㄜ', 'ə'),
    ('ㄝ', 'e'),
    ('ㄞ', 'ai'),
    ('ㄟ', 'ei'),
    ('ㄠ', 'au'),
    ('ㄡ', 'ou'),
    ('ㄧㄢ', 'yeNN'),
    ('ㄢ', 'aNN'),
    ('ㄧㄣ', 'iNN'),
    ('ㄣ', 'əNN'),
    ('ㄤ', 'aNg'),
    ('ㄧㄥ', 'iNg'),
    ('ㄨㄥ', 'uNg'),
    ('ㄩㄥ', 'yuNg'),
    ('ㄥ', 'əNg'),
    ('ㄦ', 'əɻ'),
    ('ㄧ', 'i'),
    ('ㄨ', 'u'),
    ('ㄩ', 'ɥ'),
    ('ˉ', '→'),
    ('ˊ', '↑'),
    ('ˇ', '↓↑'),
    ('ˋ', '↓'),
    ('˙', ''),
    ('，', ','),
    ('。', '.'),
    ('！', '!'),
    ('？', '?'),
    ('—', '-')
]]


def expand_abbreviations(text):
    for regex, replacement in _abbreviations:
        text = re.sub(regex, replacement, text)
    return text
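
# Example (illustrative): expand_abbreviations('Dr. Smith') -> 'doctor Smith'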


def lowercase(text):
    return text.lower()


def collapse_whitespace(text):
    return re.sub(_whitespace_re, ' ', text)


def convert_to_ascii(text):
    return unidecode(text)


def symbols_to_japanese(text):
    for regex, replacement in _symbols_to_japanese:
        text = re.sub(regex, replacement, text)
    return text


def japanese_to_romaji_with_accent(text):
    '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
    text = symbols_to_japanese(text)
    sentences = re.split(_japanese_marks, text)
    marks = re.findall(_japanese_marks, text)
    text = ''
    for i, sentence in enumerate(sentences):
        if re.match(_japanese_characters, sentence):
            if text != '':
                text += ' '
            labels = pyopenjtalk.extract_fullcontext(sentence)
            for n, label in enumerate(labels):
                phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
                if phoneme not in ['sil', 'pau']:
                    text += phoneme.replace('ch', 'ʧ').replace('sh', 'ʃ').replace('cl', 'Q')
                else:
                    continue
                # Accent features from the full-context label: F gives the mora
                # count of the accent phrase; A1 is the mora position relative
                # to the accent nucleus, A2/A3 the forward/backward positions.
                n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
                a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
                a2 = int(re.search(r"\+(\d+)\+", label).group(1))
                a3 = int(re.search(r"\+(\d+)/", label).group(1))
                if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']:
                    a2_next = -1
                else:
                    a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
                # Accent phrase boundary
                if a3 == 1 and a2_next == 1:
                    text += ' '
                # Falling
                elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras:
                    text += '↓'
                # Rising
                elif a2 == 1 and a2_next == 2:
                    text += '↑'
        if i < len(marks):
            text += unidecode(marks[i]).replace(' ', '')
    return text
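
# Illustrative usage; the exact output depends on pyopenjtalk's dictionary.
# '↑' and '↓' mark pitch rises and falls, so 'こんにちは' comes out roughly as
# 'koNniʧiwa' with accent arrows inserted:
# print(japanese_to_romaji_with_accent('こんにちは'))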


def latin_to_hangul(text):
    for regex, replacement in _latin_to_hangul:
        text = re.sub(regex, replacement, text)
    return text


def divide_hangul(text):
    for regex, replacement in _hangul_divided:
        text = re.sub(regex, replacement, text)
    return text
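
# Examples (illustrative): divide_hangul('ㄳ') -> 'ㄱㅅ'; divide_hangul('ㅘ') -> 'ㅗㅏ'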


def hangul_number(num, sino=True):
    '''Reference https://github.com/Kyubyong/g2pK'''
    num = re.sub(',', '', num)

    if num == '0':
        return '영'
    if not sino and num == '20':
        return '스무'

    digits = '123456789'
    names = '일이삼사오육칠팔구'
    digit2name = {d: n for d, n in zip(digits, names)}

    modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉'
    decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔'
    digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
    digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}

    spelledout = []
    for i, digit in enumerate(num):
        i = len(num) - i - 1
        if sino:
            if i == 0:
                name = digit2name.get(digit, '')
            elif i == 1:
                name = digit2name.get(digit, '') + '십'
                name = name.replace('일십', '십')
        else:
            if i == 0:
                name = digit2mod.get(digit, '')
            elif i == 1:
                name = digit2dec.get(digit, '')
        if digit == '0':
            if i % 4 == 0:
                last_three = spelledout[-min(3, len(spelledout)):]
                if ''.join(last_three) == '':
                    spelledout.append('')
                    continue
            else:
                spelledout.append('')
                continue
        if i == 2:
            name = digit2name.get(digit, '') + '백'
            name = name.replace('일백', '백')
        elif i == 3:
            name = digit2name.get(digit, '') + '천'
            name = name.replace('일천', '천')
        elif i == 4:
            name = digit2name.get(digit, '') + '만'
            name = name.replace('일만', '만')
        elif i == 5:
            name = digit2name.get(digit, '') + '십'
            name = name.replace('일십', '십')
        elif i == 6:
            name = digit2name.get(digit, '') + '백'
            name = name.replace('일백', '백')
        elif i == 7:
            name = digit2name.get(digit, '') + '천'
            name = name.replace('일천', '천')
        elif i == 8:
            name = digit2name.get(digit, '') + '억'
        elif i == 9:
            name = digit2name.get(digit, '') + '십'
        elif i == 10:
            name = digit2name.get(digit, '') + '백'
        elif i == 11:
            name = digit2name.get(digit, '') + '천'
        elif i == 12:
            name = digit2name.get(digit, '') + '조'
        elif i == 13:
            name = digit2name.get(digit, '') + '십'
        elif i == 14:
            name = digit2name.get(digit, '') + '백'
        elif i == 15:
            name = digit2name.get(digit, '') + '천'
        spelledout.append(name)
    return ''.join(elem for elem in spelledout)
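
# Examples (illustrative):
#   hangul_number('2023')          -> '이천이십삼' (Sino-Korean reading)
#   hangul_number('3', sino=False) -> '세' (native Korean modifier form)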


def number_to_hangul(text):
    '''Reference https://github.com/Kyubyong/g2pK'''
    tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text))
    for token in tokens:
        num, classifier = token
        if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
            spelledout = hangul_number(num, sino=False)
        else:
            spelledout = hangul_number(num, sino=True)
        text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}')
    # Spell out any remaining digits one by one.
    digits = '0123456789'
    names = '영일이삼사오육칠팔구'
    for d, n in zip(digits, names):
        text = text.replace(d, n)
    return text
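
# Example (illustrative): '3마리' -> '세마리' (native reading before a classifier);
# any digits left over are spelled out one by one, e.g. '7' -> '칠'.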


def number_to_chinese(text):
    numbers = re.findall(r'\d+(?:\.?\d+)?', text)
    for number in numbers:
        text = text.replace(number, cn2an.an2cn(number), 1)
    return text
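
# Example (illustrative): number_to_chinese('123') -> '一百二十三' via cn2an.an2cn;
# decimals are converted as well, e.g. '3.5' -> '三点五'.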


def chinese_to_bopomofo(text):
    text = text.replace('、', '，').replace('；', '，').replace('：', '，')
    words = jieba.lcut(text, cut_all=False)
    text = ''
    for word in words:
        bopomofos = lazy_pinyin(word, BOPOMOFO)
        if not re.search('[\u4e00-\u9fff]', word):
            text += word
            continue
        for i in range(len(bopomofos)):
            if re.match('[\u3105-\u3129]', bopomofos[i][-1]):
                bopomofos[i] += 'ˉ'
        if text != '':
            text += ' '
        text += ''.join(bopomofos)
    return text
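
# Example (illustrative): chinese_to_bopomofo('你好') -> 'ㄋㄧˇㄏㄠˇ'; first-tone
# syllables, which pypinyin leaves unmarked, get an explicit trailing 'ˉ'.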


def latin_to_bopomofo(text):
    for regex, replacement in _latin_to_bopomofo:
        text = re.sub(regex, replacement, text)
    return text


def bopomofo_to_romaji(text):
    for regex, replacement in _bopomofo_to_romaji:
        text = re.sub(regex, replacement, text)
    return text
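
# Example (illustrative): bopomofo_to_romaji('ㄋㄧˇㄏㄠˇ') -> 'ni↓↑hau↓↑'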


def basic_cleaners(text):
    '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text


def transliteration_cleaners(text):
    '''Pipeline for non-English text that transliterates to ASCII.'''
    text = convert_to_ascii(text)
    text = lowercase(text)
    text = collapse_whitespace(text)
    return text
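
# Example (illustrative): transliteration_cleaners('Héllo  Wörld') -> 'hello world'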


def japanese_cleaners(text):
    text = japanese_to_romaji_with_accent(text)
    if len(text) > 0 and re.match('[A-Za-z]', text[-1]):
        text += '.'
    return text


def japanese_cleaners2(text):
    return japanese_cleaners(text).replace('ts', 'ʦ').replace('...', '…')


def korean_cleaners(text):
    '''Pipeline for Korean text'''
    text = latin_to_hangul(text)
    text = number_to_hangul(text)
    text = j2hcj(h2j(text))
    text = divide_hangul(text)
    if len(text) > 0 and re.match('[\u3131-\u3163]', text[-1]):
        text += '.'
    return text
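
# Example (illustrative): korean_cleaners('안녕') -> 'ㅇㅏㄴㄴㅣㅓㅇ.'
# (syllables are decomposed into jamo, compound jamo are divided, and a final
# '.' is appended when the text ends in a bare jamo).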


def chinese_cleaners(text):
    '''Pipeline for Chinese text'''
    text = number_to_chinese(text)
    text = chinese_to_bopomofo(text)
    text = latin_to_bopomofo(text)
    if len(text) > 0 and re.match('[ˉˊˇˋ˙]', text[-1]):
        text += '。'
    return text
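
# Example (illustrative): chinese_cleaners('你好') -> 'ㄋㄧˇㄏㄠˇ。'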


def zh_ja_mixture_cleaners(text):
    chinese_texts = re.findall(r'\[ZH\].*?\[ZH\]', text)
    japanese_texts = re.findall(r'\[JA\].*?\[JA\]', text)
    for chinese_text in chinese_texts:
        cleaned_text = number_to_chinese(chinese_text[4:-4])
        cleaned_text = chinese_to_bopomofo(cleaned_text)
        cleaned_text = latin_to_bopomofo(cleaned_text)
        cleaned_text = bopomofo_to_romaji(cleaned_text)
        # Prepend glides: 'ia/io/ie' -> 'ya/yo/ye', 'ua/uo/uə/ue' -> 'wa/wo/wə/we'.
        cleaned_text = re.sub('i[aoe]', lambda x: 'y' + x.group(0)[1:], cleaned_text)
        cleaned_text = re.sub('u[aoəe]', lambda x: 'w' + x.group(0)[1:], cleaned_text)
        # Insert the apical (syllabic) vowel before tone arrows after retroflex
        # and dental sibilant initials.
        cleaned_text = re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑]+)',
                              lambda x: x.group(1) + 'ɹ`' + x.group(2), cleaned_text).replace('ɻ', 'ɹ`')
        cleaned_text = re.sub('([ʦs][⁼ʰ]?)([→↓↑]+)',
                              lambda x: x.group(1) + 'ɹ' + x.group(2), cleaned_text)
        text = text.replace(chinese_text, cleaned_text + ' ', 1)
    for japanese_text in japanese_texts:
        cleaned_text = japanese_to_romaji_with_accent(japanese_text[4:-4]).replace(
            'ts', 'ʦ').replace('u', 'ɯ').replace('...', '…')
        text = text.replace(japanese_text, cleaned_text + ' ', 1)
    text = text[:-1]
    if len(text) > 0 and re.match('[A-Za-zɯɹəɥ→↓↑]', text[-1]):
        text += '.'
    return text
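
# Illustrative usage: each language span is wrapped in matching tags, e.g.
#   zh_ja_mixture_cleaners('[ZH]你好[ZH][JA]こんにちは[JA]')
# The Chinese span comes out as romanized phones with tone arrows, the
# Japanese span as romaji with pitch-accent arrows.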