""" from https://github.com/keithito/tacotron """ ''' Cleaners are transformations that run over the input text at both training and eval time. Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" hyperparameter. Some cleaners are English-specific. You'll typically want to use: 1. "english_cleaners" for English text 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using the Unidecode library (https://pypi.python.org/pypi/Unidecode) 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update the symbols in symbols.py to match your data). ''' import re from unidecode import unidecode # import pyopenjtalk from jamo import h2j, j2hcj from pypinyin import lazy_pinyin, BOPOMOFO import jieba, cn2an # This is a list of Korean classifiers preceded by pure Korean numerals. _korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통' # Regular expression matching whitespace: _whitespace_re = re.compile(r'\s+') # Regular expression matching Japanese without punctuation marks: _japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') # Regular expression matching non-Japanese characters or punctuation marks: _japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') # List of (regular expression, replacement) pairs for abbreviations: _abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [ ('mrs', 'misess'), ('mr', 'mister'), ('dr', 'doctor'), ('st', 'saint'), ('co', 'company'), ('jr', 'junior'), ('maj', 'major'), ('gen', 'general'), ('drs', 'doctors'), ('rev', 'reverend'), ('lt', 'lieutenant'), ('hon', 'honorable'), ('sgt', 'sergeant'), ('capt', 'captain'), ('esq', 'esquire'), ('ltd', 'limited'), ('col', 'colonel'), ('ft', 'fort'), ]] # List of (hangul, hangul divided) pairs: _hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [ ('ㄳ', 'ㄱㅅ'), ('ㄵ', 'ㄴㅈ'), ('ㄶ', 'ㄴㅎ'), ('ㄺ', 'ㄹㄱ'), ('ㄻ', 'ㄹㅁ'), ('ㄼ', 'ㄹㅂ'), ('ㄽ', 'ㄹㅅ'), ('ㄾ', 'ㄹㅌ'), ('ㄿ', 'ㄹㅍ'), ('ㅀ', 'ㄹㅎ'), ('ㅄ', 'ㅂㅅ'), ('ㅘ', 'ㅗㅏ'), ('ㅙ', 'ㅗㅐ'), ('ㅚ', 'ㅗㅣ'), ('ㅝ', 'ㅜㅓ'), ('ㅞ', 'ㅜㅔ'), ('ㅟ', 'ㅜㅣ'), ('ㅢ', 'ㅡㅣ'), ('ㅑ', 'ㅣㅏ'), ('ㅒ', 'ㅣㅐ'), ('ㅕ', 'ㅣㅓ'), ('ㅖ', 'ㅣㅔ'), ('ㅛ', 'ㅣㅗ'), ('ㅠ', 'ㅣㅜ') ]] # List of (Latin alphabet, hangul) pairs: _latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ ('a', '에이'), ('b', '비'), ('c', '시'), ('d', '디'), ('e', '이'), ('f', '에프'), ('g', '지'), ('h', '에이치'), ('i', '아이'), ('j', '제이'), ('k', '케이'), ('l', '엘'), ('m', '엠'), ('n', '엔'), ('o', '오'), ('p', '피'), ('q', '큐'), ('r', '아르'), ('s', '에스'), ('t', '티'), ('u', '유'), ('v', '브이'), ('w', '더블유'), ('x', '엑스'), ('y', '와이'), ('z', '제트') ]] # List of (Latin alphabet, bopomofo) pairs: _latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ ('a', 'ㄟˉ'), ('b', 'ㄅㄧˋ'), ('c', 'ㄙㄧˉ'), ('d', 'ㄉㄧˋ'), ('e', 'ㄧˋ'), ('f', 'ㄝˊㄈㄨˋ'), ('g', 'ㄐㄧˋ'), ('h', 'ㄝˇㄑㄩˋ'), ('i', 'ㄞˋ'), ('j', 'ㄐㄟˋ'), ('k', 'ㄎㄟˋ'), ('l', 'ㄝˊㄛˋ'), ('m', 'ㄝˊㄇㄨˋ'), ('n', 'ㄣˉ'), ('o', 'ㄡˉ'), ('p', 'ㄆㄧˉ'), ('q', 'ㄎㄧㄡˉ'), ('r', 'ㄚˋ'), ('s', 'ㄝˊㄙˋ'), ('t', 'ㄊㄧˋ'), ('u', 'ㄧㄡˉ'), ('v', 'ㄨㄧˉ'), ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'), ('x', 'ㄝˉㄎㄨˋㄙˋ'), ('y', 'ㄨㄞˋ'), ('z', 'ㄗㄟˋ') ]] # List of (bopomofo, romaji) pairs: _bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [ ('ㄅㄛ', 'p⁼wo'), ('ㄆㄛ', 'pʰwo'), ('ㄇㄛ', 'mwo'), ('ㄈㄛ', 'fwo'), ('ㄅ', 'p⁼'), ('ㄆ', 'pʰ'), ('ㄇ', 

def expand_abbreviations(text):
    for regex, replacement in _abbreviations:
        text = re.sub(regex, replacement, text)
    return text


def lowercase(text):
    return text.lower()


def collapse_whitespace(text):
    return re.sub(_whitespace_re, ' ', text)


def convert_to_ascii(text):
    return unidecode(text)


def japanese_to_romaji_with_accent(text):
    '''Reference: https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
    sentences = re.split(_japanese_marks, text)
    marks = re.findall(_japanese_marks, text)
    text = ''
    for i, sentence in enumerate(sentences):
        if re.match(_japanese_characters, sentence):
            if text != '':
                text += ' '
            labels = pyopenjtalk.extract_fullcontext(sentence)
            for n, label in enumerate(labels):
                phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
                if phoneme not in ['sil', 'pau']:
                    text += phoneme.replace('ch', 'ʧ').replace('sh', 'ʃ').replace('cl', 'Q')
                else:
                    continue
                # /F: field: number of moras in the current accent phrase.
                n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
                # /A: fields: mora position relative to the accent nucleus (a1),
                # and mora position counted from the start (a2) and end (a3)
                # of the accent phrase.
                a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
                a2 = int(re.search(r"\+(\d+)\+", label).group(1))
                a3 = int(re.search(r"\+(\d+)/", label).group(1))
                if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil', 'pau']:
                    a2_next = -1
                else:
                    a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
                # Accent phrase boundary
                if a3 == 1 and a2_next == 1:
                    text += ' '
                # Falling pitch
                elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras:
                    text += '↓'
                # Rising pitch
                elif a2 == 1 and a2_next == 2:
                    text += '↑'
        if i < len(marks):
            text += unidecode(marks[i]).replace(' ', '')
    return text
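
# Illustrative sketch (an addition, not part of the original file): the
# module docstring describes selecting cleaners by a comma-delimited list of
# names. A caller typically resolves each name to a function defined in this
# module and applies them in order, roughly as below; the name `_clean_text`
# and its error handling are assumptions for demonstration only.
def _clean_text(text, cleaner_names):
    for name in cleaner_names.split(','):
        cleaner = globals().get(name.strip())
        if cleaner is None:
            raise ValueError('Unknown cleaner: %s' % name)
        text = cleaner(text)
    return text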