# -*- encoding: utf-8 -*-

import functools
import logging
from pathlib import Path
from typing import Any, Dict, Iterable, List, NamedTuple, Set, Union

import numpy as np
import yaml

root_dir = Path(__file__).resolve().parent

logger_initialized = {}


class TokenIDConverterError(Exception):
    """Raised when token/ID conversion receives malformed input."""


class TokenIDConverter:
    def __init__(
        self,
        token_list: Union[List, str],
    ):
        self.token_list = token_list
        # By convention, the last entry of the token list is the unknown symbol.
        self.unk_symbol = token_list[-1]
        self.token2id = {v: i for i, v in enumerate(self.token_list)}
        self.unk_id = self.token2id[self.unk_symbol]

    def get_num_vocabulary_size(self) -> int:
        return len(self.token_list)

    def ids2tokens(self, integers: Union[np.ndarray, Iterable[int]]) -> List[str]:
        if isinstance(integers, np.ndarray) and integers.ndim != 1:
            raise TokenIDConverterError(
                f"Must be a 1-dim ndarray, but got {integers.ndim}"
            )
        return [self.token_list[i] for i in integers]

    def tokens2ids(self, tokens: Iterable[str]) -> List[int]:
        # Out-of-vocabulary tokens fall back to the ID of the unknown symbol.
        return [self.token2id.get(i, self.unk_id) for i in tokens]
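

# Usage sketch (the token list below is hypothetical; by convention the last
# entry is the unknown symbol):
#
#   converter = TokenIDConverter(["<blank>", "a", "b", "<unk>"])
#   converter.get_num_vocabulary_size()      # -> 4
#   converter.tokens2ids(["a", "oov"])       # -> [1, 3] (OOV maps to unk_id)
#   converter.ids2tokens(np.array([1, 2]))   # -> ["a", "b"]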


class CharTokenizer:
    def __init__(
        self,
        symbol_value: Union[Path, str, Iterable[str], None] = None,
        space_symbol: str = "<space>",
        remove_non_linguistic_symbols: bool = False,
    ):
        self.space_symbol = space_symbol
        self.non_linguistic_symbols = self.load_symbols(symbol_value)
        self.remove_non_linguistic_symbols = remove_non_linguistic_symbols

    @staticmethod
    def load_symbols(value: Union[Path, str, Iterable[str], None] = None) -> Set[str]:
        if value is None:
            return set()

        # isinstance() does not accept subscripted generics such as
        # Iterable[str]; treat any non-path argument as an iterable of symbols.
        if not isinstance(value, (Path, str)):
            return set(value)

        file_path = Path(value)
        if not file_path.exists():
            logging.warning("%s doesn't exist.", file_path)
            return set()

        with file_path.open("r", encoding="utf-8") as f:
            return set(line.rstrip() for line in f)

    def text2tokens(self, line: str) -> List[str]:
        tokens = []
        while len(line) != 0:
            # Consume a non-linguistic symbol first if one starts the line.
            for w in self.non_linguistic_symbols:
                if line.startswith(w):
                    if not self.remove_non_linguistic_symbols:
                        tokens.append(line[: len(w)])
                    line = line[len(w) :]
                    break
            else:
                # Otherwise consume a single character, mapping spaces to the
                # configured space symbol.
                t = line[0]
                if t == " ":
                    t = self.space_symbol
                tokens.append(t)
                line = line[1:]
        return tokens

    def tokens2text(self, tokens: Iterable[str]) -> str:
        tokens = [t if t != self.space_symbol else " " for t in tokens]
        return "".join(tokens)

    def __repr__(self):
        return (
            f"{self.__class__.__name__}("
            f'space_symbol="{self.space_symbol}", '
            f'non_linguistic_symbols="{self.non_linguistic_symbols}"'
            f")"
        )
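

# Usage sketch (the "<noise>" marker is hypothetical; any symbol listed in
# symbol_value is consumed as a single token, or dropped entirely when
# remove_non_linguistic_symbols=True):
#
#   tokenizer = CharTokenizer(symbol_value=["<noise>"])
#   tokenizer.text2tokens("<noise>ab c")          # -> ["<noise>", "a", "b", "<space>", "c"]
#   tokenizer.tokens2text(["a", "<space>", "b"])  # -> "a b"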


class Hypothesis(NamedTuple):
    """Hypothesis data type."""

    yseq: np.ndarray
    score: Union[float, np.ndarray] = 0
    scores: Dict[str, Union[float, np.ndarray]] = dict()
    states: Dict[str, Any] = dict()

    def asdict(self) -> dict:
        """Convert data to JSON-friendly dict."""
        return self._replace(
            yseq=self.yseq.tolist(),
            score=float(self.score),
            scores={k: float(v) for k, v in self.scores.items()},
        )._asdict()
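

# Usage sketch (the sequence and scores are made up):
#
#   hyp = Hypothesis(yseq=np.array([1, 5, 9]), score=-3.2, scores={"am": -3.2})
#   hyp.asdict()
#   # -> {"yseq": [1, 5, 9], "score": -3.2, "scores": {"am": -3.2}, "states": {}}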


def read_yaml(yaml_path: Union[str, Path]) -> Dict:
    if not Path(yaml_path).exists():
        raise FileNotFoundError(f"The {yaml_path} does not exist.")

    with open(str(yaml_path), "rb") as f:
        data = yaml.load(f, Loader=yaml.Loader)
    return data
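

# Usage sketch ("config.yaml" is a hypothetical path). The result is plain
# dicts/lists; note that yaml.SafeLoader is the safer choice when the file
# contains only plain mappings and does not need Python-specific tags:
#
#   config = read_yaml("config.yaml")
#   config.get("model")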


@functools.lru_cache()
def get_logger(name="funasr_torch"):
    """Initialize and get a logger by name.

    If the logger has not been initialized, this function initializes it by
    adding a StreamHandler; otherwise the already-initialized logger is
    returned directly.

    Args:
        name (str): Logger name.

    Returns:
        logging.Logger: The expected logger.
    """
    logger = logging.getLogger(name)
    if name in logger_initialized:
        return logger

    # Records from a child of an initialized logger propagate to the parent's
    # handlers, so the child can be returned as-is.
    for logger_name in logger_initialized:
        if name.startswith(logger_name):
            return logger

    formatter = logging.Formatter(
        "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%Y/%m/%d %H:%M:%S"
    )

    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    logger.addHandler(sh)
    logger_initialized[name] = True
    logger.propagate = False
    return logger
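

# Usage sketch:
#
#   logger = get_logger()
#   logger.warning("decoding finished")
#   # prints "[YYYY/MM/DD hh:mm:ss] funasr_torch WARNING: decoding finished".
#   # No level is set on the logger, so it inherits the root default (WARNING);
#   # call logger.setLevel(logging.INFO) to see info-level records.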