# This is an example that demonstrates how to configure a model file.
# You can modify the configuration according to your own requirements.

# To print the register table:
#   from funasr.register import tables
#   tables.print()

# network architecture
model: Conformer
model_conf:
    ctc_weight: 0.3
    lsm_weight: 0.1 # label smoothing weight
    length_normalized_loss: false

# encoder
encoder: ConformerEncoder
encoder_conf:
    output_size: 256 # dimension of attention
    attention_heads: 4
    linear_units: 2048 # the number of units of the position-wise feed-forward layer
    num_blocks: 12 # the number of encoder blocks
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    attention_dropout_rate: 0.0
    input_layer: conv2d # encoder input layer type
    normalize_before: true
    pos_enc_layer_type: rel_pos
    selfattention_layer_type: rel_selfattn
    activation_type: swish
    macaron_style: true
    use_cnn_module: true
    cnn_module_kernel: 15

# decoder
decoder: TransformerRWKVDecoder
decoder_conf:
    attention_heads: 4
    linear_units: 2048
    num_blocks: 6
    dropout_rate: 0.1
    positional_dropout_rate: 0.1
    self_attention_dropout_rate: 0.0
    src_attention_dropout_rate: 0.0
    input_layer: embed
    rwkv_cfg:
        n_embd: 256
        dropout: 0
        head_size_a: 64
        ctx_len: 512
        dim_att: 256 # should equal decoder_conf.rwkv_cfg.n_embd
        dim_ffn: null
        head_size_divisor: 4
        n_layer: 6
        pre_ffn: 0
        ln0: false
        ln1: false

# frontend related
frontend: WavFrontend
frontend_conf:
    fs: 16000
    window: hamming
    n_mels: 80
    frame_length: 25
    frame_shift: 10
    lfr_m: 1
    lfr_n: 1

specaug: SpecAug
specaug_conf:
    apply_time_warp: true
    time_warp_window: 5
    time_warp_mode: bicubic
    apply_freq_mask: true
    freq_mask_width_range:
        - 0
        - 30
    num_freq_mask: 2
    apply_time_mask: true
    time_mask_width_range:
        - 0
        - 40
    num_time_mask: 2

train_conf:
    accum_grad: 1
    grad_clip: 5
    max_epoch: 150
    keep_nbest_models: 10
    log_interval: 50

optim: adam
optim_conf:
    lr: 0.0005
scheduler: warmuplr
scheduler_conf:
    warmup_steps: 30000

dataset: AudioDataset
dataset_conf:
    index_ds: IndexDSJsonl
    batch_sampler: EspnetStyleBatchSampler
    batch_type: length # example or length
    batch_size: 25000 # if batch_type is example, batch_size is the number of samples; if length, it is source_token_len + target_token_len
    max_token_length: 2048 # filter out samples whose source_token_len + target_token_len exceeds max_token_length
    buffer_size: 1024
    shuffle: true
    num_workers: 4
    preprocessor_speech: SpeechPreprocessSpeedPerturb
    preprocessor_speech_conf:
        speed_perturb: [0.9, 1.0, 1.1]

tokenizer: CharTokenizer
tokenizer_conf:
    unk_symbol: <unk>

ctc_conf:
    dropout_rate: 0.0
    ctc_type: builtin
    reduce: true
    ignore_nan_grad: true

normalize: null
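
# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the template): in FunASR 1.x a
# config like this is typically consumed by the hydra-based training entry
# point, roughly as below. The data paths and output directory are
# placeholders, and the exact flags may differ across FunASR versions.
#
#   torchrun --nproc_per_node 1 funasr/bin/train.py \
#       --config-path ./conf \
#       --config-name config.yaml \
#       ++train_data_set_list=data/train.jsonl \
#       ++valid_data_set_list=data/val.jsonl \
#       ++output_dir=./exp
#
# Individual keys in this file can be overridden from the command line with
# the same ++ syntax, e.g. ++dataset_conf.batch_size=20000.
# ---------------------------------------------------------------------------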