# @package _group_

common:
  fp16: false
  fp16_no_flatten_grads: true
  log_format: json
  log_interval: 100
  tensorboard_logdir: tb
  reset_logging: false
  suppress_crashes: false

checkpoint:
  save_interval: 1000
  save_interval_updates: 1000
  no_epoch_checkpoints: true
  best_checkpoint_metric: weighted_lm_ppl
  save_dir: .

distributed_training:
  distributed_world_size: 1

task:
  _name: unpaired_audio_text
  data: ???
  text_data: ???
  labels: phn
  sort_by_length: false
  unfiltered: false
  max_length: null
  append_eos: false
  kenlm_path: ???

dataset:
  num_workers: 6
  batch_size: 160
  skip_invalid_size_inputs_valid_test: true
  valid_subset: valid
  validate_interval: 1000
  validate_interval_updates: 1000

criterion:
  _name: model
  log_keys:
    - accuracy_dense
    - accuracy_token
    - temp
    - code_ppl

optimization:
  max_update: 150000
  clip_norm: 5.0
  lr: [0]

optimizer:
  _name: composite
  groups:
    generator:
      lr: [0.0004]
      lr_float: null
      optimizer:
        _name: adam
        adam_betas: [0.5, 0.98]
        adam_eps: 1e-06
        weight_decay: 0
        amsgrad: false
      lr_scheduler:
        _name: fixed
        warmup_updates: 0
    discriminator:
      lr: [0.0005]
      lr_float: null
      optimizer:
        _name: adam
        adam_betas: [0.5, 0.98]
        adam_eps: 1e-06
        weight_decay: 0.0001
        amsgrad: false
      lr_scheduler:
        _name: fixed
        warmup_updates: 0

lr_scheduler: pass_through

model:
  _name: wav2vec_u

  discriminator_dim: 384
  discriminator_depth: 2
  discriminator_kernel: 6
  discriminator_linear_emb: false
  discriminator_causal: true
  discriminator_max_pool: false
  discriminator_act_after_linear: false
  discriminator_dropout: 0.0
  discriminator_weight_norm: false

  generator_stride: 1
  generator_kernel: 4
  generator_bias: false
  generator_dropout: 0.1

  smoothness_weight: 0.5
  smoothing: 0
  smoothing_one_sided: false
  gumbel: false
  hard_gumbel: false
  gradient_penalty: 1.5
  code_penalty: 4.0
  temp: [2, 0.1, 0.99995]
  input_dim: 512

  segmentation:
    type: JOIN
    mean_pool_join: false
    remove_zeros: false
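
# The three required "???" fields above (task.data, task.text_data, task.kenlm_path)
# must be supplied at launch time via Hydra overrides. A minimal launch sketch,
# assuming this file is used with the standard fairseq-hydra-train entry point and
# saved under a config directory as "w2vu.yaml"; the directory and paths below are
# placeholders, not values from this config:
#
#   fairseq-hydra-train \
#       --config-dir config/gan \
#       --config-name w2vu \
#       task.data=/path/to/audio_features \
#       task.text_data=/path/to/phonemized_text \
#       task.kenlm_path=/path/to/phoneme_lm.bin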