# @package _group_
# wav2vec-U GAN training on unpaired audio and text.

common:
  fp16: false
  fp16_no_flatten_grads: true
  log_format: json
  log_interval: 100
  tensorboard_logdir: tb
  reset_logging: false
  suppress_crashes: false

checkpoint:
  save_interval: 1000
  save_interval_updates: 1000
  no_epoch_checkpoints: true
  best_checkpoint_metric: weighted_lm_ppl
  save_dir: .

distributed_training:
  distributed_world_size: 1

# ??? marks required values that must be supplied on the command line.
task:
  _name: unpaired_audio_text
  data: ???
  text_data: ???
  labels: phn
  sort_by_length: false
  unfiltered: false
  max_length: null
  append_eos: false
  kenlm_path: ???
  aux_target_postfix: km

dataset:
  num_workers: 6
  batch_size: 160
  skip_invalid_size_inputs_valid_test: true
  valid_subset: valid
  validate_interval: 1000
  validate_interval_updates: 1000

# the loss is computed inside the model; these keys are read from its output for logging
criterion:
  _name: model
  log_keys:
    - accuracy_dense
    - accuracy_token
    - temp
    - code_ppl

optimization:
  max_update: 150000
  clip_norm: 5.0
  lr: [0]  # placeholder; per-group LRs are set in the composite optimizer below

# composite optimizer: separate Adam settings and schedules for generator and discriminator
optimizer:
  _name: composite
  groups:
    generator:
      lr: [0.00005]
      lr_float: null
      optimizer:
        _name: adam
        adam_betas: [0.5, 0.98]
        adam_eps: 1e-06
        weight_decay: 0
        amsgrad: false
      lr_scheduler:
        _name: fixed
        warmup_updates: 0
    discriminator:
      lr: [0.0003]
      lr_float: null
      optimizer:
        _name: adam
        adam_betas: [0.5, 0.98]
        adam_eps: 1e-06
        weight_decay: 0.0001
        amsgrad: false
      lr_scheduler:
        _name: fixed
        warmup_updates: 0

lr_scheduler: pass_through  # defer scheduling to the per-group schedulers above

model:
  _name: wav2vec_u

  discriminator_dim: 384
  discriminator_depth: 2
  discriminator_kernel: 8
  discriminator_linear_emb: false
  discriminator_causal: true
  discriminator_max_pool: false
  discriminator_act_after_linear: false
  discriminator_dropout: 0.0
  discriminator_weight_norm: false

  generator_stride: 3
  generator_kernel: 9
  generator_bias: false
  generator_dropout: 0.1
  generator_batch_norm: 30
  generator_residual: true

  smoothness_weight: 1.5
  smoothing: 0
  smoothing_one_sided: false
  gumbel: false
  hard_gumbel: false
  gradient_penalty: 1.0
  code_penalty: 3.0
  temp: [2, 0.1, 0.99995]  # temperature schedule: (start, end, decay factor)
  input_dim: 1024
  mmi_weight: 0.5
  target_dim: 64

  segmentation:
    type: JOIN
    mean_pool_join: false
    remove_zeros: false

hydra:
  job:
    config:
      override_dirname:
        kv_sep: ':'
        item_sep: '__'
        exclude_keys:
          - run_config
          - distributed_training.distributed_port
          - common.user_dir
          - task.data
          - task.kenlm_path
          - task.text_data
          - model.generator_layers
          - task.labels
          - task.force_model_seed
  sweep:
    dir: /checkpoint/${env:USER}/${env:PREFIX}/${hydra.job.config_name}/${hydra.job.override_dirname}
    subdir: ${hydra.job.num}
  launcher:
    submitit_folder: ${hydra.sweep.dir}
    timeout_min: 3000
    cpus_per_task: 10
    gpus_per_node: 1
    tasks_per_node: 1
    mem_gb: 120
    nodes: 1
    name: ${env:PREFIX}_${hydra.job.config_name}
    partition: devlab,learnlab,learnfair,scavenge
    comment: intern_ending_soon
    constraint: volta32gb
    max_num_timeout: 30