# lightning.pytorch==2.1.3
seed_everything: 31
trainer:
  accelerator: auto
  strategy: auto
  devices: auto
  num_nodes: 1
  precision: null
  logger: null
  callbacks:
  - class_path: callbacks.SaveImageCallback
    init_args:
      save_interval: 0
      final_dir: out
  fast_dev_run: false
  max_epochs: 10
  min_epochs: 10
  max_steps: -1
  min_steps: null
  max_time: null
  limit_train_batches: null
  limit_val_batches: 50
  limit_test_batches: null
  limit_predict_batches: null
  overfit_batches: 0.0
  val_check_interval: null
  check_val_every_n_epoch: 1
  num_sanity_val_steps: null
  log_every_n_steps: 3
  enable_checkpointing: null
  enable_progress_bar: null
  enable_model_summary: null
  accumulate_grad_batches: 1
  gradient_clip_val: null
  gradient_clip_algorithm: null
  deterministic: null
  benchmark: null
  inference_mode: true
  use_distributed_sampler: true
  profiler: null
  detect_anomaly: false
  barebones: false
  plugins: null
  sync_batchnorm: false
  reload_dataloaders_every_n_epochs: 0
  default_root_dir: null
model:
  transform: tanh
  width: 256
  depth: 8
  bias: true
  alpha: 0.0
data:
  val_size: 10000
  train_size: 10000
  batch_size: 256
  num_workers: 3
ckpt_path: null
optimizer:
  class_path: torch.optim.AdamW
  init_args:
    lr: 0.001
    betas:
    - 0.9
    - 0.999
    eps: 1.0e-08
    weight_decay: 0.01
    amsgrad: false
    maximize: false
    foreach: null
    capturable: false
    differentiable: false
    fused: null
lr_scheduler:
  class_path: lightning.pytorch.cli.ReduceLROnPlateau
  init_args:
    monitor: hp_metric
    mode: min
    factor: 0.05
    patience: 5
    threshold: 0.0001
    threshold_mode: rel
    cooldown: 10
    min_lr: 0.0
    eps: 1.0e-08
    verbose: true
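
# Usage sketch (assumptions: the project exposes a LightningCLI entry point,
# here called main.py, and this file is saved as config.yaml; both names are
# hypothetical and not confirmed by this config):
#
#   python main.py fit --config config.yaml
#
# LightningCLI resolves each class_path by import path, so
# callbacks.SaveImageCallback assumes a callbacks.py module importable from
# the working directory, while torch.optim.AdamW and
# lightning.pytorch.cli.ReduceLROnPlateau come from the installed packages.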