@@ -27,12 +27,12 @@ learning_rate_values = [1e-3]
 # learning_rate_values = [5e-4]
 
 # alpha_values = [0, .25, 0.5, 0.75, 1] # alpha = 0 is unsupervised. alpha = 1 is supervised.
-alpha_values = [0]
+alpha_values = [1.0]
 # widths = [2**k for k in range(4, 13)]
 # depths = [1, 2, 4, 8, 16]
 widths, depths = [512], [4]
 
-batch_size_values = [64, 256, 1024]
+batch_size_values = [256]
 max_epochs_values = [100]
 seeds = list(range(21, 1992))
 optimizers = [
@@ -73,7 +73,7 @@ for idx, params in enumerate(search_params):
 python newmain.py fit \
 --seed_everything {s} \
 --data.batch_size {bs} \
---data.train_size 50000 \
+--data.train_size 0 \
 --data.val_size 10000 \
 --model.alpha {a} \
 --model.width {w} \
@@ -90,13 +90,13 @@ python newmain.py fit \
 --trainer.callbacks.init_args.save_interval 0 \
 --optimizer torch.optim.{opt} \
 --optimizer.init_args.lr {lr} \
---trainer.callbacks+ lightning.pytorch.callbacks.LearningRateFinder \
---lr_scheduler lightning.pytorch.cli.ReduceLROnPlateau \
---lr_scheduler.init_args.monitor hp_metric \
---lr_scheduler.init_args.factor 0.05 \
---lr_scheduler.init_args.patience 5 \
---lr_scheduler.init_args.cooldown 10 \
---lr_scheduler.init_args.verbose true
+--trainer.callbacks+ lightning.pytorch.callbacks.LearningRateFinder
+# --lr_scheduler lightning.pytorch.cli.ReduceLROnPlateau \
+# --lr_scheduler.init_args.monitor hp_metric \
+# --lr_scheduler.init_args.factor 0.05 \
+# --lr_scheduler.init_args.patience 5 \
+# --lr_scheduler.init_args.cooldown 10 \
+# --lr_scheduler.init_args.verbose true
 """
 # job_name = f"color2_{bs}_{a}_{lr:2.2e}"
 # job_plugin.run(cmd, machine=Machine.T4, name=job_name)
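
# --- Context sketch (not part of the patch) --------------------------------
# A minimal, hypothetical reconstruction of the sweep loop these hunks sit
# inside, assuming `search_params` is an itertools.product over the value
# lists above. Only the list names, `search_params`, `cmd`, and the
# {s}/{bs}/{a}/{w}/{lr}/{opt} placeholders appear in the diff; the unpacking
# order, the `--model.depth {d}` and `--trainer.max_epochs {me}` flags, and
# the "Adam" entry below are assumptions, not the author's exact code.
import itertools

learning_rate_values = [1e-3]
alpha_values = [1.0]
widths, depths = [512], [4]
batch_size_values = [256]
max_epochs_values = [100]
seeds = list(range(21, 1992))
optimizers = ["Adam"]  # placeholder; the diff truncates the real list

search_params = list(itertools.product(
    seeds, batch_size_values, alpha_values, widths, depths,
    learning_rate_values, max_epochs_values, optimizers,
))

for idx, params in enumerate(search_params):
    s, bs, a, w, d, lr, me, opt = params
    # The f-string interpolates the grid point into a Lightning CLI command;
    # the trailing \\ renders as a shell line continuation.
    cmd = f"""
python newmain.py fit \\
  --seed_everything {s} \\
  --data.batch_size {bs} \\
  --data.train_size 0 \\
  --data.val_size 10000 \\
  --model.alpha {a} \\
  --model.width {w} \\
  --model.depth {d} \\
  --trainer.max_epochs {me} \\
  --optimizer torch.optim.{opt} \\
  --optimizer.init_args.lr {lr} \\
  --trainer.callbacks+ lightning.pytorch.callbacks.LearningRateFinder
"""
    if idx == 0:
        print(cmd)  # or submit, e.g. job_plugin.run(cmd, machine=Machine.T4, ...)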