
depth of 1 led to consistently bad learning

new-sep-loss
Michael Pilosov, PhD · 10 months ago · parent commit a44580a15b
newsearch.py (25 changed lines)

@@ -30,7 +30,7 @@ learning_rate_values = [1e-3]
 alpha_values = [1.0]
 # widths = [2**k for k in range(4, 13)]
 # depths = [1, 2, 4, 8, 16]
-widths, depths = [64, 128, 256], [4, 8]
+widths, depths = [512], [4]
 batch_size_values = [256]
 max_epochs_values = [20]
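
This hunk narrows the sweep from the 64/128/256-wide, depth-4/8 grid to a single 512-wide, depth-4 configuration (consistent with the commit message, depth stays well above 1). For context, here is a minimal sketch of how these value lists likely expand into all_params; the cross-product construction itself is outside this hunk, so the ordering is assumed from the 8-tuple unpacking a, lr, bs, me, s, w, d, opt seen later in the file:

    from itertools import product
    from random import sample

    NUM_JOBS = 100  # hypothetical cap; the real value is defined elsewhere in newsearch.py

    alpha_values = [1.0]
    learning_rate_values = [1e-3]
    batch_size_values = [256]
    max_epochs_values = [20]
    seeds = list(range(21, 1992))
    widths, depths = [512], [4]
    optimizers = ["Adam"]

    # Assumed ordering, matching the unpacking `a, lr, bs, me, s, w, d, opt` below.
    all_params = list(product(alpha_values, learning_rate_values, batch_size_values,
                              max_epochs_values, seeds, widths, depths, optimizers))
    # perform random search with a limit (as in the hunk at -63 below)
    search_params = sample(all_params, min(NUM_JOBS, len(all_params)))
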
@@ -38,8 +38,8 @@ seeds = list(range(21, 1992))
 optimizers = [
     # "Adagrad",
     "Adam",
-    "SGD",
-    "AdamW",
+    # "SGD",
+    # "AdamW",
     # "LBFGS",
     # "RAdam",
     # "RMSprop",
@@ -63,6 +63,9 @@ all_params = [
 # perform random search with a limit
 search_params = sample(all_params, min(NUM_JOBS, len(all_params)))
+# --trainer.callbacks+ lightning.pytorch.callbacks.EarlyStopping \
+# --trainer.callbacks.init_args.monitor hp_metric \
 for idx, params in enumerate(search_params):
     a, lr, bs, me, s, w, d, opt = params
     # cmd = f"cd ~/colors && python main.py --alpha {a} --lr {lr} --bs {bs} --max_epochs {me} --seed {s} --width {w}"
@@ -87,19 +90,17 @@ python newmain.py fit \
     --trainer.callbacks.init_args.save_interval 0 \
     --optimizer torch.optim.{opt} \
     --optimizer.init_args.lr {lr} \
-    --lr_scheduler lightning.pytorch.cli.ReduceLROnPlateau \
-    --lr_scheduler.init_args.monitor hp_metric \
-    --lr_scheduler.init_args.factor 0.05 \
-    --lr_scheduler.init_args.patience 5 \
-    --lr_scheduler.init_args.cooldown 10 \
-    --lr_scheduler.init_args.verbose true
+    --trainer.callbacks+ lightning.pytorch.callbacks.LearningRateFinder
+    # --lr_scheduler lightning.pytorch.cli.ReduceLROnPlateau \
+    # --lr_scheduler.init_args.monitor hp_metric \
+    # --lr_scheduler.init_args.factor 0.05 \
+    # --lr_scheduler.init_args.patience 5 \
+    # --lr_scheduler.init_args.cooldown 10 \
+    # --lr_scheduler.init_args.verbose true
 """
-    test_cmd = f"{cmd.strip()} --print_config > out/config_v{idx:04d}.txt"
     # job_name = f"color2_{bs}_{a}_{lr:2.2e}"
     # job_plugin.run(cmd, machine=Machine.T4, name=job_name)
     print(f"Running {params}: {cmd}")
-    cmd = f"{test_cmd.strip()} && {cmd}"
     try:
         # Run the command and wait for it to complete
         # subprocess.run(test_cmd, shell=True, check=True)
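
The substantive change in this hunk swaps the ReduceLROnPlateau scheduler (factor 0.05, patience 5, cooldown 10, watching hp_metric) for a one-shot LearningRateFinder callback, and also drops the --print_config dry-run (test_cmd) that was previously chained in front of each job. A programmatic sketch of the new callback, with Lightning's default search range spelled out:

    from lightning.pytorch import Trainer
    from lightning.pytorch.callbacks import LearningRateFinder

    # Runs an LR-range test before fitting and updates the module's lr;
    # min_lr/max_lr shown are the Lightning defaults, not values from this diff.
    trainer = Trainer(callbacks=[LearningRateFinder(min_lr=1e-8, max_lr=1.0)])
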
