Browse Source

anchors and more train data, exp w batch

new-sep-loss
Michael Pilosov, PhD 10 months ago
parent
commit
4342a54cc8
  1. 6
      model.py
  2. 8
      newsearch.py

6
model.py

@@ -3,7 +3,7 @@ import torch
import torch.nn as nn import torch.nn as nn
from torch.optim.lr_scheduler import ReduceLROnPlateau from torch.optim.lr_scheduler import ReduceLROnPlateau
from losses import preservation_loss from losses import circle_norm, preservation_loss
from utils import RGBMYC_ANCHOR from utils import RGBMYC_ANCHOR
@@ -58,8 +58,8 @@ class ColorTransformerModel(L.LightningModule):
alpha = self.hparams.alpha alpha = self.hparams.alpha
# N = len(outputs) # N = len(outputs)
# distance = circle_norm(outputs, labels).mean() distance = circle_norm(outputs, labels).mean()
distance = torch.norm(outputs - labels).mean() # distance = torch.norm(outputs - labels).mean()
# Backprop with this: # Backprop with this:
loss = (1 - alpha) * p_loss + alpha * distance loss = (1 - alpha) * p_loss + alpha * distance

8
newsearch.py

@@ -32,14 +32,14 @@ alpha_values = [0]
# depths = [1, 2, 4, 8, 16] # depths = [1, 2, 4, 8, 16]
widths, depths = [512], [4] widths, depths = [512], [4]
batch_size_values = [256] batch_size_values = [64, 256, 1024]
max_epochs_values = [100] max_epochs_values = [100]
seeds = list(range(21, 1992)) seeds = list(range(21, 1992))
optimizers = [ optimizers = [
# "Adagrad", # "Adagrad",
"Adam", # "Adam",
# "SGD", # "SGD",
# "AdamW", "AdamW",
# "LBFGS", # "LBFGS",
# "RAdam", # "RAdam",
# "RMSprop", # "RMSprop",
@@ -73,7 +73,7 @@ for idx, params in enumerate(search_params):
python newmain.py fit \ python newmain.py fit \
--seed_everything {s} \ --seed_everything {s} \
--data.batch_size {bs} \ --data.batch_size {bs} \
--data.train_size 10000 \ --data.train_size 50000 \
--data.val_size 10000 \ --data.val_size 10000 \
--model.alpha {a} \ --model.alpha {a} \
--model.width {w} \ --model.width {w} \

Loading…
Cancel
Save