use millions of colors

Branch: new-sep-loss
Michael Pilosov, 10 months ago
Commit 1f7d4c1890

Changed files:
  color_128_0.3_1.00e-06.png (BIN, deleted)
  dataloader.py (7 changes)
  experiments.csv (25 deletions)
  main.py (10 changes)
  model.py (2 changes)
  search.py (2 changes)

color_128_0.3_1.00e-06.png (BIN, deleted)

Binary file not shown. Before: 28 KiB; no file after the commit.

dataloader.py (7 changes)

@@ -4,8 +4,8 @@ from torch.utils.data import DataLoader, TensorDataset
 from utils import extract_colors, preprocess_data


-def create_dataloader(N: int = 50, **kwargs):
-    rgb_tensor, _ = extract_colors()
+def create_dataloader(N: int = 1e8, **kwargs):
+    rgb_tensor = torch.rand((int(N), 3), dtype=torch.float32)
     rgb_tensor = preprocess_data(rgb_tensor)
     # Creating a dataset and data loader
     dataset = TensorDataset(rgb_tensor, torch.zeros(len(rgb_tensor)))
@@ -36,7 +36,8 @@ def create_named_dataloader(N: int = 0, **kwargs):
 if __name__ == "__main__":
     batch_size = 4
-    train_dataloader = create_dataloader(batch_size=batch_size, shuffle=True)
+    train_dataloader = create_dataloader(N=1e6, batch_size=batch_size, shuffle=True)
+    print(len(train_dataloader.dataset))

     train_dataloader_with_names = create_named_dataloader(
         batch_size=batch_size, shuffle=True
     )
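
A note on scale: with the new default, the dataset is materialized up front as a single float32 tensor of N random RGB triples, so memory is committed at creation time rather than per batch. A back-of-the-envelope check of that footprint (plain arithmetic, no assumptions beyond float32 storage):

# float32 RGB data: N points x 3 channels x 4 bytes per element.
N = int(1e8)
footprint_gb = N * 3 * 4 / 1e9
print(f"{footprint_gb:.1f} GB")  # 1.2 GB resident before any preprocessing

At the new in-file default of N=1e8 that is roughly 1.2 GB; the __main__ smoke test above uses N=1e6, about 12 MB.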

experiments.csv (25 deletions; file removed)

@@ -1,25 +0,0 @@
-,batch_size,alpha,learning_rate
-0,32.0,0.3,0.0001
-1,32.0,0.3,0.01
-2,32.0,0.9,1e-06
-3,32.0,0.7,0.001
-4,64.0,0.5,0.001
-5,64.0,0.1,1e-06
-6,32.0,0.1,0.001
-7,128.0,0.5,1e-06
-8,128.0,0.7,0.001
-9,128.0,0.9,1e-05
-10,128.0,0.1,1e-06
-11,128.0,0.3,1e-06
-12,64.0,0.3,0.01
-13,64.0,0.1,1e-06
-14,128.0,0.5,0.001
-15,32.0,0.3,1e-05
-16,32.0,0.7,1e-06
-17,32.0,0.3,1e-06
-18,64.0,0.3,0.0001
-19,64.0,0.3,1e-06
-20,128.0,0.5,1e-05
-21,32.0,0.1,0.01
-22,64.0,0.1,1e-05
-23,64.0,0.3,0.001

main.py (10 changes)

@@ -7,7 +7,7 @@ import torch
 from pytorch_lightning.callbacks import EarlyStopping

 from callbacks import SaveImageCallback
-from dataloader import create_named_dataloader
+from dataloader import create_dataloader
 from model import ColorTransformerModel
@@ -65,20 +65,20 @@ if __name__ == "__main__":
     early_stop_callback = EarlyStopping(
         monitor="hp_metric",  # Metric to monitor
         min_delta=1e-5,  # Minimum change in the monitored quantity to qualify as an improvement
-        patience=24,  # Number of epochs with no improvement after which training will be stopped
+        patience=5,  # Number of epochs with no improvement after which training will be stopped
         mode="min",  # Mode can be either 'min' for minimizing the monitored quantity or 'max' for maximizing it.
         verbose=True,
     )

     save_img_callback = SaveImageCallback(
-        save_interval=0,
+        save_interval=1,
         final_dir="out",
     )

     # Initialize data loader with parsed arguments
     # named_data_loader also has grayscale extras. TODO: remove unnamed
-    train_dataloader = create_named_dataloader(
-        N=0,
+    train_dataloader = create_dataloader(
+        N=1e8,
         batch_size=args.bs,
         shuffle=True,
         num_workers=args.num_workers,
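
Both callbacks are presumably handed to the Trainer further down in main.py, outside this hunk. A minimal sketch of the standard PyTorch Lightning wiring under that assumption (SaveImageCallback is project-local, so it is omitted to keep the sketch self-contained, and max_epochs=50 is a placeholder):

import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping

# Stop after 5 epochs without a >= 1e-5 improvement in the logged
# "hp_metric", matching the values set in the hunk above.
early_stop_callback = EarlyStopping(
    monitor="hp_metric", min_delta=1e-5, patience=5, mode="min", verbose=True
)

# Hypothetical Trainer construction; the real one sits outside this diff.
trainer = pl.Trainer(max_epochs=50, callbacks=[early_stop_callback])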

model.py (2 changes)

@@ -122,7 +122,7 @@ class ColorTransformerModel(pl.LightningModule):
             lr=self.hparams.learning_rate,
         )
         lr_scheduler = ReduceLROnPlateau(
-            optimizer, mode="min", factor=0.05, patience=10, cooldown=20, verbose=True
+            optimizer, mode="min", factor=0.05, patience=5, cooldown=10, verbose=True
         )
         return {
             "optimizer": optimizer,

search.py (2 changes)

@@ -24,7 +24,7 @@ alpha_values = [0, 1, 2]
 widths = [64, 128, 256, 512]
 # learning_rate_values = [5e-4]
 batch_size_values = [32, 64, 128]
-max_epochs_values = [500]
+max_epochs_values = [50]
 seeds = list(range(21, 1992))

 # Generate all possible combinations of hyperparameters
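
The closing comment points at a grid expansion over these lists; the usual way is itertools.product, sketched here over only the values visible in this hunk (learning rates are commented out in the diff, so they are omitted):

import itertools

alpha_values = [0, 1, 2]
widths = [64, 128, 256, 512]
batch_size_values = [32, 64, 128]
max_epochs_values = [50]

# Cartesian product of the visible grids: 3 * 4 * 3 * 1 = 36 combinations.
combos = list(
    itertools.product(alpha_values, widths, batch_size_values, max_epochs_values)
)
print(len(combos))  # 36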
