{
    "seed": 1,
    
    "decoder": {
        "unets": [
            {
                "dim": 32,
                "cond_dim": 512,
                "image_embed_dim": 768,
                "text_embed_dim": 768,
                "cond_on_text_encodings": true,
                "channels": 3,
                "dim_mults": [1, 2, 3, 4],
                "num_resnet_blocks": 4,
                "attn_heads": 8,
                "attn_dim_head": 64,
                "sparse_attn": true,
                "memory_efficient": true,
		        "self_attn": [false, true, true, true]
            },
            {
                "dim": 32,
                "cond_dim": 512,
                "image_embed_dim": 768,
                "text_embed_dim": 768,
                "cond_on_text_encodings": true,
                "channels": 3,
                "dim_mults": [1, 2, 3, 4],
                "num_resnet_blocks": 4,
                "attn_heads": 8,
                "attn_dim_head": 64,
                "sparse_attn": true,
                "memory_efficient": true,
		        "self_attn": [false, true, true, true]
            },
            {
                "dim": 192,
                "cond_dim": 512,
                "image_embed_dim": 768,
                "text_embed_dim": 768,
                "cond_on_text_encodings": true,
                "init_cross_embed": false,
                "channels": 3,
                "dim_mults": [1, 2, 3, 4],
                "num_resnet_blocks": 3,
                "attn_heads": 8,
                "attn_dim_head": 64,
                "sparse_attn": false,
                "memory_efficient": true,
                "self_attn": [false, false, false, false]
            }
        ],
        "clip": {
            "make": "openai",
            "model": "ViT-L/14"
        },
        "image_sizes": [64, 256, 1024],
        "random_crop_sizes": [null, null, 256],
        "channels": 3,
        "timesteps": 1000,
        "loss_type": "l2",
        "beta_schedule": ["cosine", "cosine", "cosine"],
        "learned_variance": true,
        "text_cond_drop_prob": 0.0,
        "image_cond_drop_prob": 0.0
    },
    "data": {
        "webdataset_base_url": "pipe:aws s3 cp --quiet s3://s-datasets/laion-high-resolution/{}.tar -",
        "num_workers": 6,
        "batch_size": 8,
        "start_shard": 0,
        "end_shard": 17535,
        "shard_width": 5,
        "index_width": 4,
        "splits": {
            "train": 0.75,
            "val": 0.15,
            "test": 0.1
        },
        "shuffle_train": false,
        "resample_train": true,
        "preprocessing": {
            "RandomResizedCrop": {
                "size": [1024, 1024],
                "scale": [0.75, 1.0],
                "ratio": [1.0, 1.0]
            },
            "ToTensor": true
        }
    },
    "train": {
        "epochs": 1000,
        "lr": 1.2e-4,
        "wd": 0.0,
        "max_grad_norm": 0.5,
        "save_every_n_samples": 2000000,
        "n_sample_images": 2,
        "device": "cuda:0",
        "epoch_samples": 10000000,
        "validation_samples": 100000,
        "use_ema": true,
        "ema_beta": 0.9999,
        "unet_training_mask": [false, false, true]
    },
    "evaluate": {
        "n_evaluation_samples": 2,
        "FID": {
            "feature": 64
        },
        "LPIPS": {
            "net_type": "vgg",
            "reduction": "mean"
        }
    },
    "tracker": {
        "data_path": "/fsx/aidan/new/multinode/experiments/decoder_1024/.tracker-data",
        "overwrite_data_path": true,

        "log": {
            "log_type": "wandb",

            "wandb_entity": "Veldrovive",
            "wandb_project": "upsamplers_1024",
            "wandb_resume": false,

            "auto_resume": true,
            "verbose": true
        },

        "load": {
            "load_from": null,
            "only_auto_resume": true,
	        "file_path": "/fsx/aidan/new/multinode/experiments/decoder_1024/models/checkpoints/latest.pth"
        },

        "save": [
        {
            "save_to": "huggingface",
            "huggingface_repo": "laion/DALLE2-PyTorch",
            
            "save_meta_to": "upsampler/1024/v1.0.3/",
            "save_latest_to": "upsampler/1024/v1.0.3/latest.pth",

            "save_type": "model"
        },{
            "save_to": "huggingface",
            "huggingface_repo": "laion/DALLE2-PyTorch",
            
            "save_latest_to": "upsampler/1024/v1.0.2/checkpoints/latest.pth",

            "save_type": "checkpoint"
        },{
            "save_to": "local",
            "save_latest_to": "/fsx/aidan/new/multinode/experiments/decoder_1024/models/checkpoints/latest.pth",

            "save_type": "checkpoint"
        }]
    }
}