{ "seed": 1, "decoder": { "unets": [ { "dim": 416, "cond_dim": 512, "image_embed_dim": 768, "text_embed_dim": 768, "cond_on_text_encodings": true, "channels": 3, "dim_mults": [1, 2, 3, 4], "num_resnet_blocks": 4, "attn_heads": 8, "attn_dim_head": 64, "sparse_attn": true, "memory_efficient": true } ], "clip":{ "make": "openai", "model": "ViT-L/14" }, "image_sizes": [64], "channels": 3, "timesteps": 1000, "loss_type": "l2", "beta_schedule": ["cosine"], "learned_variance": true }, "data": { "webdataset_base_url": "pipe:aws s3 cp --quiet s3://s-datasets/laion-aesthetic/data/laion2B-en-aesthetic/{}.tar -", "embeddings_url": "s3://s-datasets/laion-aesthetic/ordered_embeddings/", "num_workers": 12, "batch_size": 22, "start_shard": 0, "end_shard": 5247, "shard_width": 5, "index_width": 4, "splits": { "train": 0.75, "val": 0.15, "test": 0.1 }, "shuffle_train": false, "resample_train": true, "preprocessing": { "RandomResizedCrop": { "size": [64, 64], "scale": [0.75, 1.0], "ratio": [1.0, 1.0] }, "ToTensor": true } }, "train": { "epochs": 1000, "lr":1e-4, "wd": 0.01, "max_grad_norm": 0.5, "save_every_n_samples": 1000, "n_sample_images": 10, "device": "cuda:0", "epoch_samples": 1500, "validation_samples": 1000, "use_ema": true, "ema_beta": 0.99, "save_all": false, "save_latest": true, "save_best": true, "unet_training_mask": [true] }, "evaluate": { "n_evaluation_samples": 30, "FID": { "feature": 64 }, "LPIPS": { "net_type": "vgg", "reduction": "mean" } }, "tracker": { "data_path": ".tracker-data", "overwrite_data_path": true, "log": { "log_type": "wandb", "wandb_entity": "veldrovive", "wandb_project": "trackers-test", "wandb_run_name": "nousr config", "verbose": true }, "load": { "load_from": "local", "file_path": "/home/aidan/DALLE2-pytorch/latest.pth" }, "save": [{ "save_to": "wandb" }, { "save_to": "huggingface", "huggingface_repo": "Veldrovive/test_model", "save_latest_to": "latest_{epoch}.pth", "save_type": "model" }] } }