# LoraDump / dino_(dinoartforame) / auto_save_dinoartforame2.toml
# Uploaded by sulph — "Upload 375 files", commit 831c14e (verified).
# NOTE(review): these four lines were raw page text copied above the TOML
# and broke parsing; converted to comments to keep the file valid.
# Training image subset (array-of-tables: more [[subsets]] entries may follow).
[[subsets]]
# Each image is seen 2 times per epoch.
num_repeats = 2
caption_extension = ".txt"
shuffle_caption = false
flip_aug = false
# Not a regularization-image subset.
is_reg = false
# Fix: original value mixed forward slashes with an escaped backslash
# ("...loradataset\\5_ohwx ..."); normalized to forward slashes, which
# Windows accepts and which every other path in this file uses.
# NOTE(review): the "5_" folder prefix looks like a kohya repeat-count
# convention while num_repeats = 2 — confirm which one the trainer honors.
image_dir = "E:/Everything artificial intelligence/loradataset/5_ohwx dino_(dinoartforame)"
# No leading caption tokens are protected from shuffling/dropping.
keep_tokens = 0
# Intentionally empty: no sample-image generation during training.
[sample_args]
# Intentionally empty: no logging backend (e.g. tensorboard/wandb) configured.
[logging_args]
# Core trainer arguments.
[general_args.args]
# Base SDXL checkpoint to fine-tune from.
pretrained_model_name_or_path = "E:/Everything artificial intelligence/stable-diffusion-webui/models/Stable-diffusion/animagineXLV3_v30.safetensors"
mixed_precision = "fp16"
seed = 23
max_data_loader_n_workers = 1
persistent_data_loader_workers = true
# Extended CLIP token limit (225 instead of the default 75).
max_token_length = 225
prior_loss_weight = 1.0
# External VAE; commonly used with SDXL because of fp16 issues in the baked-in VAE.
vae = "E:/Everything artificial intelligence/stable-diffusion-webui/models/VAE/sdxl_vae.safetensors"
sdxl = true
xformers = true
# Pre-encode latents once; incompatible with on-the-fly augmentations.
cache_latents = true
# Trades compute for lower VRAM usage.
gradient_checkpointing = true
# Step-based run length (overrides any epoch-based limit).
max_train_steps = 1000
# Dataset-wide settings (apply to all [[subsets]] entries).
[general_args.dataset_args]
# Base training resolution in pixels (square; see bucket_args for aspect buckets).
resolution = 1024
batch_size = 1
# LoRA network hyperparameters.
[network_args.args]
# Rank of the LoRA linear layers.
network_dim = 16
# Alpha = dim / 2 — a common scaling choice.
network_alpha = 8.0
# Input-perturbation noise strength.
ip_noise_gamma = 0.1
# Train across the full noise schedule (timesteps 0..1000).
min_timestep = 0
max_timestep = 1000
# Fairly high dropout; acts as regularization on the LoRA weights.
network_dropout = 0.3
# Optimizer and LR-schedule settings.
[optimizer_args.args]
# 8-bit AdamW (bitsandbytes) to reduce optimizer VRAM.
optimizer_type = "AdamW8bit"
# NOTE(review): both lr_scheduler and lr_scheduler_type are set; the custom
# CosineAnnealingWarmupRestarts below presumably takes precedence over
# "cosine" — confirm against the trainer's argument handling.
lr_scheduler = "cosine"
# UNet learning rate.
learning_rate = 0.001
max_grad_norm = 1.0
# Custom scheduler class path resolved by the training script.
lr_scheduler_type = "LoraEasyCustomOptimizer.CustomOptimizers.CosineAnnealingWarmupRestarts"
# Two warm restarts over max_train_steps.
lr_scheduler_num_cycles = 2
# Text encoder trains 10x slower than the UNet.
text_encoder_lr = 0.0001
# Fraction of total steps spent warming up (15% of 1000 = 150 steps).
warmup_ratio = 0.15
# Min-SNR loss weighting.
min_snr_gamma = 8
# Caps weight norms to curb overfitting (scale_weight_norms).
scale_weight_norms = 5.0
# Output / checkpointing settings.
[saving_args.args]
# Saved directly into the webui's Lora folder for immediate use.
output_dir = "E:/Everything artificial intelligence/stable-diffusion-webui/models/Lora/dino_(dinoartforame)"
save_precision = "fp16"
save_model_as = "safetensors"
# Also write a copy of this config next to the model (same directory).
save_toml = true
save_toml_location = "E:/Everything artificial intelligence/stable-diffusion-webui/models/Lora/dino_(dinoartforame)"
output_name = "dinoartforame2"
# Intermediate checkpoint every 2 epochs.
save_every_n_epochs = 2
# Aspect-ratio bucketing: lets mixed-aspect images train without cropping.
[bucket_args.dataset_args]
enable_bucket = true
# Bucket edge lengths range 512..2048 px in 64 px steps.
min_bucket_reso = 512
max_bucket_reso = 2048
bucket_reso_steps = 64
# Never upscale images smaller than their bucket.
bucket_no_upscale = true
# Noise-offset training: helps the model produce darker/brighter extremes.
[noise_args.args]
noise_offset = 0.0357
# Extra LoRA module kwargs (conv layers, i.e. LoCon-style training).
# NOTE(review): this sub-table is defined far from its parent
# [network_args.args] (out-of-order definition is valid TOML but
# spec-discouraged); consider moving it directly after the parent.
[network_args.args.network_args]
# Conv rank/alpha, keeping the same alpha = dim / 2 ratio as the linear layers.
conv_dim = 24
conv_alpha = 12.0
# Kwargs forwarded to the custom CosineAnnealingWarmupRestarts scheduler.
[optimizer_args.args.lr_scheduler_args]
# Floor the LR decays to at the end of each cosine cycle.
min_lr = 1e-6
# Each restart's peak LR is scaled by 0.85 relative to the previous cycle.
gamma = 0.85
# Kwargs forwarded to the AdamW8bit constructor.
# NOTE(review): values are strings, not native numbers — this consumer
# appears to pass optimizer kwargs as "key=value" strings; confirm before
# converting to native TOML floats/arrays.
[optimizer_args.args.optimizer_args]
weight_decay = "0.05"
betas = "0.9,0.99"