# Training configuration for kohya-ss sd-scripts (LoRA).
# NOTE(review): values prefixed with "calc:" / "ref:" appear to be evaluated by a
# pre-processing step before being handed to sd-scripts — kept verbatim; confirm
# against the consuming launcher.

[Path]
# Commented-out entries are the local Windows equivalents of the cloud paths below,
# kept for switching environments.
#pretrained_model_name_or_path = "H:\\WEBUI_Last\\models\\Stable-diffusion\\Trim.safetensors"
#train_data_dir = "E:\\Dataset\\Akashi(AZ)"
#__root_dir = "H:\\sd-scripts\\"
#logging_dir = "calc:f'{__root_dir}logs'"
#output_dir = "calc:f'{__root_dir}outputs'"
#sample_prompts = "calc:f'{__root_dir}prompts\\A.txt'"
pretrained_model_name_or_path = "/notebooks/box/Trim.safetensors"
train_data_dir = "/notebooks/LoRA_Image/"
__root_dir = "/notebooks/LoRA/sd-scripts/"
logging_dir = "calc:f'{__root_dir}logs'"
output_dir = "calc:f'{__root_dir}outputs'"
sample_prompts = "/notebooks/LoRA_Setting/A.txt"

[Prompt]
sample_every_n_epochs = 1
sample_sampler = "k_euler_a"

[Else]
shuffle_caption = true
caption_extension = ".txt"
keep_tokens = 1
color_aug = true
# Base 512 px plus __upper_px on each axis → "1024,1024" training resolution.
__upper_px = 512
__resolution_x = "calc:512 + __upper_px"
__resolution_y = "calc:512 + __upper_px"
resolution = "calc:f'{__resolution_x},{__resolution_y}'"
enable_bucket = true
min_bucket_reso = "calc:320 + __upper_px"
bucket_no_upscale = true
caption_dropout_every_n_epochs = 9
caption_tag_dropout_rate = 0.2
save_every_n_epochs = 2
train_batch_size = 1
xformers = true
max_train_epochs = 16
persistent_data_loader_workers = true
seed = 987
mixed_precision = "fp16"
save_precision = "ref:mixed_precision"
clip_skip = 2
optimizer_type = "DAdaptation"
#optimizer_args = [ "relative_step=True", "scale_parameter=True", "warmup_init=True",]
# Learning rates derived from a single base ratio (text encoder 1/6, U-Net 1/4).
__ratio = 1.0
text_encoder_lr = "calc:__ratio / 6"
unet_lr = "calc:__ratio / 4"
#lr_scheduler = "cosine_with_restarts"
#lr_warmup_steps = 500
#lr_scheduler_num_cycles = 4
network_module = "networks.lora"
network_dim = 128
network_alpha = "calc:float(network_dim) / 1024 * 1000"
# Output name encodes optimizer, resolution, and (uppercased) precision for easy identification.
__upper_mixed_precision = "calc:mixed_precision.upper()"
output_name = "calc:f'Akashi_{optimizer_type}{__resolution_x}pxRatio46{__upper_mixed_precision}'"
log_prefix = "ref:output_name"