# FLUX.1-dev LoRA fine-tuning configuration (ai-toolkit).
---
# Training job definition for an ai-toolkit custom_sd_trainer run that
# fine-tunes a LoRA on FLUX.1-dev, restricted to four single-stream
# transformer blocks.
job: custom_job
config:
  name: flux_train_replicate
  process:
  - type: custom_sd_trainer
    training_folder: output
    # Quoted: a colon-containing plain scalar is a classic YAML quoting
    # trap; quoting makes the string type unambiguous across parsers.
    device: 'cuda:0'
    # NOTE(review): presumably the caption token that activates this LoRA
    # at inference time — confirm against the trainer's caption handling.
    trigger_word: TSVETAEVA
    network:
      type: lora
      linear: 24
      linear_alpha: 24
      network_kwargs:
        # Train only modules whose names contain one of these substrings:
        # norm, MLP, output projection, and attention Q/K/V of
        # single_transformer_blocks 7, 12, 16, and 20.
        only_if_contains:
        - transformer.single_transformer_blocks.7.norm.linear
        - transformer.single_transformer_blocks.7.proj_mlp
        - transformer.single_transformer_blocks.7.proj_out
        - transformer.single_transformer_blocks.7.attn.to_q
        - transformer.single_transformer_blocks.7.attn.to_k
        - transformer.single_transformer_blocks.7.attn.to_v
        - transformer.single_transformer_blocks.12.norm.linear
        - transformer.single_transformer_blocks.12.proj_mlp
        - transformer.single_transformer_blocks.12.proj_out
        - transformer.single_transformer_blocks.12.attn.to_q
        - transformer.single_transformer_blocks.12.attn.to_k
        - transformer.single_transformer_blocks.12.attn.to_v
        - transformer.single_transformer_blocks.16.norm.linear
        - transformer.single_transformer_blocks.16.proj_mlp
        - transformer.single_transformer_blocks.16.proj_out
        - transformer.single_transformer_blocks.16.attn.to_q
        - transformer.single_transformer_blocks.16.attn.to_k
        - transformer.single_transformer_blocks.16.attn.to_v
        - transformer.single_transformer_blocks.20.norm.linear
        - transformer.single_transformer_blocks.20.proj_mlp
        - transformer.single_transformer_blocks.20.proj_out
        - transformer.single_transformer_blocks.20.attn.to_q
        - transformer.single_transformer_blocks.20.attn.to_k
        - transformer.single_transformer_blocks.20.attn.to_v
    save:
      dtype: float16
      # NOTE(review): save_every (1001) exceeds steps (1000), so no
      # intermediate checkpoint ever triggers — presumably intentional,
      # keeping only the end-of-run save; confirm with the trainer.
      save_every: 1001
      max_step_saves_to_keep: 1
    datasets:
    - folder_path: input_images
      caption_ext: txt
      # Drop captions entirely on 5% of samples during training.
      caption_dropout_rate: 0.05
      shuffle_tokens: false
      cache_latents_to_disk: false
      cache_latents: true
      # NOTE(review): presumably base sizes for multi-resolution training
      # buckets — confirm against the dataset loader.
      resolution:
      - 512
      - 768
      - 1024
    train:
      batch_size: 1
      steps: 1000
      gradient_accumulation_steps: 1
      # Train only the diffusion network (via the LoRA above); the text
      # encoder stays frozen.
      train_unet: true
      train_text_encoder: false
      content_or_style: balanced
      gradient_checkpointing: true
      noise_scheduler: flowmatch
      optimizer: adamw8bit
      lr: 0.0005
      ema_config:
        use_ema: true
        ema_decay: 0.99
      dtype: bf16
    model:
      name_or_path: FLUX.1-dev
      is_flux: true
      quantize: true
    sample:
      sampler: flowmatch
      # NOTE(review): sample_every (1001) exceeds steps (1000) and the
      # prompt list is empty, so in-training sampling never runs.
      sample_every: 1001
      width: 1024
      height: 1024
      prompts: []
      neg: ''
      seed: 42
      walk_seed: true
      guidance_scale: 3.5
      sample_steps: 28
meta:
  name: flux_train_replicate
  version: '1.0'