serkandyck commited on
Commit
56a012e
1 Parent(s): b5c0fe6

Upload train_lora_flux_24gb.yaml with huggingface_hub

Browse files
Files changed (1) hide show
  1. train_lora_flux_24gb.yaml +6 -5
train_lora_flux_24gb.yaml CHANGED
@@ -21,7 +21,7 @@ config:
21
  linear_alpha: 16
22
  save:
23
  dtype: float16 # precision to save
24
- save_every: 250 # save every this many steps
25
  max_step_saves_to_keep: 4 # how many intermittent saves to keep
26
  push_to_hub: false # change this to true to push your trained model to Hugging Face.
27
  # You can either set up a HF_TOKEN env variable or you'll be prompted to log-in
@@ -42,14 +42,14 @@ config:
42
  resolution: [512, 768, 1024] # flux enjoys multiple resolutions
43
  train:
44
  batch_size: 1
45
- steps: 2000 # total number of steps to train 500 - 4000 is a good range
46
  gradient_accumulation_steps: 1
47
  train_unet: true
48
  train_text_encoder: false # probably won't work with flux
49
  gradient_checkpointing: true # need this on unless you have a ton of vram
50
  noise_scheduler: "flowmatch" # for training only
51
  optimizer: "adamw8bit"
52
- lr: 1e-4
53
  # skip_first_sample: true
54
  # uncomment to completely disable sampling
55
  # disable_sampling: true
@@ -66,11 +66,11 @@ config:
66
  # huggingface model name or path
67
  name_or_path: "black-forest-labs/FLUX.1-dev"
68
  is_flux: true
69
- quantize: true # run 8bit mixed precision
70
  # low_vram: true # uncomment this if the GPU is connected to your monitors. It will use less vram to quantize, but is slower.
71
  sample:
72
  sampler: "flowmatch" # must match train.noise_scheduler
73
- sample_every: 250 # sample every this many steps
74
  width: 1024
75
  height: 1024
76
  prompts:
@@ -92,6 +92,7 @@ config:
92
  walk_seed: true
93
  guidance_scale: 4
94
  sample_steps: 20
 
95
  # you can add any additional meta info here. [name] is replaced with config name at top
96
  meta:
97
  name: "[name]"
 
21
  linear_alpha: 16
22
  save:
23
  dtype: float16 # precision to save
24
+ save_every: 500 # save every this many steps
25
  max_step_saves_to_keep: 4 # how many intermittent saves to keep
26
  push_to_hub: false # change this to true to push your trained model to Hugging Face.
27
  # You can either set up a HF_TOKEN env variable or you'll be prompted to log-in
 
42
  resolution: [512, 768, 1024] # flux enjoys multiple resolutions
43
  train:
44
  batch_size: 1
45
+ steps: 3000 # total number of steps to train 500 - 4000 is a good range
46
  gradient_accumulation_steps: 1
47
  train_unet: true
48
  train_text_encoder: false # probably won't work with flux
49
  gradient_checkpointing: true # need this on unless you have a ton of vram
50
  noise_scheduler: "flowmatch" # for training only
51
  optimizer: "adamw8bit"
52
+ lr: 0.0001
53
  # skip_first_sample: true
54
  # uncomment to completely disable sampling
55
  # disable_sampling: true
 
66
  # huggingface model name or path
67
  name_or_path: "black-forest-labs/FLUX.1-dev"
68
  is_flux: true
69
+ quantize: true # run 8bit mixed precision
70
  # low_vram: true # uncomment this if the GPU is connected to your monitors. It will use less vram to quantize, but is slower.
71
  sample:
72
  sampler: "flowmatch" # must match train.noise_scheduler
73
+ sample_every: 500 # sample every this many steps
74
  width: 1024
75
  height: 1024
76
  prompts:
 
92
  walk_seed: true
93
  guidance_scale: 4
94
  sample_steps: 20
95
+ trigger_word: p3r5on
96
  # you can add any additional meta info here. [name] is replaced with config name at top
97
  meta:
98
  name: "[name]"