multimodalart (HF staff) committed
Commit: c4295a2
Parent(s): f313ae3

Update app.py

Files changed (1):
  1. app.py +326 -306
app.py CHANGED
@@ -573,331 +573,351 @@ with gr.Blocks(css=css, theme=theme) as demo:
     ### Train a high quality SDXL LoRA in a breeze ༄ with state-of-the-art techniques and for cheap
     <small>Dreambooth with Pivotal Tuning, Prodigy and more! Use the trained LoRAs with diffusers, AUTO1111, Comfy. [blog about the training script](https://huggingface.co/blog/sdxl_lora_advanced_script), [Colab Pro](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/SDXL_Dreambooth_LoRA_advanced_example.ipynb), [run locally or in a cloud](https://github.com/huggingface/diffusers/blob/main/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py)</small>''', elem_id="main_title")
     #gr.LoginButton(elem_classes=["login_logout"])
-    with gr.Column(elem_classes=["main_logged"]) as main_ui:
-        lora_name = gr.Textbox(label="The name of your LoRA", info="This has to be a unique name", placeholder="e.g.: Persian Miniature Painting style, Cat Toy")
-        training_option = gr.Radio(
-            label="What are you training?", choices=["object", "style", "character", "face", "custom"]
-        )
-        concept_sentence = gr.Textbox(
-            label="Concept sentence",
-            info="Sentence to be used in all images for captioning. TOK is a special mandatory token, used to teach the model your concept.",
-            placeholder="e.g.: A photo of TOK, in the style of TOK",
-            visible=False,
-            interactive=True,
-        )
-        with gr.Group(visible=False) as image_upload:
-            with gr.Row():
-                images = gr.File(
-                    file_types=["image"],
-                    label="Upload your images",
-                    file_count="multiple",
-                    interactive=True,
-                    visible=True,
-                    scale=1,
-                )
-                with gr.Column(scale=3, visible=False) as captioning_area:
-                    with gr.Column():
-                        gr.Markdown(
-                            """# Custom captioning
-                            To improve the quality of your outputs, you can add a custom caption for each image, describing exactly what is taking place in each of them. Including TOK is mandatory. You can leave things as is if you don't want to include captioning.
-                            """
-                        )
-                        do_captioning = gr.Button("Add AI captions with BLIP-2")
-                        output_components = [captioning_area]
-                        caption_list = []
-                        for i in range(1, MAX_IMAGES + 1):
-                            locals()[f"captioning_row_{i}"] = gr.Row(visible=False)
-                            with locals()[f"captioning_row_{i}"]:
-                                locals()[f"image_{i}"] = gr.Image(
-                                    width=111,
-                                    height=111,
-                                    min_width=111,
-                                    interactive=False,
-                                    scale=2,
-                                    show_label=False,
-                                    show_share_button=False,
-                                    show_download_button=False
-                                )
-                                locals()[f"caption_{i}"] = gr.Textbox(
-                                    label=f"Caption {i}", scale=15, interactive=True
-                                )
-
-                            output_components.append(locals()[f"captioning_row_{i}"])
-                            output_components.append(locals()[f"image_{i}"])
-                            output_components.append(locals()[f"caption_{i}"])
-                            caption_list.append(locals()[f"caption_{i}"])
-        with gr.Accordion(open=False, label="Advanced options", visible=False, elem_classes=['accordion']) as advanced:
-            with gr.Row():
-                with gr.Column():
-                    optimizer = gr.Dropdown(
-                        label="Optimizer",
-                        info="Prodigy is an auto-optimizer and works well by default. If you prefer to set your own learning rates, change it to AdamW. If you don't have enough VRAM to train with AdamW, pick 8-bit Adam.",
-                        choices=[
-                            ("Prodigy", "prodigy"),
-                            ("AdamW", "adamW"),
-                            ("8-bit Adam", "8bitadam"),
-                        ],
-                        value="prodigy",
-                        interactive=True,
-                    )
-                    use_snr_gamma = gr.Checkbox(label="Use SNR Gamma")
-                    snr_gamma = gr.Number(
-                        label="snr_gamma",
-                        info="SNR weighting gamma to re-balance the loss",
-                        value=5.000,
-                        step=0.1,
-                        visible=False,
-                    )
-                    mixed_precision = gr.Dropdown(
-                        label="Mixed Precision",
-                        choices=["no", "fp16", "bf16"],
-                        value="bf16",
-                    )
-                    learning_rate = gr.Number(
-                        label="UNet Learning rate",
-                        minimum=0.0,
-                        maximum=10.0,
-                        step=0.0000001,
-                        value=1.0,  # For Prodigy you start high and it will optimize down
-                    )
-                    max_train_steps = gr.Number(
-                        label="Max train steps", minimum=1, maximum=50000, value=1000
-                    )
-                    lora_rank = gr.Number(
-                        label="LoRA Rank",
-                        info="Rank for the Low Rank Adaptation (LoRA); a higher rank produces a larger LoRA",
-                        value=8,
-                        step=2,
-                        minimum=2,
-                        maximum=1024,
-                    )
-                    repeats = gr.Number(
-                        label="Repeats",
-                        info="How many times to repeat the training data.",
-                        value=1,
-                        minimum=1,
-                        maximum=200,
-                    )
-                with gr.Column():
-                    with_prior_preservation = gr.Checkbox(
-                        label="Prior preservation loss",
-                        info="Prior preservation helps to ground the model to things that are similar to your concept. Good for faces.",
-                        value=False,
-                    )
-                    with gr.Column(visible=False) as prior_preservation_params:
-                        with gr.Tab("prompt"):
-                            class_prompt = gr.Textbox(
-                                label="Class Prompt",
-                                info="The prompt that will be used to generate your class images",
-                            )
-
-                        with gr.Tab("images"):
-                            class_images = gr.File(
-                                file_types=["image"],
-                                label="Upload your images",
-                                file_count="multiple",
-                            )
-                            num_class_images = gr.Number(
-                                label="Number of class images; if fewer images are uploaded than the number you set here, additional images will be sampled with the Class Prompt",
-                                value=20,
-                            )
-                    train_text_encoder_ti = gr.Checkbox(
-                        label="Do textual inversion",
-                        value=True,
-                        info="Will train a textual inversion embedding together with the LoRA. Increases quality significantly. If untoggled, you can remove the special TOK token from the prompts.",
-                    )
-                    with gr.Group(visible=True) as pivotal_tuning_params:
-                        train_text_encoder_ti_frac = gr.Number(
-                            label="Pivot Textual Inversion",
-                            info="% of epochs to train textual inversion for",
-                            value=0.5,
-                            step=0.1,
-                        )
-                        num_new_tokens_per_abstraction = gr.Number(
-                            label="Tokens to train",
-                            info="Number of tokens to train in the textual inversion",
-                            value=2,
-                            minimum=1,
-                            maximum=1024,
-                            interactive=True,
-                        )
-                    with gr.Group(visible=False) as text_encoder_train_params:
-                        train_text_encoder = gr.Checkbox(
-                            label="Train Text Encoder", value=True
-                        )
-                        train_text_encoder_frac = gr.Number(
-                            label="Pivot Text Encoder",
-                            info="% of epochs to train the text encoder for",
-                            value=0.8,
-                            step=0.1,
-                        )
-                        text_encoder_learning_rate = gr.Number(
-                            label="Text encoder learning rate",
-                            minimum=0.0,
-                            maximum=10.0,
-                            step=0.0000001,
-                            value=1.0,
-                        )
-                    seed = gr.Number(label="Seed", value=42)
-                    resolution = gr.Number(
-                        label="Resolution",
-                        info="Only square sizes are supported for now; the value will be both width and height",
-                        value=1024,
-                    )
-
-        with gr.Accordion(open=False, label="Even more advanced options", elem_classes=['accordion']):
-            with gr.Row():
-                with gr.Column():
-                    gradient_accumulation_steps = gr.Number(
-                        info="If you change this setting, the pricing calculation will be wrong",
-                        label="gradient_accumulation_steps",
-                        value=1
-                    )
-                    train_batch_size = gr.Number(
-                        info="If you change this setting, the pricing calculation will be wrong",
-                        label="Train batch size",
-                        value=2
-                    )
-                    num_train_epochs = gr.Number(
-                        info="If you change this setting, the pricing calculation will be wrong",
-                        label="num_train_epochs",
-                        value=1
-                    )
-                    checkpointing_steps = gr.Number(
-                        info="How many steps to save intermediate checkpoints",
-                        label="checkpointing_steps",
-                        value=100000,
-                        visible=False  # hack to not let users break this for now
-                    )
-                    prior_loss_weight = gr.Number(
-                        label="prior_loss_weight",
-                        value=1
-                    )
-                    gradient_checkpointing = gr.Checkbox(
-                        label="gradient_checkpointing",
-                        info="Whether or not to use gradient checkpointing to save memory at the expense of a slower backward pass",
-                        value=True,
-                    )
-                    adam_beta1 = gr.Number(
-                        label="adam_beta1",
-                        value=0.9,
-                        minimum=0,
-                        maximum=1,
-                        step=0.01
-                    )
-                    adam_beta2 = gr.Number(
-                        label="adam_beta2",
-                        minimum=0,
-                        maximum=1,
-                        step=0.01,
-                        value=0.999
-                    )
-                    use_prodigy_beta3 = gr.Checkbox(
-                        label="Use Prodigy Beta 3?"
-                    )
-                    prodigy_beta3 = gr.Number(
-                        label="Prodigy Beta 3",
-                        value=None,
-                        step=0.01,
-                        minimum=0,
-                        maximum=1,
-                    )
-                    prodigy_decouple = gr.Checkbox(
-                        label="Prodigy Decouple",
-                        value=True
-                    )
-                    adam_weight_decay = gr.Number(
-                        label="Adam Weight Decay",
-                        value=1e-04,
-                        step=0.00001,
-                        minimum=0,
-                        maximum=1,
-                    )
-                    use_adam_weight_decay_text_encoder = gr.Checkbox(
-                        label="Use Adam Weight Decay Text Encoder"
-                    )
-                    adam_weight_decay_text_encoder = gr.Number(
-                        label="Adam Weight Decay Text Encoder",
-                        value=None,
-                        step=0.00001,
-                        minimum=0,
-                        maximum=1,
-                    )
-                    adam_epsilon = gr.Number(
-                        label="Adam Epsilon",
-                        value=1e-08,
-                        step=0.00000001,
-                        minimum=0,
-                        maximum=1,
-                    )
-                    prodigy_use_bias_correction = gr.Checkbox(
-                        label="Prodigy Use Bias Correction",
-                        value=True
-                    )
-                    prodigy_safeguard_warmup = gr.Checkbox(
-                        label="Prodigy Safeguard Warmup",
-                        value=True
-                    )
-                    max_grad_norm = gr.Number(
-                        label="Max Grad Norm",
-                        value=1.0,
-                        minimum=0.1,
-                        maximum=10,
-                        step=0.1,
-                    )
-                    enable_xformers_memory_efficient_attention = gr.Checkbox(
-                        label="enable_xformers_memory_efficient_attention"
-                    )
-                with gr.Column():
-                    scale_lr = gr.Checkbox(
-                        label="Scale learning rate",
-                        info="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size",
-                    )
-                    lr_num_cycles = gr.Number(
-                        label="lr_num_cycles",
-                        value=1
-                    )
-                    lr_scheduler = gr.Dropdown(
-                        label="lr_scheduler",
-                        choices=[
-                            "linear",
-                            "cosine",
-                            "cosine_with_restarts",
-                            "polynomial",
-                            "constant",
-                            "constant_with_warmup",
-                        ],
-                        value="constant",
-                    )
-                    lr_power = gr.Number(
-                        label="lr_power",
-                        value=1.0,
-                        minimum=0.1,
-                        maximum=10
-                    )
-                    lr_warmup_steps = gr.Number(
-                        label="lr_warmup_steps",
-                        value=0
-                    )
-                    dataloader_num_workers = gr.Number(
-                        label="Dataloader num workers", value=0, minimum=0, maximum=64
-                    )
-                    local_rank = gr.Number(
-                        label="local_rank",
-                        value=-1
-                    )
-        with gr.Column(visible=False) as cost_estimation:
-            with gr.Group(elem_id="cost_box"):
-                training_cost_estimate = gr.Markdown(elem_id="training_cost")
-                token = gr.Textbox(label="Your Hugging Face write token", info="A Hugging Face write token you can obtain on the settings page", type="password", placeholder="hf_OhHiThIsIsNoTaReALToKeNGOoDTry")
-        with gr.Group(visible=False) as no_payment_method:
-            with gr.Row():
-                gr.HTML("<h3 style='margin: 0'>Your Hugging Face account doesn't have a payment method set up. Set one up <a href='https://huggingface.co/settings/billing/payment' target='_blank'>here</a> and come back here to train your LoRA</h3>")
-            payment_setup = gr.Button("I have set up a payment method")
-
-        start = gr.Button("Start training", visible=False, interactive=True)
-        progress_area = gr.Markdown("")
-
+    with gr.Tab("Train on Spaces"):
+        with gr.Column(elem_classes=["main_logged"]) as main_ui:
+            lora_name = gr.Textbox(label="The name of your LoRA", info="This has to be a unique name", placeholder="e.g.: Persian Miniature Painting style, Cat Toy")
+            training_option = gr.Radio(
+                label="What are you training?", choices=["object", "style", "character", "face", "custom"]
+            )
+            concept_sentence = gr.Textbox(
+                label="Concept sentence",
+                info="Sentence to be used in all images for captioning. TOK is a special mandatory token, used to teach the model your concept.",
+                placeholder="e.g.: A photo of TOK, in the style of TOK",
+                visible=False,
+                interactive=True,
+            )
+            with gr.Group(visible=False) as image_upload:
+                with gr.Row():
+                    images = gr.File(
+                        file_types=["image"],
+                        label="Upload your images",
+                        file_count="multiple",
+                        interactive=True,
+                        visible=True,
+                        scale=1,
+                    )
+                    with gr.Column(scale=3, visible=False) as captioning_area:
+                        with gr.Column():
+                            gr.Markdown(
+                                """# Custom captioning
+                                To improve the quality of your outputs, you can add a custom caption for each image, describing exactly what is taking place in each of them. Including TOK is mandatory. You can leave things as is if you don't want to include captioning.
+                                """
+                            )
+                            do_captioning = gr.Button("Add AI captions with BLIP-2")
+                            output_components = [captioning_area]
+                            caption_list = []
+                            for i in range(1, MAX_IMAGES + 1):
+                                locals()[f"captioning_row_{i}"] = gr.Row(visible=False)
+                                with locals()[f"captioning_row_{i}"]:
+                                    locals()[f"image_{i}"] = gr.Image(
+                                        width=111,
+                                        height=111,
+                                        min_width=111,
+                                        interactive=False,
+                                        scale=2,
+                                        show_label=False,
+                                        show_share_button=False,
+                                        show_download_button=False
+                                    )
+                                    locals()[f"caption_{i}"] = gr.Textbox(
+                                        label=f"Caption {i}", scale=15, interactive=True
+                                    )
+
+                                output_components.append(locals()[f"captioning_row_{i}"])
+                                output_components.append(locals()[f"image_{i}"])
+                                output_components.append(locals()[f"caption_{i}"])
+                                caption_list.append(locals()[f"caption_{i}"])
+            with gr.Accordion(open=False, label="Advanced options", visible=False, elem_classes=['accordion']) as advanced:
+                with gr.Row():
+                    with gr.Column():
+                        optimizer = gr.Dropdown(
+                            label="Optimizer",
+                            info="Prodigy is an auto-optimizer and works well by default. If you prefer to set your own learning rates, change it to AdamW. If you don't have enough VRAM to train with AdamW, pick 8-bit Adam.",
+                            choices=[
+                                ("Prodigy", "prodigy"),
+                                ("AdamW", "adamW"),
+                                ("8-bit Adam", "8bitadam"),
+                            ],
+                            value="prodigy",
+                            interactive=True,
+                        )
+                        use_snr_gamma = gr.Checkbox(label="Use SNR Gamma")
+                        snr_gamma = gr.Number(
+                            label="snr_gamma",
+                            info="SNR weighting gamma to re-balance the loss",
+                            value=5.000,
+                            step=0.1,
+                            visible=False,
+                        )
+                        mixed_precision = gr.Dropdown(
+                            label="Mixed Precision",
+                            choices=["no", "fp16", "bf16"],
+                            value="bf16",
+                        )
+                        learning_rate = gr.Number(
+                            label="UNet Learning rate",
+                            minimum=0.0,
+                            maximum=10.0,
+                            step=0.0000001,
+                            value=1.0,  # For Prodigy you start high and it will optimize down
+                        )
+                        max_train_steps = gr.Number(
+                            label="Max train steps", minimum=1, maximum=50000, value=1000
+                        )
+                        lora_rank = gr.Number(
+                            label="LoRA Rank",
+                            info="Rank for the Low Rank Adaptation (LoRA); a higher rank produces a larger LoRA",
+                            value=8,
+                            step=2,
+                            minimum=2,
+                            maximum=1024,
+                        )
+                        repeats = gr.Number(
+                            label="Repeats",
+                            info="How many times to repeat the training data.",
+                            value=1,
+                            minimum=1,
+                            maximum=200,
+                        )
+                    with gr.Column():
+                        with_prior_preservation = gr.Checkbox(
+                            label="Prior preservation loss",
+                            info="Prior preservation helps to ground the model to things that are similar to your concept. Good for faces.",
+                            value=False,
+                        )
+                        with gr.Column(visible=False) as prior_preservation_params:
+                            with gr.Tab("prompt"):
+                                class_prompt = gr.Textbox(
+                                    label="Class Prompt",
+                                    info="The prompt that will be used to generate your class images",
+                                )
+
+                            with gr.Tab("images"):
+                                class_images = gr.File(
+                                    file_types=["image"],
+                                    label="Upload your images",
+                                    file_count="multiple",
+                                )
+                                num_class_images = gr.Number(
+                                    label="Number of class images; if fewer images are uploaded than the number you set here, additional images will be sampled with the Class Prompt",
+                                    value=20,
+                                )
+                        train_text_encoder_ti = gr.Checkbox(
+                            label="Do textual inversion",
+                            value=True,
+                            info="Will train a textual inversion embedding together with the LoRA. Increases quality significantly. If untoggled, you can remove the special TOK token from the prompts.",
+                        )
+                        with gr.Group(visible=True) as pivotal_tuning_params:
+                            train_text_encoder_ti_frac = gr.Number(
+                                label="Pivot Textual Inversion",
+                                info="% of epochs to train textual inversion for",
+                                value=0.5,
+                                step=0.1,
+                            )
+                            num_new_tokens_per_abstraction = gr.Number(
+                                label="Tokens to train",
+                                info="Number of tokens to train in the textual inversion",
+                                value=2,
+                                minimum=1,
+                                maximum=1024,
+                                interactive=True,
+                            )
+                        with gr.Group(visible=False) as text_encoder_train_params:
+                            train_text_encoder = gr.Checkbox(
+                                label="Train Text Encoder", value=True
+                            )
+                            train_text_encoder_frac = gr.Number(
+                                label="Pivot Text Encoder",
+                                info="% of epochs to train the text encoder for",
+                                value=0.8,
+                                step=0.1,
+                            )
+                            text_encoder_learning_rate = gr.Number(
+                                label="Text encoder learning rate",
+                                minimum=0.0,
+                                maximum=10.0,
+                                step=0.0000001,
+                                value=1.0,
+                            )
+                        seed = gr.Number(label="Seed", value=42)
+                        resolution = gr.Number(
+                            label="Resolution",
+                            info="Only square sizes are supported for now; the value will be both width and height",
+                            value=1024,
+                        )
+
+            with gr.Accordion(open=False, label="Even more advanced options", elem_classes=['accordion']):
+                with gr.Row():
+                    with gr.Column():
+                        gradient_accumulation_steps = gr.Number(
+                            info="If you change this setting, the pricing calculation will be wrong",
+                            label="gradient_accumulation_steps",
+                            value=1
+                        )
+                        train_batch_size = gr.Number(
+                            info="If you change this setting, the pricing calculation will be wrong",
+                            label="Train batch size",
+                            value=2
+                        )
+                        num_train_epochs = gr.Number(
+                            info="If you change this setting, the pricing calculation will be wrong",
+                            label="num_train_epochs",
+                            value=1
+                        )
+                        checkpointing_steps = gr.Number(
+                            info="How many steps to save intermediate checkpoints",
+                            label="checkpointing_steps",
+                            value=100000,
+                            visible=False  # hack to not let users break this for now
+                        )
+                        prior_loss_weight = gr.Number(
+                            label="prior_loss_weight",
+                            value=1
+                        )
+                        gradient_checkpointing = gr.Checkbox(
+                            label="gradient_checkpointing",
+                            info="Whether or not to use gradient checkpointing to save memory at the expense of a slower backward pass",
+                            value=True,
+                        )
+                        adam_beta1 = gr.Number(
+                            label="adam_beta1",
+                            value=0.9,
+                            minimum=0,
+                            maximum=1,
+                            step=0.01
+                        )
+                        adam_beta2 = gr.Number(
+                            label="adam_beta2",
+                            minimum=0,
+                            maximum=1,
+                            step=0.01,
+                            value=0.999
+                        )
+                        use_prodigy_beta3 = gr.Checkbox(
+                            label="Use Prodigy Beta 3?"
+                        )
+                        prodigy_beta3 = gr.Number(
+                            label="Prodigy Beta 3",
+                            value=None,
+                            step=0.01,
+                            minimum=0,
+                            maximum=1,
+                        )
+                        prodigy_decouple = gr.Checkbox(
+                            label="Prodigy Decouple",
+                            value=True
+                        )
+                        adam_weight_decay = gr.Number(
+                            label="Adam Weight Decay",
+                            value=1e-04,
+                            step=0.00001,
+                            minimum=0,
+                            maximum=1,
+                        )
+                        use_adam_weight_decay_text_encoder = gr.Checkbox(
+                            label="Use Adam Weight Decay Text Encoder"
+                        )
+                        adam_weight_decay_text_encoder = gr.Number(
+                            label="Adam Weight Decay Text Encoder",
+                            value=None,
+                            step=0.00001,
+                            minimum=0,
+                            maximum=1,
+                        )
+                        adam_epsilon = gr.Number(
+                            label="Adam Epsilon",
+                            value=1e-08,
+                            step=0.00000001,
+                            minimum=0,
+                            maximum=1,
+                        )
+                        prodigy_use_bias_correction = gr.Checkbox(
+                            label="Prodigy Use Bias Correction",
+                            value=True
+                        )
+                        prodigy_safeguard_warmup = gr.Checkbox(
+                            label="Prodigy Safeguard Warmup",
+                            value=True
+                        )
+                        max_grad_norm = gr.Number(
+                            label="Max Grad Norm",
+                            value=1.0,
+                            minimum=0.1,
+                            maximum=10,
+                            step=0.1,
+                        )
+                        enable_xformers_memory_efficient_attention = gr.Checkbox(
+                            label="enable_xformers_memory_efficient_attention"
+                        )
+                    with gr.Column():
+                        scale_lr = gr.Checkbox(
+                            label="Scale learning rate",
+                            info="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size",
+                        )
+                        lr_num_cycles = gr.Number(
+                            label="lr_num_cycles",
+                            value=1
+                        )
+                        lr_scheduler = gr.Dropdown(
+                            label="lr_scheduler",
+                            choices=[
+                                "linear",
+                                "cosine",
+                                "cosine_with_restarts",
+                                "polynomial",
+                                "constant",
+                                "constant_with_warmup",
+                            ],
+                            value="constant",
+                        )
+                        lr_power = gr.Number(
+                            label="lr_power",
+                            value=1.0,
+                            minimum=0.1,
+                            maximum=10
+                        )
+                        lr_warmup_steps = gr.Number(
+                            label="lr_warmup_steps",
+                            value=0
+                        )
+                        dataloader_num_workers = gr.Number(
+                            label="Dataloader num workers", value=0, minimum=0, maximum=64
+                        )
+                        local_rank = gr.Number(
+                            label="local_rank",
+                            value=-1
+                        )
+            with gr.Column(visible=False) as cost_estimation:
+                with gr.Group(elem_id="cost_box"):
+                    training_cost_estimate = gr.Markdown(elem_id="training_cost")
+                    token = gr.Textbox(label="Your Hugging Face write token", info="A Hugging Face write token you can obtain on the settings page", type="password", placeholder="hf_OhHiThIsIsNoTaReALToKeNGOoDTry")
+            with gr.Group(visible=False) as no_payment_method:
+                with gr.Row():
+                    gr.HTML("<h3 style='margin: 0'>Your Hugging Face account doesn't have a payment method set up. Set one up <a href='https://huggingface.co/settings/billing/payment' target='_blank'>here</a> and come back here to train your LoRA</h3>")
+                payment_setup = gr.Button("I have set up a payment method")
+
+            start = gr.Button("Start training", visible=False, interactive=True)
+            progress_area = gr.Markdown("")
+    with gr.Tab("Train locally"):
+        gr.Markdown(f'''To use LoRA Ease locally with a UI, you can clone this repository (yes, HF Spaces are git repos!):
+        ```bash
+        git clone https://huggingface.co/spaces/multimodalart/lora-ease
+        ```
+
+        Install the dependencies from `requirements_local.txt` with
+
+        ```bash
+        pip install -r requirements_local.txt
+        ```
+        (if you prefer, do this inside a venv)
+
+        Now you can run LoRA Ease locally with a simple
+        ```bash
+        python app.py
+        ```
+
+        If you prefer the command line, you can run our [training script]({training_script_url}) yourself.
+        ''')
     #gr.LogoutButton(elem_classes=["login_logout"])
     output_components.insert(1, advanced)
     output_components.insert(1, cost_estimation)
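
For reference, the diff boils down to one structural change: the existing training UI moves under a "Train on Spaces" tab, and a sibling "Train locally" tab is added. Below is a minimal, self-contained sketch of that tab layout; the component names and labels are illustrative placeholders, not the Space's actual ones, and only the `gr.Blocks`/`gr.Tab` nesting mirrors the commit:

```python
import gradio as gr

# Minimal sketch of the new two-tab layout: the training form sits under
# "Train on Spaces", while "Train locally" holds the run-it-yourself notes.
with gr.Blocks() as demo:
    gr.Markdown("### Train a high quality SDXL LoRA in a breeze ༄")
    with gr.Tab("Train on Spaces"):
        lora_name = gr.Textbox(label="The name of your LoRA")  # placeholder for the full form
        start = gr.Button("Start training")
    with gr.Tab("Train locally"):
        gr.Markdown("Clone the Space, `pip install -r requirements_local.txt`, then run `python app.py`.")

if __name__ == "__main__":
    demo.launch()
```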