multimodalart committed
Commit f6521a5
1 Parent(s): 7fa432c

Update app.py

Files changed (1):
  1. app.py +7 -3
app.py CHANGED
@@ -859,18 +859,18 @@ with gr.Blocks(css=css, theme=theme) as demo:
         fn=check_token,
         inputs=token,
         outputs=[no_payment_method, start],
-        concurrency_limit=50
+        concurrency_limit=50,
     )
     concept_sentence.change(
         check_if_tok,
         inputs=[concept_sentence, train_text_encoder_ti],
-        concurrency_limit=50
+        concurrency_limit=50,
     )
     use_snr_gamma.change(
         lambda x: gr.update(visible=x),
         inputs=use_snr_gamma,
         outputs=snr_gamma,
-        queue=False
+        queue=False,
     )
     with_prior_preservation.change(
         lambda x: gr.update(visible=x),
@@ -888,6 +888,10 @@ with gr.Blocks(css=css, theme=theme) as demo:
         inputs=train_text_encoder_ti,
         outputs=text_encoder_train_params,
         queue=False,
+    ).then(
+        lambda x: gr.Warning("As you have disabled Pivotal Tuning, you can remove TOK from your prompts and try to find a unique token for them") if not x else None,
+        inputs=train_text_encoder_ti,
+        concurrency_limit=50,
     )
     train_text_encoder.change(
         lambda x: [gr.update(visible=x), gr.update(visible=x)],
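
For context, the new .then() block follows Gradio's standard event-chaining pattern: the first listener toggles component visibility, and the chained step raises a non-blocking gr.Warning toast when Pivotal Tuning is switched off. Below is a minimal, self-contained sketch of that pattern, assuming the Gradio 4.x event API; the component labels, the slider, and the visibility lambda are illustrative stand-ins, not code copied from app.py.

import gradio as gr

with gr.Blocks() as demo:
    # Illustrative stand-ins for the components wired up in app.py.
    train_text_encoder_ti = gr.Checkbox(label="Pivotal Tuning", value=True)
    with gr.Group(visible=True) as text_encoder_train_params:
        gr.Slider(label="Text encoder LR", minimum=1e-6, maximum=1e-3, value=5e-5)

    train_text_encoder_ti.change(
        # First step: keep the parameter group's visibility in sync with the checkbox.
        lambda x: gr.update(visible=x),
        inputs=train_text_encoder_ti,
        outputs=text_encoder_train_params,
        queue=False,
    ).then(
        # Chained step: raise a non-blocking warning toast only when the box is unticked.
        # concurrency_limit=50 allows up to 50 simultaneous runs of this step.
        lambda x: gr.Warning("Pivotal Tuning disabled") if not x else None,
        inputs=train_text_encoder_ti,
        concurrency_limit=50,
    )

demo.launch()

Because gr.Warning only raises a toast and returns nothing, the chained step declares no outputs; the concurrency_limit=50 mirrors the limit this commit also adds to the check_token and check_if_tok listeners.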