multimodalart HF staff committed on
Commit
fcca0da
1 Parent(s): a650e86

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +21 -2
app.py CHANGED
@@ -68,6 +68,7 @@ def load_captioning(uploaded_images, concept_sentence):
68
 
69
  # Update for the sample caption area
70
  updates.append(gr.update(visible=True))
 
71
  updates.append(gr.update(placeholder=f'A portrait of person in a bustling cafe {concept_sentence}', value=f'A person in a bustling cafe {concept_sentence}'))
72
  updates.append(gr.update(placeholder=f"A mountainous landscape in the style of {concept_sentence}"))
73
  updates.append(gr.update(placeholder=f"A {concept_sentence} in a mall"))
@@ -238,6 +239,15 @@ def swap_visibilty(profile: Union[gr.OAuthProfile, None]):
238
  else:
239
  return gr.update(elem_classes=["main_ui_logged_in"])
240
 
 
 
 
 
 
 
 
 
 
241
 
242
  with gr.Blocks(theme=theme, css=css) as demo:
243
  gr.Markdown(
@@ -318,7 +328,8 @@ with gr.Blocks(theme=theme, css=css) as demo:
318
  sample_1 = gr.Textbox(label="Test prompt 1")
319
  sample_2 = gr.Textbox(label="Test prompt 2")
320
  sample_3 = gr.Textbox(label="Test prompt 3")
321
-
 
322
  output_components.append(sample)
323
  output_components.append(sample_1)
324
  output_components.append(sample_2)
@@ -367,7 +378,15 @@ with gr.Blocks(theme=theme, css=css) as demo:
367
 
368
  dataset_folder = gr.State()
369
 
370
- images.upload(load_captioning, inputs=[images, concept_sentence], outputs=output_components)
 
 
 
 
 
 
 
 
371
 
372
  start.click(fn=create_dataset, inputs=[images] + caption_list, outputs=dataset_folder).then(
373
  fn=start_training,
 
68
 
69
  # Update for the sample caption area
70
  updates.append(gr.update(visible=True))
71
+ # Update prompt samples
72
  updates.append(gr.update(placeholder=f'A portrait of person in a bustling cafe {concept_sentence}', value=f'A person in a bustling cafe {concept_sentence}'))
73
  updates.append(gr.update(placeholder=f"A mountainous landscape in the style of {concept_sentence}"))
74
  updates.append(gr.update(placeholder=f"A {concept_sentence} in a mall"))
 
239
  else:
240
  return gr.update(elem_classes=["main_ui_logged_in"])
241
 
242
+ def update_pricing(steps):
243
+ updates.append()
244
+ seconds_per_iteration = 7.54
245
+ total_seconds = (steps * seconds_per_iteration) + 240
246
+ cost_per_second = 0.80/60/60
247
+ cost = round(cost_per_second * total_seconds, 2)
248
+ cost_preview = f'''To train this LoRA, a paid L4 GPU will be hooked under the hood during training and then removed once finished.
249
+ ## Estimated to cost <b>< US$ {str(cost)}</b> for {round(int(total_seconds)/60, 2)} minutes with your current train settings <small>({int(iterations)} iterations at {seconds_per_iteration}s/it)</small>'''
250
+ return gr.update(visible=True), gr.update(cost_preview)
251
 
252
  with gr.Blocks(theme=theme, css=css) as demo:
253
  gr.Markdown(
 
328
  sample_1 = gr.Textbox(label="Test prompt 1")
329
  sample_2 = gr.Textbox(label="Test prompt 2")
330
  sample_3 = gr.Textbox(label="Test prompt 3")
331
+ with gr.Group(visible=False) as cost_preview:
332
+ cost_preview_info = gr.Markdown()
333
  output_components.append(sample)
334
  output_components.append(sample_1)
335
  output_components.append(sample_2)
 
378
 
379
  dataset_folder = gr.State()
380
 
381
+ images.upload(
382
+ load_captioning,
383
+ inputs=[images, concept_sentence],
384
+ outputs=output_components
385
+ ).then(
386
+ update_pricing,
387
+ inputs=[steps],
388
+ outputs=[cost_preview, cost_preview_info]
389
+ )
390
 
391
  start.click(fn=create_dataset, inputs=[images] + caption_list, outputs=dataset_folder).then(
392
  fn=start_training,