Explicit image upload field suggestion #40
opened by multimodalart

app_dialogue.py CHANGED (+89 -122)
@@ -333,6 +333,13 @@ textbox = gr.Textbox(
     label="Text input",
     scale=6,
 )
+chatbot = gr.Chatbot(
+    elem_id="chatbot",
+    label="IDEFICS",
+    visible=True,
+    height=750,
+    avatar_images=[None, BOT_AVATAR]
+)
 with gr.Blocks(title="IDEFICS Playground", theme=gr.themes.Base()) as demo:
     gr.HTML("""<h1 align="center">🐶 IDEFICS Playground</h1>""")
     with gr.Row(variant="panel"):
@@ -360,128 +367,88 @@ with gr.Blocks(title="IDEFICS Playground", theme=gr.themes.Base()) as demo:
             visible=False,
         )
 
-    imagebox = gr.Image(type="filepath", label="Image input", visible=False)
-
-    with gr.Row():
-        # def prefetch_images_in_history(user_prompt_str):
-        #     """
-        #     Pre-fetch the images that are passed in the chatbot default history.
-        #     """
-        #     return prompt_list_to_markdown(handle_manual_images_in_user_prompt(user_prompt_str))
-
-        chatbot = gr.Chatbot(
-            elem_id="chatbot",
-            label="IDEFICS",
-            visible=True,
-            height=750,
-            avatar_images=[None, BOT_AVATAR]
-            # value=[
-            #     [
-            #         (
-            #             prefetch_images_in_history(
-            #                 """Write a funny story including the following characters.
-            # There should be drama and most importantly romance.
-            # Character 1:<fake_token_around_image><image:https://m.media-amazon.com/images/M/MV5BODg1OTczMWEtNTU3MS00OTUzLThjODEtNDg1MWQwZmExYmFlXkEyXkFqcGdeQWFybm8@._V1_QL75_UX500_CR0,0,500,281_.jpg><fake_token_around_image>
-            # Character 2:<fake_token_around_image><image:https://static.wikia.nocookie.net/dreamworks/images/0/09/Shrek_dreamworks.png/revision/latest?cb=20170921225439><fake_token_around_image>
-            # Character 3:<fake_token_around_image><image:https://upload.wikimedia.org/wikipedia/en/e/ef/Marsupilami_1.jpg><fake_token_around_image>
-            # The plot should take place in the world pictured here:<fake_token_around_image><image:https://www.astronomy.com/wp-content/uploads/sites/2/2021/09/ON1.jpg><fake_token_around_image>"""
-            #             )
-            #         ),
-            #         """Once upon a time, in a world where the sky was a dome and the ground was covered in grass, there lived a beautiful princess named Mulan. She was kind, brave, and always ready to help others. One day, while walking through the forest, she stumbled upon a strange creature. It was a green ogre named Shrek, who was lost and looking for his way back home.
-            # Mulan, being the kind-hearted person she was, decided to help Shrek find his way back. As they traveled together, they began to develop a strong bond. Shrek was impressed by Mulan's bravery and determination, while Mulan admired Shrek's loyalty and sense of humor.
-            # However, their journey was not without obstacles. They encountered a mischievous cat named Hoboua, who was always causing trouble and getting into fights. Despite his troublemaking ways, Hoboua had a good heart and eventually became a valuable ally to Mulan and Shrek.
-            # As they traveled, they faced many challenges, including battling fierce dragons and navigating treacherous mountains. Through it all, Mulan and Shrek grew closer, and their feelings for each other deepened.
-            # Finally, they reached Shrek's home, and he was reunited with his family and friends. Mulan, however, was sad to leave him behind. But Shrek had a surprise for her. He had fallen in love with her and wanted to be with her forever.
-            # Mulan was overjoyed, and they shared a passionate kiss. From that day on, they lived happily ever after, exploring the world together and facing any challenges that came their way.
-            # And so, the story of Mulan and Shrek's romance came to an end, leaving a lasting impression on all who heard it.""",
-            #     ],
-            # ],
-        )
-
-        with gr.Group():
-            with gr.Row():
-                textbox.render()
-                submit_btn = gr.Button(value="▶️ Submit", visible=True)
-                clear_btn = gr.ClearButton([textbox, imagebox, chatbot], value="🧹 Clear")
-                regenerate_btn = gr.Button(value="🔄 Regenerate", visible=True)
-                upload_btn = gr.UploadButton("📁 Upload image", file_types=["image"])
-        # with gr.Group():
-        #     with gr.Row():
-        #         with gr.Column(scale=1, min_width=50):
-        #             dope_bttn = gr.Button("Dope🔥")
-        #         with gr.Column(scale=1, min_width=50):
-        #             problematic_bttn = gr.Button("Problematic😬")
-
     with gr.Row():
-        with gr.Accordion("Advanced settings", open=False, visible=True) as parameter_row:
-            max_new_tokens = gr.Slider(
-                minimum=8,
-                maximum=1024,
-                value=512,
-                step=1,
-                interactive=True,
-                label="Maximum number of new tokens to generate",
-            )
-            repetition_penalty = gr.Slider(
-                minimum=0.0,
-                maximum=5.0,
-                value=1.0,
-                step=0.01,
-                interactive=True,
-                label="Repetition penalty",
-                info="1.0 is equivalent to no penalty",
-            )
-            decoding_strategy = gr.Radio(
-                [
-                    "Greedy",
-                    "Top P Sampling",
-                ],
-                value="Greedy",
-                label="Decoding strategy",
-                interactive=True,
-                info="Higher values is equivalent to sampling more low-probability tokens.",
-            )
-            temperature = gr.Slider(
-                minimum=0.0,
-                maximum=5.0,
-                value=0.4,
-                step=0.1,
-                interactive=True,
-                visible=False,
-                label="Sampling temperature",
-                info="Higher values will produce more diverse outputs.",
-            )
-            decoding_strategy.change(
-                fn=lambda selection: gr.Slider.update(
-                    visible=(
-                        selection in ["contrastive_sampling", "beam_sampling", "Top P Sampling", "sampling_top_k"]
-                    )
-                ),
-                inputs=decoding_strategy,
-                outputs=temperature,
-            )
-            top_p = gr.Slider(
-                minimum=0.01,
-                maximum=0.99,
-                value=0.8,
-                step=0.01,
-                interactive=True,
-                visible=False,
-                label="Top P",
-                info="Higher values is equivalent to sampling more low-probability tokens.",
-            )
-            decoding_strategy.change(
-                fn=lambda selection: gr.Slider.update(visible=(selection in ["Top P Sampling"])),
-                inputs=decoding_strategy,
-                outputs=top_p,
-            )
-        gr.Markdown(
-            """<p><strong>💡 Pro tip</strong>:<br>
-            You can input an arbitrary number of images at arbitrary positions in the same query.<br>
-            You will need to input each image with its URL with the syntax <code><fake_token_around_image><image:IMAGE_URL><fake_token_around_image></code>.<br>
-            For example, for two images, you could input <code>TEXT_1<fake_token_around_image><image:IMAGE_URL_1><fake_token_around_image>TEXT_2<fake_token_around_image><image:IMAGE_URL_2><fake_token_around_image>TEXT_3</code>.<br>
-            In the particular case where two images are consecutive, it is not necessary to add an additional separator: <code><fake_token_around_image><image:IMAGE_URL_1><fake_token_around_image><image:IMAGE_URL_2><fake_token_around_image></code>.</p>"""
-        )
+        with gr.Column():
+            imagebox = gr.Image(type="filepath", label="Image input")
+            with gr.Group():
+                with gr.Row():
+                    textbox.render()
+                    submit_btn = gr.Button(value="▶️ Submit", visible=True)
+                with gr.Row():
+                    clear_btn = gr.ClearButton([textbox, imagebox, chatbot], value="🧹 Clear")
+                    regenerate_btn = gr.Button(value="🔄 Regenerate", visible=True)
+                    upload_btn = gr.UploadButton("📁 Upload image", file_types=["image"], visible=False)
+            with gr.Accordion("Advanced settings", open=False, visible=True) as parameter_row:
+                max_new_tokens = gr.Slider(
+                    minimum=8,
+                    maximum=1024,
+                    value=512,
+                    step=1,
+                    interactive=True,
+                    label="Maximum number of new tokens to generate",
+                )
+                repetition_penalty = gr.Slider(
+                    minimum=0.0,
+                    maximum=5.0,
+                    value=1.0,
+                    step=0.01,
+                    interactive=True,
+                    label="Repetition penalty",
+                    info="1.0 is equivalent to no penalty",
+                )
+                decoding_strategy = gr.Radio(
+                    [
+                        "Greedy",
+                        "Top P Sampling",
+                    ],
+                    value="Greedy",
+                    label="Decoding strategy",
+                    interactive=True,
+                    info="Higher values is equivalent to sampling more low-probability tokens.",
+                )
+                temperature = gr.Slider(
+                    minimum=0.0,
+                    maximum=5.0,
+                    value=0.4,
+                    step=0.1,
+                    interactive=True,
+                    visible=False,
+                    label="Sampling temperature",
+                    info="Higher values will produce more diverse outputs.",
+                )
+                decoding_strategy.change(
+                    fn=lambda selection: gr.Slider.update(
+                        visible=(
+                            selection in ["contrastive_sampling", "beam_sampling", "Top P Sampling", "sampling_top_k"]
+                        )
+                    ),
+                    inputs=decoding_strategy,
+                    outputs=temperature,
+                )
+                top_p = gr.Slider(
+                    minimum=0.01,
+                    maximum=0.99,
+                    value=0.8,
+                    step=0.01,
+                    interactive=True,
+                    visible=False,
+                    label="Top P",
+                    info="Higher values is equivalent to sampling more low-probability tokens.",
+                )
+                decoding_strategy.change(
+                    fn=lambda selection: gr.Slider.update(visible=(selection in ["Top P Sampling"])),
+                    inputs=decoding_strategy,
+                    outputs=top_p,
+                )
+            gr.Markdown(
+                """<p><strong>💡 Pro tip</strong>:<br>
+                You can input an arbitrary number of images at arbitrary positions in the same query.<br>
+                You will need to input each image with its URL with the syntax <code><fake_token_around_image><image:IMAGE_URL><fake_token_around_image></code>.<br>
+                For example, for two images, you could input <code>TEXT_1<fake_token_around_image><image:IMAGE_URL_1><fake_token_around_image>TEXT_2<fake_token_around_image><image:IMAGE_URL_2><fake_token_around_image>TEXT_3</code>.<br>
+                In the particular case where two images are consecutive, it is not necessary to add an additional separator: <code><fake_token_around_image><image:IMAGE_URL_1><fake_token_around_image><image:IMAGE_URL_2><fake_token_around_image></code>.</p>"""
+            )
+        with gr.Column():
+            chatbot.render()
 
     def model_inference(
         model_selector,
@@ -867,7 +834,7 @@ with gr.Blocks(title="IDEFICS Playground", theme=gr.themes.Base()) as demo:
         inputs=[textbox, imagebox],
         outputs=[textbox, imagebox, chatbot],
         fn=process_example,
-        cache_examples=
+        cache_examples=False,
         examples_per_page=6,
         label=(
            "Click on any example below to get started.\nFor convenience, the model generations have been"
@@ -876,4 +843,4 @@ with gr.Blocks(title="IDEFICS Playground", theme=gr.themes.Base()) as demo:
     )
 
 demo.queue(concurrency_count=40, max_size=40)
-demo.launch()
+demo.launch()
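
A note for reviewers on the pattern used above: the two `decoding_strategy.change(...)` handlers drive slider visibility from the radio's value, so only the controls relevant to the selected strategy are shown. A minimal standalone sketch of the same pattern (assuming Gradio 3.x, where `gr.Slider.update` is available; the component names here are illustrative, not taken from this PR):

import gradio as gr

with gr.Blocks() as demo:
    # The radio's value decides which sampling controls are relevant.
    strategy = gr.Radio(["Greedy", "Top P Sampling"], value="Greedy", label="Decoding strategy")
    # Hidden by default; only shown when a sampling strategy is selected.
    top_p = gr.Slider(minimum=0.01, maximum=0.99, value=0.8, step=0.01, visible=False, label="Top P")

    # On every change, return an update that only touches the slider's visibility.
    strategy.change(
        fn=lambda selection: gr.Slider.update(visible=(selection == "Top P Sampling")),
        inputs=strategy,
        outputs=top_p,
    )

demo.launch()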
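
Similarly, the Pro tip Markdown added in this diff documents the interleaved-image prompt syntax. A quick sketch of how a caller might assemble such a prompt (the `wrap_image` helper and the example URLs are hypothetical; only the token syntax is taken verbatim from the Markdown above):

# Each image is referenced by URL and wrapped in the fake image tokens:
#   <fake_token_around_image><image:IMAGE_URL><fake_token_around_image>
def wrap_image(url: str) -> str:  # hypothetical helper, not part of this PR
    return f"<fake_token_around_image><image:{url}><fake_token_around_image>"

# Text and images interleave freely; per the tip, two consecutive images
# can share a single <fake_token_around_image> separator instead.
prompt = (
    "Character 1:"
    + wrap_image("https://example.com/cat.jpg")
    + "Character 2:"
    + wrap_image("https://example.com/dog.jpg")
    + "Write a story about these two characters."
)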