Spaces:
Runtime error
Runtime error
VictorSanh
committed on
Commit
•
6f82f94
1
Parent(s):
fb1e744
4 more examples
Browse files
- app_dialogue.py +31 -26
- example_images/barbie.jpeg +0 -0
- example_images/cat_cloud.jpeg +0 -0
- example_images/eye_glasses.jpeg +0 -0
- example_images/oppenheimer.jpeg +0 -0
- example_images/shampoo.jpg +0 -0
app_dialogue.py
CHANGED
@@ -17,13 +17,9 @@ import torch
|
|
17 |
from threading import Thread
|
18 |
from typing import List, Dict, Union
|
19 |
import urllib
|
20 |
-
from urllib.parse import urlparse
|
21 |
from PIL import Image
|
22 |
import io
|
23 |
-
import pandas as pd
|
24 |
import datasets
|
25 |
-
import json
|
26 |
-
import requests
|
27 |
|
28 |
import gradio as gr
|
29 |
from transformers import AutoProcessor, TextIteratorStreamer
|
@@ -81,7 +77,7 @@ EXAMPLES = [
|
|
81 |
],
|
82 |
[
|
83 |
{
|
84 |
-
"text": "Why is this image cute",
|
85 |
"files": [
|
86 |
f"{examples_path}/example_images/kittens-cats-pet-cute-preview.jpg"
|
87 |
],
|
@@ -123,6 +119,30 @@ EXAMPLES = [
|
|
123 |
"files": [f"{examples_path}/example_images/Van-Gogh-Starry-Night.jpg"],
|
124 |
}
|
125 |
],
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
126 |
]
|
127 |
|
128 |
API_TOKEN = os.getenv("HF_AUTH_TOKEN")
|
@@ -339,9 +359,9 @@ def flag_dope(
|
|
339 |
if isinstance(ex[0], dict):
|
340 |
images.append(img_to_bytes(ex[0]["file"]["path"]))
|
341 |
else:
|
342 |
-
|
343 |
conversation.append({"User": ex[0], "Assistant": ex[1]})
|
344 |
-
|
345 |
data = {
|
346 |
"model_selector": [model_selector],
|
347 |
"images": [images],
|
@@ -376,9 +396,9 @@ def flag_problematic(
|
|
376 |
if isinstance(ex[0], dict):
|
377 |
images.append(img_to_bytes(ex[0]["file"]["path"]))
|
378 |
else:
|
379 |
-
|
380 |
conversation.append({"User": ex[0], "Assistant": ex[1]})
|
381 |
-
|
382 |
data = {
|
383 |
"model_selector": [model_selector],
|
384 |
"images": [images],
|
@@ -475,7 +495,7 @@ with gr.Blocks(
|
|
475 |
gr.Markdown("# 🐶 Idefics2-Chatty Playground 🐶")
|
476 |
gr.Markdown("In this demo you'll be able to chat with [Idefics2-8B-chatty](https://huggingface.co/HuggingFaceM4/idefics2-8b-chatty), a variant of [Idefics2-8B](https://huggingface.co/HuggingFaceM4/idefics2-8b-chatty) further fine-tuned on chat datasets")
|
477 |
gr.Markdown("If you want to learn more about Idefics2 and its variants, you can check our [blog post](https://huggingface.co/blog/idefics2).")
|
478 |
-
|
479 |
# model selector should be set to `visbile=False` ultimately
|
480 |
with gr.Row(elem_id="model_selector_row"):
|
481 |
model_selector = gr.Dropdown(
|
@@ -487,7 +507,7 @@ with gr.Blocks(
|
|
487 |
label="Model",
|
488 |
visible=False,
|
489 |
)
|
490 |
-
|
491 |
decoding_strategy.change(
|
492 |
fn=lambda selection: gr.Slider(
|
493 |
visible=(
|
@@ -503,21 +523,6 @@ with gr.Blocks(
|
|
503 |
inputs=decoding_strategy,
|
504 |
outputs=temperature,
|
505 |
)
|
506 |
-
decoding_strategy.change(
|
507 |
-
fn=lambda selection: gr.Slider(
|
508 |
-
visible=(
|
509 |
-
selection
|
510 |
-
in [
|
511 |
-
"contrastive_sampling",
|
512 |
-
"beam_sampling",
|
513 |
-
"Top P Sampling",
|
514 |
-
"sampling_top_k",
|
515 |
-
]
|
516 |
-
)
|
517 |
-
),
|
518 |
-
inputs=decoding_strategy,
|
519 |
-
outputs=repetition_penalty,
|
520 |
-
)
|
521 |
decoding_strategy.change(
|
522 |
fn=lambda selection: gr.Slider(visible=(selection in ["Top P Sampling"])),
|
523 |
inputs=decoding_strategy,
|
|
|
17 |
from threading import Thread
|
18 |
from typing import List, Dict, Union
|
19 |
import urllib
|
|
|
20 |
from PIL import Image
|
21 |
import io
|
|
|
22 |
import datasets
|
|
|
|
|
23 |
|
24 |
import gradio as gr
|
25 |
from transformers import AutoProcessor, TextIteratorStreamer
|
|
|
77 |
],
|
78 |
[
|
79 |
{
|
80 |
+
"text": "Why is this image cute?",
|
81 |
"files": [
|
82 |
f"{examples_path}/example_images/kittens-cats-pet-cute-preview.jpg"
|
83 |
],
|
|
|
119 |
"files": [f"{examples_path}/example_images/Van-Gogh-Starry-Night.jpg"],
|
120 |
}
|
121 |
],
|
122 |
+
[
|
123 |
+
{
|
124 |
+
"text": "Describe this image in detail and explain why it is disturbing.",
|
125 |
+
"files": [f"{examples_path}/example_images/cat_cloud.jpeg"],
|
126 |
+
}
|
127 |
+
],
|
128 |
+
[
|
129 |
+
{
|
130 |
+
"text": "Why is that image comical?",
|
131 |
+
"files": [f"{examples_path}/example_images/eye_glasses.jpeg"],
|
132 |
+
}
|
133 |
+
],
|
134 |
+
[
|
135 |
+
{
|
136 |
+
"text": "Write an online add for that product.",
|
137 |
+
"files": [f"{examples_path}/example_images/shampoo.jpg"],
|
138 |
+
}
|
139 |
+
],
|
140 |
+
[
|
141 |
+
{
|
142 |
+
"text": "The respective main characters of these two movies meet in real life. Imagine their discussion. It should be sassy, and the beginning of a mysterious adventure.",
|
143 |
+
"files": [f"{examples_path}/example_images/barbie.jpeg", f"{examples_path}/example_images/oppenheimer.jpeg"],
|
144 |
+
}
|
145 |
+
],
|
146 |
]
|
147 |
|
148 |
API_TOKEN = os.getenv("HF_AUTH_TOKEN")
|
|
|
359 |
if isinstance(ex[0], dict):
|
360 |
images.append(img_to_bytes(ex[0]["file"]["path"]))
|
361 |
else:
|
362 |
+
|
363 |
conversation.append({"User": ex[0], "Assistant": ex[1]})
|
364 |
+
|
365 |
data = {
|
366 |
"model_selector": [model_selector],
|
367 |
"images": [images],
|
|
|
396 |
if isinstance(ex[0], dict):
|
397 |
images.append(img_to_bytes(ex[0]["file"]["path"]))
|
398 |
else:
|
399 |
+
|
400 |
conversation.append({"User": ex[0], "Assistant": ex[1]})
|
401 |
+
|
402 |
data = {
|
403 |
"model_selector": [model_selector],
|
404 |
"images": [images],
|
|
|
495 |
gr.Markdown("# 🐶 Idefics2-Chatty Playground 🐶")
|
496 |
gr.Markdown("In this demo you'll be able to chat with [Idefics2-8B-chatty](https://huggingface.co/HuggingFaceM4/idefics2-8b-chatty), a variant of [Idefics2-8B](https://huggingface.co/HuggingFaceM4/idefics2-8b-chatty) further fine-tuned on chat datasets")
|
497 |
gr.Markdown("If you want to learn more about Idefics2 and its variants, you can check our [blog post](https://huggingface.co/blog/idefics2).")
|
498 |
+
|
499 |
# model selector should be set to `visbile=False` ultimately
|
500 |
with gr.Row(elem_id="model_selector_row"):
|
501 |
model_selector = gr.Dropdown(
|
|
|
507 |
label="Model",
|
508 |
visible=False,
|
509 |
)
|
510 |
+
|
511 |
decoding_strategy.change(
|
512 |
fn=lambda selection: gr.Slider(
|
513 |
visible=(
|
|
|
523 |
inputs=decoding_strategy,
|
524 |
outputs=temperature,
|
525 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
526 |
decoding_strategy.change(
|
527 |
fn=lambda selection: gr.Slider(visible=(selection in ["Top P Sampling"])),
|
528 |
inputs=decoding_strategy,
|
example_images/barbie.jpeg
ADDED
example_images/cat_cloud.jpeg
ADDED
example_images/eye_glasses.jpeg
ADDED
example_images/oppenheimer.jpeg
ADDED
example_images/shampoo.jpg
ADDED