freddyaboulton HF staff committed on
Commit
dcd93f8
1 Parent(s): de056e5

Upload folder using huggingface_hub

Browse files
Files changed (4) hide show
  1. README.md +1 -1
  2. run.ipynb +1 -1
  3. run.py +24 -23
  4. tuples_testcase.py +46 -0
README.md CHANGED
@@ -5,7 +5,7 @@ emoji: 🔥
5
  colorFrom: indigo
6
  colorTo: indigo
7
  sdk: gradio
8
- sdk_version: 4.44.1
9
  app_file: run.py
10
  pinned: false
11
  hf_oauth: true
 
5
  colorFrom: indigo
6
  colorTo: indigo
7
  sdk: gradio
8
+ sdk_version: 5.0.0
9
  app_file: run.py
10
  pinned: false
11
  hf_oauth: true
run.ipynb CHANGED
@@ -1 +1 @@
1
- {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio plotly"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/avatar.png https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/files/avatar.png\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/messages_testcase.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import plotly.express as px\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). 
Plus shows support for streaming text.\n", "\n", "def random_plot():\n", " df = px.data.iris()\n", " fig = px.scatter(df, x=\"sepal_width\", y=\"sepal_length\", color=\"species\",\n", " size='petal_length', hover_data=['petal_width'])\n", " return fig\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "def add_message(history, message):\n", " for x in message[\"files\"]:\n", " history.append(((x,), None))\n", " if message[\"text\"] is not None:\n", " history.append((message[\"text\"], None))\n", " return history, gr.MultimodalTextbox(value=None, interactive=False)\n", "\n", "def bot(history):\n", " history[-1][1] = \"Cool!\"\n", " return history\n", "\n", "fig = random_plot()\n", "\n", "with gr.Blocks(fill_height=True) as demo:\n", " chatbot = gr.Chatbot(\n", " elem_id=\"chatbot\",\n", " bubble_full_width=False,\n", " scale=1,\n", " )\n", "\n", " chat_input = gr.MultimodalTextbox(interactive=True,\n", " file_count=\"multiple\",\n", " placeholder=\"Enter message or upload file...\", show_label=False)\n", "\n", " chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])\n", " bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name=\"bot_response\")\n", " bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])\n", "\n", " chatbot.like(print_like_dislike, None, None)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
 
1
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/tuples_testcase.py"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import time\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n", "\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "\n", "def add_message(history, message):\n", " for x in message[\"files\"]:\n", " history.append({\"role\": \"user\", \"content\": {\"path\": x}})\n", " if message[\"text\"] is not None:\n", " history.append({\"role\": \"user\", \"content\": message[\"text\"]})\n", " return history, gr.MultimodalTextbox(value=None, interactive=False)\n", "\n", "\n", "def bot(history: list):\n", " response = \"**That's cool!**\"\n", " history.append({\"role\": \"assistant\", \"content\": \"\"})\n", " for character in response:\n", " history[-1][\"content\"] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(elem_id=\"chatbot\", bubble_full_width=False, type=\"messages\")\n", "\n", " chat_input = gr.MultimodalTextbox(\n", " interactive=True,\n", " file_count=\"multiple\",\n", " placeholder=\"Enter message or upload file...\",\n", " show_label=False,\n", " )\n", "\n", " chat_msg = 
chat_input.submit(\n", " add_message, [chatbot, chat_input], [chatbot, chat_input]\n", " )\n", " bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name=\"bot_response\")\n", " bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])\n", "\n", " chatbot.like(print_like_dislike, None, None, like_user_message=True)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
run.py CHANGED
@@ -1,46 +1,47 @@
1
  import gradio as gr
2
- import plotly.express as px
3
 
4
  # Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.
5
 
6
- def random_plot():
7
- df = px.data.iris()
8
- fig = px.scatter(df, x="sepal_width", y="sepal_length", color="species",
9
- size='petal_length', hover_data=['petal_width'])
10
- return fig
11
 
12
  def print_like_dislike(x: gr.LikeData):
13
  print(x.index, x.value, x.liked)
14
 
 
15
  def add_message(history, message):
16
  for x in message["files"]:
17
- history.append(((x,), None))
18
  if message["text"] is not None:
19
- history.append((message["text"], None))
20
  return history, gr.MultimodalTextbox(value=None, interactive=False)
21
 
22
- def bot(history):
23
- history[-1][1] = "Cool!"
24
- return history
25
 
26
- fig = random_plot()
 
 
 
 
 
 
27
 
28
- with gr.Blocks(fill_height=True) as demo:
29
- chatbot = gr.Chatbot(
30
- elem_id="chatbot",
31
- bubble_full_width=False,
32
- scale=1,
33
- )
34
 
35
- chat_input = gr.MultimodalTextbox(interactive=True,
36
- file_count="multiple",
37
- placeholder="Enter message or upload file...", show_label=False)
38
 
39
- chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
 
 
 
 
 
 
 
 
 
40
  bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
41
  bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])
42
 
43
- chatbot.like(print_like_dislike, None, None)
44
 
45
  if __name__ == "__main__":
46
  demo.launch()
 
1
  import gradio as gr
2
+ import time
3
 
4
  # Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.
5
 
 
 
 
 
 
6
 
7
def print_like_dislike(x: gr.LikeData):
    """Log which chatbot message was rated: its index, content, and liked flag."""
    index, value, liked = x.index, x.value, x.liked
    print(index, value, liked)
9
 
10
+
11
def add_message(history, message):
    """Append the user's uploaded files and typed text to the chat history.

    Each uploaded file becomes its own user message with a ``{"path": ...}``
    payload; the text (if any) becomes a final user message. Returns the
    updated history plus a cleared, disabled multimodal textbox so input is
    locked until the bot responds.
    """
    file_entries = [
        {"role": "user", "content": {"path": file_path}}
        for file_path in message["files"]
    ]
    history.extend(file_entries)
    if message["text"] is not None:
        history.append({"role": "user", "content": message["text"]})
    return history, gr.MultimodalTextbox(value=None, interactive=False)
17
 
 
 
 
18
 
19
def bot(history: list):
    """Stream a canned markdown reply one character at a time.

    Appends an empty assistant message, then grows its content character by
    character, yielding the (mutated) history after each step so the UI shows
    a typing effect.
    """
    reply = "**That's cool!**"
    history.append({"role": "assistant", "content": ""})
    partial = ""
    for ch in reply:
        partial += ch
        history[-1]["content"] = partial
        time.sleep(0.05)
        yield history
26
 
 
 
 
 
 
 
27
 
28
with gr.Blocks() as demo:
    # Chat display in openai-style "messages" format (list of role/content dicts).
    # NOTE(review): the deprecated ``bubble_full_width`` kwarg was dropped — it
    # is ignored (warning only) as of Gradio 5, which this commit upgrades to.
    chatbot = gr.Chatbot(elem_id="chatbot", type="messages")

    chat_input = gr.MultimodalTextbox(
        interactive=True,
        file_count="multiple",
        placeholder="Enter message or upload file...",
        show_label=False,
    )

    # Submit pipeline: append user message(s) -> stream bot reply -> re-enable input.
    chat_msg = chat_input.submit(
        add_message, [chatbot, chat_input], [chatbot, chat_input]
    )
    bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

    # Log like/dislike events; user messages may be rated as well.
    chatbot.like(print_like_dislike, None, None, like_user_message=True)

if __name__ == "__main__":
    demo.launch()
tuples_testcase.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import plotly.express as px
3
+
4
+ # Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.
5
+
6
def random_plot():
    """Build a sample scatter plot of the iris dataset (sepal dimensions)."""
    iris = px.data.iris()
    return px.scatter(
        iris,
        x="sepal_width",
        y="sepal_length",
        color="species",
        size="petal_length",
        hover_data=["petal_width"],
    )
11
+
12
def print_like_dislike(x: gr.LikeData):
    """Print the position, content, and liked flag of a rated chat message."""
    details = (x.index, x.value, x.liked)
    print(*details)
14
+
15
def add_message(history, message):
    """Append uploaded files and typed text to tuple-format chat history.

    Each file is stored as ``((path,), None)`` and the text (if any) as
    ``(text, None)``; the bot slot is filled in later. Returns the updated
    history plus a cleared, disabled multimodal textbox.
    """
    history += [((path,), None) for path in message["files"]]
    if message["text"] is not None:
        history += [(message["text"], None)]
    return history, gr.MultimodalTextbox(value=None, interactive=False)
21
+
22
def bot(history):
    """Fill in the bot slot of the most recent history entry with a canned reply."""
    latest = history[-1]
    latest[1] = "Cool!"
    return history
25
+
26
fig = random_plot()

with gr.Blocks(fill_height=True) as demo:
    # Tuple-format chatbot testcase (pre-Gradio-5 default history representation),
    # so the legacy keyword arguments are kept deliberately.
    chatbot = gr.Chatbot(
        elem_id="chatbot",
        bubble_full_width=False,
        scale=1,
    )

    chat_input = gr.MultimodalTextbox(
        interactive=True,
        file_count="multiple",
        placeholder="Enter message or upload file...",
        show_label=False,
    )

    # Submit pipeline: append user message(s) -> bot reply -> re-enable input.
    chat_msg = chat_input.submit(
        add_message, [chatbot, chat_input], [chatbot, chat_input]
    )
    bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

    chatbot.like(print_like_dislike, None, None)

if __name__ == "__main__":
    demo.launch()