MaziyarPanahi committed
Commit d02b0d1
Parent: 340a6dd

Update app.py (#1)


- Update app.py (e4f69e3b1b96505eae6c4f2685c3c1fa4f306168)

Files changed (1):
1. app.py (+18, -7)
app.py CHANGED

```diff
--- a/app.py
+++ b/app.py
@@ -1,5 +1,8 @@
 import gradio as gr
-from transformers import LlavaNextProcessor, LlavaNextForConditionalGeneration, TextIteratorStreamer
+
+from transformers import AutoProcessor, LlavaForConditionalGeneration
+from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
+
 from threading import Thread
 import re
 import time
```
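One caveat on this import hunk: the removed line drops TextIteratorStreamer, while the newly added AutoModelForCausalLM, AutoTokenizer, and TextStreamer are not referenced anywhere else in the diff. If the unchanged body of bot_streaming still builds a TextIteratorStreamer (the thread-and-buffer fragments further down suggest it does), the imports would need to keep it. A minimal sketch under that assumption:

```python
# Sketch (assumption): one import line covering both the new loading path and
# the streaming loop visible later in the diff. TextIteratorStreamer is
# assumed to still be used by the unchanged parts of bot_streaming.
from transformers import AutoProcessor, LlavaForConditionalGeneration, TextIteratorStreamer
```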
```diff
@@ -7,9 +10,17 @@ from PIL import Image
 import torch
 import spaces
 
-processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
 
-model = LlavaNextForConditionalGeneration.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf", torch_dtype=torch.float16, low_cpu_mem_usage=True)
+model_id = "xtuner/llava-llama-3-8b-v1_1-transformers"
+
+processor = AutoProcessor.from_pretrained(model_id)
+
+model = LlavaForConditionalGeneration.from_pretrained(
+    model_id,
+    torch_dtype=torch.float16,
+    low_cpu_mem_usage=True,
+)
+
 model.to("cuda:0")
 
 @spaces.GPU
```
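For reference, here is the new loading path run end to end as a standalone script with a single non-streaming call. The model id and loading flags come from the diff; the image path, question, and token budget are illustrative:

```python
# Standalone sketch of the committed loading code plus one inference call.
# Assumes a CUDA GPU with roughly 16 GB free (8B parameters in float16).
import torch
from PIL import Image
from transformers import AutoProcessor, LlavaForConditionalGeneration

model_id = "xtuner/llava-llama-3-8b-v1_1-transformers"
processor = AutoProcessor.from_pretrained(model_id)
model = LlavaForConditionalGeneration.from_pretrained(
    model_id,
    torch_dtype=torch.float16,   # half-precision weights, half the memory of float32
    low_cpu_mem_usage=True,      # stream weights in rather than building a full CPU copy
)
model.to("cuda:0")

image = Image.open("bee.jpg").convert("RGB")  # illustrative local image
prompt = (
    "<|start_header_id|>user<|end_header_id|>\n\n"
    "<image>\nWhat is on the flower?<|eot_id|>"
    "<|start_header_id|>assistant<|end_header_id|>\n\n"
)
inputs = processor(prompt, image, return_tensors="pt").to("cuda:0", torch.float16)
output = model.generate(**inputs, max_new_tokens=100)
print(processor.decode(output[0], skip_special_tokens=True))
```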
```diff
@@ -26,7 +37,7 @@ def bot_streaming(message, history):
 
     if image is None:
         gr.Error("You need to upload an image for LLaVA to work.")
-    prompt=f"[INST] <image>\n{message['text']} [/INST]"
+    prompt=f"<|start_header_id|>user<|end_header_id|>\n\n<image>\n{message['text']}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
     image = Image.open(image).convert("RGB")
     inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
 
```
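Two notes on this hunk. First, gr.Error(...) on the context line is constructed but never raised, so the None check falls through to Image.open(None); Gradio only surfaces the error when it is raised (raise gr.Error(...)). Second, assuming the xtuner checkpoint ships a chat template in its tokenizer_config.json (worth verifying), the hand-built Llama-3 header string could be generated rather than hard-coded:

```python
# Sketch (assumes the tokenizer defines a chat_template). Builds the same
# Llama-3-style prompt as the committed f-string; the template may also
# prepend <|begin_of_text|>, which the hand-built string leaves to BOS
# handling at tokenization time.
messages = [{"role": "user", "content": f"<image>\n{message['text']}"}]
prompt = processor.tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,  # append the assistant header, as in the diff
)
```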
```diff
@@ -37,7 +48,7 @@ def bot_streaming(message, history):
     thread = Thread(target=model.generate, kwargs=generation_kwargs)
     thread.start()
 
-    text_prompt =f"[INST] \n{message['text']} [/INST]"
+    text_prompt =f"<|start_header_id|>user<|end_header_id|>\n\n{message['text']}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
 
 
     buffer = ""
```
```diff
@@ -50,8 +61,8 @@ def bot_streaming(message, history):
         yield generated_text_without_prompt
 
 
-demo = gr.ChatInterface(fn=bot_streaming, title="LLaVA NeXT", examples=[{"text": "What is on the flower?", "files":["./bee.jpg"]},
+demo = gr.ChatInterface(fn=bot_streaming, title="LLaVA Llama-3-8B", examples=[{"text": "What is on the flower?", "files":["./bee.jpg"]},
                       {"text": "How to make this pastry?", "files":["./baklava.png"]}],
-                      description="Try [LLaVA NeXT](https://huggingface.co/docs/transformers/main/en/model_doc/llava_next) in this demo (more specifically, the [Mistral-7B variant](https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf)). Upload an image and start chatting about it, or simply try one of the examples below. If you don't upload an image, you will receive an error.",
+                      description="Try [LLaVA Llama-3-8B](https://huggingface.co/xtuner/llava-llama-3-8b-v1_1-transformers). Upload an image and start chatting about it, or simply try one of the examples below. If you don't upload an image, you will receive an error.",
                       stop_btn="Stop Generation", multimodal=True)
 demo.launch(debug=True)
```
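With multimodal=True, gr.ChatInterface passes the handler a dict with "text" and "files" keys instead of a plain string, which is why the fragments above read message['text']. A minimal skeleton of that contract (the echo body is illustrative and stands in for the real model call; the error message comes from the diff):

```python
# Minimal multimodal ChatInterface skeleton (sketch). Demonstrates the
# message-dict contract the committed handler relies on.
import gradio as gr

def bot_streaming(message, history):
    files = message.get("files", [])
    if not files:
        raise gr.Error("You need to upload an image for LLaVA to work.")
    yield f"Got {files[-1]} with prompt: {message['text']}"

demo = gr.ChatInterface(fn=bot_streaming, multimodal=True)
demo.launch()
```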
 