girishwangikar committed on
Commit d971da0
1 Parent(s): 016c524

Create app.py

Files changed (1)
  1. app.py +94 -0
app.py ADDED
@@ -0,0 +1,94 @@
+import gradio as gr
+import torch
+from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
+from qwen_vl_utils import process_vision_info
+from PIL import Image
+from datetime import datetime
+import numpy as np
+import os
+
+# Save an image array to disk and return its absolute path
+def array_to_image_path(image_array):
+    img = Image.fromarray(np.uint8(image_array))
+    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+    filename = f"image_{timestamp}.png"
+    img.save(filename)
+    return os.path.abspath(filename)
+
+# Load model and processor on CPU in float32
+model = Qwen2VLForConditionalGeneration.from_pretrained(
+    "Qwen/Qwen2-VL-2B-Instruct",
+    trust_remote_code=True,
+    torch_dtype=torch.float32,
+    device_map="cpu"
+).eval()
+
+processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", trust_remote_code=True)
+
+DESCRIPTION = "[Qwen2-VL-2B Demo (CPU Version)](https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct)"
+
+def run_example(image, text_input):
+    image_path = array_to_image_path(image)
+
+    image = Image.fromarray(image).convert("RGB")
+    messages = [
+        {
+            "role": "user",
+            "content": [
+                {
+                    "type": "image",
+                    "image": image_path,
+                },
+                {"type": "text", "text": text_input},
+            ],
+        }
+    ]
+
+    # Preparation for inference
+    text = processor.apply_chat_template(
+        messages, tokenize=False, add_generation_prompt=True
+    )
+    image_inputs, video_inputs = process_vision_info(messages)
+    inputs = processor(
+        text=[text],
+        images=image_inputs,
+        videos=video_inputs,
+        padding=True,
+        return_tensors="pt",
+    )
+
+    # Inference: generate the output and trim off the prompt tokens
+    with torch.no_grad():
+        generated_ids = model.generate(**inputs, max_new_tokens=128)
+    generated_ids_trimmed = [
+        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+    ]
+    output_text = processor.batch_decode(
+        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+    )
+
+    return output_text[0]
+
+css = """
+#output {
+    height: 500px;
+    overflow: auto;
+    border: 1px solid #ccc;
+}
+"""
+
+with gr.Blocks(css=css) as demo:
+    gr.Markdown(DESCRIPTION)
+    with gr.Tab(label="Qwen2-VL-2B Input (CPU)"):
+        with gr.Row():
+            with gr.Column():
+                input_img = gr.Image(label="Input Picture")
+                text_input = gr.Textbox(label="Question")
+                submit_btn = gr.Button(value="Submit")
+            with gr.Column():
+                # elem_id ties this box to the #output CSS rule above
+                output_text = gr.Textbox(label="Output Text", elem_id="output")
+
+        submit_btn.click(run_example, [input_img, text_input], [output_text])
+
+demo.queue(api_open=False)
+demo.launch()