KingNish committed on
Commit a16627e (1 Parent: ef594db)

Update app.py

Files changed (1)
  app.py  +42 -139
app.py CHANGED
@@ -9,158 +9,61 @@ from datetime import datetime
 import numpy as np
 import os
 
+# Install flash-attn
+subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 
-# subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
-
-# models = {
-#     "Qwen/Qwen2-VL-7B-Instruct": AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True, torch_dtype="auto", _attn_implementation="flash_attention_2").cuda().eval()
-# }
-def array_to_image_path(image):
-    if image is None:
-        gr.Warning("No video provided. Please upload an video before submitting.")
-        raise ValueError("No image provided. Please upload an image before submitting.")
-
-    # Generate a unique filename using timestamp
-    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-    filename = f"image_{timestamp}.png"
-
-    # Save the image
-    image.save(filename)
-
-    # Get the full path of the saved image
-    full_path = os.path.abspath(filename)
-
-    return full_path
-
-def array_to_video_path(video):
-    if video is None:
-        gr.Warning("No video provided. Please upload an video before submitting.")
-        raise ValueError("No video provided. Please upload an video before submitting.")
-
-    # Generate a unique filename using timestamp
-    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-    filename = f"video_{timestamp}.mp4"
-
-    # Save the image
-    video.save(filename)
-
-    # Get the full path of the saved image
-    full_path = os.path.abspath(filename)
-
-    return full_path
-
-models = {
-    "Qwen/Qwen2-VL-7B-Instruct": Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True, torch_dtype="auto").cuda().eval()
-
-}
-
-processors = {
-    "Qwen/Qwen2-VL-7B-Instruct": AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", trust_remote_code=True)
-}
+# Model and Processor Loading (Done once at startup)
+MODEL_ID = "Qwen/Qwen2-VL-7B-Instruct"
+model = Qwen2VLForConditionalGeneration.from_pretrained(MODEL_ID, trust_remote_code=True, attn_implementation="flash_attention_2", torch_dtype="auto").cuda().eval()
+processor = AutoProcessor.from_pretrained(MODEL_ID, trust_remote_code=True)
 
 DESCRIPTION = "[Qwen2-VL-7B Demo](https://huggingface.co/Qwen/Qwen2-VL-7B-Instruct)"
 
-kwargs = {}
-kwargs['torch_dtype'] = torch.bfloat16
+# Helper function to save media and return path
+def save_media_and_get_path(media, media_type):
+    if media is None:
+        gr.Warning(f"No {media_type} provided. Please upload a {media_type} before submitting.")
+        raise ValueError(f"No {media_type} provided.")
 
-user_prompt = '<|user|>\n'
-assistant_prompt = '<|assistant|>\n'
-prompt_suffix = "<|end|>\n"
+    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+    filename = f"{media_type}_{timestamp}.{'png' if media_type == 'image' else 'mp4'}"
+    media.save(filename)
+    return os.path.abspath(filename)
 
 @spaces.GPU
-def qwen_image(image, text_input=None, model_id="Qwen/Qwen2-VL-7B-Instruct"):
-    image_path = array_to_image_path(image)
+def qwen_inference(media, media_type, text_input=None):
+    media_path = save_media_and_get_path(media, media_type)
 
-    print(image_path)
-    model = models[model_id]
-    processor = processors[model_id]
     messages = [
-        {
+        {
             "role": "user",
             "content": [
                 {
-                    "type": "image",
-                    "image": image_path,
+                    "type": media_type,
+                    media_type: media_path,
+                    **({"max_pixels": 360 * 420, "fps": 6.0} if media_type == "video" else {}),
                 },
                 {"type": "text", "text": text_input},
             ],
         }
     ]
-
-    # Preparation for inference
-    text = processor.apply_chat_template(
-        messages, tokenize=False, add_generation_prompt=True
-    )
-    image_inputs, video_inputs = process_vision_info(messages)
-    inputs = processor(
-        text=[text],
-        images=image_inputs,
-        videos=video_inputs,
-        padding=True,
-        return_tensors="pt",
-    )
-    inputs = inputs.to("cuda")
-
-    # Inference: Generation of the output
-    generated_ids = model.generate(**inputs, max_new_tokens=1024)
-    generated_ids_trimmed = [
-        out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
-    ]
-    output_text = processor.batch_decode(
-        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
-    )
-
-    return output_text[0]
 
-@spaces.GPU(duration=125)
-def qwen_video(video, text_input=None, model_id="Qwen/Qwen2-VL-7B-Instruct"):
-    video_path = array_to_video_path(video)
-
-    print(video_path)
-    model = models[model_id]
-    processor = processors[model_id]
-
-    messages = [
-        {
-            "role": "user",
-            "content": [
-                {
-                    "type": "video",
-                    "video": video_path,
-                    "max_pixels": 360 * 420,
-                    "fps": 6.0,
-                },
-                {"type": "text", "text": text_input},
-            ],
-        }
-    ]
-
-    # Preparation for inference
-    text = processor.apply_chat_template(
-        messages, tokenize=False, add_generation_prompt=True
-    )
-    image_inputs, video_inputs = process_vision_info(messages)
+    text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+    image_inputs, video_inputs = process_vision_info(messages)
     inputs = processor(
         text=[text],
         images=image_inputs,
         videos=video_inputs,
         padding=True,
         return_tensors="pt",
-    )
-    inputs = inputs.to("cuda")
-
-    # Inference: Generation of the output
+    ).to("cuda")
+
     generated_ids = model.generate(**inputs, max_new_tokens=1024)
-    generated_ids_trimmed = [
-        out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
-    ]
-    output_text = processor.batch_decode(
-        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
-    )
-
-    return output_text[0]
+    generated_ids_trimmed = [out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
+    output_text = processor.batch_decode(generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
 
+    return output_text
+
 css = """
   #output {
     height: 500px;
@@ -171,28 +74,28 @@ css = """
 
 with gr.Blocks(css=css) as demo:
     gr.Markdown(DESCRIPTION)
-    with gr.Tab(label="Qwen2-VL-7B Input"):
+
+    with gr.Tab(label="Image Input"):
         with gr.Row():
             with gr.Column():
                 input_img = gr.Image(label="Input Picture", type="pil")
-                model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="Qwen/Qwen2-VL-7B-Instruct")
-                text_input = gr.Textbox(label="Question")
-                submit_btn = gr.Button(value="Submit")
+                text_input_image = gr.Textbox(label="Question")
+                submit_btn_image = gr.Button(value="Submit")
             with gr.Column():
-                output_text = gr.Textbox(label="Output Text")
+                output_text_image = gr.Textbox(label="Output Text")
+
+        submit_btn_image.click(qwen_inference, [input_img, "image", text_input_image], [output_text_image])
 
-        submit_btn.click(qwen_image, [input_img, text_input, model_selector], [output_text])
-    with gr.Tab(label="Qwen2-VL-7B Input"):
+    with gr.Tab(label="Video Input"):
        with gr.Row():
            with gr.Column():
-                input_img = gr.Video(label="Input Picture")
-                model_selector = gr.Dropdown(choices=list(models.keys()), label="Model", value="Qwen/Qwen2-VL-7B-Instruct")
-                text_input = gr.Textbox(label="Question")
-                submit_btn = gr.Button(value="Submit")
+                input_video = gr.Video(label="Input Video")
+                text_input_video = gr.Textbox(label="Question")
+                submit_btn_video = gr.Button(value="Submit")
            with gr.Column():
-                output_text = gr.Textbox(label="Output Text")
+                output_text_video = gr.Textbox(label="Output Text")
 
-        submit_btn.click(qwen_video, [input_img, text_input, model_selector], [output_text])
+        submit_btn_video.click(qwen_inference, [input_video, "video", text_input_video], [output_text_video])
 
 demo.queue(api_open=False)
 demo.launch(debug=True)
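
A note on the new event wiring: Gradio Blocks event listeners expect components in their `inputs` list, while the `.click(...)` calls in this revision pass the literal strings `"image"` and `"video"` as the second input. A minimal sketch of one way to fix the media type per tab, assuming the `qwen_inference` function and the components defined above (the wrapper names below are illustrative and not part of the commit):

```python
# Illustrative wrappers (not in the commit): bind media_type per tab so that
# only Gradio components appear in each event's inputs list.
def qwen_image_inference(image, question):
    # Calls the shared handler with media_type fixed to "image".
    return qwen_inference(image, "image", question)

def qwen_video_inference(video, question):
    # Calls the shared handler with media_type fixed to "video".
    return qwen_inference(video, "video", question)

# Inside the gr.Blocks() context, the listeners would then be wired as:
# submit_btn_image.click(qwen_image_inference, [input_img, text_input_image], [output_text_image])
# submit_btn_video.click(qwen_video_inference, [input_video, text_input_video], [output_text_video])
```

Passing the type through a `gr.State("image")` / `gr.State("video")` component placed in the inputs list would achieve the same effect without wrappers.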