girishwangikar commited on
Commit
6ce9885
1 Parent(s): 3e2d679

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -5
app.py CHANGED
@@ -1,8 +1,28 @@
1
- import os
2
 
3
- commandline_args = os.getenv("COMMANDLINE_ARGS", "")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
 
5
 
 
 
 
6
  import gradio as gr
7
  import torch
8
  from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
@@ -23,12 +43,11 @@ def array_to_image_path(image_array):
23
  # Load model and processor
24
  model = Qwen2VLForConditionalGeneration.from_pretrained(
25
  "Qwen/Qwen2-VL-2B-Instruct",
26
- trust_remote_code=True,
27
  torch_dtype=torch.float32,
28
  device_map="cpu"
29
  ).eval()
30
 
31
- processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct", trust_remote_code=True)
32
 
33
  DESCRIPTION = "[Qwen2-VL-2B Demo (CPU Version)](https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct)"
34
 
@@ -95,5 +114,10 @@ with gr.Blocks(css=css) as demo:
95
 
96
  submit_btn.click(run_example, [input_img, text_input], [output_text])
97
 
 
 
98
  demo.queue(api_open=False)
99
- demo.launch(inline=False, server_name="0.0.0.0", server_port=int(os.getenv("PORT", 7860)), debug=True, enable_queue=("--no-gradio-queue" not in commandline_args))
 
 
 
 
1
+ The error you're encountering stems from two separate issues:
2
 
3
+ 1. **`trust_remote_code` warning:**
4
+ This warning is triggered because `trust_remote_code` is used in the wrong context. It only affects Auto classes (like `AutoModel` or `AutoProcessor`) but has no effect when loading the model directly using `Qwen2VLForConditionalGeneration`. You can safely remove it when loading the model. Here's the corrected model loading line:
5
+
6
+ ```python
7
+ model = Qwen2VLForConditionalGeneration.from_pretrained(
8
+ "Qwen/Qwen2-VL-2B-Instruct",
9
+ torch_dtype=torch.float32,
10
+ device_map="cpu"
11
+ ).eval()
12
+ ```
13
+
14
+ 2. **`enable_queue` argument in `launch`:**
15
+ The `enable_queue` argument was deprecated and then removed from `launch()` in recent Gradio versions, and it was not replaced by a `queue` argument — `launch()` accepts no such parameter. Queuing is now configured by calling `demo.queue()` on the Blocks object before launching. Since the code already calls `demo.queue(api_open=False)`, simply remove the argument from the `demo.launch()` call:
16
+
17
+ ```python
18
+ demo.launch(inline=False, server_name="0.0.0.0", server_port=int(os.getenv("PORT", 7860)), debug=True)
19
+ ```
20
 
21
+ This should resolve the issues you're encountering. Here's the corrected code:
22
 
23
+ ### Final Code Fix:
24
+
25
+ ```python
26
  import gradio as gr
27
  import torch
28
  from transformers import Qwen2VLForConditionalGeneration, AutoProcessor
 
43
  # Load model and processor
44
  model = Qwen2VLForConditionalGeneration.from_pretrained(
45
  "Qwen/Qwen2-VL-2B-Instruct",
 
46
  torch_dtype=torch.float32,
47
  device_map="cpu"
48
  ).eval()
49
 
50
+ processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
51
 
52
  DESCRIPTION = "[Qwen2-VL-2B Demo (CPU Version)](https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct)"
53
 
 
114
 
115
  submit_btn.click(run_example, [input_img, text_input], [output_text])
116
 
117
+ commandline_args = os.getenv("COMMANDLINE_ARGS", "")
118
+
119
+ if "--no-gradio-queue" not in commandline_args:
+     demo.queue(api_open=False)
120
+ demo.launch(inline=False, server_name="0.0.0.0", server_port=int(os.getenv("PORT", 7860)), debug=True)
121
+ ```
122
+
123
+ This code should now work without the previous errors.