import gradio as gr
from transformers import AutoModel, AutoTokenizer
import torch
import threading
import os

# Cache loaded models and tokenizers keyed by model name
model_cache = {}
tokenizer_cache = {}
model_lock = threading.Lock()
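# Loading a checkpoint is expensive, so each model is loaded at most once and
# then shared across requests; model_lock serializes cache access so that
# concurrent Gradio callbacks cannot race while a model is loading.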

# Optional Hugging Face token for gated/private checkpoints; None means
# anonymous access
hf_token = os.environ.get('hf_token', None)


# Define the models and their paths
model_paths = {
    "H2OVL-Mississippi-2B":"h2oai/h2ovl-mississippi-2b",
    "H2OVL-Mississippi-0.8B":"h2oai/h2ovl-mississippi-800m",
    # Add more models as needed
}


def load_model_and_set_image_function(model_name):
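    """Load the selected model and tokenizer, caching them for reuse.

    Returns the model name; Gradio stores it in model_state so later
    callbacks can look up the cached model and tokenizer.
    """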
    # Get the model path from the model_paths dictionary
    model_path = model_paths[model_name]
    
    
    with model_lock:
        if model_name in model_cache:
            # model is already loaded; retrieve it from the cache
            print(f"Model {model_name} is already loaded. Retrieving from cache.")
            
        else:
            # load the model and tokenizer
            print(f"Loading model {model_name}...")

            model = AutoModel.from_pretrained(
                model_path,
                torch_dtype=torch.bfloat16,
                low_cpu_mem_usage=True,
                trust_remote_code=True,
                use_auth_token=hf_token,
                # device_map="auto"
            ).eval().cuda()

            tokenizer = AutoTokenizer.from_pretrained(
                model_path,
                trust_remote_code=True,
                use_fast=False,
                use_auth_token=hf_token
            )
            
            # add the model and tokenizer to the cache
            model_cache[model_name] = model
            tokenizer_cache[model_name] = tokenizer
            print(f"Model {model_name} loaded successfully.")
            

    return model_name
    

def inference(image_input, 
              user_message,
              temperature, 
              top_p, 
              max_new_tokens, 
              tile_num,
              chatbot,
              state, 
              model_name):
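    """Run one chat turn: validate the inputs, call model.chat with the
    accumulated history, and return the updated chatbot, state, and an
    empty string to clear the input textbox."""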
    
    # Check that a model has been selected
    if model_name is None:
        chatbot.append(("System", "Please select a model to start the conversation."))
        return chatbot, state, ""   
    
    with model_lock:
        if model_name not in model_cache:
            chatbot.append(("System", "Model not loaded. Please wait for the model to load."))
            return chatbot, state, ""
        model = model_cache[model_name]
        tokenizer = tokenizer_cache[model_name]

    # Check for empty or invalid user message
    if not user_message or user_message.strip() == '' or user_message.lower() == 'system':
        chatbot.append(("System", "Please enter a valid message to continue the conversation."))
        return chatbot, state, ""
    
    
    # Initialize the chatbot history if needed
    if chatbot is None:
        chatbot = []
        
    if image_input is None:
        chatbot.append(("System", "Please provide an image to start the conversation."))
        return chatbot, state, ""
        
    # model.chat treats a None history as an empty conversation, so state can
    # be passed through as-is

    # Append user message to chatbot
    chatbot.append((user_message, None))

    # Build the generation config; temperature 0 means greedy decoding,
    # anything else enables sampling
    do_sample = (float(temperature) != 0.0)

    generation_config = dict(
        num_beams=1,
        max_new_tokens=int(max_new_tokens),
        do_sample=do_sample,
        temperature=float(temperature),
        top_p=float(top_p),
    )

    # Call model.chat with history
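    # (model.chat is provided by the checkpoint's trust_remote_code
    # implementation; with return_history=True it returns the response text
    # together with the updated conversation history)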
    response_text, new_state = model.chat(
        tokenizer,
        image_input,
        user_message,
        max_tiles=int(tile_num),
        generation_config=generation_config,
        history=state,
        return_history=True
    )
    
    # Update the state with the new history
    state = new_state
    # Update chatbot with the model's response
    chatbot[-1] = (user_message, response_text)    
    
    return chatbot, state, ""

def regenerate_response(chatbot, 
                        temperature, 
                        top_p, 
                        max_new_tokens, 
                        tile_num,
                        state, 
                        image_input,
                        model_name):
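    """Drop the last exchange from the history and answer the last user
    message again with the current sampling settings."""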
    
    # Check that a model has been selected
    if model_name is None:
        chatbot.append(("System", "Please select a model to start the conversation."))
        return chatbot, state
    
    
    with model_lock:
        if model_name not in model_cache:
            chatbot.append(("System", "Model not loaded. Please wait for the model to load."))
            return chatbot, state
        model = model_cache[model_name]
        tokenizer = tokenizer_cache[model_name]
        
    # Check that there is a previous exchange to regenerate
    if chatbot is None or len(chatbot) == 0:
        chatbot = [("System", "Nothing to regenerate. Please start a conversation first.")]
        return chatbot, state
    
    # Get the last user message
    last_user_message, _ = chatbot[-1]
    
    # Check for empty or invalid last user message
    if not last_user_message or last_user_message.strip() == '' or last_user_message.lower() == 'system':
        chatbot.append(("System", "Cannot regenerate response for an empty or invalid message."))
        return chatbot, state
    
    # Drop the last (question, answer) pair from the history; the question is
    # re-sent below, so only the answer is regenerated
    if state is not None and len(state) > 0:
        state = state[:-1]
        if len(state) == 0:
            state = None
    else:
        state = None
   
    # Build the generation config; temperature 0 means greedy decoding,
    # anything else enables sampling
    do_sample = (float(temperature) != 0.0)

    generation_config = dict(
        num_beams=1,
        max_new_tokens=int(max_new_tokens),
        do_sample=do_sample,
        temperature=float(temperature),
        top_p=float(top_p),
    )
    
    
    # Regenerate the response
    response_text, new_state = model.chat(
        tokenizer,
        image_input,
        last_user_message,
        max_tiles=int(tile_num),
        generation_config=generation_config,
        history=state,  # Exclude last assistant's response
        return_history=True
    )
    
    # Update the state with new_state
    state = new_state
    
    # Update chatbot with the regenerated response
    chatbot[-1] = (last_user_message, response_text)
       
    return chatbot, state


def clear_all():
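    """Reset the chatbot, history state, image input, and user textbox."""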
    return [], None, None, ""  # Reset chatbot, state, image_input, and user_input

# Build the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# **H2OVL-Mississippi**")
    
    state = gr.State()
    model_state = gr.State()
    # tokenizer_state = gr.State()
    # image_load_function_state = gr.State()

    with gr.Row():
        model_dropdown = gr.Dropdown(
            choices=list(model_paths.keys()),
            label="Select Model",
            value="H2OVL-Mississippi-2B"
        )
    
    # When the model selection changes, load the new model
    model_dropdown.change(
        fn=load_model_and_set_image_function,
        inputs=[model_dropdown],
        outputs=[model_state]
    )
    
    # Load the default model when the app starts
    demo.load(
        fn=load_model_and_set_image_function,
        inputs=[model_dropdown],
        outputs=[model_state]
    )
    
    with gr.Row(equal_height=True):
        # First column with image input
        with gr.Column(scale=1):
            image_input = gr.Image(type="filepath", label="Upload an Image")
             
     
        # Second column with chatbot and user input
        with gr.Column(scale=2):    
            chatbot = gr.Chatbot(label="Conversation")
            user_input = gr.Textbox(label="What is your question?",
                                    placeholder="Type your message here",
                                    interactive=True,
                                    lines=1)
        
    with gr.Accordion('Parameters', open=False):
        with gr.Row():
            temperature_input = gr.Slider(
                minimum=0.0, 
                maximum=1.0, 
                step=0.1, 
                value=0.2, 
                interactive=True,
                label="Temperature")
            top_p_input = gr.Slider(
                minimum=0.0, 
                maximum=1.0, 
                step=0.1, 
                value=0.9,
                interactive=True, 
                label="Top P")
            max_new_tokens_input = gr.Slider(
                minimum=0, 
                maximum=4096, 
                step=64, 
                value=1024, 
                interactive=True,
                label="Max New Tokens (default: 1024)")
            tile_num = gr.Slider(
                minimum=2, 
                maximum=12, 
                step=1, 
                value=6, 
                interactive=True,
                label="Tile Number (default: 6)"
            )
            
    with gr.Row():
        submit_button = gr.Button("Submit")
        regenerate_button = gr.Button("Regenerate")
        clear_button = gr.Button("Clear")        

    # When the submit button is clicked, call the inference function
    submit_button.click(
        fn=inference, 
        inputs=[
            image_input, 
            user_input, 
            temperature_input, 
            top_p_input, 
            max_new_tokens_input, 
            tile_num,
            chatbot, 
            state, 
            model_state
        ], 
        outputs=[chatbot, state, user_input]
    )
    # When the regenerate button is clicked, re-run the last inference
    regenerate_button.click(
        fn=regenerate_response,
        inputs=[
            chatbot, 
            temperature_input, 
            top_p_input,
            max_new_tokens_input, 
            tile_num,
            state,
            image_input, 
            model_state
            ],
        outputs=[chatbot, state]
    )

    clear_button.click(
        fn=clear_all, 
        inputs=None, 
        outputs=[chatbot, state, image_input, user_input]
    )            
    gr.Examples(
        examples=[
            ["assets/driver_license.png", "Extract the text from the image and fill the following json {'license_number':'',\n'full_name':'',\n'date_of_birth':'',\n'address':'',\n'issue_date':'',\n'expiration_date':'',\n}"],
            ["assets/receipt.jpg", "Read the text on the image"],
            ["assets/invoice.png", "Please extract the following fields, and return the result in JSON format: supplier_name, supplier_address, customer_name, customer_address, invoice_number, invoice_total_amount, invoice_tax_amount"],
            ["assets/CBA-1H23-Results-Presentation_wheel.png", "What is the efficiency of H2O.AI in document processing?"],
        ],
        inputs=[image_input, user_input],
        label="Examples",
    )
demo.queue()   
demo.launch(max_threads=10)
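
# ---------------------------------------------------------------------------
# Running locally (a sketch, not verified for this exact environment): the
# models are moved to GPU with .cuda(), so a CUDA device is required.
#
#   pip install gradio torch transformers
#   # the checkpoints' trust_remote_code implementation may pull in extra
#   # dependencies (e.g. einops, timm)
#   hf_token=<your_token> python app.py
#
# <your_token> is a placeholder; set the hf_token environment variable only
# if the checkpoints require authentication.
# ---------------------------------------------------------------------------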