Update app.py
app.py
CHANGED
@@ -159,6 +159,22 @@ class InferenceDemo(object):
         self.conversation = conv_templates[args.conv_mode].copy()
         self.num_frames = args.num_frames
 
+class ChatSessionManager:
+    def __init__(self):
+        self.chatbot_instance = None
+
+    def initialize_chatbot(self, args, model_path, tokenizer, model, image_processor, context_len):
+        self.chatbot_instance = InferenceDemo(args, model_path, tokenizer, model, image_processor, context_len)
+        print(f"Initialized Chatbot instance with ID: {id(self.chatbot_instance)}")
+
+    def reset_chatbot(self):
+        self.chatbot_instance = None
+
+    def get_chatbot(self, args, model_path, tokenizer, model, image_processor, context_len):
+        if self.chatbot_instance is None:
+            self.initialize_chatbot(args, model_path, tokenizer, model, image_processor, context_len)
+        return self.chatbot_instance
+
 
 def is_valid_video_filename(name):
     video_extensions = ["avi", "mp4", "mov", "mkv", "flv", "wmv", "mjpeg"]
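The new manager is a lazily initialized singleton: the first get_chatbot() call constructs the InferenceDemo, later calls return the cached instance, and reset_chatbot() forces a rebuild on the next request. A minimal self-contained sketch of that pattern (Demo below is a hypothetical stand-in for InferenceDemo, not code from this commit):

    # Toy sketch of the lazy-singleton pattern ChatSessionManager implements.
    class Demo:
        def __init__(self, conv_mode):
            self.conv_mode = conv_mode

    class Manager:
        def __init__(self):
            self.instance = None           # nothing is built until first use

        def get(self, conv_mode):
            if self.instance is None:      # construct on the first request only
                self.instance = Demo(conv_mode)
            return self.instance           # later calls reuse the cached object

        def reset(self):
            self.instance = None           # next get() builds a fresh instance

    m = Manager()
    a = m.get("llava_v1")
    b = m.get("ignored")                   # same object; the argument has no effect
    assert a is b and b.conv_mode == "llava_v1"
    m.reset()
    assert m.get("llava_v1") is not a      # fresh instance after reset

One consequence carries over to get_chatbot() above: arguments passed after the first call have no effect until reset_chatbot() runs.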
@@ -212,13 +228,6 @@ def load_image(image_file):
     return image
 
 
-def clear_history(history):
-
-    our_chatbot.conversation = conv_templates[our_chatbot.conv_mode].copy()
-
-    return None
-
-
 def clear_response(history):
     for index_conv in range(1, len(history)):
         # loop until get a text response from our model.
@@ -229,17 +238,25 @@ def clear_response(history):
     history = history[:-index_conv]
     return history, question
 
+chat_manager = ChatSessionManager()
+
+
+def clear_history(history):
+    chatbot_instance = chat_manager.get_chatbot(args, model_path, tokenizer, model, image_processor, context_len)
+    chatbot_instance.conversation = conv_templates[chatbot_instance.conv_mode].copy()
+    return None
 
 
 def add_message(history, message):
     # history=[]
-    global our_chatbot
+    # global our_chatbot
     global chat_image_num
     if not history:
         history = []
-        our_chatbot = InferenceDemo(
-            args, model_path, tokenizer, model, image_processor, context_len
-        )
+        our_chatbot = chat_manager.get_chatbot(args, model_path, tokenizer, model, image_processor, context_len)
+        # our_chatbot = InferenceDemo(
+        #     args, model_path, tokenizer, model, image_processor, context_len
+        # )
        chat_image_num = 0
     print("# Add message message",message)
     if len(message["files"]) <= 1:
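Note the two different levels of reset now in play: the relocated clear_history() keeps the cached instance and only replaces its conversation template, while chat_manager.reset_chatbot() (used below when a second image arrives) discards the instance entirely. A toy illustration of the distinction, using hypothetical stand-in classes:

    # Toy contrast: resetting a field vs. discarding the whole instance.
    class Conv:
        def __init__(self):
            self.messages = []

    class Session:
        def __init__(self):
            self.conversation = Conv()

    session = Session()
    session.conversation.messages.append("hi")

    keep = session
    session.conversation = Conv()   # clear_history-style: same object, fresh conversation
    assert keep is session and session.conversation.messages == []

    session = Session()             # reset_chatbot-style: a brand-new object
    assert session is not keep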
@@ -248,9 +265,11 @@ def add_message(history, message):
         chat_image_num += 1
         if chat_image_num > 1:
             history = []
-            our_chatbot = InferenceDemo(
-                args, model_path, tokenizer, model, image_processor, context_len
-            )
+            chat_manager.reset_chatbot()
+            our_chatbot = chat_manager.get_chatbot(args, model_path, tokenizer, model, image_processor, context_len)
+            # our_chatbot = InferenceDemo(
+            #     args, model_path, tokenizer, model, image_processor, context_len
+            # )
             chat_image_num = 0
         for x in message["files"]:
             history.append(((x,), None))
@@ -261,7 +280,8 @@ def add_message(history, message):
 
         print("### Not bigger than one history", history)
         print("### Not bigger than one conv", our_chatbot.conversation)
-        return history, gr.MultimodalTextbox(value=None, interactive=False)
+        print(f"### Chatbot instance ID: {id(our_chatbot)}")
+        return history, gr.MultimodalTextbox(value=None, interactive=False)#, our_chatbot
     else:
         for x in message["files"]:
             history.append(((x,), None))
@@ -270,16 +290,19 @@ def add_message(history, message):
 
     print("### Bigger than one history", history)
    print("### Bigger than one conv", our_chatbot.conversation)
-    return history, gr.MultimodalTextbox(value=None, interactive=False)
+    return history, gr.MultimodalTextbox(value=None, interactive=False)#, our_chatbot
 
 
 @spaces.GPU
 def bot(history, temperature, top_p, max_output_tokens):
+    # global our_chatbot
     # if not history:
     #     gr.Warning("Only one image can be uploaded in a conversation. Please reduce the number of images and start a new conversation.")
     #     return history
     print("### turn start history",history)
+    our_chatbot = chat_manager.get_chatbot(args, model_path, tokenizer, model, image_processor, context_len)
     print("### turn start conv",our_chatbot.conversation)
+    print(f"### Chatbot instance ID: {id(our_chatbot)}")
     text = history[-1][0]
     images_this_term = []
     text_this_term = ""
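add_message() and bot() serve the standard Gradio chat chain: submit appends the user turn and locks the textbox (interactive=False), bot() generates, and a final step re-enables input. A minimal self-contained sketch of that chain, assuming Gradio 4.x; the echo function and wiring below are illustrative only (the real app also feeds temperature/top_p/max_output_tokens into bot()):

    # Minimal sketch of the multimodal submit chain this app plugs into.
    import gradio as gr

    def add_message(history, message):
        history = history or []
        for f in message["files"]:
            history.append(((f,), None))
        if message["text"]:
            history.append((message["text"], None))
        # Returning interactive=False locks the textbox while the bot runs.
        return history, gr.MultimodalTextbox(value=None, interactive=False)

    def bot(history):
        # Echo stand-in for the real model call.
        history[-1] = (history[-1][0], f"echo: {history[-1][0]}")
        return history

    with gr.Blocks() as demo:
        chatbot = gr.Chatbot([])
        box = gr.MultimodalTextbox(interactive=True, file_types=["image"])
        chain = box.submit(add_message, [chatbot, box], [chatbot, box])
        chain = chain.then(bot, chatbot, chatbot)
        # Re-enable the textbox once the bot has finished.
        chain.then(lambda: gr.MultimodalTextbox(interactive=True), None, [box])

    if __name__ == "__main__":
        demo.launch()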
@@ -455,7 +478,6 @@ def bot(history, temperature, top_p, max_output_tokens):
                 repo_id=repo_name,
                 repo_type="dataset")
 
-
 
 txt = gr.Textbox(
     scale=4,
@@ -464,6 +486,7 @@ txt = gr.Textbox(
     container=False,
 )
 
+
 with gr.Blocks(
     css=".message-wrap.svelte-1lcyrx4>div.svelte-1lcyrx4 img {min-width: 40px}",
 ) as demo:
@@ -501,6 +524,10 @@ with gr.Blocks(
     with gr.Row():
         chatbot = gr.Chatbot([], elem_id="PULSE", bubble_full_width=False, height=750)
 
+    # our_chatbot = None
+    # our_chatbot = gr.Variable(None)
+    # our_chatbot = gr.State(None)
+
     with gr.Row():
         upvote_btn = gr.Button(value="👍 Upvote", interactive=True)
         downvote_btn = gr.Button(value="👎 Downvote", interactive=True)
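The commented-out lines record an alternative the author evidently considered: keeping the chatbot in per-session gr.State (gr.Variable is the older, deprecated name for it) rather than in a process-wide manager. For contrast, a minimal sketch of the gr.State route, hypothetical rather than what this commit ships; state lives per browser session, so concurrent users would not share one instance:

    # Hypothetical gr.State variant: one session object per browser tab,
    # created lazily inside the handler instead of at module scope.
    import gradio as gr

    def respond(message, session):
        if session is None:
            session = {"history": []}   # stand-in for an InferenceDemo instance
        session["history"].append(message)
        return f"turn {len(session['history'])}: {message}", session

    with gr.Blocks() as demo:
        state = gr.State(None)          # per-session, unlike a module-level global
        inp = gr.Textbox()
        out = gr.Textbox()
        inp.submit(respond, [inp, state], [out, state])

    if __name__ == "__main__":
        demo.launch()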
@@ -604,5 +631,5 @@ if __name__ == "__main__":
     chat_image_num = 0
     print("### tokenzier",tokenizer)
     model=model.to(torch.device('cuda'))
-    our_chatbot = None
+    # our_chatbot = None
     demo.launch()