Shanshan Wang committed on
Commit
d259dc9
1 Parent(s): da76dba

updated default model

Browse files
Files changed (1) hide show
  1. app.py +23 -15
app.py CHANGED
@@ -51,7 +51,6 @@ def inference(image_input,
51
  tile_num,
52
  chatbot,
53
  state,
54
- # image_state,
55
  model_state,
56
  tokenizer_state):
57
 
@@ -129,9 +128,6 @@ def regenerate_response(chatbot,
129
  if model_state is None or tokenizer_state is None:
130
  chatbot.append(("System", "Please select a model to start the conversation."))
131
  return chatbot, state
132
-
133
- model = model_state
134
- tokenizer = tokenizer_state
135
 
136
  # Check if there is a previous user message
137
  if chatbot is None or len(chatbot) == 0:
@@ -139,11 +135,6 @@ def regenerate_response(chatbot,
139
  chatbot.append(("System", "Nothing to regenerate. Please start a conversation first."))
140
  return chatbot, state,
141
 
142
- # # Check if there is a previous user message
143
- # if state is None or len(state) == 0:
144
- # chatbot.append(("System", "Nothing to regenerate. Please start a conversation first."))
145
- # return chatbot, state
146
-
147
  # Get the last user message
148
  last_user_message, _ = chatbot[-1]
149
 
@@ -152,11 +143,16 @@ def regenerate_response(chatbot,
152
  chatbot.append(("System", "Cannot regenerate response for an empty or invalid message."))
153
  return chatbot, state
154
 
155
-
156
- state = state[:-1] # Remove last assistant's response from history
157
-
158
- if len(state) == 0 or not state:
 
 
159
  state = None
 
 
 
160
  # Set generation config
161
  do_sample = (float(temperature) != 0.0)
162
 
@@ -167,6 +163,8 @@ def regenerate_response(chatbot,
167
  temperature= float(temperature),
168
  top_p= float(top_p),
169
  )
 
 
170
  # Regenerate the response
171
  response_text, new_state = model.chat(
172
  tokenizer,
@@ -182,7 +180,7 @@ def regenerate_response(chatbot,
182
  state = new_state
183
 
184
  # Update chatbot with the regenerated response
185
- chatbot.append((last_user_message, response_text))
186
 
187
  return chatbot, state
188
 
@@ -213,6 +211,13 @@ with gr.Blocks() as demo:
213
  outputs=[model_state, tokenizer_state]
214
  )
215
 
 
 
 
 
 
 
 
216
  with gr.Row(equal_height=True):
217
  # First column with image input
218
  with gr.Column(scale=1):
@@ -222,7 +227,10 @@ with gr.Blocks() as demo:
222
  # Second column with chatbot and user input
223
  with gr.Column(scale=2):
224
  chatbot = gr.Chatbot(label="Conversation")
225
- user_input = gr.Textbox(label="What is your question", placeholder="Type your message here")
 
 
 
226
 
227
  with gr.Accordion('Parameters', open=False):
228
  with gr.Row():
 
51
  tile_num,
52
  chatbot,
53
  state,
 
54
  model_state,
55
  tokenizer_state):
56
 
 
128
  if model_state is None or tokenizer_state is None:
129
  chatbot.append(("System", "Please select a model to start the conversation."))
130
  return chatbot, state
 
 
 
131
 
132
  # Check if there is a previous user message
133
  if chatbot is None or len(chatbot) == 0:
 
135
  chatbot.append(("System", "Nothing to regenerate. Please start a conversation first."))
136
  return chatbot, state,
137
 
 
 
 
 
 
138
  # Get the last user message
139
  last_user_message, _ = chatbot[-1]
140
 
 
143
  chatbot.append(("System", "Cannot regenerate response for an empty or invalid message."))
144
  return chatbot, state
145
 
146
+ # Remove last assistant's response from state
147
+ if state is not None and len(state) > 0:
148
+ state = state[:-1] # Remove last assistant's response from history
149
+ if len(state) == 0:
150
+ state = None
151
+ else:
152
  state = None
153
+
154
+ model = model_state
155
+ tokenizer = tokenizer_state
156
  # Set generation config
157
  do_sample = (float(temperature) != 0.0)
158
 
 
163
  temperature= float(temperature),
164
  top_p= float(top_p),
165
  )
166
+
167
+
168
  # Regenerate the response
169
  response_text, new_state = model.chat(
170
  tokenizer,
 
180
  state = new_state
181
 
182
  # Update chatbot with the regenerated response
183
+ chatbot[-1] = (last_user_message, response_text)
184
 
185
  return chatbot, state
186
 
 
211
  outputs=[model_state, tokenizer_state]
212
  )
213
 
214
+ # Load the default model when the app starts
215
+ demo.load(
216
+ fn=load_model_and_set_image_function,
217
+ inputs=[model_dropdown],
218
+ outputs=[model_state, tokenizer_state]
219
+ )
220
+
221
  with gr.Row(equal_height=True):
222
  # First column with image input
223
  with gr.Column(scale=1):
 
227
  # Second column with chatbot and user input
228
  with gr.Column(scale=2):
229
  chatbot = gr.Chatbot(label="Conversation")
230
+ user_input = gr.Textbox(label="What is your question",
231
+ placeholder="Type your message here",
232
+ interactive=True,
233
+ lines=1)
234
 
235
  with gr.Accordion('Parameters', open=False):
236
  with gr.Row():