Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -62,6 +62,7 @@ repo_name = os.environ["LOG_REPO"]
|
|
62 |
|
63 |
external_log_dir = "./logs"
|
64 |
LOGDIR = external_log_dir
|
|
|
65 |
|
66 |
|
67 |
def install_gradio_4_35_0():
|
@@ -87,6 +88,36 @@ def get_conv_log_filename():
|
|
87 |
name = os.path.join(LOGDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-user_conv.json")
|
88 |
return name
|
89 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
90 |
class InferenceDemo(object):
|
91 |
def __init__(
|
92 |
self, args, model_path, tokenizer, model, image_processor, context_len
|
@@ -298,6 +329,14 @@ def bot(history, temperature, top_p, max_output_tokens):
|
|
298 |
our_chatbot.conversation.append_message(our_chatbot.conversation.roles[1], None)
|
299 |
prompt = our_chatbot.conversation.get_prompt()
|
300 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
301 |
# input_ids = (
|
302 |
# tokenizer_image_token(
|
303 |
# prompt, our_chatbot.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt"
|
@@ -648,6 +687,15 @@ with gr.Blocks(
|
|
648 |
fn=clear_history, inputs=[chatbot], outputs=[chatbot], api_name="clear_all"
|
649 |
)
|
650 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
651 |
|
652 |
demo.queue()
|
653 |
|
|
|
# Conversation logs are appended under LOGDIR and vote records under
# VOTEDIR; both paths are relative to the Space's working directory.
external_log_dir = "./logs"
LOGDIR = external_log_dir
VOTEDIR = "./votes"
|
66 |
|
67 |
|
68 |
def install_gradio_4_35_0():
|
|
|
88 |
name = os.path.join(LOGDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-user_conv.json")
|
89 |
return name
|
90 |
|
91 |
+
def get_conv_vote_filename():
    """Return today's vote-log file path under VOTEDIR.

    Ensures the parent directory exists before returning.  The original
    guarded ``os.makedirs`` behind ``not os.path.isfile(name)``; since
    ``exist_ok=True`` already tolerates a pre-existing directory, the call
    is safe to make unconditionally and the guard was dropped.

    Returns:
        str: path like ``./votes/YYYY-MM-DD-user_vote.json``.
    """
    t = datetime.datetime.now()
    name = os.path.join(VOTEDIR, f"{t.year}-{t.month:02d}-{t.day:02d}-user_vote.json")
    os.makedirs(os.path.dirname(name), exist_ok=True)
    return name
|
97 |
+
|
98 |
+
def vote_last_response(state, vote_type, model_selector):
    """Append one vote record locally and mirror the file to the HF dataset repo.

    Args:
        state: chatbot history passed through from the UI.
            NOTE(review): assumes this is JSON-serializable — confirm against
            the gr.Chatbot value actually wired to this handler.
        vote_type: vote label, e.g. ``"upvote"`` or ``"downvote"``.
        model_selector: model name string recorded with the vote.
    """
    # Hoisted: the original recomputed get_conv_vote_filename() three times.
    vote_file = get_conv_vote_filename()
    record = {
        "type": vote_type,
        "model": model_selector,
        "state": state,
    }
    # Close the file before uploading so the appended line is flushed to
    # disk and the uploaded copy is complete.
    with open(vote_file, "a") as fout:
        fout.write(json.dumps(record) + "\n")
    api.upload_file(
        path_or_fileobj=vote_file,
        path_in_repo=vote_file.replace("./votes/", ""),
        repo_id=repo_name,
        repo_type="dataset",
    )
|
111 |
+
|
112 |
+
|
113 |
+
def upvote_last_response(state):
    """Record an upvote for the last "Pangea-7b" response; returns state unchanged."""
    vote_last_response(state, "upvote", "Pangea-7b")
    return state
|
116 |
+
|
117 |
+
def downvote_last_response(state):
    """Record a downvote for the last "Pangea-7b" response; returns state unchanged."""
    vote_last_response(state, "downvote", "Pangea-7b")
    return state
|
120 |
+
|
121 |
class InferenceDemo(object):
|
122 |
def __init__(
|
123 |
self, args, model_path, tokenizer, model, image_processor, context_len
|
|
|
329 |
our_chatbot.conversation.append_message(our_chatbot.conversation.roles[1], None)
|
330 |
prompt = our_chatbot.conversation.get_prompt()
|
331 |
|
332 |
+
if len(images_this_term) == 0:
|
333 |
+
gr.Warning("You should upload an image. Please upload the image and try again.")
|
334 |
+
return history
|
335 |
+
|
336 |
+
if len(images_this_term) > 1:
|
337 |
+
gr.Warning("Only one image can be uploaded in a conversation. Please reduce the number of images and start a new conversation.")
|
338 |
+
return history
|
339 |
+
|
340 |
# input_ids = (
|
341 |
# tokenizer_image_token(
|
342 |
# prompt, our_chatbot.tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt"
|
|
|
687 |
fn=clear_history, inputs=[chatbot], outputs=[chatbot], api_name="clear_all"
|
688 |
)
|
689 |
|
690 |
+
upvote_btn.click(
    fn=upvote_last_response,
    inputs=[chatbot],
    outputs=[chatbot],
    api_name="upvote_last_response",
)

# Bug fix: the downvote handler was registered with
# api_name="upvote_last_response", colliding with the upvote endpoint above
# in the exposed Gradio API; give it its own distinct name.
downvote_btn.click(
    fn=downvote_last_response,
    inputs=[chatbot],
    outputs=[chatbot],
    api_name="downvote_last_response",
)
|
698 |
+
|
699 |
|
700 |
demo.queue()
|
701 |
|