3DGen-Arena / serve /utils.py
ZhangYuhan's picture
update serve
7affe33
raw
history blame
6.42 kB
import os
import json
import random
import datetime
import requests
import numpy as np
import gradio as gr
from pathlib import Path
from model.model_registry import *
from constants import LOGDIR, LOG_SERVER_ADDR, APPEND_JSON, SAVE_IMAGE, SAVE_LOG, EVALUATE_DIMS
from typing import Union
# Pre-built Gradio update payloads shared by the serve UI event handlers.
# Each one is a component-state patch returned from callbacks.
enable_btn = gr.update(interactive=True, visible=True)
disable_btn = gr.update(interactive=False)
invisible_btn = gr.update(interactive=False, visible=False)
# Used as a no-op vote button label reset; keeps the button clickable.
no_change_btn = gr.update(value="No Change", interactive=True, visible=True)
def build_about():
    """Render the static "About Us" markdown section of the arena UI."""
    # Plain triple-quoted text; the original f-string had no placeholders,
    # so the rendered content is identical.
    about_text = """
# About Us
Supported by Shanghai AI Laboratory
## Contributors:
Yuhan Zhang, Mengchen Zhang, Tong Wu, Tengfei Wang, Ziwei Liu, Dahua Lin
## Contact:
Email: [email protected]
## Sponsorship
We are keep looking for sponsorship to support the arena project for the long term. Please contact us if you are interested in supporting this project.
"""
    gr.Markdown(about_text, elem_id="about_markdown")
# Markdown shown in the acknowledgment section; credits the upstream
# projects (FastChat, GenAI-Arena) this code base builds upon.
acknowledgment_md = """
### Acknowledgment
<div class="image-container">
<p> Our code base is built upon <a href="https://github.com/lm-sys/FastChat" target="_blank">FastChat</a> and <a href="https://github.com/TIGER-AI-Lab/GenAI-Arena" target="_blank">GenAI-Arena</a></p>.
</div>
"""
# Global CSS injected into the Gradio app.
# FIX: the original stylesheet was missing the closing brace of the
# `.image-about img` rule, which left `.input-image, .image-preview`
# nested inside it — invalid in plain (non-nested) CSS, so both rules
# were dropped by the browser. The two rules are now properly closed.
block_css = """
#notice_markdown {
font-size: 110%
}
#notice_markdown th {
display: none;
}
#notice_markdown td {
padding-top: 6px;
padding-bottom: 6px;
}
#model_description_markdown {
font-size: 110%
}
#leaderboard_markdown {
font-size: 110%
}
#leaderboard_markdown td {
padding-top: 6px;
padding-bottom: 6px;
}
#leaderboard_dataframe td {
line-height: 0.1em;
}
#about_markdown {
font-size: 110%
}
#ack_markdown {
font-size: 110%
}
#evaldim_markdown {
font-weight: bold;
text-align: center;
background-color: white;
}
#input_box textarea {
}
footer {
display:none !important
}
.image-about img {
margin: 0 30px;
margin-top: 30px;
height: 60px;
max-height: 100%;
width: auto;
float: left;
}
.input-image, .image-preview {
margin: 0 30px;
height: 30px;
max-height: 100%;
width: auto;
max-width: 30%;
}
"""
def enable_mds():
    """Make every per-dimension evaluation markdown visible."""
    updates = [gr.update(visible=True) for _ in range(EVALUATE_DIMS)]
    return tuple(updates)
def disable_mds():
    """Hide every per-dimension evaluation markdown."""
    updates = [gr.update(visible=False) for _ in range(EVALUATE_DIMS)]
    return tuple(updates)
def enable_buttons_side_by_side():
    """Show and enable all side-by-side controls (4 per dimension + 2 extras)."""
    total = EVALUATE_DIMS * 4 + 2
    return tuple(gr.update(visible=True, interactive=True) for _ in range(total))
def disable_buttons_side_by_side():
    """Disable all side-by-side controls; only the trailing 2 remain visible."""
    vote_btns = EVALUATE_DIMS * 4
    return tuple(
        gr.update(visible=(idx >= vote_btns), interactive=False)
        for idx in range(vote_btns + 2)
    )
def enable_buttons():
    """Enable the single-model controls (3 per dimension + 2 extras)."""
    total = EVALUATE_DIMS * 3 + 2
    return tuple(gr.update(interactive=True) for _ in range(total))
def disable_buttons():
    """Disable the single-model controls (3 per dimension + 2 extras)."""
    total = EVALUATE_DIMS * 3 + 2
    return tuple(gr.update(interactive=False) for _ in range(total))
def reset_state(state):
    """Clear one model's rendered videos and vote progress, then disable its buttons."""
    state.normal_video = None
    state.rgb_video = None
    state.evaluted_dims = 0  # NOTE: attribute name (with typo) is used by callers elsewhere
    disabled = tuple(
        gr.update(interactive=False) for _ in range(EVALUATE_DIMS * 3 + 2)
    )
    return (state,) + disabled
def reset_states_side_by_side(state_0, state_1):
    """Reset both players' videos and vote progress; lock the voting controls."""
    for st in (state_0, state_1):
        st.normal_video = None
        st.rgb_video = None
        st.evaluted_dims = 0  # NOTE: attribute name (with typo) is used by callers elsewhere
    vote_btns = EVALUATE_DIMS * 4
    btn_updates = tuple(
        gr.update(visible=(idx >= vote_btns), interactive=False)
        for idx in range(vote_btns + 2)
    )
    md_updates = tuple(gr.update(visible=False) for _ in range(EVALUATE_DIMS))
    return (state_0, state_1) + btn_updates + md_updates
def reset_states_side_by_side_anony(state_0, state_1):
    """Like reset_states_side_by_side, but also clears and hides the revealed model names."""
    for st in (state_0, state_1):
        st.model_name = ""
        st.normal_video = None
        st.rgb_video = None
        st.evaluted_dims = 0  # NOTE: attribute name (with typo) is used by callers elsewhere
    hidden_names = (gr.Markdown("", visible=False), gr.Markdown("", visible=False))
    vote_btns = EVALUATE_DIMS * 4
    btn_updates = tuple(
        gr.update(visible=(idx >= vote_btns), interactive=False)
        for idx in range(vote_btns + 2)
    )
    md_updates = tuple(gr.update(visible=False) for _ in range(EVALUATE_DIMS))
    return (state_0, state_1) + hidden_names + btn_updates + md_updates
def clear_t2s_history():
    """Reset the single-model text-to-shape tab: state, prompt box, two videos."""
    return (None, "", None, None)
def clear_t2s_history_side_by_side():
    """Reset both states, the shared prompt box, and the four video panes."""
    cleared = [None, None, ""]
    cleared.extend([None] * 4)
    return cleared
def clear_t2s_history_side_by_side_anony():
    """Reset the anonymous text-to-shape battle tab and hide the model-name rows."""
    hidden_names = [gr.Markdown("", visible=False), gr.Markdown("", visible=False)]
    return [None, None, ""] + [None] * 4 + hidden_names
def clear_i2s_history():
    """Reset the single-model image-to-shape tab: state, input image, two videos."""
    return (None,) * 4
def clear_i2s_history_side_by_side():
    """Reset both states, the shared input image, and the four video panes."""
    return [None] * 7
def clear_i2s_history_side_by_side_anony():
    """Reset the anonymous image-to-shape battle tab and hide the model-name rows."""
    hidden_names = [gr.Markdown("", visible=False), gr.Markdown("", visible=False)]
    return [None] * 7 + hidden_names
def get_ip(request: gr.Request):
    """Best-effort client IP for logging.

    Prefers Cloudflare's ``cf-connecting-ip`` header (falling back to the
    socket peer if the header is present but empty); returns ``None`` when
    no request object is available.
    """
    if not request:
        return None
    if "cf-connecting-ip" in request.headers:
        return request.headers["cf-connecting-ip"] or request.client.host
    return request.client.host
def get_conv_log_filename():
    """Return today's conversation-log path under LOGDIR (YYYY-MM-DD-conv.json)."""
    today = datetime.datetime.now()
    # Month/day are zero-padded to keep filenames lexicographically sortable.
    stamp = f"{today.year}-{today.month:02d}-{today.day:02d}"
    return os.path.join(LOGDIR, f"{stamp}-conv.json")
def save_image_file_on_log_server(image_file: str):
    """POST an image to the log server's save endpoint.

    The path is made relative to the current working directory so the
    server stores it under the same relative location.
    """
    rel_path = str(Path(image_file).absolute().relative_to(os.getcwd()))
    endpoint = f"{LOG_SERVER_ADDR}/{SAVE_IMAGE}"
    # Send the binary payload together with the server-side path to store it at.
    with open(rel_path, 'rb') as fh:
        return requests.post(endpoint, files={'image': fh}, data={'image_path': rel_path})
def append_json_item_on_log_server(json_item: Union[dict, str], log_file: str):
    """Append one JSON record to *log_file* via the log server.

    Dicts are serialized here; strings are assumed to already be JSON.
    Returns the ``requests`` response object.
    """
    payload = json.dumps(json_item) if isinstance(json_item, dict) else json_item
    rel_path = str(Path(log_file).absolute().relative_to(os.getcwd()))
    endpoint = f"{LOG_SERVER_ADDR}/{APPEND_JSON}"
    return requests.post(endpoint, data={'json_str': payload, 'file_name': rel_path})
def save_log_str_on_log_server(log_str: str, log_file: str):
    """Send a raw log message to the log server to be written to *log_file*.

    The path is made relative to the current working directory; returns the
    ``requests`` response object.
    """
    rel_path = str(Path(log_file).absolute().relative_to(os.getcwd()))
    endpoint = f"{LOG_SERVER_ADDR}/{SAVE_LOG}"
    return requests.post(endpoint, data={'message': log_str, 'log_path': rel_path})