XufengDuan committed b3b0417
Parent(s): 2e0ca43
update scripts
app.py
CHANGED
@@ -113,7 +113,9 @@ def process_pending_evals():
         print("No eval requests found. Exiting.")
         return
 
-    for eval_request in eval_requests:
+    import concurrent.futures
+
+    def process_eval_request(eval_request):
         pp.pprint(eval_request)
         run_eval_suite.run_evaluation(
             eval_request=eval_request,
@@ -122,8 +124,8 @@ def process_pending_evals():
             batch_size=1,
             device=envs.DEVICE,
             no_cache=True,
-            need_check= False,
-            write_results= False
+            need_check=False,
+            write_results=False
         )
         logging.info(f"Eval finished for model {eval_request.model}, now setting status to finished")
 
@@ -136,6 +138,38 @@ def process_pending_evals():
             local_dir=envs.EVAL_REQUESTS_PATH_BACKEND
         )
 
+    # Number of worker threads in the pool
+    max_workers = 5  # set an appropriate number for your needs
+
+    # Use a ThreadPoolExecutor to run multiple eval_requests in parallel
+    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
+        futures = [executor.submit(process_eval_request, eval_request) for eval_request in eval_requests]
+
+        # Wait for all tasks to complete
+        concurrent.futures.wait(futures)
+    # for eval_request in eval_requests:
+    #     pp.pprint(eval_request)
+    #     run_eval_suite.run_evaluation(
+    #         eval_request=eval_request,
+    #         local_dir=envs.EVAL_RESULTS_PATH_BACKEND,
+    #         results_repo=envs.RESULTS_REPO,
+    #         batch_size=1,
+    #         device=envs.DEVICE,
+    #         no_cache=True,
+    #         need_check= False,
+    #         write_results= False
+    #     )
+    #     logging.info(f"Eval finished for model {eval_request.model}, now setting status to finished")
+    #
+    #     # Update the status to FINISHED
+    #     manage_requests.set_eval_request(
+    #         api=envs.API,
+    #         eval_request=eval_request,
+    #         new_status=FINISHED_STATUS,
+    #         hf_repo=envs.QUEUE_REPO,
+    #         local_dir=envs.EVAL_REQUESTS_PATH_BACKEND
+    #     )
+
 
 # Searching and filtering
 def update_table(
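For reference, below is a minimal, self-contained sketch of the submit/wait thread-pool pattern this commit switches to. The worker body and the eval_requests list here are illustrative stand-ins (the real code calls run_eval_suite.run_evaluation on pending EvalRequest objects); only the ThreadPoolExecutor structure mirrors the diff. One point worth noting: concurrent.futures.wait() blocks until every future settles but does not re-raise exceptions from workers, so calling result() on each future afterwards is what surfaces any errors.

import concurrent.futures
import time

def process_eval_request(eval_request):
    # Stand-in for run_eval_suite.run_evaluation(eval_request=..., ...)
    time.sleep(0.1)
    return f"finished {eval_request}"

# Illustrative stand-ins for the pending EvalRequest objects
eval_requests = [f"model-{i}" for i in range(8)]

max_workers = 5
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
    futures = [executor.submit(process_eval_request, req) for req in eval_requests]

    # wait() blocks until all futures complete but does not re-raise worker
    # exceptions; result() re-raises anything a worker raised.
    done, _ = concurrent.futures.wait(futures)
    for future in done:
        print(future.result())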