XufengDuan committed on
Commit
8a5da23
1 Parent(s): c755378

update scripts

app.py: replace the inline pending-eval loop with the queue-based backend flow: check completed evals, fetch PENDING requests, sort them by priority, then run each evaluation and mark it FINISHED.

Files changed (1)
  1. app.py +75 -20
app.py CHANGED
@@ -1,9 +1,13 @@
+import logging
+
 import gradio as gr
 import pandas as pd
 from apscheduler.schedulers.background import BackgroundScheduler
 from huggingface_hub import snapshot_download
 import src.envs as envs
-from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, REPO_ID, RESULTS_REPO
+from main_backend import PENDING_STATUS, RUNNING_STATUS, FINISHED_STATUS, FAILED_STATUS
+from src.backend import sort_queue
+from src.envs import EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, RESULTS_REPO
 import src.backend.manage_requests as manage_requests

 import socket
@@ -16,6 +20,9 @@ import src.submission.submit as submit
 import os
 import datetime
 import spacy_transformers
+import pprint
+
+pp = pprint.PrettyPrinter(width=80)

 TOKEN = os.environ.get("H4_TOKEN", None)
 print("TOKEN", TOKEN)
@@ -47,22 +54,70 @@ original_df, finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df
 leaderboard_df = original_df.copy()

 def process_pending_evals():
-    if len(pending_eval_queue_df) == 0:
-        print("No pending evaluations found.")
+    # if len(pending_eval_queue_df) == 0:
+    #     print("No pending evaluations found.")
+    #     return
+    #
+    # for _, eval_request in pending_eval_queue_df.iterrows():
+    #     import re
+    #     model_link = eval_request['model']
+    #     match = re.search(r'>([^<]+)<', model_link)
+    #     if match:
+    #         eval_request['model'] = match.group(1)  # assign the extracted name to eval_request['model']
+    #     else:
+    #         eval_request['model'] = model_link  # if nothing matches, keep the original string
+    #
+    #     print(f"Evaluating model: {eval_request['model']}")
+    #
+    #     # call the evaluation function
+    #     run_eval_suite.run_evaluation(
+    #         eval_request=eval_request,
+    #         local_dir=envs.EVAL_RESULTS_PATH_BACKEND,
+    #         results_repo=envs.RESULTS_REPO,
+    #         batch_size=1,
+    #         device=envs.DEVICE,
+    #         no_cache=True,
+    #         need_check=False,  # set as needed: whether a check is required
+    #         write_results=False  # set as needed: whether to write results
+    #     )
+    #     print(f"Finished evaluation for model: {eval_request['model']}")
+    #     # Update the status to FINISHED
+    #     manage_requests.set_eval_request(
+    #         api=envs.API,
+    #         eval_request=eval_request,
+    #         new_status="FINISHED",
+    #         hf_repo=envs.QUEUE_REPO,
+    #         local_dir=envs.EVAL_REQUESTS_PATH_BACKEND
+    #     )
+    current_pending_status = [PENDING_STATUS]
+    print('_________________')
+    manage_requests.check_completed_evals(
+        api=envs.API,
+        checked_status=RUNNING_STATUS,
+        completed_status=FINISHED_STATUS,
+        failed_status=FAILED_STATUS,
+        hf_repo=envs.QUEUE_REPO,
+        local_dir=envs.EVAL_REQUESTS_PATH_BACKEND,
+        hf_repo_results=envs.RESULTS_REPO,
+        local_dir_results=envs.EVAL_RESULTS_PATH_BACKEND
+    )
+    logging.info("Checked completed evals")
+    eval_requests = manage_requests.get_eval_requests(
+        job_status=current_pending_status,
+        hf_repo=envs.QUEUE_REPO,
+        local_dir=envs.EVAL_REQUESTS_PATH_BACKEND
+    )
+    logging.info("Got eval requests")
+    eval_requests = sort_queue.sort_models_by_priority(api=envs.API, models=eval_requests)
+    logging.info("Sorted eval requests")
+
+    print(f"Found {len(eval_requests)} {','.join(current_pending_status)} eval requests")
+    if len(eval_requests) == 0:
+        print("No eval requests found. Exiting.")
         return

-    for _, eval_request in pending_eval_queue_df.iterrows():
-        import re
-        model_link = eval_request['model']
-        match = re.search(r'>([^<]+)<', model_link)
-        if match:
-            eval_request['model'] = match.group(1)  # assign the extracted name to eval_request['model']
-        else:
-            eval_request['model'] = model_link  # if nothing matches, keep the original string
-
-        print(f"Evaluating model: {eval_request['model']}")
-
-        # call the evaluation function
+    for eval_request in eval_requests:
+        pp.pprint(eval_request)
         run_eval_suite.run_evaluation(
             eval_request=eval_request,
             local_dir=envs.EVAL_RESULTS_PATH_BACKEND,
@@ -70,21 +125,21 @@ def process_pending_evals():
             batch_size=1,
             device=envs.DEVICE,
             no_cache=True,
-            need_check=False,  # set as needed: whether a check is required
-            write_results=False  # set as needed: whether to write results
+            need_check=False,
+            write_results=False
         )
-        print(f"Finished evaluation for model: {eval_request['model']}")
+        logging.info(f"Eval finished for model {eval_request.model}, now setting status to finished")
+
         # Update the status to FINISHED
         manage_requests.set_eval_request(
             api=envs.API,
             eval_request=eval_request,
-            new_status="FINISHED",
+            new_status=FINISHED_STATUS,
             hf_repo=envs.QUEUE_REPO,
             local_dir=envs.EVAL_REQUESTS_PATH_BACKEND
        )


-
 # Searching and filtering
 def update_table(
     hidden_df: pd.DataFrame,
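Two editorial notes on the new flow, with a minimal sketch below (none of this is part of the commit; the interval and log level are assumptions). First, process_pending_evals now reports progress through logging.info, but no logging configuration appears in this diff, and Python's root logger defaults to WARNING, so those messages may be silently dropped unless app.py configures logging elsewhere. Second, app.py already imports BackgroundScheduler, which is one plausible way to invoke the function periodically:

# Sketch only, not from this commit: the 30-minute interval and INFO level
# are assumptions for illustration.
import logging

from apscheduler.schedulers.background import BackgroundScheduler

logging.basicConfig(level=logging.INFO)  # make the new logging.info calls visible

scheduler = BackgroundScheduler()
scheduler.add_job(process_pending_evals, trigger="interval", minutes=30)  # re-check the queue periodically
scheduler.start()

Since BackgroundScheduler runs jobs on a daemon thread pool, something else (such as Gradio's blocking launch() call) must keep the process alive.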