XufengDuan committed
Commit bdf31b0
1 Parent(s): 6162a3c

update scripts

Files changed (1):
app.py (+30 -30)
app.py CHANGED
@@ -74,36 +74,6 @@ def process_pending_evals():
     print(f"Finished evaluation for model: {eval_request['model']}")
 
 
-# Call after initialization completes
-original_df, finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df = init_space()
-process_pending_evals()
-
-# try:
-# print(envs.EVAL_REQUESTS_PATH)
-# snapshot_download(
-# repo_id=envs.QUEUE_REPO, local_dir=envs.EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
-# )
-# except Exception:
-# restart_space()
-# try:
-# print(envs.EVAL_RESULTS_PATH)
-# snapshot_download(
-# repo_id=envs.RESULTS_REPO, local_dir=envs.EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
-# )
-# except Exception:
-# restart_space()
-
-# raw_data, original_df = populate.get_leaderboard_df(envs.RESULTS_REPO, envs.QUEUE_REPO, utils.COLS, utils.BENCHMARK_COLS)
-
-
-leaderboard_df = original_df.copy()
-
-(
-    finished_eval_queue_df,
-    running_eval_queue_df,
-    pending_eval_queue_df,
-) = populate.get_evaluation_queue_df(envs.EVAL_REQUESTS_PATH, utils.EVAL_COLS)
-
 
 # Searching and filtering
 def update_table(
@@ -392,3 +362,33 @@ scheduler = BackgroundScheduler()
 scheduler.add_job(restart_space, "interval", seconds=1800)
 scheduler.start()
 demo.queue(default_concurrency_limit=40).launch()
+# Call after initialization completes
+original_df, finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df = init_space()
+process_pending_evals()
+
+# try:
+# print(envs.EVAL_REQUESTS_PATH)
+# snapshot_download(
+# repo_id=envs.QUEUE_REPO, local_dir=envs.EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
+# )
+# except Exception:
+# restart_space()
+# try:
+# print(envs.EVAL_RESULTS_PATH)
+# snapshot_download(
+# repo_id=envs.RESULTS_REPO, local_dir=envs.EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
+# )
+# except Exception:
+# restart_space()
+
+# raw_data, original_df = populate.get_leaderboard_df(envs.RESULTS_REPO, envs.QUEUE_REPO, utils.COLS, utils.BENCHMARK_COLS)
+
+
+leaderboard_df = original_df.copy()
+
+(
+    finished_eval_queue_df,
+    running_eval_queue_df,
+    pending_eval_queue_df,
+) = populate.get_evaluation_queue_df(envs.EVAL_REQUESTS_PATH, utils.EVAL_COLS)
+
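
In short, the commit moves the Space's one-time setup (init_space(), process_pending_evals(), and the commented-out snapshot_download/leaderboard code) from module level, above the UI definition, to the bottom of app.py, after demo.queue(...).launch(). Below is a minimal runnable sketch of the resulting startup order. The helper bodies are placeholders assumed for illustration, not the real implementations in app.py, and prevent_thread_lock=True is sketch-only; the real file keeps launch()'s default.

# Sketch of the post-commit startup order. Assumptions: restart_space,
# init_space, and process_pending_evals are stand-ins for the helpers
# defined earlier in app.py; only the scheduler/launch lines mirror the diff.
import gradio as gr
from apscheduler.schedulers.background import BackgroundScheduler

def restart_space():
    # Stand-in: app.py restarts the Hugging Face Space via the Hub API.
    print("restart requested")

def init_space():
    # Stand-in: app.py builds the leaderboard and evaluation-queue DataFrames.
    return None, None, None, None

def process_pending_evals():
    # Stand-in: app.py runs evaluations for queued model requests.
    print("processing pending evaluations")

with gr.Blocks() as demo:
    gr.Markdown("leaderboard UI")

scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=1800)  # restart every 30 min
scheduler.start()

# prevent_thread_lock=True is sketch-only: with Gradio's default blocking
# launch(), statements below this line would not run until the server exits.
demo.queue(default_concurrency_limit=40).launch(prevent_thread_lock=True)

original_df, finished_df, running_df, pending_df = init_space()  # moved here by this commit
process_pending_evals()

Worth noting: in a plain script, Gradio's launch() blocks the main thread by default, so the lines this commit appends after it in the unmodified app.py would only run once the server exits; the sketch's prevent_thread_lock=True is what lets them execute right after startup.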