Update app.py
app.py CHANGED
@@ -52,6 +52,17 @@ try:
 except Exception:
     restart_space()
 
+eval_result_file = os.path.join(EVAL_RESULTS_PATH, 'llm-jp-13b-v2.0', 'llm-jp--llm-jp-13b-v2.0_vllm_20240908_170440.json')
+
+with open(eval_result_file, 'r') as file:
+    result_data = json.load(file)
+
+# Print only the keys that need to be checked
+print(f"Model name: {result_data.get('model_name')}")
+print(f"Precision (dtype): {result_data.get('config', {}).get('model', {}).get('dtype')}")
+print(f"Other config data: {result_data.get('config')}")
+print(f"Scores: {result_data.get('scores')}")
+
 
 LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
 original_df = LEADERBOARD_DF
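If the same fields need to be checked for more than one result file, the debug lines above generalize into the small standalone sketch below. This is not part of the commit: it assumes EVAL_RESULTS_PATH has the usual layout of one sub-directory per model holding the *.json result files, and that each file exposes the model_name, config.model.dtype, and scores keys read in the diff.

import glob
import json
import os

# Assumption: same constant as in app.py, pointing at the local results checkout.
EVAL_RESULTS_PATH = "./eval-results"

# Walk every result JSON under the results directory and print the fields
# that the debug code in the commit checks for a single file.
pattern = os.path.join(EVAL_RESULTS_PATH, "**", "*.json")
for eval_result_file in sorted(glob.glob(pattern, recursive=True)):
    with open(eval_result_file, "r") as file:
        result_data = json.load(file)

    print(f"File: {eval_result_file}")
    print(f"  Model name: {result_data.get('model_name')}")
    print(f"  Precision (dtype): {result_data.get('config', {}).get('model', {}).get('dtype')}")
    print(f"  Scores: {result_data.get('scores')}")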