import streamlit as st
import pandas as pd

from bat import Tester, Config, Benchmark, Reporter
from bat.utils import get_holistic_benchmark

st.title("πŸ‹οΈβ€β™‚οΈ benchbench-Leaderboard πŸ‹οΈβ€β™‚οΈ")


# Configuration for the benchmark agreement tests (BAT) run by this app.
cfg = Config(
    exp_to_run="example",
    n_models_taken_list=[0],
    model_select_strategy_list=["random"],
    n_exps=10,
    # reference_data_path="data/combined_holistic.csv",
)


# The new benchmark to evaluate and the name of its aggregate column.
newbench_name = "livebench"
new_bench_agg_name = f"{newbench_name}_mwr"

# Tester runs the agreement-testing experiments defined by cfg.
tester = Tester(cfg=cfg)

# models_for_benchmark_scoring = tester.fetch_reference_models_names(
#     reference_benchmark=get_holistic_benchmark(), n_models=20
# )

# Load the new benchmark's model scores from a local CSV file.
newbench = Benchmark(
    pd.read_csv(f"assets/{newbench_name}.csv"),
    data_source=newbench_name,
)

# newbench.add_aggragete(new_col_name=new_bench_agg_name)
# newbench_agreements = tester.all_vs_all_agreement_testing(newbench)

# Reporter summarizes agreement results (e.g., z-scores) for display.
reporter = Reporter()
# reporter.draw_agreements(
#     newbench_agreements, ref_sources=[newbench_name], scenario_sources=[newbench_name]
# )

# Reference set of established benchmarks, with an aggregate column added.
holistic = get_holistic_benchmark()
holistic.add_aggragete(new_col_name="aggregate", agg_source_name="holistic")

# Combine the new benchmark with the reference set; where a scenario appears
# in both, keep the new benchmark's copy.
allbench = newbench.extend(holistic)
allbench.clear_repeated_scenarios(source_to_keep=newbench_name)


@st.cache_data
def run_load():
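    # Run the all-vs-all agreement tests once and cache the result across Streamlit reruns.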
    return tester.all_vs_all_agreement_testing(allbench)


all_agreements = run_load()
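# all_agreements holds the pairwise agreement results between the scenarios in allbench.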

# Report how strongly the observed scenario agrees with the rest of the
# benchmarks, expressed as a z-score; blacklist_sources lists sources to
# exclude from the comparison.
observed_scenario = "arena_elo"  # "livebench_lb"
blacklist_sources = []  # "livebench"

z_score = reporter.get_z_score(all_agreements, observed_scenario, blacklist_sources)

st.write(f"z-score of {observed_scenario}: {z_score}")
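
# The commented-out code below reads precomputed results from
# "BAT_w_arena_10_random.csv" and renders agree/disagree tables plus
# configuration widgets; it is left here commented out for reference.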

# df = pd.read_csv("BAT_w_arena_10_random.csv")
# df = (
#     (
#         df.rename(
#             columns={
#                 "z_score": "Z_Score",
#                 "benchmark": "Benchmark",
#             }
#         ).drop(
#             columns=[
#                 "Unnamed: 0",
#                 "z_test_pass",
#             ]
#         )
#     )
#     .sort_values("Z_Score", ascending=False)
#     .query(
#         'Benchmark!="Aggregate" and Benchmark!="MAGI" and Benchmark!="Alpaca(v2, len adj)" and Benchmark!="GPT4All"'
#     )
# )


# df.replace(
#     {
#         "Arena Elo": "LMSys Arena",
#         "Hugging-6": "HF OpenLLM",
#         "Alpaca(v2)": "Alpaca v2",
#         "Alpaca(v1)": "Alpaca v1",
#         "EQ-Bench(v2)": "EQ-Bench v2",
#     },
#     inplace=True,
# )

# col1, col2, col3 = st.columns(3)

# with col1:
#     st.header("Agree")
#     st.dataframe(df.query("Z_Score>=0"), hide_index=True)

# with col2:
#     st.header("Disagree")
#     st.dataframe(df.query("Z_Score<0").sort_values("Z_Score"), hide_index=True)

# with col3:
#     st.header("Configs")
#     # st.selectbox(label="Reference Benchmarks", options=["LMSys Arena"])
#     options = st.multiselect(
#         "Reference Benchmarks",
#         ["LMSys Arena", "Open Compass", "Yellow", "Red", "Blue"],
#         ["LMSys Arena", "Open Compass"],
#     )
#     st.selectbox(label="# models compared", options=[20])
#     st.selectbox(label="Model Select Strategy", options=["Random"])
#     st.write("")
#     st.button("Upload a new benchmark")
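
# Note: a typical way to launch this app locally (assuming streamlit and the
# bat package are installed) is:
#   streamlit run <path_to_this_file>.py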