import re
import streamlit as st
import requests
import pandas as pd
from io import StringIO
import plotly.graph_objs as go
from huggingface_hub import HfApi
from huggingface_hub.utils import RepositoryNotFoundError, RevisionNotFoundError
from yall import create_yall
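
# create_yall() (defined in yall.py in this space) is assumed here to download
# the leaderboard markdown table from a GitHub gist, per the "Clone this space"
# section of the About tab below.
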
def convert_markdown_table_to_dataframe(md_content):
    """
    Converts a markdown table to a pandas DataFrame, handling special characters
    and links, extracts Hugging Face URLs, and adds them to a new column.
    """
    # Remove leading and trailing | characters
    cleaned_content = re.sub(r'\|\s*$', '', re.sub(r'^\|\s*', '', md_content, flags=re.MULTILINE), flags=re.MULTILINE)

    # Create DataFrame from cleaned content (raw string so | is a literal pipe)
    df = pd.read_csv(StringIO(cleaned_content), sep=r'\|', engine='python')

    # Remove the markdown separator row (the first row after the header)
    df = df.drop(0, axis=0)

    # Strip whitespace from column names
    df.columns = df.columns.str.strip()

    # Extract Hugging Face URLs and add them to a new column
    model_link_pattern = r'\[(.*?)\]\((.*?)\)\s*\[.*?\]\(.*?\)'
    df['URL'] = df['Model'].apply(lambda x: re.search(model_link_pattern, x).group(2) if re.search(model_link_pattern, x) else None)

    # Clean Model column to keep only the model link text
    df['Model'] = df['Model'].apply(lambda x: re.sub(model_link_pattern, r'\1', x))

    return df
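
# Illustrative sketch (commented out so the app never executes it): a
# hypothetical two-link markdown row in the format the regex above expects,
# where the first link is the model card and the second points to its
# evaluation results.
#
# _sample = (
#     "| Model | Average |\n"
#     "|---|---|\n"
#     "| [foo/bar](https://huggingface.co/foo/bar) [gist](https://gist.github.com/x) | 51.2 |"
# )
# convert_markdown_table_to_dataframe(_sample)
# # -> columns: Model ('foo/bar'), Average, URL ('https://huggingface.co/foo/bar')
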
@st.cache_data
def get_model_info(df):
    api = HfApi()

    # Initialize new columns for likes and tags
    df['Likes'] = None
    df['Tags'] = None

    # Iterate through DataFrame rows
    for index, row in df.iterrows():
        model = row['Model'].strip()
        try:
            model_info = api.model_info(repo_id=str(model))
            df.loc[index, 'Likes'] = model_info.likes
            df.loc[index, 'Tags'] = ', '.join(model_info.tags)
        except (RepositoryNotFoundError, RevisionNotFoundError):
            df.loc[index, 'Likes'] = -1
            df.loc[index, 'Tags'] = ''

    return df
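
# Hedged usage sketch (commented out; hits the Hugging Face Hub API when run,
# and the repo id below is only an example, not part of the leaderboard):
#
# _df = pd.DataFrame({'Model': ['mistralai/Mistral-7B-v0.1']})
# _df = get_model_info(_df)
# # 'Likes' and 'Tags' are filled from the Hub; unknown repos get -1 and ''
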
def create_bar_chart(df, category):
    """Create and display a horizontal bar chart for a given category."""
    st.write(f"### {category} Scores")

    # Sort the DataFrame based on the category score
    sorted_df = df[['Model', category]].sort_values(by=category, ascending=True)

    # Create the bar chart with a color gradient
    fig = go.Figure(go.Bar(
        x=sorted_df[category],
        y=sorted_df['Model'],
        orientation='h',
        marker=dict(color=sorted_df[category], colorscale='Inferno')
    ))

    # Update layout for better readability; the height is set on the figure
    # itself rather than passed to st.plotly_chart
    fig.update_layout(
        height=len(df) * 35,
        margin=dict(l=20, r=20, t=20, b=20)
    )

    st.plotly_chart(fig, use_container_width=True)
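
# Example call (commented out; assumes a running Streamlit session and a
# DataFrame with 'Model' and score columns like the one built in main()):
#
# create_bar_chart(df, 'Average')
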
def main():
    st.set_page_config(page_title="YALL - Yet Another LLM Leaderboard", layout="wide")

    st.title("🥇 YALL - Yet Another LLM Leaderboard")
    st.markdown("Leaderboard made with 🧐 [LLM AutoEval](https://github.com/mlabonne/llm-autoeval) using the [Nous](https://huggingface.co/NousResearch) benchmark suite.")

    content = create_yall()
    tab1, tab2 = st.tabs(["🏆 Leaderboard", "📝 About"])
    # Leaderboard tab
    with tab1:
        if content:
            try:
                score_columns = ['Average', 'AGIEval', 'GPT4All', 'TruthfulQA', 'Bigbench']

                # Build the DataFrame from the downloaded markdown table
                full_df = convert_markdown_table_to_dataframe(content)
                for col in score_columns:
                    # Convert score strings to numbers, coercing bad values to NaN
                    full_df[col] = pd.to_numeric(full_df[col].str.strip(), errors='coerce')
                full_df = get_model_info(full_df)
                full_df['Tags'] = full_df['Tags'].fillna('')
                df = pd.DataFrame(columns=full_df.columns)

                # Toggles to filter models by family
                col1, col2, col3 = st.columns(3)
                with col1:
                    show_phi = st.checkbox("Phi (2.8B)", value=True)
                with col2:
                    show_mistral = st.checkbox("Mistral (7B)", value=True)
                with col3:
                    show_other = st.checkbox("Other", value=True)

                dfs_to_concat = []
                if show_phi:
                    dfs_to_concat.append(full_df[full_df['Tags'].str.lower().str.contains('phi,|phi-msft,')])
                if show_mistral:
                    dfs_to_concat.append(full_df[full_df['Tags'].str.lower().str.contains('mistral,')])
                if show_other:
                    other_df = full_df[~full_df['Tags'].str.lower().str.contains('phi,|phi-msft,|mistral,')]
                    dfs_to_concat.append(other_df)

                # Concatenate the filtered DataFrames
                if dfs_to_concat:
                    df = pd.concat(dfs_to_concat, ignore_index=True)

                # Sort by average score, best models first
                df = df.sort_values(by='Average', ascending=False)

                # Display the DataFrame
                st.dataframe(
                    df[['Model'] + score_columns + ['Likes', 'URL']],
                    use_container_width=True,
                    column_config={
                        "Likes": st.column_config.NumberColumn(
                            "Likes",
                            help="Number of likes on Hugging Face",
                            format="%d ❤️",
                        ),
                        "URL": st.column_config.LinkColumn("URL"),
                    },
                    hide_index=True,
                    height=len(df) * 37,
                )
                # Full-width plot for the first category
                create_bar_chart(df, score_columns[0])

                # Next two plots in two columns
                col1, col2 = st.columns(2)
                with col1:
                    create_bar_chart(df, score_columns[1])
                with col2:
                    create_bar_chart(df, score_columns[2])

                # Last two plots in two columns
                col3, col4 = st.columns(2)
                with col3:
                    create_bar_chart(df, score_columns[3])
                with col4:
                    create_bar_chart(df, score_columns[4])

            except Exception as e:
                st.error("An error occurred while processing the markdown table.")
                st.error(str(e))
        else:
            st.error("Failed to download the content from the URL provided.")
with tab2:
st.markdown('''
### Nous benchmark suite
Popularized by [Teknium](https://huggingface.co/teknium) and [NousResearch](https://huggingface.co/NousResearch), this benchmark suite aggregates four benchmarks:
* [**AGIEval**](https://arxiv.org/abs/2304.06364) (0-shot): `agieval_aqua_rat,agieval_logiqa_en,agieval_lsat_ar,agieval_lsat_lr,agieval_lsat_rc,agieval_sat_en,agieval_sat_en_without_passage,agieval_sat_math`
* **GPT4ALL** (0-shot): `hellaswag,openbookqa,winogrande,arc_easy,arc_challenge,boolq,piqa`
* [**TruthfulQA**](https://arxiv.org/abs/2109.07958) (0-shot): `truthfulqa_mc`
* [**Bigbench**](https://arxiv.org/abs/2206.04615) (0-shot): `bigbench_causal_judgement,bigbench_date_understanding,bigbench_disambiguation_qa,bigbench_geometric_shapes,bigbench_logical_deduction_five_objects,bigbench_logical_deduction_seven_objects,bigbench_logical_deduction_three_objects,bigbench_movie_recommendation,bigbench_navigate,bigbench_reasoning_about_colored_objects,bigbench_ruin_names,bigbench_salient_translation_error_detection,bigbench_snarks,bigbench_sports_understanding,bigbench_temporal_sequences,bigbench_tracking_shuffled_objects_five_objects,bigbench_tracking_shuffled_objects_seven_objects,bigbench_tracking_shuffled_objects_three_objects`
### Reproducibility
You can easily reproduce these results using ๐ง [LLM AutoEval](https://github.com/mlabonne/llm-autoeval/tree/master), a colab notebook that automates the evaluation process (benchmark: `nous`). This will upload the results to GitHub as gists. You can find the entire table with the links to the detailed results [here](https://gist.github.com/mlabonne/90294929a2dbcb8877f9696f28105fdf).
### Clone this space
You can create your own leaderboard with your LLM AutoEval results on GitHub Gist. You just need to clone this space and specify two variables:
* Change the `gist_id` in [yall.py](https://huggingface.co/spaces/mlabonne/Yet_Another_LLM_Leaderboard/blob/main/yall.py#L126).
* Create "New Secret" in Settings > Variables and secrets (name: "github", value: [your GitHub token](https://github.com/settings/tokens))
A special thanks to [gblazex](https://huggingface.co/gblazex) for providing many evaluations.
''')
if __name__ == "__main__":
    main()