Create app.py
app.py
ADDED
@@ -0,0 +1,72 @@
# app.py — Gradio Space that opens a PR adding Open LLM Leaderboard results to a model card.
from huggingface_hub import CommitOperationAdd, HfApi, HfFileSystem, create_commit, login
from openllm import get_json_format_data, get_datas  # leaderboard-scraping helpers shipped with this Space
import gradio as gr
import pandas as pd

api = HfApi()
fs = HfFileSystem()

# Pull the current leaderboard snapshot once at startup and flatten it into a DataFrame
# (columns include "Model", "Average ⬆️", "ARC", "HellaSwag", "MMLU", "TruthfulQA",
# "Winogrande", "GSM8K" and "DROP").
data = get_json_format_data()
finished_models = get_datas(data)
df = pd.DataFrame(finished_models)


def search(df, value):
    """Return the leaderboard row for `value` as a dict, or None if it is not listed."""
    result_df = df[df["Model"] == value]
    return result_df.iloc[0].to_dict() if not result_df.empty else None
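# Illustrative call (placeholder model id; scores depend on the live leaderboard snapshot):
#   search(df, "some-author/some-model")
#   -> {"Model": "some-author/some-model", "Average ⬆️": ..., "ARC": ..., ...} or None if unlisted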


def get_details_url(repo):
    """Build the URL of the per-model details dataset published by the leaderboard."""
    author, model = repo.split("/")
    return f"https://huggingface.co/datasets/open-llm-leaderboard/details_{author}__{model}"
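# Illustrative: get_details_url("some-author/some-model")
# -> "https://huggingface.co/datasets/open-llm-leaderboard/details_some-author__some-model"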


def get_eval_results(repo):
    """Render the model's leaderboard scores as the Markdown block appended to its README."""
    # search() returns None for models that are not on the leaderboard; the subscripts
    # below then raise, which commit() surfaces as "Error".
    results = search(df, repo)

    text = f"""
# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
Detailed results can be found [here]({get_details_url(repo)})

| Metric                | Value                    |
|-----------------------|--------------------------|
| Avg.                  | {results['Average ⬆️']}  |
| ARC (25-shot)         | {results['ARC']}         |
| HellaSwag (10-shot)   | {results['HellaSwag']}   |
| MMLU (5-shot)         | {results['MMLU']}        |
| TruthfulQA (0-shot)   | {results['TruthfulQA']}  |
| Winogrande (5-shot)   | {results['Winogrande']}  |
| GSM8K (5-shot)        | {results['GSM8K']}       |
| DROP (3-shot)         | {results['DROP']}        |
"""
    return text


desc = """
This is an automated PR created with https://huggingface.co/spaces/Weyaxi/open-llm-leaderboard-results-pr

The purpose of this PR is to add evaluation results from the Open LLM Leaderboard to your model card.

If you encounter any issues, please report them to https://huggingface.co/spaces/Weyaxi/open-llm-leaderboard-results-pr/discussions
"""


def commit(hf_token, repo):
    """Open a pull request on `repo` that appends the leaderboard results to its README."""
    try:
        # Authenticate with the user's token so the PR is opened on their behalf.
        login(hf_token)

        try:  # check if there is a README already; if so, append to it
            readme_text = fs.read_text(f"{repo}/README.md") + get_eval_results(repo)
        except Exception:  # no README yet
            readme_text = get_eval_results(repo)

        liste = [CommitOperationAdd(path_in_repo="README.md", path_or_fileobj=readme_text.encode())]
        pr = create_commit(
            repo_id=repo,
            operations=liste,
            commit_message="Adding Evaluation Results",
            commit_description=desc,
            repo_type="model",
            create_pr=True,
        )
        return pr.pr_url
    except Exception:  # unexpected error
        return "Error"


# Two text inputs (a write-scoped HF token and a model repo id) in, the PR URL (or "Error") out.
demo = gr.Interface(fn=commit, inputs=["text", "text"], outputs="text")

demo.launch()
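
# For reference, a minimal sketch of driving this Space programmatically with
# `gradio_client` (the token and repo here are hypothetical placeholders; assumes
# the Space is running and exposes the default "/predict" endpoint of a gr.Interface):
#
#   from gradio_client import Client
#
#   client = Client("Weyaxi/open-llm-leaderboard-results-pr")
#   pr_url = client.predict("hf_...", "your-username/your-model", api_name="/predict")
#   print(pr_url)  # URL of the newly opened pull request, or "Error" on failure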