from dataclasses import dataclass
from enum import Enum


@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str

class Tasks(Enum):
    # task_key in the json file, metric_key in the json file, name to display in the leaderboard
    Overall = Task("overall_js_divergence", "overall_js_divergence", "Overall Humanlike %")
    Overall_ci = Task("overall_confidence_interval", "overall_confidence_interval", "Overall CI")
    E1 = Task("E1", "E1", "E1 Humanlike %")
    E1_ci = Task("E1_ci", "E1_ci", "E1 CI")
    E2 = Task("E2", "E2", "E2 Humanlike %")
    E2_ci = Task("E2_ci", "E2_ci", "E2 CI")
    E3 = Task("E3", "E3", "E3 Humanlike %")
    E3_ci = Task("E3_ci", "E3_ci", "E3 CI")
    E4 = Task("E4", "E4", "E4 Humanlike %")
    E4_ci = Task("E4_ci", "E4_ci", "E4 CI")
    E5 = Task("E5", "E5", "E5 Humanlike %")
    E5_ci = Task("E5_ci", "E5_ci", "E5 CI")
    E6 = Task("E6", "E6", "E6 Humanlike %")
    E6_ci = Task("E6_ci", "E6_ci", "E6 CI")
    E7 = Task("E7", "E7", "E7 Humanlike %")
    E7_ci = Task("E7_ci", "E7_ci", "E7 CI")
    E8 = Task("E8", "E8", "E8 Humanlike %")
    E8_ci = Task("E8_ci", "E8_ci", "E8 CI")
    E9 = Task("E9", "E9", "E9 Humanlike %")
    E9_ci = Task("E9_ci", "E9_ci", "E9 CI")
    E10 = Task("E10", "E10", "E10 Humanlike %")
    E10_ci = Task("E10_ci", "E10_ci", "E10 CI")

# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">Humanlike Evaluation Model (HEM) leaderboard</h1>"""

# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
This leaderboard (by [Xufeng Duan](https://xufengduan.github.io/)) evaluates the similarity between human and model responses in language use.<br>
"""

# Which evaluations are you running? How can people reproduce what you have?
LLM_BENCHMARKS_TEXT = """
## Introduction

This study compares human and model responses in language use across ten psycholinguistic tasks. Each task consists of multiple stimuli, and each stimulus has both an expected and an unexpected response.

To quantify the similarity, we collected responses from 2,000 human participants, yielding a binomial distribution over the two response types for each stimulus in each task. The same stimuli were then presented to a language model, producing a second binomial distribution for comparison.

## How it works

To measure the similarity between human and model responses, we use the Jensen-Shannon (JS) divergence to compare the two binomial distributions (one from human responses, one from model responses) for each stimulus.
The similarity is quantified as 1 minus the JS divergence, where a value closer to 1 indicates higher similarity.
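
For illustration, here is a minimal sketch of the per-stimulus score. The counts and distributions below are hypothetical, and this is illustrative rather than the exact scoring code used by the backend:

```python
import numpy as np

def js_divergence(p, q):
    # Jensen-Shannon divergence (base 2) between two discrete distributions;
    # with base 2 it is bounded between 0 and 1.
    p, q = np.asarray(p, dtype=float), np.asarray(q, dtype=float)
    m = 0.5 * (p + q)

    def kl(a, b):
        mask = a > 0
        return np.sum(a[mask] * np.log2(a[mask] / b[mask]))

    return 0.5 * kl(p, m) + 0.5 * kl(q, m)

# Hypothetical stimulus: 1400 of 2000 participants gave the expected
# response, while the model gave it 60% of the time.
human = [1400 / 2000, 600 / 2000]  # [expected, unexpected]
model = [0.6, 0.4]

humanlike = 1.0 - js_divergence(human, model)  # closer to 1 = more humanlike
print(f"humanlike score: {humanlike:.3f}")
```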

## Evaluation Dataset

Our evaluation dataset consists of the stimuli from the ten psycholinguistic tasks described above. Each stimulus was presented both to the human participants and to the submitted LLMs, and the two resulting response distributions are compared per stimulus.

## Metrics Explained

- Individual Task Similarity: for each psycholinguistic task, we calculate the humanlike score of each stimulus, measuring how closely the model's responses resemble those of humans.
- Average Similarity: the mean of the humanlike scores across all stimuli and tasks, giving an overall indication of how well the model mimics human language use (see the sketch below).
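
As a sketch of the aggregation, with made-up scores and task names:

```python
import numpy as np

# Hypothetical per-stimulus humanlike scores (1 - JS divergence) per task.
scores = {
    "E1": [0.91, 0.84, 0.78, 0.95],
    "E2": [0.88, 0.73, 0.81],
}

# Per-task score: mean over that task's stimuli.
task_means = {task: float(np.mean(s)) for task, s in scores.items()}

# Overall score: mean over all stimuli across all tasks.
overall = float(np.mean([v for s in scores.values() for v in s]))
print(task_means, f"overall: {overall:.3f}")
```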

## Note on non-Hugging Face models

This leaderboard currently includes models, such as GPT variants, that are not available on the Hugging Face model hub. We ran the evaluations for these models ourselves and uploaded the results to the leaderboard.
If you would like to submit a model that is not available on the Hugging Face model hub, please contact us at [email protected].

## Model Submissions and Reproducibility

You can submit your model for evaluation whether or not it is hosted on the Hugging Face model hub (though hosting it there is recommended).

### For models available on the Hugging Face model hub:

To replicate the evaluation result for a Hugging Face model:

1) Clone the repository
```bash
git lfs install
git clone https://huggingface.co/spaces/Simondon/HumanLikeness
```
2) Install the requirements
```bash
pip install -r requirements.txt
```
3) Set up your Hugging Face token
```bash
export HF_TOKEN=your_token
```
4) Run the evaluation script
```bash
python main_backend.py --model your_model_id --precision float16
```
5) Check results

After the evaluation, results are saved in `eval-results-bk/your_model_id/results.json`.

## Results Format

The results are structured in JSON, with one entry per task key defined above:

```json
{
  "config": {
    "model_dtype": "float16",
    "model_name": "your_model_id",
    "model_sha": "main"
  },
  "results": {
    "overall_js_divergence": {
      "overall_js_divergence": ...
    },
    "overall_confidence_interval": {
      "overall_confidence_interval": ...
    },
    "E1": {
      "E1": ...
    },
    "E1_ci": {
      "E1_ci": ...
    },
    ...
  }
}
```
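
As a minimal sketch of consuming a result file (assuming it follows the format above; the path comes from step 5, and `your_model_id` is a placeholder):

```python
import json

# Load the result file written by the evaluation script.
with open("eval-results-bk/your_model_id/results.json") as f:
    data = json.load(f)

# Look up the overall humanlike score via the task/metric keys
# defined in the Tasks enum above.
overall = data["results"]["overall_js_divergence"]["overall_js_divergence"]
print(f"Overall Humanlike %: {overall}")
```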

For additional queries or model submissions, please contact [email protected].
"""

EVALUATION_QUEUE_TEXT = """
"""

CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
@dataset{Duan2024humanlike,
  author    = {Xufeng Duan and Bei Xiao and Xuemei Tang and Zhenguang Cai},
  title     = {Humanlike Leaderboard},
  year      = {2024},
  month     = {8},
  publisher = {},
  doi       = {},
  url       = {https://huggingface.co/spaces/Simondon/HumanLikeness},
  abstract  = {A leaderboard comparing LLM performance at humanlikeness in language use.},
  keywords  = {nlp, llm, psycholinguistics, nli, machine learning},
  license   = {Apache-2.0},
}"""