from dataclasses import dataclass
from enum import Enum

@dataclass
class Task:
    benchmark: str
    metric: str
    col_name: str


# Init: update these entries with your leaderboard's specific task keys
class Tasks(Enum):
    # Task(benchmark key in the results json, metric key in the results json, column name to display in the leaderboard)
    task0 = Task("logiqa", "delta_abs", "LogiQA Δ")
    #task1 = Task("logiqa", "delta_rel", "LogiQA Δ%")
    #task2 = Task("logiqa", "acc_base", "LogiQA Acc")
    #task3 = Task("logiqa", "acc_cot", "LogiQA AccCoT")
    task4 = Task("logiqa2", "delta_abs", "LogiQA2 Δ")
    #task5 = Task("logiqa2", "delta_rel", "LogiQA2 Δ%")
    #task6 = Task("logiqa2", "acc_base", "LogiQA2 Acc")
    #task7 = Task("logiqa2", "acc_cot", "LogiQA2 AccCoT")

#METRICS = list(set([task.value.metric for task in Tasks]))
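
# A minimal usage sketch of how these Task entries are meant to be consumed;
# `sample_results` is an assumption about the results-json layout described in
# the comment above, not real data.
if __name__ == "__main__":
    sample_results = {
        "logiqa": {"delta_abs": 0.042},
        "logiqa2": {"delta_abs": 0.031},
    }
    for task in Tasks:
        t = task.value
        print(f"{t.col_name}: {sample_results[t.benchmark][t.metric]}")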





# Your leaderboard name
TITLE = """<h1 align="center" id="space-title">Demo leaderboard</h1>"""

# What does your leaderboard evaluate?
INTRODUCTION_TEXT = """
Intro text
"""

# Which evaluations are you running? How can people reproduce what you have?
LLM_BENCHMARKS_TEXT = """
## How it works
The leaderboard tracks how chain-of-thought prompting changes a model's accuracy on LogiQA and LogiQA2: each Δ column reports accuracy with CoT minus base accuracy.

## Reproducibility
To reproduce our results, here are the commands you can run:
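
As a starting point, here is a hedged sketch using the Python API of the EleutherAI lm-evaluation-harness; the task names and arguments are assumptions based on the tasks defined above, not the exact leaderboard configuration:

```python
# Sketch only: assumes `pip install lm-eval` and that the leaderboard tasks
# correspond to the harness tasks "logiqa" and "logiqa2".
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=<USER>/<MODEL>",
    tasks=["logiqa", "logiqa2"],
    limit=10,  # small cap for a quick local check; remove for a full run
)
print(results["results"])
```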

"""

EVALUATION_QUEUE_TEXT = """
## Some good practices before submitting a model

### 1) Make sure you can load your model and tokenizer with `vLLM`:
```python
from vllm import LLM, SamplingParams

prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
# Loading will fail here if the checkpoint or tokenizer is broken
llm = LLM(model="<USER>/<MODEL>")
outputs = llm.generate(prompts, sampling_params)
for output in outputs:
    print(output.prompt, output.outputs[0].text)
```
If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.

Note: make sure your model is public!

### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
Safetensors is a format for storing weights that is safer and faster to load than pickle-based checkpoints. It will also allow us to add your model's parameter count to the `Extended Viewer`!
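
If your weights are still in pickle format, you can usually re-save them as safetensors with `transformers` (a sketch; `<LOCAL_DIR>` is a placeholder output path):

```python
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("<USER>/<MODEL>")
# safe_serialization=True writes model.safetensors instead of pytorch_model.bin
model.save_pretrained("<LOCAL_DIR>", safe_serialization=True)
```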

### 3) Make sure your model has an open license!
This is a leaderboard for Open LLMs, and we'd love for as many people as possible to know they can use your model 🤗

### 4) Fill out your model card
When we add extra information about models to the leaderboard, it will be automatically taken from the model card.

## In case of model failure
If your model is displayed in the `FAILED` category, its evaluation stopped partway through.
Make sure you have followed the above steps first.
If everything is done, check that you can run the EleutherAI LM Evaluation Harness on your model locally, using the command above without modifications (you can add `--limit` to cap the number of examples per task).
"""

CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""
"""