Gregor Betz committed on
Commit 4085e97
1 Parent(s): e4b2a4a
Files changed (3)
  1. app.py +2 -2
  2. scripts/create_request_file.py +1 -1
  3. src/display/about.py +10 -6
app.py CHANGED
@@ -36,14 +36,14 @@ def restart_space():
 try:
     print(EVAL_REQUESTS_PATH)
     snapshot_download(
-        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
+        repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, token=TOKEN, etag_timeout=30
     )
 except Exception:
     restart_space()
 try:
     print(EVAL_RESULTS_PATH)
     snapshot_download(
-        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30
+        repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, token=TOKEN, etag_timeout=30
     )
 except Exception:
     restart_space()
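
The only functional change in `app.py` is that both `snapshot_download` calls now pass `token=TOKEN`, so the Space can authenticate when pulling the requests and results datasets. As a minimal sketch (not part of the commit), such a token is typically read from an environment variable; the variable name `HF_TOKEN` below is an assumption:

```python
import os
from huggingface_hub import snapshot_download

# Assumed setup: the Space reads its access token from an environment variable.
TOKEN = os.environ.get("HF_TOKEN")  # hypothetical variable name, not from this commit

# Values taken from this commit for illustration.
EVAL_REQUESTS_PATH = "eval-queue"
QUEUE_REPO = "logikon/cot-leaderboard-requests"

# Authenticated download of the requests dataset, mirroring the patched call.
snapshot_download(
    repo_id=QUEUE_REPO,
    local_dir=EVAL_REQUESTS_PATH,
    repo_type="dataset",
    tqdm_class=None,
    token=TOKEN,
    etag_timeout=30,
)
```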
scripts/create_request_file.py CHANGED
@@ -9,7 +9,7 @@ from colorama import Fore
 from huggingface_hub import HfApi, snapshot_download
 
 EVAL_REQUESTS_PATH = "eval-queue"
-QUEUE_REPO = "open-llm-leaderboard/requests"
+QUEUE_REPO = "logikon/cot-leaderboard-requests"
 
 precisions = ("float16", "bfloat16", "8bit (LLM.int8)", "4bit (QLoRA / FP4)", "GPTQ")
 model_types = ("pretrained", "fine-tuned", "RL-tuned", "instruction-tuned")
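
This hunk only repoints `QUEUE_REPO` from the Open LLM Leaderboard requests dataset to `logikon/cot-leaderboard-requests`. For illustration only, here is a hedged sketch of how a script like this might push a request file to that repo with `HfApi.upload_file`; the payload fields and file paths below are hypothetical, not taken from the script:

```python
import json
import os
from huggingface_hub import HfApi

QUEUE_REPO = "logikon/cot-leaderboard-requests"
EVAL_REQUESTS_PATH = "eval-queue"

# Hypothetical request payload; the real schema lives in create_request_file.py.
eval_entry = {
    "model": "my-org/my-model",
    "precision": "bfloat16",
    "model_type": "fine-tuned",
    "status": "PENDING",
}

# Write the request file locally, mirroring the repo layout.
out_dir = os.path.join(EVAL_REQUESTS_PATH, "my-org")
os.makedirs(out_dir, exist_ok=True)
out_path = os.path.join(out_dir, "my-model_eval_request.json")
with open(out_path, "w") as f:
    json.dump(eval_entry, f, indent=2)

# Push it to the requests dataset the leaderboard now watches.
api = HfApi()
api.upload_file(
    path_or_fileobj=out_path,
    path_in_repo="my-org/my-model_eval_request.json",
    repo_id=QUEUE_REPO,
    repo_type="dataset",
    commit_message="Add my-org/my-model to eval queue",
)
```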
src/display/about.py CHANGED
@@ -35,17 +35,21 @@ To reproduce our results, here is the commands you can run:
 EVALUATION_QUEUE_TEXT = """
 ## Some good practices before submitting a model
 
-### 1) Make sure you can load your model and tokenizer using AutoClasses:
+### 1) Make sure you can load your model and tokenizer with `vLLM`:
 ```python
-from transformers import AutoConfig, AutoModel, AutoTokenizer
-config = AutoConfig.from_pretrained("your model name", revision=revision)
-model = AutoModel.from_pretrained("your model name", revision=revision)
-tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
+from vllm import LLM, SamplingParams
+prompts = [
+    "Hello, my name is",
+    "The president of the United States is",
+    "The capital of France is",
+    "The future of AI is",
+]
+sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
+llm = LLM(model="<USER>/<MODEL>")
 ```
 If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
 
 Note: make sure your model is public!
-Note: if your model needs `use_remote_code=True`, we do not support this option yet but we are working on adding it, stay posted!
 
 ### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
 It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`!
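
As a usage note on the new vLLM snippet (not part of this commit), running a quick generation pass is a simple way to confirm the model loads and produces output end to end; `"<USER>/<MODEL>"` remains a placeholder for your Hub model id:

```python
from vllm import LLM, SamplingParams

# Same setup as the snippet in about.py; "<USER>/<MODEL>" is a placeholder.
prompts = ["Hello, my name is", "The capital of France is"]
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
llm = LLM(model="<USER>/<MODEL>")

# Generate completions and print them to verify the model runs end to end.
outputs = llm.generate(prompts, sampling_params)
for output in outputs:
    print(f"{output.prompt!r} -> {output.outputs[0].text!r}")
```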