5to9 committed on
Commit
b0caeb4
β€’
1 Parent(s): 0c12487

0.39 shuffling

Browse files
Files changed (1) hide show
  1. app.py +17 -16
app.py CHANGED
@@ -8,7 +8,7 @@ import os
8
  import traceback
9
 
10
  from threading import Thread
11
-
12
 
13
  logging.basicConfig(level=logging.DEBUG)
14
 
@@ -18,25 +18,27 @@ HF_TOKEN = os.environ.get("HF_TOKEN", None)
18
  login(token=HF_TOKEN)
19
 
20
 
21
- model_a_info = {"id": "NousResearch/Meta-Llama-3.1-8B-Instruct",
22
- "name": "Meta Llama 3.1 8B Instruct"}
23
- model_b_info = {"id": "mistralai/Mistral-7B-Instruct-v0.3",
24
- "name": "Mistral 7B Instruct v0.3"}
 
 
25
 
26
  device = "cuda"
27
 
28
  try:
29
- tokenizer_a = AutoTokenizer.from_pretrained(model_a_info['id'])
30
  model_a = AutoModelForCausalLM.from_pretrained(
31
- model_a_info['id'],
32
  torch_dtype=torch.float16,
33
  device_map="auto",
34
  trust_remote_code=True,
35
  )
36
  #model_a.tie_weights()
37
- tokenizer_b = AutoTokenizer.from_pretrained(model_b_info['id'])
38
  model_b = AutoModelForCausalLM.from_pretrained(
39
- model_b_info['id'],
40
  torch_dtype=torch.float16,
41
  device_map="auto",
42
  trust_remote_code=True,
@@ -181,14 +183,14 @@ def clear():
181
 
182
  def reveal_bot(selection, chatbot_a, chatbot_b):
183
  if selection == "Bot A kicks ass!":
184
- chatbot_a.append(["πŸ†", f"Thanks, man. I am {model_a_info['name']}"])
185
- chatbot_b.append(["πŸ’©", f"Pffff … I am {model_b_info['name']}"])
186
  elif selection == "Bot B crushes it!":
187
- chatbot_a.append(["🀑", f"Rigged … I am {model_a_info['name']}"])
188
- chatbot_b.append(["πŸ₯‡", f"Well deserved! I am {model_b_info['name']}"])
189
  else:
190
- chatbot_a.append(["🀝", f"Lame … I am {model_a_info['name']}"])
191
- chatbot_b.append(["🀝", f"Dunno. I am {model_b_info['name']}"])
192
  return chatbot_a, chatbot_b
193
 
194
  arena_notes = """## Important Notes:
@@ -197,7 +199,6 @@ arena_notes = """## Important Notes:
197
 
198
  with gr.Blocks() as demo:
199
  try:
200
- logging.debug('Pass just once')
201
  with gr.Column():
202
  gr.HTML("<center><h1>πŸ€–le Royale</h1></center>")
203
  gr.Markdown(arena_notes)
 
8
  import traceback
9
 
10
  from threading import Thread
11
+ from random import shuffle
12
 
13
  logging.basicConfig(level=logging.DEBUG)
14
 
 
18
  login(token=HF_TOKEN)
19
 
20
 
21
+ model_info = [{"id": "NousResearch/Meta-Llama-3.1-8B-Instruct",
22
+ "name": "Meta Llama 3.1 8B Instruct"},
23
+ {"id": "mistralai/Mistral-7B-Instruct-v0.3",
24
+ "name": "Mistral 7B Instruct v0.3"}]
25
+ shuffle(model_info)
26
+ logging.debug('Models shuffled')
27
 
28
  device = "cuda"
29
 
30
  try:
31
+ tokenizer_a = AutoTokenizer.from_pretrained(model_info[0]['id'])
32
  model_a = AutoModelForCausalLM.from_pretrained(
33
+ model_info[0]['id'],
34
  torch_dtype=torch.float16,
35
  device_map="auto",
36
  trust_remote_code=True,
37
  )
38
  #model_a.tie_weights()
39
+ tokenizer_b = AutoTokenizer.from_pretrained(model_info[1]['id'])
40
  model_b = AutoModelForCausalLM.from_pretrained(
41
+ model_info[1]['id'],
42
  torch_dtype=torch.float16,
43
  device_map="auto",
44
  trust_remote_code=True,
 
183
 
184
  def reveal_bot(selection, chatbot_a, chatbot_b):
185
  if selection == "Bot A kicks ass!":
186
+ chatbot_a.append(["πŸ†", f"Thanks, man. I am {model_info[0]['name']}"])
187
+ chatbot_b.append(["πŸ’©", f"Pffff … I am {model_info[1]['name']}"])
188
  elif selection == "Bot B crushes it!":
189
+ chatbot_a.append(["🀑", f"Rigged … I am {model_info[0]['name']}"])
190
+ chatbot_b.append(["πŸ₯‡", f"Well deserved! I am {model_info[1]['name']}"])
191
  else:
192
+ chatbot_a.append(["🀝", f"Lame … I am {model_info[0]['name']}"])
193
+ chatbot_b.append(["🀝", f"Dunno. I am {model_info[1]['name']}"])
194
  return chatbot_a, chatbot_b
195
 
196
  arena_notes = """## Important Notes:
 
199
 
200
  with gr.Blocks() as demo:
201
  try:
 
202
  with gr.Column():
203
  gr.HTML("<center><h1>πŸ€–le Royale</h1></center>")
204
  gr.Markdown(arena_notes)