as-cle-bert committed on
Commit 0fe190c
1 Parent(s): 31e33fd

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -59,9 +59,9 @@ def reply(modelA, modelB, prompt):
     df.loc[df["MODEL"] == modelA, "MATCHES_PLAYED"] += 1
     df.loc[df["MODEL"] == modelB, "MATCHES_PLAYED"] += 1
     df.to_csv("models.csv", index=False)
-    pipeA = pipeline("text-generation", model=models_and_tokenizers[modelA][0], tokenizer=models_and_tokenizers[modelA][1], max_new_tokens=512, repetition_penalty=1.5, temperature=0.5, device_map="auto")
+    pipeA = pipeline("text-generation", model=models_and_tokenizers[modelA][0], tokenizer=models_and_tokenizers[modelA][1], max_new_tokens=512, repetition_penalty=1.5, temperature=0.5, device_map="cuda:0")
     responseA = run_inference(pipeA, prompt)
-    pipeB = pipeline("text-generation", model=models_and_tokenizers[modelB][0], tokenizer=models_and_tokenizers[modelB][1], max_new_tokens=512, repetition_penalty=1.5, temperature=0.5, device_map="auto")
+    pipeB = pipeline("text-generation", model=models_and_tokenizers[modelB][0], tokenizer=models_and_tokenizers[modelB][1], max_new_tokens=512, repetition_penalty=1.5, temperature=0.5, device_map="cuda:1")
     responseB = run_inference(pipeB, prompt)
     return responseA, responseB
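
The change pins each contender's text-generation pipeline to its own GPU (cuda:0 and cuda:1) instead of letting device_map="auto" spread both models across whatever devices accelerate picks. Below is a minimal, self-contained sketch of that pattern, assuming two CUDA devices and placeholder model ids; the app's models_and_tokenizers mapping, leaderboard CSV, and run_inference helper are not reproduced here.

# Minimal sketch: one text-generation pipeline per GPU, mirroring the
# generation settings used in app.py. Model ids and the two-GPU setup
# are assumptions for illustration only.
from transformers import pipeline

def build_pipe(model_id: str, device: str):
    # Build a generation pipeline bound to one explicit device
    # (e.g. "cuda:0" or "cuda:1") rather than device_map="auto".
    return pipeline(
        "text-generation",
        model=model_id,
        device=device,
        max_new_tokens=512,
        repetition_penalty=1.5,
        temperature=0.5,
    )

if __name__ == "__main__":
    prompt = "Write a haiku about autumn."
    pipeA = build_pipe("gpt2", "cuda:0")        # hypothetical model id
    pipeB = build_pipe("distilgpt2", "cuda:1")  # hypothetical model id
    print(pipeA(prompt)[0]["generated_text"])
    print(pipeB(prompt)[0]["generated_text"])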