yilunzhao committed on
Commit 599d40b
Parent: 8d7cf56

Update app.py

Files changed (1)
  1. app.py +6 -7
app.py CHANGED
@@ -16,15 +16,14 @@ else:
 @spaces.GPU
 def generate_response(passage: str, question: str) -> str:
     # Prepare the input text by combining the passage and question
-    messages = [{"role": "user", "content": f"Passage: {passage}\nQuestion: {question}"}]
-    inputs = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
+    chat = [{"role": "user", "content": f"Passage: {passage}\nQuestion: {question}"}]
+
+    prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
+    inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
+    response = model.generate(input_ids=inputs.to(model.device), max_new_tokens=100)
 
-    # Generate text, focusing only on the new tokens added by the model
-    outputs = model.generate(**inputs, max_new_tokens=150)
+    response = tokenizer.batch_decode(response, skip_special_tokens=True)[0]
 
-    # Decode only the generated part, skipping the prompt input
-    # generated_tokens = outputs[0][inputs.input_ids.shape[-1]:]  # Ignore input tokens in the output
-    response = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
 
     return response
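For reference, a minimal sketch of the prompt flow this change introduces: render the chat with apply_chat_template (tokenize=False), tokenize the resulting string, generate, and decode the full sequence. The checkpoint name, passage, and question below are placeholders, not part of the Space; the real tokenizer and model are loaded earlier in app.py.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder checkpoint; the Space loads its own model and tokenizer elsewhere in app.py.
model_name = "allenai/OLMo-2-1124-7B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")

# Hypothetical inputs for illustration only.
passage = "The Nile is generally regarded as the longest river in Africa."
question = "Which river is the longest in Africa?"

# Build a one-turn chat, render it to a prompt string, then tokenize that string separately.
chat = [{"role": "user", "content": f"Passage: {passage}\nQuestion: {question}"}]
prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")

# Generate and decode the whole sequence; the returned string still contains the prompt,
# since the full output is decoded rather than only the newly generated tokens.
output_ids = model.generate(input_ids=inputs.to(model.device), max_new_tokens=100)
response = tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]
print(response)

Because the full sequence is decoded, the response includes the prompt text; slicing off the first inputs.shape[-1] tokens before decoding, as the removed commented-out line suggested, would return only the model's answer.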