XumengWen committed
Commit 13c993d
1 Parent(s): 78df23b

update quickstart

Files changed (1)
  1. README.md +3 -3
README.md CHANGED
@@ -104,8 +104,8 @@ with open(example_path, "r") as f:
     full_prompt = f.read()
 answer = full_prompt.split('Answer:')[-1].strip()
 prompt_without_answer = full_prompt[:-len(answer)]
-print("Prompt:\n", prompt_without_answer)
-print("Label:", answer)
+print("Prompt:", prompt_without_answer)
+print("Groundtruth:", answer)
 
 # Inference
 inputs = tokenizer(prompt_without_answer, return_tensors="pt")
@@ -118,7 +118,7 @@ outputs = model.generate(
 )
 
 # Print the answer
-print("Generate answer:", tokenizer.decode(outputs[0][input_ids.shape[-1]:]))
+print("Generated answer:", tokenizer.decode(outputs[0][input_ids.shape[-1]:]))
 ```
 
 ## Responsible AI Considerations
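
For context, below is a minimal sketch of the quickstart flow these diff lines edit, assembled from the snippet above. The model checkpoint, example_path value, and the max_new_tokens setting are illustrative assumptions, not taken from this commit or the README.

```python
# Sketch of the quickstart flow edited above (checkpoint name, example_path,
# and generation settings are placeholder assumptions, not from this commit).
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "your-model-id"                    # assumption: any causal-LM checkpoint
example_path = "examples/example_prompt.txt"  # assumption: prompt file ending in "Answer: ..."

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

with open(example_path, "r") as f:
    full_prompt = f.read()

# Strip the ground-truth answer so only the prompt is fed to the model
answer = full_prompt.split('Answer:')[-1].strip()
prompt_without_answer = full_prompt[:-len(answer)]
print("Prompt:", prompt_without_answer)
print("Groundtruth:", answer)

# Inference
inputs = tokenizer(prompt_without_answer, return_tensors="pt")
input_ids = inputs["input_ids"]
outputs = model.generate(**inputs, max_new_tokens=64)

# Print only the newly generated tokens (skip the echoed prompt)
print("Generated answer:", tokenizer.decode(outputs[0][input_ids.shape[-1]:]))
```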