Update README.md
Browse files
README.md
CHANGED
@@ -56,13 +56,39 @@ The model was evaluated using the **MathQA test dataset (2,985 examples)** with **
Removed (lines 56–68, old usage example):

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Example usage
inputs = tokenizer("Solve: 12 + 7", return_tensors="pt")
outputs = model.generate(inputs["input_ids"], max_length=30)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

## Limitations
Added (lines 56–94, new usage example):

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

local_model_path = "Dasool/mathGemma-2-9b"
tokenizer = AutoTokenizer.from_pretrained(local_model_path)
model = AutoModelForCausalLM.from_pretrained(local_model_path)

alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
Please select the correct answer for the following math problem from the options provided.

### Input:
Problem: Calculate the square root of 144.
Options:
a) 10
b) 11
c) 12
d) 13

### Response:
"""

inputs = tokenizer(alpaca_prompt, return_tensors="pt")
outputs = model.generate(
    inputs["input_ids"],
    max_new_tokens=100,
    num_beams=5,
    early_stopping=True,
    temperature=0.7,
    no_repeat_ngram_size=2
)

answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(f"Answer: {answer}")
```

## Limitations