MaziyarPanahi
committed on
Commit
•
99a2e1d
1
Parent(s):
0369c39
Update README.md
Browse files
README.md
CHANGED
@@ -68,4 +68,26 @@ This model uses `ChatML` prompt template:
|
|
68 |
{Assistant}
|
69 |
````
|
70 |
|
71 |
-
# How to use
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
68 |
{Assistant}
|
69 |
````
|
70 |
|
71 |
+
# How to use

```python

# Use a pipeline as a high-level helper

from transformers import pipeline

messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe = pipeline("text-generation", model="MaziyarPanahi/Qwen2-72B-Instruct-v0.1")
pipe(messages)


# Load model directly

from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("MaziyarPanahi/Qwen2-72B-Instruct-v0.1")
model = AutoModelForCausalLM.from_pretrained("MaziyarPanahi/Qwen2-72B-Instruct-v0.1")
```