Update README.md
README.md CHANGED
@@ -72,3 +72,72 @@ The following hyperparameters were used during training:
- Pytorch 2.2.1+cu121
- Datasets 2.14.6
- Tokenizers 0.15.2

### Usage

```python
# Install transformers from source - only needed for versions <= v4.34
# pip install git+https://github.com/huggingface/transformers.git
# pip install accelerate

import torch
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="zephyr-7b-gemma-sft-african-ultrachat-5k",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
messages = [
    {
        "role": "system",
        "content": "You are a friendly chatbot who answers questions in the given language",
    },
    {"role": "user", "content": "What are the 3 biggest countries in Africa?"},
]
prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
# <|system|>
# You are a friendly chatbot who answers questions in the given language<eos>
# <|user|>
# What are the 3 biggest countries in Africa?<eos>
# <|assistant|>
# The 3 biggest countries in Africa are Nigeria, Ethiopia and South Africa.
```
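
If you only need the assistant's reply rather than the full templated conversation, the pipeline can strip the prompt from its output. A minimal sketch, reusing `pipe` and `prompt` from the example above (`return_full_text` is a standard option of the `text-generation` pipeline):

```python
# Generate only the new tokens, without echoing the templated prompt back
reply = pipe(
    prompt,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7,
    top_k=50,
    top_p=0.95,
    return_full_text=False,  # drop the prompt from "generated_text"
)[0]["generated_text"]
print(reply)
```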

### Quantized Versions through bitsandbytes

```python
import torch
from transformers import pipeline
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_4bit=True)

tokenizer = AutoTokenizer.from_pretrained("zephyr-7b-gemma-sft-african-ultrachat-5k")
model = AutoModelForCausalLM.from_pretrained(
    "zephyr-7b-gemma-sft-african-ultrachat-5k",
    quantization_config=quantization_config,
)

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, torch_dtype=torch.bfloat16, device_map="auto")

messages = [
    {
        "role": "system",
        "content": "You are a friendly chatbot who answers questions in the given language",
    },
    {"role": "user", "content": "List the languages spoken in Africa."},
]
prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```
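
`load_in_4bit=True` on its own uses the default 4-bit scheme. `BitsAndBytesConfig` also exposes the NF4 data type, double quantization, and the compute dtype; the sketch below is one common combination rather than the card's own recipe (all arguments are standard `BitsAndBytesConfig` options, and the `get_memory_footprint()` call is an optional check of the loaded size):

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# NF4 4-bit weights with double quantization and bfloat16 compute
nf4_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    "zephyr-7b-gemma-sft-african-ultrachat-5k",
    quantization_config=nf4_config,
)
print(f"Model memory footprint: {model.get_memory_footprint() / 1e9:.2f} GB")
```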