Create README.md
README.md
ADDED
@@ -0,0 +1,24 @@
# Limstral 7B

## Mistral 7B fine-tuned on LIMA

WIP

### Usage

```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_id = "mrm8488/limstral-7B-v0.1"

# Load the model in bfloat16 and the matching tokenizer
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Resize the embedding matrix in case tokens were added during fine-tuning
model.resize_token_embeddings(len(tokenizer))

# Text-generation pipeline on the first GPU (device=0)
gen = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0)

instruction = "Write an email to say goodbye to my boss"
res = gen(instruction, max_new_tokens=512, temperature=0.3, top_p=0.75, top_k=40, repetition_penalty=1.2)
print(res[0]['generated_text'])
```
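
If GPU memory is limited, the model can also be loaded with 4-bit quantization. This is a minimal sketch rather than an official recipe for this checkpoint; it assumes the `bitsandbytes` and `accelerate` packages are installed.

```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline

model_id = "mrm8488/limstral-7B-v0.1"

# Hypothetical 4-bit (NF4) loading setup; requires bitsandbytes and accelerate
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_id)

gen = pipeline("text-generation", model=model, tokenizer=tokenizer)
print(gen("Write an email to say goodbye to my boss", max_new_tokens=256)[0]['generated_text'])
```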