ludwigstumpp
committed on
Commit
•
d6c78b8
1
Parent(s):
3e5f68c
Update README.md
Browse files
README.md
CHANGED
@@ -34,15 +34,18 @@ This model is trained based on the scripts provided in https://github.com/tloen/
|
|
34 |
```Python
|
35 |
# adapted from https://github.com/tloen/alpaca-lora/blob/main/generate.py
|
36 |
|
|
|
37 |
from peft import PeftModel
|
|
|
|
|
38 |
|
39 |
if torch.cuda.is_available():
|
40 |
device = "cuda"
|
41 |
else:
|
42 |
device = "cpu"
|
43 |
|
44 |
-
tokenizer =
|
45 |
-
base_model =
|
46 |
"decapoda-research/llama-7b-hf",
|
47 |
load_in_8bit=True,
|
48 |
device_map="auto",
|
|
|
34 |
```Python
|
35 |
# adapted from https://github.com/tloen/alpaca-lora/blob/main/generate.py
|
36 |
|
37 |
+
import torch
|
38 |
from peft import PeftModel
|
39 |
+
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig
|
40 |
+
|
41 |
|
42 |
if torch.cuda.is_available():
|
43 |
device = "cuda"
|
44 |
else:
|
45 |
device = "cpu"
|
46 |
|
47 |
+
tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
|
48 |
+
base_model = LlamaForCausalLM.from_pretrained(
|
49 |
"decapoda-research/llama-7b-hf",
|
50 |
load_in_8bit=True,
|
51 |
device_map="auto",
|