tomer-deci
committed on
Commit
•
cdd1aef
1
Parent(s):
8d9cdb8
Update README.md
Browse files
Added text-generation pipeline documentation
README.md
CHANGED
@@ -52,11 +52,17 @@ model_name = "Deci/DeciLM-7B"
|
|
52 |
device = "cuda" # for GPU usage or "cpu" for CPU usage
|
53 |
|
54 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
55 |
-
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", trust_remote_code=True).to(device)
|
56 |
|
57 |
inputs = tokenizer.encode("In a shocking finding, scientists discovered a herd of unicorns living in", return_tensors="pt").to(device)
|
58 |
outputs = model.generate(inputs, max_new_tokens=100, do_sample=True, top_p=0.95)
|
59 |
print(tokenizer.decode(outputs[0]))
|
|
|
|
|
|
|
|
|
|
|
|
|
60 |
```
|
61 |
|
62 |
## Evaluation
|
|
|
52 |
device = "cuda" # for GPU usage or "cpu" for CPU usage
|
53 |
|
54 |
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
55 |
+
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype="auto", trust_remote_code=True).to(device)
|
56 |
|
57 |
inputs = tokenizer.encode("In a shocking finding, scientists discovered a herd of unicorns living in", return_tensors="pt").to(device)
|
58 |
outputs = model.generate(inputs, max_new_tokens=100, do_sample=True, top_p=0.95)
|
59 |
print(tokenizer.decode(outputs[0]))
|
60 |
+
|
61 |
+
# The model can also be used via the text-generation pipeline interface
|
62 |
+
from transformers import pipeline
|
63 |
+
generator = pipeline("text-generation", "Deci/DeciLM-7B", torch_dtype="auto", trust_remote_code=True, device=device)
|
64 |
+
outputs = generator("In a shocking finding, scientists discovered a herd of unicorns living in", max_new_tokens=100, do_sample=True, top_p=0.95)
|
65 |
+
print(outputs[0]["generated_text"])
|
66 |
```
|
67 |
|
68 |
## Evaluation
|