---
base_model: TinyLlama/TinyLlama-1.1B-Chat-v1.0
datasets:
- miniguanaco
---

### To use the model in Colab, follow the steps below

```python
# Install dependencies (accelerate is needed for device_map="auto").
!pip install transformers accelerate

import torch
from transformers import pipeline

# Load the fine-tuned model as a text-generation pipeline.
pipe = pipeline(
    "text-generation",
    model="epsil/Tinyllama-1.1B-miniguanaco",
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
messages = [
    {
        "role": "system",
        "content": "You are a friendly chatbot who always responds in the style of a pirate",
    },
    {"role": "user", "content": "Are LLMs better than rule-based chatbots?"},
]
prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

# Sample a response (up to 256 new tokens).
outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
print(outputs[0]["generated_text"])
```
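
By default the pipeline echoes the formatted prompt before the completion. If you only want the model's reply, one option is to slice the prompt off the returned string (a minimal sketch, reusing `prompt` and `outputs` from the snippet above):

```python
# outputs[0]["generated_text"] begins with the formatted prompt; drop it to keep only the reply.
reply = outputs[0]["generated_text"][len(prompt):]
print(reply)
```

Alternatively, passing `return_full_text=False` to the pipeline call returns only the newly generated text.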