
# Limstral 7B

Mistral 7B fine-tuned on the LIMA dataset.

Work in progress (WIP).

## Usage

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_id = "mrm8488/limstral-7B-v0.1"

# Load the fine-tuned model and its tokenizer
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# Resize the embedding matrix in case fine-tuning added tokens to the vocabulary
model.resize_token_embeddings(len(tokenizer))

# Build a text-generation pipeline on the first GPU
gen = pipeline("text-generation", model=model, tokenizer=tokenizer, device=0)

instruction = "Write an email to say goodbye to my boss"
# do_sample=True so that temperature/top_p/top_k actually take effect
res = gen(instruction, do_sample=True, max_new_tokens=512, temperature=0.3, top_p=0.75, top_k=40, repetition_penalty=1.2)
print(res[0]["generated_text"])
```
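
If the model does not fit in GPU memory at bf16 precision, it can also be loaded with 4-bit quantization. The snippet below is a minimal sketch, assuming the `accelerate` and `bitsandbytes` packages are installed; it uses the standard `BitsAndBytesConfig` path in `transformers` rather than anything specific to this checkpoint.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline

model_id = "mrm8488/limstral-7B-v0.1"

# 4-bit NF4 quantization (requires bitsandbytes + accelerate)
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    quantization_config=bnb_config,
    device_map="auto",  # let accelerate place the weights automatically
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# When device_map is used, the pipeline should not be given an explicit device
gen = pipeline("text-generation", model=model, tokenizer=tokenizer)

res = gen(
    "Write an email to say goodbye to my boss",
    do_sample=True,
    max_new_tokens=256,
    temperature=0.3,
    top_p=0.75,
)
print(res[0]["generated_text"])
```

With NF4 quantization a 7B model typically fits in roughly 5–6 GB of GPU memory, at a small quality cost compared with bf16.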