---
language:
- th
license: apache-2.0
library_name: transformers
datasets:
- Porameht/customer-support-th-26.9k
---
## How to use
```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Select the GPU if one is available, otherwise fall back to CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")

# Load the tokenizer and model in half precision
model_path = "Porameht/openthaigpt-7b-customer-support-th"
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_path, trust_remote_code=True, torch_dtype=torch.float16
)
model.to(device)

# Build the Llama-style prompt; the user message means "I want to cancel my order"
prompt = "ต้องการยกเลิกออเดอร์"
llama_prompt = f"<s>[INST] <<SYS>>\nYou are a question answering assistant. Answer the question as truthfully and helpfully as possible. คุณคือผู้ช่วยตอบคำถาม จงตอบคำถามอย่างถูกต้องและมีประโยชน์ที่สุด<</SYS>>\n\n{prompt} [/INST]"
inputs = tokenizer.encode(llama_prompt, return_tensors="pt")
inputs = inputs.to(device)

# Generate (max_length caps the prompt and generated tokens combined)
outputs = model.generate(inputs, max_length=512, num_return_sequences=1)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
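Note that the decoded output above includes the prompt as well as the reply. The sketch below (not part of the original card; the sampling settings are illustrative assumptions) caps only the newly generated tokens with `max_new_tokens`, enables sampling, and slices off the prompt so only the model's reply is printed:

```python
# Minimal variant, reusing `model`, `tokenizer`, and `inputs` from above.
# max_new_tokens limits only the generated continuation, not the prompt;
# temperature/top_p values here are illustrative, not tuned for this model.
outputs = model.generate(
    inputs,
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
    num_return_sequences=1,
)

# Skip the prompt tokens and decode only what the model generated
reply = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True)
print(reply)
```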