from unsloth import FastLanguageModel
from peft import PeftModel
import torch

max_seq_length = 4096  # Choose any! Unsloth auto-supports RoPE scaling internally.
dtype = None  # None for auto detection. Float16 for Tesla T4/V100, Bfloat16 for Ampere+.
load_in_4bit = True  # Use 4-bit quantization to reduce memory usage. Can be False.

# 4-bit pre-quantized models supported for 4x faster downloading + no OOMs.
fourbit_models = [
    "unsloth/llama-3-8b-Instruct-bnb-4bit",
]

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "unsloth/llama-3-8b-Instruct-bnb-4bit",
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
    # token = "hf_...",  # needed for gated models like meta-llama/Llama-2-7b-hf
)
# Apply the LoRA adapter to the base model. PeftModel.from_pretrained expects
# the base model plus the adapter repo id (a string), not a second model
# instance loaded via AutoModel.
model = PeftModel.from_pretrained(model, "Rohan5manza/sentiment_analysis")

# Switch Unsloth into inference mode to enable its faster generation path.
FastLanguageModel.for_inference(model)
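# Optional: merging the LoRA weights into the base model can speed up
# inference. merge_and_unload() is standard PEFT, but merging into a 4-bit
# quantized base is not always supported, so this sketch is left commented out:
# model = model.merge_and_unload()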
def generate_response(prompt):
    # Move inputs onto the same device as the model (e.g. the GPU).
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=256)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
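# Quick smoke test. The exact prompt wording below is an assumption; match
# whatever template the adapter was fine-tuned with if it differs.
# print(generate_response("Classify the sentiment of: 'I loved this movie!'"))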
# Example Gradio interface for deploying (a Streamlit sketch follows below).
import gradio as gr

def gradio_interface(prompt):
    return generate_response(prompt)

iface = gr.Interface(fn=gradio_interface, inputs="text", outputs="text")
iface.launch()
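# Alternative front end: the original comment also mentions Streamlit. A
# minimal sketch, kept commented out so this script stays runnable as a Gradio
# Space; save it as its own app.py and run `streamlit run app.py`:
# import streamlit as st
# st.title("Sentiment analysis demo")
# user_prompt = st.text_area("Prompt")
# if st.button("Generate") and user_prompt:
#     st.write(generate_response(user_prompt))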