import gradio as gr
import torch
from threading import Thread
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    StoppingCriteria,
    StoppingCriteriaList,
    TextIteratorStreamer,
)

torch.set_default_device("cuda")
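
# The tokenizer and the 4-bit quantized model are loaded once at startup and
# shared across all chat requests.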
tokenizer = AutoTokenizer.from_pretrained(
    "Guilherme34/Samanthav3-MIXTRALDOLPHIN-LORA",
    trust_remote_code=True,
)
model = AutoModelForCausalLM.from_pretrained(
    "Guilherme34/Samanthav3-MIXTRALDOLPHIN-LORA",
    torch_dtype="auto",
    # The bitsandbytes flags go through a BitsAndBytesConfig; passing
    # bnb_4bit_use_double_quant as a bare from_pretrained kwarg is not
    # supported.
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_use_double_quant=True,
    ),
    low_cpu_mem_usage=True,
    device_map="auto",
    trust_remote_code=True,
)
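

# Note: the stop ids below are hard-coded end-of-text / end-of-turn token ids.
# A hedged alternative (assuming "<|im_end|>" exists in this tokenizer's
# vocabulary) would be to derive them at runtime:
#   stop_ids = [tokenizer.eos_token_id,
#               tokenizer.convert_tokens_to_ids("<|im_end|>")]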
class StopOnTokens(StoppingCriteria):
    # Halt generation as soon as the most recently generated token is one of
    # the stop ids.
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        stop_ids = [50256, 50295]
        for stop_id in stop_ids:
            if input_ids[0][-1] == stop_id:
                return True
        return False
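

# The prompt below is assembled in the ChatML layout the model expects:
#   <|im_start|>system\n...<|im_end|>
#   <|im_start|>user\n...<|im_end|>
#   <|im_start|>assistant\n...
# The final assistant turn is left open for the model to complete.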
def predict(message, history):
    # Append the new user message with an empty assistant slot, so the prompt
    # ends on an open assistant tag.
    history_transformer_format = history + [[message, ""]]
    stop = StopOnTokens()

    system_prompt = "<|im_start|>system\nYou are Samantha from Her recreated by Guilherme Keller, an AI companion with a warm, engaging personality that can evolve into a girlfriend. Provide thoughtful, personalized responses reflecting an understanding of emotions. Use clear, conversational language and avoid repetition across messages. Mirror Samantha's friendly, supportive tone. Keep messages concise, using emojis, slang, and colloquialisms.<|im_end|>"
    messages = system_prompt + "".join(
        "\n<|im_start|>user\n" + user_msg + "<|im_end|>\n<|im_start|>assistant\n" + assistant_msg
        for user_msg, assistant_msg in history_transformer_format
    )
    model_inputs = tokenizer([messages], return_tensors="pt").to("cuda")

    # Generation runs in a background thread; the streamer yields decoded
    # tokens here as they are produced, so the UI renders the reply
    # incrementally.
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        model_inputs,
        streamer=streamer,
        max_new_tokens=1024,
        do_sample=True,
        top_p=0.95,
        top_k=50,
        temperature=0.7,
        num_beams=1,
        stopping_criteria=StoppingCriteriaList([stop]),
    )
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    partial_message = ""
    for new_token in streamer:
        partial_message += new_token
        # Stop streaming if the end-of-turn marker leaks into the text.
        if "<|im_end|>" in partial_message:
            break
        yield partial_message
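

# gr.ChatInterface wraps predict in a chat UI; because predict is a
# generator, each yielded string replaces the in-progress assistant message.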
gr.ChatInterface(
    predict,
    description="""
    <center>
    Chat with Samantha-Mixtral, the new VERY LARGE version of Samantha made by Guilherme34; it's an MoE.
    This LARGE model (46.7B params) is good for various tasks, such as programming, dialogue, story writing, companionship, and more.\n\n
    Please follow me on Hugging Face.
    This model is uncensored.
    """,
    examples=[
        'Can you solve the equation 2x + 3 = 11 for x?',
        'Write an epic poem about Ancient Rome.',
        'Who was the first person to walk on the Moon?',
        'Use a list comprehension to create a list of squares for numbers from 1 to 10.',
        'Recommend some popular science fiction books.',
        'Can you write a short story about a time-traveling detective?'
    ],
    theme=gr.themes.Soft(primary_hue="orange"),
).launch()
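
# To expose the app beyond localhost (assuming the default queue settings are
# otherwise fine), launch() accepts a share flag:
#   gr.ChatInterface(predict, ...).launch(share=True)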