# app.py - Gradio demo Space for the Shakti language models (SandLogicTechnologies)
import os
from threading import Thread
from typing import Iterator
import gradio as gr
import spaces
import torch
import json
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
DESCRIPTION = """\
Shakti is a 2.5 billion parameter language model specifically optimized for resource-constrained environments such as edge devices, including smartphones, wearables, and IoT systems. With support for vernacular languages and domain-specific tasks, Shakti excels in industries such as healthcare, finance, and customer service.
For more details, please check [here](https://arxiv.org/pdf/2410.11331v1).
"""
MAX_MAX_NEW_TOKENS = 2048
DEFAULT_MAX_NEW_TOKENS = 1024
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "2048"))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Model configurations
model_options = {
"Shakti-100M": "SandLogicTechnologies/Shakti-100M",
"Shakti-250M": "SandLogicTechnologies/Shakti-250M",
"Shakti-2.5B": "SandLogicTechnologies/Shakti-2.5B"
}
# Initialize tokenizer and model variables
tokenizer = None
model = None
def load_model(selected_model: str):
    """Load the tokenizer and weights for the selected Shakti variant.

    The SHAKTI environment variable is expected to hold a Hugging Face access
    token with read access to the SandLogicTechnologies model repositories.
    """
    global tokenizer, model
    model_id = model_options[selected_model]
    tokenizer = AutoTokenizer.from_pretrained(model_id, token=os.getenv("SHAKTI"))
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        device_map="auto",
        torch_dtype=torch.bfloat16,
        token=os.getenv("SHAKTI"),
    )
    model.eval()
# Initial model load (default to 2.5B)
load_model("Shakti-2.5B")
@spaces.GPU(duration=90)
def generate(
    message: str,
    chat_history: list[tuple[str, str]],
    max_new_tokens: int = 1024,
    temperature: float = 0.6,
    top_p: float = 0.9,
    top_k: int = 50,
    repetition_penalty: float = 1.2,
) -> Iterator[str]:
    """Stream a response for the given message and chat history."""
    # The PROMPT environment variable is expected to hold a JSON-encoded system
    # message, e.g. {"role": "system", "content": "..."}. It is prepended once
    # per request rather than repeated for every history turn.
    system_prompt = os.getenv("PROMPT")
    conversation = [json.loads(system_prompt)] if system_prompt else []
    for user, assistant in chat_history:
        conversation.extend(
            [
                {"role": "user", "content": user},
                {"role": "assistant", "content": assistant},
            ]
        )
    conversation.append({"role": "user", "content": message})
input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt")
if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
input_ids = input_ids.to(model.device)
streamer = TextIteratorStreamer(tokenizer, timeout=20.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
streamer=streamer,
max_new_tokens=max_new_tokens,
do_sample=True,
top_p=top_p,
top_k=top_k,
temperature=temperature,
num_beams=1,
repetition_penalty=repetition_penalty,
)
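    # Run model.generate on a background thread; the TextIteratorStreamer yields
    # decoded text chunks, which are accumulated and streamed back to the UI.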
t = Thread(target=model.generate, kwargs=generate_kwargs)
t.start()
outputs = []
for text in streamer:
outputs.append(text)
yield "".join(outputs)
def update_examples(selected_model):
if selected_model == "Shakti-100M":
return [["Tell me a story"],
["Write a short poem on Rose"],
["What are computers"]]
elif selected_model == "Shakti-250M":
return [["Can you explain the pathophysiology of hypertension and its impact on the cardiovascular system?"],
["What are the potential side effects of beta-blockers in the treatment of arrhythmias?"],
["What foods are good for boosting the immune system?"],
["What is the difference between a stock and a bond?"],
["How can I start saving for retirement?"],
["What are some low-risk investment options?"],
["What is a power of attorney and when is it used?"],
["What are the key differences between a will and a trust?"],
["How do I legally protect my business name?"]]
    else:
        return [
            ["Tell me a story"],
            ["Write a short poem which is hard to sing"],
            ["मुझे भारतीय इतिहास के बारे में बताएं"],  # Hindi: "Tell me about Indian history"
        ]
def on_model_select(selected_model):
load_model(selected_model) # Load the selected model
return update_examples(selected_model) # Return new examples based on the selected model
chat_interface = gr.ChatInterface(
fn=generate,
additional_inputs=[
gr.Slider(
label="Max new tokens",
minimum=1,
maximum=MAX_MAX_NEW_TOKENS,
step=1,
value=DEFAULT_MAX_NEW_TOKENS,
),
gr.Slider(
label="Temperature",
minimum=0.1,
maximum=4.0,
step=0.1,
value=0.6,
),
],
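    # top_p, top_k, and repetition_penalty are not exposed as sliders here; they
    # fall back to the defaults declared on generate().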
stop_btn=None,
examples=update_examples("Shakti-2.5B"), # Set initial examples for 2.5B model
cache_examples=False,
)
with gr.Blocks(css="style.css", fill_height=True) as demo:
gr.Markdown(DESCRIPTION)
gr.DuplicateButton(value="Duplicate Space for private use", elem_id="duplicate-button")
# Dropdown for model selection
model_dropdown = gr.Dropdown(
label="Select Model",
choices=["Shakti-100M", "Shakti-250M", "Shakti-2.5B"],
value="Shakti-2.5B",
interactive=True,
)
    # Reload the selected model and refresh the example prompts when the dropdown changes
model_dropdown.change(on_model_select, inputs=model_dropdown, outputs=[chat_interface])
chat_interface.render()
if __name__ == "__main__":
demo.queue(max_size=20).launch()