from fastapi import FastAPI
from pydantic import BaseModel
import requests
from llama_cpp import Llama
import threading
import gc
llms = {
    "TinyLLama 1b 4_K_M 2048": {
        "nctx": 2048,
        "file": "tinyllama-1.1b-chat-v1.0.Q4_K_M.gguf",
        "prefix": "<|system|>You are a helpful assistant</s><|user|>",
        "suffix": "</s><|assistant|>"
    },
    "TinyLLama 1b OpenOrca 4_K_M 2048": {
        "nctx": 2048,
        "file": "tinyllama-1.1b-1t-openorca.Q4_K_M.gguf",
        "prefix": "<|im_start|>system You are a helpful assistant<|im_end|><|im_start|>user",
        "suffix": "<|im_end|><|im_start|>assistant"
    },
    "OpenLLama 3b 4_K_M 196k": {
        "nctx": 50000,
        "file": "open-llama-3b-v2-wizard-evol-instuct-v2-196k.Q4_K_M.gguf",
        "prefix": "### HUMAN:",
        "suffix": "### RESPONSE:"
    },
    "Phi-2 2.7b 4_K_M 2048": {
        "nctx": 2048,
        "file": "phi-2.Q4_K_M.gguf",
        "prefix": "Instruct:",
        "suffix": "Output:"
    },
    "Stable Zephyr 3b 4_K_M 4096": {
        "nctx": 4096,
        "file": "stablelm-zephyr-3b.Q4_K_M.gguf",
        "prefix": "<|user|>",
        "suffix": "<|endoftext|><|assistant|>"
    }
}
# Load the default model at startup, using its configured context size
model = llms["TinyLLama 1b OpenOrca 4_K_M 2048"]
llm = Llama(model_path="./code/" + model['file'], n_ctx=model['nctx'], verbose=True, n_threads=8)
# FastAPI
app = FastAPI()

@app.post("/change")  # route path not shown in the original; "/change" is assumed
async def change(item: dict):
    # Rebind the module-level model/llm, otherwise the assignments below create locals
    global model, llm
    model = llms[item['llm']]
    nctx = item.get('nctx', model['nctx'])
    # Release the old model before loading the new one (this is what gc is imported for)
    del llm
    gc.collect()
    llm = Llama(model_path="./code/" + model['file'], n_ctx=nctx, verbose=True, n_threads=8)
@app.post("/stream")  # route path not shown in the original; "/stream" is assumed
async def stream(item: dict):
    prefix = model['prefix']
    suffix = model['suffix']
    max_tokens = item.get('max_tokens', 512)
    # Wrap the raw prompt in the active model's chat template
    prompt = f"{prefix}\n{item['prompt']}{suffix}"
    result = llm(prompt, max_tokens=max_tokens)
    return result
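
For a quick smoke test, here is a minimal client sketch. It assumes the two routes are mounted at /change and /stream as in the code above and that the server is running locally on port 8000; both the paths and the host/port are assumptions, so adjust them to your deployment:

import requests

BASE = "http://localhost:8000"  # assumed host and port

# Swap the backend to Phi-2; n_ctx falls back to the model's configured default (2048)
requests.post(f"{BASE}/change", json={"llm": "Phi-2 2.7b 4_K_M 2048"})

# Run a completion; max_tokens falls back to 512 server-side if omitted
r = requests.post(f"{BASE}/stream", json={"prompt": "Explain GGUF quantization in one sentence.", "max_tokens": 256})
print(r.json()["choices"][0]["text"])

The /stream endpoint returns the raw llama-cpp-python completion dict, so the generated text sits under choices[0]['text'].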