import spaces
import json
import subprocess
from llama_cpp import Llama
from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
from llama_cpp_agent.providers import LlamaCppPythonProvider
from llama_cpp_agent.chat_history import BasicChatHistory
from llama_cpp_agent.chat_history.messages import Roles
import gradio as gr
from huggingface_hub import hf_hub_download

# Cached llama.cpp instance and the name of the model it was loaded with,
# so weights are only (re)loaded when the selected model changes.
llm = None
llm_model = None

# Download the quantized GGUF weights into ./models before the app starts.
hf_hub_download(
    repo_id="bartowski/Reflection-Llama-3.1-70B-GGUF",
    filename="Reflection-Llama-3.1-70B-Q3_K_M.gguf",
    local_dir="./models"
)

def get_messages_formatter_type(model_name):
    # Pick the prompt template that matches the model family.
    if "Llama" in model_name:
        return MessagesFormatterType.LLAMA_3
    else:
        raise ValueError(f"Unsupported model: {model_name}")

@spaces.GPU(duration=80)
def respond(
    message,
    history: list[tuple[str, str]],
    model,
    system_message,
    max_tokens,
    temperature,
    top_p,
    top_k,
    repeat_penalty,
):
    global llm
    global llm_model

    chat_template = get_messages_formatter_type(model)

    # Load the model only if it is not cached yet or a different model was selected.
    if llm is None or llm_model != model:
        llm = Llama(
            model_path=f"models/{model}",
            flash_attn=True,
            n_gpu_layers=81,
            n_batch=1024,
            n_ctx=8192,
        )
        llm_model = model

    provider = LlamaCppPythonProvider(llm)

    agent = LlamaCppAgent(
        provider,
        system_prompt=f"{system_message}",
        predefined_messages_formatter_type=chat_template,
        debug_output=True
    )

    # Apply the sampling parameters coming from the Gradio controls.
    settings = provider.get_provider_default_settings()
    settings.temperature = temperature
    settings.top_k = top_k
    settings.top_p = top_p
    settings.max_tokens = max_tokens
    settings.repeat_penalty = repeat_penalty
    settings.stream = True

    # Rebuild the chat history from the (user, assistant) tuples Gradio passes in.
    messages = BasicChatHistory()
    for user_msg, assistant_msg in history:
        messages.add_message({
            'role': Roles.user,
            'content': user_msg
        })
        messages.add_message({
            'role': Roles.assistant,
            'content': assistant_msg
        })

    # Stream the response and yield the accumulated text so the chat
    # window updates incrementally.
    stream = agent.get_chat_response(
        message,
        llm_sampling_settings=settings,
        chat_history=messages,
        returns_streaming_generator=True,
        print_output=False
    )

    outputs = ""
    for output in stream:
        outputs += output
        yield outputs

description = """