import os

import gradio as gr
from huggingface_hub import InferenceClient

HF_TOKEN = os.environ.get("HF_TOKEN", None)

API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-2-70b-chat-hf"
API_URL_2 = "https://api-inference.huggingface.co/models/codellama/CodeLlama-34b-Instruct-hf"

BOT_NAME = "Assistant"

# Strings that end a turn; "</s>" is the Llama-2 end-of-sequence token.
# An empty-string stop sequence would truncate every partial output to "".
STOP_SEQUENCES = ["\nUser:", " User:", "###", "</s>"]

EXAMPLES = [
    ["Hey LLAMA! Any recommendations for my holidays in Abu Dhabi?"],
    ["What's the Everett interpretation of quantum mechanics?"],
    ["Give me a list of the top 10 dive sites you would recommend around the world."],
    ["Can you tell me more about deep-water soloing?"],
    ["Can you write a short tweet about the release of our latest AI model, LLAMA LLM?"],
]

# Primary client (Llama-2 70B chat) and fallback client (CodeLlama 34B).
client = InferenceClient(
    API_URL,
    headers={"Authorization": f"Bearer {HF_TOKEN}"},
)
client2 = InferenceClient(
    API_URL_2,
    headers={"Authorization": f"Bearer {HF_TOKEN}"},
)


def format_prompt(message, history, system_prompt):
    """Flatten the chat history into a single User:/Assistant: transcript."""
    prompt = ""
    if system_prompt:
        prompt += f"System: {system_prompt}\n"
    for user_prompt, bot_response in history:
        prompt += f"User: {user_prompt}\n"
        prompt += f"{BOT_NAME}: {bot_response}\n"
    prompt += f"User: {message}\n{BOT_NAME}:"
    return prompt


seed = 42


def generate(
    prompt,
    history,
    system_prompt="",
    temperature=0.9,
    max_new_tokens=256,
    top_p=0.95,
    repetition_penalty=1.0,
):
    # The inference API rejects a temperature of 0, so clamp to a small positive value.
    temperature = max(float(temperature), 1e-2)
    top_p = float(top_p)

    # Bump the seed on every call so regenerating a reply gives a new sample.
    global seed
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        stop_sequences=STOP_SEQUENCES,
        do_sample=True,
        seed=seed,
    )
    seed += 1

    formatted_prompt = format_prompt(prompt, history, system_prompt)

    # Stream from the primary model; if it fails, fall back to the secondary
    # one, and only raise once both clients have errored.
    errors = []
    for name, active_client in (("Client 1", client), ("Client 2", client2)):
        try:
            stream = active_client.text_generation(
                formatted_prompt,
                **generate_kwargs,
                stream=True,
                details=True,
                return_full_text=False,
            )
            output = ""
            for response in stream:
                output += response.token.text
                # Trim any stop sequence the model emitted verbatim.
                for stop_str in STOP_SEQUENCES:
                    if output.endswith(stop_str):
                        output = output[: -len(stop_str)]
                yield output
            yield output
            return
        except Exception as e:
            errors.append(f"{name} error while generating: {e}")

    raise gr.Error(" | ".join(errors))


additional_inputs = [
    gr.Textbox("", label="Optional system prompt"),
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=256,
        minimum=0,
        maximum=3000,
        step=64,
        interactive=True,
        info="The maximum number of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.01,
        maximum=0.99,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]

with gr.Blocks() as demo:
    gr.ChatInterface(
        generate,
        examples=EXAMPLES,
        additional_inputs=additional_inputs,
    )

# Gradio 3.x queue API; `concurrency_count` was removed in Gradio 4.
demo.queue(concurrency_count=100, api_open=False).launch(show_api=False)
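
# Usage sketch (the filename "app.py" and the token value below are
# placeholders, not given in the source). Set a Hugging Face access token
# that has been granted access to the gated Llama-2 weights, then run the
# script; Gradio serves the chat UI on http://127.0.0.1:7860 by default:
#
#   export HF_TOKEN=hf_your_token_here
#   python app.py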