from huggingface_hub import InferenceClient
import gradio as gr

client = InferenceClient(
    "mistralai/Mixtral-8x7B-Instruct-v0.1"
)
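
# Note (assumption): anonymous calls to the serverless Inference API are
# rate-limited; passing a user token raises the limit, e.g.
#   client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1",
#                            token=os.environ.get("HF_TOKEN"))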


def format_prompt(message, history):
    """Assemble the Mixtral-Instruct chat template: each past turn becomes
    `[INST] user [/INST] response</s>`, and the new message is appended as an
    open `[INST] ... [/INST]` block for the model to complete."""
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
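
# Example (illustrative): with history = [("Hi", "Hello!")] and message = "Bye",
# format_prompt returns:
#   "<s>[INST] Hi [/INST] Hello!</s> [INST] Bye [/INST]"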

def generate(
    prompt, history, system_prompt="", top_p=0.95, repetition_penalty=1.0,
):
    top_p = float(top_p)
    repetition_penalty = float(repetition_penalty)
    generate_kwargs = dict(
        temperature=0.9,
        max_new_tokens=2560,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,  # fixed seed for reproducible sampling
    )

    # Prepend the system prompt to the user message only when one is provided.
    full_message = f"{system_prompt}, {prompt}" if system_prompt else prompt
    formatted_prompt = format_prompt(full_message, history)
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )

    # Accumulate tokens as they arrive and yield the growing text so the
    # Gradio chatbot renders the response incrementally.
    output = ""
    for response in stream:
        output += response.token.text
        yield output
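
# Quick smoke test (illustrative, not executed by the app): iterate the
# generator directly to watch the partial outputs stream in.
#
#   for partial in generate("Hello!", history=[], system_prompt="Be concise."):
#       print(partial)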

gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    # Extra inputs are passed to `generate` after (message, history), in order.
    additional_inputs=[
        gr.Textbox(label="System prompt", value=""),
        gr.Slider(label="Top-p", minimum=0.0, maximum=1.0, step=0.05, value=0.95),
        gr.Slider(label="Repetition penalty", minimum=1.0, maximum=2.0, step=0.05, value=1.0),
    ],
    title="Nova v2",
    concurrency_limit=20,
).launch(show_api=False)
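
# Deployment sketch (assumption): on Hugging Face Spaces this file is saved as
# app.py and launched automatically; locally, `python app.py` serves the UI at
# http://127.0.0.1:7860 by default.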