from huggingface_hub import InferenceClient
import gradio as gr

# Client for the hosted Llama 3 8B Instruct model on the Hugging Face Inference API
client = InferenceClient(
    "meta-llama/Meta-Llama-3-8B-Instruct"
)
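
# generate() streams tokens from the endpoint, then trims the reply so it
# ends on a complete sentence before returning it to the chat UI.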

# Sentence-ending punctuation used when trimming an incomplete final sentence
punctuation_marks = [".", "!", "?"]


def generate(
    prompt, history, temperature=0.2, max_new_tokens=256, top_p=0.8, repetition_penalty=1.0,
):
    temperature = float(temperature)
    # Keep temperature strictly positive so sampling remains well defined
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    # Stream the generation and accumulate the full response text
    stream = client.text_generation(
        prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""

    for response in stream:
        output += response.token.text

    # Trim any trailing partial sentence so the reply ends on ".", "!" or "?"
    while output and output[-1] not in punctuation_marks:
        output = output[:-1]
    return output


# Sliders shown under the chat box as "Additional Inputs"
additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.2,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=256,
        minimum=0,
        maximum=1024,
        step=64,
        interactive=True,
        info="The maximum numbers of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.80,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.0,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    )
]
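
# gr.ChatInterface passes each slider value to generate() after the prompt and
# history arguments, in the order the sliders are listed above.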


gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    title= "Have a chat with llama 3 8B πŸ¦™",
    additional_inputs=additional_inputs,
    examples=[
        ["Can you explain briefly to me what is the Python programming language?"],
        ["Write a 100-word article on 'Benefits of Open-Source in AI research'."],
    ],
    cache_examples=True
).launch(show_api=False)