from huggingface_hub import InferenceClient
import gradio as gr
import os

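# Shared client for the hosted Mixtral-8x7B-Instruct model on the
# Hugging Face Inference API.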
client = InferenceClient(
    "mistralai/Mixtral-8x7B-Instruct-v0.1"
)


def format_prompt(message, history):
    # Seed the prompt with the contents of pr.txt (assumed to hold the
    # persona/system prompt and to ship alongside the app).
    with open("pr.txt") as f:
        prompt = f.read()

    # Replay prior turns in the Mixtral instruct format, then append the
    # current user message.
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
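
# Illustrative shape of the assembled prompt for a one-turn history, assuming
# pr.txt holds the system prompt:
#   <contents of pr.txt>[INST] Hi [/INST] Hey there!</s> [INST] <new message> [/INST]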

def log_conversation(user_prompt, bot_response, log_file="text/dump.txt"):
    # Append the exchange to the log file, creating its directory if needed.
    os.makedirs(os.path.dirname(log_file), exist_ok=True)
    with open(log_file, "a") as f:
        f.write(f"User: {user_prompt}\n")
        f.write(f"Bot: {bot_response}\n")

    print(f"Conversation logged to {log_file}")


def generate(
    prompt, history, temperature=0.1, max_new_tokens=4096, top_p=0.95, repetition_penalty=1.0,
):
    # text_generation rejects temperature == 0, so clamp to a small positive value.
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )

    formatted_prompt = format_prompt(prompt, history)
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""

    # Yield partial text as tokens arrive so the UI streams the reply.
    for response in stream:
        output += response.token.text
        yield output

    log_conversation(prompt, output)
    # If the model replied with nothing but a bare greeting, swap in a
    # livelier canned line.
    if output.lower().strip() in ["hi", "hello", "hey"]:
        yield "Hey, what's up? What are you looking for, my man?"




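# Sliders exposed under "Additional Inputs" in the chat UI; their values are
# passed positionally to generate() and override its keyword defaults.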
additional_inputs=[
    gr.Slider(
        label="Temperature",
        value=0.1,
        minimum=0.0,
        maximum=1.0,
        step=0.1,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=256,
        minimum=0,
        maximum=1024,
        step=64,
        interactive=True,
        info="The maximum number of new tokens to generate",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    )
]

examples = [
    ["Hi", None, None, None, None],
    ["Do you have exclusive content planned for your subscribers soon?", None, None, None, None],
    ["Can you tell me more about yourself?", None, None, None, None],
    ["Tell me about your exclusive adult content.", None, None, None, None],
]

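# Assemble and launch the chat UI. share=True requests a temporary public
# gradio.live link; show_api=False hides the auto-generated API docs.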
gr.ChatInterface(
    fn=generate,
    chatbot=gr.Chatbot(show_label=False, show_share_button=True, show_copy_button=True, likeable=True, layout="panel"),
    additional_inputs=additional_inputs,
    title="AmberBot",
    examples=examples,
    concurrency_limit=128,
    theme=gr.themes.Default(primary_hue=gr.themes.colors.green, secondary_hue=gr.themes.colors.yellow),
).launch(show_api=False, share=True)