eliebak committed
Commit e928714
1 Parent(s): 925a557

Update app.py

Files changed (1)
  1. app.py +49 -39
app.py CHANGED
@@ -4,32 +4,20 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 import torch
 import spaces
-"""
-For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-"""
-client = InferenceClient("HuggingFaceH4/zephyr-7b-beta") #to change with the model
+# Initialize the client with your model
+client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")  # Replace with your model's name or endpoint
+
+default_system = 'You are a helpful assistant'
 
 @spaces.GPU
-def respond(
-    message,
-    history: list[tuple[str, str]],
-    system_message,
-    max_tokens,
-    temperature,
-    top_p,
-):
+def respond(message, history, system_message, max_tokens, temperature, top_p):
     messages = [{"role": "system", "content": system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
+    for user, assistant in history:
+        messages.append({"role": "user", "content": user})
+        messages.append({"role": "assistant", "content": assistant})
     messages.append({"role": "user", "content": message})
 
     response = ""
-
     for message in client.chat_completion(
         messages,
         max_tokens=max_tokens,
@@ -38,29 +26,51 @@ def respond(
         top_p=top_p,
     ):
         token = message.choices[0].delta.content
-
        response += token
         yield response
 
-"""
-For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-"""
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.95,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        ),
-    ],
-)
+def clear_session():
+    return "", []
+
+def modify_system_session(system):
+    if not system:
+        system = default_system
+    return system, system, []
+
+with gr.Blocks() as demo:
+    gr.Markdown("<h1 style='text-align: center;'>LLM.C 1.5B Chat Demo 🤖</h1>")
+
+    with gr.Row():
+        with gr.Column(scale=3):
+            system_input = gr.Textbox(value=default_system, lines=1, label='System Prompt')
+        with gr.Column(scale=3):
+            modify_system = gr.Button("🛠️ Set system prompt and clear history")
+
+    system_state = gr.Textbox(value=default_system, visible=False)
+    chatbot = gr.Chatbot(label='LLM.C Chat')
+    message = gr.Textbox(lines=1, label='Your message')
+
+    with gr.Row():
+        clear_history = gr.Button("🧹 Clear history")
+        submit = gr.Button("🚀 Send")
+
+    with gr.Accordion("Advanced Settings", open=False):
+        max_tokens = gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max New Tokens")
+        temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature")
+        top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (Nucleus Sampling)")
+
+    # Set up event handlers
+    message.submit(respond, inputs=[message, chatbot, system_state, max_tokens, temperature, top_p], outputs=[chatbot])
+    submit.click(respond, inputs=[message, chatbot, system_state, max_tokens, temperature, top_p], outputs=[chatbot])
+    clear_history.click(fn=clear_session, inputs=[], outputs=[message, chatbot])
+    modify_system.click(fn=modify_system_session, inputs=[system_input], outputs=[system_state, system_input, chatbot])
 
+    gr.Markdown(
+        """
+        ## About LLM.C
+        Some stuff about llm.c + link to the github repo
+        """
+    )
 
 if __name__ == "__main__":
     demo.launch()
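
Note on the new wiring, not part of the commit: `respond` yields a plain string, but both handlers send its output to `outputs=[chatbot]`, and `gr.Chatbot` renders a list of (user, assistant) pairs rather than a bare string. Below is a minimal adapter sketch; it assumes Gradio's tuple-style Chatbot history and the same placeholder model as app.py, and the name `respond_to_chatbot` is hypothetical.

from huggingface_hub import InferenceClient

client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")  # same placeholder model as app.py

def respond_to_chatbot(message, history, system_message, max_tokens, temperature, top_p):
    # Rebuild the chat transcript for the model from the Chatbot's (user, assistant) pairs.
    messages = [{"role": "system", "content": system_message}]
    for user, assistant in history:
        messages.append({"role": "user", "content": user})
        messages.append({"role": "assistant", "content": assistant})
    messages.append({"role": "user", "content": message})

    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,                 # assumed; the unchanged lines 36-37 of app.py are not shown in the diff
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:                    # some streamed chunks carry no text
            response += token
        # Yield the full history so gr.Chatbot keeps every prior turn plus the growing reply.
        yield history + [(message, response)]

With this shape, the existing `message.submit(...)` and `submit.click(...)` calls can keep `outputs=[chatbot]` unchanged; only the function they point at would differ.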