eliebak HF staff committed on
Commit
f1cb7b4
1 Parent(s): 4b1a71c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -26
app.py CHANGED
@@ -6,29 +6,28 @@ import torch
6
  import spaces
7
 
8
  # Initialize the client with your model
9
- client = InferenceClient("karpathy/gpt2_1558M_final2_hf") # Replace with your model's name or endpoint
10
 
11
  default_system = 'You are a helpful assistant'
12
 
13
  @spaces.GPU
14
  def respond(message, history, system_message, max_tokens, temperature, top_p):
15
- messages = [{"role": "system", "content": system_message}]
 
16
  for user, assistant in history:
17
- messages.append({"role": "user", "content": user})
18
- messages.append({"role": "assistant", "content": assistant})
19
- messages.append({"role": "user", "content": message})
20
 
21
  response = ""
22
- for chunk in client.chat_completion(
23
- messages,
24
- max_tokens=max_tokens,
25
  stream=True,
26
  temperature=temperature,
27
  top_p=top_p,
28
  ):
29
- if chunk.choices[0].delta.content is not None:
30
- token = chunk.choices[0].delta.content
31
- response += token
32
  yield history + [(message, response)]
33
 
34
  # If the response is empty, yield a default message
@@ -43,21 +42,15 @@ def modify_system_session(system):
43
  system = default_system
44
  return system, system, []
45
 
46
- def use_example(example):
47
- return example
48
-
49
- def set_unicorn_example():
50
- return unicorn_example
51
-
52
- def set_time_travel_example():
53
- return time_travel_example
54
 
55
  # Define example prompts
56
  unicorn_example = "In a shocking finding, scientist discovered a herd of unicorns living in a remote, previously unexplored valley, in the Andes Mountains. Even more surprising to the researchers was the fact that the unicorns spoke perfect English."
57
  time_travel_example = "Explain the grandfather paradox in time travel and propose a potential resolution."
58
 
59
  with gr.Blocks() as demo:
60
- gr.Markdown("<h1 style='text-align: center;'>LLM.C 1.5B Chat Demo</h1>")
61
 
62
  with gr.Row():
63
  with gr.Column(scale=3):
@@ -66,7 +59,7 @@ with gr.Blocks() as demo:
66
  modify_system = gr.Button("🛠️ Set system prompt and clear history")
67
 
68
  system_state = gr.Textbox(value=default_system, visible=False)
69
- chatbot = gr.Chatbot(label='LLM.C Chat')
70
  message = gr.Textbox(lines=1, label='Your message')
71
 
72
  with gr.Row():
@@ -89,14 +82,23 @@ with gr.Blocks() as demo:
89
  submit.click(respond, inputs=[message, chatbot, system_state, max_tokens, temperature, top_p], outputs=[chatbot])
90
  clear_history.click(fn=clear_session, inputs=[], outputs=[message, chatbot])
91
  modify_system.click(fn=modify_system_session, inputs=[system_input], outputs=[system_state, system_input, chatbot])
92
-
93
- example1.click(fn=set_unicorn_example, inputs=[], outputs=[message])
94
- example2.click(fn=set_time_travel_example, inputs=[], outputs=[message])
 
 
 
 
 
 
 
 
 
95
 
96
  gr.Markdown(
97
  """
98
- ## About LLM.C
99
- some stuff about llmc
100
  """
101
  )
102
 
 
6
  import spaces
7
 
8
  # Initialize the client with your model
9
+ client = InferenceClient("karpathy/gpt2_1558M_final2_hf")
10
 
11
  default_system = 'You are a helpful assistant'
12
 
13
  @spaces.GPU
14
  def respond(message, history, system_message, max_tokens, temperature, top_p):
15
+ # Combine system message, history, and new message
16
+ full_prompt = f"{system_message}\n\n"
17
  for user, assistant in history:
18
+ full_prompt += f"Human: {user}\nAssistant: {assistant}\n"
19
+ full_prompt += f"Human: {message}\nAssistant:"
 
20
 
21
  response = ""
22
+ for chunk in client.text_generation(
23
+ full_prompt,
24
+ max_new_tokens=max_tokens,
25
  stream=True,
26
  temperature=temperature,
27
  top_p=top_p,
28
  ):
29
+ if chunk.token.text:
30
+ response += chunk.token.text
 
31
  yield history + [(message, response)]
32
 
33
  # If the response is empty, yield a default message
 
42
  system = default_system
43
  return system, system, []
44
 
45
+ def use_example_and_respond(example, history, system_message, max_tokens, temperature, top_p):
46
+ return example, respond(example, history, system_message, max_tokens, temperature, top_p)
 
 
 
 
 
 
47
 
48
  # Define example prompts
49
  unicorn_example = "In a shocking finding, scientist discovered a herd of unicorns living in a remote, previously unexplored valley, in the Andes Mountains. Even more surprising to the researchers was the fact that the unicorns spoke perfect English."
50
  time_travel_example = "Explain the grandfather paradox in time travel and propose a potential resolution."
51
 
52
  with gr.Blocks() as demo:
53
+ gr.Markdown("<h1 style='text-align: center;'>GPT-2 1.5B Chat Demo</h1>")
54
 
55
  with gr.Row():
56
  with gr.Column(scale=3):
 
59
  modify_system = gr.Button("🛠️ Set system prompt and clear history")
60
 
61
  system_state = gr.Textbox(value=default_system, visible=False)
62
+ chatbot = gr.Chatbot(label='GPT-2 1.5B Chat')
63
  message = gr.Textbox(lines=1, label='Your message')
64
 
65
  with gr.Row():
 
82
  submit.click(respond, inputs=[message, chatbot, system_state, max_tokens, temperature, top_p], outputs=[chatbot])
83
  clear_history.click(fn=clear_session, inputs=[], outputs=[message, chatbot])
84
  modify_system.click(fn=modify_system_session, inputs=[system_input], outputs=[system_state, system_input, chatbot])
85
+
86
+ # New event handlers for example prompts
87
+ example1.click(
88
+ fn=use_example_and_respond,
89
+ inputs=[lambda: unicorn_example, chatbot, system_state, max_tokens, temperature, top_p],
90
+ outputs=[message, chatbot]
91
+ )
92
+ example2.click(
93
+ fn=use_example_and_respond,
94
+ inputs=[lambda: time_travel_example, chatbot, system_state, max_tokens, temperature, top_p],
95
+ outputs=[message, chatbot]
96
+ )
97
 
98
  gr.Markdown(
99
  """
100
+ ## About GPT-2 1.5B
101
+ This is a large language model trained by OpenAI and fine-tuned by Andrej Karpathy. It's capable of generating human-like text based on the input it receives.
102
  """
103
  )
104