ArmelR committed
Commit d7a0f2f
1 Parent(s): aa877ce

Update app.py

Files changed (1):
  1. app.py +381 -187
app.py CHANGED
@@ -4,17 +4,24 @@ import re
  import shutil
  import requests
  import warnings
-
  import gradio as gr
  from huggingface_hub import Repository
  from text_generation import Client
-
  from share_btn import community_icon_html, loading_icon_html, share_js, share_btn_css

  HF_TOKEN = os.environ.get("HF_TOKEN", None)

  API_URL_G = "https://api-inference.huggingface.co/models/ArmelR/starcoder-gradio-v0"
  API_URL_S = "https://api-inference.huggingface.co/models/HuggingFaceH4/starcoderbase-finetuned-oasst1"

  with open("./HHH_prompt_short.txt", "r") as f:
      HHH_PROMPT = f.read() + "\n\n"
@@ -30,6 +37,7 @@ FIM_SUFFIX = "<fim_suffix>"

  FIM_INDICATOR = "<FILL_HERE>"

  FORMATS = """
  # Chat mode
  Chat mode prepends the custom [TA prompt](https://huggingface.co/spaces/bigcode/chat-playground/blob/main/TA_prompt_v0.txt) or the [HHH prompt](https://gist.github.com/jareddk/2509330f8ef3d787fc5aaac67aab5f11#file-hhh_prompt-txt) from Anthropic to the request which conditions the model to serve as an assistant.
@@ -38,6 +46,7 @@ Chat mode prepends the custom [TA prompt](https://huggingface.co/spaces/bigcode/

  """

  theme = gr.themes.Monochrome(
      primary_hue="indigo",
      secondary_hue="blue",
@@ -59,6 +68,49 @@ client_s = Client(
      API_URL_S, headers={"Authorization": f"Bearer {HF_TOKEN}"},
  )

  def wrap_html_code(text):
      pattern = r"<.*?>"
      matches = re.findall(pattern, text)
@@ -66,172 +118,280 @@ def wrap_html_code(text):
          return f"```{text}```"
      else:
          return text
-
  def generate(
-     prompt,
-     temperature=0.9,
-     max_new_tokens=256,
-     top_p=0.95,
-     repetition_penalty=1.0,
-     chat_mode="TA prompt",
-     version="StarCoder-gradio",
  ):

      temperature = float(temperature)
      if temperature < 1e-2:
          temperature = 1e-2
      top_p = float(top_p)
-     fim_mode = False

      generate_kwargs = dict(
          temperature=temperature,
          max_new_tokens=max_new_tokens,
          top_p=top_p,
          repetition_penalty=repetition_penalty,
-         truncate=7500,
          do_sample=True,
-         seed=42,
-         stop_sequences=["\nHuman", "\n-----", "Question:", "Answer:"],
      )
-
-     if chat_mode == "HHH prompt":
-         base_prompt = HHH_PROMPT
-     elif chat_mode == "TA prompt":
-         base_prompt = TA_PROMPT
-     else :
-         base_prompt = NO_PROMPT
-
-
-     if version == "StarCoder-gradio" :
-         chat_prompt = prompt + "\n\nAnswer:"
-         prompt = base_prompt + chat_prompt
-         print("PROMPT : "+str(prompt))
-         stream = client_g.generate_stream(prompt, **generate_kwargs)
-     elif version == "StarChat-alpha" :
-         chat_prompt = prompt + "\n\nAssistant:"
-         prompt = base_prompt + chat_prompt
-         stream = client_s.generate_stream(prompt, **generate_kwargs)
-     else :
-         ValueError("Unsupported version of the Coding assistant.")
-
-     output = ""
-     previous_token = ""
-     """
-     for response in stream:
-         if (
-             (response.token.text in ["Human", "-----", "Question:"] and previous_token in ["\n", "-----"])
-             or response.token.text in ["<|endoftext|>", "<|end|>"]
-         ):
-             return wrap_html_code(output.strip())
-         else:
-             output += response.token.text
-         previous_token = response.token.text
-     return wrap_html_code(output.strip())
-     """
-     for idx, response in enumerate(stream) :
-         if response.token.special :
-             continue
-         if (
-             (response.token.text in ["Human", "-----", "Question:"] and previous_token in ["\n", "-----"])
-             or response.token.text in ["<|endoftext|>", "<|end|>"]
-         ):
-             break
-         else :
-             output += response.token.text
-         previous_token = response.token.text
-         yield wrap_html_code(output.strip())
-     return wrap_html_code(output.strip())
-
- # chatbot mode
- def user(user_message, history):
-     return "", history + [[user_message, None]]
-
-
- def bot(
-     history,
-     temperature=0.9,
-     max_new_tokens=256,
-     top_p=0.95,
-     repetition_penalty=1.0,
-     chat_mode=None,
-     version="StarChat",
- ):
-     # concat history of prompts with answers expect for last empty answer only add prompt
-     if version == "StarCoder-gradio" :
-         prompt = "\n".join(
-             [f"Question: {prompt}\n\nAnswer: {answer}" for prompt, answer in history[:-1]] + [f"\nQuestion: {history[-1][0]}"]
-         )
-     else :
-         prompt = "\n".join(
-             [f"Human: {prompt}\n\nAssistant: {answer}" for prompt, answer in history[:-1]] + [f"\nHuman: {history[-1][0]}"]
-         )
-
-     bot_message = generate(
          prompt,
-         temperature=temperature,
-         max_new_tokens=max_new_tokens,
-         top_p=top_p,
-         repetition_penalty=repetition_penalty,
-         chat_mode=chat_mode,
-         version=version
-
-
      )
-     history[-1][1] = bot_message
-     return history


  examples = [
-     "def print_hello_world():",
-     "def fibonacci(n):",
-     "class TransformerDecoder(nn.Module):",
-     "class ComplexNumbers:",
-     "How to install gradio"
  ]


  def process_example(args):
-     for x in generate(args):
          pass
-     return x


- css = ".generating {visibility: hidden}" + share_btn_css

- with gr.Blocks(theme=theme, analytics_enabled=False, css=css) as demo:
-     with gr.Column():
-         gr.Markdown(
-             """\
- #Gradio Assistant powered by 💫 StarCoder
- _Note:_ this is an internal chat playground - **please do not share**. The deployment can also change and thus the space not work as we continue development.\
- """
          )
-         with gr.Row():
-             column_1, column_2 = gr.Column(scale=3), gr.Column(scale=1)
-             with column_2:
-                 chat_mode = gr.Dropdown(
-                     ["NO prompt","TA prompt", "HHH prompt"],
-                     value="NO prompt",
-                     label="Chat mode",
-                     info="Use Anthropic's HHH prompt or our custom tech prompt to turn the model into an assistant.",
-                 )
                  temperature = gr.Slider(
                      label="Temperature",
                      value=0.2,
                      minimum=0.0,
-                     maximum=2.0,
                      step=0.1,
                      interactive=True,
                      info="Higher values produce more diverse outputs",
                  )
-                 max_new_tokens = gr.Slider(
-                     label="Max new tokens",
-                     value=512,
-                     minimum=0,
-                     maximum=8192,
-                     step=64,
                      interactive=True,
-                     info="The maximum numbers of new tokens",
                  )
                  top_p = gr.Slider(
                      label="Top-p (nucleus sampling)",
@@ -242,68 +402,102 @@ _Note:_ this is an internal chat playground - **please do not share**. The deplo
                      interactive=True,
                      info="Higher values sample more low-probability tokens",
                  )
                  repetition_penalty = gr.Slider(
-                     label="Repetition penalty",
                      value=1.2,
-                     minimum=1.0,
-                     maximum=2.0,
-                     step=0.05,
                      interactive=True,
-                     info="Penalize repeated tokens",
-                 )
-                 version = gr.Dropdown(
-                     ["StarCoder-gradio", "StarChat-alpha"],
-                     value="StarCoder-gradio",
-                     label="Version",
-                     info="",
-                 )
-             with column_1:
-                 # output = gr.Code(elem_id="q-output")
-                 # add visibl=False and update if chat_mode True
-                 chatbot = gr.Chatbot()
-                 instruction = gr.Textbox(
-                     placeholder="Enter your prompt here",
-                     label="Prompt",
-                     elem_id="q-input",
                  )
-                 with gr.Row():
-                     with gr.Column():
-                         clear = gr.Button("Clear Chat")
-                     with gr.Column():
-                         submit = gr.Button("Generate", variant="primary")
-                 with gr.Group(elem_id="share-btn-container"):
-                     community_icon = gr.HTML(community_icon_html, visible=True)
-                     loading_icon = gr.HTML(loading_icon_html, visible=True)
-                     share_button = gr.Button(
-                         "Share to community", elem_id="share-btn", visible=True
-                     )
-                 # examples of non-chat mode
-                 #gr.Examples(
-                 #    examples=examples,
-                 #    inputs=[instruction],
-                 #    cache_examples=False,
-                 #    fn=process_example,
-                 #    outputs=[output],
-                 # )
-         gr.Markdown(FORMATS)
-
-
-     instruction.submit(
-         user, [instruction, chatbot], [instruction, chatbot], queue=False
-     ).then(
-         bot,
-         [chatbot, temperature, max_new_tokens, top_p, repetition_penalty, chat_mode, version],
          chatbot,
-     )

-     submit.click(
-         user, [instruction, chatbot], [instruction, chatbot], queue=False
-     ).then(
-         bot,
-         [chatbot, temperature, max_new_tokens, top_p, repetition_penalty, chat_mode, version],
          chatbot,
-     )
-     clear.click(lambda: None, None, chatbot, queue=False)
-
-     share_button.click(None, [], [], _js=share_js)
-     demo.queue(concurrency_count=16).launch(debug=True)
 
  import shutil
  import requests
  import warnings
  import gradio as gr
  from huggingface_hub import Repository
  from text_generation import Client
  from share_btn import community_icon_html, loading_icon_html, share_js, share_btn_css

  HF_TOKEN = os.environ.get("HF_TOKEN", None)
+ DIALOGUES_DATASET = "ArmelR/gradio_playground_dialogues"

  API_URL_G = "https://api-inference.huggingface.co/models/ArmelR/starcoder-gradio-v0"
  API_URL_S = "https://api-inference.huggingface.co/models/HuggingFaceH4/starcoderbase-finetuned-oasst1"
+ API_URL_B = "https://api-inference.huggingface.co/models/HuggingFaceH4/starchat-beta"
+
+ model2endpoint = {
+     "starChat-alpha": API_URL_S,
+     "starCoder-gradio": API_URL_G,
+     "starChat-beta": API_URL_B
+ }
+ model_names = list(model2endpoint.keys())

  with open("./HHH_prompt_short.txt", "r") as f:
      HHH_PROMPT = f.read() + "\n\n"
 

  FIM_INDICATOR = "<FILL_HERE>"

+
  FORMATS = """
  # Chat mode
  Chat mode prepends the custom [TA prompt](https://huggingface.co/spaces/bigcode/chat-playground/blob/main/TA_prompt_v0.txt) or the [HHH prompt](https://gist.github.com/jareddk/2509330f8ef3d787fc5aaac67aab5f11#file-hhh_prompt-txt) from Anthropic to the request which conditions the model to serve as an assistant.

  """
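The FORMATS text above describes chat mode only in prose. As a rough sketch of what "prepends ... to the request" amounts to (using the prompt variables referenced elsewhere in this file and an invented user request; this is an illustration, not the committed template):

```python
# Illustration only: how chat mode conditions the model.
base_prompt = TA_PROMPT          # or HHH_PROMPT, or "" when "NO prompt" is selected
user_request = "How to install gradio"
prompt = base_prompt + user_request + "\n\nAnswer:"   # conditioned prompt sent to the endpoint
```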

+
  theme = gr.themes.Monochrome(
      primary_hue="indigo",
      secondary_hue="blue",
 
      API_URL_S, headers={"Authorization": f"Bearer {HF_TOKEN}"},
  )

+ def randomize_seed_generator():
+     seed = random.randint(0, 1000000)
+     return seed
+
+
+ def save_inputs_and_outputs(now, inputs, outputs, generate_kwargs, model):
+     buffer = StringIO()
+     timestamp = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")
+     file_name = f"prompts_{timestamp}.jsonl"
+     data = {"model": model, "inputs": inputs, "outputs": outputs, "generate_kwargs": generate_kwargs}
+     pd.DataFrame([data]).to_json(buffer, orient="records", lines=True)
+
+     # Push to Hub
+     upload_file(
+         path_in_repo=f"{now.date()}/{now.hour}/{file_name}",
+         path_or_fileobj=buffer.getvalue().encode(),
+         repo_id=DIALOGUES_DATASET,
+         token=HF_TOKEN,
+         repo_type="dataset",
+     )
+
+     # Clean and rerun
+     buffer.close()
+
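`save_inputs_and_outputs` (and `randomize_seed_generator` above it) relies on several names that are not imported in the hunks shown here. Presumably the top of the file also gains imports along these lines (a sketch of the assumed imports, not the committed import block):

```python
# Assumed imports for the logging helpers; not visible in the shown hunks.
import datetime
import random
from io import StringIO

import pandas as pd
from huggingface_hub import upload_file
```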
+ def get_total_inputs(inputs, chatbot, preprompt, user_name, assistant_name, sep):
+     past = []
+     for data in chatbot:
+         user_data, model_data = data
+
+         if not user_data.startswith(user_name):
+             user_data = user_name + user_data
+         if not model_data.startswith(sep + assistant_name):
+             model_data = sep + assistant_name + model_data
+
+         past.append(user_data + model_data.rstrip() + sep)
+
+     if not inputs.startswith(user_name):
+         inputs = user_name + inputs
+
+     total_inputs = preprompt + "".join(past) + inputs + sep + assistant_name.rstrip()
+
+     return total_inputs
+
  def wrap_html_code(text):
      pattern = r"<.*?>"
      matches = re.findall(pattern, text)
118
  return f"```{text}```"
119
  else:
120
  return text
121
+
122
+ def has_no_history(chatbot, history):
123
+ return not chatbot and not history
124
+
125
+ def get_inference_prompt(messages, model_name):
126
+ if model_name == "starChat-beta" :
127
+ prompt = "<|system|>\n<|endoftext|>\n"
128
+ for message in messages :
129
+ if message["role"] == "user" :
130
+ prompt += f"<|user|>\n{message['content']}<|endoftext|>\n<|assistant|>"
131
+ else : #message["role"] == "assistant"
132
+ prompt += f"{message['content']}<|endoftext|>\n"
133
+ elif model_name == "starChat-alpha" :
134
+ prompt = "<|system|>\n<|end|>\n"
135
+ for message in messages :
136
+ if message["role"] == "user" :
137
+ prompt += f"<|user|>\n{message['content']}<|end|>\n<|assistant|>"
138
+ else : #message["role"] == "assistant"
139
+ prompt += f"{message['content']}<|end|>\n"
140
+ else : # starCoder-gradio
141
+ prompt = ""
142
+ for message in messages :
143
+ if message["role"] == "user" :
144
+ prompt += f"Question: {message['content']}\n\nAnswer:"
145
+ else : #message["role"] == "assistant"
146
+ prompt += f"{message['content']}\n\n"
147
+ return prompt
148
+
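For a sense of what these templates produce, here is an illustrative single-turn call (the message content is invented; the templates and special tokens are the ones in the code above):

```python
messages = [{"role": "user", "content": "How do I add a slider to a Gradio app?"}]

get_inference_prompt(messages, "starCoder-gradio")
# -> "Question: How do I add a slider to a Gradio app?\n\nAnswer:"

get_inference_prompt(messages, "starChat-alpha")
# -> "<|system|>\n<|end|>\n<|user|>\nHow do I add a slider to a Gradio app?<|end|>\n<|assistant|>"
```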
  def generate(
+     RETRY_FLAG,
+     model_name,
+     system_message,
+     user_message,
+     chatbot,
+     history,
+     temperature,
+     top_k,
+     top_p,
+     max_new_tokens,
+     repetition_penalty,
+     do_save=True,
  ):
+     client = Client(
+         model2endpoint[model_name],
+         headers={"Authorization": f"Bearer {API_TOKEN}"},
+         timeout=60,
+     )
+     # Don't return meaningless message when the input is empty
+     if not user_message:
+         print("Empty input")
+
+     if not RETRY_FLAG:
+         history.append(user_message)
+         seed = 42
+     else:
+         seed = randomize_seed_generator()
+
+     past_messages = []
+     for data in chatbot:
+         user_data, model_data = data
+
+         past_messages.extend(
+             [{"role": "user", "content": user_data}, {"role": "assistant", "content": model_data.rstrip()}]
+         )
+
+     if len(past_messages) < 1:
+         prompt = get_inference_prompt(messages=[{"role": "user", "content": user_message}], model_name=model_name)
+     else:
+         prompt = dialogue_template.get_inference_prompt(messages=past_messages + [{"role": "user", "content": user_message}], model_name=model_name)
+
+     generate_kwargs = {
+         "temperature": temperature,
+         "top_k": top_k,
+         "top_p": top_p,
+         "max_new_tokens": max_new_tokens,
+     }

      temperature = float(temperature)
      if temperature < 1e-2:
          temperature = 1e-2
      top_p = float(top_p)

      generate_kwargs = dict(
          temperature=temperature,
          max_new_tokens=max_new_tokens,
          top_p=top_p,
          repetition_penalty=repetition_penalty,
          do_sample=True,
+         truncate=4096,
+         seed=seed,
+         stop_sequences=["<|end|>", "Question:"],
      )
+
+     stream = client.generate_stream(
          prompt,
+         **generate_kwargs,
      )

+     output = ""
+     for idx, response in enumerate(stream):
+         if response.token.special:
+             continue
+         output += response.token.text
+         if idx == 0:
+             history.append(" " + output)
+         else:
+             history[-1] = output
+
+         chat = [
+             (wrap_html_code(history[i].strip()), wrap_html_code(history[i + 1].strip()))
+             for i in range(0, len(history) - 1, 2)
+         ]
+
+         # chat = [(history[i].strip(), history[i + 1].strip()) for i in range(0, len(history) - 1, 2)]
+
+         yield chat, history, user_message, ""
+
+     if HF_TOKEN and do_save:
+         try:
+             now = datetime.datetime.now()
+             current_time = now.strftime("%Y-%m-%d %H:%M:%S")
+             print(f"[{current_time}] Pushing prompt and completion to the Hub")
+             save_inputs_and_outputs(now, prompt, output, generate_kwargs, model_name)
+         except Exception as e:
+             print(e)
+
+     return chat, history, user_message, ""

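The flat `history` state alternates user and assistant messages, and the comprehension in `generate` pairs them into `Chatbot` tuples. A small illustration with invented values, using the commented-out simpler form of the pairing (without the `wrap_html_code` wrapper):

```python
history = [
    "How do I reverse a list?", " Use my_list[::-1] or my_list.reverse().",
    "And a string?", " The same slicing works: my_string[::-1].",
]
chat = [(history[i].strip(), history[i + 1].strip()) for i in range(0, len(history) - 1, 2)]
# chat == [("How do I reverse a list?", "Use my_list[::-1] or my_list.reverse()."),
#          ("And a string?", "The same slicing works: my_string[::-1].")]
```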
  examples = [
+     "How can I write a Python function to generate the nth Fibonacci number?",
+     "How do I get the current date using shell commands? Explain how it works.",
+     "What's the meaning of life?",
+     "Write a function in Javascript to reverse words in a given string.",
+     "Give the following data {'Name':['Tom', 'Brad', 'Kyle', 'Jerry'], 'Age':[20, 21, 19, 18], 'Height' : [6.1, 5.9, 6.0, 6.1]}. Can you plot one graph with two subplots as columns. The first is a bar graph showing the height of each person. The second is a bargraph showing the age of each person? Draw the graph in seaborn talk mode.",
+     "Create a regex to extract dates from logs",
+     "How to decode JSON into a typescript object",
+     "Write a list into a jsonlines file and save locally",
  ]

+ def clear_chat():
+     return [], []
+
+ def delete_last_turn(chat, history):
+     if chat and history:
+         chat.pop(-1)
+         history.pop(-1)
+         history.pop(-1)
+     return chat, history

  def process_example(args):
+     for [x, y] in generate(args):
          pass
+     return [x, y]
+
+ # Regenerate response
+ def retry_last_answer(
+     selected_model,
+     system_message,
+     user_message,
+     chat,
+     history,
+     temperature,
+     top_k,
+     top_p,
+     max_new_tokens,
+     repetition_penalty,
+     do_save,
+ ):
+     if chat and history:
+         # Removing the previous conversation from chat
+         chat.pop(-1)
+         # Removing bot response from the history
+         history.pop(-1)
+         # Setting up a flag to capture a retry
+         RETRY_FLAG = True
+         # Getting last message from user
+         user_message = history[-1]
+
+     yield from generate(
+         RETRY_FLAG,
+         selected_model,
+         system_message,
+         user_message,
+         chat,
+         history,
+         temperature,
+         top_k,
+         top_p,
+         max_new_tokens,
+         repetition_penalty,
+         do_save,
+     )

+ title = """<h1 align="center">⭐ Gradio Playground 💬</h1>"""
+ custom_css = """
+ #banner-image {
+     display: block;
+     margin-left: auto;
+     margin-right: auto;
+ }
+ #chat-message {
+     font-size: 14px;
+     min-height: 300px;
+ }
+ """

+ with gr.Blocks(analytics_enabled=False, css=custom_css) as demo:
+     gr.HTML(title)
+
+     with gr.Row():
+         with gr.Column():
+             gr.Image("thumbnail.png", elem_id="banner-image", show_label=False)
+         with gr.Column():
+             gr.Markdown(
+                 """
+                 💻 This demo showcases a series of **[StarChat](https://huggingface.co/models?search=huggingfaceh4/starchat)** language models, which are fine-tuned versions of the StarCoder family to act as helpful coding assistants. The base model has 16B parameters and was pretrained on one trillion tokens sourced from 80+ programming languages, GitHub issues, Git commits, and Jupyter notebooks (all permissively licensed).
+                 📝 For more details, check out our [blog post](https://huggingface.co/blog/starchat-alpha).
+                 ⚠️ **Intended Use**: this app and its [supporting models](https://huggingface.co/models?search=huggingfaceh4/starchat) are provided as educational tools to explain large language model fine-tuning; not to serve as replacement for human expertise.
+                 ⚠️ **Known Failure Modes**: the alpha and beta version of **StarChat** have not been aligned to human preferences with techniques like RLHF, so they can produce problematic outputs (especially when prompted to do so). Since the base model was pretrained on a large corpus of code, it may produce code snippets that are syntactically valid but semantically incorrect. For example, it may produce code that does not compile or that produces incorrect results. It may also produce code that is vulnerable to security exploits. We have observed the model also has a tendency to produce false URLs which should be carefully inspected before clicking. For more details on the model's limitations in terms of factuality and biases, see the [model card](https://huggingface.co/HuggingFaceH4/starchat-alpha#bias-risks-and-limitations).
+                 ⚠️ **Data Collection**: by default, we are collecting the prompts entered in this app to further improve and evaluate the models. Do **NOT** share any personal or sensitive information while using the app! You can opt out of this data collection by removing the checkbox below.
+                 """
+             )

+     with gr.Row():
+         do_save = gr.Checkbox(
+             value=True,
+             label="Store data",
+             info="You agree to the storage of your prompt and generated text for research and development purposes:",
          )
+
+     with gr.Row():
+         selected_model = gr.Radio(choices=model_names, value=model_names[1], label="Select a model")
+
+     with gr.Accordion(label="System Prompt", open=False, elem_id="parameters-accordion"):
+         system_message = gr.Textbox(
+             elem_id="system-message",
+             placeholder="Below is a conversation between a human user and a helpful AI coding assistant.",
+             show_label=False,
+         )
+     with gr.Row():
+         with gr.Box():
+             output = gr.Markdown()
+             chatbot = gr.Chatbot(elem_id="chat-message", label="Chat")
+
+     with gr.Row():
+         with gr.Column(scale=3):
+             user_message = gr.Textbox(placeholder="Enter your message here", show_label=False, elem_id="q-input")
+             with gr.Row():
+                 send_button = gr.Button("Send", elem_id="send-btn", visible=True)
+
+                 regenerate_button = gr.Button("Regenerate", elem_id="retry-btn", visible=True)
+
+                 delete_turn_button = gr.Button("Delete last turn", elem_id="delete-btn", visible=True)
+
+                 clear_chat_button = gr.Button("Clear chat", elem_id="clear-btn", visible=True)
+
+     with gr.Accordion(label="Parameters", open=False, elem_id="parameters-accordion"):
          temperature = gr.Slider(
              label="Temperature",
              value=0.2,
              minimum=0.0,
+             maximum=1.0,
              step=0.1,
              interactive=True,
              info="Higher values produce more diverse outputs",
          )
+         top_k = gr.Slider(
+             label="Top-k",
+             value=50,
+             minimum=0.0,
+             maximum=100,
+             step=1,
              interactive=True,
+             info="Sample from a shortlist of top-k tokens",
          )
          top_p = gr.Slider(
              label="Top-p (nucleus sampling)",
 
              interactive=True,
              info="Higher values sample more low-probability tokens",
          )
+         max_new_tokens = gr.Slider(
+             label="Max new tokens",
+             value=512,
+             minimum=0,
+             maximum=1024,
+             step=4,
+             interactive=True,
+             info="The maximum numbers of new tokens",
+         )
          repetition_penalty = gr.Slider(
+             label="Repetition Penalty",
              value=1.2,
+             minimum=0.0,
+             maximum=10,
+             step=0.1,
              interactive=True,
+             info="The parameter for repetition penalty. 1.0 means no penalty.",
          )
+     # with gr.Group(elem_id="share-btn-container"):
+     #     community_icon = gr.HTML(community_icon_html, visible=True)
+     #     loading_icon = gr.HTML(loading_icon_html, visible=True)
+     #     share_button = gr.Button("Share to community", elem_id="share-btn", visible=True)
+     with gr.Row():
+         gr.Examples(
+             examples=examples,
+             inputs=[user_message],
+             cache_examples=False,
+             fn=process_example,
+             outputs=[output],
          )
+
+     history = gr.State([])
+     RETRY_FLAG = gr.Checkbox(value=False, visible=False)
+
+     # To clear out "message" input textbox and use this to regenerate message
+     last_user_message = gr.State("")
+
+     user_message.submit(
+         generate,
+         inputs=[
+             RETRY_FLAG,
+             selected_model,
+             system_message,
+             user_message,
              chatbot,
+             history,
+             temperature,
+             top_k,
+             top_p,
+             max_new_tokens,
+             repetition_penalty,
+             do_save,
+         ],
+         outputs=[chatbot, history, last_user_message, user_message],
+     )

+     send_button.click(
+         generate,
+         inputs=[
+             RETRY_FLAG,
+             selected_model,
+             system_message,
+             user_message,
              chatbot,
+             history,
+             temperature,
+             top_k,
+             top_p,
+             max_new_tokens,
+             repetition_penalty,
+             do_save,
+         ],
+         outputs=[chatbot, history, last_user_message, user_message],
+     )
+
+     regenerate_button.click(
+         retry_last_answer,
+         inputs=[
+             selected_model,
+             system_message,
+             user_message,
+             chatbot,
+             history,
+             temperature,
+             top_k,
+             top_p,
+             max_new_tokens,
+             repetition_penalty,
+             do_save,
+         ],
+         outputs=[chatbot, history, last_user_message, user_message],
+     )
+
+     delete_turn_button.click(delete_last_turn, [chatbot, history], [chatbot, history])
+     clear_chat_button.click(clear_chat, outputs=[chatbot, history])
+     selected_model.change(clear_chat, outputs=[chatbot, history])
+     # share_button.click(None, [], [], _js=share_js)
+
+ demo.queue(concurrency_count=16).launch(debug=True)