Update app.py
app.py CHANGED

@@ -36,12 +36,12 @@ tokenizer = transformers.AutoTokenizer.from_pretrained(
 
 DESCRIPTION = """
 # Stable Beluga 7B Chat
-This is a streaming Chat Interface implementation of [StableBeluga-7B](https://huggingface.co/stabilityai/StableBeluga-7B)
-You can modify the system prompt, which can be quite fun. For example, you can try something like "You are a mean AI. Phrase all replies as insults" for a good laugh.
+This is a streaming Chat Interface implementation of [StableBeluga-7B](https://huggingface.co/stabilityai/StableBeluga-7B). We'll use it to deploy a Discord bot that you can add to your server!
 
 Sometimes the model doesn't appropriately hit its stop token. Feel free to hit "stop" and "retry" if this happens to you. Or PR a fix to stop the stream if the tokens for User: get hit or something.
 """
 
+system_prompt = "You are helpful AI."
 
 def prompt_build(system_prompt, user_inp, hist):
     prompt = f"""### System:\n{system_prompt}\n\n"""
@@ -52,7 +52,7 @@ def prompt_build(system_prompt, user_inp, hist):
     prompt += f"""### User:\n{user_inp}\n\n### Assistant:"""
     return prompt
 
-def chat(user_input, history, system_prompt):
+def chat(user_input, history):
 
     prompt = prompt_build(system_prompt, user_input, history)
     model_inputs = tokenizer([prompt], return_tensors="pt").to("cuda")
@@ -80,7 +80,6 @@ def chat(user_input, history, system_prompt):
 
 with gr.Blocks() as demo:
     gr.Markdown(DESCRIPTION)
-
-    chatbot = gr.ChatInterface(fn=chat, additional_inputs=[system_prompt])
+    chatbot = gr.ChatInterface(fn=chat)
 
 demo.queue().launch()
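
With additional_inputs removed, gr.ChatInterface passes only the message and the history to fn, so chat drops its third parameter and reads the module-level system_prompt constant instead of a value from the UI.

For reference, prompt_build assembles the Stable Beluga prompt format. A minimal sketch of what it returns for a fresh conversation (empty history), assuming the function lines elided from this diff only append prior turns between the system block and the final user block:

    prompt_build(system_prompt, "Hello!", [])

returns roughly:

    ### System:
    You are helpful AI.

    ### User:
    Hello!

    ### Assistant: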
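
The generation loop inside chat is elided from this diff. A typical way to stream tokens from a transformers model out of a Gradio generator function looks roughly like the sketch below; the model variable, the sampling settings, and max_new_tokens are assumptions here, not the Space's actual code.

    # Sketch only: the Space's real generation code is not shown in this diff.
    # Assumes `model` is the StableBeluga-7B causal LM already loaded on the GPU.
    from threading import Thread
    from transformers import TextIteratorStreamer

    def chat(user_input, history):
        prompt = prompt_build(system_prompt, user_input, history)
        model_inputs = tokenizer([prompt], return_tensors="pt").to("cuda")

        # Stream decoded text as it is generated, skipping the echoed prompt.
        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
        generate_kwargs = dict(model_inputs, streamer=streamer, max_new_tokens=512, do_sample=True)

        # Run generate() on a background thread so partial output can be yielded.
        Thread(target=model.generate, kwargs=generate_kwargs).start()

        partial_text = ""
        for new_text in streamer:
            partial_text += new_text
            yield partial_text  # gr.ChatInterface shows each yielded string as the reply so far

A loop like this is also one place to address the stop-token note in DESCRIPTION: it could break out early once the accumulated text contains "### User:".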