ZennethKenneth committed
Commit 4f2e42d
1 Parent(s): 4663596

update system prompt

Files changed (1):
  app.py (+55 -39)

app.py CHANGED
@@ -1,40 +1,49 @@
 import gradio as gr
-from transformers import pipeline
-import os
-
-# Retrieve the Hugging Face API token from environment variables
-hf_token = os.getenv("HF_TOKEN")
-
-if not hf_token:
-    raise ValueError("API token is not set. Please set the HF_TOKEN environment variable in Space Settings.")
-
-def authenticate_and_generate(message, history, system_message, max_tokens, temperature, top_p):
-    try:
-        # Initialize the text-generation pipeline without use_auth_token if not needed
-        text_generator = pipeline("text-generation", model="gpt2")
-
-        # Ensure that system_message is a string
-        system_message = str(system_message)
-
-        # Construct the prompt with system message, history, and user input
-        history_str = "\n".join([f"User: {str(msg[0])}\nAssistant: {str(msg[1])}" for msg in history if isinstance(msg, (tuple, list)) and len(msg) == 2])
-        prompt = system_message + "\n" + history_str
-        prompt += f"\nUser: {message}\nAssistant:"
-
-        # Generate a response using the model
-        response = text_generator(prompt, max_length=max_tokens, temperature=temperature, top_p=top_p, do_sample=True, truncation=True)
-
-        # Extract the generated text from the response list
-        assistant_response = response[0]['generated_text']
-        # Optionally trim the assistant response if it includes the prompt again
-        assistant_response = assistant_response.split("Assistant:", 1)[-1].strip()
-        return assistant_response
-
-    except Exception as e:
-        return str(e)  # Return the error message for debugging
-
-athena = gr.ChatInterface(
-    fn=authenticate_and_generate,
+from huggingface_hub import InferenceClient
+
+"""
+For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+"""
+client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
+
+def respond(
+    message,
+    history: list[tuple[str, str]],
+    system_message,
+    max_tokens,
+    temperature,
+    top_p,
+):
+    messages = [{"role": "system", "content": system_message}]
+
+    for val in history:
+        if val[0]:
+            messages.append({"role": "user", "content": val[0]})
+        if val[1]:
+            messages.append({"role": "assistant", "content": val[1]})
+
+    messages.append({"role": "user", "content": message})
+
+    response = ""
+
+    for message in client.chat_completion(
+        messages,
+        max_tokens=max_tokens,
+        stream=True,
+        temperature=temperature,
+        top_p=top_p,
+    ):
+        token = message.choices[0].delta.content
+
+        response += token
+        yield response
+
+"""
+For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+"""
+demo = gr.ChatInterface(
+    respond,
     additional_inputs=[
         gr.Textbox(
             value="""
@@ -52,11 +61,18 @@ athena = gr.ChatInterface(
             """,
             label="System message"
         ),
-        gr.Slider(minimum=1, maximum=4096, value=512, step=1, label="Max new tokens"),
+        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
+        gr.Slider(
+            minimum=0.1,
+            maximum=1.0,
+            value=0.95,
+            step=0.05,
+            label="Top-p (nucleus sampling)",
+        ),
     ],
 )
 
+
 if __name__ == "__main__":
-    athena.launch()
+    demo.launch()
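
One caveat in the new `respond` loop: when streaming, the last chunk returned by `client.chat_completion` can arrive with `choices[0].delta.content` set to `None`, in which case `response += token` raises a `TypeError`. Below is a minimal standalone sketch of the same streaming pattern with a guard for empty deltas. The prompt strings are illustrative, and passing `HF_TOKEN` through the `token` parameter is an assumption about how this Space is configured; the client also works anonymously on public models, just with tighter rate limits.

import os

from huggingface_hub import InferenceClient

# Same client as app.py; token= is optional for public models (assumed
# here to come from the HF_TOKEN secret the old version checked for).
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta", token=os.getenv("HF_TOKEN"))

# Illustrative conversation; in the Space, the system prompt comes from the Textbox.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Say hello in one sentence."},
]

response = ""
for chunk in client.chat_completion(
    messages,
    max_tokens=64,
    stream=True,
    temperature=0.7,
    top_p=0.95,
):
    token = chunk.choices[0].delta.content
    if token:  # the final streamed chunk may carry no content
        response += token

print(response)

Because `respond` yields the accumulated string after every chunk, `gr.ChatInterface` renders the reply incrementally as it streams; the same `if token:` guard could be dropped into the loop in app.py unchanged.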