abhillubillu committed on
Commit
b876f7f
1 Parent(s): 2ce0b80

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -10
app.py CHANGED
@@ -1,8 +1,16 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
- # Initialize the InferenceClient with the chosen model
5
- client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 
 
 
 
 
 
 
 
6
 
7
  def respond(
8
  message,
@@ -14,14 +22,16 @@ def respond(
14
  ):
15
  messages = [{"role": "system", "content": system_message}]
16
 
17
- for user_msg, bot_msg in history:
18
- messages.append({"role": "user", "content": user_msg})
19
- if bot_msg:
20
- messages.append({"role": "assistant", "content": bot_msg})
 
21
 
22
  messages.append({"role": "user", "content": message})
23
 
24
  response = ""
 
25
  for message in client.chat_completion(
26
  messages,
27
  max_tokens=max_tokens,
@@ -30,16 +40,19 @@ def respond(
30
  top_p=top_p,
31
  ):
32
  token = message.choices[0].delta.content
 
33
  response += token
34
  yield response
35
 
36
- # Setup the Gradio interface
 
 
37
  demo = gr.ChatInterface(
38
  respond,
39
  additional_inputs=[
40
- gr.Textbox(value="You are a friendly Chatbot. Your name is QuizBot, you are a code expert. Output everything in JSON format.", label="System message"),
41
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
42
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
43
  gr.Slider(
44
  minimum=0.1,
45
  maximum=1.0,
@@ -50,5 +63,6 @@ demo = gr.ChatInterface(
50
  ],
51
  )
52
 
 
53
  if __name__ == "__main__":
54
- demo.launch(show_error=True)
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
 
4
+ """
5
+ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
+ """
7
+
8
+ # client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
9
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
10
+ # client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct" , "HPAI-BSC/Llama3-Aloe-8B-Alpha")
11
+ # client = InferenceClient("Xenova/gpt-4o")
12
+ # client = InferenceClient("mistralai/mamba-codestral-7B-v0.1")
13
+ # client = InferenceClient("deepseek-ai/DeepSeek-Coder-V2-Instruct")
14
 
15
  def respond(
16
  message,
 
22
  ):
23
  messages = [{"role": "system", "content": system_message}]
24
 
25
+ for val in history:
26
+ if val[0]:
27
+ messages.append({"role": "user", "content": val[0]})
28
+ if val[1]:
29
+ messages.append({"role": "assistant", "content": val[1]})
30
 
31
  messages.append({"role": "user", "content": message})
32
 
33
  response = ""
34
+
35
  for message in client.chat_completion(
36
  messages,
37
  max_tokens=max_tokens,
 
40
  top_p=top_p,
41
  ):
42
  token = message.choices[0].delta.content
43
+
44
  response += token
45
  yield response
46
 
47
+ """
48
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
49
+ """
50
  demo = gr.ChatInterface(
51
  respond,
52
  additional_inputs=[
53
+ gr.Textbox(value="You are a friendly Chatbot.your name is GameApp ICG , you are a code expert . output everything in .json format . ", label="System message"),
54
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
55
+ gr.Slider(minimum=0.1, maximum=4.0, value=0.5, step=0.1, label="Temperature"),
56
  gr.Slider(
57
  minimum=0.1,
58
  maximum=1.0,
 
63
  ],
64
  )
65
 
66
+
67
  if __name__ == "__main__":
68
+ demo.launch()