Blane187 committed
Commit
6cc7257
1 Parent(s): f720d17

Update app.py

Files changed (1): app.py (+13, -7)
app.py CHANGED
@@ -2,6 +2,8 @@ import gradio as gr
 from huggingface_hub import InferenceClient
 import edge_tts
 import asyncio
+import requests
+from bs4 import BeautifulSoup
 
 client = InferenceClient("google/gemma-2b-it")
 
@@ -44,15 +46,15 @@ def respond(message, history):
     messages = []
 
     functions_metadata = [
-        {"type": "function", "function": {"name": "web_search", "description": "Search query on google", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "web search query"}}, "required": ["query"]}}},
-    ]
+        {"type": "function", "function": {"name": "web_search", "description": "Search query on google", "parameters": {"type": "object", "properties": {"query": {"type": "string", "description": "web search query"}}, "required": ["query"]}}},
+    ]
 
-    system_messages = f'[SYSTEM]You are a helpful assistant, and respond without emoji, You have access to the following functions: \n {str(functions_metadata)}\n\nTo use these functions respond with:\n<functioncall> {{ "name": "function_name", "arguments": {{ "arg_1": "value_1", "arg_1": "value_1", ... }} }} </functioncall> [USER] {message_text}'
+    system_messages = f'[SYSTEM]You are a helpful assistant, and respond without emoji, You have access to the following functions: \n {str(functions_metadata)}\n\nTo use these functions respond with:\n<functioncall> {{ "name": "function_name", "arguments": {{ "arg_1": "value_1", "arg_1": "value_1", ... }} }} </functioncall> [USER] {message}'
     for val in history:
         if val[0]:
-            messages.append({"role": "user", "content": system_messages})
+            messages.append({"role": "user", "content": val[0]})
         if val[1]:
-            messages.append({"role": "assistant", "content": system_messages})
+            messages.append({"role": "assistant", "content": val[1]})
 
     messages.append({"role": "user", "content": message})
 
@@ -69,23 +71,27 @@ def respond(message, history):
         response += token
         yield response
 
+
 async def tts(response):
     tts = edge_tts.Communicate(response, voice="en-US-JennyNeural")
     await tts.save("output.mp3")
     return "output.mp3"
 
+
 def generate_tts(response):
     return asyncio.run(tts(response))
 
+
 def process(message, history):
     response = list(respond(message, history))[-1]
     tts_file = generate_tts(response)
     history.append((message, response))
     return history, history, tts_file
 
+
 with gr.Blocks() as demo:
     gr.Markdown("<h1><center>gemma-2b-it</center></h1>")
-    
+
     chatbot = gr.Chatbot()
     audio = gr.Audio(label='Your audio output')
 
@@ -93,7 +99,7 @@ with gr.Blocks() as demo:
     submit_button = gr.Button("Submit")
 
     submit_button.click(
-        process,
+        process,
        inputs=[input_text, chatbot],
        outputs=[chatbot, chatbot, audio]
    )
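Note: the new requests and BeautifulSoup imports suggest a web-search helper backing the "web_search" function declared in functions_metadata, but the hunk that implements it is not part of this diff. A minimal sketch of what such a helper could look like, assuming plain HTML scraping of a search results page (the function name web_search, the URL, and the truncation limit are illustrative assumptions, not taken from this commit):

# Hypothetical helper matching the "web_search" entry in functions_metadata.
# Not shown in this commit's hunks; illustrative only.
import requests
from bs4 import BeautifulSoup


def web_search(query: str, max_chars: int = 2000) -> str:
    """Fetch a search results page and return its visible text, truncated."""
    resp = requests.get(
        "https://www.google.com/search",
        params={"q": query},
        headers={"User-Agent": "Mozilla/5.0"},  # unadorned requests are often rejected
        timeout=10,
    )
    resp.raise_for_status()
    soup = BeautifulSoup(resp.text, "html.parser")
    # Drop tags and collapse whitespace; a real implementation would target result nodes.
    text = " ".join(soup.get_text(separator=" ").split())
    return text[:max_chars]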
 
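Note: the updated system prompt asks the model to wrap tool calls in <functioncall> ... </functioncall> tags, but none of the visible hunks parse that reply. One hedged way to extract and validate such a call before dispatching it (maybe_dispatch_function and FUNCTIONCALL_RE are illustrative names, not part of app.py):

# Hypothetical parser for the "<functioncall> ... </functioncall>" format requested
# by the system prompt; the commit does not show this step.
import json
import re

FUNCTIONCALL_RE = re.compile(r"<functioncall>\s*(\{.*\})\s*</functioncall>", re.DOTALL)


def maybe_dispatch_function(model_output: str):
    """Return (name, arguments) if the reply contains a function call, else None."""
    match = FUNCTIONCALL_RE.search(model_output)
    if not match:
        return None
    try:
        call = json.loads(match.group(1))
    except json.JSONDecodeError:
        return None  # malformed JSON: treat the reply as plain text
    return call.get("name"), call.get("arguments", {})

For example, a reply containing <functioncall> {"name": "web_search", "arguments": {"query": "gemma 2b"}} </functioncall> would yield ("web_search", {"query": "gemma 2b"}).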
 
 
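Note: tts() always writes to a fixed output.mp3, so two users hitting the Space at the same time would overwrite each other's audio. A possible variant, assuming only the edge_tts API already used above, that renders each reply to its own temporary file (tts_to_tempfile is an illustrative name, not in the commit):

# Sketch of a per-request output file for the TTS step; illustrative only.
import asyncio
import tempfile

import edge_tts


async def tts_to_tempfile(text: str, voice: str = "en-US-JennyNeural") -> str:
    """Render text to speech into a unique .mp3 and return its path."""
    out = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False)
    out.close()  # edge_tts writes to the path itself
    await edge_tts.Communicate(text, voice=voice).save(out.name)
    return out.name


def generate_tts(text: str) -> str:
    # Works from a plain synchronous callback, where no event loop is already
    # running in the current thread.
    return asyncio.run(tts_to_tempfile(text))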