KingNish committed on
Commit f98760e
1 Parent(s): dd4fd98

Update app.py

Files changed (1):
  1. app.py +55 -67
app.py CHANGED
@@ -12,22 +12,6 @@ from huggingface_hub import hf_hub_download, InferenceClient
  import requests
  from bs4 import BeautifulSoup
  import urllib
- import random
-
- # List of user agents to choose from for requests
- _useragent_list = [
-     'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:66.0) Gecko/20100101 Firefox/66.0',
-     'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
-     'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
-     'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',
-     'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36',
-     'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62',
-     'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/111.0'
- ]
-
- def get_useragent():
-     """Returns a random user agent from the list."""
-     return random.choice(_useragent_list)
 
  def extract_text_from_webpage(html_content):
      """Extracts visible text from HTML content using BeautifulSoup."""
@@ -39,54 +23,58 @@ def extract_text_from_webpage(html_content):
      visible_text = soup.get_text(strip=True)
      return visible_text
 
- # Perform a Google search and return the results
- def search(term, num_results=3, lang="en", advanced=True, timeout=5, safe="active", ssl_verify=None):
+ def search(term, num_results=1, lang="en", advanced=True, sleep_interval=0, timeout=5, safe="active", ssl_verify=None):
      """Performs a Google search and returns the results."""
      escaped_term = urllib.parse.quote_plus(term)
      start = 0
      all_results = []
-     # Limit the number of characters from each webpage to stay under the token limit
-     max_chars_per_page = 3000  # Adjust this value based on your token limit and average webpage length
-
-     with requests.Session() as session:
-         while start < num_results:
-             resp = session.get(
-                 url="https://www.google.com/search",
-                 headers={"User-Agent":get_useragent()},
-                 params={
-                     "q": term,
-                     "num": num_results - start,
-                     "hl": lang,
-                     "start": start,
-                     "safe": safe,
-                 },
-                 timeout=timeout,
-                 verify=ssl_verify,
-             )
-             resp.raise_for_status()
-             soup = BeautifulSoup(resp.text, "html.parser")
-             result_block = soup.find_all("div", attrs={"class": "g"})
-             if not result_block:
-                 start += 1
-                 continue
-             for result in result_block:
-                 link = result.find("a", href=True)
-                 if link:
-                     link = link["href"]
-                     try:
-                         webpage = session.get(link, headers={"User-Agent": get_useragent()})
-                         webpage.raise_for_status()
-                         visible_text = extract_text_from_webpage(webpage.text)
-                         # Truncate text if it's too long
-                         if len(visible_text) > max_chars_per_page:
-                             visible_text = visible_text[:max_chars_per_page] + "..."
-                         all_results.append({"text": visible_text})
-                     except requests.exceptions.RequestException as e:
-                         print(f"Error fetching or processing {link}: {e}")
-                         all_results.append({"text": None})
-                 else:
-                     all_results.append({"text": None})
-             start += len(result_block)
+
+     # Fetch results in batches
+     while start < num_results:
+         resp = requests.get(
+             url="https://www.google.com/search",
+             headers={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62"},  # Set random user agent
+             params={
+                 "q": term,
+                 "num": num_results - start,  # Number of results to fetch in this batch
+                 "hl": lang,
+                 "start": start,
+                 "safe": safe,
+             },
+             timeout=timeout,
+             verify=ssl_verify,
+         )
+         resp.raise_for_status()  # Raise an exception if request fails
+
+         soup = BeautifulSoup(resp.text, "html.parser")
+         result_block = soup.find_all("div", attrs={"class": "g"})
+
+         # If no results, continue to the next batch
+         if not result_block:
+             start += 1
+             continue
+
+         # Extract link and text from each result
+         for result in result_block:
+             link = result.find("a", href=True)
+             if link:
+                 link = link["href"]
+                 try:
+                     # Fetch webpage content
+                     webpage = requests.get(link, headers={"User-Agent": get_useragent()})
+                     webpage.raise_for_status()
+                     # Extract visible text from webpage
+                     visible_text = extract_text_from_webpage(webpage.text)
+                     all_results.append({"link": link, "text": visible_text})
+                 except requests.exceptions.RequestException as e:
+                     # Handle errors fetching or processing webpage
+                     print(f"Error fetching or processing {link}: {e}")
+                     all_results.append({"link": link, "text": None})
+             else:
+                 all_results.append({"link": None, "text": None})
+
+         start += len(result_block)  # Update starting index for next batch
+
      return all_results
 
  # Speech Recognition Model Configuration
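
For context, the reworked search() above fetches results batch-by-batch with a fixed desktop User-Agent, drops the old per-page truncation, and now returns a list of {"link": ..., "text": ...} dicts. Its body still calls get_useragent() when fetching each result page, although this commit removes that helper in the first hunk, so the shim below is a hypothetical stand-in (not part of the commit) that would let the new function run on its own, followed by an illustrative standalone call.

# Hypothetical shim: reuse the same fixed User-Agent string the search request itself uses.
def get_useragent():
    return ("Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
            "(KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62")

# Illustrative call: each entry is {"link": <url or None>, "text": <visible text or None>}.
for res in search("huggingface inference api", num_results=1):
    print(res["link"], (res["text"] or "")[:80])
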
@@ -100,7 +88,7 @@ tokenizer = spm.SentencePieceProcessor(hf_hub_download(model_name, "tokenizer.sp
 
  # Mistral Model Configuration
  client1 = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
- system_instructions1 = "<s>[SYSTEM] Answer as Real OpenGPT 4o, Made by 'KingNish', Keep conversation very short, clear, friendly and concise. The text provided is a request for a specific type of response from you, the virtual assistant. The request asks you to provide friendly responses. The expectation is that I will avoid introductions and start answering the query directly, elaborating on all aspects of the request. As an AI-powered assistant, your task is to generate responses that appear as if they are created by the user and not an AI-powered assistant. [USER]"
+ system_instructions1 = "<s>[SYSTEM] Answer as Real Jarvis JARVIS, Made by 'Tony Stark', Keep conversation very short, clear, friendly and concise. The text provided is a request for a specific type of response from you, the virtual assistant. The request asks you to provide friendly responses as if You are the character Jarvis, made by 'Tony Stark.' The expectation is that I will avoid introductions and start answering the query directly, elaborating on all aspects of the request. As an AI-powered assistant, your task is to generate responses that appear as if they are created by the user and not an AI-powered assistant. [USER]"
 
  def resample(audio_fp32, sr):
      return soxr.resample(audio_fp32, sr, sample_rate)
@@ -132,12 +120,12 @@ def model(text, web_search):
      if web_search is True:
          """Performs a web search, feeds the results to a language model, and returns the answer."""
          web_results = search(text)
-         web2 = ' '.join([f"Text: {res['text']}\n\n" for res in web_results])
-         formatted_prompt = system_instructions1 + text + "[WEB]" + str(web2) + "[OpenGPT 4o]"
+         web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
+         formatted_prompt = system_instructions1 + text + "[WEB]" + str(web2) + "[ANSWER]"
          stream = client1.text_generation(formatted_prompt, max_new_tokens=512, stream=True, details=True, return_full_text=False)
          return "".join([response.token.text for response in stream if response.token.text != "</s>"])
      else:
-         formatted_prompt = system_instructions1 + text + "[OpenGPT 4o]"
+         formatted_prompt = system_instructions1 + text + "[JARVIS]"
          stream = client1.text_generation(formatted_prompt, max_new_tokens=512, stream=True, details=True, return_full_text=False)
          return "".join([response.token.text for response in stream if response.token.text != "</s>"])
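
To make the revised prompt layout concrete, here is a small sketch of how the web-search branch assembles formatted_prompt after this change; the query and the single fake result are made up for illustration, and system_instructions1 is the JARVIS system prompt defined earlier in the file.

# Illustrative stand-in for search(text) output.
web_results = [{"link": "https://example.com", "text": "Example Domain. For use in illustrative examples."}]
web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])

text = "What is example.com?"
formatted_prompt = system_instructions1 + text + "[WEB]" + str(web2) + "[ANSWER]"
# The prompt reads: system instructions, the user query, the [WEB] context block, then the [ANSWER] cue.
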
 
@@ -153,9 +141,9 @@ async def respond(audio, web_search):
  with gr.Blocks() as demo:
      with gr.Row():
          web_search = gr.Checkbox(label="Web Search", value=False)
-         input = gr.Audio(label="Voice Chat", sources="microphone")
-         output = gr.Audio(label="AI",autoplay=True)
+         input = gr.Audio(label="Voice Chat (BETA)", sources="microphone", type="filepath", waveform_options=False)
+         output = gr.Audio(label="JARVIS", type="filepath", interactive=False, autoplay=True, elem_classes="audio")
      gr.Interface(fn=respond, inputs=[input, web_search], outputs=[output], live=True)
-
+
  if __name__ == "__main__":
      demo.queue(max_size=200).launch()
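
For reference, a minimal sketch of a handler that fits the updated wiring: with type="filepath" on both audio components, respond receives the recording as a file path and should return a path to the reply audio. The stub below is a placeholder, not the app's real transcribe-generate-synthesize pipeline.

# Hypothetical stub matching the gr.Interface signature above.
async def respond(audio, web_search):
    if audio is None:   # nothing recorded yet
        return None
    # Real app: transcribe the file, call model(text, web_search), then synthesize speech.
    return audio        # echo the input file path as a placeholder
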
 