Baweja committed
Commit 90e89a4
1 Parent(s): 3ed7f9a

Rename app_old.py to app_R.py

Files changed (1)
  1. app_old.py → app_R.py +20 -103

app_old.py → app_R.py RENAMED
@@ -1,79 +1,21 @@
- # import gradio as gr
- # from huggingface_hub import InferenceClient
-
- # """
- # For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- # """
- # client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
- # def respond(
- #     message,
- #     history: list[tuple[str, str]],
- #     system_message,
- #     max_tokens,
- #     temperature,
- #     top_p,
- # ):
- #     messages = [{"role": "system", "content": system_message}]
-
- #     for val in history:
- #         if val[0]:
- #             messages.append({"role": "user", "content": val[0]})
- #         if val[1]:
- #             messages.append({"role": "assistant", "content": val[1]})
-
- #     messages.append({"role": "user", "content": message})
-
- #     response = ""
-
- #     for message in client.chat_completion(
- #         messages,
- #         max_tokens=max_tokens,
- #         stream=True,
- #         temperature=temperature,
- #         top_p=top_p,
- #     ):
- #         token = message.choices[0].delta.content
-
- #         response += token
- #         yield response
-
- # """
- # For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- # """
- # demo = gr.ChatInterface(
- #     respond,
- #     additional_inputs=[
- #         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
- #         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
- #         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
- #         gr.Slider(
- #             minimum=0.1,
- #             maximum=1.0,
- #             value=0.95,
- #             step=0.05,
- #             label="Top-p (nucleus sampling)",
- #         ),
- #     ],
- # )
-
-
- # if __name__ == "__main__":
- #     demo.launch()
-
-
-
-
  import gradio as gr
  import torch
- from transformers import RagRetriever, RagSequenceForGeneration, AutoTokenizer
+ from transformers import RagRetriever, RagSequenceForGeneration

- """
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
- """
+ # Load model
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

+ dataset_path = "./sample/my_knowledge_dataset"
+ index_path = "./sample/my_knowledge_dataset_hnsw_index.faiss"
+
+ retriever = RagRetriever.from_pretrained("facebook/rag-sequence-nq", index_name="custom",
+                                          passages_path=dataset_path,
+                                          index_path=index_path,
+                                          n_docs=5)
+ rag_model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)
+ rag_model.retriever.init_retrieval()
+ rag_model.to(device)
+
  def strip_title(title):
      if title.startswith('"'):
          title = title[1:]
@@ -81,7 +23,7 @@ def strip_title(title):
          title = title[:-1]
      return title

- def retrieved_info(rag_model, query):
+ def retrieved_info(query, rag_model=rag_model):
      # Tokenize query
      retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
          [query],
@@ -109,16 +51,11 @@ def retrieved_info(rag_model, query):
      titles = [strip_title(title) for title in docs["title"]]
      texts = docs["text"]
      for title, text in zip(titles, texts):
-         #print(f"Title: {title}")
-         #print(f"Context: {text}")
          retrieved_context.append(f"{title}: {text}")

      answer = retrieved_context
      return answer

-
-
-
  def respond(
      message,
      history: list[tuple[str, str]],
@@ -127,21 +64,7 @@ def respond(
      temperature,
      top_p,
  ):
-     # Load model
-     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-     dataset_path = "./sample/my_knowledge_dataset"
-     index_path = "./sample/my_knowledge_dataset_hnsw_index.faiss"
-
-     tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq")
-     retriever = RagRetriever.from_pretrained("facebook/rag-sequence-nq", index_name="custom",
-                                              passages_path=dataset_path,
-                                              index_path=index_path,
-                                              n_docs=5)
-     rag_model = RagSequenceForGeneration.from_pretrained('facebook/rag-sequence-nq', retriever=retriever)
-     rag_model.retriever.init_retrieval()
-     rag_model.to(device)
-
+
      if message:  # If there's a user query
-         response = retrieved_info(rag_model, message)  # Get the answer from your local FAISS and Q&A model
+         response = retrieved_info(message)  # Query the local FAISS index; rag_model now defaults to the module-level model
          return response[0]
@@ -150,22 +73,16 @@ def respond(
          return ""


-
- """
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
- """
  # Custom title and description
  title = "🧠 Welcome to Your AI Knowledge Assistant"
  description = """
- HI!!, I am a chatbot, I retrieves relevant information from a custom dataset using RAG. Ask any question, and let me assist you.
- My capabilities and knowledge is limited right now because of computational resources. Originally I can acess more than a million files
- from my knowledge-base but, right now, I am limited to less than 1000 files. LET'S BEGGINNNN......
+ Hi! I am your loyal assistant. I am built on a RAG model: I retrieve relevant information and answer your question based on it. Ask me anything, and let me assist you.
+ My capabilities are limited while I am still in development, but I will do my best. SO LET'S BEGIN...
  """

  demo = gr.ChatInterface(
      respond,
      type='messages',
-     submit_btn=True,
      additional_inputs=[
          gr.Textbox(value="You are a helpful and friendly assistant.", label="System message"),
          gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
@@ -180,12 +97,12 @@ demo = gr.ChatInterface(
      ],
      title=title,
      description=description,
+     submit_btn=True,
-     textbox=gr.Textbox(placeholder=["'What is the future of AI?' or 'App Development'"]),
+     textbox=gr.Textbox(placeholder="'What is the future of AI?' or 'App Development'"),
      examples=[["✨Future of AI"], ["📱App Development"]],
-     example_icons=["🤖", "📱"],
+     # example_icons=["🤖", "📱"],
      theme="compact",
  )

-
  if __name__ == "__main__":
-     demo.launch(share = True )
+     demo.launch(share=True)
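
With the retriever and model now initialized at module level rather than inside respond(), the retrieval path can be exercised outside Gradio. A minimal smoke test, assuming the dataset and index paths above exist on disk; the query string is just an example:

# Hypothetical smoke test for app_R.py's retrieval path.
# Importing app_R loads the RAG model and FAISS index, so this takes a while.
from app_R import retrieved_info

# retrieved_info returns a list of "title: text" strings for the top n_docs=5 passages;
# the app itself surfaces only the first one.
print(retrieved_info("What is the future of AI?")[0])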
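
Both versions of the app assume that ./sample/my_knowledge_dataset and its HNSW index already exist on disk. For reference, here is a minimal sketch of how such a dataset and index can be built, modeled on the Hugging Face RAG example scripts; the toy passages, batch size, and HNSW parameters are illustrative assumptions, not part of this commit:

# Sketch: build the title/text/embeddings dataset and FAISS HNSW index
# that RagRetriever(index_name="custom") expects. Toy data, illustrative only.
import faiss
import torch
from datasets import Dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast

device = "cuda" if torch.cuda.is_available() else "cpu"
ctx_encoder = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-multiset-base").to(device)
ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained("facebook/dpr-ctx_encoder-multiset-base")

def embed(batch):
    # DPR passage embeddings for each (title, text) pair
    inputs = ctx_tokenizer(batch["title"], batch["text"], truncation=True,
                           padding="longest", return_tensors="pt").to(device)
    return {"embeddings": ctx_encoder(**inputs).pooler_output.detach().cpu().numpy()}

dataset = Dataset.from_dict({
    "title": ["Future of AI", "App Development"],      # hypothetical passages
    "text": ["AI systems are expected to ...", "Modern apps are built with ..."],
})
dataset = dataset.map(embed, batched=True, batch_size=16)
dataset.save_to_disk("./sample/my_knowledge_dataset")

# 768-dim inner-product HNSW index, saved separately from the dataset,
# matching the dataset_path / index_path pair that app_R.py loads
index = faiss.IndexHNSWFlat(768, 128, faiss.METRIC_INNER_PRODUCT)
dataset.add_faiss_index("embeddings", custom_index=index)
dataset.get_index("embeddings").save("./sample/my_knowledge_dataset_hnsw_index.faiss")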