Upload 2 files
Upload mainflow notebook
- mainflow.ipynb +329 -0
- utils.py +245 -0
mainflow.ipynb
ADDED
In [12]:
from ctransformers import AutoModelForCausalLM, AutoConfig
import utils
import json
from llama_cpp import Llama

# load the local .gguf model (llama-cpp-python is used; the CTransformers variants are kept commented out)


def configure():
    path = "models/theblokeai/Mistral-7B-Instruct-v0.2.Q4_K_M"
    # llm = CTransformers(
    #     model=path, max_new_tokens=1024, context_length=1024, gpu_layers=20
    # )
    # llm = AutoModelForCausalLM.from_pretrained(
    #     path=path,
    #     model_file="mistral-7b-instruct-v0.2.Q4_K_M.gguf",
    #     local_files_only=True,
    # )
    llm = Llama(
        model_path=path + "/mistral-7b-instruct-v0.2.Q4_K_M.gguf",  # download the model file first
        n_ctx=1024,      # max sequence length; longer sequences require much more resources
        n_threads=4,     # number of CPU threads, tailor to your system
        n_batch=1024,
        verbose=False,   # set to True to see the full configuration, but expect logs on every model call
        # n_gpu_layers=35,  # number of layers to offload to GPU, if GPU acceleration is available
    )
    db = utils.ArxivChroma()
    sqldb = utils.ArxivSQL()
    print("Configuration ok")
    return llm, sqldb, db


model, sqldb, db = configure()

Output (stdout):
Configuration ok
In [2]:
def extract_keyword_prompt(query):
    """A prompt that returns a JSON block as arguments for querying the database"""

    prompt = (
        """[INST] You are an assistant that chooses only one action below based on the guest's question.
        1. If the guest's question asks for a specific document or article, you need to respond with the information in JSON format with 2 keys "title", "author" if any are found. The authors are separated with the word 'and'.
        2. If the guest's question asks for relevant information about a topic, you need to respond with the information in JSON format with 2 keys "keywords", "description": include a list of keywords that represent the main academic topic, and a description of the main topic. You may paraphrase the keywords to add more.
        3. If the guest is not asking for any information or documents, you need to respond with a polite answer in JSON format with 1 key "answer".
        QUESTION: '{query}'
        [/INST]
        ANSWER:
        """
    ).format(query=query)

    return prompt


def make_answer_prompt(input, contexts):
    """A prompt that returns the final answer, based on the queried context"""

    prompt = (
        """[INST] You are a library assistant that helps to search articles and documents based on the user's question.
        From the guest's question, you have found some records and documents that may help. Now you need to answer the guest with the information found.
        You should answer politely, in a conversational form.
        QUESTION: '{input}'
        INFORMATION: '{contexts}'
        [/INST]
        ANSWER:
        """
    ).format(input=input, contexts=contexts)

    return prompt
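The keyword-extraction prompt is expected to make the model emit one of three JSON shapes, which the following cells parse with utils.trimming and json.loads. As a minimal illustration (the field values below are made up, not model output):

import json
import utils

examples = [
    '{"title": "Some paper title", "author": "A. Author and B. Author"}',      # action 1: specific document
    '{"keywords": ["topic a", "topic b"], "description": "a short summary"}',  # action 2: topic search
    '{"answer": "Hello! How can I help you today?"}',                          # action 3: no retrieval needed
]
for raw in examples:
    args = json.loads(utils.trimming(raw))  # trimming() keeps only the {...} block
    print(list(args.keys()))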
In [15]:
def response(args):
    """Create response context, based on input arguments"""
    keys = list(args.keys())
    if "answer" in keys:
        return args['answer'], None  # answer directly, no records to show
    if "keywords" in keys:
        # perform a semantic query on the vector store
        query_texts = args["description"]
        keywords = args["keywords"]
        results = db.query_relevant(keywords=keywords, query_texts=query_texts)
        # print(results)
        ids = results['metadatas'][0]
        paper_id = [id['paper_id'] for id in ids]
        paper_info = sqldb.query_id(paper_id)
        # print(paper_info)
        records = []  # get title (2), author (3), link (6)
        result_string = ""
        for i in range(len(paper_id)):
            result_string += "Title: {}, Author: {}, Link: {}".format(paper_info[i][2], paper_info[i][3], paper_info[i][6])
            records.append([paper_info[i][2], paper_info[i][3], paper_info[i][6]])
        # process results:
        return result_string, records
        # invoke llm and return result
    if "title" in keys:
        paper_info = sqldb.query(title=args['title'], author=args['author'])
        # if the query finds nothing, fall back to crawling arXiv (not implemented here)
        # -------------------------------------
        records = []  # get title (2), author (3), link (6)
        result_string = ""
        for i in range(len(paper_info)):
            result_string += "Title: {}, Author: {}, Link: {}".format(paper_info[i][2], paper_info[i][3], paper_info[i][6])
            records.append([paper_info[i][2], paper_info[i][3], paper_info[i][6]])
        # process results:
        if len(result_string) == 0:
            return "Information not found", None
        return result_string, records
        # invoke llm and return result
In [23]:
# test first step
# input_prompt = input()
input_prompt = "I'm working on a LSTM model to recognize actions in a video, recommend me some related papers"
first_prompt = extract_keyword_prompt(input_prompt)
# print(first_prompt)
# answer = model.invoke(first_prompt,
#                       temperature=0.0)  # ctrans
answer = model(prompt=first_prompt,
               temperature=0.0)  # llama
print("--------------------------")
print(answer)
args = json.loads(utils.trimming(answer))
# print(args)
response(args)

Output (stdout):
--------------------------
 {
 "keywords": ["LSTM model", "video analysis", "action recognition"],
 "description": "For recognizing actions in videos using Long Short-Term Memory (LSTM) models, you may find the following papers insightful. These studies explore various aspects of action recognition using LSTM: (1) 'Action Recognition with 3D Convolutional Neural Networks and Long Short-Term Memory' by Tran et al., 2015; (2) 'ConvLSTM: A Convolutional Neural Network for Video Recognition' by Shi et al., 2015; (3) 'Using Long-Short Term Memory for Action Recognition in Videos' by Graves, 2013."
 }
For recognizing actions in videos using Long Short-Term Memory (LSTM) models, you may find the following papers insightful. These studies explore various aspects of action recognition using LSTM: (1) 'Action Recognition with 3D Convolutional Neural Networks and Long Short-Term Memory' by Tran et al., 2015; (2) 'ConvLSTM: A Convolutional Neural Network for Video Recognition' by Shi et al., 2015; (3) 'Using Long-Short Term Memory for Action Recognition in Videos' by Graves, 2013.
['LSTM model', 'video analysis', 'action recognition']
In [10]:
# test response, second step
input_prompt = "I'm working on a LSTM model to recognize actions in a video, recommend me some related papers"
args = {
    "keywords": ["LSTM model", "video analysis", "action recognition"],
    "description": "For recognizing actions in videos using Long Short-Term Memory (LSTM) models, you may find the following papers insightful. These studies explore various aspects of action recognition using LSTM: (1) 'Action Recognition with 3D Convolutional Neural Networks and Long Short-Term Memory' by Tran et al., 2015; (2) 'ConvLSTM: A Convolutional Neural Network for Video Recognition' by Shi et al., 2015; (3) 'Using Long-Short Term Memory for Action Recognition in Videos' by Graves, 2013."
    }
contexts, results = response(args)
if not results:
    # direct answer
    print(contexts)
else:
    output_prompt = make_answer_prompt(input_prompt, contexts)
    # answer = model.invoke(output_prompt,
    #                       temperature=0.3)  # ctrans
    answer = model(prompt=output_prompt,
                   temperature=0.0,
                   max_tokens=1024,
                   )  # llama
    print("------------------------")
    print(answer['choices'][0]['text'])

Output (stdout):
For recognizing actions in videos using Long Short-Term Memory (LSTM) models, you may find the following papers insightful. These studies explore various aspects of action recognition using LSTM: (1) 'Action Recognition with 3D Convolutional Neural Networks and Long Short-Term Memory' by Tran et al., 2015; (2) 'ConvLSTM: A Convolutional Neural Network for Video Recognition' by Shi et al., 2015; (3) 'Using Long-Short Term Memory for Action Recognition in Videos' by Graves, 2013.
['LSTM model', 'video analysis', 'action recognition']

Output (stderr):
Llama.generate: prefix-match hit

llama_print_timings:        load time = 139768.70 ms
llama_print_timings:      sample time =    140.16 ms /   412 runs   (    0.34 ms per token,  2939.41 tokens per second)
llama_print_timings: prompt eval time =      0.00 ms /     1 tokens (    0.00 ms per token,      inf tokens per second)
llama_print_timings:        eval time =  91570.34 ms /   412 runs   (  222.26 ms per token,     4.50 tokens per second)
llama_print_timings:       total time =  93048.98 ms /   413 tokens

Output (stdout):
------------------------
Hello there! I see that you're working on an LSTM model for recognizing actions in videos. I have found some related papers that might be of interest to you.

 1. The first one is titled "Action in Mind: A Neural Network Approach to Action Recognition and Segmentation" by Zahra Gharae. This paper proposes a neural network approach for action recognition and segmentation, which could provide some insights into your work with LSTM models. You can find it at this link: <http://arxiv.org/pdf/2104.14870v1>

 2. Another interesting paper is "Deep Neural Networks in Video Human Action Recognition: A Review" by Zihan Wang, Yang Yang, Zhi Liu, and Yifan Zhen. This review discusses the application of deep neural networks for video human action recognition. It could give you a broader perspective on the current state-of-the-art methods in this field. You can access it here: <http://arxiv.org/pdf/2305.15692v1>

 3. Lastly, there's "3D Convolutional Neural Networks for Ultrasound-Based Silent Speech Interfaces" by László Tóth and Amin Honarmandi Shandi. Although the title might not seem directly related to your work on LSTM models for action recognition, it does involve the use of 3D convolutional neural networks in video processing, which could still provide valuable insights. You can read it at this link: <http://arxiv.org/pdf/2104.11532v1>

 I hope you find these resources helpful! Let me know if there's anything else I can assist you with. Have a great day!
In [16]:
# full chain
# inference time depends on hardware ability :')
# with CPU i7-8750H, 16GB RAM and no CUDA GPU, expect inference time of up to 5-6 min
# input_prompt = input()
input_prompt = "I'm working on a LSTM model to recognize actions in a video, recommend me some related papers"
first_prompt = extract_keyword_prompt(input_prompt)
# print(first_prompt)
# answer = model.invoke(first_prompt,
#                       temperature=0.0)  # ctrans, answer is a string
answer = model(prompt=first_prompt,
               temperature=0.0,
               max_tokens=512,
               )  # llama, answer is a dict
print("--------------------------")
# print(answer['choices'][0]['text'])  # see the intermediate answer
args = json.loads(utils.trimming(answer['choices'][0]['text']))
contexts, results = response(args)
if not results:
    # direct answer
    print(contexts)
else:
    output_prompt = make_answer_prompt(input_prompt, contexts)
    # answer = model.invoke(output_prompt,
    #                       temperature=0.3)  # ctrans, answer is a string
    answer = model(prompt=output_prompt,
                   temperature=0.0,
                   max_tokens=1024,
                   )  # llama, answer is a dict
    print("--------------------------")
    print(answer['choices'][0]['text'])

Output (stdout):
--------------------------
------------------------
Hello there! I see that you're working on an LSTM model for recognizing actions in videos. I have some related papers that might be of interest to you.

 1. The first paper is titled "Deep Neural Networks in Video Human Action Recognition: A Review" by Zihan Wang, Yang Yang, Zhi Liu, and Yifan Zhen. This review discusses the application of deep neural networks in recognizing human actions from videos. You can find it at this link: <http://arxiv.org/pdf/2305.15692v1>

 2. The second paper is titled "A Video Recognition Method by using Adaptive Structural Learning of Long Short Term Memory based Deep Belief Network" by Shin Kamada and Takumi Ichimur. This study proposes a method for video recognition using an adaptive structural learning LSTM-DBN model. You can access it at this link: <http://arxiv.org/pdf/1909.13480v1>

 3. Lastly, there's the paper "3D Convolutional Neural Networks for Ultrasound-Based Silent Speech Interfaces" by László Tóth and Amin Honarmandi Shandi. Although it focuses on silent speech interfaces using ultrasound videos, it also employs 3D CNNs and LSTM networks for action recognition. You can read it here: <http://arxiv.org/pdf/2104.11532v1>

 I hope these resources will help you in your research! Let me know if there's anything else I can assist you with. Have a great day!
In [17]:
!pip freeze > requirements.txt
Notebook metadata: kernel "langchain_llms" (python3), language Python 3.11.2, nbformat 4.2.
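The cells above run the two model calls by hand; for reference, here is a minimal sketch of the same two-step flow wrapped in a single helper. It only reuses objects defined in this notebook (model, response, the two prompt builders, utils.trimming); the helper name and the max_tokens defaults are illustrative, and json.loads will still raise if the model does not emit valid JSON.

def answer_question(question, max_tokens=1024):
    """Two-step flow: extract query arguments, then answer from the retrieved records."""
    # Step 1: ask the model for a JSON block describing what to look up.
    raw = model(prompt=extract_keyword_prompt(question),
                temperature=0.0, max_tokens=512)
    args = json.loads(utils.trimming(raw["choices"][0]["text"]))
    # Step 2: query the databases; if nothing was retrieved, return the direct answer.
    contexts, records = response(args)
    if not records:
        return contexts
    final = model(prompt=make_answer_prompt(question, contexts),
                  temperature=0.0, max_tokens=max_tokens)
    return final["choices"][0]["text"]

# Example: print(answer_question("Recommend me some papers on LSTM-based action recognition"))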
utils.py
ADDED
import chromadb
from chromadb import Documents, EmbeddingFunction, Embeddings
from transformers import AutoModel
import json
from numpy.linalg import norm
import sqlite3
import urllib.request

class JinaAIEmbeddingFunction(EmbeddingFunction):
    def __init__(self, model):
        super().__init__()
        self.model = model

    def __call__(self, input: Documents) -> Embeddings:
        embeddings = self.model.encode(input)
        return embeddings.tolist()

class ArxivSQL:
    def __init__(self, table="arxivsql", name="arxiv_records_sql"):
        self.con = sqlite3.connect(name)
        self.cur = self.con.cursor()
        self.table = table

    def query(self, title="", author=[]):
        if len(title) > 0:
            query_title = 'title like "%{}%"'.format(title)
        else:
            query_title = "True"
        if len(author) > 0:
            # build "author like '%A%' or author like '%B%' ..."
            query_author = ""
            for auth in author:
                query_author += "author like '%{}%' or ".format(auth)
            query_author = query_author[:-4]
        else:
            query_author = "True"
        query = "select * from {} where {} and {}".format(self.table, query_title, query_author)
        result = self.cur.execute(query)
        return result.fetchall()

    def query_id(self, ids=[]):
        query = "select * from {} where id in (".format(self.table)
        for id in ids:
            query += "'" + id + "',"
        query = query[:-1] + ")"
        result = self.cur.execute(query)
        return result.fetchall()

    def add(self, crawl_records):
        """
        Add crawl_records (list) obtained from arxiv_crawlers.
        A record is a list of 8 columns:
        [topic, id, updated, published, title, author, link, summary]
        Returns a string describing any records that failed to insert (empty if all succeeded).
        """
        results = ""
        for record in crawl_records:
            try:
                # note: values are interpolated directly into the statement; assumes trusted crawler output
                query = """insert into arxivsql values("{}","{}","{}","{}","{}","{}","{}")""".format(
                    record[1][21:],
                    record[0],
                    record[4].replace('"', "'"),
                    process_authors_str(record[5]),
                    record[2][:10],
                    record[3][:10],
                    record[6]
                )
                self.cur.execute(query)
                self.con.commit()
            except Exception as e:
                # collect the error and the offending query instead of aborting the whole batch
                results += str(e)
                results += "\n" + query + "\n"
        return results
class ArxivChroma:
    """
    An interface to arxivdb that only supports querying and addition.
    It does not support editing or deletion.
    """
    def __init__(self, table="arxiv_records", name="arxivdb/"):
        self.client = chromadb.PersistentClient(name)
        self.model = AutoModel.from_pretrained('jinaai/jina-embeddings-v2-base-en',
                                               trust_remote_code=True,
                                               cache_dir='models')
        self.collection = self.client.get_or_create_collection(table,
                                                               embedding_function=JinaAIEmbeddingFunction(
                                                                   model=self.model
                                                               ))

    def query_relevant(self, keywords, query_texts, n_results=3):
        """
        Perform a query using a list of keywords (str),
        together with a relevant query string.
        """
        contains = []
        for keyword in keywords:
            contains.append({"$contains": keyword})
        return self.collection.query(
            query_texts=query_texts,
            where_document={
                "$or": contains
            },
            n_results=n_results,
        )

    def query_exact(self, id):
        ids = ["{}_{}".format(id, j) for j in range(0, 10)]
        return self.collection.get(ids=ids)

    def add(self, crawl_records):
        """
        Add crawl_records (list) obtained from arxiv_crawlers.
        A record is a list of 8 columns:
        [topic, id, updated, published, title, author, link, summary]
        Return the final length of the database table.
        """
        for record in crawl_records:
            embed_text = """
            Topic: {},
            Title: {},
            Summary: {}
            """.format(record[0], record[4], record[7])
            chunks = chunk_text_with_overlap(embed_text)
            ids = [record[1][21:] + "_" + str(j) for j in range(len(chunks))]
            paper_ids = [{"paper_id": record[1][21:]} for _ in range(len(chunks))]
            self.collection.add(
                documents=chunks,
                metadatas=paper_ids,
                ids=ids
            )
        return self.collection.count()

def chunk_text_with_overlap(text, max_char=400, overlap=100):
    """
    Chunk a long text into several chunks, each about 300-400 characters long,
    while making sure no word is cut in half. It also keeps an overlap of a
    specified length between consecutive chunks.

    Args:
        text: The long text to be chunked.
        max_char: The maximum number of characters per chunk (default: 400).
        overlap: The desired overlap between consecutive chunks (default: 100).

    Returns:
        A list of chunks.
    """
    chunks = []
    current_chunk = ""
    words = text.split()
    for word in words:
        # Check if adding the word would exceed the chunk limit (including overlap)
        if len(current_chunk) + len(word) + 1 >= max_char:
            chunks.append(current_chunk)
            # start the next chunk with the last `overlap` characters, cut at a word boundary
            split_point = current_chunk.find(" ", len(current_chunk) - overlap)
            current_chunk = current_chunk[split_point:] + " " + word
        else:
            current_chunk += " " + word
    # Add the last chunk (including potential overlap)
    chunks.append(current_chunk.strip())
    return chunks

def trimming(txt):
    """Keep only the outermost {...} block of a model answer."""
    start = txt.find("{")
    end = txt.rfind("}")
    return txt[start:end+1]

def extract_tag(txt, tagname):
    """Return the text between <tagname> and </tagname>."""
    return txt[txt.find("<"+tagname+">")+len(tagname)+2:txt.find("</"+tagname+">")]

def get_record(extract):
    """Parse one <entry> block of the arXiv Atom feed into a record list."""
    id = extract_tag(extract, "id")
    updated = extract_tag(extract, "updated")
    published = extract_tag(extract, "published")
    title = extract_tag(extract, "title").replace("\n ", "").strip()
    summary = extract_tag(extract, "summary").replace("\n", "").strip()
    authors = []
    while extract.find("<author>") != -1:
        author = extract_tag(extract, "name")
        extract = extract[extract.find("</author>")+9:]
        authors.append(author)
    pattern = '<link title="pdf" href="'
    link_start = extract.find(pattern)
    link = extract[link_start+len(pattern):extract.find("rel=", link_start)-2]
    return [id, updated, published, title, authors, link, summary]

def choose_topic(summary):
    """Pick the topic whose description embedding is most similar to the summary embedding."""
    model_embedding = AutoModel.from_pretrained('jinaai/jina-embeddings-v2-base-en',
                                                trust_remote_code=True,
                                                cache_dir='models')
    embed = model_embedding.encode(summary)
    cos_sim = lambda a, b: (a @ b.T) / (norm(a)*norm(b))
    descriptions = json.load(open("topic_descriptions"))
    topic = ""
    max_sim = 0.
    for key in descriptions:
        sim = cos_sim(embed, model_embedding.encode(descriptions[key]))
        if sim > max_sim:
            topic = key
            max_sim = sim
    return topic

def crawl_arxiv(keyword_list, max_results=100):
    """Query the arXiv export API for the given keywords and return parsed records."""
    baseurl = 'http://export.arxiv.org/api/query?search_query='
    records = []
    url = baseurl
    for i, keyword in enumerate(keyword_list):
        if i == 0:
            url = baseurl + 'all:' + keyword
        else:
            url = url + '+OR+' + 'all:' + keyword
    url = url + '&max_results=' + str(max_results)
    url = url.replace(' ', '%20')
    try:
        arxiv_page = urllib.request.urlopen(url, timeout=100).read()
        xml = str(arxiv_page, encoding="utf-8")
        while xml.find("<entry>") != -1:
            extract = xml[xml.find("<entry>")+7:xml.find("</entry>")]
            xml = xml[xml.find("</entry>")+8:]
            extract = get_record(extract)
            topic = choose_topic(extract[6])
            records.append([topic, *extract])
        return records
    except Exception as e:
        return "Error: " + str(e)

def process_authors_str(authors):
    """Input a list of authors, return a string representing the authors."""
    text = ""
    for author in authors:
        text += author + ", "
    return text[:-2]

def process_authors_list(string):
    """Input a string of authors, return a list of authors."""
    authors = []
    list_auth = string.split("and")
    for author in list_auth:
        author = author.strip()
        if author != "et al.":
            authors.append(author)
    return authors
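For reference, a minimal sketch of how these helpers are meant to fit together when populating both stores. The keyword list is purely illustrative, and it assumes the arxivsql table and the topic_descriptions file already exist:

if __name__ == "__main__":
    db = ArxivChroma()
    sqldb = ArxivSQL()
    # crawl a few records, then index them in both the SQL table and the vector store
    records = crawl_arxiv(["action recognition", "LSTM"], max_results=20)
    if isinstance(records, str):  # crawl_arxiv returns an "Error: ..." string on failure
        print(records)
    else:
        errors = sqldb.add(records)               # returns a string describing failed inserts, if any
        print(errors or "SQL insert ok")
        print("Chroma chunks:", db.add(records))  # returns the collection size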