first
- app.py +624 -0
- nlp4web_codebase/__init__.py +0 -0
- nlp4web_codebase/ir/__init__.py +0 -0
- nlp4web_codebase/ir/analysis.py +160 -0
- nlp4web_codebase/ir/data_loaders/__init__.py +35 -0
- nlp4web_codebase/ir/data_loaders/dm.py +22 -0
- nlp4web_codebase/ir/data_loaders/sciq.py +86 -0
- nlp4web_codebase/ir/models/__init__.py +21 -0
- requirements.txt +10 -0
- setup.py +37 -0
app.py
ADDED
@@ -0,0 +1,624 @@
# -*- coding: utf-8 -*-
from __future__ import annotations
from dataclasses import dataclass
import pickle
import os
from typing import Iterable, Callable, List, Dict, Optional, Type, TypeVar
from nlp4web_codebase.ir.data_loaders.dm import Document
from collections import Counter
import tqdm
import re
import nltk

nltk.download("stopwords", quiet=True)
from nltk.corpus import stopwords as nltk_stopwords

LANGUAGE = "english"
word_splitter = re.compile(r"(?u)\b\w\w+\b").findall
stopwords = set(nltk_stopwords.words(LANGUAGE))


def word_splitting(text: str) -> List[str]:
    return word_splitter(text.lower())


def lemmatization(words: List[str]) -> List[str]:
    return words  # We ignore lemmatization here for simplicity


def simple_tokenize(text: str) -> List[str]:
    words = word_splitting(text)
    tokenized = list(filter(lambda w: w not in stopwords, words))
    tokenized = lemmatization(tokenized)
    return tokenized


T = TypeVar("T", bound="InvertedIndex")


@dataclass
class PostingList:
    term: str  # The term
    docid_postings: List[int]  # docid_postings[i] is the docid (int) of the i-th posting
    tweight_postings: List[float]  # tweight_postings[i] is the term weight (float) of the i-th posting


@dataclass
class InvertedIndex:
    posting_lists: List[PostingList]  # tid -> posting list of that term
    vocab: Dict[str, int]  # term -> tid
    cid2docid: Dict[str, int]  # collection_id -> docid
    collection_ids: List[str]  # docid -> collection_id
    doc_texts: Optional[List[str]] = None  # docid -> document text

    def save(self, output_dir: str) -> None:
        os.makedirs(output_dir, exist_ok=True)
        with open(os.path.join(output_dir, "index.pkl"), "wb") as f:
            pickle.dump(self, f)

    @classmethod
    def from_saved(cls: Type[T], saved_dir: str) -> T:
        # The pickle file already contains the complete index, so there is no
        # need to construct an empty placeholder first:
        with open(os.path.join(saved_dir, "index.pkl"), "rb") as f:
            index = pickle.load(f)
        return index
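
# Usage sketch (the path below is illustrative, not one this app writes):
#   index.save("output/my_index")                        # creates output/my_index/index.pkl
#   index = InvertedIndex.from_saved("output/my_index")  # restores the pickled index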


# The output of the counting function:
@dataclass
class Counting:
    posting_lists: List[PostingList]
    vocab: Dict[str, int]
    cid2docid: Dict[str, int]
    collection_ids: List[str]
    dfs: List[int]  # tid -> df
    dls: List[int]  # docid -> doc length
    avgdl: float
    nterms: int
    doc_texts: Optional[List[str]] = None


def run_counting(
    documents: Iterable[Document],
    tokenize_fn: Callable[[str], List[str]] = simple_tokenize,
    store_raw: bool = True,  # store the document text in doc_texts
    ndocs: Optional[int] = None,
    show_progress_bar: bool = True,
) -> Counting:
    """Counting TFs, DFs, doc_lengths, etc."""
    posting_lists: List[PostingList] = []
    vocab: Dict[str, int] = {}
    cid2docid: Dict[str, int] = {}
    collection_ids: List[str] = []
    dfs: List[int] = []  # tid -> df
    dls: List[int] = []  # docid -> doc length
    nterms: int = 0
    doc_texts: Optional[List[str]] = []
    for doc in tqdm.tqdm(
        documents,
        desc="Counting",
        total=ndocs,
        disable=not show_progress_bar,
    ):
        if doc.collection_id in cid2docid:
            continue
        collection_ids.append(doc.collection_id)
        docid = cid2docid.setdefault(doc.collection_id, len(cid2docid))
        toks = tokenize_fn(doc.text)
        tok2tf = Counter(toks)
        dls.append(sum(tok2tf.values()))
        for tok, tf in tok2tf.items():
            nterms += tf
            tid = vocab.get(tok, None)
            if tid is None:
                posting_lists.append(
                    PostingList(term=tok, docid_postings=[], tweight_postings=[])
                )
                tid = vocab.setdefault(tok, len(vocab))
            posting_lists[tid].docid_postings.append(docid)
            posting_lists[tid].tweight_postings.append(tf)
            if tid < len(dfs):
                dfs[tid] += 1
            else:
                # A new term starts with df = 1 (this document); appending 0
                # here would undercount every document frequency by one.
                dfs.append(1)
        if store_raw:
            doc_texts.append(doc.text)
        else:
            doc_texts = None
    return Counting(
        posting_lists=posting_lists,
        vocab=vocab,
        cid2docid=cid2docid,
        collection_ids=collection_ids,
        dfs=dfs,
        dls=dls,
        avgdl=sum(dls) / len(dls),
        nterms=nterms,
        doc_texts=doc_texts,
    )
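

# A quick sanity check of run_counting on a toy corpus (these two documents are
# made up for illustration; they are not part of the SciQ data):
toy_docs = [
    Document(collection_id="d1", text="The cat sat on the mat."),
    Document(collection_id="d2", text="The cat chased the dog."),
]
toy_counting = run_counting(documents=iter(toy_docs), show_progress_bar=False)
assert {"cat", "mat", "dog"} <= set(toy_counting.vocab)  # stopwords like "the" are removed
assert toy_counting.dfs[toy_counting.vocab["cat"]] == 2  # "cat" occurs in both documents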


from nlp4web_codebase.ir.data_loaders.sciq import load_sciq

sciq = load_sciq()
counting = run_counting(documents=iter(sciq.corpus), ndocs=len(sciq.corpus))

"""### BM25 Index"""

from dataclasses import asdict, dataclass
import math
import os
from typing import Iterable, List, Optional, Type
import tqdm
from nlp4web_codebase.ir.data_loaders.dm import Document


@dataclass
class BM25Index(InvertedIndex):

    @staticmethod
    def tokenize(text: str) -> List[str]:
        return simple_tokenize(text)

    @staticmethod
    def cache_term_weights(
        posting_lists: List[PostingList],
        total_docs: int,
        avgdl: float,
        dfs: List[int],
        dls: List[int],
        k1: float,
        b: float,
    ) -> None:
        """Compute the BM25 term weights and cache them in the posting lists (in place)."""
        N = total_docs
        for tid, posting_list in enumerate(
            tqdm.tqdm(posting_lists, desc="Regularizing TFs")
        ):
            idf = BM25Index.calc_idf(df=dfs[tid], N=N)
            for i in range(len(posting_list.docid_postings)):
                docid = posting_list.docid_postings[i]
                tf = posting_list.tweight_postings[i]
                dl = dls[docid]
                regularized_tf = BM25Index.calc_regularized_tf(
                    tf=tf, dl=dl, avgdl=avgdl, k1=k1, b=b
                )
                posting_list.tweight_postings[i] = regularized_tf * idf

    @staticmethod
    def calc_regularized_tf(
        tf: int, dl: float, avgdl: float, k1: float, b: float
    ) -> float:
        # Note: the (k1 + 1) factor from the classic BM25 numerator is omitted;
        # it scales every score by the same constant and does not change the ranking.
        return tf / (tf + k1 * (1 - b + b * dl / avgdl))

    @staticmethod
    def calc_idf(df: int, N: int) -> float:
        return math.log(1 + (N - df + 0.5) / (df + 0.5))
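
    # Worked example with illustrative numbers (not computed from the corpus):
    # with N = 12160 documents, a term with df = 10 gets
    #   idf = log(1 + (12160 - 10 + 0.5) / (10 + 0.5)) ≈ 7.05,
    # and tf = 3 in a document of average length (dl = avgdl), with the default
    # k1 = 0.9 and b = 0.4, gets
    #   regularized_tf = 3 / (3 + 0.9 * (1 - 0.4 + 0.4)) = 3 / 3.9 ≈ 0.77,
    # so the cached weight is ≈ 7.05 * 0.77 ≈ 5.4.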

    @classmethod
    def build_from_documents(
        cls: Type[BM25Index],
        documents: Iterable[Document],
        store_raw: bool = True,
        output_dir: Optional[str] = None,
        ndocs: Optional[int] = None,
        show_progress_bar: bool = True,
        k1: float = 0.9,
        b: float = 0.4,
    ) -> BM25Index:
        # Counting TFs, DFs, doc_lengths, etc.:
        counting = run_counting(
            documents=documents,
            tokenize_fn=BM25Index.tokenize,
            store_raw=store_raw,
            ndocs=ndocs,
            show_progress_bar=show_progress_bar,
        )

        # Compute term weights and cache them:
        posting_lists = counting.posting_lists
        total_docs = len(counting.cid2docid)
        BM25Index.cache_term_weights(
            posting_lists=posting_lists,
            total_docs=total_docs,
            avgdl=counting.avgdl,
            dfs=counting.dfs,
            dls=counting.dls,
            k1=k1,
            b=b,
        )

        # Assemble the index (saving is left to the caller via index.save;
        # the output_dir parameter is currently unused):
        index = BM25Index(
            posting_lists=posting_lists,
            vocab=counting.vocab,
            cid2docid=counting.cid2docid,
            collection_ids=counting.collection_ids,
            doc_texts=counting.doc_texts,
        )
        return index


bm25_index = BM25Index.build_from_documents(
    documents=iter(sciq.corpus),
    ndocs=12160,  # == len(sciq.corpus)
    show_progress_bar=True,
)
bm25_index.save("output/bm25_index")

"""### BM25 Retriever"""

from nlp4web_codebase.ir.models import BaseRetriever
from typing import Type
from abc import abstractmethod


class BaseInvertedIndexRetriever(BaseRetriever):

    @property
    @abstractmethod
    def index_class(self) -> Type[InvertedIndex]:
        pass

    def __init__(self, index_dir: str) -> None:
        self.index = self.index_class.from_saved(index_dir)

    def get_term_weights(self, query: str, cid: str) -> Dict[str, float]:
        toks = self.index.tokenize(query)
        target_docid = self.index.cid2docid[cid]
        term_weights = {}
        for tok in toks:
            if tok not in self.index.vocab:
                continue
            tid = self.index.vocab[tok]
            posting_list = self.index.posting_lists[tid]
            for docid, tweight in zip(
                posting_list.docid_postings, posting_list.tweight_postings
            ):
                if docid == target_docid:
                    term_weights[tok] = tweight
                    break
        return term_weights

    def score(self, query: str, cid: str) -> float:
        return sum(self.get_term_weights(query=query, cid=cid).values())

    def retrieve(self, query: str, topk: int = 10) -> Dict[str, float]:
        toks = self.index.tokenize(query)
        docid2score: Dict[int, float] = {}
        for tok in toks:
            if tok not in self.index.vocab:
                continue
            tid = self.index.vocab[tok]
            posting_list = self.index.posting_lists[tid]
            for docid, tweight in zip(
                posting_list.docid_postings, posting_list.tweight_postings
            ):
                docid2score.setdefault(docid, 0)
                docid2score[docid] += tweight
        docid2score = dict(
            sorted(docid2score.items(), key=lambda pair: pair[1], reverse=True)[:topk]
        )
        return {
            self.index.collection_ids[docid]: score
            for docid, score in docid2score.items()
        }


class BM25Retriever(BaseInvertedIndexRetriever):

    @property
    def index_class(self) -> Type[BM25Index]:
        return BM25Index


bm25_retriever = BM25Retriever(index_dir="output/bm25_index")
bm25_retriever.retrieve(
    "What type of diseases occur when the immune system attacks normal body cells?"
)

"""# TASK1: tune b and k1 (4 points)

Tune b and k1 on the **dev** split of SciQ using the metric MAP@10. The evaluation function (`evaluate_map`) is provided. Record the values in `plots_k1` and `plots_b`. Do it in a greedy manner: since b has the larger influence, first tune b (with k1 fixed to its default value 0.9) and then use the best value of b to tune k1.

$${\displaystyle {\text{score}}(D,Q)=\sum _{i=1}^{n}{\text{IDF}}(q_{i})\cdot {\frac {f(q_{i},D)\cdot (k_{1}+1)}{f(q_{i},D)+k_{1}\cdot \left(1-b+b\cdot {\frac {|D|}{\text{avgdl}}}\right)}}}$$
"""

from nlp4web_codebase.ir.data_loaders import Split
import pytrec_eval
import numpy as np


def evaluate_map(rankings: Dict[str, Dict[str, float]], split=Split.dev) -> float:
    metric = "map_cut_10"
    qrels = sciq.get_qrels_dict(split)
    evaluator = pytrec_eval.RelevanceEvaluator(qrels, (metric,))
    qps = evaluator.evaluate(rankings)
    return float(np.mean([qp[metric] for qp in qps.values()]))

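
# A minimal sketch of the greedy tuning described in TASK1, kept commented out
# so the app does not re-build indices on startup. The helper name `tune` and
# the grid values are assumptions for illustration, not part of the original:
# def tune(k1: float, b: float) -> float:
#     index = BM25Index.build_from_documents(
#         documents=iter(sciq.corpus), ndocs=len(sciq.corpus),
#         show_progress_bar=False, k1=k1, b=b,
#     )
#     index.save("output/bm25_index_tuning")
#     retriever = BM25Retriever(index_dir="output/bm25_index_tuning")
#     rankings = {
#         query.query_id: retriever.retrieve(query.text)
#         for query in sciq.get_split_queries(Split.dev)
#     }
#     return evaluate_map(rankings, split=Split.dev)
#
# plots_b = {round(b, 1): tune(k1=0.9, b=b) for b in np.arange(0.0, 1.1, 0.1)}
# best_b = max(plots_b, key=plots_b.get)
# plots_k1 = {round(k1, 1): tune(k1=k1, b=best_b) for k1 in np.arange(0.0, 2.1, 0.1)}
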
"""Example of using the pre-requisite code:"""

# Loading dataset:
from nlp4web_codebase.ir.data_loaders.sciq import load_sciq

sciq = load_sciq()
counting = run_counting(documents=iter(sciq.corpus), ndocs=len(sciq.corpus))

# Building the BM25 index and saving it:
bm25_index = BM25Index.build_from_documents(
    documents=iter(sciq.corpus),
    ndocs=12160,
    show_progress_bar=True,
)
bm25_index.save("output/bm25_index")

# Loading the index and using the BM25 retriever to retrieve:
bm25_retriever = BM25Retriever(index_dir="output/bm25_index")

"""# TASK2: CSC matrix and `CSCBM25Index` (12 points)

Recall that we use Python lists to implement the posting lists, mapping term IDs to the documents in which they appear. This is inefficient due to its naive design. A [Compressed Sparse Column matrix](https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csc_matrix.html) is actually very suitable for storing posting lists and can boost efficiency.

## TASK2.1: learn about `scipy.sparse.csc_matrix` (2 points)

Convert the matrix
\begin{bmatrix}
0 & 1 & 0 & 3 \\
10 & 2 & 1 & 0 \\
0 & 0 & 0 & 9
\end{bmatrix}
to a `csc_matrix` by specifying `data`, `indices`, `indptr` and `shape`.
"""

from scipy.sparse import csc_matrix  # the public import path (scipy.sparse._csc is private)

input_matrix = [[0, 1, 0, 3], [10, 2, 1, 0], [0, 0, 0, 9]]
data = None
indices = None
indptr = None
shape = None
## YOUR_CODE_STARTS_HERE
# Please assign the values to data, indices, indptr and shape
# One can just do it in a hard-coded manner

input_matrix_np = np.array(input_matrix)

# CSC stores the nonzero entries column by column. Calling nonzero() on the
# transpose yields the nonzero positions in column-major order of the original
# matrix: the first index array holds column indices, the second row indices.
col_idx, row_idx = input_matrix_np.T.nonzero()

data = input_matrix_np[row_idx, col_idx]

indices = row_idx  # row index of each nonzero entry

# indptr[j]:indptr[j+1] delimits the entries of column j:
indptr = np.zeros(input_matrix_np.shape[1] + 1, dtype=int)
np.add.at(indptr, col_idx + 1, 1)
indptr = np.cumsum(indptr)

shape = input_matrix_np.shape

## YOUR_CODE_ENDS_HERE
output_matrix = csc_matrix((data, indices, indptr), shape=shape)

## TEST_CASES (should be 3 and 11)
print((output_matrix.indices + output_matrix.data).tolist()[2])
print((output_matrix.indices + output_matrix.data).tolist()[-1])

## RESULT_CHECKING_POINT
print((output_matrix.indices + output_matrix.data).tolist())
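
# Sanity check (an addition for illustration, not part of the original task):
# building the CSC matrix directly from the dense array must give an identical
# matrix.
assert (output_matrix != csc_matrix(np.array(input_matrix))).nnz == 0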

"""## TASK2.2: implement `CSCBM25Index` (4 points)

Implement `CSCBM25Index` by completing the missing code. Note that `CSCInvertedIndex` is similar to the `InvertedIndex` we discussed in class; the main difference is that the posting lists are represented by a CSC sparse matrix.
"""


@dataclass
class CSCInvertedIndex:
    posting_lists_matrix: csc_matrix  # shape (ndocs, nterms); column tid is the posting list of term tid
    vocab: Dict[str, int]  # term -> tid
    cid2docid: Dict[str, int]  # collection_id -> docid
    collection_ids: List[str]  # docid -> collection_id
    doc_texts: Optional[List[str]] = None  # docid -> document text

    def save(self, output_dir: str) -> None:
        os.makedirs(output_dir, exist_ok=True)
        with open(os.path.join(output_dir, "index.pkl"), "wb") as f:
            pickle.dump(self, f)

    @classmethod
    def from_saved(cls: Type[T], saved_dir: str) -> T:
        # As with InvertedIndex.from_saved, the pickle already holds the
        # complete index:
        with open(os.path.join(saved_dir, "index.pkl"), "rb") as f:
            index = pickle.load(f)
        return index


@dataclass
class CSCBM25Index(CSCInvertedIndex):

    @staticmethod
    def tokenize(text: str) -> List[str]:
        return simple_tokenize(text)

    @staticmethod
    def cache_term_weights(
        posting_lists: List[PostingList],
        total_docs: int,
        avgdl: float,
        dfs: List[int],
        dls: List[int],
        k1: float,
        b: float,
    ) -> csc_matrix:
        """Compute the BM25 term weights and cache them as a CSC matrix."""

        ## YOUR_CODE_STARTS_HERE
        N = total_docs

        # Build the CSC arrays term by term: column tid collects the weights of
        # term tid, and the docids appended during counting are already sorted.
        data = []
        indices = []
        indptr = [0]
        shape = (N, len(posting_lists))
        for tid, posting_list in enumerate(
            tqdm.tqdm(posting_lists, desc="Regularizing TFs")
        ):
            idf = BM25Index.calc_idf(df=dfs[tid], N=N)
            for i in range(len(posting_list.docid_postings)):
                docid = posting_list.docid_postings[i]
                tf = posting_list.tweight_postings[i]
                dl = dls[docid]
                regularized_tf = BM25Index.calc_regularized_tf(
                    tf=tf, dl=dl, avgdl=avgdl, k1=k1, b=b
                )
                data.append(regularized_tf * idf)
                indices.append(docid)
            indptr.append(len(indices))
        return csc_matrix((np.array(data, dtype=np.float32), indices, indptr), shape=shape)
        ## YOUR_CODE_ENDS_HERE

    @staticmethod
    def calc_regularized_tf(
        tf: int, dl: float, avgdl: float, k1: float, b: float
    ) -> float:
        return tf / (tf + k1 * (1 - b + b * dl / avgdl))

    @staticmethod
    def calc_idf(df: int, N: int) -> float:
        return math.log(1 + (N - df + 0.5) / (df + 0.5))

    @classmethod
    def build_from_documents(
        cls: Type[CSCBM25Index],
        documents: Iterable[Document],
        store_raw: bool = True,
        output_dir: Optional[str] = None,
        ndocs: Optional[int] = None,
        show_progress_bar: bool = True,
        k1: float = 0.9,
        b: float = 0.4,
    ) -> CSCBM25Index:
        # Counting TFs, DFs, doc_lengths, etc.:
        counting = run_counting(
            documents=documents,
            tokenize_fn=CSCBM25Index.tokenize,
            store_raw=store_raw,
            ndocs=ndocs,
            show_progress_bar=show_progress_bar,
        )

        # Compute term weights and cache them:
        posting_lists = counting.posting_lists
        total_docs = len(counting.cid2docid)
        posting_lists_matrix = CSCBM25Index.cache_term_weights(
            posting_lists=posting_lists,
            total_docs=total_docs,
            avgdl=counting.avgdl,
            dfs=counting.dfs,
            dls=counting.dls,
            k1=k1,
            b=b,
        )

        # Assemble the index (as above, output_dir is currently unused):
        index = CSCBM25Index(
            posting_lists_matrix=posting_lists_matrix,
            vocab=counting.vocab,
            cid2docid=counting.cid2docid,
            collection_ids=counting.collection_ids,
            doc_texts=counting.doc_texts,
        )
        return index


class BaseCSCInvertedIndexRetriever(BaseRetriever):

    @property
    @abstractmethod
    def index_class(self) -> Type[CSCInvertedIndex]:
        pass

    def __init__(self, index_dir: str) -> None:
        self.index = self.index_class.from_saved(index_dir)

    def get_term_weights(self, query: str, cid: str) -> Dict[str, float]:
        ## YOUR_CODE_STARTS_HERE
        toks = self.index.tokenize(query)
        target_docid = self.index.cid2docid[cid]
        w_matrix = self.index.posting_lists_matrix
        term_weights = {}
        for tok in toks:
            if tok not in self.index.vocab:
                continue
            tid = self.index.vocab[tok]
            w = w_matrix[target_docid, tid]
            if w > 0:
                term_weights[tok] = float(w)
        return term_weights
        ## YOUR_CODE_ENDS_HERE

    def score(self, query: str, cid: str) -> float:
        return sum(self.get_term_weights(query=query, cid=cid).values())

    def retrieve(self, query: str, topk: int = 10) -> Dict[str, float]:
        ## YOUR_CODE_STARTS_HERE
        toks = self.index.tokenize(query)
        docid2score: Dict[int, float] = {}
        for tok in toks:
            if tok not in self.index.vocab:
                continue
            tid = self.index.vocab[tok]
            # Column tid is the (sparse) posting list of this term; iterate
            # only over its nonzero entries instead of over every docid:
            posting_list = self.index.posting_lists_matrix.getcol(tid).tocoo()
            for docid, tweight in zip(posting_list.row, posting_list.data):
                docid2score.setdefault(int(docid), 0.0)
                docid2score[int(docid)] += float(tweight)
        docid2score = dict(
            sorted(docid2score.items(), key=lambda pair: pair[1], reverse=True)[:topk]
        )
        return {
            self.index.collection_ids[docid]: score
            for docid, score in docid2score.items()
        }
        ## YOUR_CODE_ENDS_HERE


class CSCBM25Retriever(BaseCSCInvertedIndexRetriever):

    @property
    def index_class(self) -> Type[CSCBM25Index]:
        return CSCBM25Index


import gradio as gr
from typing import TypedDict


class Hit(TypedDict):
    cid: str
    score: float
    text: str


demo: Optional[gr.Interface] = None  # Assign your gradio demo to this variable
return_type = List[Hit]

## YOUR_CODE_STARTS_HERE
csc_bm25_index = CSCBM25Index.build_from_documents(
    documents=iter(sciq.corpus),
    ndocs=12160,
    show_progress_bar=False,
)
csc_bm25_index.save("output/csc_bm25_index")
csc_bm25_retriever = CSCBM25Retriever(index_dir="output/csc_bm25_index")


def BM25_search(term):
    result = csc_bm25_retriever.retrieve(term)
    # Look up the document texts via the retriever's own index:
    hits = [
        Hit(cid=k, score=v, text=sciq.corpus[csc_bm25_retriever.index.cid2docid[k]].text)
        for k, v in result.items()
    ]
    return hits


# A Textbox stringifies the returned list of hits; gr.JSON() would render it
# as structured output instead.
demo = gr.Interface(fn=BM25_search, inputs=gr.Textbox(), outputs=gr.Textbox())
## YOUR_CODE_ENDS_HERE
demo.launch()
nlp4web_codebase/__init__.py
ADDED
File without changes

nlp4web_codebase/ir/__init__.py
ADDED
File without changes

nlp4web_codebase/ir/analysis.py
ADDED
@@ -0,0 +1,160 @@
import os
from typing import Dict, List, Optional, Protocol
import pandas as pd
import tqdm
import ujson
from nlp4web_codebase.ir.data_loaders import IRDataset


def round_dict(obj: Dict[str, float], ndigits: int = 4) -> Dict[str, float]:
    return {k: round(v, ndigits=ndigits) for k, v in obj.items()}


def sort_dict(obj: Dict[str, float], reverse: bool = True) -> Dict[str, float]:
    return dict(sorted(obj.items(), key=lambda pair: pair[1], reverse=reverse))


def save_ranking_results(
    output_dir: str,
    query_ids: List[str],
    rankings: List[Dict[str, float]],
    query_performances_lists: List[Dict[str, float]],
    cid2tweights_lists: Optional[List[Dict[str, Dict[str, float]]]] = None,
):
    os.makedirs(output_dir, exist_ok=True)
    output_path = os.path.join(output_dir, "ranking_results.jsonl")
    rows = []
    for i, (query_id, ranking, query_performances) in enumerate(
        zip(query_ids, rankings, query_performances_lists)
    ):
        row = {
            "query_id": query_id,
            "ranking": round_dict(ranking),
            "query_performances": round_dict(query_performances),
            "cid2tweights": {},
        }
        if cid2tweights_lists is not None:
            row["cid2tweights"] = {
                cid: round_dict(tws) for cid, tws in cid2tweights_lists[i].items()
            }
        rows.append(row)
    pd.DataFrame(rows).to_json(
        output_path,
        orient="records",
        lines=True,
    )


class TermWeightingFunction(Protocol):
    def __call__(self, query: str, cid: str) -> Dict[str, float]: ...


def compare(
    dataset: IRDataset,
    results_path1: str,
    results_path2: str,
    output_dir: str,
    main_metric: str = "recip_rank",
    system1: Optional[str] = None,
    system2: Optional[str] = None,
    term_weighting_fn1: Optional[TermWeightingFunction] = None,
    term_weighting_fn2: Optional[TermWeightingFunction] = None,
) -> None:
    os.makedirs(output_dir, exist_ok=True)
    df1 = pd.read_json(results_path1, orient="records", lines=True)
    df2 = pd.read_json(results_path2, orient="records", lines=True)
    assert len(df1) == len(df2)
    all_qrels = {}
    for split in dataset.split2qrels:
        all_qrels.update(dataset.get_qrels_dict(split))
    qid2query = {query.query_id: query for query in dataset.queries}
    cid2doc = {doc.collection_id: doc for doc in dataset.corpus}
    diff_col = f"{main_metric}:qp1-qp2"
    merged = pd.merge(df1, df2, on="query_id", how="outer")
    rows = []
    for _, example in tqdm.tqdm(merged.iterrows(), desc="Comparing", total=len(merged)):
        docs = {cid: cid2doc[cid].text for cid in dict(example["ranking_x"])}
        docs.update({cid: cid2doc[cid].text for cid in dict(example["ranking_y"])})
        query_id = example["query_id"]
        row = {
            "query_id": query_id,
            "query": qid2query[query_id].text,
            diff_col: example["query_performances_x"][main_metric]
            - example["query_performances_y"][main_metric],
            "ranking1": ujson.dumps(example["ranking_x"], indent=4),
            "ranking2": ujson.dumps(example["ranking_y"], indent=4),
            "docs": ujson.dumps(docs, indent=4),
            "query_performances1": ujson.dumps(
                example["query_performances_x"], indent=4
            ),
            "query_performances2": ujson.dumps(
                example["query_performances_y"], indent=4
            ),
            "qrels": ujson.dumps(all_qrels[query_id], indent=4),
        }
        if term_weighting_fn1 is not None and term_weighting_fn2 is not None:
            all_cids = set(example["ranking_x"]) | set(example["ranking_y"])
            cid2tweights1 = {}
            cid2tweights2 = {}
            ranking1 = {}
            ranking2 = {}
            for cid in all_cids:
                tweights1 = term_weighting_fn1(query=qid2query[query_id].text, cid=cid)
                tweights2 = term_weighting_fn2(query=qid2query[query_id].text, cid=cid)
                ranking1[cid] = sum(tweights1.values())
                ranking2[cid] = sum(tweights2.values())
                cid2tweights1[cid] = tweights1
                cid2tweights2[cid] = tweights2
            ranking1 = sort_dict(ranking1)
            ranking2 = sort_dict(ranking2)
            row["ranking1"] = ujson.dumps(ranking1, indent=4)
            row["ranking2"] = ujson.dumps(ranking2, indent=4)
            cid2tweights1 = {cid: cid2tweights1[cid] for cid in ranking1}
            cid2tweights2 = {cid: cid2tweights2[cid] for cid in ranking2}
            row["cid2tweights1"] = ujson.dumps(cid2tweights1, indent=4)
            row["cid2tweights2"] = ujson.dumps(cid2tweights2, indent=4)
        rows.append(row)
    table = pd.DataFrame(rows).sort_values(by=diff_col, ascending=False)
    output_path = os.path.join(output_dir, f"compare-{system1}_vs_{system2}.tsv")
    table.to_csv(output_path, sep="\t", index=False)


# if __name__ == "__main__":
#     # python -m lecture2.bm25.analysis
#     from nlp4web_codebase.ir.data_loaders.sciq import load_sciq
#     from lecture2.bm25.bm25_retriever import BM25Retriever
#     from lecture2.bm25.tfidf_retriever import TFIDFRetriever
#     import numpy as np

#     sciq = load_sciq()
#     system1 = "bm25"
#     system2 = "tfidf"
#     results_path1 = f"output/sciq-{system1}/results/ranking_results.jsonl"
#     results_path2 = f"output/sciq-{system2}/results/ranking_results.jsonl"
#     index_dir1 = f"output/sciq-{system1}"
#     index_dir2 = f"output/sciq-{system2}"
#     compare(
#         dataset=sciq,
#         results_path1=results_path1,
#         results_path2=results_path2,
#         output_dir=f"output/sciq-{system1}_vs_{system2}",
#         system1=system1,
#         system2=system2,
#         term_weighting_fn1=BM25Retriever(index_dir1).get_term_weights,
#         term_weighting_fn2=TFIDFRetriever(index_dir2).get_term_weights,
#     )

#     # bias on #shared_terms of TFIDF:
#     df1 = pd.read_json(results_path1, orient="records", lines=True)
#     df2 = pd.read_json(results_path2, orient="records", lines=True)
#     merged = pd.merge(df1, df2, on="query_id", how="outer")
#     nterms1 = []
#     nterms2 = []
#     for _, row in merged.iterrows():
#         nterms1.append(len(list(dict(row["cid2tweights_x"]).values())[0]))
#         nterms2.append(len(list(dict(row["cid2tweights_y"]).values())[0]))
#     percentiles = (5, 25, 50, 75, 95)
#     print(system1, np.percentile(nterms1, percentiles), np.mean(nterms1).round(2))
#     print(system2, np.percentile(nterms2, percentiles), np.mean(nterms2).round(2))
#     # bm25 [ 3. 4. 5. 7. 11.] 5.64
#     # tfidf [1. 2. 3. 5. 9.] 3.58
nlp4web_codebase/ir/data_loaders/__init__.py
ADDED
@@ -0,0 +1,35 @@
from dataclasses import dataclass
from enum import Enum
from typing import Dict, List
from nlp4web_codebase.ir.data_loaders.dm import Document, Query, QRel


class Split(str, Enum):
    train = "train"
    dev = "dev"
    test = "test"


@dataclass
class IRDataset:
    corpus: List[Document]
    queries: List[Query]
    split2qrels: Dict[Split, List[QRel]]

    def get_stats(self) -> Dict[str, int]:
        stats = {"|corpus|": len(self.corpus), "|queries|": len(self.queries)}
        for split, qrels in self.split2qrels.items():
            stats[f"|qrels-{split}|"] = len(qrels)
        return stats

    def get_qrels_dict(self, split: Split) -> Dict[str, Dict[str, int]]:
        qrels_dict = {}
        for qrel in self.split2qrels[split]:
            qrels_dict.setdefault(qrel.query_id, {})
            qrels_dict[qrel.query_id][qrel.collection_id] = qrel.relevance
        return qrels_dict

    def get_split_queries(self, split: Split) -> List[Query]:
        qrels = self.split2qrels[split]
        qids = {qrel.query_id for qrel in qrels}
        return list(filter(lambda query: query.query_id in qids, self.queries))
nlp4web_codebase/ir/data_loaders/dm.py
ADDED
@@ -0,0 +1,22 @@
from dataclasses import dataclass
from typing import Optional


@dataclass
class Document:
    collection_id: str
    text: str


@dataclass
class Query:
    query_id: str
    text: str


@dataclass
class QRel:
    query_id: str
    collection_id: str
    relevance: int
    answer: Optional[str] = None
nlp4web_codebase/ir/data_loaders/sciq.py
ADDED
@@ -0,0 +1,86 @@
from typing import Dict, List
from nlp4web_codebase.ir.data_loaders import IRDataset, Split
from nlp4web_codebase.ir.data_loaders.dm import Document, Query, QRel
from datasets import load_dataset
import joblib


@(joblib.Memory(".cache").cache)
def load_sciq(verbose: bool = False) -> IRDataset:
    train = load_dataset("allenai/sciq", split="train")
    validation = load_dataset("allenai/sciq", split="validation")
    test = load_dataset("allenai/sciq", split="test")
    data = {Split.train: train, Split.dev: validation, Split.test: test}

    # Each duplicated record is the same to each other:
    df = train.to_pandas() + validation.to_pandas() + test.to_pandas()
    for question, group in df.groupby("question"):
        assert len(set(group["support"].tolist())) == len(group)
        assert len(set(group["correct_answer"].tolist())) == len(group)

    # Build:
    corpus = []
    queries = []
    split2qrels: Dict[str, List[dict]] = {}
    question2id = {}
    support2id = {}
    for split, rows in data.items():
        if verbose:
            print(f"|raw_{split}|", len(rows))
        split2qrels[split] = []
        for i, row in enumerate(rows):
            example_id = f"{split}-{i}"
            support: str = row["support"]
            question = row["question"]
            # Skip records without a support passage, and deduplicate by
            # support and by question:
            if len(support.strip()) == 0:
                continue
            if support in support2id:
                continue
            else:
                support2id[support] = example_id
            if question in question2id:
                continue
            else:
                question2id[question] = example_id
            doc = {"collection_id": example_id, "text": support}
            query = {"query_id": example_id, "text": question}
            qrel = {
                "query_id": example_id,
                "collection_id": example_id,
                "relevance": 1,
                "answer": row["correct_answer"],
            }
            corpus.append(Document(**doc))
            queries.append(Query(**query))
            split2qrels[split].append(QRel(**qrel))

    # Assemble and return:
    return IRDataset(corpus=corpus, queries=queries, split2qrels=split2qrels)


if __name__ == "__main__":
    # python -m nlp4web_codebase.ir.data_loaders.sciq
    import ujson
    import time

    start = time.time()
    dataset = load_sciq(verbose=True)
    print(f"Loading costs: {time.time() - start}s")
    print(ujson.dumps(dataset.get_stats(), indent=4))
    # ________________________________________________________________________________
    # [Memory] Calling __main__--home-kwang-research-nlp4web-ir-exercise-nlp4web-nlp4web-ir-data_loaders-sciq.load_sciq...
    # load_sciq(verbose=True)
    # |raw_train| 11679
    # |raw_dev| 1000
    # |raw_test| 1000
    # ________________________________________________________load_sciq - 7.3s, 0.1min
    # Loading costs: 7.260092735290527s
    # {
    #     "|corpus|": 12160,
    #     "|queries|": 12160,
    #     "|qrels-train|": 10409,
    #     "|qrels-dev|": 875,
    #     "|qrels-test|": 876
    # }
nlp4web_codebase/ir/models/__init__.py
ADDED
@@ -0,0 +1,21 @@
from abc import ABC, abstractmethod
from typing import Any, Dict, Type


class BaseRetriever(ABC):

    @property
    @abstractmethod
    def index_class(self) -> Type[Any]:
        pass

    def get_term_weights(self, query: str, cid: str) -> Dict[str, float]:
        raise NotImplementedError

    @abstractmethod
    def score(self, query: str, cid: str) -> float:
        pass

    @abstractmethod
    def retrieve(self, query: str, topk: int = 10) -> Dict[str, float]:
        pass
requirements.txt
ADDED
@@ -0,0 +1,10 @@
nltk==3.8.1
numpy==1.26.4
scipy==1.13.1
pandas==2.2.2
tqdm==4.66.5
ujson==5.10.0
joblib==1.4.2
datasets==3.0.1
pytrec_eval==0.5
gradio==5.5.0
setup.py
ADDED
@@ -0,0 +1,37 @@
from setuptools import setup, find_packages


with open("README.md", "r", encoding="utf-8") as fh:
    readme = fh.read()

setup(
    name="nlp4web-codebase",
    version="0.0.0",
    author="Kexin Wang",
    author_email="[email protected]",
    description="Codebase of teaching materials for NLP4Web.",
    long_description=readme,
    long_description_content_type="text/markdown",
    url="https://github.com/kwang2049/nlp4web-codebase",
    project_urls={
        "Bug Tracker": "https://github.com/kwang2049/nlp4web-codebase/issues",
    },
    packages=find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.10",
    install_requires=[
        "nltk==3.8.1",
        "numpy==1.26.4",
        "scipy==1.13.1",
        "pandas==2.2.2",
        "tqdm==4.66.5",
        "ujson==5.10.0",
        "joblib==1.4.2",
        "datasets==3.0.1",
        "pytrec_eval==0.5",
    ],
)