|
""" Process raw T-Rex file. |
|
mkdir data_raw |
|
cd data_raw |
|
wget https://figshare.com/ndownloader/files/8760241 |
|
unzip 8760241 |
|
cd ../ |
|
""" |
|
|
|
import json
import os
import re
import string
from glob import glob

import pandas as pd
from tqdm import tqdm
|
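# Step 1: flatten every (subject, predicate, object) triple in the raw T-REx
# JSON files into a single JSONL file, one triple per line.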
if not os.path.exists('data/t_rex.raw.jsonl'):
    os.makedirs('data', exist_ok=True)
    with open('data/t_rex.raw.jsonl', 'w') as f_writer:
        for path in tqdm(glob("data_raw/*.json")):
            with open(path) as f:
                data = json.load(f)
            for _data in data:
                for triple in _data['triples']:
                    p = triple['predicate']['surfaceform']
                    if p is None:
                        # Fall back to the property identifier at the end of the URI.
                        p = os.path.basename(triple['predicate']['uri'])
                    o = triple['object']['surfaceform']
                    s = triple['subject']['surfaceform']
                    if o is None or s is None:
                        # Skip triples that lack a surface form for the subject or object.
                        continue
                    out = {"predicate": p, "object": o, "subject": s, "title": _data["title"], "text": _data["text"]}
                    f_writer.write(json.dumps(out) + "\n")
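
# For illustration, each output line is a JSON object shaped like the
# following (hypothetical values, not taken from the actual dump):
# {"predicate": "occupation", "subject": "Dante Alighieri", "object": "poet",
#  "title": "Dante Alighieri", "text": "Dante Alighieri was an Italian poet ..."}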
|
|
|
|
|
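# Step 2: heuristics for keeping only triples whose subject and object look
# like proper named-entity mentions.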
stopwords = ["he", "she", "they", "it"] |
|
list_alnum = string.ascii_lowercase + '0123456789 ' |
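# Tokens are lowercased before checking, so anything outside ASCII lowercase
# letters, digits, and spaces (punctuation, accents, non-Latin scripts) fails
# the character-class regex in _subfilter below.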
|
|
|
|
|
def filtering(entry):
    """Return True if the subject and object both look like proper entity mentions."""

    def _subfilter(token):
        # Reject tokens containing anything other than ASCII lowercase
        # letters, digits, and spaces.
        if len(re.findall(rf'[^{list_alnum}]+', token)) != 0:
            return False
        # Reject pronouns.
        if token in stopwords:
            return False
        # Reject URL-like tokens and tokens starting with punctuation.
        if token.startswith(("www", ".", ",", "$", "+", "#")):
            return False
        return True

    if not _subfilter(entry["object"].lower()):
        return False
    if not _subfilter(entry["subject"].lower()):
        return False
    # Drop triples where neither the object nor the subject carries any
    # capitalization: these are usually not named entities.
    if entry['object'].islower() and entry['subject'].islower():
        return False
    return True
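
# Illustrative behaviour on hypothetical entries (other fields omitted):
#   filtering({"object": "Paris", "subject": "France"})   -> True
#   filtering({"object": "he", "subject": "Paris"})       -> False  (pronoun)
#   filtering({"object": "paris", "subject": "france"})   -> False  (no capitalization)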
|
|
|
|
|
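# Step 3: load the flattened triples, apply the entity filter, deduplicate,
# and drop rare predicates; write the result to data/t_rex.filter.jsonl.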
with open(f"data/t_rex.raw.jsonl") as f: |
|
data = [json.loads(i) for i in f.read().split('\n') if len(i) > 0] |
|
print(f"[raw dataset]: {len(data)} triples, {len(set([i['predicate'] for i in data]))} predicates") |
|
data = [i for i in data if filtering(i)] |
|
df = pd.DataFrame(data) |
|
df = df.drop_duplicates() |
|
print(f"[entity only] : {len(df)} triples, {len(df['predicate'].unique())} predicates") |
|
count = df.groupby("predicate")['title'].count() |
|
df = df[[count[p] >= 3 for p in df['predicate']]] |
|
print(f"[remove rare predicate] : {len(df)} triples, {len(df['predicate'].unique())} predicates") |
|
|
|
with open(f"data/t_rex.filter.jsonl", 'w') as f: |
|
for _, i in df.iterrows(): |
|
f.write(json.dumps(i.to_dict()) + '\n') |
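
# The filtered triples land in data/t_rex.filter.jsonl, one JSON object per
# line, with the same fields as the raw file.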
|
|