Modalities: Text
Languages: English
Libraries: Datasets
License:
File size: 2,770 Bytes
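
The card lists the Datasets library, so the processed data can presumably be loaded directly; a minimal sketch, assuming a hypothetical repository ID of relbert/t_rex (substitute the dataset's actual path):

from datasets import load_dataset

# repository ID below is an assumption for illustration; replace with the real one
dataset = load_dataset("relbert/t_rex")
print(dataset)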
""" Process raw T-Rex file.
mkdir data_raw
cd data_raw
wget https://figshare.com/ndownloader/files/8760241
unzip 8760241
cd ../
"""

import json
import string
import re
import os
from glob import glob
from tqdm import tqdm

import pandas as pd

# process raw data: flatten the T-REx JSON files into a single JSONL file, one triple per line
if not os.path.exists('data/t_rex.raw.jsonl'):
    os.makedirs('data', exist_ok=True)
    f_writer = open('data/t_rex.raw.jsonl', 'w')
    for i in tqdm(glob("data_raw/*.json")):
        with open(i) as f:
            data = json.load(f)
        for _data in data:
            for triple in _data['triples']:
                p = triple['predicate']['surfaceform']
                if p is None:
                    # fall back to the property ID from the URI when no surface form exists
                    p = os.path.basename(triple['predicate']['uri'])
                o = triple['object']['surfaceform']
                s = triple['subject']['surfaceform']
                if o is None or s is None:
                    # skip triples whose subject or object lacks a surface form
                    continue
                out = {"predicate": p, "object": o, "subject": s, "title": _data["title"], "text": _data["text"]}
                f_writer.write(json.dumps(out) + "\n")
    f_writer.close()
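
# each line of data/t_rex.raw.jsonl is a JSON object of the form:
# {"predicate": ..., "object": ..., "subject": ..., "title": ..., "text": ...}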

# apply filtering to remove noisy instances
stopwords = ["he", "she", "they", "it"]  # generic pronouns are not useful entity mentions
list_alnum = string.ascii_lowercase + '0123456789 '  # characters allowed in a surface form


def filtering(entry):
    """Return True if the triple's subject and object look like clean entity mentions."""

    def _subfilter(token):
        # reject tokens containing any character outside lowercase letters, digits, and spaces
        if len(re.findall(rf'[^{list_alnum}]+', token)) != 0:
            return False
        # reject generic pronouns
        if token in stopwords:
            return False
        # reject URLs and tokens starting with punctuation or symbols
        if token.startswith(("www", ".", ",", "$", "+", "#")):
            return False
        return True

    if not _subfilter(entry["object"].lower()):
        return False
    if not _subfilter(entry["subject"].lower()):
        return False

    # keep the triple only if at least one of subject/object is capitalized,
    # since all-lowercase pairs are rarely named entities
    if entry['object'].islower() and entry['subject'].islower():
        return False

    return True


with open(f"data/t_rex.raw.jsonl") as f:
    data = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
print(f"[raw dataset]: {len(data)} triples, {len(set([i['predicate'] for i in data]))} predicates")
data = [i for i in data if filtering(i)]
df = pd.DataFrame(data)
df = df.drop_duplicates()
print(f"[entity only] : {len(df)} triples, {len(df['predicate'].unique())} predicates")
count = df.groupby("predicate")['title'].count()
df = df[[count[p] >= 3 for p in df['predicate']]]
print(f"[remove rare predicate] : {len(df)} triples, {len(df['predicate'].unique())} predicates")

with open(f"data/t_rex.filter.jsonl", 'w') as f:
    for _, i in df.iterrows():
        f.write(json.dumps(i.to_dict()) + '\n')
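
Once the script finishes, the filtered triples can be inspected straight from the JSONL output; a minimal sketch with pandas, using the output path written by the script above:

import pandas as pd

# load the filtered triples (one JSON object per line)
df = pd.read_json("data/t_rex.filter.jsonl", lines=True)
print(df.head())
print(df["predicate"].value_counts().head(10))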