Datasets
Modalities: Text
Languages: English
Libraries: Datasets
File size: 3,410 bytes
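"""Compute summary statistics for the filtered T-REx splits.

For every (minimum entity frequency, maximum predicate frequency) variant,
count triples, unique predicates, and unique entities in the train,
validation, and full files, print the tables as markdown, and export them
to data/stats.csv and the per-variant predicate lists to data/predicates.json.
"""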
import json
from itertools import product

import pandas as pd


# filtering thresholds that define the dataset variants: minimum entity
# frequency and maximum predicate frequency
parameters_min_e_freq = [1, 2, 3, 4]
parameters_max_p_freq = [100, 50, 25, 10]

stats = []      # one row of summary statistics per parameter combination
predicate = {}  # unique predicates per dataset variant (and the test set)

def read_jsonl(path):
    """Load a JSON-lines file into a list of dictionaries."""
    with open(path) as f:
        return [json.loads(line) for line in f if line.strip()]


# compute summary statistics for every combination of filtering thresholds
for min_e_freq, max_p_freq in product(parameters_min_e_freq, parameters_max_p_freq):
    prefix = f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}"

    train = read_jsonl(f"{prefix}.train.jsonl")
    df_train = pd.DataFrame(train)

    validation = read_jsonl(f"{prefix}.validation.jsonl")
    df_validation = pd.DataFrame(validation)

    full = read_jsonl(f"{prefix}.jsonl")
    df_full = pd.DataFrame(full)
    predicate[f"min_entity_{min_e_freq}_max_predicate_{max_p_freq}"] = df_full['predicate'].unique().tolist()

    # record triple, predicate, and entity counts for this configuration
    stats.append({
        "data": f"filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}",
        "number of triples (train)": len(train),
        "number of triples (validation)": len(validation),
        "number of triples (all)": len(full),
        "number of unique predicates (train)": df_train['predicate'].nunique(),
        "number of unique predicates (validation)": df_validation['predicate'].nunique(),
        "number of unique predicates (all)": df_full['predicate'].nunique(),
        "number of unique entities (train)": len(set(df_train['object']) | set(df_train['subject'])),
        "number of unique entities (validation)": len(set(df_validation['object']) | set(df_validation['subject'])),
        "number of unique entities (all)": len(set(df_full['object']) | set(df_full['subject'])),
    })

# assemble the summary table, indexed by dataset variant, and format
# counts with thousands separators for the markdown rendering
df = pd.DataFrame(stats)
df.index = df.pop("data")
for c in df.columns:
    df[c] = df[c].map('{:,d}'.format)

print(df.to_markdown())

with open(f"data/t_rex.filter_unified.test.jsonl") as f:
    test = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
    df_test = pd.DataFrame(test)
    predicate["test"] = df_test['predicate'].unique().tolist()
df_test_stats = pd.DataFrame([{
    "number of triples (test)": len(df_test),
    "number of unique predicates (test)": df_test['predicate'].nunique(),
    "number of unique entities (test)": len(set(df_test['object']) | set(df_test['subject'])),
}])

for c in df_test_stats.columns:
    df_test_stats[c] = df_test_stats[c].map('{:,d}'.format)
print(df_test_stats.to_markdown(index=False))

# broadcast the shared test-split statistics onto every variant's row
df["number of triples (test)"] = df_test_stats["number of triples (test)"].values[0]
df["number of unique predicates (test)"] = df_test_stats["number of unique predicates (test)"].values[0]
df["number of unique entities (test)"] = df_test_stats["number of unique entities (test)"].values[0]
df.pop("number of triples (all)")
df.pop("number of unique predicates (all)")
df.pop("number of unique entities (all)")
df = df[sorted(df.columns)]
df.to_csv("data/stats.csv")

# export the unique predicates observed in each variant and in the test set
with open("data/predicates.json", "w") as f:
    json.dump(predicate, f)