"""Compute statistics for each filtered T-REX split, export them to data/stats.csv,
and dump the per-configuration predicate lists to data/predicates.json."""
import json
from itertools import product

import pandas as pd

# Filtering thresholds used for the dataset variants:
# minimum entity frequency and maximum predicate frequency.
parameters_min_e_freq = [1, 2, 3, 4]
parameters_max_p_freq = [100, 50, 25, 10]

stats = []
predicate = {}

for min_e_freq, max_p_freq in product(parameters_min_e_freq, parameters_max_p_freq):

    with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.train.jsonl") as f:
        train = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
    df_train = pd.DataFrame(train)

    with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.validation.jsonl") as f:
        validation = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
    df_validation = pd.DataFrame(validation)

    with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.jsonl") as f:
        full = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
    df_full = pd.DataFrame(full)
    predicate[f"min_entity_{min_e_freq}_max_predicate_{max_p_freq}"] = df_full['predicate'].unique().tolist()

    stats.append({
        "data": f"filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}",
        "number of triples (train)": len(train),
        "number of triples (validation)": len(validation),
        "number of triples (all)": len(full),
        "number of unique predicates (train)": len(df_train['predicate'].unique()),
        "number of unique predicates (validation)": len(df_validation['predicate'].unique()),
        "number of unique predicates (all)": len(df_full['predicate'].unique()),
        "number of unique entities (train)": len(
            list(set(df_train['object'].unique().tolist() + df_train['subject'].unique().tolist()))),
        "number of unique entities (validation)": len(
            list(set(df_validation['object'].unique().tolist() + df_validation['subject'].unique().tolist()))),
        "number of unique entities (all)": len(
            list(set(df_full['object'].unique().tolist() + df_full['subject'].unique().tolist())))
    })

df = pd.DataFrame(stats)
df.index = df.pop("data")
# Format the counts with thousands separators and print the table as markdown.
for c in df.columns:
    df.loc[:, c] = df[c].map('{:,d}'.format)
print(df.to_markdown())

with open(f"data/t_rex.filter_unified.test.jsonl") as f: |
|
test = [json.loads(i) for i in f.read().split('\n') if len(i) > 0] |
|
df_test = pd.DataFrame(test) |
|
predicate["test"] = df_test['predicate'].unique().tolist() |
|
df_test = pd.DataFrame([{ |
|
"number of triples (test)": len(df_test), |
|
"number of unique predicates (test)": len(df_test['predicate'].unique()), |
|
"number of unique entities (test)": len( |
|
list(set(df_test['object'].unique().tolist() + df_test['subject'].unique().tolist())) |
|
) |
|
}]) |
|
|
|
for c in df_test.columns:
    df_test.loc[:, c] = df_test[c].map('{:,d}'.format)
print(df_test.to_markdown(index=False))

df["number of triples (test)"] = df_test["number of triples (test)"].values[0]
df["number of unique predicates (test)"] = df_test["number of unique predicates (test)"].values[0]
df["number of unique entities (test)"] = df_test["number of unique entities (test)"].values[0]
df.pop("number of triples (all)")
df.pop("number of unique predicates (all)")
df.pop("number of unique entities (all)")
df = df[sorted(df.columns)]
df.to_csv("data/stats.csv")

with open("data/predicates.json", "w") as f: |
|
json.dump(predicate, f) |
|
|