init
Browse files
data/t_rex.filter_unified.jsonl
DELETED
@@ -1,3 +0,0 @@
|
|
1 |
-
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:486a5dc38fd4234f4b515eba60eaf09c9ac5c216589bb2af362092bd9a810413
|
3 |
-
size 408499494
|
|
|
|
|
|
|
|
data/t_rex.filter_unified.min_entity_5.jsonl
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:30a17125e286dc208fa19a3b58ea64a6464b3663d51d3239928a5f4774fe3a75
|
3 |
+
size 1680622480
|
data/t_rex.filter_unified.min_entity_5.train.jsonl
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:c446973c74549a9c6ce4a27355139b22bb78f83ea3484c31d2e31d1bfe9494dd
|
3 |
+
size 1317139435
|
data/t_rex.filter_unified.min_entity_5.validation.jsonl
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:1494754935b2fcdc4bfabf6ae7155efa21b1d64db245cd1edcfee469479a799f
|
3 |
+
size 329092326
|
stats.py
DELETED
@@ -1,37 +0,0 @@
|
|
1 |
-
from itertools import product
|
2 |
-
import pandas as pd
|
3 |
-
from datasets import load_dataset
|
4 |
-
|
5 |
-
|
6 |
-
def get_stats(name):
    """Load one config of the relbert/t_rex dataset and summarise it.

    Args:
        name: Dataset configuration name passed to ``load_dataset``.

    Returns:
        A ``(relation, size)`` pair of DataFrames:
        * ``relation`` — one row per relation type, one column per split,
          holding the number of pairs for that relation in that split
          (missing relations filled with 0).
        * ``size`` — per-split totals: number of pairs and number of
          unique relation types.
    """
    dataset = load_dataset("relbert/t_rex", name)
    split_names = dataset.keys()
    per_relation_counts = []
    split_summaries = []
    for split_name in split_names:
        frame = dataset[split_name].to_pandas()
        split_summaries.append({
            "number of pairs": len(frame),
            "number of unique relation types": len(frame["relation"].unique()),
        })
        # Pairs per relation type within this split.
        per_relation_counts.append(frame.groupby('relation')['head'].count().to_dict())
    relation = pd.DataFrame(
        per_relation_counts,
        index=[f"number of pairs ({s})" for s in split_names],
    ).T
    # Relations absent from a split show up as NaN — report them as 0 pairs.
    relation = relation.fillna(0).astype(int)
    size = pd.DataFrame(split_summaries, index=split_names).T
    return relation, size
|
22 |
-
|
23 |
-
# --- Report for the default configuration -------------------------------
df_relation, df_size = get_stats("filter_unified.min_entity_4_max_predicate_10")
print(f"\n- Number of instances (`filter_unified.min_entity_4_max_predicate_10`) \n\n {df_size.to_markdown()}")
print(f"\n- Number of pairs in each relation type (`filter_unified.min_entity_4_max_predicate_10`) \n\n {df_relation.to_markdown()}")


# --- Size comparison across the whole parameter grid --------------------
parameters_min_e_freq = [1, 2, 3, 4]
parameters_max_p_freq = [100, 50, 25, 10]
df_size_list = []
for e, p in product(parameters_min_e_freq, parameters_max_p_freq):
    _, df_size = get_stats(f"filter_unified.min_entity_{e}_max_predicate_{p}")
    # Only train/validation sizes are compared; drop the test split.
    df_size.pop("test")
    df_size.columns = [f"min_entity_{e}_max_predicate_{p} ({c})" for c in df_size.columns]
    df_size_list.append(df_size)
df_size_list = pd.concat([i.T for i in df_size_list])
print(df_size_list.to_markdown())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|