asahi417 committed on
Commit 32d7e91
1 Parent(s): d3b6c7f
Files changed (2):
  1. README.md +2 -1
  2. stats.py +16 -10
README.md CHANGED
@@ -40,7 +40,8 @@ We apply filtering to keep triples with alpha-numeric subject and object, as wel
  We reduce the size of the dataset by applying filtering based on the number of predicates and entities in the triples.
  We first remove triples that contain either of subject or object with the occurrence in the dataset that is lower than `min entity`.
  Then, we reduce the number triples in each predicate to be less than `max predicate`.
- If the number of triples in a predicate is higher than `max predicate`, we choose top-`max predicate` triples in terms of the frequency of the subject and the object.
+ If the number of triples in a predicate is higher than `max predicate`,
+ we choose top-`max predicate` triples based on the frequency of the subject and the object, or random sampling.
 
  - number of triples in each configuration
 
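The `min entity` step described in this hunk can be illustrated with a small pandas sketch. This is illustrative only: the column names `subject`/`object` and the threshold value are assumptions, and the dataset's actual implementation is the `stats.py` shown below.

```python
# Hypothetical sketch of the `min entity` filter described in the README hunk above.
# Not the repository's implementation; column names and threshold are assumed.
import pandas as pd

def filter_rare_entities(df: pd.DataFrame, min_entity: int = 3) -> pd.DataFrame:
    # Count how often each entity appears as subject or object across all triples.
    counts = pd.concat([df["subject"], df["object"]]).value_counts()
    frequent = set(counts[counts >= min_entity].index)
    # Keep only triples whose subject and object are both frequent enough.
    return df[df["subject"].isin(frequent) & df["object"].isin(frequent)]
```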
stats.py CHANGED
@@ -1,3 +1,6 @@
+"""
+TODO:
+"""
 import json
 from itertools import product
 
@@ -12,11 +15,15 @@ sns.set_theme(style="whitegrid")
 
 # load filtered data
 tmp = []
+splits = []
 for s in ['train', 'validation', 'test']:
     with open(f"data/t_rex.filter.{s}.jsonl") as f:
-        tmp += [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
+        _tmp = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
+        tmp += _tmp
+        splits += [s] * len(_tmp)
 data = Dataset.from_list(tmp)
 df_main = data.to_pandas()
+df_main['split'] = splits
 
 
 def is_entity(token):
@@ -29,8 +36,7 @@ def filtering(row, min_freq: int = 3, target: str = "subject"):
     return row[target] >= min_freq
 
 
-def main(min_entity_freq, max_pairs_predicate, min_pairs_predicate: int = 1,
-         return_stats: bool = True, random_sampling: bool = True):
+def main(min_entity_freq, max_pairs_predicate, min_pairs_predicate: int = 1, random_sampling: bool = True):
 
     df = df_main.copy()
 
@@ -67,17 +73,17 @@ def main(min_entity_freq, max_pairs_predicate, min_pairs_predicate: int = 1,
         [g if len(g) <= max_pairs_predicate else g.sort_values(by='count_sum', ascending=False).head(max_pairs_predicate) for _, g in
          df_filter.groupby("predicate") if len(g) >= min_pairs_predicate])
 
-    if not return_stats:
-        df_balanced.pop("count_subject")
-        df_balanced.pop("count_object")
-        df_balanced.pop("count_sum")
-        return [i.to_dict() for _, i in df_balanced]
+
+    df_balanced.pop("count_subject")
+    df_balanced.pop("count_object")
+    df_balanced.pop("count_sum")
+    target_data = [i.to_dict() for _, i in df_balanced]
 
     # return distribution
     predicate_dist = df_balanced.groupby("predicate")['text'].count().sort_values(ascending=False).to_dict()
     entity, count = np.unique(df_balanced['object'].tolist() + df_balanced['subject'].tolist(), return_counts=True)
     entity_dist = dict(list(zip(entity.tolist(), count.tolist())))
-    return predicate_dist, entity_dist, len(df_balanced)
+    return predicate_dist, entity_dist, len(df_balanced), target_data
 
 
 if __name__ == '__main__':
@@ -89,7 +95,7 @@ if __name__ == '__main__':
 
     # run filtering with different configs
     for min_e_freq, max_p_freq in candidates:
-        p_dist, e_dist, data_size = main(min_entity_freq=min_e_freq, max_pairs_predicate=max_p_freq)
+        p_dist, e_dist, data_size, new_data = main(min_entity_freq=min_e_freq, max_pairs_predicate=max_p_freq)
         p_dist_full.append(p_dist)
         e_dist_full.append(e_dist)
         data_size_full.append(data_size)
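The new `random_sampling` flag on `main` and the README's mention of random sampling suggest the per-predicate cap can either keep the most frequent pairs or sample at random. The branch itself is not visible in the hunks above, so the following is only a hypothetical sketch of how such a switch could look; the names mirror the list comprehension in the `@@ -67,17 +73,17 @@` hunk, and the `seed` argument is an assumption.

```python
# Hypothetical sketch; the committed stats.py may implement the switch differently.
import pandas as pd

def cap_predicate(g: pd.DataFrame, max_pairs_predicate: int,
                  random_sampling: bool = True, seed: int = 0) -> pd.DataFrame:
    if len(g) <= max_pairs_predicate:
        return g
    if random_sampling:
        # Randomly keep `max_pairs_predicate` triples from this predicate group.
        return g.sample(n=max_pairs_predicate, random_state=seed)
    # Otherwise keep the triples with the most frequent subject/object pairs,
    # as in the frequency-based branch shown in the diff.
    return g.sort_values(by="count_sum", ascending=False).head(max_pairs_predicate)
```

Applied per predicate group, this would stand in for the inline expression in the hunk above, e.g. `pd.concat([cap_predicate(g, max_p) for _, g in df_filter.groupby("predicate") if len(g) >= min_pairs_predicate])`.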