Gaëtan Caillaut committed
Commit 6453182
1 Parent(s): b6388cf

flatten dataset

Files changed (2):
  1. README.md +6 -8
  2. frwiki_good_pages_el.py +12 -17
README.md CHANGED

@@ -41,14 +41,12 @@ It is intended to be used to train Entity Linking (EL) systems. Links in article
 {
     "title": "Title of the page",
     "qid": "QID of the corresponding Wikidata entity",
-    "text": {
-        "words": ["tokens"],
-        "wikipedia": ["Wikipedia description of each entity"],
-        "wikidata": ["Wikidata description of each entity"],
-        "labels": ["NER labels"],
-        "titles": ["Wikipedia title of each entity"],
-        "qids": ["QID of each entity"],
-    }
+    "words": ["tokens"],
+    "wikipedia": ["Wikipedia description of each entity"],
+    "wikidata": ["Wikidata description of each entity"],
+    "labels": ["NER labels"],
+    "titles": ["Wikipedia title of each entity"],
+    "qids": ["QID of each entity"],
 }
 ```
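To make the flattened layout concrete, here is a minimal sketch of loading the dataset through its loading script and reading the now top-level fields. The script path and the `"train"` split name are assumptions, not taken from this commit; adjust them to the actual repository layout.

```python
# Minimal sketch (assumptions: the loading script sits in the working
# directory and a "train" split exists).
from datasets import load_dataset

ds = load_dataset("./frwiki_good_pages_el.py", split="train")

example = ds[0]
print(example["title"], example["qid"])

# After this commit, the token-level annotations are top-level columns
# instead of being nested under a "text" dict.
for word, label, qid in zip(example["words"], example["labels"], example["qids"]):
    print(word, label, qid)
```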
 
frwiki_good_pages_el.py CHANGED

@@ -47,14 +47,11 @@ def read_file(path):
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = ""
 
-# TODO: Add description of the dataset here
-# You can copy an official description
 _DESCRIPTION = """\
 French Wikipedia dataset for Entity Linking
 """
 
-# TODO: Add a link to an official homepage for the dataset here
-_HOMEPAGE = ""
+_HOMEPAGE = "https://github.com/GaaH/frwiki_good_pages_el"
 
 # TODO: Add the licence for the dataset here if you can find it
 _LICENSE = ""
@@ -94,7 +91,7 @@ def text_to_el_features(doc_qid, doc_title, text, title2qid, title2wikipedia, ti
         mention_title = m.group(1)
         mention = m.group(2)
 
-        mention_qid = title2qid.get(mention_title, "")
+        mention_qid = title2qid.get(mention_title, "").replace("_", " ")
         mention_wikipedia = title2wikipedia.get(mention_title, "")
         mention_wikidata = title2wikidata.get(mention_title, "")
 
@@ -131,9 +128,9 @@ def text_to_el_features(doc_qid, doc_title, text, title2qid, title2wikipedia, ti
             text_dict["wikidata"].extend([None] * len_mention)
         else:
             len_mention_tail = len(mention_words) - 1
-            wikipedia_words = mention_wikipedia.split()
-            wikidata_words = mention_wikidata.split()
-            title_words = mention_title.replace("_", " ").split()
+            # wikipedia_words = mention_wikipedia.split()
+            # wikidata_words = mention_wikidata.split()
+            # title_words = mention_title.replace("_", " ").split()
 
             text_dict["labels"].extend(["B"] + ["I"] * len_mention_tail)
             text_dict["qids"].extend([mention_qid] + [None] * len_mention_tail)
@@ -154,7 +151,7 @@ def text_to_el_features(doc_qid, doc_title, text, title2qid, title2wikipedia, ti
     text_dict["titles"].extend([None] * len_tail)
     text_dict["wikipedia"].extend([None] * len_tail)
     text_dict["wikidata"].extend([None] * len_tail)
-    res["text"] = text_dict
+    res.update(text_dict)
     return res
 
 
@@ -188,14 +185,12 @@ class FrWikiGoodPagesELDataset(datasets.GeneratorBasedBuilder):
         features = datasets.Features({
             "title": datasets.Value("string"),
            "qid": datasets.Value("string"),
-            "text": {
-                "words": [datasets.Value("string")],
-                "wikipedia": [datasets.Value("string")],
-                "wikidata": [datasets.Value("string")],
-                "labels": [datasets.ClassLabel(names=_CLASS_LABELS)],
-                "titles": [datasets.Value("string")],
-                "qids": [datasets.Value("string")],
-            }
+            "words": [datasets.Value("string")],
+            "wikipedia": [datasets.Value("string")],
+            "wikidata": [datasets.Value("string")],
+            "labels": [datasets.ClassLabel(names=_CLASS_LABELS)],
+            "titles": [datasets.Value("string")],
+            "qids": [datasets.Value("string")],
         })
 
         return datasets.DatasetInfo(
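For reference, the change from `res["text"] = text_dict` to `res.update(text_dict)` (together with the flattened `Features` declaration above) simply merges the per-token dict into the example itself. A small illustration with made-up values; the titles, QIDs and labels below are purely illustrative:

```python
# Illustration only; values are made up and the per-token dict is simplified.
text_dict = {
    "words": ["Victor", "Hugo"],
    "labels": ["B", "I"],
    "qids": ["Q535", None],
}

# Before this commit: annotations nested under a "text" key.
res_nested = {"title": "Victor Hugo", "qid": "Q535", "text": text_dict}

# After this commit: res.update(text_dict) puts them at the top level,
# matching the flattened datasets.Features declaration.
res_flat = {"title": "Victor Hugo", "qid": "Q535"}
res_flat.update(text_dict)

assert res_flat["words"] == ["Victor", "Hugo"]
assert "text" not in res_flat
```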