gabrielaltay committed
Commit
be90a7a
1 Parent(s): b17e23e

upload hubscripts/n2c2_2008_hub.py to hub from bigbio repo

Files changed (1)
  1. n2c2_2008.py +424 -0
n2c2_2008.py ADDED
@@ -0,0 +1,424 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+
+ """
+ A dataset loader for the n2c2 2008 obesity and comorbidities dataset.
+
+ https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/
+
+ The dataset consists of eight XML files:
+
+ * obesity_patient_records_training.xml
+ * obesity_patient_records_training2.xml
+ * obesity_standoff_annotations_training.xml
+ * obesity_standoff_annotations_training_addendum.xml
+ * obesity_standoff_annotations_training_addendum2.xml
+ * obesity_standoff_annotations_training_addendum3.xml
+ * obesity_patient_records_test.xml
+ * obesity_standoff_annotations_test.xml
+
+ containing patient records as well as textual and intuitive annotations.
+
+
+ The files comprising this dataset must be on the user's local machine
+ in a single directory that is passed to `datasets.load_dataset` via
+ the `data_dir` kwarg. This loader script will read the XML files
+ directly. For example, if the following directory structure exists
+ on the user's local machine,
+
+
+ n2c2_2008
+ ├── obesity_patient_records_training.xml
+ ├── obesity_patient_records_training2.xml
+ ├── obesity_standoff_annotations_training.xml
+ ├── obesity_standoff_annotations_training_addendum.xml
+ ├── obesity_standoff_annotations_training_addendum2.xml
+ ├── obesity_standoff_annotations_training_addendum3.xml
+ ├── obesity_patient_records_test.xml
+ ├── obesity_standoff_annotations_test.xml
+
+
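+ then the dataset can be loaded like this (a minimal sketch: the
+ directory path is a placeholder for wherever the files live, and the
+ config names come from BUILDER_CONFIGS below),
+
+     from datasets import load_dataset
+
+     # assumes this script is available locally; the data_dir path is hypothetical
+     dataset = load_dataset(
+         "n2c2_2008.py",
+         name="n2c2_2008_source",  # or "n2c2_2008_bigbio_text"
+         data_dir="/path/to/n2c2_2008",
+     )
+
+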
+ Data Access
+
+ from https://www.i2b2.org/NLP/DataSets/Main.php
+
+ "As always, you must register AND submit a DUA for access. If you previously
+ accessed the data sets here on i2b2.org, you will need to set a new password
+ for your account on the Data Portal, but your original DUA will be retained."
+
+
+ """
+
+ import os
+ import xml.etree.ElementTree as et
+ from pathlib import Path
+ from typing import Dict, List, Tuple
+
+ import datasets
+
+ from .bigbiohub import text_features
+ from .bigbiohub import BigBioConfig
+ from .bigbiohub import Tasks
+
+ _DATASETNAME = "n2c2_2008"
+ _DISPLAYNAME = "n2c2 2008 Obesity"
+
+ # https://academic.oup.com/jamia/article/16/4/561/766997
+ _LANGUAGES = ['English']
+ _PUBMED = True
+ _LOCAL = True
+ _CITATION = """\
+ @article{uzuner2009recognizing,
+     author = {Uzuner, Ozlem},
+     title = {Recognizing Obesity and Comorbidities in Sparse Data},
+     journal = {Journal of the American Medical Informatics Association},
+     volume = {16},
+     number = {4},
+     pages = {561-570},
+     year = {2009},
+     month = {07},
+     url = {https://doi.org/10.1197/jamia.M3115},
+     doi = {10.1197/jamia.M3115},
+     eprint = {https://academic.oup.com/jamia/article-pdf/16/4/561/2302602/16-4-561.pdf}
+ }
+ """
+
+ _DESCRIPTION = """\
+ The data for the n2c2 2008 obesity challenge consisted of discharge summaries from
+ the Partners HealthCare Research Patient Data Repository. These data were chosen
+ from the discharge summaries of patients who were overweight or diabetic and had
+ been hospitalized for obesity or diabetes sometime since 12/1/04. De-identification
+ was performed semi-automatically. All private health information was replaced with
+ synthetic identifiers.
+
+ The data for the challenge were annotated by two obesity experts from the
+ Massachusetts General Hospital Weight Center. The experts were given a textual task,
+ which asked them to classify each disease (see the list of diseases below) as Present,
+ Absent, Questionable, or Unmentioned based on explicitly documented information in
+ the discharge summaries, e.g., the statement “the patient is obese”. The experts were
+ also given an intuitive task, which asked them to classify each disease as Present,
+ Absent, or Questionable by applying their intuition and judgment to information in
+ the discharge summaries.
+ """
+
+ _HOMEPAGE = "https://portal.dbmi.hms.harvard.edu/projects/n2c2-nlp/"
+
+ _LICENSE = 'Data User Agreement'
+
+ _SUPPORTED_TASKS = [Tasks.TEXT_CLASSIFICATION]
+
+ _CLASS_NAMES = ["present", "absent", "unmentioned", "questionable"]
+ _disease_names = [
+     "Obesity",
+     "Asthma",
+     "CAD",
+     "CHF",
+     "Depression",
+     "Diabetes",
+     "Gallstones",
+     "GERD",
+     "Gout",
+     "Hypercholesterolemia",
+     "Hypertension",
+     "Hypertriglyceridemia",
+     "OA",
+     "OSA",
+     "PVD",
+     "Venous Insufficiency",
+ ]
+
+ _SOURCE_VERSION = "1.0.0"
+ _BIGBIO_VERSION = "1.0.0"
+
+
+ def _map_labels(doc, task):
+     """
+     Map obesity and comorbidity labels.
+     :param doc: a document indexed by id
+     :param task: textual or intuitive annotation task
+     """
+     lmap = {"Y": "present", "N": "absent", "U": "unmentioned", "Q": "questionable"}
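+     # For example (hypothetical values): a doc carrying
+     #   {"textual": {"Obesity": "Y", "Asthma": "U"}}
+     # maps, for task="textual", to
+     #   {"Obesity": "present", "Asthma": "unmentioned", ..., "CHF": None}
+     # with None for every disease absent from the annotation.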
+
+     def _map_label(doc, task, label_name):
+         if label_name in doc[task]:
+             return lmap[doc[task][label_name]]
+         else:
+             return None
+
+     if task in doc:
+         # map every known disease; missing annotations become None
+         return {name: _map_label(doc, task, name) for name in _disease_names}
+     else:
+         return {task: None}
+
+
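+ # The annotation files look roughly like this (a sketch inferred from
+ # the parsing below; <diseaseset> is a hypothetical root element name,
+ # which the loader never matches on):
+ #
+ #   <diseaseset>
+ #     <diseases source="textual">
+ #       <disease name="Obesity">
+ #         <doc id="1" judgment="Y"/>
+ #         ...
+ #       </disease>
+ #       ...
+ #     </diseases>
+ #     ...
+ #   </diseaseset>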
+ def _read_xml(partition, data_dir):
+     """
+     Load the data split.
+     :param partition: train/test
+     :param data_dir: train and test data directory
+     """
+     documents = {}
+     all_diseases = set()
+     notes = tuple()
+     if partition == "train":
+         with open(data_dir / "obesity_patient_records_training.xml") as t1, open(
+             data_dir / "obesity_patient_records_training2.xml"
+         ) as t2:
+             notes1 = t1.read().strip()
+             notes2 = t2.read().strip()
+             notes = (notes1, notes2)
+     elif partition == "test":
+         with open(data_dir / "obesity_patient_records_test.xml") as t1:
+             notes1 = t1.read().strip()
+             notes = (notes1,)
+
+     for file in notes:
+         root = et.fromstring(file)
+         root = root.findall("./docs")[0]
+         for document in root.findall("./doc"):
+             assert document.attrib["id"] not in documents
+             documents[document.attrib["id"]] = {}
+             documents[document.attrib["id"]]["text"] = document.findall("./text")[0].text
+
+     annotation_files = tuple()
+     if partition == "train":
+         with open(data_dir / "obesity_standoff_annotations_training.xml") as t1, open(
+             data_dir / "obesity_standoff_annotations_training_addendum.xml"
+         ) as t2, open(
+             data_dir / "obesity_standoff_annotations_training_addendum2.xml"
+         ) as t3, open(
+             data_dir / "obesity_standoff_annotations_training_addendum3.xml"
+         ) as t4:
+             train1 = t1.read().strip()
+             train2 = t2.read().strip()
+             train3 = t3.read().strip()
+             train4 = t4.read().strip()
+             annotation_files = (train1, train2, train3, train4)
+     elif partition == "test":
+         with open(data_dir / "obesity_standoff_annotations_test.xml") as t1:
+             test1 = t1.read().strip()
+             annotation_files = (test1,)
+
+     for file in annotation_files:
+         root = et.fromstring(file)
+         for diseases_annotation in root.findall("./diseases"):
+             annotation_source = diseases_annotation.attrib["source"]
+             assert isinstance(annotation_source, str)
+             for disease in diseases_annotation.findall("./disease"):
+                 disease_name = disease.attrib["name"]
+                 all_diseases.add(disease_name)
+                 for annotation in disease.findall("./doc"):
+                     doc_id = annotation.attrib["id"]
+                     # every annotation must refer to a known record; check
+                     # membership before indexing into documents
+                     assert doc_id in documents
+                     if annotation_source not in documents[doc_id]:
+                         documents[doc_id][annotation_source] = {}
+                     judgment = annotation.attrib["judgment"]
+                     documents[doc_id][annotation_source][disease_name] = judgment
+     return [
+         {
+             "document_id": str(doc_id),
+             "text": documents[doc_id]["text"],
+             "textual": _map_labels(documents[doc_id], "textual"),
+             "intuitive": _map_labels(documents[doc_id], "intuitive"),
+         }
+         for doc_id in documents
+     ]
+
+
+ class N2C22008ObesityDataset(datasets.GeneratorBasedBuilder):
+     """n2c2 2008 obesity and comorbidities recognition task"""
+
+     SOURCE_VERSION = datasets.Version(_SOURCE_VERSION)
+     BIGBIO_VERSION = datasets.Version(_BIGBIO_VERSION)
+
+     BUILDER_CONFIGS = [
+         BigBioConfig(
+             name="n2c2_2008_source",
+             version=SOURCE_VERSION,
+             description="n2c2_2008 source schema",
+             schema="source",
+             subset_id="n2c2_2008",
+         ),
+         BigBioConfig(
+             name="n2c2_2008_bigbio_text",
+             version=BIGBIO_VERSION,
+             description="n2c2_2008 BigBio schema",
+             schema="bigbio_text",
+             subset_id="n2c2_2008",
+         ),
+     ]
+
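+     # "source" keeps the per-disease textual/intuitive judgments as
+     # structured labels; "bigbio_text" flattens them into the shared
+     # BigBio text schema imported from bigbiohub (text_features).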
+     DEFAULT_CONFIG_NAME = "n2c2_2008_source"
+
+     def _info(self) -> datasets.DatasetInfo:
+
+         if self.config.schema == "source":
+             features = datasets.Features(
+                 {
+                     "document_id": datasets.Value("string"),
+                     "text": datasets.Value("string"),
+                     "labels": [
+                         {
+                             "annotation": datasets.ClassLabel(
+                                 names=["textual", "intuitive"]
+                             ),
+                             "disease_name": datasets.ClassLabel(names=_disease_names),
+                             "label": datasets.ClassLabel(names=_CLASS_NAMES),
+                         }
+                     ],
+                 }
+             )
+
+         elif self.config.schema == "bigbio_text":
+             features = text_features
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=str(_LICENSE),
+             citation=_CITATION,
+         )
+
+     def _split_generators(
+         self, dl_manager: datasets.DownloadManager
+     ) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         if self.config.data_dir is None:
+             raise ValueError(
+                 "This is a local dataset. Please pass the data_dir kwarg to load_dataset."
+             )
+         else:
+             data_dir = self.config.data_dir
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "data_dir": data_dir,
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "data_dir": data_dir,
+                     "split": "test",
+                 },
+             ),
+         ]
+
+     @staticmethod
+     def _get_source_sample(sample):
+         textual_labels = [
+             ("textual", disease_name, sample["textual"][disease_name])
+             for disease_name in sample["textual"].keys()
+             if sample["textual"][disease_name]
+         ]
+         intuitive_labels = [
+             ("intuitive", disease_name, sample["intuitive"][disease_name])
+             for disease_name in sample["intuitive"].keys()
+             if sample["intuitive"][disease_name]
+         ]
+
+         return {
+             "document_id": sample["document_id"],
+             "text": sample["text"],
+             "labels": [
+                 {
+                     "annotation": label[0],
+                     "disease_name": label[1],
+                     "label": label[2],
+                 }
+                 for label in textual_labels + intuitive_labels
+             ],
+         }
+
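+     # A source sample then looks roughly like this (hypothetical values;
+     # diseases whose mapped label is None are filtered out above):
+     #
+     #   {
+     #       "document_id": "1",
+     #       "text": "...discharge summary...",
+     #       "labels": [
+     #           {"annotation": "textual", "disease_name": "Obesity", "label": "present"},
+     #           {"annotation": "intuitive", "disease_name": "CAD", "label": "absent"},
+     #       ],
+     #   }
+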
+     @staticmethod
+     def _get_bigbio_sample(sample_id, sample):
+         textual_labels = [
+             ("textual", disease_name, sample["textual"][disease_name])
+             for disease_name in sample["textual"].keys()
+             if sample["textual"][disease_name]
+         ]
+         intuitive_labels = [
+             ("intuitive", disease_name, sample["intuitive"][disease_name])
+             for disease_name in sample["intuitive"].keys()
+             if sample["intuitive"][disease_name]
+         ]
+
+         return {
+             "id": str(sample_id),
+             "document_id": sample["document_id"],
+             "text": sample["text"],
+             "labels": [
+                 {
+                     "annotation": label[0],
+                     "disease_name": label[1],
+                     "label": label[2],
+                 }
+                 for label in textual_labels + intuitive_labels
+             ],
+         }
+
400
+ def _generate_examples(self, data_dir, split: str) -> Tuple[int, Dict]:
401
+ """Yields examples as (key, example) tuples."""
402
+
403
+ data_dir = Path(data_dir).resolve()
404
+ if split == "train":
405
+ _id = 0
406
+ samples = _read_xml(split, data_dir)
407
+ for sample in samples:
408
+ if self.config.schema == "source":
409
+ yield _id, self._get_source_sample(sample)
410
+
411
+ elif self.config.schema == "bigbio_text":
412
+ yield _id, self._get_bigbio_sample(_id, sample)
413
+ _id += 1
414
+
415
+ elif split == "test":
416
+ _id = 0
417
+ samples = _read_xml(split, data_dir)
418
+ for sample in samples:
419
+ if self.config.schema == "source":
420
+ yield _id, self._get_source_sample(sample)
421
+
422
+ elif self.config.schema == "bigbio_text":
423
+ yield _id, self._get_bigbio_sample(_id, sample)
424
+ _id += 1