jpcorb20 committed on
Commit
6d00b84
1 Parent(s): 0095967

Update medical_wikipedia.py

Browse files
Files changed (1) hide show
  1. medical_wikipedia.py +95 -96
medical_wikipedia.py CHANGED
@@ -1,96 +1,95 @@
1
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
- #
3
- # Licensed under the Apache License, Version 2.0 (the "License");
4
- # you may not use this file except in compliance with the License.
5
- # You may obtain a copy of the License at
6
- #
7
- # http://www.apache.org/licenses/LICENSE-2.0
8
- #
9
- # Unless required by applicable law or agreed to in writing, software
10
- # distributed under the License is distributed on an "AS IS" BASIS,
11
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
- # See the License for the specific language governing permissions and
13
- # limitations under the License.
14
- """MedWiki."""
15
-
16
- import os
17
-
18
- import datasets
19
- import pandas as pd
20
-
21
-
22
- _CITATION = """\
23
- @article{corbeil2024iryonlp,
24
- title={IryoNLP at MEDIQA-CORR 2024: Tackling the Medical Error Detection & Correction Task On the Shoulders of Medical Agents},
25
- author={Jean-Philippe Corbeil},
26
- journal={arXiv preprint arXiv:2404.15488},
27
- year={2024}
28
- }
29
- """
30
-
31
- _DESCRIPTION = """\
32
- This is a filtered version of the `Cohere/wikipedia-22-12` on medical topic articles using `MaartenGr/BERTopic_Wikipedia`. Keep note that some articles in the viewer might seem off topic, but usually they are related in some way (e.g. World War I is linked to the Spanish Flu). This is artefacts of some noise in the topic modelling.
33
- """
34
- _HOMEPAGE = ""
35
- _LICENSE = "CC-BY-SA"
36
- _URLS = {
37
- "first_domain": "Cohere/wikipedia-22-12",
38
- }
39
-
40
-
41
- class MedWikiDataset(datasets.GeneratorBasedBuilder):
42
- """Medical Wikipedia Articles."""
43
-
44
- VERSION = datasets.Version("0.0.1")
45
-
46
- BUILDER_CONFIGS = [
47
- datasets.BuilderConfig(name="first_domain", version=VERSION, description="This part of my dataset covers a first domain"),
48
- ]
49
-
50
- DEFAULT_CONFIG_NAME = "first_domain"
51
-
52
- def _info(self):
53
- features = datasets.Features(
54
- {
55
- "wiki_id": datasets.Value("int32"),
56
- "title": datasets.Value("string"),
57
- "text": datasets.Value("string"),
58
- "paragraph_id": datasets.Value("int32"),
59
- }
60
- )
61
-
62
- return datasets.DatasetInfo(
63
- description=_DESCRIPTION,
64
- features=features,
65
- homepage=_HOMEPAGE,
66
- license=_LICENSE,
67
- citation=_CITATION,
68
- )
69
-
70
- def _split_generators(self, dl_manager):
71
- urls = _URLS[self.config.name]
72
- dataset = datasets.load_dataset(urls, "en", trust_remote_code=True, cache_dir="/Users/jcorbeil/Documents/datasets/TEMP")
73
- dl_manager.download_and_extract(urls + "/med_topics.csv")
74
- df = pd.read_csv(os.path.join(self.cache_dir, "med_topics.csv"))
75
- med_wiki_ids = set(df["wiki_id"].values.tolist())
76
- return [
77
- datasets.SplitGenerator(
78
- name=datasets.Split.TRAIN,
79
- gen_kwargs={
80
- "dataset": dataset["train"],
81
- "med_wiki_ids": med_wiki_ids,
82
- },
83
- ),
84
- ]
85
-
86
- def _generate_examples(self, dataset, med_wiki_ids):
87
- count = -1
88
- for data in dataset:
89
- if data["wiki_id"] in med_wiki_ids:
90
- count += 1
91
- yield count, {
92
- "wiki_id": data["wiki_id"],
93
- "title": data["title"],
94
- "text": data["text"],
95
- "paragraph_id": data["paragraph_id"],
96
- }
 
1
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """MedWiki."""
15
+
16
+ import os
17
+
18
+ import datasets
19
+ import pandas as pd
20
+
21
+
22
# BibTeX citation for the accompanying paper (MEDIQA-CORR 2024).
_CITATION = """\
@article{corbeil2024iryonlp,
title={IryoNLP at MEDIQA-CORR 2024: Tackling the Medical Error Detection & Correction Task On the Shoulders of Medical Agents},
author={Jean-Philippe Corbeil},
journal={arXiv preprint arXiv:2404.15488},
year={2024}
}
"""

# Human-readable summary shown on the dataset card.
_DESCRIPTION = """\
This is a filtered version of the `Cohere/wikipedia-22-12` on medical topic articles using `MaartenGr/BERTopic_Wikipedia`. Keep note that some articles in the viewer might seem off topic, but usually they are related in some way (e.g. World War I is linked to the Spanish Flu). This is artefacts of some noise in the topic modelling.
"""
# No dedicated homepage for this dataset.
_HOMEPAGE = ""
# Dataset license identifier string.
_LICENSE = "CC-BY-SA"
# Maps builder-config name -> Hub repository providing the source paragraphs.
_URLS = {
    "first_domain": "Cohere/wikipedia-22-12",
}
39
+
40
+
41
class MedWikiDataset(datasets.GeneratorBasedBuilder):
    """Medical Wikipedia Articles.

    Filters the `Cohere/wikipedia-22-12` English Wikipedia paragraphs down to
    the articles whose `wiki_id` appears in the repo-local `med_topics.csv`
    (medical-topic ids selected via BERTopic — see _DESCRIPTION).
    """

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="first_domain",
            version=VERSION,
            description="This part of my dataset covers a first domain",
        ),
    ]

    DEFAULT_CONFIG_NAME = "first_domain"

    def _info(self):
        """Return the dataset metadata (features schema, description, license, citation)."""
        features = datasets.Features(
            {
                "wiki_id": datasets.Value("int32"),
                "title": datasets.Value("string"),
                "text": datasets.Value("string"),
                "paragraph_id": datasets.Value("int32"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Load the source Wikipedia dump and the medical wiki_id allow-list.

        Returns a single TRAIN split whose generator filters the dump by id.
        """
        source = _URLS[self.config.name]
        # FIX: dropped the hard-coded personal cache_dir
        # ("/Users/jcorbeil/...") so the default, user-local cache is used
        # and the script works on any machine.
        dataset = datasets.load_dataset(source, "en", trust_remote_code=True)
        # FIX: fetch the repo-local csv through the download manager instead
        # of reading it from `self.cache_dir`, where nothing ever placed it
        # (the download call was removed in this revision).
        med_topics_path = dl_manager.download("med_topics.csv")
        df = pd.read_csv(med_topics_path)
        # Set gives O(1) membership tests in _generate_examples.
        med_wiki_ids = set(df["wiki_id"].values.tolist())
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "dataset": dataset["train"],
                    "med_wiki_ids": med_wiki_ids,
                },
            ),
        ]

    def _generate_examples(self, dataset, med_wiki_ids):
        """Yield (key, example) pairs for paragraphs whose wiki_id is in the allow-list.

        Keys are a running 0-based counter over the *kept* rows, so they stay
        dense and unique even though most rows are filtered out.
        """
        count = -1
        for data in dataset:
            if data["wiki_id"] in med_wiki_ids:
                count += 1
                yield count, {
                    "wiki_id": data["wiki_id"],
                    "title": data["title"],
                    "text": data["text"],
                    "paragraph_id": data["paragraph_id"],
                }